author     Jesse Barnes <jbarnes@jbarnes-mobile.amr.corp.intel.com>   2007-09-24 14:41:46 -0700
committer  Jesse Barnes <jbarnes@jbarnes-mobile.amr.corp.intel.com>   2007-09-24 14:41:46 -0700
commit     5cc3083179b19678456905a9122a3d0f04e6f623 (patch)
tree       9b776bdd317aa5af68b7cbf8fabde12e20eccf8a
parent     2a2d02bbc500140a861380df52ce66abcac39312 (diff)
parent     54df1b9ff3b79097fedd8ed7bf54aca30a660cbd (diff)
Merge branch 'master' into modesetting-101 - TTM & typedef removal
Conflicts:

	linux-core/drmP.h
	linux-core/drm_bo.c
	linux-core/drm_drv.c
	linux-core/drm_objects.h
	shared-core/drm.h
	shared-core/i915_dma.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c

Mostly removing typedefs that snuck into the modesetting code and updating to
the latest TTM APIs. As of today, the i915 driver builds, but there are likely
to be problems, so debugging and bugfixes will come next.
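Much of the mechanical churn in the diff below is the ioctl-handler conversion
visible in bsd-core/drmP.h and the driver files: handlers drop the
DRM_IOCTL_ARGS/DRMFILE macros in favor of explicit (dev, data, file_priv)
arguments and plain errno return values. A minimal before/after sketch, not
taken from this tree (drm_foo_ioctl, drm_foo_t and DRM_IOCTL_FOO are
hypothetical names used only for illustration):

    /* Old style (pre-merge): arguments hidden behind DRM_IOCTL_ARGS, the
     * caller identified by a DRMFILE cookie, DRM_DEVICE used to recover
     * the device, and arguments copied in/out by hand. */
    int drm_foo_ioctl(DRM_IOCTL_ARGS)
    {
    	DRM_DEVICE;			/* declares drm_device_t *dev */
    	drm_foo_t request;

    	DRM_COPY_FROM_USER_IOCTL(request, (drm_foo_t *)data,
    	    sizeof(request));
    	/* ... operate on dev and request ... */
    	DRM_COPY_TO_USER_IOCTL((drm_foo_t *)data, request,
    	    sizeof(request));
    	return 0;
    }

    /* New style (post-merge): explicit device, a data pointer already
     * copied in by the ioctl dispatcher, and a real struct drm_file for
     * the caller; errors are plain errno values instead of DRM_ERR(). */
    int drm_foo_ioctl(drm_device_t *dev, void *data,
        struct drm_file *file_priv)
    {
    	drm_foo_t *request = data;

    	if (dev->dma == NULL)
    		return EINVAL;
    	/* ... operate on dev, request and file_priv ... */
    	return 0;
    }

    /* Registered via the new table-entry macro from drmP.h, inside the
     * driver's ioctl descriptor array: */
    DRM_IOCTL_DEF(DRM_IOCTL_FOO, drm_foo_ioctl, DRM_AUTH)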
-rw-r--r--  .gitignore | 65
-rw-r--r--  Makefile.am | 2
-rw-r--r--  bsd-core/Makefile | 64
-rw-r--r--  bsd-core/ati_pcigart.c | 8
l---------  bsd-core/drm.h | 1
-rw-r--r--  bsd-core/drmP.h | 198
-rw-r--r--  bsd-core/drm_agpsupport.c | 34
-rw-r--r--  bsd-core/drm_auth.c | 116
-rw-r--r--  bsd-core/drm_bufs.c | 258
-rw-r--r--  bsd-core/drm_context.c | 136
-rw-r--r--  bsd-core/drm_dma.c | 29
-rw-r--r--  bsd-core/drm_drawable.c | 121
-rw-r--r--  bsd-core/drm_drv.c | 237
-rw-r--r--  bsd-core/drm_fops.c | 13
-rw-r--r--  bsd-core/drm_ioctl.c | 170
-rw-r--r--  bsd-core/drm_irq.c | 138
-rw-r--r--  bsd-core/drm_lock.c | 67
-rw-r--r--  bsd-core/drm_memory.c | 11
-rw-r--r--  bsd-core/drm_pci.c | 14
l---------  bsd-core/drm_sarea.h | 1
-rw-r--r--  bsd-core/drm_scatter.c | 49
-rw-r--r--  bsd-core/drm_sysctl.c | 5
-rw-r--r--  bsd-core/drm_vm.c | 8
l---------  bsd-core/i915_dma.c | 1
l---------  bsd-core/i915_drm.h | 1
l---------  bsd-core/i915_drv.h | 1
l---------  bsd-core/i915_irq.c | 1
l---------  bsd-core/i915_mem.c | 1
l---------  bsd-core/mach64_dma.c | 1
l---------  bsd-core/mach64_drm.h | 1
l---------  bsd-core/mach64_drv.h | 1
l---------  bsd-core/mach64_irq.c | 1
l---------  bsd-core/mach64_state.c | 1
l---------  bsd-core/mga_dma.c | 1
l---------  bsd-core/mga_drm.h | 1
l---------  bsd-core/mga_drv.h | 1
l---------  bsd-core/mga_irq.c | 1
l---------  bsd-core/mga_state.c | 1
l---------  bsd-core/mga_ucode.h | 1
l---------  bsd-core/mga_warp.c | 1
l---------  bsd-core/r128_cce.c | 1
l---------  bsd-core/r128_drm.h | 1
l---------  bsd-core/r128_drv.h | 1
l---------  bsd-core/r128_irq.c | 1
l---------  bsd-core/r128_state.c | 1
l---------  bsd-core/r300_cmdbuf.c | 1
l---------  bsd-core/r300_reg.h | 1
l---------  bsd-core/radeon_cp.c | 1
l---------  bsd-core/radeon_drm.h | 1
l---------  bsd-core/radeon_drv.h | 1
l---------  bsd-core/radeon_irq.c | 1
l---------  bsd-core/radeon_mem.c | 1
l---------  bsd-core/radeon_state.c | 1
l---------  bsd-core/savage_bci.c | 1
l---------  bsd-core/savage_drm.h | 1
l---------  bsd-core/savage_drv.h | 1
l---------  bsd-core/savage_state.c | 1
l---------  bsd-core/sis_drm.h | 1
l---------  bsd-core/sis_drv.h | 1
l---------  bsd-core/sis_ds.c | 1
l---------  bsd-core/sis_ds.h | 1
l---------  bsd-core/sis_mm.c | 1
l---------  bsd-core/tdfx_drv.h | 1
l---------  bsd-core/via_3d_reg.h | 1
l---------  bsd-core/via_dma.c | 1
l---------  bsd-core/via_drm.h | 1
l---------  bsd-core/via_drv.h | 1
l---------  bsd-core/via_ds.c | 1
l---------  bsd-core/via_ds.h | 1
l---------  bsd-core/via_irq.c | 1
l---------  bsd-core/via_map.c | 1
l---------  bsd-core/via_mm.c | 1
l---------  bsd-core/via_mm.h | 1
l---------  bsd-core/via_verifier.c | 1
l---------  bsd-core/via_verifier.h | 1
l---------  bsd-core/via_video.c | 1
-rw-r--r--  configure.ac | 7
-rw-r--r--  libdrm/xf86drm.c | 337
-rw-r--r--  libdrm/xf86drm.h | 1
-rw-r--r--  libdrm/xf86drmHash.c | 1
-rw-r--r--  libdrm/xf86drmRandom.c | 1
-rw-r--r--  libdrm/xf86drmSL.c | 1
-rw-r--r--  libdrm/xf86mm.h | 24
-rw-r--r--  linux-core/Makefile | 9
-rw-r--r--  linux-core/Makefile.kernel | 8
-rw-r--r--  linux-core/ati_pcigart.c | 8
-rw-r--r--  linux-core/drmP.h | 599
-rw-r--r--  linux-core/drm_agpsupport.c | 200
-rw-r--r--  linux-core/drm_auth.c | 78
-rw-r--r--  linux-core/drm_bo.c | 1163
-rw-r--r--  linux-core/drm_bo_move.c | 64
-rw-r--r--  linux-core/drm_bufs.c | 334
-rw-r--r--  linux-core/drm_compat.c | 80
-rw-r--r--  linux-core/drm_compat.h | 13
-rw-r--r--  linux-core/drm_context.c | 240
-rw-r--r--  linux-core/drm_crtc.c | 128
-rw-r--r--  linux-core/drm_crtc.h | 46
-rw-r--r--  linux-core/drm_dma.c | 21
-rw-r--r--  linux-core/drm_drawable.c | 130
-rw-r--r--  linux-core/drm_drv.c | 302
-rw-r--r--  linux-core/drm_edid.c | 2
-rw-r--r--  linux-core/drm_fence.c | 466
-rw-r--r--  linux-core/drm_fops.c | 64
-rw-r--r--  linux-core/drm_hashtab.c | 34
-rw-r--r--  linux-core/drm_hashtab.h | 24
-rw-r--r--  linux-core/drm_ioc32.c | 91
-rw-r--r--  linux-core/drm_ioctl.c | 213
-rw-r--r--  linux-core/drm_irq.c | 113
-rw-r--r--  linux-core/drm_lock.c | 86
-rw-r--r--  linux-core/drm_memory.c | 20
-rw-r--r--  linux-core/drm_memory_debug.c | 2
-rw-r--r--  linux-core/drm_memory_debug.h | 2
-rw-r--r--  linux-core/drm_mm.c | 66
-rw-r--r--  linux-core/drm_object.c | 86
-rw-r--r--  linux-core/drm_objects.h | 283
-rw-r--r--  linux-core/drm_os_linux.h | 18
-rw-r--r--  linux-core/drm_pci.c | 6
-rw-r--r--  linux-core/drm_proc.c | 50
-rw-r--r--  linux-core/drm_scatter.c | 55
-rw-r--r--  linux-core/drm_sman.c | 92
-rw-r--r--  linux-core/drm_sman.h | 50
-rw-r--r--  linux-core/drm_stub.c | 48
-rw-r--r--  linux-core/drm_sysfs.c | 152
-rw-r--r--  linux-core/drm_ttm.c | 40
-rw-r--r--  linux-core/drm_vm.c | 100
-rw-r--r--  linux-core/ffb_context.c | 22
-rw-r--r--  linux-core/ffb_drv.c | 8
-rw-r--r--  linux-core/ffb_drv.h | 2
-rw-r--r--  linux-core/i810_dma.c | 409
-rw-r--r--  linux-core/i810_drm.h | 30
-rw-r--r--  linux-core/i810_drv.h | 27
-rw-r--r--  linux-core/i915_buffer.c | 37
-rw-r--r--  linux-core/i915_drv.c | 4
-rw-r--r--  linux-core/i915_fence.c | 24
-rw-r--r--  linux-core/intel_crt.c | 16
-rw-r--r--  linux-core/intel_display.c | 68
-rw-r--r--  linux-core/intel_drv.h | 16
-rw-r--r--  linux-core/intel_fb.c | 17
-rw-r--r--  linux-core/intel_i2c.c | 14
-rw-r--r--  linux-core/intel_lvds.c | 24
-rw-r--r--  linux-core/intel_sdvo.c | 22
-rw-r--r--  linux-core/mga_drv.c | 4
l---------  linux-core/nouveau_dma.c | 1
l---------  linux-core/nouveau_dma.h | 1
-rw-r--r--  linux-core/nouveau_drv.c | 2
-rw-r--r--  linux-core/nouveau_sgdma.c | 335
l---------  linux-core/nv04_instmem.c | 1
l---------  linux-core/nv50_instmem.c | 1
-rw-r--r--  linux-core/sis_drv.c | 6
-rw-r--r--  linux-core/sis_mm.c | 122
-rw-r--r--  linux-core/via_buffer.c | 17
-rw-r--r--  linux-core/via_dmablit.c | 76
-rw-r--r--  linux-core/via_dmablit.h | 2
-rw-r--r--  linux-core/via_fence.c | 18
-rw-r--r--  linux-core/via_mm.c | 85
-rw-r--r--  linux-core/xgi_cmdlist.c | 322
-rw-r--r--  linux-core/xgi_cmdlist.h | 66
l---------  linux-core/xgi_drm.h | 1
-rw-r--r--  linux-core/xgi_drv.c | 431
-rw-r--r--  linux-core/xgi_drv.h | 117
-rw-r--r--  linux-core/xgi_fb.c | 130
-rw-r--r--  linux-core/xgi_fence.c | 127
-rw-r--r--  linux-core/xgi_ioc32.c | 140
-rw-r--r--  linux-core/xgi_misc.c | 477
-rw-r--r--  linux-core/xgi_misc.h | 37
-rw-r--r--  linux-core/xgi_pcie.c | 126
-rw-r--r--  linux-core/xgi_regs.h | 169
-rw-r--r--  shared-core/Makefile.am | 3
-rw-r--r--  shared-core/drm.h | 654
-rw-r--r--  shared-core/drm_pciids.txt | 39
-rw-r--r--  shared-core/drm_sarea.h | 26
-rw-r--r--  shared-core/i915_dma.c | 464
-rw-r--r--  shared-core/i915_drm.h | 34
-rw-r--r--  shared-core/i915_drv.h | 277
-rw-r--r--  shared-core/i915_init.c | 37
-rw-r--r--  shared-core/i915_irq.c | 392
-rw-r--r--  shared-core/i915_mem.c | 143
-rw-r--r--  shared-core/mach64_dma.c | 144
-rw-r--r--  shared-core/mach64_drm.h | 4
-rw-r--r--  shared-core/mach64_drv.h | 82
-rw-r--r--  shared-core/mach64_irq.c | 10
-rw-r--r--  shared-core/mach64_state.c | 174
-rw-r--r--  shared-core/mga_dma.c | 201
-rw-r--r--  shared-core/mga_drm.h | 6
-rw-r--r--  shared-core/mga_drv.h | 43
-rw-r--r--  shared-core/mga_irq.c | 12
-rw-r--r--  shared-core/mga_state.c | 225
-rw-r--r--  shared-core/mga_warp.c | 8
-rw-r--r--  shared-core/nouveau_dma.c | 177
-rw-r--r--  shared-core/nouveau_dma.h | 98
-rw-r--r--  shared-core/nouveau_drm.h | 98
-rw-r--r--  shared-core/nouveau_drv.h | 583
-rw-r--r--  shared-core/nouveau_fifo.c | 401
-rw-r--r--  shared-core/nouveau_irq.c | 375
-rw-r--r--  shared-core/nouveau_mem.c | 578
-rw-r--r--  shared-core/nouveau_notifier.c | 126
-rw-r--r--  shared-core/nouveau_object.c | 1154
-rw-r--r--  shared-core/nouveau_reg.h | 73
-rw-r--r--  shared-core/nouveau_state.c | 285
-rw-r--r--  shared-core/nv04_fb.c | 6
-rw-r--r--  shared-core/nv04_fifo.c | 63
-rw-r--r--  shared-core/nv04_graph.c | 701
-rw-r--r--  shared-core/nv04_instmem.c | 159
-rw-r--r--  shared-core/nv04_mc.c | 8
-rw-r--r--  shared-core/nv04_timer.c | 27
-rw-r--r--  shared-core/nv10_fb.c | 6
-rw-r--r--  shared-core/nv10_fifo.c | 58
-rw-r--r--  shared-core/nv10_graph.c | 552
-rw-r--r--  shared-core/nv20_graph.c | 122
-rw-r--r--  shared-core/nv30_graph.c | 2918
-rw-r--r--  shared-core/nv40_fb.c | 6
-rw-r--r--  shared-core/nv40_fifo.c | 73
-rw-r--r--  shared-core/nv40_graph.c | 354
-rw-r--r--  shared-core/nv40_mc.c | 8
-rw-r--r--  shared-core/nv50_fifo.c | 268
-rw-r--r--  shared-core/nv50_graph.c | 262
-rw-r--r--  shared-core/nv50_instmem.c | 320
-rw-r--r--  shared-core/nv50_mc.c | 7
-rw-r--r--  shared-core/r128_cce.c | 170
-rw-r--r--  shared-core/r128_drm.h | 22
-rw-r--r--  shared-core/r128_drv.h | 41
-rw-r--r--  shared-core/r128_irq.c | 10
-rw-r--r--  shared-core/r128_state.c | 407
-rw-r--r--  shared-core/r300_cmdbuf.c | 78
-rw-r--r--  shared-core/radeon_cp.c | 188
-rw-r--r--  shared-core/radeon_drm.h | 6
-rw-r--r--  shared-core/radeon_drv.h | 80
-rw-r--r--  shared-core/radeon_irq.c | 61
-rw-r--r--  shared-core/radeon_mem.c | 108
-rw-r--r--  shared-core/radeon_state.c | 764
-rw-r--r--  shared-core/savage_bci.c | 180
-rw-r--r--  shared-core/savage_drm.h | 4
-rw-r--r--  shared-core/savage_drv.h | 27
-rw-r--r--  shared-core/savage_state.c | 227
-rw-r--r--  shared-core/sis_drv.h | 15
-rw-r--r--  shared-core/sis_mm.c | 190
-rw-r--r--  shared-core/via_dma.c | 163
-rw-r--r--  shared-core/via_drm.h | 4
-rw-r--r--  shared-core/via_drv.c | 4
-rw-r--r--  shared-core/via_drv.h | 73
-rw-r--r--  shared-core/via_irq.c | 60
-rw-r--r--  shared-core/via_map.c | 22
-rw-r--r--  shared-core/via_mm.c | 49
-rw-r--r--  shared-core/via_verifier.c | 16
-rw-r--r--  shared-core/via_verifier.h | 6
-rw-r--r--  shared-core/via_video.c | 20
-rw-r--r--  shared-core/xgi_drm.h | 133
-rw-r--r--  tests/Makefile | 27
-rw-r--r--  tests/Makefile.am | 29
-rw-r--r--  tests/auth.c | 137
-rw-r--r--  tests/drmtest.c | 83
-rw-r--r--  tests/drmtest.h | 37
-rw-r--r--  tests/getclient.c | 60
-rw-r--r--  tests/getstats.c | 51
-rw-r--r--  tests/getversion.c | 47
-rw-r--r--  tests/lock.c | 262
-rw-r--r--  tests/openclose.c | 37
-rw-r--r--  tests/setversion.c | 84
-rw-r--r--  tests/updatedraw.c | 148
259 files changed, 18719 insertions, 9565 deletions
diff --git a/.gitignore b/.gitignore
index 0fb0f49f..0991da8c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,55 +1,5 @@
-bsd-core/linux
-bsd-core/drm.h
-bsd-core/drm_sarea.h
-bsd-core/i915_dma.c
-bsd-core/i915_drm.h
-bsd-core/i915_drv.h
-bsd-core/i915_irq.c
-bsd-core/i915_mem.c
-bsd-core/mach64_dma.c
-bsd-core/mach64_drm.h
-bsd-core/mach64_drv.h
-bsd-core/mach64_irq.c
-bsd-core/mach64_state.c
-bsd-core/mga_dma.c
-bsd-core/mga_drm.h
-bsd-core/mga_drv.h
-bsd-core/mga_irq.c
-bsd-core/mga_state.c
-bsd-core/mga_ucode.h
-bsd-core/mga_warp.c
-bsd-core/nv_drv.h
-bsd-core/r128_cce.c
-bsd-core/r128_drm.h
-bsd-core/r128_drv.h
-bsd-core/r128_irq.c
-bsd-core/r128_state.c
-bsd-core/r300_cmdbuf.c
-bsd-core/r300_reg.h
-bsd-core/radeon_cp.c
-bsd-core/radeon_drm.h
-bsd-core/radeon_drv.h
-bsd-core/radeon_irq.c
-bsd-core/radeon_mem.c
-bsd-core/radeon_state.c
-bsd-core/savage_bci.c
-bsd-core/savage_drm.h
-bsd-core/savage_drv.h
-bsd-core/savage_state.c
-bsd-core/sis_drm.h
-bsd-core/sis_drv.h
-bsd-core/tdfx_drv.h
-bsd-core/via_3d_reg.h
-bsd-core/via_dma.c
-bsd-core/via_drm.h
-bsd-core/via_drv.c
-bsd-core/via_drv.h
-bsd-core/via_irq.c
-bsd-core/via_map.c
-bsd-core/via_verifier.c
-bsd-core/via_verifier.h
-bsd-core/via_video.c
-*~
+bsd-core/*/@
+bsd-core/*/machine
*.flags
*.ko
*.ko.cmd
@@ -75,6 +25,7 @@ config.log
config.status
config.sub
configure
+configure.lineno
cscope.*
depcomp
device_if.h
@@ -100,3 +51,13 @@ sis.kld
stamp-h1
tdfx.kld
via.kld
+tests/auth
+tests/dristat
+tests/drmstat
+tests/getclient
+tests/getstats
+tests/getversion
+tests/lock
+tests/openclose
+tests/setversion
+tests/updatedraw
diff --git a/Makefile.am b/Makefile.am
index 8c5dc702..5b1ae60a 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -22,7 +22,7 @@
# here too, but let's just do libdrm for now
AUTOMAKE_OPTIONS = foreign
-SUBDIRS = libdrm shared-core
+SUBDIRS = libdrm shared-core tests
pkgconfigdir = @pkgconfigdir@
pkgconfig_DATA = libdrm.pc
diff --git a/bsd-core/Makefile b/bsd-core/Makefile
index 00889dae..a58ac0a5 100644
--- a/bsd-core/Makefile
+++ b/bsd-core/Makefile
@@ -1,71 +1,11 @@
SHARED= ../shared-core
-SHAREDFILES= drm.h \
- drm_sarea.h \
- i915_dma.c \
- i915_drm.h \
- i915_drv.h \
- i915_irq.c \
- i915_mem.c \
- mach64_dma.c \
- mach64_drm.h \
- mach64_drv.h \
- mach64_irq.c \
- mach64_state.c \
- mga_dma.c \
- mga_drm.h \
- mga_drv.h \
- mga_irq.c \
- mga_state.c \
- mga_ucode.h \
- mga_warp.c \
- r128_cce.c \
- r128_drm.h \
- r128_drv.h \
- r128_irq.c \
- r128_state.c \
- radeon_cp.c \
- radeon_drm.h \
- radeon_drv.h \
- radeon_irq.c \
- radeon_mem.c \
- radeon_state.c \
- r300_cmdbuf.c \
- r300_reg.h \
- savage_bci.c \
- savage_drm.h \
- savage_drv.h \
- savage_state.c \
- sis_drm.h \
- sis_drv.h \
- sis_ds.c \
- sis_ds.h \
- sis_mm.c \
- tdfx_drv.h \
- via_3d_reg.h \
- via_dma.c \
- via_drm.h \
- via_drv.h \
- via_ds.c \
- via_ds.h \
- via_irq.c \
- via_map.c \
- via_mm.c \
- via_mm.h \
- via_verifier.c \
- via_verifier.h \
- via_video.c
SUBDIR = drm mach64 mga r128 radeon savage sis tdfx i915 # via
-CLEANFILES+= ${SHAREDFILES}
-
.include <bsd.obj.mk>
-depend: drm_pciids.h ${SHAREDFILES}
-all: drm_pciids.h ${SHAREDFILES}
+depend: drm_pciids.h
+all: drm_pciids.h
drm_pciids.h: ${SHARED}/drm_pciids.txt
sh ../scripts/create_bsd_pci_lists.sh < ${SHARED}/drm_pciids.txt
-
-${SHAREDFILES}:
- ln -sf ${SHARED}/$@ $@
diff --git a/bsd-core/ati_pcigart.c b/bsd-core/ati_pcigart.c
index 682eace6..db19a75d 100644
--- a/bsd-core/ati_pcigart.c
+++ b/bsd-core/ati_pcigart.c
@@ -1,6 +1,3 @@
-/* ati_pcigart.h -- ATI PCI GART support -*- linux-c -*-
- * Created: Wed Dec 13 21:52:19 2000 by gareth@valinux.com
- */
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -29,6 +26,11 @@
*
*/
+/** @file ati_pcigart.c
+ * Implementation of ATI's PCIGART, which provides an aperture in card virtual
+ * address space with addresses remapped to system memory.
+ */
+
#include "drmP.h"
#define ATI_PCIGART_PAGE_SIZE 4096 /* PCI GART page size */
diff --git a/bsd-core/drm.h b/bsd-core/drm.h
new file mode 120000
index 00000000..29636692
--- /dev/null
+++ b/bsd-core/drm.h
@@ -0,0 +1 @@
+../shared-core/drm.h
\ No newline at end of file
diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h
index 9ba3d502..8a768f0c 100644
--- a/bsd-core/drmP.h
+++ b/bsd-core/drmP.h
@@ -59,6 +59,8 @@ typedef struct drm_file drm_file_t;
#include <sys/bus.h>
#include <sys/signalvar.h>
#include <sys/poll.h>
+#include <sys/tree.h>
+#include <sys/taskqueue.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
@@ -152,6 +154,7 @@ typedef struct drm_file drm_file_t;
#define DRM_MEM_CTXBITMAP 17
#define DRM_MEM_STUB 18
#define DRM_MEM_SGLISTS 19
+#define DRM_MEM_DRAWABLE 20
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
@@ -184,10 +187,15 @@ MALLOC_DECLARE(M_DRM);
#define DRM_CURPROC curthread
#define DRM_STRUCTPROC struct thread
#define DRM_SPINTYPE struct mtx
-#define DRM_SPININIT(l,name) mtx_init(&l, name, NULL, MTX_DEF)
-#define DRM_SPINUNINIT(l) mtx_destroy(&l)
+#define DRM_SPININIT(l,name) mtx_init(l, name, NULL, MTX_DEF)
+#define DRM_SPINUNINIT(l) mtx_destroy(l)
#define DRM_SPINLOCK(l) mtx_lock(l)
-#define DRM_SPINUNLOCK(u) mtx_unlock(u);
+#define DRM_SPINUNLOCK(u) mtx_unlock(u)
+#define DRM_SPINLOCK_IRQSAVE(l, irqflags) do { \
+ mtx_lock(l); \
+ (void)irqflags; \
+} while (0)
+#define DRM_SPINUNLOCK_IRQRESTORE(u, irqflags) mtx_unlock(u)
#define DRM_SPINLOCK_ASSERT(l) mtx_assert(l, MA_OWNED)
#define DRM_CURRENTPID curthread->td_proc->p_pid
#define DRM_LOCK() mtx_lock(&dev->dev_lock)
@@ -209,10 +217,6 @@ MALLOC_DECLARE(M_DRM);
#define spldrm() spltty()
#endif /* __NetBSD__ || __OpenBSD__ */
-/* Currently our DRMFILE (filp) is a void * which is actually the pid
- * of the current process. It should be a per-open unique pointer, but
- * code for that is not yet written */
-#define DRMFILE void *
#define DRM_IRQ_ARGS void *arg
typedef void irqreturn_t;
#define IRQ_HANDLED /* nothing */
@@ -226,11 +230,15 @@ enum {
#define DRM_AGP_MEM struct agp_memory_info
#if defined(__FreeBSD__)
-#define DRM_DEVICE \
- drm_device_t *dev = kdev->si_drv1
-#define DRM_IOCTL_ARGS struct cdev *kdev, u_long cmd, caddr_t data, \
- int flags, DRM_STRUCTPROC *p, DRMFILE filp
+#define drm_get_device_from_kdev(_kdev) (_kdev->si_drv1)
+#elif defined(__NetBSD__)
+#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, minor(_kdev))
+#elif defined(__OpenBSD__)
+#define drm_get_device_from_kdev(_kdev) device_lookup(&drm_cd, \
+ minor(_kdev)))->dv_cfdata->cf_driver->cd_devs[minor(_kdev)]
+#endif
+#if defined(__FreeBSD__)
#define PAGE_ALIGN(addr) round_page(addr)
/* DRM_SUSER returns true if the user is superuser */
#if __FreeBSD_version >= 700000
@@ -244,17 +252,6 @@ enum {
#else /* __FreeBSD__ */
-#if defined(__NetBSD__)
-#define DRM_DEVICE \
- drm_device_t *dev = device_lookup(&drm_cd, minor(kdev))
-#elif defined(__OpenBSD__)
-#define DRM_DEVICE \
- drm_device_t *dev = (device_lookup(&drm_cd, \
- minor(kdev)))->dv_cfdata->cf_driver->cd_devs[minor(kdev)]
-#endif /* __OpenBSD__ */
-#define DRM_IOCTL_ARGS dev_t kdev, u_long cmd, caddr_t data, \
- int flags, DRM_STRUCTPROC *p, DRMFILE filp
-
#define CDEV_MAJOR 34
#define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
/* DRM_SUSER returns true if the user is superuser */
@@ -342,14 +339,6 @@ typedef vaddr_t vm_offset_t;
(!uvm_useracc((caddr_t)uaddr, size, VM_PROT_READ))
#endif /* !__FreeBSD__ */
-#define DRM_COPY_TO_USER_IOCTL(user, kern, size) \
- if ( IOCPARM_LEN(cmd) != size) \
- return EINVAL; \
- *user = kern;
-#define DRM_COPY_FROM_USER_IOCTL(kern, user, size) \
- if ( IOCPARM_LEN(cmd) != size) \
- return EINVAL; \
- kern = *user;
#define DRM_COPY_TO_USER(user, kern, size) \
copyout(kern, user, size)
#define DRM_COPY_FROM_USER(kern, user, size) \
@@ -369,7 +358,6 @@ typedef vaddr_t vm_offset_t;
#define cpu_to_le32(x) htole32(x)
#define le32_to_cpu(x) le32toh(x)
-#define DRM_ERR(v) v
#define DRM_HZ hz
#define DRM_UDELAY(udelay) DELAY(udelay)
#define DRM_TIME_SLICE (hz/20) /* Time slice for GLXContexts */
@@ -378,23 +366,10 @@ typedef vaddr_t vm_offset_t;
(_map) = (_dev)->context_sareas[_ctx]; \
} while(0)
-#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) \
-do { \
- if (_filp != (DRMFILE)(intptr_t)DRM_CURRENTPID) { \
- DRM_ERROR("filp doesn't match curproc\n"); \
- return EINVAL; \
- } \
- _priv = drm_find_file_by_proc(dev, DRM_CURPROC); \
- if (_priv == NULL) { \
- DRM_ERROR("can't find authenticator\n"); \
- return EINVAL; \
- } \
-} while (0)
-
-#define LOCK_TEST_WITH_RETURN(dev, filp) \
+#define LOCK_TEST_WITH_RETURN(dev, file_priv) \
do { \
if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
- dev->lock.filp != filp) { \
+ dev->lock.file_priv != file_priv) { \
DRM_ERROR("%s called without lock held\n", \
__FUNCTION__); \
return EINVAL; \
@@ -402,22 +377,24 @@ do { \
} while (0)
#if defined(__FreeBSD__) && __FreeBSD_version > 500000
+/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
DRM_UNLOCK(); \
mtx_lock(&dev->irq_lock); \
if (!(condition)) \
- ret = msleep(&(queue), &dev->irq_lock, \
+ ret = -msleep(&(queue), &dev->irq_lock, \
PZERO | PCATCH, "drmwtq", (timeout)); \
mtx_unlock(&dev->irq_lock); \
DRM_LOCK(); \
}
#else
+/* Returns -errno to shared code */
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
for ( ret = 0 ; !ret && !(condition) ; ) { \
int s = spldrm(); \
if (!(condition)) \
- ret = tsleep( &(queue), PZERO | PCATCH, \
+ ret = -tsleep( &(queue), PZERO | PCATCH, \
"drmwtq", (timeout) ); \
splx(s); \
}
@@ -447,9 +424,16 @@ typedef struct drm_pci_id_list
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
typedef struct drm_ioctl_desc {
- int (*func)(DRM_IOCTL_ARGS);
+ unsigned long cmd;
+ int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv);
int flags;
} drm_ioctl_desc_t;
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#define DRM_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
typedef struct drm_magic_entry {
drm_magic_t magic;
@@ -472,7 +456,7 @@ typedef struct drm_buf {
unsigned long bus_address; /* Bus address of buffer */
struct drm_buf *next; /* Kernel-only: used for free list */
__volatile__ int pending; /* On hardware DMA queue */
- DRMFILE filp; /* Unique identifier of holding process */
+ struct drm_file *file_priv; /* Unique identifier of holding process */
int context; /* Kernel queue for this buffer */
enum {
DRM_LIST_NONE = 0,
@@ -534,7 +518,7 @@ struct drm_file {
typedef struct drm_lock_data {
drm_hw_lock_t *hw_lock; /* Hardware lock */
- DRMFILE filp; /* Unique identifier of holding process (NULL is kernel)*/
+ struct drm_file *file_priv; /* Unique identifier of holding process (NULL is kernel)*/
int lock_queue; /* Queue of blocked processes */
unsigned long lock_time; /* Time of last lock in jiffies */
} drm_lock_data_t;
@@ -638,12 +622,13 @@ struct drm_driver_info {
int (*load)(struct drm_device *, unsigned long flags);
int (*firstopen)(struct drm_device *);
int (*open)(struct drm_device *, drm_file_t *);
- void (*preclose)(struct drm_device *, void *filp);
+ void (*preclose)(struct drm_device *, struct drm_file *file_priv);
void (*postclose)(struct drm_device *, drm_file_t *);
void (*lastclose)(struct drm_device *);
int (*unload)(struct drm_device *);
- void (*reclaim_buffers_locked)(struct drm_device *, void *filp);
- int (*dma_ioctl)(DRM_IOCTL_ARGS);
+ void (*reclaim_buffers_locked)(struct drm_device *,
+ struct drm_file *file_priv);
+ int (*dma_ioctl)(drm_device_t *dev, void *data, struct drm_file *file_priv);
void (*dma_ready)(struct drm_device *);
int (*dma_quiescent)(struct drm_device *);
int (*dma_flush_block_and_flush)(struct drm_device *, int context,
@@ -732,6 +717,8 @@ struct drm_device {
struct mtx irq_lock; /* protects irq condition checks */
struct mtx dev_lock; /* protects everything else */
#endif
+ DRM_SPINTYPE drw_lock;
+
/* Usage Counters */
int open_count; /* Outstanding files open */
int buf_use; /* Buffers in use -- cannot alloc */
@@ -780,6 +767,7 @@ struct drm_device {
int last_context; /* Last current context */
int vbl_queue; /* vbl wait channel */
atomic_t vbl_received;
+ atomic_t vbl_received2;
#ifdef __FreeBSD__
struct sigio *buf_sigio; /* Processes waiting for SIGIO */
@@ -796,6 +784,13 @@ struct drm_device {
void *dev_private;
unsigned int agp_buffer_token;
drm_local_map_t *agp_buffer_map;
+
+ struct unrhdr *drw_unrhdr;
+ /* RB tree of drawable infos */
+ RB_HEAD(drawable_tree, bsd_drm_drawable_info) drw_head;
+
+ struct task locked_task;
+ void (*locked_task_call)(drm_device_t *dev);
};
extern int drm_debug_flag;
@@ -883,7 +878,7 @@ int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request);
int drm_dma_setup(drm_device_t *dev);
void drm_dma_takedown(drm_device_t *dev);
void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf);
-void drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
+void drm_reclaim_buffers(drm_device_t *dev, struct drm_file *file_priv);
#define drm_core_reclaim_buffers drm_reclaim_buffers
/* IRQ support (drm_irq.c) */
@@ -915,6 +910,7 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
/* Scatter Gather Support (drm_scatter.c) */
void drm_sg_cleanup(drm_sg_mem_t *entry);
+int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request);
#ifdef __FreeBSD__
/* sysctl support (drm_sysctl.h) */
@@ -929,68 +925,72 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev,
drm_ati_pcigart_info *gart_info);
/* Locking IOCTL support (drm_drv.c) */
-int drm_lock(DRM_IOCTL_ARGS);
-int drm_unlock(DRM_IOCTL_ARGS);
-int drm_version(DRM_IOCTL_ARGS);
-int drm_setversion(DRM_IOCTL_ARGS);
+int drm_lock(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_unlock(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_version(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* Misc. IOCTL support (drm_ioctl.c) */
-int drm_irq_by_busid(DRM_IOCTL_ARGS);
-int drm_getunique(DRM_IOCTL_ARGS);
-int drm_setunique(DRM_IOCTL_ARGS);
-int drm_getmap(DRM_IOCTL_ARGS);
-int drm_getclient(DRM_IOCTL_ARGS);
-int drm_getstats(DRM_IOCTL_ARGS);
-int drm_noop(DRM_IOCTL_ARGS);
+int drm_irq_by_busid(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_getunique(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_setunique(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_getmap(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_getclient(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_noop(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* Context IOCTL support (drm_context.c) */
-int drm_resctx(DRM_IOCTL_ARGS);
-int drm_addctx(DRM_IOCTL_ARGS);
-int drm_modctx(DRM_IOCTL_ARGS);
-int drm_getctx(DRM_IOCTL_ARGS);
-int drm_switchctx(DRM_IOCTL_ARGS);
-int drm_newctx(DRM_IOCTL_ARGS);
-int drm_rmctx(DRM_IOCTL_ARGS);
-int drm_setsareactx(DRM_IOCTL_ARGS);
-int drm_getsareactx(DRM_IOCTL_ARGS);
+int drm_resctx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_addctx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_modctx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_getctx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_switchctx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_newctx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_rmctx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_setsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_getsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* Drawable IOCTL support (drm_drawable.c) */
-int drm_adddraw(DRM_IOCTL_ARGS);
-int drm_rmdraw(DRM_IOCTL_ARGS);
+int drm_adddraw(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_rmdraw(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_update_draw(drm_device_t *dev, void *data, struct drm_file *file_priv);
+struct drm_drawable_info *drm_get_drawable_info(drm_device_t *dev, int handle);
/* Authentication IOCTL support (drm_auth.c) */
-int drm_getmagic(DRM_IOCTL_ARGS);
-int drm_authmagic(DRM_IOCTL_ARGS);
+int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_authmagic(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* Buffer management support (drm_bufs.c) */
-int drm_addmap_ioctl(DRM_IOCTL_ARGS);
-int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
-int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
-int drm_infobufs(DRM_IOCTL_ARGS);
-int drm_markbufs(DRM_IOCTL_ARGS);
-int drm_freebufs(DRM_IOCTL_ARGS);
-int drm_mapbufs(DRM_IOCTL_ARGS);
+int drm_addmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_rmmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_addbufs_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_infobufs(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_markbufs(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_freebufs(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_mapbufs(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* DMA support (drm_dma.c) */
-int drm_dma(DRM_IOCTL_ARGS);
+int drm_dma(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* IRQ support (drm_irq.c) */
-int drm_control(DRM_IOCTL_ARGS);
-int drm_wait_vblank(DRM_IOCTL_ARGS);
+int drm_control(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv);
+void drm_locked_tasklet(drm_device_t *dev,
+ void (*tasklet)(drm_device_t *dev));
/* AGP/GART support (drm_agpsupport.c) */
-int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS);
-int drm_agp_release_ioctl(DRM_IOCTL_ARGS);
-int drm_agp_enable_ioctl(DRM_IOCTL_ARGS);
-int drm_agp_info_ioctl(DRM_IOCTL_ARGS);
-int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS);
-int drm_agp_free_ioctl(DRM_IOCTL_ARGS);
-int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS);
-int drm_agp_bind_ioctl(DRM_IOCTL_ARGS);
+int drm_agp_acquire_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_agp_release_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_agp_enable_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_agp_info_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_agp_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_agp_free_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_agp_unbind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_agp_bind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* Scatter Gather Support (drm_scatter.c) */
-int drm_sg_alloc(DRM_IOCTL_ARGS);
-int drm_sg_free(DRM_IOCTL_ARGS);
+int drm_sg_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv);
+int drm_sg_free(drm_device_t *dev, void *data, struct drm_file *file_priv);
/* consistent PCI memory functions (drm_pci.c) */
drm_dma_handle_t *drm_pci_alloc(drm_device_t *dev, size_t size, size_t align,
diff --git a/bsd-core/drm_agpsupport.c b/bsd-core/drm_agpsupport.c
index 28239d1b..6f963b9c 100644
--- a/bsd-core/drm_agpsupport.c
+++ b/bsd-core/drm_agpsupport.c
@@ -1,6 +1,3 @@
-/* drm_agpsupport.h -- DRM support for AGP/GART backend -*- linux-c -*-
- * Created: Mon Dec 13 09:56:45 1999 by faith@precisioninsight.com
- */
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,11 @@
*
*/
+/** @file drm_agpsupport.c
+ * Support code for tying the kernel AGP support to DRM drivers and
+ * the DRM's AGP ioctls.
+ */
+
#include "drmP.h"
#ifdef __FreeBSD__
@@ -125,11 +127,10 @@ int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info)
return 0;
}
-int drm_agp_info_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_info_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
int err;
drm_agp_info_t info;
- DRM_DEVICE;
err = drm_agp_info(dev, &info);
if (err != 0)
@@ -139,9 +140,8 @@ int drm_agp_info_ioctl(DRM_IOCTL_ARGS)
return 0;
}
-int drm_agp_acquire_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_acquire_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
return drm_agp_acquire(dev);
}
@@ -161,9 +161,8 @@ int drm_agp_acquire(drm_device_t *dev)
return 0;
}
-int drm_agp_release_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_release_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
return drm_agp_release(dev);
}
@@ -185,15 +184,13 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
dev->agp->mode = mode.mode;
agp_enable(dev->agp->agpdev, mode.mode);
- dev->agp->base = dev->agp->info.ai_aperture_base;
dev->agp->enabled = 1;
return 0;
}
-int drm_agp_enable_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_enable_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
drm_agp_mode_t mode;
- DRM_DEVICE;
mode = *(drm_agp_mode_t *) data;
@@ -243,9 +240,8 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
return 0;
}
-int drm_agp_alloc_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_agp_buffer_t request;
int retcode;
@@ -292,9 +288,8 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
return retcode;
}
-int drm_agp_unbind_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_unbind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_agp_binding_t request;
int retcode;
@@ -333,9 +328,8 @@ int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
return retcode;
}
-int drm_agp_bind_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_bind_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_agp_binding_t request;
int retcode;
@@ -378,9 +372,8 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
}
-int drm_agp_free_ioctl(DRM_IOCTL_ARGS)
+int drm_agp_free_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_agp_buffer_t request;
int retcode;
@@ -411,6 +404,7 @@ drm_agp_head_t *drm_agp_init(void)
return NULL;
head->agpdev = agpdev;
agp_get_info(agpdev, &head->info);
+ head->base = head->info.ai_aperture_base;
head->memory = NULL;
DRM_INFO("AGP at 0x%08lx %dMB\n",
(long)head->info.ai_aperture_base,
diff --git a/bsd-core/drm_auth.c b/bsd-core/drm_auth.c
index aa0e29c0..aa8238c4 100644
--- a/bsd-core/drm_auth.c
+++ b/bsd-core/drm_auth.c
@@ -1,6 +1,3 @@
-/* drm_auth.h -- IOCTLs for authentication -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- */
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,11 @@
*
*/
+/** @file drm_auth.c
+ * Implementation of the get/authmagic ioctls implementing the authentication
+ * scheme between the master and clients.
+ */
+
#include "drmP.h"
static int drm_hash_magic(drm_magic_t magic)
@@ -38,25 +40,29 @@ static int drm_hash_magic(drm_magic_t magic)
return magic & (DRM_HASH_SIZE-1);
}
+/**
+ * Returns the file private associated with the given magic number.
+ */
static drm_file_t *drm_find_file(drm_device_t *dev, drm_magic_t magic)
{
- drm_file_t *retval = NULL;
drm_magic_entry_t *pt;
- int hash;
+ int hash = drm_hash_magic(magic);
- hash = drm_hash_magic(magic);
+ DRM_SPINLOCK_ASSERT(&dev->dev_lock);
- DRM_LOCK();
for (pt = dev->magiclist[hash].head; pt; pt = pt->next) {
if (pt->magic == magic) {
- retval = pt->priv;
- break;
+ return pt->priv;
}
}
- DRM_UNLOCK();
- return retval;
+
+ return NULL;
}
+/**
+ * Inserts the given magic number into the hash table of used magic number
+ * lists.
+ */
static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
{
int hash;
@@ -64,9 +70,11 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
DRM_DEBUG("%d\n", magic);
+ DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+
hash = drm_hash_magic(magic);
entry = malloc(sizeof(*entry), M_DRM, M_ZERO | M_NOWAIT);
- if (!entry) return DRM_ERR(ENOMEM);
+ if (!entry) return ENOMEM;
entry->magic = magic;
entry->priv = priv;
entry->next = NULL;
@@ -84,16 +92,21 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, drm_magic_t magic)
return 0;
}
+/**
+ * Removes the given magic number from the hash table of used magic number
+ * lists.
+ */
static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
{
drm_magic_entry_t *prev = NULL;
drm_magic_entry_t *pt;
int hash;
+ DRM_SPINLOCK_ASSERT(&dev->dev_lock);
+
DRM_DEBUG("%d\n", magic);
hash = drm_hash_magic(magic);
- DRM_LOCK();
for (pt = dev->magiclist[hash].head; pt; prev = pt, pt = pt->next) {
if (pt->magic == magic) {
if (dev->magiclist[hash].head == pt) {
@@ -105,68 +118,69 @@ static int drm_remove_magic(drm_device_t *dev, drm_magic_t magic)
if (prev) {
prev->next = pt->next;
}
- DRM_UNLOCK();
return 0;
}
}
- DRM_UNLOCK();
free(pt, M_DRM);
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
-int drm_getmagic(DRM_IOCTL_ARGS)
+/**
+ * Called by the client, this returns a unique magic number to be authorized
+ * by the master.
+ *
+ * The master may use its own knowledge of the client (such as the X
+ * connection that the magic is passed over) to determine if the magic number
+ * should be authenticated.
+ */
+int drm_getmagic(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
static drm_magic_t sequence = 0;
- drm_auth_t auth;
- drm_file_t *priv;
-
- DRM_LOCK();
- priv = drm_find_file_by_proc(dev, p);
- DRM_UNLOCK();
- if (priv == NULL) {
- DRM_ERROR("can't find authenticator\n");
- return EINVAL;
- }
+ drm_auth_t *auth = data;
/* Find unique magic */
- if (priv->magic) {
- auth.magic = priv->magic;
+ if (file_priv->magic) {
+ auth->magic = file_priv->magic;
} else {
+ DRM_LOCK();
do {
int old = sequence;
-
- auth.magic = old+1;
-
- if (!atomic_cmpset_int(&sequence, old, auth.magic))
+
+ auth->magic = old+1;
+
+ if (!atomic_cmpset_int(&sequence, old, auth->magic))
continue;
- } while (drm_find_file(dev, auth.magic));
- priv->magic = auth.magic;
- drm_add_magic(dev, priv, auth.magic);
+ } while (drm_find_file(dev, auth->magic));
+ file_priv->magic = auth->magic;
+ drm_add_magic(dev, file_priv, auth->magic);
+ DRM_UNLOCK();
}
- DRM_DEBUG("%u\n", auth.magic);
-
- DRM_COPY_TO_USER_IOCTL((drm_auth_t *)data, auth, sizeof(auth));
+ DRM_DEBUG("%u\n", auth->magic);
return 0;
}
-int drm_authmagic(DRM_IOCTL_ARGS)
+/**
+ * Marks the client associated with the given magic number as authenticated.
+ */
+int drm_authmagic(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- drm_auth_t auth;
- drm_file_t *file;
- DRM_DEVICE;
-
- DRM_COPY_FROM_USER_IOCTL(auth, (drm_auth_t *)data, sizeof(auth));
+ drm_auth_t *auth = data;
+ drm_file_t *priv;
- DRM_DEBUG("%u\n", auth.magic);
+ DRM_DEBUG("%u\n", auth->magic);
- if ((file = drm_find_file(dev, auth.magic))) {
- file->authenticated = 1;
- drm_remove_magic(dev, auth.magic);
+ DRM_LOCK();
+ priv = drm_find_file(dev, auth->magic);
+ if (priv != NULL) {
+ priv->authenticated = 1;
+ drm_remove_magic(dev, auth->magic);
+ DRM_UNLOCK();
return 0;
+ } else {
+ DRM_UNLOCK();
+ return EINVAL;
}
- return DRM_ERR(EINVAL);
}
diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c
index 343ab1e8..9b58c593 100644
--- a/bsd-core/drm_bufs.c
+++ b/bsd-core/drm_bufs.c
@@ -1,6 +1,3 @@
-/* drm_bufs.h -- Generic buffer template -*- linux-c -*-
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- */
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,10 @@
*
*/
+/** @file drm_bufs.c
+ * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
+ */
+
#include "dev/pci/pcireg.h"
#include "drmP.h"
@@ -149,7 +150,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
*/
map = malloc(sizeof(*map), M_DRM, M_ZERO | M_NOWAIT);
if ( !map )
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
map->offset = offset;
map->size = size;
@@ -172,7 +173,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
map->size, drm_order(map->size), map->handle );
if ( !map->handle ) {
free(map, M_DRM);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
map->offset = (unsigned long)map->handle;
if ( map->flags & _DRM_CONTAINS_LOCK ) {
@@ -182,7 +183,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
DRM_UNLOCK();
free(map->handle, M_DRM);
free(map, M_DRM);
- return DRM_ERR(EBUSY);
+ return EBUSY;
}
dev->lock.hw_lock = map->handle; /* Pointer to lock */
DRM_UNLOCK();
@@ -190,7 +191,17 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
break;
case _DRM_AGP:
/*valid = 0;*/
- map->offset += dev->agp->base;
+ /* In some cases (i810 driver), user space may have already
+ * added the AGP base itself, because dev->agp->base previously
+ * only got set during AGP enable. So, only add the base
+ * address if the map's offset isn't already within the
+ * aperture.
+ */
+ if (map->offset < dev->agp->base ||
+ map->offset > dev->agp->base +
+ dev->agp->info.ai_aperture_size - 1) {
+ map->offset += dev->agp->base;
+ }
map->mtrr = dev->agp->mtrr; /* for getmap */
/*for (entry = dev->agp->memory; entry; entry = entry->next) {
if ((map->offset >= entry->bound) &&
@@ -202,13 +213,13 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
}
if (!valid) {
free(map, M_DRM);
- return DRM_ERR(EACCES);
+ return EACCES;
}*/
break;
case _DRM_SCATTER_GATHER:
if (!dev->sg) {
free(map, M_DRM);
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
map->offset = map->offset + dev->sg->handle;
break;
@@ -225,7 +236,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
if (map->dmah == NULL) {
free(map, M_DRM);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
map->handle = map->dmah->vaddr;
map->offset = map->dmah->busaddr;
@@ -233,7 +244,7 @@ int drm_addmap(drm_device_t * dev, unsigned long offset, unsigned long size,
default:
DRM_ERROR("Bad map type %d\n", map->type);
free(map, M_DRM);
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
DRM_LOCK();
@@ -250,39 +261,35 @@ done:
return 0;
}
-int drm_addmap_ioctl(DRM_IOCTL_ARGS)
+int drm_addmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- drm_map_t request;
+ drm_map_t *request = data;
drm_local_map_t *map;
int err;
- DRM_DEVICE;
if (!(dev->flags & (FREAD|FWRITE)))
- return DRM_ERR(EACCES); /* Require read/write */
+ return EACCES; /* Require read/write */
- DRM_COPY_FROM_USER_IOCTL(request, (drm_map_t *)data, sizeof(drm_map_t));
-
- if (!DRM_SUSER(p) && request.type != _DRM_AGP)
- return DRM_ERR(EACCES);
+ if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
+ return EACCES;
DRM_LOCK();
- err = drm_addmap(dev, request.offset, request.size, request.type,
- request.flags, &map);
+ err = drm_addmap(dev, request->offset, request->size, request->type,
+ request->flags, &map);
DRM_UNLOCK();
if (err != 0)
return err;
- request.offset = map->offset;
- request.size = map->size;
- request.type = map->type;
- request.flags = map->flags;
- request.mtrr = map->mtrr;
- request.handle = map->handle;
+ request->offset = map->offset;
+ request->size = map->size;
+ request->type = map->type;
+ request->flags = map->flags;
+ request->mtrr = map->mtrr;
+ request->handle = map->handle;
- if (request.type != _DRM_SHM) {
- request.handle = (void *)request.offset;
+ if (request->type != _DRM_SHM) {
+ request->handle = (void *)request->offset;
}
- DRM_COPY_TO_USER_IOCTL((drm_map_t *)data, request, sizeof(drm_map_t));
return 0;
}
@@ -333,17 +340,14 @@ void drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
* isn't in use.
*/
-int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
+int drm_rmmap_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_local_map_t *map;
- drm_map_t request;
-
- DRM_COPY_FROM_USER_IOCTL( request, (drm_map_t *)data, sizeof(request) );
+ drm_map_t *request = data;
DRM_LOCK();
TAILQ_FOREACH(map, &dev->maplist, link) {
- if (map->handle == request.handle &&
+ if (map->handle == request->handle &&
map->flags & _DRM_REMOVABLE)
break;
}
@@ -351,7 +355,7 @@ int drm_rmmap_ioctl(DRM_IOCTL_ARGS)
/* No match found. */
if (map == NULL) {
DRM_UNLOCK();
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
drm_rmmap(dev, map);
@@ -441,7 +445,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
}
if (!valid) {
DRM_DEBUG("zone invalid\n");
- return DRM_ERR(EINVAL);
+ return EINVAL;
}*/
entry = &dma->bufs[order];
@@ -449,7 +453,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
M_NOWAIT | M_ZERO);
if ( !entry->buflist ) {
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
entry->buf_size = size;
@@ -469,7 +473,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
buf->address = (void *)(agp_offset + offset);
buf->next = NULL;
buf->pending = 0;
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
@@ -478,7 +482,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
offset += alignment;
@@ -494,7 +498,7 @@ static int drm_do_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
if (temp_buflist == NULL) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
dma->buflist = temp_buflist;
@@ -563,7 +567,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
temp_pagelist == NULL) {
free(entry->buflist, M_DRM);
free(entry->seglist, M_DRM);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
memcpy(temp_pagelist, dma->pagelist, dma->page_count *
@@ -586,7 +590,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
free(temp_pagelist, M_DRM);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
entry->seglist[entry->seg_count++] = dmah;
@@ -610,7 +614,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
buf->bus_address = dmah->busaddr + offset;
buf->next = NULL;
buf->pending = 0;
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
@@ -621,7 +625,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
entry->seg_count = count;
drm_cleanup_buf_error(dev, entry);
free(temp_pagelist, M_DRM);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
DRM_DEBUG( "buffer %d @ %p\n",
@@ -637,7 +641,7 @@ static int drm_do_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
free(temp_pagelist, M_DRM);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
dma->buflist = temp_buflist;
@@ -705,7 +709,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
entry->buflist = malloc(count * sizeof(*entry->buflist), M_DRM,
M_NOWAIT | M_ZERO);
if (entry->buflist == NULL)
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
entry->buf_size = size;
entry->page_order = page_order;
@@ -724,7 +728,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
buf->address = (void *)(agp_offset + offset + dev->sg->handle);
buf->next = NULL;
buf->pending = 0;
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver.buf_priv_size;
buf->dev_private = malloc(buf->dev_priv_size, M_DRM,
@@ -733,7 +737,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
drm_cleanup_buf_error(dev, entry);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
DRM_DEBUG( "buffer %d @ %p\n",
@@ -752,7 +756,7 @@ static int drm_do_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
if (temp_buflist == NULL) {
/* Free the entry because it isn't valid */
drm_cleanup_buf_error(dev, entry);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
dma->buflist = temp_buflist;
@@ -781,21 +785,21 @@ int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
DRM_SPINLOCK(&dev->dma_lock);
if (request->count < 0 || request->count > 4096)
- return DRM_ERR(EINVAL);
+ return EINVAL;
order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return DRM_ERR(EINVAL);
+ return EINVAL;
/* No more allocations after first buffer-using ioctl. */
if (dev->buf_use != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
- return DRM_ERR(EBUSY);
+ return EBUSY;
}
/* No more than one allocation per order */
if (dev->dma->bufs[order].buf_count != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
ret = drm_do_addbufs_agp(dev, request);
@@ -812,24 +816,24 @@ int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
DRM_SPINLOCK(&dev->dma_lock);
if (!DRM_SUSER(DRM_CURPROC))
- return DRM_ERR(EACCES);
+ return EACCES;
if (request->count < 0 || request->count > 4096)
- return DRM_ERR(EINVAL);
+ return EINVAL;
order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return DRM_ERR(EINVAL);
+ return EINVAL;
/* No more allocations after first buffer-using ioctl. */
if (dev->buf_use != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
- return DRM_ERR(EBUSY);
+ return EBUSY;
}
/* No more than one allocation per order */
if (dev->dma->bufs[order].buf_count != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
ret = drm_do_addbufs_sg(dev, request);
@@ -846,24 +850,24 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
DRM_SPINLOCK(&dev->dma_lock);
if (!DRM_SUSER(DRM_CURPROC))
- return DRM_ERR(EACCES);
+ return EACCES;
if (request->count < 0 || request->count > 4096)
- return DRM_ERR(EINVAL);
+ return EINVAL;
order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
- return DRM_ERR(EINVAL);
+ return EINVAL;
/* No more allocations after first buffer-using ioctl. */
if (dev->buf_use != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
- return DRM_ERR(EBUSY);
+ return EBUSY;
}
/* No more than one allocation per order */
if (dev->dma->bufs[order].buf_count != 0) {
DRM_SPINUNLOCK(&dev->dma_lock);
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
ret = drm_do_addbufs_pci(dev, request);
@@ -873,39 +877,29 @@ int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
return ret;
}
-int drm_addbufs_ioctl(DRM_IOCTL_ARGS)
+int drm_addbufs_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_buf_desc_t request;
+ drm_buf_desc_t *request = data;
int err;
- DRM_COPY_FROM_USER_IOCTL(request, (drm_buf_desc_t *)data,
- sizeof(request));
-
- if (request.flags & _DRM_AGP_BUFFER)
- err = drm_addbufs_agp(dev, &request);
- else if (request.flags & _DRM_SG_BUFFER)
- err = drm_addbufs_sg(dev, &request);
+ if (request->flags & _DRM_AGP_BUFFER)
+ err = drm_addbufs_agp(dev, request);
+ else if (request->flags & _DRM_SG_BUFFER)
+ err = drm_addbufs_sg(dev, request);
else
- err = drm_addbufs_pci(dev, &request);
-
- DRM_COPY_TO_USER_IOCTL((drm_buf_desc_t *)data, request,
- sizeof(request));
+ err = drm_addbufs_pci(dev, request);
return err;
}
-int drm_infobufs(DRM_IOCTL_ARGS)
+int drm_infobufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
- drm_buf_info_t request;
+ drm_buf_info_t *request = data;
int i;
int count;
int retcode = 0;
- DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_info_t *)data, sizeof(request) );
-
DRM_SPINLOCK(&dev->dma_lock);
++dev->buf_use; /* Can't allocate more after this call */
DRM_SPINUNLOCK(&dev->dma_lock);
@@ -916,7 +910,7 @@ int drm_infobufs(DRM_IOCTL_ARGS)
DRM_DEBUG( "count = %d\n", count );
- if ( request.count >= count ) {
+ if ( request->count >= count ) {
for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
if ( dma->bufs[i].buf_count ) {
drm_buf_desc_t from;
@@ -926,9 +920,9 @@ int drm_infobufs(DRM_IOCTL_ARGS)
from.low_mark = dma->bufs[i].freelist.low_mark;
from.high_mark = dma->bufs[i].freelist.high_mark;
- if (DRM_COPY_TO_USER(&request.list[count], &from,
+ if (DRM_COPY_TO_USER(&request->list[count], &from,
sizeof(drm_buf_desc_t)) != 0) {
- retcode = DRM_ERR(EFAULT);
+ retcode = EFAULT;
break;
}
@@ -942,76 +936,68 @@ int drm_infobufs(DRM_IOCTL_ARGS)
}
}
}
- request.count = count;
-
- DRM_COPY_TO_USER_IOCTL( (drm_buf_info_t *)data, request, sizeof(request) );
+ request->count = count;
return retcode;
}
-int drm_markbufs(DRM_IOCTL_ARGS)
+int drm_markbufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
- drm_buf_desc_t request;
+ drm_buf_desc_t *request = data;
int order;
- DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_desc_t *)data, sizeof(request) );
-
DRM_DEBUG( "%d, %d, %d\n",
- request.size, request.low_mark, request.high_mark );
+ request->size, request->low_mark, request->high_mark );
- order = drm_order(request.size);
+ order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
- request.low_mark < 0 || request.high_mark < 0) {
- return DRM_ERR(EINVAL);
+ request->low_mark < 0 || request->high_mark < 0) {
+ return EINVAL;
}
DRM_SPINLOCK(&dev->dma_lock);
- if (request.low_mark > dma->bufs[order].buf_count ||
- request.high_mark > dma->bufs[order].buf_count) {
- return DRM_ERR(EINVAL);
+ if (request->low_mark > dma->bufs[order].buf_count ||
+ request->high_mark > dma->bufs[order].buf_count) {
+ return EINVAL;
}
- dma->bufs[order].freelist.low_mark = request.low_mark;
- dma->bufs[order].freelist.high_mark = request.high_mark;
+ dma->bufs[order].freelist.low_mark = request->low_mark;
+ dma->bufs[order].freelist.high_mark = request->high_mark;
DRM_SPINUNLOCK(&dev->dma_lock);
return 0;
}
-int drm_freebufs(DRM_IOCTL_ARGS)
+int drm_freebufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
- drm_buf_free_t request;
+ drm_buf_free_t *request = data;
int i;
int idx;
drm_buf_t *buf;
int retcode = 0;
- DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_free_t *)data, sizeof(request) );
-
- DRM_DEBUG( "%d\n", request.count );
+ DRM_DEBUG( "%d\n", request->count );
DRM_SPINLOCK(&dev->dma_lock);
- for ( i = 0 ; i < request.count ; i++ ) {
- if (DRM_COPY_FROM_USER(&idx, &request.list[i], sizeof(idx))) {
- retcode = DRM_ERR(EFAULT);
+ for ( i = 0 ; i < request->count ; i++ ) {
+ if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
+ retcode = EFAULT;
break;
}
if ( idx < 0 || idx >= dma->buf_count ) {
DRM_ERROR( "Index %d (of %d max)\n",
idx, dma->buf_count - 1 );
- retcode = DRM_ERR(EINVAL);
+ retcode = EINVAL;
break;
}
buf = dma->buflist[idx];
- if ( buf->filp != filp ) {
+ if ( buf->file_priv != file_priv ) {
DRM_ERROR("Process %d freeing buffer not owned\n",
DRM_CURRENTPID);
- retcode = DRM_ERR(EINVAL);
+ retcode = EINVAL;
break;
}
drm_free_buffer(dev, buf);
@@ -1021,9 +1007,8 @@ int drm_freebufs(DRM_IOCTL_ARGS)
return retcode;
}
-int drm_mapbufs(DRM_IOCTL_ARGS)
+int drm_mapbufs(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_device_dma_t *dma = dev->dma;
int retcode = 0;
const int zero = 0;
@@ -1040,27 +1025,25 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
vaddr_t vaddr;
#endif /* __NetBSD__ || __OpenBSD__ */
- drm_buf_map_t request;
+ drm_buf_map_t *request = data;
int i;
- DRM_COPY_FROM_USER_IOCTL( request, (drm_buf_map_t *)data, sizeof(request) );
-
#if defined(__NetBSD__) || defined(__OpenBSD__)
if (!vfinddev(kdev, VCHR, &vn))
return 0; /* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ || __OpenBSD */
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
- vms = p->td_proc->p_vmspace;
+ vms = DRM_CURPROC->td_proc->p_vmspace;
#else
- vms = p->p_vmspace;
+ vms = DRM_CURPROC->p_vmspace;
#endif
DRM_SPINLOCK(&dev->dma_lock);
dev->buf_use++; /* Can't allocate more after this call */
DRM_SPINUNLOCK(&dev->dma_lock);
- if (request.count < dma->buf_count)
+ if (request->count < dma->buf_count)
goto done;
if ((dev->driver.use_agp && (dma->flags & _DRM_DMA_USE_AGP)) ||
@@ -1082,10 +1065,11 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
- VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, kdev, foff );
+ VM_PROT_ALL, MAP_SHARED, OBJT_DEVICE, dev->devnode, foff);
#else
retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
- VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&kdev->si_hlist), foff );
+ VM_PROT_ALL, MAP_SHARED, SLIST_FIRST(&dev->devnode->si_hlist),
+ foff);
#endif
#elif defined(__NetBSD__) || defined(__OpenBSD__)
vaddr = round_page((vaddr_t)vms->vm_daddr + MAXDSIZ);
@@ -1096,26 +1080,26 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
if (retcode)
goto done;
- request.virtual = (void *)vaddr;
+ request->virtual = (void *)vaddr;
for ( i = 0 ; i < dma->buf_count ; i++ ) {
- if (DRM_COPY_TO_USER(&request.list[i].idx,
- &dma->buflist[i]->idx, sizeof(request.list[0].idx))) {
+ if (DRM_COPY_TO_USER(&request->list[i].idx,
+ &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
retcode = EFAULT;
goto done;
}
- if (DRM_COPY_TO_USER(&request.list[i].total,
- &dma->buflist[i]->total, sizeof(request.list[0].total))) {
+ if (DRM_COPY_TO_USER(&request->list[i].total,
+ &dma->buflist[i]->total, sizeof(request->list[0].total))) {
retcode = EFAULT;
goto done;
}
- if (DRM_COPY_TO_USER(&request.list[i].used, &zero,
+ if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
sizeof(zero))) {
retcode = EFAULT;
goto done;
}
address = vaddr + dma->buflist[i]->offset; /* *** */
- if (DRM_COPY_TO_USER(&request.list[i].address, &address,
+ if (DRM_COPY_TO_USER(&request->list[i].address, &address,
sizeof(address))) {
retcode = EFAULT;
goto done;
@@ -1123,11 +1107,9 @@ int drm_mapbufs(DRM_IOCTL_ARGS)
}
done:
- request.count = dma->buf_count;
-
- DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );
+ request->count = dma->buf_count;
- DRM_COPY_TO_USER_IOCTL((drm_buf_map_t *)data, request, sizeof(request));
+ DRM_DEBUG( "%d buffers, retcode = %d\n", request->count, retcode );
- return DRM_ERR(retcode);
+ return retcode;
}
diff --git a/bsd-core/drm_context.c b/bsd-core/drm_context.c
index 8e009540..4155ee92 100644
--- a/bsd-core/drm_context.c
+++ b/bsd-core/drm_context.c
@@ -1,6 +1,3 @@
-/* drm_context.h -- IOCTLs for generic contexts -*- linux-c -*-
- * Created: Fri Nov 24 18:31:37 2000 by gareth@valinux.com
- */
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,10 @@
*
*/
+/** @file drm_context.c
+ * Implementation of the context management ioctls.
+ */
+
#include "drmP.h"
/* ================================================================
@@ -109,7 +110,7 @@ int drm_ctxbitmap_init(drm_device_t *dev)
dev->ctx_bitmap = malloc(PAGE_SIZE, M_DRM, M_NOWAIT | M_ZERO);
if ( dev->ctx_bitmap == NULL ) {
DRM_UNLOCK();
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
dev->context_sareas = NULL;
dev->max_context = -1;
@@ -136,48 +137,39 @@ void drm_ctxbitmap_cleanup(drm_device_t *dev)
* Per Context SAREA Support
*/
-int drm_getsareactx( DRM_IOCTL_ARGS )
+int drm_getsareactx( drm_device_t *dev, void *data, struct drm_file *file_priv )
{
- DRM_DEVICE;
- drm_ctx_priv_map_t request;
+ drm_ctx_priv_map_t *request = data;
drm_local_map_t *map;
- DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data,
- sizeof(request) );
-
DRM_LOCK();
- if (dev->max_context < 0 || request.ctx_id >= (unsigned) dev->max_context) {
+ if (dev->max_context < 0 ||
+ request->ctx_id >= (unsigned) dev->max_context) {
DRM_UNLOCK();
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
- map = dev->context_sareas[request.ctx_id];
+ map = dev->context_sareas[request->ctx_id];
DRM_UNLOCK();
- request.handle = map->handle;
-
- DRM_COPY_TO_USER_IOCTL( (drm_ctx_priv_map_t *)data, request, sizeof(request) );
+ request->handle = map->handle;
return 0;
}
-int drm_setsareactx( DRM_IOCTL_ARGS )
+int drm_setsareactx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_ctx_priv_map_t request;
+ drm_ctx_priv_map_t *request = data;
drm_local_map_t *map = NULL;
- DRM_COPY_FROM_USER_IOCTL( request, (drm_ctx_priv_map_t *)data,
- sizeof(request) );
-
DRM_LOCK();
TAILQ_FOREACH(map, &dev->maplist, link) {
- if (map->handle == request.handle) {
+ if (map->handle == request->handle) {
if (dev->max_context < 0)
goto bad;
- if (request.ctx_id >= (unsigned) dev->max_context)
+ if (request->ctx_id >= (unsigned) dev->max_context)
goto bad;
- dev->context_sareas[request.ctx_id] = map;
+ dev->context_sareas[request->ctx_id] = map;
DRM_UNLOCK();
return 0;
}
@@ -185,7 +177,7 @@ int drm_setsareactx( DRM_IOCTL_ARGS )
bad:
DRM_UNLOCK();
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
/* ================================================================
@@ -196,7 +188,7 @@ int drm_context_switch(drm_device_t *dev, int old, int new)
{
if ( test_and_set_bit( 0, &dev->context_flag ) ) {
DRM_ERROR( "Reentering -- FIXME\n" );
- return DRM_ERR(EBUSY);
+ return EBUSY;
}
DRM_DEBUG( "Context switch from %d to %d\n", old, new );
@@ -225,120 +217,98 @@ int drm_context_switch_complete(drm_device_t *dev, int new)
return 0;
}
-int drm_resctx(DRM_IOCTL_ARGS)
+int drm_resctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- drm_ctx_res_t res;
+ drm_ctx_res_t *res = data;
drm_ctx_t ctx;
int i;
- DRM_COPY_FROM_USER_IOCTL( res, (drm_ctx_res_t *)data, sizeof(res) );
-
- if ( res.count >= DRM_RESERVED_CONTEXTS ) {
+ if ( res->count >= DRM_RESERVED_CONTEXTS ) {
bzero(&ctx, sizeof(ctx));
for ( i = 0 ; i < DRM_RESERVED_CONTEXTS ; i++ ) {
ctx.handle = i;
- if ( DRM_COPY_TO_USER( &res.contexts[i],
+ if ( DRM_COPY_TO_USER( &res->contexts[i],
&ctx, sizeof(ctx) ) )
- return DRM_ERR(EFAULT);
+ return EFAULT;
}
}
- res.count = DRM_RESERVED_CONTEXTS;
-
- DRM_COPY_TO_USER_IOCTL( (drm_ctx_res_t *)data, res, sizeof(res) );
+ res->count = DRM_RESERVED_CONTEXTS;
return 0;
}
-int drm_addctx(DRM_IOCTL_ARGS)
+int drm_addctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+ drm_ctx_t *ctx = data;
- ctx.handle = drm_ctxbitmap_next(dev);
- if ( ctx.handle == DRM_KERNEL_CONTEXT ) {
+ ctx->handle = drm_ctxbitmap_next(dev);
+ if ( ctx->handle == DRM_KERNEL_CONTEXT ) {
/* Skip kernel's context and get a new one. */
- ctx.handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_ctxbitmap_next(dev);
}
- DRM_DEBUG( "%d\n", ctx.handle );
- if ( ctx.handle == -1 ) {
+ DRM_DEBUG( "%d\n", ctx->handle );
+ if ( ctx->handle == -1 ) {
DRM_DEBUG( "Not enough free contexts.\n" );
/* Should this return -EBUSY instead? */
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
- if (dev->driver.context_ctor && ctx.handle != DRM_KERNEL_CONTEXT) {
+ if (dev->driver.context_ctor && ctx->handle != DRM_KERNEL_CONTEXT) {
DRM_LOCK();
- dev->driver.context_ctor(dev, ctx.handle);
+ dev->driver.context_ctor(dev, ctx->handle);
DRM_UNLOCK();
}
- DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
-
return 0;
}
-int drm_modctx(DRM_IOCTL_ARGS)
+int drm_modctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
/* This does nothing */
return 0;
}
-int drm_getctx(DRM_IOCTL_ARGS)
+int drm_getctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- drm_ctx_t ctx;
-
- DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+ drm_ctx_t *ctx = data;
/* This is 0, because we don't handle any context flags */
- ctx.flags = 0;
-
- DRM_COPY_TO_USER_IOCTL( (drm_ctx_t *)data, ctx, sizeof(ctx) );
+ ctx->flags = 0;
return 0;
}
-int drm_switchctx(DRM_IOCTL_ARGS)
+int drm_switchctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
+ drm_ctx_t *ctx = data;
- DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
-
- DRM_DEBUG( "%d\n", ctx.handle );
- return drm_context_switch(dev, dev->last_context, ctx.handle);
+ DRM_DEBUG( "%d\n", ctx->handle );
+ return drm_context_switch(dev, dev->last_context, ctx->handle);
}
-int drm_newctx(DRM_IOCTL_ARGS)
+int drm_newctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+ drm_ctx_t *ctx = data;
- DRM_DEBUG( "%d\n", ctx.handle );
- drm_context_switch_complete(dev, ctx.handle);
+ DRM_DEBUG( "%d\n", ctx->handle );
+ drm_context_switch_complete(dev, ctx->handle);
return 0;
}
-int drm_rmctx(DRM_IOCTL_ARGS)
+int drm_rmctx(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_ctx_t ctx;
-
- DRM_COPY_FROM_USER_IOCTL( ctx, (drm_ctx_t *)data, sizeof(ctx) );
+ drm_ctx_t *ctx = data;
- DRM_DEBUG( "%d\n", ctx.handle );
- if ( ctx.handle != DRM_KERNEL_CONTEXT ) {
+ DRM_DEBUG( "%d\n", ctx->handle );
+ if ( ctx->handle != DRM_KERNEL_CONTEXT ) {
if (dev->driver.context_dtor) {
DRM_LOCK();
- dev->driver.context_dtor(dev, ctx.handle);
+ dev->driver.context_dtor(dev, ctx->handle);
DRM_UNLOCK();
}
- drm_ctxbitmap_free(dev, ctx.handle);
+ drm_ctxbitmap_free(dev, ctx->handle);
}
return 0;
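
Every handler in this file (and throughout the conversion) follows the same recipe: the generic ioctl path now hands the handler a kernel-resident copy of its argument struct through the data pointer, so the DRM_COPY_FROM_USER_IOCTL / DRM_COPY_TO_USER_IOCTL round trips and the DRM_DEVICE / DRM_ERR() boilerplate disappear, and handlers return plain positive errno values. A minimal before/after sketch of that pattern (drm_foo, drm_foo_t and its value field are invented for illustration):

/* Old style: the handler copies the ioctl struct in and out itself. */
int drm_foo(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;			/* recover dev from kdev */
	drm_foo_t arg;

	DRM_COPY_FROM_USER_IOCTL(arg, (drm_foo_t *)data, sizeof(arg));
	arg.value = dev->counters;	/* fill in the reply */
	DRM_COPY_TO_USER_IOCTL((drm_foo_t *)data, arg, sizeof(arg));
	return 0;
}

/* New style: drm_ioctl() already provides a kernel copy in 'data'. */
int drm_foo(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
	drm_foo_t *arg = data;

	arg->value = dev->counters;
	return 0;			/* positive errno on failure */
}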
diff --git a/bsd-core/drm_dma.c b/bsd-core/drm_dma.c
index 67b3fe2d..71ef845b 100644
--- a/bsd-core/drm_dma.c
+++ b/bsd-core/drm_dma.c
@@ -1,6 +1,3 @@
-/* drm_dma.c -- DMA IOCTL and function support -*- linux-c -*-
- * Created: Fri Mar 19 14:30:16 1999 by faith@valinux.com
- */
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,14 @@
*
*/
+/** @file drm_dma.c
+ * Support code for DMA buffer management.
+ *
+ * The implementation used to be significantly more complicated, but the
+ * complexity has been moved into the drivers as different buffer management
+ * schemes evolved.
+ */
+
#include "drmP.h"
int drm_dma_setup(drm_device_t *dev)
@@ -38,9 +43,9 @@ int drm_dma_setup(drm_device_t *dev)
dev->dma = malloc(sizeof(*dev->dma), M_DRM, M_NOWAIT | M_ZERO);
if (dev->dma == NULL)
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
- DRM_SPININIT(dev->dma_lock, "drmdma");
+ DRM_SPININIT(&dev->dma_lock, "drmdma");
return 0;
}
@@ -80,7 +85,7 @@ void drm_dma_takedown(drm_device_t *dev)
free(dma->pagelist, M_DRM);
free(dev->dma, M_DRM);
dev->dma = NULL;
- DRM_SPINUNINIT(dev->dma_lock);
+ DRM_SPINUNINIT(&dev->dma_lock);
}
@@ -89,18 +94,18 @@ void drm_free_buffer(drm_device_t *dev, drm_buf_t *buf)
if (!buf) return;
buf->pending = 0;
- buf->filp = NULL;
+	buf->file_priv = NULL;
buf->used = 0;
}
-void drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
+void drm_reclaim_buffers(drm_device_t *dev, struct drm_file *file_priv)
{
drm_device_dma_t *dma = dev->dma;
int i;
if (!dma) return;
for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->filp == filp) {
+ if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
@@ -117,12 +122,12 @@ void drm_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
}
/* Call into the driver-specific DMA handler */
-int drm_dma(DRM_IOCTL_ARGS)
+int drm_dma(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
if (dev->driver.dma_ioctl) {
- return dev->driver.dma_ioctl(kdev, cmd, data, flags, p, filp);
+ /* shared code returns -errno */
+ return -dev->driver.dma_ioctl(dev, data, file_priv);
} else {
DRM_DEBUG("DMA ioctl on driver with no dma handler\n");
return EINVAL;
diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c
index 379e0aa7..fb318d47 100644
--- a/bsd-core/drm_drawable.c
+++ b/bsd-core/drm_drawable.c
@@ -1,6 +1,3 @@
-/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- */
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,21 +28,123 @@
*
*/
+/** @file drm_drawable.c
+ * This file implements ioctls to store information along with DRM drawables,
+ * such as the current set of cliprects for vblank-synced buffer swaps.
+ */
+
#include "drmP.h"
-int drm_adddraw(DRM_IOCTL_ARGS)
+struct bsd_drm_drawable_info {
+ struct drm_drawable_info info;
+ int handle;
+ RB_ENTRY(bsd_drm_drawable_info) tree;
+};
+
+static int
+drm_drawable_compare(struct bsd_drm_drawable_info *a,
+ struct bsd_drm_drawable_info *b)
+{
+ if (a->handle > b->handle)
+ return 1;
+ if (a->handle < b->handle)
+ return -1;
+ return 0;
+}
+
+RB_GENERATE_STATIC(drawable_tree, bsd_drm_drawable_info, tree,
+ drm_drawable_compare);
+
+struct drm_drawable_info *
+drm_get_drawable_info(drm_device_t *dev, int handle)
+{
+ struct bsd_drm_drawable_info find, *result;
+
+ find.handle = handle;
+ result = RB_FIND(drawable_tree, &dev->drw_head, &find);
+
+ return &result->info;
+}
+
+int drm_adddraw(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- drm_draw_t draw;
+ drm_draw_t *draw = data;
+ struct bsd_drm_drawable_info *info;
- draw.handle = 0; /* NOOP */
- DRM_DEBUG("%d\n", draw.handle);
-
- DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) );
+ info = drm_calloc(1, sizeof(struct bsd_drm_drawable_info),
+ DRM_MEM_DRAWABLE);
+ if (info == NULL)
+ return ENOMEM;
+
+ info->handle = alloc_unr(dev->drw_unrhdr);
+ DRM_SPINLOCK(&dev->drw_lock);
+ RB_INSERT(drawable_tree, &dev->drw_head, info);
+ draw->handle = info->handle;
+ DRM_SPINUNLOCK(&dev->drw_lock);
+
+ DRM_DEBUG("%d\n", draw->handle);
return 0;
}
-int drm_rmdraw(DRM_IOCTL_ARGS)
+int drm_rmdraw(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- return 0; /* NOOP */
+ drm_draw_t *draw = (drm_draw_t *)data;
+ struct drm_drawable_info *info;
+
+ DRM_SPINLOCK(&dev->drw_lock);
+ info = drm_get_drawable_info(dev, draw->handle);
+ if (info != NULL) {
+ RB_REMOVE(drawable_tree, &dev->drw_head,
+ (struct bsd_drm_drawable_info *)info);
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ free_unr(dev->drw_unrhdr, draw->handle);
+ drm_free(info, sizeof(struct bsd_drm_drawable_info),
+ DRM_MEM_DRAWABLE);
+ return 0;
+ } else {
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ return EINVAL;
+ }
+}
+
+int drm_update_draw(drm_device_t *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_drawable_info *info;
+ struct drm_update_draw *update = (struct drm_update_draw *)data;
+ int ret;
+
+ info = drm_get_drawable_info(dev, update->handle);
+ if (info == NULL)
+ return EINVAL;
+
+ switch (update->type) {
+ case DRM_DRAWABLE_CLIPRECTS:
+ DRM_SPINLOCK(&dev->drw_lock);
+ if (update->num != info->num_rects) {
+ drm_free(info->rects,
+ sizeof(*info->rects) * info->num_rects,
+ DRM_MEM_DRAWABLE);
+ info->rects = NULL;
+ info->num_rects = 0;
+ }
+ if (update->num == 0) {
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ return 0;
+ }
+ if (info->rects == NULL) {
+ info->rects = drm_alloc(sizeof(*info->rects) *
+ update->num, DRM_MEM_DRAWABLE);
+ if (info->rects == NULL)
+ return ENOMEM;
+ info->num_rects = update->num;
+ }
+ /* For some reason the pointer arg is unsigned long long. */
+ ret = copyin((void *)(intptr_t)update->data, info->rects,
+ sizeof(*info->rects) * info->num_rects);
+ DRM_SPINUNLOCK(&dev->drw_lock);
+ return ret;
+ default:
+ return EINVAL;
+ }
}
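
The drawable bookkeeping above keys struct bsd_drm_drawable_info by handle in a <sys/tree.h> red-black tree, and lookups build a key on the stack and hand it to RB_FIND, exactly as drm_get_drawable_info() does. A self-contained user-space sketch of that pattern (struct node, node_tree and the demo handles are invented; it assumes a BSD-style <sys/tree.h> is available):

#include <sys/tree.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	int handle;
	RB_ENTRY(node) link;
};

static int
node_compare(struct node *a, struct node *b)
{
	if (a->handle > b->handle)
		return 1;
	if (a->handle < b->handle)
		return -1;
	return 0;
}

RB_HEAD(node_tree, node);
RB_GENERATE_STATIC(node_tree, node, link, node_compare);

int main(void)
{
	struct node_tree head = RB_INITIALIZER(&head);
	struct node *n, key;
	int i;

	for (i = 1; i <= 3; i++) {
		n = calloc(1, sizeof(*n));
		n->handle = i;
		RB_INSERT(node_tree, &head, n);
	}

	key.handle = 2;		/* key lives on the stack, as in drm_get_drawable_info() */
	n = RB_FIND(node_tree, &head, &key);
	printf("found handle %d\n", n != NULL ? n->handle : -1);
	return 0;
}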
diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c
index baaeb43c..afd90351 100644
--- a/bsd-core/drm_drv.c
+++ b/bsd-core/drm_drv.c
@@ -1,6 +1,3 @@
-/* drm_drv.h -- Generic driver template -*- linux-c -*-
- * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
- */
/*-
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,13 @@
*
*/
+/** @file drm_drv.c
+ * The catch-all file for DRM device support, including module setup/teardown,
+ * open/close, and ioctl dispatch.
+ */
+
+
+#include <sys/limits.h>
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"
@@ -64,63 +68,64 @@ MODULE_DEPEND(drm, mem, 1, 1, 1);
#endif /* __NetBSD__ || __OpenBSD__ */
static drm_ioctl_desc_t drm_ioctls[256] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { drm_version, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = { drm_getmap, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = { drm_getclient, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = { drm_getstats, 0 },
- [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = { drm_setversion, DRM_MASTER|DRM_ROOT_ONLY },
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = { drm_rmmap_ioctl, DRM_AUTH },
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = { drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = { drm_getsareactx, DRM_AUTH },
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, DRM_AUTH },
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
-
- [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { drm_lock, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { drm_unlock, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_noop, DRM_AUTH },
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { drm_markbufs, DRM_AUTH|DRM_MASTER },
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { drm_infobufs, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { drm_mapbufs, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { drm_freebufs, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { drm_dma, DRM_AUTH },
-
- [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
-
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info_ioctl, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
-
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = { drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = { drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
-
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = { drm_wait_vblank, 0 },
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, drm_dma, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_draw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
#ifdef __FreeBSD__
@@ -198,6 +203,7 @@ int drm_attach(device_t nbdev, drm_pci_id_list_t *idlist)
"dri/card%d", unit);
#if __FreeBSD_version >= 500000
mtx_init(&dev->dev_lock, "drm device", NULL, MTX_DEF);
+ mtx_init(&dev->drw_lock, "drmdrw", NULL, MTX_DEF);
#endif
id_entry = drm_find_description(pci_get_vendor(dev->device),
@@ -496,7 +502,7 @@ static int drm_lastclose(drm_device_t *dev)
drm_dma_takedown(dev);
if ( dev->lock.hw_lock ) {
dev->lock.hw_lock = NULL; /* SHM removed */
- dev->lock.filp = NULL;
+ dev->lock.file_priv = NULL;
DRM_WAKEUP_INT((void *)&dev->lock.lock_queue);
}
@@ -529,7 +535,8 @@ static int drm_load(drm_device_t *dev)
if (dev->driver.load != NULL) {
DRM_LOCK();
- retcode = dev->driver.load(dev, dev->id_entry->driver_private);
+ retcode = -dev->driver.load(dev,
+ dev->id_entry->driver_private);
DRM_UNLOCK();
if (retcode != 0)
goto error;
@@ -541,7 +548,7 @@ static int drm_load(drm_device_t *dev)
if (dev->driver.require_agp && dev->agp == NULL) {
DRM_ERROR("Card isn't AGP, or couldn't initialize "
"AGP.\n");
- retcode = DRM_ERR(ENOMEM);
+ retcode = ENOMEM;
goto error;
}
if (dev->agp != NULL) {
@@ -556,7 +563,13 @@ static int drm_load(drm_device_t *dev)
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
goto error;
}
-
+
+ dev->drw_unrhdr = new_unrhdr(1, INT_MAX, NULL);
+ if (dev->drw_unrhdr == NULL) {
+ DRM_ERROR("Couldn't allocate drawable number allocator\n");
+ goto error;
+ }
+
DRM_INFO("Initialized %s %d.%d.%d %s\n",
dev->driver.name,
dev->driver.major,
@@ -628,6 +641,8 @@ static void drm_unload(drm_device_t *dev)
if (dev->driver.unload != NULL)
dev->driver.unload(dev);
+ delete_unrhdr(dev->drw_unrhdr);
+
drm_mem_uninit();
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
mtx_destroy(&dev->dev_lock);
@@ -635,32 +650,27 @@ static void drm_unload(drm_device_t *dev)
}
-int drm_version(DRM_IOCTL_ARGS)
+int drm_version(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_version_t version;
+ drm_version_t *version = data;
int len;
- DRM_COPY_FROM_USER_IOCTL( version, (drm_version_t *)data, sizeof(version) );
-
#define DRM_COPY( name, value ) \
len = strlen( value ); \
if ( len > name##_len ) len = name##_len; \
name##_len = strlen( value ); \
if ( len && name ) { \
if ( DRM_COPY_TO_USER( name, value, len ) ) \
- return DRM_ERR(EFAULT); \
+ return EFAULT; \
}
- version.version_major = dev->driver.major;
- version.version_minor = dev->driver.minor;
- version.version_patchlevel = dev->driver.patchlevel;
-
- DRM_COPY(version.name, dev->driver.name);
- DRM_COPY(version.date, dev->driver.date);
- DRM_COPY(version.desc, dev->driver.desc);
+ version->version_major = dev->driver.major;
+ version->version_minor = dev->driver.minor;
+ version->version_patchlevel = dev->driver.patchlevel;
- DRM_COPY_TO_USER_IOCTL( (drm_version_t *)data, version, sizeof(version) );
+ DRM_COPY(version->name, dev->driver.name);
+ DRM_COPY(version->date, dev->driver.date);
+ DRM_COPY(version->desc, dev->driver.desc);
return 0;
}
@@ -692,24 +702,23 @@ int drm_open(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
{
- drm_file_t *priv;
- DRM_DEVICE;
+ drm_device_t *dev = drm_get_device_from_kdev(kdev);
+ drm_file_t *file_priv;
int retcode = 0;
- DRMFILE filp = (void *)(uintptr_t)(DRM_CURRENTPID);
-
+
DRM_DEBUG( "open_count = %d\n", dev->open_count );
DRM_LOCK();
- priv = drm_find_file_by_proc(dev, p);
- if (!priv) {
+ file_priv = drm_find_file_by_proc(dev, p);
+ if (!file_priv) {
DRM_UNLOCK();
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
if (dev->driver.preclose != NULL)
- dev->driver.preclose(dev, filp);
+ dev->driver.preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
@@ -724,12 +733,12 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
#endif
if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
- && dev->lock.filp == filp) {
+ && dev->lock.file_priv == file_priv) {
DRM_DEBUG("Process %d dead, freeing lock for context %d\n",
DRM_CURRENTPID,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
if (dev->driver.reclaim_buffers_locked != NULL)
- dev->driver.reclaim_buffers_locked(dev, filp);
+ dev->driver.reclaim_buffers_locked(dev, file_priv);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@@ -744,12 +753,12 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
for (;;) {
if ( !dev->lock.hw_lock ) {
/* Device has been unregistered */
- retcode = DRM_ERR(EINTR);
+ retcode = EINTR;
break;
}
if (drm_lock_take(&dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT)) {
- dev->lock.filp = filp;
+ dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
break; /* Got lock */
@@ -766,14 +775,14 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
break;
}
if (retcode == 0) {
- dev->driver.reclaim_buffers_locked(dev, filp);
+ dev->driver.reclaim_buffers_locked(dev, file_priv);
drm_lock_free(dev, &dev->lock.hw_lock->lock,
DRM_KERNEL_CONTEXT);
}
}
if (dev->driver.use_dma && !dev->driver.reclaim_buffers_locked)
- drm_reclaim_buffers(dev, filp);
+ drm_reclaim_buffers(dev, file_priv);
#if defined (__FreeBSD__) && (__FreeBSD_version >= 500000)
funsetown(&dev->buf_sigio);
@@ -783,11 +792,11 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
dev->buf_pgid = 0;
#endif /* __NetBSD__ || __OpenBSD__ */
- if (--priv->refs == 0) {
+ if (--file_priv->refs == 0) {
if (dev->driver.postclose != NULL)
- dev->driver.postclose(dev, priv);
- TAILQ_REMOVE(&dev->files, priv, link);
- free(priv, M_DRM);
+ dev->driver.postclose(dev, file_priv);
+ TAILQ_REMOVE(&dev->files, file_priv, link);
+ free(file_priv, M_DRM);
}
/* ========================================================
@@ -812,32 +821,33 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)
int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_STRUCTPROC *p)
{
- DRM_DEVICE;
+ drm_device_t *dev = drm_get_device_from_kdev(kdev);
int retcode = 0;
drm_ioctl_desc_t *ioctl;
- int (*func)(DRM_IOCTL_ARGS);
+ int (*func)(drm_device_t *dev, void *data, struct drm_file *file_priv);
int nr = DRM_IOCTL_NR(cmd);
int is_driver_ioctl = 0;
- drm_file_t *priv;
- DRMFILE filp = (DRMFILE)(uintptr_t)DRM_CURRENTPID;
+ drm_file_t *file_priv;
DRM_LOCK();
- priv = drm_find_file_by_proc(dev, p);
+ file_priv = drm_find_file_by_proc(dev, p);
DRM_UNLOCK();
- if (priv == NULL) {
+ if (file_priv == NULL) {
DRM_ERROR("can't find authenticator\n");
return EINVAL;
}
atomic_inc( &dev->counts[_DRM_STAT_IOCTLS] );
- ++priv->ioctl_count;
+ ++file_priv->ioctl_count;
#ifdef __FreeBSD__
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
- DRM_CURRENTPID, cmd, nr, (long)dev->device, priv->authenticated );
+ DRM_CURRENTPID, cmd, nr, (long)dev->device,
+ file_priv->authenticated );
#elif defined(__NetBSD__) || defined(__OpenBSD__)
DRM_DEBUG( "pid=%d, cmd=0x%02lx, nr=0x%02x, dev 0x%lx, auth=%d\n",
- DRM_CURRENTPID, cmd, nr, (long)&dev->device, priv->authenticated );
+ DRM_CURRENTPID, cmd, nr, (long)&dev->device,
+ file_priv->authenticated );
#endif
switch (cmd) {
@@ -892,24 +902,25 @@ int drm_ioctl(struct cdev *kdev, u_long cmd, caddr_t data, int flags,
DRM_DEBUG( "no function\n" );
return EINVAL;
}
- /* ioctl->master check should be against something in the filp set up
- * for the first opener, but it doesn't matter yet.
- */
+
if (((ioctl->flags & DRM_ROOT_ONLY) && !DRM_SUSER(p)) ||
- ((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !priv->master))
+ ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+ ((ioctl->flags & DRM_MASTER) && !file_priv->master))
return EACCES;
- if (is_driver_ioctl)
+ if (is_driver_ioctl) {
DRM_LOCK();
- retcode = func(kdev, cmd, data, flags, p, filp);
- if (is_driver_ioctl)
+ /* shared code returns -errno */
+ retcode = -func(dev, data, file_priv);
DRM_UNLOCK();
+ } else {
+ retcode = func(dev, data, file_priv);
+ }
if (retcode != 0)
DRM_DEBUG(" returning %d\n", retcode);
- return DRM_ERR(retcode);
+ return retcode;
}
drm_local_map_t *drm_getsarea(drm_device_t *dev)
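
One convention worth calling out in the dispatcher above: handlers native to the BSD tree keep returning positive errno values, while hooks shared with the Linux tree (driver ioctls, driver.load, dma_ioctl, vblank_wait) return -errno, which is why those call sites negate the result before handing it back. A tiny sketch of the convention, with invented names (shared_hook, bsd_caller):

#include <errno.h>

/* Shared-core hook, Linux convention: failure is a negative errno. */
static int shared_hook(int ok)
{
	return ok ? 0 : -EINVAL;
}

/* BSD-side caller: negate, so failures come back as positive errno. */
static int bsd_caller(int ok)
{
	int retcode;

	/* shared code returns -errno */
	retcode = -shared_hook(ok);
	return retcode;
}

int main(void)
{
	return bsd_caller(0) == EINVAL ? 0 : 1;
}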
diff --git a/bsd-core/drm_fops.c b/bsd-core/drm_fops.c
index f5c9349b..20bae8d7 100644
--- a/bsd-core/drm_fops.c
+++ b/bsd-core/drm_fops.c
@@ -1,6 +1,3 @@
-/* drm_fops.h -- File operations for DRM -*- linux-c -*-
- * Created: Mon Jan 4 08:58:31 1999 by faith@valinux.com
- */
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -32,6 +29,11 @@
*
*/
+/** @file drm_fops.c
+ * Support code for dealing with the file privates associated with each
+ * open of the DRM device.
+ */
+
#include "drmP.h"
drm_file_t *drm_find_file_by_proc(drm_device_t *dev, DRM_STRUCTPROC *p)
@@ -75,7 +77,7 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
priv = malloc(sizeof(*priv), M_DRM, M_NOWAIT | M_ZERO);
if (priv == NULL) {
DRM_UNLOCK();
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
}
#if __FreeBSD_version >= 500000
priv->uid = p->td_ucred->cr_svuid;
@@ -93,7 +95,8 @@ int drm_open_helper(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p,
priv->authenticated = DRM_SUSER(p);
if (dev->driver.open) {
- retcode = dev->driver.open(dev, priv);
+ /* shared code returns -errno */
+ retcode = -dev->driver.open(dev, priv);
if (retcode != 0) {
free(priv, M_DRM);
DRM_UNLOCK();
diff --git a/bsd-core/drm_ioctl.c b/bsd-core/drm_ioctl.c
index e22faa83..ce78bb8f 100644
--- a/bsd-core/drm_ioctl.c
+++ b/bsd-core/drm_ioctl.c
@@ -1,6 +1,3 @@
-/* drm_ioctl.h -- IOCTL processing for DRM -*- linux-c -*-
- * Created: Fri Jan 8 09:01:26 1999 by faith@valinux.com
- */
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,11 @@
*
*/
+/** @file drm_ioctl.c
+ * Various minor DRM ioctls not applicable to other files, such as versioning
+ * information and reporting DRM information to userland.
+ */
+
#include "drmP.h"
/*
@@ -39,20 +41,15 @@
* before setunique has been called. The format for the bus-specific part of
* the unique is not defined for any other bus.
*/
-int drm_getunique(DRM_IOCTL_ARGS)
+int drm_getunique(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_unique_t u;
-
- DRM_COPY_FROM_USER_IOCTL( u, (drm_unique_t *)data, sizeof(u) );
+ drm_unique_t *u = data;
- if (u.unique_len >= dev->unique_len) {
- if (DRM_COPY_TO_USER(u.unique, dev->unique, dev->unique_len))
- return DRM_ERR(EFAULT);
+ if (u->unique_len >= dev->unique_len) {
+ if (DRM_COPY_TO_USER(u->unique, dev->unique, dev->unique_len))
+ return EFAULT;
}
- u.unique_len = dev->unique_len;
-
- DRM_COPY_TO_USER_IOCTL( (drm_unique_t *)data, u, sizeof(u) );
+ u->unique_len = dev->unique_len;
return 0;
}
@@ -60,28 +57,25 @@ int drm_getunique(DRM_IOCTL_ARGS)
/* Deprecated in DRM version 1.1, and will return EBUSY when setversion has
* requested version 1.1 or greater.
*/
-int drm_setunique(DRM_IOCTL_ARGS)
+int drm_setunique(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_unique_t u;
+ drm_unique_t *u = data;
int domain, bus, slot, func, ret;
char *busid;
- DRM_COPY_FROM_USER_IOCTL( u, (drm_unique_t *)data, sizeof(u) );
-
/* Check and copy in the submitted Bus ID */
- if (!u.unique_len || u.unique_len > 1024)
- return DRM_ERR(EINVAL);
+ if (!u->unique_len || u->unique_len > 1024)
+ return EINVAL;
- busid = malloc(u.unique_len + 1, M_DRM, M_WAITOK);
+ busid = malloc(u->unique_len + 1, M_DRM, M_WAITOK);
if (busid == NULL)
- return DRM_ERR(ENOMEM);
+ return ENOMEM;
- if (DRM_COPY_FROM_USER(busid, u.unique, u.unique_len)) {
+ if (DRM_COPY_FROM_USER(busid, u->unique, u->unique_len)) {
free(busid, M_DRM);
- return DRM_ERR(EFAULT);
+ return EFAULT;
}
- busid[u.unique_len] = '\0';
+ busid[u->unique_len] = '\0';
/* Return error if the busid submitted doesn't match the device's actual
* busid.
@@ -89,7 +83,7 @@ int drm_setunique(DRM_IOCTL_ARGS)
ret = sscanf(busid, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3) {
free(busid, M_DRM);
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
domain = bus >> 8;
bus &= 0xff;
@@ -99,17 +93,17 @@ int drm_setunique(DRM_IOCTL_ARGS)
(slot != dev->pci_slot) ||
(func != dev->pci_func)) {
free(busid, M_DRM);
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
/* Actually set the device's busid now. */
DRM_LOCK();
if (dev->unique_len || dev->unique) {
DRM_UNLOCK();
- return DRM_ERR(EBUSY);
+ return EBUSY;
}
- dev->unique_len = u.unique_len;
+ dev->unique_len = u->unique_len;
dev->unique = busid;
DRM_UNLOCK();
@@ -143,32 +137,29 @@ drm_set_busid(drm_device_t *dev)
return 0;
}
-int drm_getmap(DRM_IOCTL_ARGS)
+int drm_getmap(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_map_t map;
+ drm_map_t *map = data;
drm_local_map_t *mapinlist;
int idx;
int i = 0;
- DRM_COPY_FROM_USER_IOCTL( map, (drm_map_t *)data, sizeof(map) );
-
- idx = map.offset;
+ idx = map->offset;
DRM_LOCK();
if (idx < 0) {
DRM_UNLOCK();
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
TAILQ_FOREACH(mapinlist, &dev->maplist, link) {
if (i==idx) {
- map.offset = mapinlist->offset;
- map.size = mapinlist->size;
- map.type = mapinlist->type;
- map.flags = mapinlist->flags;
- map.handle = mapinlist->handle;
- map.mtrr = mapinlist->mtrr;
+ map->offset = mapinlist->offset;
+ map->size = mapinlist->size;
+ map->type = mapinlist->type;
+ map->flags = mapinlist->flags;
+ map->handle = mapinlist->handle;
+ map->mtrr = mapinlist->mtrr;
break;
}
i++;
@@ -179,100 +170,89 @@ int drm_getmap(DRM_IOCTL_ARGS)
if (mapinlist == NULL)
return EINVAL;
- DRM_COPY_TO_USER_IOCTL( (drm_map_t *)data, map, sizeof(map) );
-
return 0;
}
-int drm_getclient(DRM_IOCTL_ARGS)
+int drm_getclient(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_client_t client;
+ drm_client_t *client = data;
drm_file_t *pt;
int idx;
int i = 0;
- DRM_COPY_FROM_USER_IOCTL( client, (drm_client_t *)data, sizeof(client) );
-
- idx = client.idx;
+ idx = client->idx;
DRM_LOCK();
TAILQ_FOREACH(pt, &dev->files, link) {
if (i==idx)
{
- client.auth = pt->authenticated;
- client.pid = pt->pid;
- client.uid = pt->uid;
- client.magic = pt->magic;
- client.iocs = pt->ioctl_count;
+ client->auth = pt->authenticated;
+ client->pid = pt->pid;
+ client->uid = pt->uid;
+ client->magic = pt->magic;
+ client->iocs = pt->ioctl_count;
DRM_UNLOCK();
-
- *(drm_client_t *)data = client;
return 0;
}
i++;
}
DRM_UNLOCK();
- DRM_COPY_TO_USER_IOCTL( (drm_client_t *)data, client, sizeof(client) );
-
- return 0;
+ return EINVAL;
}
-int drm_getstats(DRM_IOCTL_ARGS)
+int drm_getstats(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_stats_t stats;
+ drm_stats_t *stats = data;
int i;
- memset(&stats, 0, sizeof(stats));
+ memset(stats, 0, sizeof(drm_stats_t));
DRM_LOCK();
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
- stats.data[i].value
+ stats->data[i].value
= (dev->lock.hw_lock
? dev->lock.hw_lock->lock : 0);
else
- stats.data[i].value = atomic_read(&dev->counts[i]);
- stats.data[i].type = dev->types[i];
+ stats->data[i].value = atomic_read(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
}
- stats.count = dev->counters;
+ stats->count = dev->counters;
DRM_UNLOCK();
- DRM_COPY_TO_USER_IOCTL( (drm_stats_t *)data, stats, sizeof(stats) );
-
return 0;
}
#define DRM_IF_MAJOR 1
#define DRM_IF_MINOR 2
-int drm_setversion(DRM_IOCTL_ARGS)
+int drm_setversion(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_set_version_t sv;
- drm_set_version_t retv;
+ drm_set_version_t *sv = data;
+ drm_set_version_t ver;
int if_version;
- DRM_COPY_FROM_USER_IOCTL(sv, (drm_set_version_t *)data, sizeof(sv));
-
- retv.drm_di_major = DRM_IF_MAJOR;
- retv.drm_di_minor = DRM_IF_MINOR;
- retv.drm_dd_major = dev->driver.major;
- retv.drm_dd_minor = dev->driver.minor;
-
- DRM_COPY_TO_USER_IOCTL((drm_set_version_t *)data, retv, sizeof(sv));
-
- if (sv.drm_di_major != -1) {
- if (sv.drm_di_major != DRM_IF_MAJOR ||
- sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
+ /* Save the incoming data, and set the response before continuing
+ * any further.
+ */
+ ver = *sv;
+ sv->drm_di_major = DRM_IF_MAJOR;
+ sv->drm_di_minor = DRM_IF_MINOR;
+ sv->drm_dd_major = dev->driver.major;
+ sv->drm_dd_minor = dev->driver.minor;
+
+ if (ver.drm_di_major != -1) {
+ if (ver.drm_di_major != DRM_IF_MAJOR ||
+ ver.drm_di_minor < 0 || ver.drm_di_minor > DRM_IF_MINOR) {
return EINVAL;
- if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_dd_minor);
+ }
+ if_version = DRM_IF_VERSION(ver.drm_di_major,
+ ver.drm_dd_minor);
dev->if_version = DRM_MAX(if_version, dev->if_version);
- if (sv.drm_di_minor >= 1) {
+ if (ver.drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
@@ -280,16 +260,20 @@ int drm_setversion(DRM_IOCTL_ARGS)
}
}
- if (sv.drm_dd_major != -1) {
- if (sv.drm_dd_major != dev->driver.major ||
- sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver.minor)
+ if (ver.drm_dd_major != -1) {
+ if (ver.drm_dd_major != dev->driver.major ||
+ ver.drm_dd_minor < 0 ||
+ ver.drm_dd_minor > dev->driver.minor)
+ {
return EINVAL;
+ }
}
+
return 0;
}
-int drm_noop(DRM_IOCTL_ARGS)
+int drm_noop(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
DRM_DEBUG("\n");
return 0;
diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c
index f7da5ed7..0772445a 100644
--- a/bsd-core/drm_irq.c
+++ b/bsd-core/drm_irq.c
@@ -1,6 +1,3 @@
-/* drm_irq.c -- IRQ IOCTL and function support
- * Created: Fri Oct 18 2003 by anholt@FreeBSD.org
- */
/*-
* Copyright 2003 Eric Anholt
* All Rights Reserved.
@@ -28,28 +25,30 @@
*
*/
+/** @file drm_irq.c
+ * Support code for handling setup/teardown of interrupt handlers and
+ * handing interrupt handlers off to the drivers.
+ */
+
#include "drmP.h"
#include "drm.h"
-int drm_irq_by_busid(DRM_IOCTL_ARGS)
-{
- DRM_DEVICE;
- drm_irq_busid_t irq;
+static void drm_locked_task(void *context, int pending __unused);
- DRM_COPY_FROM_USER_IOCTL(irq, (drm_irq_busid_t *)data, sizeof(irq));
+int drm_irq_by_busid(drm_device_t *dev, void *data, struct drm_file *file_priv)
+{
+ drm_irq_busid_t *irq = data;
- if ((irq.busnum >> 8) != dev->pci_domain ||
- (irq.busnum & 0xff) != dev->pci_bus ||
- irq.devnum != dev->pci_slot ||
- irq.funcnum != dev->pci_func)
+ if ((irq->busnum >> 8) != dev->pci_domain ||
+ (irq->busnum & 0xff) != dev->pci_bus ||
+ irq->devnum != dev->pci_slot ||
+ irq->funcnum != dev->pci_func)
return EINVAL;
- irq.irq = dev->irq;
+ irq->irq = dev->irq;
DRM_DEBUG("%d:%d:%d => IRQ %d\n",
- irq.busnum, irq.devnum, irq.funcnum, irq.irq);
-
- DRM_COPY_TO_USER_IOCTL( (drm_irq_busid_t *)data, irq, sizeof(irq) );
+ irq->busnum, irq->devnum, irq->funcnum, irq->irq);
return 0;
}
@@ -74,20 +73,20 @@ int drm_irq_install(drm_device_t *dev)
#endif
if (dev->irq == 0 || dev->dev_private == NULL)
- return DRM_ERR(EINVAL);
+ return EINVAL;
DRM_DEBUG( "%s: irq=%d\n", __FUNCTION__, dev->irq );
DRM_LOCK();
if (dev->irq_enabled) {
DRM_UNLOCK();
- return DRM_ERR(EBUSY);
+ return EBUSY;
}
dev->irq_enabled = 1;
dev->context_flag = 0;
- DRM_SPININIT(dev->irq_lock, "DRM IRQ lock");
+ DRM_SPININIT(&dev->irq_lock, "DRM IRQ lock");
/* Before installing handler */
dev->driver.irq_preinstall(dev);
@@ -131,6 +130,7 @@ int drm_irq_install(drm_device_t *dev)
dev->driver.irq_postinstall(dev);
DRM_UNLOCK();
+ TASK_INIT(&dev->locked_task, 0, drm_locked_task, dev);
return 0;
err:
DRM_LOCK();
@@ -142,7 +142,7 @@ err:
dev->irqrid = 0;
}
#endif
- DRM_SPINUNINIT(dev->irq_lock);
+ DRM_SPINUNINIT(&dev->irq_lock);
DRM_UNLOCK();
return retcode;
}
@@ -154,7 +154,7 @@ int drm_irq_uninstall(drm_device_t *dev)
#endif
if (!dev->irq_enabled)
- return DRM_ERR(EINVAL);
+ return EINVAL;
dev->irq_enabled = 0;
#ifdef __FreeBSD__
@@ -174,20 +174,17 @@ int drm_irq_uninstall(drm_device_t *dev)
#elif defined(__NetBSD__) || defined(__OpenBSD__)
pci_intr_disestablish(&dev->pa.pa_pc, dev->irqh);
#endif
- DRM_SPINUNINIT(dev->irq_lock);
+ DRM_SPINUNINIT(&dev->irq_lock);
return 0;
}
-int drm_control(DRM_IOCTL_ARGS)
+int drm_control(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_control_t ctl;
+ drm_control_t *ctl = data;
int err;
- DRM_COPY_FROM_USER_IOCTL( ctl, (drm_control_t *) data, sizeof(ctl) );
-
- switch ( ctl.func ) {
+ switch ( ctl->func ) {
case DRM_INST_HANDLER:
 		/* Handle drivers whose DRM used to require IRQ setup but no
 		 * longer does.
@@ -195,8 +192,8 @@ int drm_control(DRM_IOCTL_ARGS)
if (!dev->driver.use_irq)
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl.irq != dev->irq)
- return DRM_ERR(EINVAL);
+ ctl->irq != dev->irq)
+ return EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
if (!dev->driver.use_irq)
@@ -206,29 +203,25 @@ int drm_control(DRM_IOCTL_ARGS)
DRM_UNLOCK();
return err;
default:
- return DRM_ERR(EINVAL);
+ return EINVAL;
}
}
-int drm_wait_vblank(DRM_IOCTL_ARGS)
+int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_wait_vblank_t vblwait;
+ drm_wait_vblank_t *vblwait = data;
struct timeval now;
- int ret;
+ int ret, flags;
if (!dev->irq_enabled)
- return DRM_ERR(EINVAL);
-
- DRM_COPY_FROM_USER_IOCTL( vblwait, (drm_wait_vblank_t *)data,
- sizeof(vblwait) );
+ return EINVAL;
- if (vblwait.request.type & _DRM_VBLANK_RELATIVE) {
- vblwait.request.sequence += atomic_read(&dev->vbl_received);
- vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
+ if (vblwait->request.type & _DRM_VBLANK_RELATIVE) {
+ vblwait->request.sequence += atomic_read(&dev->vbl_received);
+ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
}
- flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
if (flags & _DRM_VBLANK_SIGNAL) {
#if 0 /* disabled */
drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t), M_DRM,
@@ -236,11 +229,11 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (vbl_sig == NULL)
return ENOMEM;
- vbl_sig->sequence = vblwait.request.sequence;
- vbl_sig->signo = vblwait.request.signal;
+ vbl_sig->sequence = vblwait->request.sequence;
+ vbl_sig->signo = vblwait->request.signal;
vbl_sig->pid = DRM_CURRENTPID;
- vblwait.reply.sequence = atomic_read(&dev->vbl_received);
+ vblwait->reply.sequence = atomic_read(&dev->vbl_received);
DRM_SPINLOCK(&dev->irq_lock);
TAILQ_INSERT_HEAD(&dev->vbl_sig_list, vbl_sig, link);
@@ -250,17 +243,16 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
ret = EINVAL;
} else {
DRM_LOCK();
- ret = dev->driver.vblank_wait(dev, &vblwait.request.sequence);
+ /* shared code returns -errno */
+ ret = -dev->driver.vblank_wait(dev,
+ &vblwait->request.sequence);
DRM_UNLOCK();
microtime(&now);
- vblwait.reply.tval_sec = now.tv_sec;
- vblwait.reply.tval_usec = now.tv_usec;
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
}
- DRM_COPY_TO_USER_IOCTL( (drm_wait_vblank_t *)data, vblwait,
- sizeof(vblwait) );
-
return ret;
}
@@ -291,3 +283,45 @@ void drm_vbl_send_signals( drm_device_t *dev )
}
}
#endif
+
+static void drm_locked_task(void *context, int pending __unused)
+{
+ drm_device_t *dev = context;
+
+ DRM_LOCK();
+ for (;;) {
+ int ret;
+
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT))
+ {
+ dev->lock.file_priv = NULL; /* kernel owned */
+ dev->lock.lock_time = jiffies;
+ atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+#if defined(__FreeBSD__) && __FreeBSD_version > 500000
+ ret = msleep((void *)&dev->lock.lock_queue, &dev->dev_lock,
+ PZERO | PCATCH, "drmlk2", 0);
+#else
+ ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH,
+ "drmlk2", 0);
+#endif
+ if (ret != 0)
+ return;
+ }
+ DRM_UNLOCK();
+
+ dev->locked_task_call(dev);
+
+ drm_lock_free(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+}
+
+void
+drm_locked_tasklet(drm_device_t *dev, void (*tasklet)(drm_device_t *dev))
+{
+ dev->locked_task_call = tasklet;
+ taskqueue_enqueue(taskqueue_swi, &dev->locked_task);
+}
diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c
index d0e61d3a..fb86fc68 100644
--- a/bsd-core/drm_lock.c
+++ b/bsd-core/drm_lock.c
@@ -1,6 +1,3 @@
-/* lock.c -- IOCTLs for locking -*- linux-c -*-
- * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com
- */
/*-
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,25 @@
*
*/
+/** @file drm_lock.c
+ * Implementation of the ioctls and other support code for dealing with the
+ * hardware lock.
+ *
+ * The DRM hardware lock is a shared structure between the kernel and userland.
+ *
+ * On uncontended access where the new context was the last context, the
+ * client may take the lock without dropping down into the kernel, using atomic
+ * compare-and-set.
+ *
+ * If the client finds during compare-and-set that it was not the last owner
+ * of the lock, it calls the DRM lock ioctl, which may sleep waiting for the
+ * lock, and may have side-effects of kernel-managed context switching.
+ *
+ * When the client releases the lock, if the lock is marked as being contended
+ * by another client, then the DRM unlock ioctl is called so that the
+ * contending client may be woken up.
+ */
+
#include "drmP.h"
int drm_lock_take(__volatile__ unsigned int *lock, unsigned int context)
@@ -66,7 +82,7 @@ int drm_lock_transfer(drm_device_t *dev,
{
unsigned int old, new;
- dev->lock.filp = NULL;
+ dev->lock.file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
@@ -80,7 +96,7 @@ int drm_lock_free(drm_device_t *dev,
{
unsigned int old, new;
- dev->lock.filp = NULL;
+ dev->lock.file_priv = NULL;
do {
old = *lock;
new = 0;
@@ -95,30 +111,28 @@ int drm_lock_free(drm_device_t *dev,
return 0;
}
-int drm_lock(DRM_IOCTL_ARGS)
+int drm_lock(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_lock_t lock;
+ drm_lock_t *lock = data;
int ret = 0;
- DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t *)data, sizeof(lock));
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
+ if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
- DRM_CURRENTPID, lock.context);
+ DRM_CURRENTPID, lock->context);
return EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock.context, DRM_CURRENTPID, dev->lock.hw_lock->lock, lock.flags);
+ lock->context, DRM_CURRENTPID, dev->lock.hw_lock->lock,
+ lock->flags);
- if (dev->driver.use_dma_queue && lock.context < 0)
+ if (dev->driver.use_dma_queue && lock->context < 0)
return EINVAL;
DRM_LOCK();
for (;;) {
- if (drm_lock_take(&dev->lock.hw_lock->lock, lock.context)) {
- dev->lock.filp = (void *)(uintptr_t)DRM_CURRENTPID;
+ if (drm_lock_take(&dev->lock.hw_lock->lock, lock->context)) {
+ dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
@@ -136,7 +150,7 @@ int drm_lock(DRM_IOCTL_ARGS)
break;
}
DRM_UNLOCK();
- DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+ DRM_DEBUG("%d %s\n", lock->context, ret ? "interrupted" : "has lock");
if (ret != 0)
return ret;
@@ -144,24 +158,27 @@ int drm_lock(DRM_IOCTL_ARGS)
/* XXX: Add signal blocking here */
if (dev->driver.dma_quiescent != NULL &&
- (lock.flags & _DRM_LOCK_QUIESCENT))
+ (lock->flags & _DRM_LOCK_QUIESCENT))
dev->driver.dma_quiescent(dev);
return 0;
}
-int drm_unlock(DRM_IOCTL_ARGS)
+int drm_unlock(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_lock_t lock;
+ drm_lock_t *lock = data;
- DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t *)data, sizeof(lock));
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
+ if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
- DRM_CURRENTPID, lock.context);
+ DRM_CURRENTPID, lock->context);
return EINVAL;
}
+ /* Check that the context unlock being requested actually matches
+ * who currently holds the lock.
+ */
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) ||
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) != lock->context)
+ return EINVAL;
atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
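
The file comment above describes the userland fast path: when the lock is free and the caller's context was its last holder, it can be re-taken with one compare-and-set on the shared lock word, and only contended cases reach the lock ioctl. A rough sketch of that check using the lock-word macros seen in this file (try_take_lock_fast is an invented helper; atomic_cmpset_int is the FreeBSD primitive drm_lock_take() uses on the kernel side):

/* Returns non-zero if the lock was taken without entering the kernel. */
static int
try_take_lock_fast(volatile unsigned int *lock, unsigned int context)
{
	unsigned int old = *lock;

	/* Held, or last held by another context: fall back to the lock ioctl. */
	if (_DRM_LOCK_IS_HELD(old) || _DRM_LOCKING_CONTEXT(old) != context)
		return 0;

	return atomic_cmpset_int(lock, old, context | _DRM_LOCK_HELD);
}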
diff --git a/bsd-core/drm_memory.c b/bsd-core/drm_memory.c
index 6d467e98..1f1f7f4b 100644
--- a/bsd-core/drm_memory.c
+++ b/bsd-core/drm_memory.c
@@ -1,6 +1,3 @@
-/* drm_memory.h -- Memory management wrappers for DRM -*- linux-c -*-
- * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
- */
/*-
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
@@ -31,6 +28,14 @@
*
*/
+/** @file drm_memory.c
+ * Wrappers for kernel memory allocation routines, and MTRR management support.
+ *
+ * This file previously implemented a memory consumption tracking system using
+ * the "area" argument for various different types of allocations, but that
+ * has been stripped out for now.
+ */
+
#include "drmP.h"
MALLOC_DEFINE(M_DRM, "drm", "DRM Data Structures");
diff --git a/bsd-core/drm_pci.c b/bsd-core/drm_pci.c
index a33f5f9c..6ec6b983 100644
--- a/bsd-core/drm_pci.c
+++ b/bsd-core/drm_pci.c
@@ -1,10 +1,3 @@
-/**
- * \file drm_pci.h
- * \brief PCI consistent, DMA-accessible memory functions.
- *
- * \author Eric Anholt <anholt@FreeBSD.org>
- */
-
/*-
* Copyright 2003 Eric Anholt.
* All Rights Reserved.
@@ -28,6 +21,13 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+/**
+ * \file drm_pci.h
+ * \brief PCI consistent, DMA-accessible memory allocation.
+ *
+ * \author Eric Anholt <anholt@FreeBSD.org>
+ */
+
#include "drmP.h"
/**********************************************************************/
diff --git a/bsd-core/drm_sarea.h b/bsd-core/drm_sarea.h
new file mode 120000
index 00000000..fd428f42
--- /dev/null
+++ b/bsd-core/drm_sarea.h
@@ -0,0 +1 @@
+../shared-core/drm_sarea.h
\ No newline at end of file
diff --git a/bsd-core/drm_scatter.c b/bsd-core/drm_scatter.c
index 9dc280a4..92e715e0 100644
--- a/bsd-core/drm_scatter.c
+++ b/bsd-core/drm_scatter.c
@@ -1,5 +1,3 @@
-/* drm_scatter.h -- IOCTLs to manage scatter/gather memory -*- linux-c -*-
- * Created: Mon Dec 18 23:20:54 2000 by gareth@valinux.com */
/*-
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
@@ -29,6 +27,13 @@
*
*/
+/** @file drm_scatter.c
+ * Allocation of memory for scatter-gather mappings by the graphics chip.
+ *
+ * The memory allocated here is then made into an aperture in the card
+ * by drm_ati_pcigart_init().
+ */
+
#include "drmP.h"
#define DEBUG_SCATTER 0
@@ -40,28 +45,21 @@ void drm_sg_cleanup(drm_sg_mem_t *entry)
free(entry, M_DRM);
}
-int drm_sg_alloc(DRM_IOCTL_ARGS)
+int drm_sg_alloc(drm_device_t * dev, drm_scatter_gather_t * request)
{
- DRM_DEVICE;
- drm_scatter_gather_t request;
drm_sg_mem_t *entry;
unsigned long pages;
int i;
- DRM_DEBUG( "%s\n", __FUNCTION__ );
-
if ( dev->sg )
return EINVAL;
- DRM_COPY_FROM_USER_IOCTL(request, (drm_scatter_gather_t *)data,
- sizeof(request) );
-
entry = malloc(sizeof(*entry), M_DRM, M_WAITOK | M_ZERO);
if ( !entry )
return ENOMEM;
- pages = round_page(request.size) / PAGE_SIZE;
- DRM_DEBUG( "sg size=%ld pages=%ld\n", request.size, pages );
+ pages = round_page(request->size) / PAGE_SIZE;
+ DRM_DEBUG( "sg size=%ld pages=%ld\n", request->size, pages );
entry->pages = pages;
@@ -86,11 +84,7 @@ int drm_sg_alloc(DRM_IOCTL_ARGS)
DRM_DEBUG( "sg alloc handle = %08lx\n", entry->handle );
entry->virtual = (void *)entry->handle;
- request.handle = entry->handle;
-
- DRM_COPY_TO_USER_IOCTL( (drm_scatter_gather_t *)data,
- request,
- sizeof(request) );
+ request->handle = entry->handle;
DRM_LOCK();
if (dev->sg) {
@@ -104,21 +98,28 @@ int drm_sg_alloc(DRM_IOCTL_ARGS)
return 0;
}
-int drm_sg_free(DRM_IOCTL_ARGS)
+int drm_sg_alloc_ioctl(drm_device_t *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_scatter_gather_t request;
- drm_sg_mem_t *entry;
+ drm_scatter_gather_t *request = data;
+ int ret;
+
+ DRM_DEBUG( "%s\n", __FUNCTION__ );
- DRM_COPY_FROM_USER_IOCTL( request, (drm_scatter_gather_t *)data,
- sizeof(request) );
+ ret = drm_sg_alloc(dev, request);
+ return ret;
+}
+
+int drm_sg_free(drm_device_t *dev, void *data, struct drm_file *file_priv)
+{
+ drm_scatter_gather_t *request = data;
+ drm_sg_mem_t *entry;
DRM_LOCK();
entry = dev->sg;
dev->sg = NULL;
DRM_UNLOCK();
- if ( !entry || entry->handle != request.handle )
+ if ( !entry || entry->handle != request->handle )
return EINVAL;
DRM_DEBUG( "sg free virtual = 0x%lx\n", entry->handle );
diff --git a/bsd-core/drm_sysctl.c b/bsd-core/drm_sysctl.c
index b2d0cc0c..3de5b8ae 100644
--- a/bsd-core/drm_sysctl.c
+++ b/bsd-core/drm_sysctl.c
@@ -21,6 +21,11 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+/** @file drm_sysctl.c
+ * Implementation of various sysctls for controlling DRM behavior and reporting
+ * debug information.
+ */
+
#include "drmP.h"
#include "drm.h"
diff --git a/bsd-core/drm_vm.c b/bsd-core/drm_vm.c
index 7f732c9b..fea31f52 100644
--- a/bsd-core/drm_vm.c
+++ b/bsd-core/drm_vm.c
@@ -21,6 +21,10 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+/** @file drm_vm.c
+ * Support code for mmapping of DRM maps.
+ */
+
#include "drmP.h"
#include "drm.h"
@@ -33,7 +37,7 @@ int drm_mmap(dev_t kdev, vm_offset_t offset, int prot)
paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
#endif
{
- DRM_DEVICE;
+ drm_device_t *dev = drm_get_device_from_kdev(kdev);
drm_local_map_t *map;
drm_file_t *priv;
drm_map_type_t type;
@@ -52,7 +56,7 @@ paddr_t drm_mmap(dev_t kdev, off_t offset, int prot)
}
if (!priv->authenticated)
- return DRM_ERR(EACCES);
+ return EACCES;
if (dev->dma && offset >= 0 && offset < ptoa(dev->dma->page_count)) {
drm_device_dma_t *dma = dev->dma;
diff --git a/bsd-core/i915_dma.c b/bsd-core/i915_dma.c
new file mode 120000
index 00000000..c61d967e
--- /dev/null
+++ b/bsd-core/i915_dma.c
@@ -0,0 +1 @@
+../shared-core/i915_dma.c
\ No newline at end of file
diff --git a/bsd-core/i915_drm.h b/bsd-core/i915_drm.h
new file mode 120000
index 00000000..ed53f01d
--- /dev/null
+++ b/bsd-core/i915_drm.h
@@ -0,0 +1 @@
+../shared-core/i915_drm.h
\ No newline at end of file
diff --git a/bsd-core/i915_drv.h b/bsd-core/i915_drv.h
new file mode 120000
index 00000000..085558ca
--- /dev/null
+++ b/bsd-core/i915_drv.h
@@ -0,0 +1 @@
+../shared-core/i915_drv.h
\ No newline at end of file
diff --git a/bsd-core/i915_irq.c b/bsd-core/i915_irq.c
new file mode 120000
index 00000000..2058a2e4
--- /dev/null
+++ b/bsd-core/i915_irq.c
@@ -0,0 +1 @@
+../shared-core/i915_irq.c
\ No newline at end of file
diff --git a/bsd-core/i915_mem.c b/bsd-core/i915_mem.c
new file mode 120000
index 00000000..e8e56553
--- /dev/null
+++ b/bsd-core/i915_mem.c
@@ -0,0 +1 @@
+../shared-core/i915_mem.c
\ No newline at end of file
diff --git a/bsd-core/mach64_dma.c b/bsd-core/mach64_dma.c
new file mode 120000
index 00000000..e5c28975
--- /dev/null
+++ b/bsd-core/mach64_dma.c
@@ -0,0 +1 @@
+../shared-core/mach64_dma.c
\ No newline at end of file
diff --git a/bsd-core/mach64_drm.h b/bsd-core/mach64_drm.h
new file mode 120000
index 00000000..136ea936
--- /dev/null
+++ b/bsd-core/mach64_drm.h
@@ -0,0 +1 @@
+../shared-core/mach64_drm.h
\ No newline at end of file
diff --git a/bsd-core/mach64_drv.h b/bsd-core/mach64_drv.h
new file mode 120000
index 00000000..85222cc2
--- /dev/null
+++ b/bsd-core/mach64_drv.h
@@ -0,0 +1 @@
+../shared-core/mach64_drv.h
\ No newline at end of file
diff --git a/bsd-core/mach64_irq.c b/bsd-core/mach64_irq.c
new file mode 120000
index 00000000..a1235d58
--- /dev/null
+++ b/bsd-core/mach64_irq.c
@@ -0,0 +1 @@
+../shared-core/mach64_irq.c
\ No newline at end of file
diff --git a/bsd-core/mach64_state.c b/bsd-core/mach64_state.c
new file mode 120000
index 00000000..b11f202c
--- /dev/null
+++ b/bsd-core/mach64_state.c
@@ -0,0 +1 @@
+../shared-core/mach64_state.c \ No newline at end of file
diff --git a/bsd-core/mga_dma.c b/bsd-core/mga_dma.c
new file mode 120000
index 00000000..f290be9b
--- /dev/null
+++ b/bsd-core/mga_dma.c
@@ -0,0 +1 @@
+../shared-core/mga_dma.c \ No newline at end of file
diff --git a/bsd-core/mga_drm.h b/bsd-core/mga_drm.h
new file mode 120000
index 00000000..1c87036f
--- /dev/null
+++ b/bsd-core/mga_drm.h
@@ -0,0 +1 @@
+../shared-core/mga_drm.h \ No newline at end of file
diff --git a/bsd-core/mga_drv.h b/bsd-core/mga_drv.h
new file mode 120000
index 00000000..cb0c9e1d
--- /dev/null
+++ b/bsd-core/mga_drv.h
@@ -0,0 +1 @@
+../shared-core/mga_drv.h \ No newline at end of file
diff --git a/bsd-core/mga_irq.c b/bsd-core/mga_irq.c
new file mode 120000
index 00000000..cf521d29
--- /dev/null
+++ b/bsd-core/mga_irq.c
@@ -0,0 +1 @@
+../shared-core/mga_irq.c \ No newline at end of file
diff --git a/bsd-core/mga_state.c b/bsd-core/mga_state.c
new file mode 120000
index 00000000..8bda8ba9
--- /dev/null
+++ b/bsd-core/mga_state.c
@@ -0,0 +1 @@
+../shared-core/mga_state.c \ No newline at end of file
diff --git a/bsd-core/mga_ucode.h b/bsd-core/mga_ucode.h
new file mode 120000
index 00000000..728b9aca
--- /dev/null
+++ b/bsd-core/mga_ucode.h
@@ -0,0 +1 @@
+../shared-core/mga_ucode.h \ No newline at end of file
diff --git a/bsd-core/mga_warp.c b/bsd-core/mga_warp.c
new file mode 120000
index 00000000..d35b3255
--- /dev/null
+++ b/bsd-core/mga_warp.c
@@ -0,0 +1 @@
+../shared-core/mga_warp.c \ No newline at end of file
diff --git a/bsd-core/r128_cce.c b/bsd-core/r128_cce.c
new file mode 120000
index 00000000..0c1d659e
--- /dev/null
+++ b/bsd-core/r128_cce.c
@@ -0,0 +1 @@
+../shared-core/r128_cce.c \ No newline at end of file
diff --git a/bsd-core/r128_drm.h b/bsd-core/r128_drm.h
new file mode 120000
index 00000000..363852cb
--- /dev/null
+++ b/bsd-core/r128_drm.h
@@ -0,0 +1 @@
+../shared-core/r128_drm.h \ No newline at end of file
diff --git a/bsd-core/r128_drv.h b/bsd-core/r128_drv.h
new file mode 120000
index 00000000..4f7e822d
--- /dev/null
+++ b/bsd-core/r128_drv.h
@@ -0,0 +1 @@
+../shared-core/r128_drv.h \ No newline at end of file
diff --git a/bsd-core/r128_irq.c b/bsd-core/r128_irq.c
new file mode 120000
index 00000000..66d28b05
--- /dev/null
+++ b/bsd-core/r128_irq.c
@@ -0,0 +1 @@
+../shared-core/r128_irq.c \ No newline at end of file
diff --git a/bsd-core/r128_state.c b/bsd-core/r128_state.c
new file mode 120000
index 00000000..e83d84b5
--- /dev/null
+++ b/bsd-core/r128_state.c
@@ -0,0 +1 @@
+../shared-core/r128_state.c \ No newline at end of file
diff --git a/bsd-core/r300_cmdbuf.c b/bsd-core/r300_cmdbuf.c
new file mode 120000
index 00000000..6674d056
--- /dev/null
+++ b/bsd-core/r300_cmdbuf.c
@@ -0,0 +1 @@
+../shared-core/r300_cmdbuf.c \ No newline at end of file
diff --git a/bsd-core/r300_reg.h b/bsd-core/r300_reg.h
new file mode 120000
index 00000000..ef54eba2
--- /dev/null
+++ b/bsd-core/r300_reg.h
@@ -0,0 +1 @@
+../shared-core/r300_reg.h \ No newline at end of file
diff --git a/bsd-core/radeon_cp.c b/bsd-core/radeon_cp.c
new file mode 120000
index 00000000..ee860943
--- /dev/null
+++ b/bsd-core/radeon_cp.c
@@ -0,0 +1 @@
+../shared-core/radeon_cp.c \ No newline at end of file
diff --git a/bsd-core/radeon_drm.h b/bsd-core/radeon_drm.h
new file mode 120000
index 00000000..54f595a3
--- /dev/null
+++ b/bsd-core/radeon_drm.h
@@ -0,0 +1 @@
+../shared-core/radeon_drm.h \ No newline at end of file
diff --git a/bsd-core/radeon_drv.h b/bsd-core/radeon_drv.h
new file mode 120000
index 00000000..5b415ea8
--- /dev/null
+++ b/bsd-core/radeon_drv.h
@@ -0,0 +1 @@
+../shared-core/radeon_drv.h \ No newline at end of file
diff --git a/bsd-core/radeon_irq.c b/bsd-core/radeon_irq.c
new file mode 120000
index 00000000..2f394a5e
--- /dev/null
+++ b/bsd-core/radeon_irq.c
@@ -0,0 +1 @@
+../shared-core/radeon_irq.c \ No newline at end of file
diff --git a/bsd-core/radeon_mem.c b/bsd-core/radeon_mem.c
new file mode 120000
index 00000000..8cc27989
--- /dev/null
+++ b/bsd-core/radeon_mem.c
@@ -0,0 +1 @@
+../shared-core/radeon_mem.c \ No newline at end of file
diff --git a/bsd-core/radeon_state.c b/bsd-core/radeon_state.c
new file mode 120000
index 00000000..ccee8761
--- /dev/null
+++ b/bsd-core/radeon_state.c
@@ -0,0 +1 @@
+../shared-core/radeon_state.c \ No newline at end of file
diff --git a/bsd-core/savage_bci.c b/bsd-core/savage_bci.c
new file mode 120000
index 00000000..b8436713
--- /dev/null
+++ b/bsd-core/savage_bci.c
@@ -0,0 +1 @@
+../shared-core/savage_bci.c \ No newline at end of file
diff --git a/bsd-core/savage_drm.h b/bsd-core/savage_drm.h
new file mode 120000
index 00000000..0dab2e3b
--- /dev/null
+++ b/bsd-core/savage_drm.h
@@ -0,0 +1 @@
+../shared-core/savage_drm.h \ No newline at end of file
diff --git a/bsd-core/savage_drv.h b/bsd-core/savage_drv.h
new file mode 120000
index 00000000..8397009c
--- /dev/null
+++ b/bsd-core/savage_drv.h
@@ -0,0 +1 @@
+../shared-core/savage_drv.h \ No newline at end of file
diff --git a/bsd-core/savage_state.c b/bsd-core/savage_state.c
new file mode 120000
index 00000000..e55dc5d4
--- /dev/null
+++ b/bsd-core/savage_state.c
@@ -0,0 +1 @@
+../shared-core/savage_state.c \ No newline at end of file
diff --git a/bsd-core/sis_drm.h b/bsd-core/sis_drm.h
new file mode 120000
index 00000000..36c77aac
--- /dev/null
+++ b/bsd-core/sis_drm.h
@@ -0,0 +1 @@
+../shared-core/sis_drm.h \ No newline at end of file
diff --git a/bsd-core/sis_drv.h b/bsd-core/sis_drv.h
new file mode 120000
index 00000000..3fddfdae
--- /dev/null
+++ b/bsd-core/sis_drv.h
@@ -0,0 +1 @@
+../shared-core/sis_drv.h \ No newline at end of file
diff --git a/bsd-core/sis_ds.c b/bsd-core/sis_ds.c
new file mode 120000
index 00000000..242310a0
--- /dev/null
+++ b/bsd-core/sis_ds.c
@@ -0,0 +1 @@
+../shared-core/sis_ds.c \ No newline at end of file
diff --git a/bsd-core/sis_ds.h b/bsd-core/sis_ds.h
new file mode 120000
index 00000000..8cbdaf3b
--- /dev/null
+++ b/bsd-core/sis_ds.h
@@ -0,0 +1 @@
+../shared-core/sis_ds.h \ No newline at end of file
diff --git a/bsd-core/sis_mm.c b/bsd-core/sis_mm.c
new file mode 120000
index 00000000..8f802ec3
--- /dev/null
+++ b/bsd-core/sis_mm.c
@@ -0,0 +1 @@
+../shared-core/sis_mm.c \ No newline at end of file
diff --git a/bsd-core/tdfx_drv.h b/bsd-core/tdfx_drv.h
new file mode 120000
index 00000000..8df70329
--- /dev/null
+++ b/bsd-core/tdfx_drv.h
@@ -0,0 +1 @@
+../shared-core/tdfx_drv.h \ No newline at end of file
diff --git a/bsd-core/via_3d_reg.h b/bsd-core/via_3d_reg.h
new file mode 120000
index 00000000..90d238ec
--- /dev/null
+++ b/bsd-core/via_3d_reg.h
@@ -0,0 +1 @@
+../shared-core/via_3d_reg.h \ No newline at end of file
diff --git a/bsd-core/via_dma.c b/bsd-core/via_dma.c
new file mode 120000
index 00000000..1f4d920f
--- /dev/null
+++ b/bsd-core/via_dma.c
@@ -0,0 +1 @@
+../shared-core/via_dma.c \ No newline at end of file
diff --git a/bsd-core/via_drm.h b/bsd-core/via_drm.h
new file mode 120000
index 00000000..7cd175d3
--- /dev/null
+++ b/bsd-core/via_drm.h
@@ -0,0 +1 @@
+../shared-core/via_drm.h \ No newline at end of file
diff --git a/bsd-core/via_drv.h b/bsd-core/via_drv.h
new file mode 120000
index 00000000..8954fe88
--- /dev/null
+++ b/bsd-core/via_drv.h
@@ -0,0 +1 @@
+../shared-core/via_drv.h \ No newline at end of file
diff --git a/bsd-core/via_ds.c b/bsd-core/via_ds.c
new file mode 120000
index 00000000..b0fbb694
--- /dev/null
+++ b/bsd-core/via_ds.c
@@ -0,0 +1 @@
+../shared-core/via_ds.c \ No newline at end of file
diff --git a/bsd-core/via_ds.h b/bsd-core/via_ds.h
new file mode 120000
index 00000000..dc8f2f44
--- /dev/null
+++ b/bsd-core/via_ds.h
@@ -0,0 +1 @@
+../shared-core/via_ds.h \ No newline at end of file
diff --git a/bsd-core/via_irq.c b/bsd-core/via_irq.c
new file mode 120000
index 00000000..f615af87
--- /dev/null
+++ b/bsd-core/via_irq.c
@@ -0,0 +1 @@
+../shared-core/via_irq.c \ No newline at end of file
diff --git a/bsd-core/via_map.c b/bsd-core/via_map.c
new file mode 120000
index 00000000..b5056634
--- /dev/null
+++ b/bsd-core/via_map.c
@@ -0,0 +1 @@
+../shared-core/via_map.c \ No newline at end of file
diff --git a/bsd-core/via_mm.c b/bsd-core/via_mm.c
new file mode 120000
index 00000000..f9ec0f37
--- /dev/null
+++ b/bsd-core/via_mm.c
@@ -0,0 +1 @@
+../shared-core/via_mm.c \ No newline at end of file
diff --git a/bsd-core/via_mm.h b/bsd-core/via_mm.h
new file mode 120000
index 00000000..fe2234f6
--- /dev/null
+++ b/bsd-core/via_mm.h
@@ -0,0 +1 @@
+../shared-core/via_mm.h \ No newline at end of file
diff --git a/bsd-core/via_verifier.c b/bsd-core/via_verifier.c
new file mode 120000
index 00000000..00b411bd
--- /dev/null
+++ b/bsd-core/via_verifier.c
@@ -0,0 +1 @@
+../shared-core/via_verifier.c \ No newline at end of file
diff --git a/bsd-core/via_verifier.h b/bsd-core/via_verifier.h
new file mode 120000
index 00000000..62d3e287
--- /dev/null
+++ b/bsd-core/via_verifier.h
@@ -0,0 +1 @@
+../shared-core/via_verifier.h \ No newline at end of file
diff --git a/bsd-core/via_video.c b/bsd-core/via_video.c
new file mode 120000
index 00000000..a6d27947
--- /dev/null
+++ b/bsd-core/via_video.c
@@ -0,0 +1 @@
+../shared-core/via_video.c \ No newline at end of file
diff --git a/configure.ac b/configure.ac
index 94c47bd1..78203343 100644
--- a/configure.ac
+++ b/configure.ac
@@ -35,4 +35,9 @@ AC_SYS_LARGEFILE
pkgconfigdir=${libdir}/pkgconfig
AC_SUBST(pkgconfigdir)
-AC_OUTPUT([Makefile libdrm/Makefile shared-core/Makefile libdrm.pc])
+AC_OUTPUT([
+ Makefile
+ libdrm/Makefile
+ shared-core/Makefile
+ tests/Makefile
+ libdrm.pc])
diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c
index 93185512..0849f896 100644
--- a/libdrm/xf86drm.c
+++ b/libdrm/xf86drm.c
@@ -49,7 +49,6 @@
#include <sys/mman.h>
#include <sys/time.h>
#include <stdarg.h>
-#include "drm.h"
/* Not all systems have MAP_FAILED defined */
#ifndef MAP_FAILED
@@ -2355,8 +2354,7 @@ int drmFenceCreate(int fd, unsigned flags, int class, unsigned type,
arg.flags = flags;
arg.type = type;
arg.class = class;
- arg.op = drm_fence_create;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+ if (ioctl(fd, DRM_IOCTL_FENCE_CREATE, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
@@ -2378,8 +2376,8 @@ int drmFenceBuffers(int fd, unsigned flags, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.flags = flags;
- arg.op = drm_fence_buffers;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_BUFFERS, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
@@ -2395,8 +2393,8 @@ int drmFenceDestroy(int fd, const drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- arg.op = drm_fence_destroy;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_DESTROY, &arg))
return -errno;
return 0;
}
@@ -2407,8 +2405,8 @@ int drmFenceReference(int fd, unsigned handle, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = handle;
- arg.op = drm_fence_reference;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_REFERENCE, &arg))
return -errno;
fence->handle = arg.handle;
fence->class = arg.class;
@@ -2424,8 +2422,8 @@ int drmFenceUnreference(int fd, const drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- arg.op = drm_fence_unreference;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_UNREFERENCE, &arg))
return -errno;
return 0;
}
@@ -2437,8 +2435,8 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
arg.type = flush_type;
- arg.op = drm_fence_flush;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_FLUSH, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
@@ -2452,8 +2450,8 @@ int drmFenceUpdate(int fd, drmFence *fence)
memset(&arg, 0, sizeof(arg));
arg.handle = fence->handle;
- arg.op = drm_fence_signaled;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_SIGNALED, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
@@ -2492,8 +2490,8 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)
arg.flags = flags;
arg.handle = fence->handle;
arg.type = emit_type;
- arg.op = drm_fence_emit;
- if (ioctl(fd, DRM_IOCTL_FENCE, &arg))
+
+ if (ioctl(fd, DRM_IOCTL_FENCE_EMIT, &arg))
return -errno;
fence->class = arg.class;
fence->type = arg.type;
@@ -2526,9 +2524,9 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)
arg.handle = fence->handle;
arg.type = flush_type;
arg.flags = flags;
- arg.op = drm_fence_wait;
+
do {
- ret = ioctl(fd, DRM_IOCTL_FENCE, &arg);
+ ret = ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
@@ -2678,7 +2676,7 @@ int drmBOCreateList(int numTarget, drmBOList *list)
return drmAdjustListNodes(list);
}
-static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, drmBO *buf)
+static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)
{
buf->handle = rep->handle;
buf->flags = rep->flags;
@@ -2690,16 +2688,21 @@ static void drmBOCopyReply(const drm_bo_arg_reply_t *rep, drmBO *buf)
buf->fenceFlags = rep->fence_flags;
buf->replyFlags = rep->rep_flags;
buf->pageAlignment = rep->page_alignment;
+ buf->tileInfo = rep->tile_info;
+ buf->hwTileStride = rep->hw_tile_stride;
+ buf->desiredTileStride = rep->desired_tile_stride;
}
-int drmBOCreate(int fd, unsigned long start, unsigned long size,
- unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
- unsigned mask,
+
+
+int drmBOCreate(int fd, unsigned long start, unsigned long size,
+ unsigned pageAlignment, void *user_buffer, drm_bo_type_t type,
+ uint64_t mask,
unsigned hint, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_create_arg arg;
+ struct drm_bo_create_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret;
memset(buf, 0, sizeof(*buf));
@@ -2726,21 +2729,13 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size,
default:
return -EINVAL;
}
- req->op = drm_bo_create;
do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- fprintf(stderr, "Error %d\n", rep->ret);
- return rep->ret;
- }
drmBOCopyReply(rep, buf);
buf->mapVirtual = NULL;
@@ -2751,9 +2746,7 @@ int drmBOCreate(int fd, unsigned long start, unsigned long size,
int drmBODestroy(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_handle_arg arg;
if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
(void) drmUnmap(buf->mapVirtual, buf->start + buf->size);
@@ -2762,40 +2755,26 @@ int drmBODestroy(int fd, drmBO *buf)
}
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->op = drm_bo_destroy;
+ arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+ if (ioctl(fd, DRM_IOCTL_BO_DESTROY, &arg))
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- return rep->ret;
- }
buf->handle = 0;
return 0;
}
-
+
int drmBOReference(int fd, unsigned handle, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_reference_info_arg arg;
+ struct drm_bo_handle_arg *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
memset(&arg, 0, sizeof(arg));
req->handle = handle;
- req->op = drm_bo_reference;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+ if (ioctl(fd, DRM_IOCTL_BO_REFERENCE, &arg))
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- return rep->ret;
- }
drmBOCopyReply(rep, buf);
buf->type = drm_bo_type_dc;
@@ -2808,9 +2787,7 @@ int drmBOReference(int fd, unsigned handle, drmBO *buf)
int drmBOUnReference(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_handle_arg arg;
if (buf->mapVirtual && (buf->type != drm_bo_type_fake)) {
(void) munmap(buf->mapVirtual, buf->start + buf->size);
@@ -2819,22 +2796,16 @@ int drmBOUnReference(int fd, drmBO *buf)
}
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->op = drm_bo_unreference;
+ arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg))
+ if (ioctl(fd, DRM_IOCTL_BO_UNREFERENCE, &arg))
return -errno;
- if (!arg.handled) {
- return -EFAULT;
- }
- if (rep->ret) {
- return rep->ret;
- }
buf->handle = 0;
return 0;
}
+
/*
* Flags can be DRM_BO_FLAG_READ, DRM_BO_FLAG_WRITE or'ed together
* Hint currently be DRM_BO_HINT_DONT_BLOCK, which makes the
@@ -2844,9 +2815,9 @@ int drmBOUnReference(int fd, drmBO *buf)
int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_map_wait_idle_arg arg;
+ struct drm_bo_info_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret = 0;
/*
@@ -2871,7 +2842,6 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
req->handle = buf->handle;
req->mask = mapFlags;
req->hint = mapHint;
- req->op = drm_bo_map;
/*
* May hang if the buffer object is busy.
@@ -2879,15 +2849,11 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
*/
do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_MAP, &arg);
} while (ret != 0 && errno == EAGAIN);
if (ret)
return -errno;
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
drmBOCopyReply(rep, buf);
buf->mapFlags = mapFlags;
@@ -2897,44 +2863,40 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
return 0;
}
+
int drmBOUnmap(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_handle_arg arg;
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->op = drm_bo_unmap;
+ arg.handle = buf->handle;
- if (ioctl(fd, DRM_IOCTL_BUFOBJ, &arg)) {
+ if (ioctl(fd, DRM_IOCTL_BO_UNMAP, &arg)) {
return -errno;
}
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
-
+ buf->mapCount--;
return 0;
}
-
-int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
+
+int drmBOValidate(int fd, drmBO *buf,
+ uint64_t flags, uint64_t mask,
unsigned hint)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_op_arg arg;
+ struct drm_bo_op_req *req = &arg.d.req;
+ struct drm_bo_arg_rep *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->mask = flags;
- req->hint = hint;
- req->arg_handle = mask; /* Encode mask in the arg_handle field :/ */
+ req->bo_req.handle = buf->handle;
+ req->bo_req.flags = flags;
+ req->bo_req.mask = mask;
+ req->bo_req.hint = hint;
+ req->bo_req.fence_class = 0; /* Backwards compatibility. */
req->op = drm_bo_validate;
do{
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg);
} while (ret && errno == EAGAIN);
if (ret)
@@ -2944,26 +2906,25 @@ int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
if (rep->ret)
return rep->ret;
- drmBOCopyReply(rep, buf);
+ drmBOCopyReply(&rep->bo_info, buf);
return 0;
}
int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_op_arg arg;
+ struct drm_bo_op_req *req = &arg.d.req;
+ struct drm_bo_arg_rep *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
- req->handle = buf->handle;
- req->mask = flags;
+ req->bo_req.handle = buf->handle;
+ req->bo_req.flags = flags;
req->arg_handle = fenceHandle;
req->op = drm_bo_fence;
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
-
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, &arg);
if (ret)
return -errno;
if (!arg.handled)
@@ -2975,56 +2936,70 @@ int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle)
int drmBOInfo(int fd, drmBO *buf)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_reference_info_arg arg;
+ struct drm_bo_handle_arg *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret = 0;
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
- req->op = drm_bo_info;
-
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_INFO, &arg);
if (ret)
return -errno;
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
+
drmBOCopyReply(rep, buf);
return 0;
}
int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)
{
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t *rep = &arg.d.rep;
+ struct drm_bo_map_wait_idle_arg arg;
+ struct drm_bo_info_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
int ret = 0;
if ((buf->flags & DRM_BO_FLAG_SHAREABLE) ||
(buf->replyFlags & DRM_BO_REP_BUSY)) {
memset(&arg, 0, sizeof(arg));
req->handle = buf->handle;
- req->op = drm_bo_wait_idle;
req->hint = hint;
do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, &arg);
+ ret = ioctl(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);
} while (ret && errno == EAGAIN);
if (ret)
return -errno;
- if (!arg.handled)
- return -EFAULT;
- if (rep->ret)
- return rep->ret;
+
drmBOCopyReply(rep, buf);
}
return 0;
}
-
+
+int drmBOSetPin(int fd, drmBO *buf, int pin)
+{
+ struct drm_bo_set_pin_arg arg;
+ struct drm_bo_set_pin_req *req = &arg.d.req;
+ struct drm_bo_info_rep *rep = &arg.d.rep;
+ int ret = 0;
+
+ memset(&arg, 0, sizeof(arg));
+ req->handle = buf->handle;
+ req->pin = pin;
+
+ do {
+ ret = ioctl(fd, DRM_IOCTL_BO_SET_PIN, &arg);
+ } while (ret && errno == EAGAIN);
+
+ if (ret)
+ return -errno;
+
+ drmBOCopyReply(rep, buf);
+
+ return 0;
+}
+
int drmBOBusy(int fd, drmBO *buf, int *busy)
{
if (!(buf->flags & DRM_BO_FLAG_SHAREABLE) &&
@@ -3041,7 +3016,6 @@ int drmBOBusy(int fd, drmBO *buf, int *busy)
}
}
-
int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
unsigned mask,
int *newItem)
@@ -3103,74 +3077,74 @@ int drmBOValidateList(int fd, drmBOList *list)
{
drmBONode *node;
drmMMListHead *l;
- drm_bo_arg_t *arg, *first;
- drm_bo_arg_request_t *req;
- drm_bo_arg_reply_t *rep;
- drm_u64_t *prevNext = NULL;
+ struct drm_bo_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ struct drm_bo_arg_rep *rep;
+ uint64_t *prevNext = NULL;
drmBO *buf;
int ret;
first = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
+ node = DRMLISTENTRY(drmBONode, l, head);
- arg = &node->bo_arg;
- req = &arg->d.req;
+ arg = &node->bo_arg;
+ req = &arg->d.req;
- if (!first)
- first = arg;
+ if (!first)
+ first = arg;
if (prevNext)
*prevNext = (unsigned long) arg;
memset(arg, 0, sizeof(*arg));
prevNext = &arg->next;
- req->handle = node->buf->handle;
+ req->bo_req.handle = node->buf->handle;
req->op = drm_bo_validate;
- req->mask = node->arg0;
- req->hint = 0;
- req->arg_handle = node->arg1;
+ req->bo_req.flags = node->arg0;
+ req->bo_req.hint = 0;
+ req->bo_req.mask = node->arg1;
+ req->bo_req.fence_class = 0; /* Backwards compat. */
}
-
- if (!first)
+
+ if (!first)
return 0;
- do {
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
+ do{
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, first);
} while (ret && errno == EAGAIN);
if (ret)
return -errno;
-
+
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
arg = &node->bo_arg;
rep = &arg->d.rep;
-
+
if (!arg->handled) {
drmMsg("Unhandled request\n");
return -EFAULT;
}
if (rep->ret)
return rep->ret;
-
+
buf = node->buf;
- drmBOCopyReply(rep, buf);
+ drmBOCopyReply(&rep->bo_info, buf);
}
-
+
return 0;
}
-
int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
{
drmBONode *node;
drmMMListHead *l;
- drm_bo_arg_t *arg, *first;
- drm_bo_arg_request_t *req;
- drm_bo_arg_reply_t *rep;
- drm_u64_t *prevNext = NULL;
+ struct drm_bo_op_arg *arg, *first;
+ struct drm_bo_op_req *req;
+ struct drm_bo_arg_rep *rep;
+ uint64_t *prevNext = NULL;
drmBO *buf;
unsigned fence_flags;
int ret;
@@ -3178,8 +3152,8 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
first = NULL;
for (l = list->list.next; l != &list->list; l = l->next) {
- node = DRMLISTENTRY(drmBONode, l, head);
-
+ node = DRMLISTENTRY(drmBONode, l, head);
+
arg = &node->bo_arg;
req = &arg->d.req;
@@ -3191,29 +3165,31 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
memset(arg, 0, sizeof(*arg));
prevNext = &arg->next;
- req->handle = node->buf->handle;
+ req->bo_req.handle = node->buf->handle;
req->op = drm_bo_fence;
- req->mask = node->arg0;
+ req->bo_req.mask = node->arg0;
req->arg_handle = fenceHandle;
}
if (!first)
return 0;
- ret = ioctl(fd, DRM_IOCTL_BUFOBJ, first);
+ ret = ioctl(fd, DRM_IOCTL_BO_OP, first);
if (ret)
return -errno;
for (l = list->list.next; l != &list->list; l = l->next) {
node = DRMLISTENTRY(drmBONode, l, head);
+
arg = &node->bo_arg;
rep = &arg->d.rep;
+
if (!arg->handled)
return -EFAULT;
if (rep->ret)
return rep->ret;
- drmBOCopyReply(rep, node->buf);
+ drmBOCopyReply(&rep->bo_info, node->buf);
}
return 0;
@@ -3222,13 +3198,16 @@ int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle)
int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_init_arg arg;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_init;
- arg.req.p_offset = pOffset;
- arg.req.p_size = pSize;
- arg.req.mem_type = memType;
+
+ arg.magic = DRM_BO_INIT_MAGIC;
+ arg.major = DRM_BO_INIT_MAJOR;
+ arg.minor = DRM_BO_INIT_MINOR;
+ arg.p_offset = pOffset;
+ arg.p_size = pSize;
+ arg.mem_type = memType;
if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
return -errno;
@@ -3237,28 +3216,26 @@ int drmMMInit(int fd, unsigned long pOffset, unsigned long pSize,
int drmMMTakedown(int fd, unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_type_arg arg;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_takedown;
- arg.req.mem_type = memType;
+ arg.mem_type = memType;
- if (ioctl(fd, DRM_IOCTL_MM_INIT, &arg))
+ if (ioctl(fd, DRM_IOCTL_MM_TAKEDOWN, &arg))
return -errno;
return 0;
}
int drmMMLock(int fd, unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_type_arg arg;
int ret;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_lock;
- arg.req.mem_type = memType;
+ arg.mem_type = memType;
do{
- ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+ ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg);
} while (ret && errno == EAGAIN);
return (ret) ? -errno : 0;
@@ -3266,15 +3243,15 @@ int drmMMLock(int fd, unsigned memType)
int drmMMUnlock(int fd, unsigned memType)
{
- drm_mm_init_arg_t arg;
+ struct drm_mm_type_arg arg;
int ret;
memset(&arg, 0, sizeof(arg));
- arg.req.op = mm_unlock;
- arg.req.mem_type = memType;
+
+ arg.mem_type = memType;
do{
- ret = ioctl(fd, DRM_IOCTL_MM_INIT, &arg);
+ ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg);
} while (ret && errno == EAGAIN);
return (ret) ? -errno : 0;
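The xf86drm.c changes above replace the multiplexed DRM_IOCTL_FENCE and DRM_IOCTL_BUFOBJ ioctls with one ioctl per operation and widen buffer-object flags/masks to 64 bits, while the libdrm entry points keep their names. A rough caller-side sketch of the updated buffer-object path; the size, flag values and drm_bo_type_dc type are illustrative placeholders and error handling is trimmed:

    #include <stdint.h>
    #include "xf86drm.h"
    #include "xf86mm.h"

    static int example_bo_roundtrip(int fd)
    {
            drmBO bo;
            void *ptr;
            uint64_t mask = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;  /* mask is now 64-bit */
            int ret;

            ret = drmBOCreate(fd, 0, 4096, 0, NULL, drm_bo_type_dc,
                              mask, 0, &bo);                  /* DRM_IOCTL_BO_CREATE */
            if (ret)
                    return ret;

            ret = drmBOMap(fd, &bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
                           0, &ptr);                           /* DRM_IOCTL_BO_MAP */
            if (ret == 0)
                    (void) drmBOUnmap(fd, &bo);                /* DRM_IOCTL_BO_UNMAP */

            return drmBODestroy(fd, &bo);                      /* DRM_IOCTL_BO_DESTROY */
    }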
diff --git a/libdrm/xf86drm.h b/libdrm/xf86drm.h
index d4260cc9..230f54ce 100644
--- a/libdrm/xf86drm.h
+++ b/libdrm/xf86drm.h
@@ -36,6 +36,7 @@
#include <stdarg.h>
#include <sys/types.h>
+#include <stdint.h>
#include <drm.h>
/* Defaults, if nothing set in xf86config */
diff --git a/libdrm/xf86drmHash.c b/libdrm/xf86drmHash.c
index d1ade063..82cbc2a5 100644
--- a/libdrm/xf86drmHash.c
+++ b/libdrm/xf86drmHash.c
@@ -74,7 +74,6 @@
#define HASH_MAIN 0
#if !HASH_MAIN
-# include "drm.h"
# include "xf86drm.h"
#endif
diff --git a/libdrm/xf86drmRandom.c b/libdrm/xf86drmRandom.c
index 61ffb078..ecab9e2d 100644
--- a/libdrm/xf86drmRandom.c
+++ b/libdrm/xf86drmRandom.c
@@ -77,7 +77,6 @@
#define RANDOM_MAIN 0
#if !RANDOM_MAIN
-# include "drm.h"
# include "xf86drm.h"
#endif
diff --git a/libdrm/xf86drmSL.c b/libdrm/xf86drmSL.c
index ce60648d..58aefac7 100644
--- a/libdrm/xf86drmSL.c
+++ b/libdrm/xf86drmSL.c
@@ -44,7 +44,6 @@
#define SL_MAIN 0
#if !SL_MAIN
-# include "drm.h"
# include "xf86drm.h"
#else
# include <sys/time.h>
diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h
index b3822d4d..d86644ca 100644
--- a/libdrm/xf86mm.h
+++ b/libdrm/xf86mm.h
@@ -107,9 +107,9 @@ typedef struct _drmBO
{
drm_bo_type_t type;
unsigned handle;
- drm_u64_t mapHandle;
- unsigned flags;
- unsigned mask;
+ uint64_t mapHandle;
+ uint64_t flags;
+ uint64_t mask;
unsigned mapFlags;
unsigned long size;
unsigned long offset;
@@ -117,6 +117,9 @@ typedef struct _drmBO
unsigned replyFlags;
unsigned fenceFlags;
unsigned pageAlignment;
+ unsigned tileInfo;
+ unsigned hwTileStride;
+ unsigned desiredTileStride;
void *virtual;
void *mapVirtual;
int mapCount;
@@ -127,7 +130,7 @@ typedef struct _drmBONode
{
drmMMListHead head;
drmBO *buf;
- drm_bo_arg_t bo_arg;
+ struct drm_bo_op_arg bo_arg;
unsigned long arg0;
unsigned long arg1;
} drmBONode;
@@ -176,8 +179,8 @@ extern int drmBOCreateList(int numTarget, drmBOList *list);
*/
extern int drmBOCreate(int fd, unsigned long start, unsigned long size,
- unsigned pageAlignment,void *user_buffer,
- drm_bo_type_t type, unsigned mask,
+ unsigned pageAlignment,void *user_buffer,
+ drm_bo_type_t type, uint64_t mask,
unsigned hint, drmBO *buf);
extern int drmBODestroy(int fd, drmBO *buf);
extern int drmBOReference(int fd, unsigned handle, drmBO *buf);
@@ -185,19 +188,20 @@ extern int drmBOUnReference(int fd, drmBO *buf);
extern int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,
void **address);
extern int drmBOUnmap(int fd, drmBO *buf);
-extern int drmBOValidate(int fd, drmBO *buf, unsigned flags, unsigned mask,
- unsigned hint);
+extern int drmBOValidate(int fd, drmBO *buf, uint64_t flags,
+ uint64_t mask, unsigned hint);
+
extern int drmBOFence(int fd, drmBO *buf, unsigned flags, unsigned fenceHandle);
extern int drmBOInfo(int fd, drmBO *buf);
extern int drmBOBusy(int fd, drmBO *buf, int *busy);
-
-extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
+extern int drmAddValidateItem(drmBOList *list, drmBO *buf, unsigned flags,
unsigned mask,
int *newItem);
extern int drmBOValidateList(int fd, drmBOList *list);
extern int drmBOFenceList(int fd, drmBOList *list, unsigned fenceHandle);
extern int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint);
+int drmBOSetPin(int fd, drmBO *buf, int pin);
/*
* Initialization functions.
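The memory-manager helpers reworked earlier in xf86drm.c (and declared under the initialization section here) now fill the versioned struct drm_mm_init_arg (DRM_BO_INIT_MAGIC/MAJOR/MINOR), while lock, unlock and takedown each use struct drm_mm_type_arg with their own ioctls. A minimal bring-up/teardown sketch; the offset and size values are placeholders and DRM_BO_MEM_TT is assumed to be one of the memory-type constants from drm.h:

    static int example_mm_setup(int fd)
    {
            int ret;

            /* drmMMInit() fills struct drm_mm_init_arg, including the
             * DRM_BO_INIT_MAGIC/MAJOR/MINOR fields added in this merge. */
            ret = drmMMInit(fd, 0 /* pOffset */, 4096 /* pSize */, DRM_BO_MEM_TT);
            if (ret)
                    return ret;

            ret = drmMMLock(fd, DRM_BO_MEM_TT);               /* DRM_IOCTL_MM_LOCK */
            if (ret == 0)
                    ret = drmMMUnlock(fd, DRM_BO_MEM_TT);     /* DRM_IOCTL_MM_UNLOCK */

            (void) drmMMTakedown(fd, DRM_BO_MEM_TT);          /* DRM_IOCTL_MM_TAKEDOWN */
            return ret;
    }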
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 1758777c..1cdf3b30 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
- mach64.o nv.o nouveau.o
+ mach64.o nv.o nouveau.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@@ -91,6 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
NVHEADERS = nv_drv.h $(DRMHEADERS)
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
+XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
PROGS = dristat drmstat
@@ -268,6 +269,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
ifneq ($(PAGE_AGP),0)
EXTRA_CFLAGS += -DHAVE_PAGE_AGP
endif
+EXTRA_CFLAGS += -g -O0
# Start with all modules turned off.
CONFIG_DRM_GAMMA := n
@@ -284,6 +286,7 @@ CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
+CONFIG_DRM_XGI := n
# Enable module builds for the modules requested/supported.
@@ -320,6 +323,9 @@ endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif
+ifneq (,$(findstring xgi,$(DRM_MODULES)))
+CONFIG_DRM_XGI := m
+endif
# These require AGP support
@@ -347,6 +353,7 @@ $(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)
+$(xgi-objs): $(XGIHEADERS)
endif
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index ac613d2e..a7ab1f1d 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -24,12 +24,14 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+ nouveau_sgdma.o nouveau_dma.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \
- nv40_graph.o nv50_graph.o
+ nv40_graph.o nv50_graph.o \
+ nv04_instmem.o nv50_instmem.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
@@ -38,6 +40,8 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o via_fence.o via_buffer.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
+xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
+ xgi_fence.o
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
@@ -46,6 +50,7 @@ mga-objs += mga_ioc32.o
r128-objs += r128_ioc32.o
i915-objs += i915_ioc32.o
nouveau-objs += nouveau_ioc32.o
+xgi-objs += xgi_ioc32.o
endif
obj-m += drm.o
@@ -62,3 +67,4 @@ obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
+obj-$(CONFIG_DRM_XGI) += xgi.o
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index 524618a8..7241c2a8 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -81,9 +81,9 @@ static void drm_ati_free_pcigart_table(void *address, int order)
free_pages((unsigned long)address, order);
}
-int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info)
{
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_sg_mem *entry = dev->sg;
unsigned long pages;
int i;
int order;
@@ -132,9 +132,9 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
}
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
+int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info)
{
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_sg_mem *entry = dev->sg;
void *address = NULL;
unsigned long pages;
u32 *pci_gart, page_base, bus_address = 0;
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 510de608..64f9a63f 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -84,6 +84,8 @@
#include "drm_os_linux.h"
#include "drm_hashtab.h"
+struct drm_file;
+
/* If you want the memory alloc debug functionality, change define below */
/* #define DEBUG_MEMORY */
@@ -250,15 +252,15 @@
* Test that the hardware lock is held by the caller, returning otherwise.
*
* \param dev DRM device.
- * \param filp file pointer of the caller.
+ * \param file_priv DRM file private pointer of the caller.
*/
-#define LOCK_TEST_WITH_RETURN( dev, filp ) \
+#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \
do { \
if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \
- dev->lock.filp != filp ) { \
+ dev->lock.file_priv != file_priv ) { \
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
__FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\
- dev->lock.filp, filp ); \
+ dev->lock.file_priv, file_priv ); \
return -EINVAL; \
} \
} while (0)
@@ -275,16 +277,19 @@ do { \
return -EFAULT; \
}
+struct drm_device;
+struct drm_file;
+
/**
* Ioctl function type.
*
- * \param inode device inode.
- * \param filp file pointer.
- * \param cmd command.
- * \param arg argument.
+ * \param dev DRM device structure
+ * \param data pointer to kernel-space stored data, copied in and out according
+ * to ioctl description.
+ * \param file_priv DRM file private pointer.
*/
-typedef int drm_ioctl_t(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
unsigned long arg);
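The typedef above is the core of the ioctl rework: handlers now receive the device, a kernel-space copy of the argument and the caller's drm_file instead of raw inode/filp/cmd/arg. A hypothetical driver handler in the new convention, assuming drmP.h is included; struct foo_set_param, foo_set_param() and the ioctl number mentioned below are made-up names, and DRM_IOCTL_DEF is the macro added a few lines further down:

    struct foo_set_param {
            unsigned int param;
            unsigned int value;
    };

    static int foo_set_param(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
    {
            struct foo_set_param *arg = data;   /* already copied in by the core */

            LOCK_TEST_WITH_RETURN(dev, file_priv);  /* file_priv-based lock check */

            /* ... program the hardware from arg->param / arg->value ... */
            (void) arg;
            return 0;
    }

Its table entry would then be written as DRM_IOCTL_DEF(DRM_IOCTL_FOO_SET_PARAM, foo_set_param, DRM_MASTER), with DRM_IOCTL_FOO_SET_PARAM again a hypothetical ioctl number.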
@@ -293,31 +298,34 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
-typedef struct drm_ioctl_desc {
+struct drm_ioctl_desc {
+ unsigned int cmd;
drm_ioctl_t *func;
int flags;
-} drm_ioctl_desc_t;
-
-typedef struct drm_devstate {
- pid_t owner; /**< X server pid holding x_lock */
-} drm_devstate_t;
+};
+/**
+ * Creates a driver or general drm_ioctl_desc array entry for the given
+ * ioctl, for use by drm_ioctl().
+ */
+#define DRM_IOCTL_DEF(ioctl, func, flags) \
+ [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags}
-typedef struct drm_magic_entry {
+struct drm_magic_entry {
struct list_head head;
- drm_hash_item_t hash_item;
+ struct drm_hash_item hash_item;
struct drm_file *priv;
-} drm_magic_entry_t;
+};
-typedef struct drm_vma_entry {
+struct drm_vma_entry {
struct list_head head;
struct vm_area_struct *vma;
pid_t pid;
-} drm_vma_entry_t;
+};
/**
* DMA buffer.
*/
-typedef struct drm_buf {
+struct drm_buf {
int idx; /**< Index into master buflist */
int total; /**< Buffer size */
int order; /**< log-base-2(total) */
@@ -329,7 +337,7 @@ typedef struct drm_buf {
__volatile__ int waiting; /**< On kernel DMA queue */
__volatile__ int pending; /**< On hardware DMA queue */
wait_queue_head_t dma_wait; /**< Processes waiting */
- struct file *filp; /**< Pointer to holding file descr */
+ struct drm_file *file_priv; /**< Private of holding file descr */
int context; /**< Kernel queue for this buffer */
int while_locked; /**< Dispatch this buffer while locked */
enum {
@@ -343,30 +351,30 @@ typedef struct drm_buf {
int dev_priv_size; /**< Size of buffer private storage */
void *dev_private; /**< Per-buffer private storage */
-} drm_buf_t;
+};
/** bufs is one longer than it has to be */
-typedef struct drm_waitlist {
+struct drm_waitlist {
int count; /**< Number of possible buffers */
- drm_buf_t **bufs; /**< List of pointers to buffers */
- drm_buf_t **rp; /**< Read pointer */
- drm_buf_t **wp; /**< Write pointer */
- drm_buf_t **end; /**< End pointer */
+ struct drm_buf **bufs; /**< List of pointers to buffers */
+ struct drm_buf **rp; /**< Read pointer */
+ struct drm_buf **wp; /**< Write pointer */
+ struct drm_buf **end; /**< End pointer */
spinlock_t read_lock;
spinlock_t write_lock;
-} drm_waitlist_t;
+};
-typedef struct drm_freelist {
+struct drm_freelist {
int initialized; /**< Freelist in use */
atomic_t count; /**< Number of free buffers */
- drm_buf_t *next; /**< End pointer */
+ struct drm_buf *next; /**< End pointer */
wait_queue_head_t waiting; /**< Processes waiting on free bufs */
int low_mark; /**< Low water mark */
int high_mark; /**< High water mark */
atomic_t wfh; /**< If waiting for high mark */
spinlock_t lock;
-} drm_freelist_t;
+};
typedef struct drm_dma_handle {
dma_addr_t busaddr;
@@ -377,15 +385,15 @@ typedef struct drm_dma_handle {
/**
* Buffer entry. There is one of this for each buffer size order.
*/
-typedef struct drm_buf_entry {
+struct drm_buf_entry {
int buf_size; /**< size */
int buf_count; /**< number of buffers */
- drm_buf_t *buflist; /**< buffer list */
+ struct drm_buf *buflist; /**< buffer list */
int seg_count;
int page_order;
- drm_dma_handle_t **seglist;
- drm_freelist_t freelist;
-} drm_buf_entry_t;
+ struct drm_dma_handle **seglist;
+ struct drm_freelist freelist;
+};
/*
* This should be small enough to allow the use of kmalloc for hash tables
@@ -393,15 +401,15 @@ typedef struct drm_buf_entry {
*/
#define DRM_FILE_HASH_ORDER 8
-typedef enum{
+enum drm_ref_type {
_DRM_REF_USE=0,
_DRM_REF_TYPE1,
_DRM_NO_REF_TYPES
-} drm_ref_t;
+};
/** File private data */
-typedef struct drm_file {
+struct drm_file {
int authenticated;
int master;
int minor;
@@ -424,14 +432,15 @@ typedef struct drm_file {
struct list_head refd_objects;
struct list_head user_objects;
- drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES];
+ struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
+ struct file *filp;
void *driver_priv;
struct list_head fbs;
-} drm_file_t;
+};
/** Wait queue */
-typedef struct drm_queue {
+struct drm_queue {
atomic_t use_count; /**< Outstanding uses (+1) */
atomic_t finalization; /**< Finalization in progress */
atomic_t block_count; /**< Count of processes waiting */
@@ -444,33 +453,34 @@ typedef struct drm_queue {
atomic_t total_flushed; /**< Total flushes statistic */
atomic_t total_locks; /**< Total locks statistics */
#endif
- drm_ctx_flags_t flags; /**< Context preserving and 2D-only */
- drm_waitlist_t waitlist; /**< Pending buffers */
+ enum drm_ctx_flags flags; /**< Context preserving and 2D-only */
+ struct drm_waitlist waitlist; /**< Pending buffers */
wait_queue_head_t flush_queue; /**< Processes waiting until flush */
-} drm_queue_t;
+};
/**
* Lock data.
*/
-typedef struct drm_lock_data {
- drm_hw_lock_t *hw_lock; /**< Hardware lock */
- struct file *filp; /**< File descr of lock holder (0=kernel) */
+struct drm_lock_data {
+ struct drm_hw_lock *hw_lock; /**< Hardware lock */
+ /** Private of lock holder's file (NULL=kernel) */
+ struct drm_file *file_priv;
wait_queue_head_t lock_queue; /**< Queue of blocked processes */
unsigned long lock_time; /**< Time of last lock in jiffies */
spinlock_t spinlock;
uint32_t kernel_waiters;
uint32_t user_waiters;
int idle_has_lock;
-} drm_lock_data_t;
+};
/**
* DMA data.
*/
-typedef struct drm_device_dma {
+struct drm_device_dma {
- drm_buf_entry_t bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
+ struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */
int buf_count; /**< total number of buffers */
- drm_buf_t **buflist; /**< Vector of pointers into drm_device_dma::bufs */
+ struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */
int seg_count;
int page_count; /**< number of pages */
unsigned long *pagelist; /**< page list */
@@ -482,25 +492,25 @@ typedef struct drm_device_dma {
_DRM_DMA_USE_PCI_RO = 0x08
} flags;
-} drm_device_dma_t;
+};
/**
* AGP memory entry. Stored as a doubly linked list.
*/
-typedef struct drm_agp_mem {
+struct drm_agp_mem {
unsigned long handle; /**< handle */
DRM_AGP_MEM *memory;
unsigned long bound; /**< address */
int pages;
struct list_head head;
-} drm_agp_mem_t;
+};
/**
* AGP data.
*
* \sa drm_agp_init)() and drm_device::agp.
*/
-typedef struct drm_agp_head {
+struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
struct list_head memory;
unsigned long mode; /**< AGP mode */
@@ -513,30 +523,30 @@ typedef struct drm_agp_head {
int agp_mtrr;
int cant_use_aperture;
unsigned long page_mask;
-} drm_agp_head_t;
+};
/**
* Scatter-gather memory.
*/
-typedef struct drm_sg_mem {
+struct drm_sg_mem {
unsigned long handle;
void *virtual;
int pages;
struct page **pagelist;
dma_addr_t *busaddr;
-} drm_sg_mem_t;
+};
-typedef struct drm_sigdata {
+struct drm_sigdata {
int context;
- drm_hw_lock_t *lock;
-} drm_sigdata_t;
+ struct drm_hw_lock *lock;
+};
/*
* Generic memory manager structs
*/
-typedef struct drm_mm_node {
+struct drm_mm_node {
struct list_head fl_entry;
struct list_head ml_entry;
int free;
@@ -544,46 +554,42 @@ typedef struct drm_mm_node {
unsigned long size;
struct drm_mm *mm;
void *private;
-} drm_mm_node_t;
+};
-typedef struct drm_mm {
+struct drm_mm {
struct list_head fl_entry;
struct list_head ml_entry;
-} drm_mm_t;
+};
/**
* Mappings list
*/
-typedef struct drm_map_list {
+struct drm_map_list {
struct list_head head; /**< list head */
- drm_hash_item_t hash;
- drm_map_t *map; /**< mapping */
- drm_u64_t user_token;
- drm_mm_node_t *file_offset_node;
-} drm_map_list_t;
+ struct drm_hash_item hash;
+ struct drm_map *map; /**< mapping */
+ uint64_t user_token;
+ struct drm_mm_node *file_offset_node;
+};
-typedef drm_map_t drm_local_map_t;
+typedef struct drm_map drm_local_map_t;
/**
* Context handle list
*/
-typedef struct drm_ctx_list {
+struct drm_ctx_list {
struct list_head head; /**< list head */
drm_context_t handle; /**< context handle */
- drm_file_t *tag; /**< associated fd private data */
-} drm_ctx_list_t;
-
-struct drm_ctx_sarea_list {
- drm_map_t *map;
+ struct drm_file *tag; /**< associated fd private data */
};
-typedef struct drm_vbl_sig {
+struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
struct siginfo info;
struct task_struct *task;
-} drm_vbl_sig_t;
+};
/* location of GART table */
#define DRM_ATI_GART_MAIN 1
@@ -593,17 +599,13 @@ typedef struct drm_vbl_sig {
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
-typedef struct ati_pcigart_info {
+struct ati_pcigart_info {
int gart_table_location;
int gart_reg_if;
void *addr;
dma_addr_t bus_addr;
drm_local_map_t mapping;
int table_size;
-} drm_ati_pcigart_info;
-
-struct drm_drawable_list {
- drm_drawable_info_t info;
};
#include "drm_objects.h"
@@ -614,16 +616,15 @@ struct drm_drawable_list {
* in this family
*/
-struct drm_device;
struct drm_driver {
int (*load) (struct drm_device *, unsigned long flags);
int (*firstopen) (struct drm_device *);
- int (*open) (struct drm_device *, drm_file_t *);
- void (*preclose) (struct drm_device *, struct file * filp);
- void (*postclose) (struct drm_device *, drm_file_t *);
+ int (*open) (struct drm_device *, struct drm_file *);
+ void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+ void (*postclose) (struct drm_device *, struct drm_file *);
void (*lastclose) (struct drm_device *);
int (*unload) (struct drm_device *);
- int (*dma_ioctl) (DRM_IOCTL_ARGS);
+ int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
int (*context_ctor) (struct drm_device * dev, int context);
@@ -653,14 +654,15 @@ struct drm_driver {
void (*irq_preinstall) (struct drm_device * dev);
void (*irq_postinstall) (struct drm_device * dev);
void (*irq_uninstall) (struct drm_device * dev);
- void (*reclaim_buffers) (struct drm_device *dev, struct file * filp);
+ void (*reclaim_buffers) (struct drm_device *dev,
+ struct drm_file *file_priv);
void (*reclaim_buffers_locked) (struct drm_device *dev,
- struct file * filp);
+ struct drm_file *file_priv);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
- struct file * filp);
- unsigned long (*get_map_ofs) (drm_map_t * map);
+ struct drm_file *file_priv);
+ unsigned long (*get_map_ofs) (struct drm_map * map);
unsigned long (*get_reg_ofs) (struct drm_device * dev);
- void (*set_version) (struct drm_device * dev, drm_set_version_t * sv);
+ void (*set_version) (struct drm_device * dev, struct drm_set_version * sv);
/* FB routines, if present */
int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc);
@@ -679,7 +681,7 @@ struct drm_driver {
/* variables */
u32 driver_features;
int dev_priv_size;
- drm_ioctl_desc_t *ioctls;
+ struct drm_ioctl_desc *ioctls;
int num_ioctls;
struct file_operations fops;
struct pci_driver pci_driver;
@@ -690,20 +692,20 @@ struct drm_driver {
* that may contain multiple heads. Embed one per head of these in the
* private drm_device structure.
*/
-typedef struct drm_head {
+struct drm_head {
int minor; /**< Minor device number */
struct drm_device *dev;
struct proc_dir_entry *dev_root; /**< proc directory entry */
dev_t device; /**< Device number for mknod */
struct class_device *dev_class;
-} drm_head_t;
+};
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
*/
-typedef struct drm_device {
+struct drm_device {
char *unique; /**< Unique identifier: e.g., busid */
int unique_len; /**< Length of unique field */
char *devname; /**< For /proc/interrupts */
@@ -729,14 +731,14 @@ typedef struct drm_device {
/** \name Performance counters */
/*@{ */
unsigned long counters;
- drm_stat_type_t types[15];
+ enum drm_stat_type types[15];
atomic_t counts[15];
/*@} */
/** \name Authentication */
/*@{ */
struct list_head filelist;
- drm_open_hash_t magiclist;
+ struct drm_open_hash magiclist;
struct list_head magicfree;
/*@} */
@@ -744,9 +746,9 @@ typedef struct drm_device {
/*@{ */
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
- drm_open_hash_t map_hash; /**< User token hash table for maps */
- drm_mm_t offset_manager; /**< User token manager */
- drm_open_hash_t object_hash; /**< User token hash table for objects */
+ struct drm_open_hash map_hash; /**< User token hash table for maps */
+ struct drm_mm offset_manager; /**< User token manager */
+ struct drm_open_hash object_hash; /**< User token hash table for objects */
struct address_space *dev_mapping; /**< For unmap_mapping_range() */
struct page *ttm_dummy_page;
@@ -759,7 +761,7 @@ typedef struct drm_device {
struct idr ctx_idr;
struct list_head vmalist; /**< List of vmas (for debugging) */
- drm_lock_data_t lock; /**< Information on hardware lock */
+ struct drm_lock_data lock; /**< Information on hardware lock */
/*@} */
/** \name DMA queues (contexts) */
@@ -767,8 +769,8 @@ typedef struct drm_device {
int queue_count; /**< Number of active DMA queues */
int queue_reserved; /**< Number of reserved DMA queues */
int queue_slots; /**< Actual length of queuelist */
- drm_queue_t **queuelist; /**< Vector of pointers to DMA queues */
- drm_device_dma_t *dma; /**< Optional pointer for DMA support */
+ struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */
+ struct drm_device_dma *dma; /**< Optional pointer for DMA support */
/*@} */
/** \name Context support */
@@ -808,7 +810,7 @@ typedef struct drm_device {
wait_queue_head_t buf_readers; /**< Processes waiting to read */
wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */
- drm_agp_head_t *agp; /**< AGP data */
+ struct drm_agp_head *agp; /**< AGP data */
struct pci_dev *pdev; /**< PCI device structure */
int pci_vendor; /**< PCI vendor id */
@@ -816,18 +818,18 @@ typedef struct drm_device {
#ifdef __alpha__
struct pci_controller *hose;
#endif
- drm_sg_mem_t *sg; /**< Scatter gather memory */
+ struct drm_sg_mem *sg; /**< Scatter gather memory */
void *dev_private; /**< device private data */
- drm_sigdata_t sigdata; /**< For block_all_signals */
+ struct drm_sigdata sigdata; /**< For block_all_signals */
sigset_t sigmask;
struct drm_driver *driver;
drm_local_map_t *agp_buffer_map;
unsigned int agp_buffer_token;
- drm_head_t primary; /**< primary screen head */
+ struct drm_head primary; /**< primary screen head */
- drm_fence_manager_t fm;
- drm_buffer_manager_t bm;
+ struct drm_fence_manager fm;
+ struct drm_buffer_manager bm;
/** \name Drawable information */
/*@{ */
@@ -837,15 +839,15 @@ typedef struct drm_device {
/* DRM mode setting */
struct drm_mode_config mode_config;
-} drm_device_t;
+};
#if __OS_HAS_AGP
-typedef struct drm_agp_ttm_backend {
- drm_ttm_backend_t backend;
+struct drm_agp_ttm_backend {
+ struct drm_ttm_backend backend;
DRM_AGP_MEM *mem;
struct agp_bridge_data *bridge;
int populated;
-} drm_agp_ttm_backend_t;
+};
#endif
@@ -920,10 +922,12 @@ extern void drm_exit(struct drm_driver *driver);
extern void drm_cleanup_pci(struct pci_dev *pdev);
extern int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
+extern long drm_unlocked_ioctl(struct file *filp,
+ unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
-extern int drm_lastclose(drm_device_t * dev);
+extern int drm_lastclose(struct drm_device *dev);
/* Device support (drm_fops.h) */
extern int drm_open(struct inode *inode, struct file *filp);
@@ -934,7 +938,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
/* Mapping support (drm_vm.h) */
extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
-extern unsigned long drm_core_get_map_ofs(drm_map_t * map);
+extern unsigned long drm_core_get_map_ofs(struct drm_map * map);
extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev);
extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma);
@@ -947,174 +951,179 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area);
extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area);
extern unsigned long drm_alloc_pages(int order, int area);
extern void drm_free_pages(unsigned long address, int order, int area);
-extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type);
+extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type);
extern int drm_free_agp(DRM_AGP_MEM * handle, int pages);
extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start);
extern int drm_unbind_agp(DRM_AGP_MEM * handle);
extern void drm_free_memctl(size_t size);
extern int drm_alloc_memctl(size_t size);
-extern void drm_query_memctl(drm_u64_t *cur_used,
- drm_u64_t *low_threshold,
- drm_u64_t *high_threshold);
+extern void drm_query_memctl(uint64_t *cur_used,
+ uint64_t *low_threshold,
+ uint64_t *high_threshold);
extern void drm_init_memctl(size_t low_threshold,
size_t high_threshold,
size_t unit_size);
/* Misc. IOCTL support (drm_ioctl.h) */
-extern int drm_irq_by_busid(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_getunique(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_setunique(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_getmap(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_getclient(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_getstats(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_setversion(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_noop(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_setunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getmap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getclient(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getstats(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_setversion(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_noop(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* Context IOCTL support (drm_context.h) */
-extern int drm_resctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_addctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_modctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_getctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_switchctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_newctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_rmctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-extern int drm_ctxbitmap_init(drm_device_t * dev);
-extern void drm_ctxbitmap_cleanup(drm_device_t * dev);
-extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle);
-
-extern int drm_setsareactx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_getsareactx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int drm_resctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_addctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_modctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_newctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_rmctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern int drm_ctxbitmap_init(struct drm_device *dev);
+extern void drm_ctxbitmap_cleanup(struct drm_device *dev);
+extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle);
+
+extern int drm_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* Drawable IOCTL support (drm_drawable.h) */
-extern int drm_adddraw(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_rmdraw(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
- drm_drawable_t id);
-extern void drm_drawable_free_all(drm_device_t *dev);
+extern int drm_adddraw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_rmdraw(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_update_drawable_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev,
+ drm_drawable_t id);
+extern void drm_drawable_free_all(struct drm_device *dev);
/* Authentication IOCTL support (drm_auth.h) */
-extern int drm_getmagic(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_authmagic(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int drm_getmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* Locking IOCTL support (drm_lock.h) */
-extern int drm_lock(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_unlock(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context);
-extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context);
-extern void drm_idlelock_take(drm_lock_data_t *lock_data);
-extern void drm_idlelock_release(drm_lock_data_t *lock_data);
+extern int drm_lock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_unlock(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context);
+extern void drm_idlelock_take(struct drm_lock_data *lock_data);
+extern void drm_idlelock_release(struct drm_lock_data *lock_data);
/*
* These are exported to drivers so that they can implement fencing using
 * DMA quiescence + idle. DMA quiescence usually requires the hardware lock.
*/
-extern int drm_i_have_hw_lock(struct file *filp);
-extern int drm_kernel_take_hw_lock(struct file *filp);
+extern int drm_i_have_hw_lock(struct drm_device *dev,
+ struct drm_file *file_priv);
/* Buffer management support (drm_bufs.h) */
-extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request);
-extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request);
-extern int drm_addbufs_fb (drm_device_t * dev, drm_buf_desc_t * request);
-extern int drm_addmap(drm_device_t * dev, unsigned int offset,
- unsigned int size, drm_map_type_t type,
- drm_map_flags_t flags, drm_local_map_t ** map_ptr);
-extern int drm_addmap_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map);
-extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map);
-extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_addbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_infobufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_markbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_freebufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mapbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request);
+extern int drm_addmap(struct drm_device *dev, unsigned int offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, drm_local_map_t ** map_ptr);
+extern int drm_addmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map);
+extern int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_addbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_infobufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_markbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_freebufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_mapbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern int drm_order(unsigned long size);
-extern unsigned long drm_get_resource_start(drm_device_t *dev,
+extern unsigned long drm_get_resource_start(struct drm_device *dev,
unsigned int resource);
-extern unsigned long drm_get_resource_len(drm_device_t *dev,
+extern unsigned long drm_get_resource_len(struct drm_device *dev,
unsigned int resource);
+extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
+ drm_local_map_t *map);
+
/* DMA support (drm_dma.h) */
-extern int drm_dma_setup(drm_device_t * dev);
-extern void drm_dma_takedown(drm_device_t * dev);
-extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf);
-extern void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp);
+extern int drm_dma_setup(struct drm_device *dev);
+extern void drm_dma_takedown(struct drm_device *dev);
+extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf);
+extern void drm_core_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *filp);
/* IRQ support (drm_irq.h) */
-extern int drm_control(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int drm_control(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS);
-extern int drm_irq_uninstall(drm_device_t *dev);
-extern void drm_driver_irq_preinstall(drm_device_t * dev);
-extern void drm_driver_irq_postinstall(drm_device_t * dev);
-extern void drm_driver_irq_uninstall(drm_device_t * dev);
-
-extern int drm_wait_vblank(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(drm_device_t * dev);
-extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*));
+extern int drm_irq_install(struct drm_device *dev);
+extern int drm_irq_uninstall(struct drm_device *dev);
+extern void drm_driver_irq_preinstall(struct drm_device *dev);
+extern void drm_driver_irq_postinstall(struct drm_device *dev);
+extern void drm_driver_irq_uninstall(struct drm_device *dev);
+
+extern int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
+extern void drm_vbl_send_signals(struct drm_device *dev);
+extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
/* AGP/GART support (drm_agpsupport.h) */
-extern drm_agp_head_t *drm_agp_init(drm_device_t *dev);
-extern int drm_agp_acquire(drm_device_t * dev);
-extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_agp_release(drm_device_t *dev);
-extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode);
-extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info);
-extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request);
-extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request);
-extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request);
-extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request);
-extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
+extern int drm_agp_acquire(struct drm_device *dev);
+extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_release(struct drm_device *dev);
+extern int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+extern int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+extern int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type);
#else
@@ -1123,22 +1132,22 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
-extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev);
+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
-extern int drm_put_dev(drm_device_t * dev);
-extern int drm_put_head(drm_head_t * head);
+extern int drm_put_dev(struct drm_device *dev);
+extern int drm_put_head(struct drm_head * head);
extern unsigned int drm_debug; /* 1 to enable debug output */
extern unsigned int drm_cards_limit;
-extern drm_head_t **drm_heads;
-extern struct drm_sysfs_class *drm_class;
+extern struct drm_head **drm_heads;
+extern struct class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
/* Proc support (drm_proc.h) */
-extern int drm_proc_init(drm_device_t * dev,
+extern int drm_proc_init(struct drm_device *dev,
int minor,
struct proc_dir_entry *root,
struct proc_dir_entry **dev_root);
@@ -1147,47 +1156,47 @@ extern int drm_proc_cleanup(int minor,
struct proc_dir_entry *dev_root);
/* Scatter Gather Support (drm_scatter.h) */
-extern void drm_sg_cleanup(drm_sg_mem_t * entry);
-extern int drm_sg_alloc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_sg_free(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern void drm_sg_cleanup(struct drm_sg_mem * entry);
+extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request);
+extern int drm_sg_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* ATI PCIGART support (ati_pcigart.h) */
-extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info);
-extern int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info);
-extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size,
+extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
-extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah);
-extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah);
+extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
+extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
/* sysfs support (drm_sysfs.c) */
struct drm_sysfs_class;
-extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner,
- char *name);
-extern void drm_sysfs_destroy(struct drm_sysfs_class *cs);
-extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
- drm_head_t * head);
+extern struct class *drm_sysfs_create(struct module *owner, char *name);
+extern void drm_sysfs_destroy(struct class *cs);
+extern struct class_device *drm_sysfs_device_add(struct class *cs,
+ struct drm_head * head);
extern void drm_sysfs_device_remove(struct class_device *class_dev);
/*
* Basic memory manager support (drm_mm.c)
*/
-extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size,
+extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size,
unsigned alignment);
-extern void drm_mm_put_block(drm_mm_node_t *cur);
-extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size,
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
unsigned alignment, int best_match);
-extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(drm_mm_t *mm);
-extern int drm_mm_clean(drm_mm_t *mm);
-extern unsigned long drm_mm_tail_space(drm_mm_t *mm);
-extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size);
-
-static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block)
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
+
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
{
return block->mm;
}
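For reference, typical use of the drm_mm interfaces declared above might look as follows; this is only a sketch built from the signatures shown here, with locking and error paths trimmed, and node->start is an assumed field of struct drm_mm_node:

static int example_mm_alloc(struct drm_mm *mm, unsigned long *offset)
{
	struct drm_mm_node *hole, *node;

	/* manage a 1 MiB range starting at 0 (normally done once at init) */
	if (drm_mm_init(mm, 0, 1024 * 1024))
		return -ENOMEM;

	/* best-fit search for a 4 KiB hole, then carve the block out of it */
	hole = drm_mm_search_free(mm, 4096, 0, 1);
	if (!hole)
		return -ENOMEM;
	node = drm_mm_get_block(hole, 4096, 0);
	if (!node)
		return -ENOMEM;

	*offset = node->start;		/* assumed field; see drm_mm.c */
	drm_mm_put_block(node);		/* return the block when done with it */
	drm_mm_takedown(mm);
	return 0;
}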
@@ -1198,14 +1207,14 @@ extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev);
static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
- drm_map_list_t *_entry;
+ struct drm_map_list *_entry;
list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;
}
-static __inline__ int drm_device_is_agp(drm_device_t *dev)
+static __inline__ int drm_device_is_agp(struct drm_device *dev)
{
if ( dev->driver->device_is_agp != NULL ) {
int err = (*dev->driver->device_is_agp)( dev );
@@ -1218,7 +1227,7 @@ static __inline__ int drm_device_is_agp(drm_device_t *dev)
return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP);
}
-static __inline__ int drm_device_is_pcie(drm_device_t *dev)
+static __inline__ int drm_device_is_pcie(struct drm_device *dev)
{
return pci_find_capability(dev->pdev, PCI_CAP_ID_EXP);
}
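The two inlines above are unchanged in behaviour; a driver might branch on them roughly like this (illustrative only):

static void example_report_bus(struct drm_device *dev)
{
	if (drm_device_is_pcie(dev))
		DRM_DEBUG("device sits on a PCI Express bus\n");
	else if (drm_device_is_agp(dev))
		DRM_DEBUG("device is AGP\n");
	else
		DRM_DEBUG("plain PCI device\n");
}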
@@ -1279,5 +1288,19 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)
/*@}*/
+/** Type for the OS's non-sleepable mutex lock */
+#define DRM_SPINTYPE spinlock_t
+/**
+ * Initialize the lock for use. name is an optional string describing the
+ * lock
+ */
+#define DRM_SPININIT(l,name) spin_lock_init(l)
+#define DRM_SPINUNINIT(l)
+#define DRM_SPINLOCK(l) spin_lock(l)
+#define DRM_SPINUNLOCK(l) spin_unlock(l)
+#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags);
+#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
+#define DRM_SPINLOCK_ASSERT(l) do {} while (0)
+
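These wrappers mirror the BSD side so shared-core code can spell locking identically on both kernels; on Linux they collapse to the native spinlock API. A usage sketch (struct example_ctx and its fields are hypothetical):

struct example_ctx {
	DRM_SPINTYPE lock;	/* i.e. spinlock_t on Linux */
	int counter;
};

static void example_ctx_bump(struct example_ctx *ctx)
{
	unsigned long irqflags;

	DRM_SPINLOCK_IRQSAVE(&ctx->lock, irqflags);
	ctx->counter++;
	DRM_SPINUNLOCK_IRQRESTORE(&ctx->lock, irqflags);
}

/* DRM_SPININIT(&ctx->lock, "example") is expected to run once at setup,
 * and DRM_SPINUNINIT(&ctx->lock) at teardown (a no-op on Linux). */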
#endif /* __KERNEL__ */
#endif
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index f134563a..4618823c 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -40,7 +40,7 @@
* Get AGP information.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a (output) drm_agp_info structure.
* \return zero on success or a negative number on failure.
@@ -48,7 +48,7 @@
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
-int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info)
+int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
{
DRM_AGP_KERN *kern;
@@ -70,20 +70,16 @@ int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info)
}
EXPORT_SYMBOL(drm_agp_info);
-int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_info_t info;
+ struct drm_agp_info *info = data;
int err;
- err = drm_agp_info(dev, &info);
+ err = drm_agp_info(dev, info);
if (err)
return err;
-
- if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info)))
- return -EFAULT;
+
return 0;
}
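This hunk shows the pattern repeated for every ioctl in the patch: the user-space copy moves out of the handler and into the core dispatcher, which is assumed to copy the argument in and out around the call based on the size encoded in the ioctl number. Roughly (a sketch of the dispatcher side, not the literal drm_ioctl()):

/* inside the dispatcher, after looking up the handler 'func' for 'cmd' */
char kdata[128];
unsigned int usize = _IOC_SIZE(cmd);
int retcode;

if ((cmd & IOC_IN) && copy_from_user(kdata, (void __user *)arg, usize))
	return -EFAULT;

retcode = func(dev, kdata, file_priv);	/* e.g. drm_agp_info_ioctl */

if (!retcode && (cmd & IOC_OUT) &&
    copy_to_user((void __user *)arg, kdata, usize))
	return -EFAULT;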
@@ -96,7 +92,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp,
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
-int drm_agp_acquire(drm_device_t * dev)
+int drm_agp_acquire(struct drm_device * dev)
{
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
int retcode;
@@ -123,7 +119,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
* Acquire the AGP device (ioctl).
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument.
* \return zero on success or a negative number on failure.
@@ -131,12 +127,10 @@ EXPORT_SYMBOL(drm_agp_acquire);
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
-int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
-
- return drm_agp_acquire( (drm_device_t *) priv->head->dev );
+ return drm_agp_acquire( (struct drm_device *) file_priv->head->dev );
}
/**
@@ -147,7 +141,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp,
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
-int drm_agp_release(drm_device_t *dev)
+int drm_agp_release(struct drm_device *dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -162,12 +156,9 @@ int drm_agp_release(drm_device_t *dev)
}
EXPORT_SYMBOL(drm_agp_release);
-int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
-
return drm_agp_release(dev);
}
@@ -181,7 +172,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp,
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
-int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
+int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -192,31 +183,24 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode)
#else
agp_enable(dev->agp->bridge, mode.mode);
#endif
- dev->agp->base = dev->agp->agp_info.aper_base;
dev->agp->enabled = 1;
return 0;
}
EXPORT_SYMBOL(drm_agp_enable);
-int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_mode_t mode;
-
+ struct drm_agp_mode *mode = data;
- if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode)))
- return -EFAULT;
-
- return drm_agp_enable(dev, mode);
+ return drm_agp_enable(dev, *mode);
}
/**
* Allocate AGP memory.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv file private pointer.
* \param cmd command.
* \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
@@ -224,9 +208,9 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp,
* Verifies the AGP device is present and has been acquired, allocates the
* memory via alloc_agp() and creates a drm_agp_mem entry for it.
*/
-int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
+int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
DRM_AGP_MEM *memory;
unsigned long pages;
u32 type;
@@ -259,35 +243,12 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
EXPORT_SYMBOL(drm_agp_alloc);
-int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_buffer_t request;
- drm_agp_buffer_t __user *argp = (void __user *)arg;
- int err;
-
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
+ struct drm_agp_buffer *request = data;
- err = drm_agp_alloc(dev, &request);
- if (err)
- return err;
-
- if (copy_to_user(argp, &request, sizeof(request))) {
- drm_agp_mem_t *entry;
- list_for_each_entry(entry, &dev->agp->memory, head) {
- if (entry->handle == request.handle)
- break;
- }
- list_del(&entry->head);
- drm_free_agp(entry->memory, entry->pages);
- drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
- return -EFAULT;
- }
-
- return 0;
+ return drm_agp_alloc(dev, request);
}
/**
@@ -299,10 +260,10 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
-static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
+static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev,
unsigned long handle)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
@@ -315,7 +276,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
* Unbind AGP memory from the GATT (ioctl).
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
@@ -323,9 +284,9 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
 * Verifies the AGP device is present and acquired, looks up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
-int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
+int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
@@ -342,18 +303,12 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request)
EXPORT_SYMBOL(drm_agp_unbind);
-int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_binding_t request;
-
- if (copy_from_user
- (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
- return -EFAULT;
+ struct drm_agp_binding *request = data;
- return drm_agp_unbind(dev, &request);
+ return drm_agp_unbind(dev, request);
}
@@ -361,7 +316,7 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
* Bind AGP memory into the GATT (ioctl)
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_binding structure.
* \return zero on success or a negative number on failure.
@@ -370,9 +325,9 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp,
 * is currently bound into the GATT. Looks up the AGP memory entry and passes
* it to bind_agp() function.
*/
-int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
+int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
int retcode;
int page;
@@ -393,18 +348,12 @@ int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request)
EXPORT_SYMBOL(drm_agp_bind);
-int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_binding_t request;
+ struct drm_agp_binding *request = data;
- if (copy_from_user
- (&request, (drm_agp_binding_t __user *) arg, sizeof(request)))
- return -EFAULT;
-
- return drm_agp_bind(dev, &request);
+ return drm_agp_bind(dev, request);
}
@@ -412,7 +361,7 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
* Free AGP memory (ioctl).
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_agp_buffer structure.
* \return zero on success or a negative number on failure.
@@ -422,9 +371,9 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp,
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/
-int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
+int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -443,18 +392,12 @@ EXPORT_SYMBOL(drm_agp_free);
-int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_agp_buffer_t request;
-
- if (copy_from_user
- (&request, (drm_agp_buffer_t __user *) arg, sizeof(request)))
- return -EFAULT;
+ struct drm_agp_buffer *request = data;
- return drm_agp_free(dev, &request);
+ return drm_agp_free(dev, request);
}
@@ -467,9 +410,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp,
* via the inter_module_* functions. Creates and initializes a drm_agp_head
* structure.
*/
-drm_agp_head_t *drm_agp_init(drm_device_t *dev)
+struct drm_agp_head *drm_agp_init(struct drm_device *dev)
{
- drm_agp_head_t *head = NULL;
+ struct drm_agp_head *head = NULL;
if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS)))
return NULL;
@@ -497,6 +440,7 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
+ head->base = head->agp_info.aper_base;
return head;
}
@@ -554,16 +498,16 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
#define AGP_REQUIRED_MAJOR 0
#define AGP_REQUIRED_MINOR 102
-static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) {
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}
-static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
+static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages,
struct page **pages) {
- drm_agp_ttm_backend_t *agp_be =
- container_of(backend, drm_agp_ttm_backend_t, backend);
+ struct drm_agp_ttm_backend *agp_be =
+ container_of(backend, struct drm_agp_ttm_backend, backend);
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
@@ -590,12 +534,12 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages,
return 0;
}
-static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
+static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
unsigned long offset,
int cached)
{
- drm_agp_ttm_backend_t *agp_be =
- container_of(backend, drm_agp_ttm_backend_t, backend);
+ struct drm_agp_ttm_backend *agp_be =
+ container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_AGP_MEM *mem = agp_be->mem;
int ret;
@@ -612,10 +556,10 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend,
return ret;
}
-static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
- drm_agp_ttm_backend_t *agp_be =
- container_of(backend, drm_agp_ttm_backend_t, backend);
+ struct drm_agp_ttm_backend *agp_be =
+ container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_DEBUG("drm_agp_unbind_ttm\n");
if (agp_be->mem->is_bound)
@@ -624,10 +568,10 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) {
return 0;
}
-static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {
- drm_agp_ttm_backend_t *agp_be =
- container_of(backend, drm_agp_ttm_backend_t, backend);
+ struct drm_agp_ttm_backend *agp_be =
+ container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_AGP_MEM *mem = agp_be->mem;
DRM_DEBUG("drm_agp_clear_ttm\n");
@@ -640,13 +584,13 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) {
agp_be->mem = NULL;
}
-static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) {
- drm_agp_ttm_backend_t *agp_be;
+ struct drm_agp_ttm_backend *agp_be;
if (backend) {
DRM_DEBUG("drm_agp_destroy_ttm\n");
- agp_be = container_of(backend, drm_agp_ttm_backend_t, backend);
+ agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
if (agp_be) {
if (agp_be->mem) {
backend->func->clear(backend);
@@ -656,7 +600,7 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) {
}
}
-static drm_ttm_backend_func_t agp_ttm_backend =
+static struct drm_ttm_backend_func agp_ttm_backend =
{
.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
.populate = drm_agp_populate,
@@ -666,10 +610,10 @@ static drm_ttm_backend_func_t agp_ttm_backend =
.destroy = drm_agp_destroy_ttm,
};
-drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev)
+struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
{
- drm_agp_ttm_backend_t *agp_be;
+ struct drm_agp_ttm_backend *agp_be;
struct agp_kern_info *info;
if (!dev->agp) {
diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c
index 6948d858..e35e8b6d 100644
--- a/linux-core/drm_auth.c
+++ b/linux-core/drm_auth.c
@@ -45,15 +45,15 @@
* the one with matching magic number, while holding the drm_device::struct_mutex
* lock.
*/
-static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
+static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic)
{
- drm_file_t *retval = NULL;
- drm_magic_entry_t *pt;
- drm_hash_item_t *hash;
+ struct drm_file *retval = NULL;
+ struct drm_magic_entry *pt;
+ struct drm_hash_item *hash;
- mutex_lock(&dev->struct_mutex);
- if (!drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) {
- pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+ mutex_lock(&dev->struct_mutex);
+ if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
+ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
retval = pt->priv;
}
mutex_unlock(&dev->struct_mutex);
@@ -71,10 +71,10 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic)
* associated the magic number hash key in drm_device::magiclist, while holding
* the drm_device::struct_mutex lock.
*/
-static int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
+static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
drm_magic_t magic)
{
- drm_magic_entry_t *entry;
+ struct drm_magic_entry *entry;
DRM_DEBUG("%d\n", magic);
@@ -101,10 +101,10 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv,
* Searches and unlinks the entry in drm_device::magiclist with the magic
* number hash key, while holding the drm_device::struct_mutex lock.
*/
-static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
+static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
{
- drm_magic_entry_t *pt;
- drm_hash_item_t *hash;
+ struct drm_magic_entry *pt;
+ struct drm_hash_item *hash;
DRM_DEBUG("%d\n", magic);
@@ -113,7 +113,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item);
+ pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item);
drm_ht_remove_item(&dev->magiclist, hash);
list_del(&pt->head);
mutex_unlock(&dev->struct_mutex);
@@ -127,42 +127,38 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic)
* Get a unique magic number (ioctl).
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a resulting drm_auth structure.
* \return zero on success, or a negative number on failure.
*
* If there is a magic number in drm_file::magic then use it, otherwise
 * searches for a unique non-zero magic number and adds it, associating it with \p
- * filp.
+ * file_priv.
*/
-int drm_getmagic(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
static drm_magic_t sequence = 0;
static DEFINE_SPINLOCK(lock);
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_auth_t auth;
+ struct drm_auth *auth = data;
/* Find unique magic */
- if (priv->magic) {
- auth.magic = priv->magic;
+ if (file_priv->magic) {
+ auth->magic = file_priv->magic;
} else {
do {
spin_lock(&lock);
if (!sequence)
++sequence; /* reserve 0 */
- auth.magic = sequence++;
+ auth->magic = sequence++;
spin_unlock(&lock);
- } while (drm_find_file(dev, auth.magic));
- priv->magic = auth.magic;
- drm_add_magic(dev, priv, auth.magic);
+ } while (drm_find_file(dev, auth->magic));
+ file_priv->magic = auth->magic;
+ drm_add_magic(dev, file_priv, auth->magic);
}
- DRM_DEBUG("%u\n", auth.magic);
- if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth)))
- return -EFAULT;
+ DRM_DEBUG("%u\n", auth->magic);
+
return 0;
}
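Nothing changes for user space here: libdrm still passes a struct drm_auth through the GET_MAGIC/AUTH_MAGIC ioctls. The classic DRI authentication handshake, for context:

/* client side */
drm_magic_t magic;
if (drmGetMagic(fd, &magic))		/* ends up in drm_getmagic() above */
	return -1;
/* the magic is handed to the master (the X server), which then calls */
if (drmAuthMagic(master_fd, magic))	/* ends up in drm_authmagic() below */
	return -1;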
@@ -170,27 +166,23 @@ int drm_getmagic(struct inode *inode, struct file *filp,
* Authenticate with a magic.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_auth structure.
 * \return zero if authentication succeeded, or a negative number otherwise.
*
- * Checks if \p filp is associated with the magic number passed in \arg.
+ * Checks if \p file_priv is associated with the magic number passed in \arg.
*/
-int drm_authmagic(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_authmagic(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_auth_t auth;
- drm_file_t *file;
-
- if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth)))
- return -EFAULT;
- DRM_DEBUG("%u\n", auth.magic);
- if ((file = drm_find_file(dev, auth.magic))) {
+ struct drm_auth *auth = data;
+ struct drm_file *file;
+
+ DRM_DEBUG("%u\n", auth->magic);
+ if ((file = drm_find_file(dev, auth->magic))) {
file->authenticated = 1;
- drm_remove_magic(dev, auth.magic);
+ drm_remove_magic(dev, auth->magic);
return 0;
}
return -EINVAL;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index fd097968..89062f11 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -49,10 +49,10 @@
*
*/
-static void drm_bo_destroy_locked(drm_buffer_object_t * bo);
-static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo);
-static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo);
-static void drm_bo_unmap_virtual(drm_buffer_object_t * bo);
+static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
+static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
+static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
static inline uint32_t drm_bo_type_flags(unsigned type)
{
@@ -63,9 +63,9 @@ static inline uint32_t drm_bo_type_flags(unsigned type)
* bo locked. dev->struct_mutex locked.
*/
-void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
+void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
{
- drm_mem_type_manager_t *man;
+ struct drm_mem_type_manager *man;
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
DRM_ASSERT_LOCKED(&bo->mutex);
@@ -74,14 +74,13 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
list_add_tail(&bo->pinned_lru, &man->pinned);
}
-void drm_bo_add_to_lru(drm_buffer_object_t * bo)
+void drm_bo_add_to_lru(struct drm_buffer_object * bo)
{
- drm_mem_type_manager_t *man;
+ struct drm_mem_type_manager *man;
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
- if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
- || bo->mem.mem_type != bo->pinned_mem_type) {
+ if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) {
man = &bo->dev->bm.man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru);
} else {
@@ -89,7 +88,7 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo)
}
}
-static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
+static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
int ret;
@@ -112,7 +111,7 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci)
return 0;
}
-static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
+static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
{
#ifdef DRM_ODD_MM_COMPAT
int ret;
@@ -133,9 +132,9 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo)
* Call bo->mutex locked.
*/
-static int drm_bo_add_ttm(drm_buffer_object_t * bo)
+static int drm_bo_add_ttm(struct drm_buffer_object * bo)
{
- drm_device_t *dev = bo->dev;
+ struct drm_device *dev = bo->dev;
int ret = 0;
bo->ttm = NULL;
@@ -164,16 +163,16 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo)
return ret;
}
-static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
- drm_bo_mem_reg_t * mem,
+static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
+ struct drm_bo_mem_reg * mem,
int evict, int no_wait)
{
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
int new_is_pci = drm_mem_reg_is_pci(dev, mem);
- drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type];
- drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type];
+ struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
+ struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
int ret = 0;
if (old_is_pci || new_is_pci)
@@ -201,9 +200,9 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
- drm_bo_mem_reg_t *old_mem = &bo->mem;
- uint32_t save_flags = old_mem->flags;
- uint32_t save_mask = old_mem->mask;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+ uint64_t save_flags = old_mem->flags;
+ uint64_t save_mask = old_mem->mask;
*old_mem = *mem;
mem->mm_node = NULL;
@@ -266,7 +265,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
* Wait until the buffer is idle.
*/
-int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
int no_wait)
{
int ret;
@@ -292,10 +291,10 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
return 0;
}
-static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
+static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
{
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
if (bo->fence) {
if (bm->nice_mode) {
@@ -327,10 +326,10 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
* fence object and removing from lru lists and memory managers.
*/
-static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
+static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
{
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
@@ -389,10 +388,10 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
* to the buffer object. Then destroy it.
*/
-static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
+static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
{
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
@@ -438,19 +437,19 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
* Call dev->struct_mutex locked.
*/
-static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all)
+static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
{
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_buffer_manager *bm = &dev->bm;
- drm_buffer_object_t *entry, *nentry;
+ struct drm_buffer_object *entry, *nentry;
struct list_head *list, *next;
list_for_each_safe(list, next, &bm->ddestroy) {
- entry = list_entry(list, drm_buffer_object_t, ddestroy);
+ entry = list_entry(list, struct drm_buffer_object, ddestroy);
nentry = NULL;
if (next != &bm->ddestroy) {
- nentry = list_entry(next, drm_buffer_object_t,
+ nentry = list_entry(next, struct drm_buffer_object,
ddestroy);
atomic_inc(&nentry->usage);
}
@@ -470,12 +469,12 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
- drm_device_t *dev = (drm_device_t *) data;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_device *dev = (struct drm_device *) data;
+ struct drm_buffer_manager *bm = &dev->bm;
#else
- drm_buffer_manager_t *bm =
- container_of(work, drm_buffer_manager_t, wq.work);
- drm_device_t *dev = container_of(bm, drm_device_t, bm);
+ struct drm_buffer_manager *bm =
+ container_of(work, struct drm_buffer_manager, wq.work);
+ struct drm_device *dev = container_of(bm, struct drm_device, bm);
#endif
DRM_DEBUG("Delayed delete Worker\n");
@@ -493,7 +492,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo)
+void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
{
struct drm_buffer_object *tmp_bo = *bo;
bo = NULL;
@@ -506,10 +505,11 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo)
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);
-static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
+static void drm_bo_base_deref_locked(struct drm_file * file_priv,
+ struct drm_user_object * uo)
{
- drm_buffer_object_t *bo =
- drm_user_object_entry(uo, drm_buffer_object_t, base);
+ struct drm_buffer_object *bo =
+ drm_user_object_entry(uo, struct drm_buffer_object, base);
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
@@ -517,10 +517,10 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
drm_bo_usage_deref_locked(&bo);
}
-static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo)
+void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
{
struct drm_buffer_object *tmp_bo = *bo;
- drm_device_t *dev = tmp_bo->dev;
+ struct drm_device *dev = tmp_bo->dev;
*bo = NULL;
if (atomic_dec_and_test(&tmp_bo->usage)) {
@@ -530,22 +530,23 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo)
mutex_unlock(&dev->struct_mutex);
}
}
+EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
/*
* Note. The caller has to register (if applicable)
* and deregister fence object usage.
*/
-int drm_fence_buffer_objects(drm_file_t * priv,
+int drm_fence_buffer_objects(struct drm_file * file_priv,
struct list_head *list,
uint32_t fence_flags,
- drm_fence_object_t * fence,
- drm_fence_object_t ** used_fence)
+ struct drm_fence_object * fence,
+ struct drm_fence_object ** used_fence)
{
- drm_device_t *dev = priv->head->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_device *dev = file_priv->head->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
- drm_buffer_object_t *entry;
+ struct drm_buffer_object *entry;
uint32_t fence_type = 0;
int count = 0;
int ret = 0;
@@ -603,7 +604,7 @@ int drm_fence_buffer_objects(drm_file_t * priv,
l = f_list.next;
while (l != &f_list) {
prefetch(l->next);
- entry = list_entry(l, drm_buffer_object_t, lru);
+ entry = list_entry(l, struct drm_buffer_object, lru);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
@@ -636,12 +637,12 @@ EXPORT_SYMBOL(drm_fence_buffer_objects);
* bo->mutex locked
*/
-static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
+static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
int no_wait)
{
int ret = 0;
- drm_device_t *dev = bo->dev;
- drm_bo_mem_reg_t evict_mem;
+ struct drm_device *dev = bo->dev;
+ struct drm_bo_mem_reg evict_mem;
/*
* Someone might have modified the buffer before we took the buffer mutex.
@@ -706,14 +707,18 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type,
return ret;
}
-static int drm_bo_mem_force_space(drm_device_t * dev,
- drm_bo_mem_reg_t * mem,
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int drm_bo_mem_force_space(struct drm_device * dev,
+ struct drm_bo_mem_reg * mem,
uint32_t mem_type, int no_wait)
{
- drm_mm_node_t *node;
- drm_buffer_manager_t *bm = &dev->bm;
- drm_buffer_object_t *entry;
- drm_mem_type_manager_t *man = &bm->man[mem_type];
+ struct drm_mm_node *node;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_buffer_object *entry;
+ struct drm_mem_type_manager *man = &bm->man[mem_type];
struct list_head *lru;
unsigned long num_pages = mem->num_pages;
int ret;
@@ -729,11 +734,11 @@ static int drm_bo_mem_force_space(drm_device_t * dev,
if (lru->next == lru)
break;
- entry = list_entry(lru->next, drm_buffer_object_t, lru);
+ entry = list_entry(lru->next, struct drm_buffer_object, lru);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&entry->mutex);
- BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
+ BUG_ON(entry->pinned);
ret = drm_bo_evict(entry, mem_type, no_wait);
mutex_unlock(&entry->mutex);
@@ -755,7 +760,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev,
return 0;
}
-static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
+static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
uint32_t mem_type,
uint32_t mask, uint32_t * res_mask)
{
@@ -792,12 +797,20 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man,
return 1;
}
-int drm_bo_mem_space(drm_buffer_object_t * bo,
- drm_bo_mem_reg_t * mem, int no_wait)
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver. If free space isn't found, then
+ * drm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int drm_bo_mem_space(struct drm_buffer_object * bo,
+ struct drm_bo_mem_reg * mem, int no_wait)
{
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
- drm_mem_type_manager_t *man;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_mem_type_manager *man;
uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
@@ -807,7 +820,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo,
int type_found = 0;
int type_ok = 0;
int has_eagain = 0;
- drm_mm_node_t *node = NULL;
+ struct drm_mm_node *node = NULL;
int ret;
mem->mm_node = NULL;
@@ -884,8 +897,8 @@ int drm_bo_mem_space(drm_buffer_object_t * bo,
EXPORT_SYMBOL(drm_bo_mem_space);
-static int drm_bo_new_mask(drm_buffer_object_t * bo,
- uint32_t new_mask, uint32_t hint)
+static int drm_bo_new_mask(struct drm_buffer_object * bo,
+ uint64_t new_mask, uint32_t hint)
{
uint32_t new_props;
@@ -893,18 +906,6 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo,
DRM_ERROR("User buffers are not supported yet\n");
return -EINVAL;
}
- if (bo->type == drm_bo_type_fake &&
- !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) {
- DRM_ERROR("Fake buffers must be pinned.\n");
- return -EINVAL;
- }
-
- if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
- DRM_ERROR
- ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
- "processes\n");
- return -EPERM;
- }
new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_READ);
@@ -922,25 +923,25 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo,
* Call dev->struct_mutex locked.
*/
-drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
+struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
uint32_t handle, int check_owner)
{
- drm_user_object_t *uo;
- drm_buffer_object_t *bo;
+ struct drm_user_object *uo;
+ struct drm_buffer_object *bo;
- uo = drm_lookup_user_object(priv, handle);
+ uo = drm_lookup_user_object(file_priv, handle);
if (!uo || (uo->type != drm_buffer_type)) {
DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
return NULL;
}
- if (check_owner && priv != uo->owner) {
- if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE))
+ if (check_owner && file_priv != uo->owner) {
+ if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
return NULL;
}
- bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
+ bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
atomic_inc(&bo->usage);
return bo;
}
@@ -951,9 +952,9 @@ drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv,
* Doesn't do any fence flushing as opposed to the drm_bo_busy function.
*/
-static int drm_bo_quick_busy(drm_buffer_object_t * bo)
+static int drm_bo_quick_busy(struct drm_buffer_object * bo)
{
- drm_fence_object_t *fence = bo->fence;
+ struct drm_fence_object *fence = bo->fence;
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
@@ -971,9 +972,9 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo)
* Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
*/
-static int drm_bo_busy(drm_buffer_object_t * bo)
+static int drm_bo_busy(struct drm_buffer_object * bo)
{
- drm_fence_object_t *fence = bo->fence;
+ struct drm_fence_object *fence = bo->fence;
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
@@ -991,7 +992,7 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
return 0;
}
-static int drm_bo_read_cached(drm_buffer_object_t * bo)
+static int drm_bo_read_cached(struct drm_buffer_object * bo)
{
int ret = 0;
@@ -1005,7 +1006,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo)
* Wait until a buffer is unmapped.
*/
-static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
+static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
{
int ret = 0;
@@ -1021,7 +1022,7 @@ static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait)
return ret;
}
-static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
+static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
{
int ret;
@@ -1043,7 +1044,7 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo)
* the buffer "unfenced" after validating, but before fencing.
*/
-static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
+static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
int eagain_if_wait)
{
int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
@@ -1076,8 +1077,8 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait,
* Bo locked.
*/
-static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
- drm_bo_arg_reply_t * rep)
+static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
+ struct drm_bo_info_rep *rep)
{
rep->handle = bo->base.hash.key;
rep->flags = bo->mem.flags;
@@ -1103,17 +1104,17 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo,
* unregistered.
*/
-static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
+static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
uint32_t map_flags, unsigned hint,
- drm_bo_arg_reply_t * rep)
+ struct drm_bo_info_rep *rep)
{
- drm_buffer_object_t *bo;
- drm_device_t *dev = priv->head->dev;
+ struct drm_buffer_object *bo;
+ struct drm_device *dev = file_priv->head->dev;
int ret = 0;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
mutex_lock(&dev->struct_mutex);
- bo = drm_lookup_buffer_object(priv, handle, 1);
+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo)
@@ -1170,7 +1171,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
}
mutex_lock(&dev->struct_mutex);
- ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+ ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
mutex_unlock(&dev->struct_mutex);
if (ret) {
if (atomic_add_negative(-1, &bo->mapped))
@@ -1184,28 +1185,28 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
return ret;
}
-static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
+static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
{
- drm_device_t *dev = priv->head->dev;
- drm_buffer_object_t *bo;
- drm_ref_object_t *ro;
+ struct drm_device *dev = file_priv->head->dev;
+ struct drm_buffer_object *bo;
+ struct drm_ref_object *ro;
int ret = 0;
mutex_lock(&dev->struct_mutex);
- bo = drm_lookup_buffer_object(priv, handle, 1);
+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
if (!bo) {
ret = -EINVAL;
goto out;
}
- ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1);
+ ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
if (!ro) {
ret = -EINVAL;
goto out;
}
- drm_remove_ref_object(priv, ro);
+ drm_remove_ref_object(file_priv, ro);
drm_bo_usage_deref_locked(&bo);
out:
mutex_unlock(&dev->struct_mutex);
@@ -1216,12 +1217,12 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
* Call struct-sem locked.
*/
-static void drm_buffer_user_object_unmap(drm_file_t * priv,
- drm_user_object_t * uo,
- drm_ref_t action)
+static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
+ struct drm_user_object * uo,
+ enum drm_ref_type action)
{
- drm_buffer_object_t *bo =
- drm_user_object_entry(uo, drm_buffer_object_t, base);
+ struct drm_buffer_object *bo =
+ drm_user_object_entry(uo, struct drm_buffer_object, base);
/*
* We DON'T want to take the bo->lock here, because we want to
@@ -1239,13 +1240,13 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv,
* Note that new_mem_flags are NOT transferred to the bo->mem.mask.
*/
-int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
+int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
int no_wait, int move_unfenced)
{
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
int ret = 0;
- drm_bo_mem_reg_t mem;
+ struct drm_bo_mem_reg mem;
/*
* Flush outstanding fences.
*/
@@ -1301,7 +1302,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
return ret;
}
-static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
+static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
{
uint32_t flag_diff = (mem->mask ^ mem->flags);
@@ -1319,10 +1320,10 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem)
return 1;
}
-static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
+static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
{
- drm_buffer_manager_t *bm = &dev->bm;
- drm_mem_type_manager_t *man;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_mem_type_manager *man;
uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
uint32_t i;
@@ -1352,7 +1353,8 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
return 0;
}
- DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask);
+ DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
+ (unsigned long long) mem->mask);
return -EINVAL;
}
@@ -1360,23 +1362,52 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem)
* bo locked.
*/
-static int drm_buffer_object_validate(drm_buffer_object_t * bo,
+static int drm_buffer_object_validate(struct drm_buffer_object * bo,
+ uint32_t fence_class,
int move_unfenced, int no_wait)
{
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
- drm_bo_driver_t *driver = dev->driver->bo_driver;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
+ uint32_t ftype;
int ret;
- DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask,
- bo->mem.flags);
- ret =
- driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
+ DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
+ (unsigned long long) bo->mem.mask,
+ (unsigned long long) bo->mem.flags);
+
+ ret = driver->fence_type(bo, &ftype);
+
if (ret) {
DRM_ERROR("Driver did not support given buffer permissions\n");
return ret;
}
+ if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) {
+ DRM_ERROR("Attempt to validate pinned buffer into different memory "
+ "type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * We're switching command submission mechanism,
+ * or cannot simply rely on the hardware serializing for us.
+ *
+ * Wait for buffer idle.
+ */
+
+ if ((fence_class != bo->fence_class) ||
+ ((ftype ^ bo->fence_type) & bo->fence_type)) {
+
+ ret = drm_bo_wait(bo, 0, 0, no_wait);
+
+ if (ret)
+ return ret;
+
+ }
+
+ bo->fence_class = fence_class;
+ bo->fence_type = ftype;
ret = drm_bo_wait_unmapped(bo, no_wait);
if (ret)
return ret;
@@ -1402,37 +1433,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
}
/*
- * Pinned buffers.
- */
-
- if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
- bo->pinned_mem_type = bo->mem.mem_type;
- mutex_lock(&dev->struct_mutex);
- list_del_init(&bo->pinned_lru);
- drm_bo_add_to_pinned_lru(bo);
-
- if (bo->pinned_node != bo->mem.mm_node) {
- if (bo->pinned_node != NULL)
- drm_mm_put_block(bo->pinned_node);
- bo->pinned_node = bo->mem.mm_node;
- }
-
- mutex_unlock(&dev->struct_mutex);
-
- } else if (bo->pinned_node != NULL) {
-
- mutex_lock(&dev->struct_mutex);
-
- if (bo->pinned_node != bo->mem.mm_node)
- drm_mm_put_block(bo->pinned_node);
-
- list_del_init(&bo->pinned_lru);
- bo->pinned_node = NULL;
- mutex_unlock(&dev->struct_mutex);
-
- }
-
- /*
* We might need to add a TTM.
*/
@@ -1466,17 +1466,19 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo,
return 0;
}
-static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
- uint32_t flags, uint32_t mask, uint32_t hint,
- drm_bo_arg_reply_t * rep)
+static int drm_bo_handle_validate(struct drm_file *file_priv,
+ uint32_t handle,
+ uint32_t fence_class,
+ uint64_t flags, uint64_t mask, uint32_t hint,
+ struct drm_bo_info_rep *rep)
{
- struct drm_device *dev = priv->head->dev;
- drm_buffer_object_t *bo;
+ struct drm_device *dev = file_priv->head->dev;
+ struct drm_buffer_object *bo;
int ret;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
mutex_lock(&dev->struct_mutex);
- bo = drm_lookup_buffer_object(priv, handle, 1);
+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo) {
return -EINVAL;
@@ -1494,7 +1496,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
goto out;
ret =
- drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE),
+ drm_buffer_object_validate(bo, fence_class,
+ !(hint & DRM_BO_HINT_DONT_FENCE),
no_wait);
drm_bo_fill_rep_arg(bo, rep);
@@ -1506,14 +1509,18 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
return ret;
}
-static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
- drm_bo_arg_reply_t * rep)
+/**
+ * Fills out the generic buffer object ioctl reply with the information for
+ * the BO with id of handle.
+ */
+static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
+ struct drm_bo_info_rep *rep)
{
- struct drm_device *dev = priv->head->dev;
- drm_buffer_object_t *bo;
+ struct drm_device *dev = file_priv->head->dev;
+ struct drm_buffer_object *bo;
mutex_lock(&dev->struct_mutex);
- bo = drm_lookup_buffer_object(priv, handle, 1);
+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo) {
@@ -1528,16 +1535,17 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
return 0;
}
-static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
- uint32_t hint, drm_bo_arg_reply_t * rep)
+static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
+ uint32_t hint,
+ struct drm_bo_info_rep *rep)
{
- struct drm_device *dev = priv->head->dev;
- drm_buffer_object_t *bo;
+ struct drm_device *dev = file_priv->head->dev;
+ struct drm_buffer_object *bo;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
int ret;
mutex_lock(&dev->struct_mutex);
- bo = drm_lookup_buffer_object(priv, handle, 1);
+ bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
if (!bo) {
@@ -1560,17 +1568,18 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
return ret;
}
-int drm_buffer_object_create(drm_device_t *dev,
+int drm_buffer_object_create(struct drm_device *dev,
unsigned long size,
- drm_bo_type_t type,
- uint32_t mask,
+ enum drm_bo_type type,
+ uint64_t mask,
uint32_t hint,
uint32_t page_alignment,
unsigned long buffer_start,
- drm_buffer_object_t ** buf_obj)
+ struct drm_buffer_object ** buf_obj)
{
- drm_buffer_manager_t *bm = &dev->bm;
- drm_buffer_object_t *bo;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_buffer_object *bo;
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
int ret = 0;
unsigned long num_pages;
@@ -1615,8 +1624,8 @@ int drm_buffer_object_create(drm_device_t *dev,
bo->buffer_start = buffer_start;
}
bo->priv_flags = 0;
- bo->mem.flags = 0;
- bo->mem.mask = 0;
+ bo->mem.flags = 0ULL;
+ bo->mem.mask = 0ULL;
atomic_inc(&bm->count);
ret = drm_bo_new_mask(bo, mask, hint);
@@ -1630,10 +1639,28 @@ int drm_buffer_object_create(drm_device_t *dev,
if (ret)
goto out_err;
}
- ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK);
+
+ bo->fence_class = 0;
+ ret = driver->fence_type(bo, &bo->fence_type);
+ if (ret) {
+ DRM_ERROR("Driver did not support given buffer permissions\n");
+ goto out_err;
+ }
+
+ if (bo->type == drm_bo_type_fake) {
+ ret = drm_bo_check_fake(dev, &bo->mem);
+ if (ret)
+ goto out_err;
+ }
+
+ ret = drm_bo_add_ttm(bo);
if (ret)
goto out_err;
+ mutex_lock(&dev->struct_mutex);
+ drm_bo_add_to_lru(bo);
+ mutex_unlock(&dev->struct_mutex);
+
mutex_unlock(&bo->mutex);
*buf_obj = bo;
return 0;
@@ -1646,14 +1673,14 @@ int drm_buffer_object_create(drm_device_t *dev,
}
EXPORT_SYMBOL(drm_buffer_object_create);
-static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
- int shareable)
+int drm_bo_add_user_object(struct drm_file *file_priv,
+ struct drm_buffer_object *bo, int shareable)
{
- drm_device_t *dev = priv->head->dev;
+ struct drm_device *dev = file_priv->head->dev;
int ret;
mutex_lock(&dev->struct_mutex);
- ret = drm_add_user_object(priv, &bo->base, shareable);
+ ret = drm_add_user_object(file_priv, &bo->base, shareable);
if (ret)
goto out;
@@ -1666,22 +1693,25 @@ static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo,
mutex_unlock(&dev->struct_mutex);
return ret;
}
+EXPORT_SYMBOL(drm_bo_add_user_object);
-static int drm_bo_lock_test(drm_device_t * dev, struct file *filp)
+static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
{
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return 0;
}
-int drm_bo_ioctl(DRM_IOCTL_ARGS)
+int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_bo_arg_t arg;
- drm_bo_arg_request_t *req = &arg.d.req;
- drm_bo_arg_reply_t rep;
- unsigned long next;
- drm_user_object_t *uo;
- drm_buffer_object_t *entry;
+ struct drm_bo_op_arg curarg;
+ struct drm_bo_op_arg *arg = data;
+ struct drm_bo_op_req *req = &arg->d.req;
+ struct drm_bo_info_rep rep;
+ unsigned long next = 0;
+ void __user *curuserarg = NULL;
+ int ret;
+
+ DRM_DEBUG("drm_bo_op_ioctl\n");
if (!dev->bm.initialized) {
DRM_ERROR("Buffer object manager is not initialized.\n");
@@ -1689,140 +1719,388 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
}
do {
- DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ if (next != 0) {
+ curuserarg = (void __user *)next;
+ if (copy_from_user(&curarg, curuserarg,
+ sizeof(curarg)) != 0)
+ return -EFAULT;
+ arg = &curarg;
+ }
- if (arg.handled) {
- data = arg.next;
+ if (arg->handled) {
+ next = arg->next;
continue;
}
-
- rep.ret = 0;
+ req = &arg->d.req;
+ ret = 0;
switch (req->op) {
- case drm_bo_create:
- rep.ret = drm_bo_lock_test(dev, filp);
- if (rep.ret)
- break;
- rep.ret =
- drm_buffer_object_create(priv->head->dev,
- req->size,
- req->type,
- req->mask,
- req->hint,
- req->page_alignment,
- req->buffer_start, &entry);
- if (rep.ret)
- break;
-
- rep.ret =
- drm_bo_add_user_object(priv, entry,
- req->
- mask &
- DRM_BO_FLAG_SHAREABLE);
- if (rep.ret)
- drm_bo_usage_deref_unlocked(&entry);
-
- if (rep.ret)
- break;
-
- mutex_lock(&entry->mutex);
- drm_bo_fill_rep_arg(entry, &rep);
- mutex_unlock(&entry->mutex);
- break;
- case drm_bo_unmap:
- rep.ret = drm_buffer_object_unmap(priv, req->handle);
- break;
- case drm_bo_map:
- rep.ret = drm_buffer_object_map(priv, req->handle,
- req->mask,
- req->hint, &rep);
- break;
- case drm_bo_destroy:
- mutex_lock(&dev->struct_mutex);
- uo = drm_lookup_user_object(priv, req->handle);
- if (!uo || (uo->type != drm_buffer_type)
- || uo->owner != priv) {
- mutex_unlock(&dev->struct_mutex);
- rep.ret = -EINVAL;
- break;
- }
- rep.ret = drm_remove_user_object(priv, uo);
- mutex_unlock(&dev->struct_mutex);
- break;
- case drm_bo_reference:
- rep.ret = drm_user_object_ref(priv, req->handle,
- drm_buffer_type, &uo);
- if (rep.ret)
- break;
- rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
- break;
- case drm_bo_unreference:
- rep.ret = drm_user_object_unref(priv, req->handle,
- drm_buffer_type);
- break;
case drm_bo_validate:
- rep.ret = drm_bo_lock_test(dev, filp);
-
- if (rep.ret)
+ ret = drm_bo_lock_test(dev, file_priv);
+ if (ret)
break;
- rep.ret =
- drm_bo_handle_validate(priv, req->handle, req->mask,
- req->arg_handle, req->hint,
- &rep);
+ ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
+ req->bo_req.fence_class,
+ req->bo_req.flags,
+ req->bo_req.mask,
+ req->bo_req.hint,
+ &rep);
break;
case drm_bo_fence:
- rep.ret = drm_bo_lock_test(dev, filp);
- if (rep.ret)
- break;
- /**/ break;
- case drm_bo_info:
- rep.ret = drm_bo_handle_info(priv, req->handle, &rep);
- break;
- case drm_bo_wait_idle:
- rep.ret = drm_bo_handle_wait(priv, req->handle,
- req->hint, &rep);
+ ret = -EINVAL;
+ DRM_ERROR("Function is not implemented yet.\n");
break;
case drm_bo_ref_fence:
- rep.ret = -EINVAL;
+ ret = -EINVAL;
DRM_ERROR("Function is not implemented yet.\n");
+ break;
default:
- rep.ret = -EINVAL;
+ ret = -EINVAL;
}
- next = arg.next;
+ next = arg->next;
/*
* A signal interrupted us. Make sure the ioctl is restartable.
*/
- if (rep.ret == -EAGAIN)
+ if (ret == -EAGAIN)
return -EAGAIN;
- arg.handled = 1;
- arg.d.rep = rep;
- DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
- data = next;
- } while (data);
+ arg->handled = 1;
+ arg->d.rep.ret = ret;
+ arg->d.rep.bo_info = rep;
+ if (arg != data) {
+ if (copy_to_user(curuserarg, &curarg,
+ sizeof(curarg)) != 0)
+ return -EFAULT;
+ }
+ } while (next != 0);
+ return 0;
+}
+
+int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_create_arg *arg = data;
+ struct drm_bo_create_req *req = &arg->d.req;
+ struct drm_bo_info_rep *rep = &arg->d.rep;
+ struct drm_buffer_object *entry;
+ int ret = 0;
+
+ DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
+ (int)(req->size / 1024), req->page_alignment * 4, req->type);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+ if (req->type == drm_bo_type_fake)
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ ret = drm_buffer_object_create(file_priv->head->dev,
+ req->size, req->type, req->mask,
+ req->hint, req->page_alignment,
+ req->buffer_start, &entry);
+ if (ret)
+ goto out;
+
+ ret = drm_bo_add_user_object(file_priv, entry,
+ req->mask & DRM_BO_FLAG_SHAREABLE);
+ if (ret) {
+ drm_bo_usage_deref_unlocked(&entry);
+ goto out;
+ }
+
+ mutex_lock(&entry->mutex);
+ drm_bo_fill_rep_arg(entry, rep);
+ mutex_unlock(&entry->mutex);
+
+out:
+ return ret;
+}
+
+
+int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_handle_arg *arg = data;
+ struct drm_user_object *uo;
+ int ret = 0;
+
+ DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(file_priv, arg->handle);
+ if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
+ mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ ret = drm_remove_user_object(file_priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
+}
+
+int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_map_wait_idle_arg *arg = data;
+ struct drm_bo_info_req *req = &arg->d.req;
+ struct drm_bo_info_rep *rep = &arg->d.rep;
+ int ret;
+
+ DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
+ req->hint, rep);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_handle_arg *arg = data;
+ int ret;
+
+ DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_buffer_object_unmap(file_priv, arg->handle);
+ return ret;
+}
+
+
+int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_reference_info_arg *arg = data;
+ struct drm_bo_handle_arg *req = &arg->d.req;
+ struct drm_bo_info_rep *rep = &arg->d.rep;
+ struct drm_user_object *uo;
+ int ret;
+
+ DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_user_object_ref(file_priv, req->handle,
+ drm_buffer_type, &uo);
+ if (ret)
+ return ret;
+
+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_handle_arg *arg = data;
+ int ret = 0;
+
+ DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
+ return ret;
+}
+
+int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_reference_info_arg *arg = data;
+ struct drm_bo_handle_arg *req = &arg->d.req;
+ struct drm_bo_info_rep *rep = &arg->d.rep;
+ int ret;
+
+ DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_bo_handle_info(file_priv, req->handle, rep);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_map_wait_idle_arg *arg = data;
+ struct drm_bo_info_req *req = &arg->d.req;
+ struct drm_bo_info_rep *rep = &arg->d.rep;
+ int ret;
+
+ DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_bo_handle_wait(file_priv, req->handle,
+ req->hint, rep);
+ if (ret)
+ return ret;
+
return 0;
}
/**
+ * Pins or unpins the given buffer object in the given memory area.
+ *
+ * Pinned buffers will not be evicted from or move within their memory area.
+ * Must be called with the hardware lock held for pinning.
+ */
+int
+drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo,
+ int pin)
+{
+ int ret = 0;
+
+ mutex_lock(&bo->mutex);
+ if (bo->pinned == pin) {
+ mutex_unlock(&bo->mutex);
+ return 0;
+ }
+
+ if (pin) {
+ ret = drm_bo_wait_unfenced(bo, 0, 0);
+ if (ret) {
+ mutex_unlock(&bo->mutex);
+ return ret;
+ }
+
+ /* Validate the buffer into its pinned location, with no
+ * pending fence.
+ */
+ ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0);
+ if (ret) {
+ mutex_unlock(&bo->mutex);
+ return ret;
+ }
+
+ /* Pull the buffer off of the LRU and add it to the pinned
+ * list
+ */
+ bo->pinned_mem_type = bo->mem.mem_type;
+ mutex_lock(&dev->struct_mutex);
+ list_del_init(&bo->lru);
+ list_del_init(&bo->pinned_lru);
+ drm_bo_add_to_pinned_lru(bo);
+
+ if (bo->pinned_node != bo->mem.mm_node) {
+ if (bo->pinned_node != NULL)
+ drm_mm_put_block(bo->pinned_node);
+ bo->pinned_node = bo->mem.mm_node;
+ }
+
+ bo->pinned = pin;
+ mutex_unlock(&dev->struct_mutex);
+
+ } else {
+ mutex_lock(&dev->struct_mutex);
+
+ /* Remove our buffer from the pinned list */
+ if (bo->pinned_node != bo->mem.mm_node)
+ drm_mm_put_block(bo->pinned_node);
+
+ list_del_init(&bo->pinned_lru);
+ bo->pinned_node = NULL;
+ bo->pinned = pin;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ mutex_unlock(&bo->mutex);
+ return 0;
+}
+EXPORT_SYMBOL(drm_bo_set_pin);
+
+int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_bo_set_pin_arg *arg = data;
+ struct drm_bo_set_pin_req *req = &arg->d.req;
+ struct drm_bo_info_rep *rep = &arg->d.rep;
+ struct drm_buffer_object *bo;
+ int ret;
+
+ DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n",
+ req->handle, req->pin);
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized.\n");
+ return -EINVAL;
+ }
+
+ if (req->pin < 0 || req->pin > 1) {
+ DRM_ERROR("Bad arguments to set_pin\n");
+ return -EINVAL;
+ }
+
+ if (req->pin)
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+ mutex_lock(&dev->struct_mutex);
+ bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+ if (!bo) {
+ return -EINVAL;
+ }
+
+ ret = drm_bo_set_pin(dev, bo, req->pin);
+ if (ret) {
+ drm_bo_usage_deref_unlocked(&bo);
+ return ret;
+ }
+
+ drm_bo_fill_rep_arg(bo, rep);
+ drm_bo_usage_deref_unlocked(&bo);
+
+ return 0;
+}
+
+
+/**
*Clean the unfenced list and put on regular LRU.
*This is part of the memory manager cleanup and should only be
*called with the DRI lock held.
*Call dev->struct_sem locked.
*/
-static void drm_bo_clean_unfenced(drm_device_t *dev)
+static void drm_bo_clean_unfenced(struct drm_device *dev)
{
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_buffer_manager *bm = &dev->bm;
struct list_head *head, *list;
- drm_buffer_object_t *entry;
+ struct drm_buffer_object *entry;
head = &bm->unfenced;
list = head->next;
while(list != head) {
prefetch(list->next);
- entry = list_entry(list, drm_buffer_object_t, lru);
+ entry = list_entry(list, struct drm_buffer_object, lru);
atomic_inc(&entry->usage);
mutex_unlock(&dev->struct_mutex);
@@ -1837,11 +2115,11 @@ static void drm_bo_clean_unfenced(drm_device_t *dev)
}
}
-static int drm_bo_leave_list(drm_buffer_object_t * bo,
+static int drm_bo_leave_list(struct drm_buffer_object * bo,
uint32_t mem_type,
int free_pinned, int allow_errors)
{
- drm_device_t *dev = bo->dev;
+ struct drm_device *dev = bo->dev;
int ret = 0;
mutex_lock(&bo->mutex);
@@ -1863,11 +2141,10 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo,
mutex_unlock(&dev->struct_mutex);
}
- if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
- DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
+ if (bo->pinned) {
+ DRM_ERROR("A pinned buffer was present at "
"cleanup. Removing flag and evicting.\n");
- bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
- bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+ bo->pinned = 0;
}
if (bo->mem.mem_type == mem_type)
@@ -1888,20 +2165,20 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo,
}
-static drm_buffer_object_t *drm_bo_entry(struct list_head *list,
+static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
int pinned_list)
{
if (pinned_list)
- return list_entry(list, drm_buffer_object_t, pinned_lru);
+ return list_entry(list, struct drm_buffer_object, pinned_lru);
else
- return list_entry(list, drm_buffer_object_t, lru);
+ return list_entry(list, struct drm_buffer_object, lru);
}
/*
* dev->struct_mutex locked.
*/
-static int drm_bo_force_list_clean(drm_device_t * dev,
+static int drm_bo_force_list_clean(struct drm_device * dev,
struct list_head *head,
unsigned mem_type,
int free_pinned,
@@ -1909,7 +2186,7 @@ static int drm_bo_force_list_clean(drm_device_t * dev,
int pinned_list)
{
struct list_head *list, *next, *prev;
- drm_buffer_object_t *entry, *nentry;
+ struct drm_buffer_object *entry, *nentry;
int ret;
int do_restart;
@@ -1966,10 +2243,10 @@ restart:
return 0;
}
-int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type)
+int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
{
- drm_buffer_manager_t *bm = &dev->bm;
- drm_mem_type_manager_t *man = &bm->man[mem_type];
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_mem_type_manager *man = &bm->man[mem_type];
int ret = -EINVAL;
if (mem_type >= DRM_BO_MEM_TYPES) {
@@ -2009,11 +2286,11 @@ EXPORT_SYMBOL(drm_bo_clean_mm);
*point since we have the hardware lock.
*/
-static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
+static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
{
int ret;
- drm_buffer_manager_t *bm = &dev->bm;
- drm_mem_type_manager_t *man = &bm->man[mem_type];
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_mem_type_manager *man = &bm->man[mem_type];
if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
@@ -2035,13 +2312,13 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type)
return ret;
}
-int drm_bo_init_mm(drm_device_t * dev,
+int drm_bo_init_mm(struct drm_device * dev,
unsigned type,
unsigned long p_offset, unsigned long p_size)
{
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_buffer_manager *bm = &dev->bm;
int ret = -EINVAL;
- drm_mem_type_manager_t *man;
+ struct drm_mem_type_manager *man;
if (type >= DRM_BO_MEM_TYPES) {
DRM_ERROR("Illegal memory type %d\n", type);
@@ -2084,12 +2361,12 @@ EXPORT_SYMBOL(drm_bo_init_mm);
* any clients still running when we set the initialized flag to zero.
*/
-int drm_bo_driver_finish(drm_device_t * dev)
+int drm_bo_driver_finish(struct drm_device * dev)
{
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_buffer_manager *bm = &dev->bm;
int ret = 0;
unsigned i = DRM_BO_MEM_TYPES;
- drm_mem_type_manager_t *man;
+ struct drm_mem_type_manager *man;
mutex_lock(&dev->bm.init_mutex);
mutex_lock(&dev->struct_mutex);
@@ -2136,10 +2413,10 @@ int drm_bo_driver_finish(drm_device_t * dev)
}
EXPORT_SYMBOL(drm_bo_driver_finish);
-int drm_bo_driver_init(drm_device_t * dev)
+int drm_bo_driver_init(struct drm_device * dev)
{
- drm_bo_driver_t *driver = dev->driver->bo_driver;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
+ struct drm_buffer_manager *bm = &dev->bm;
int ret = -EINVAL;
mutex_lock(&dev->bm.init_mutex);
@@ -2151,8 +2428,7 @@ int drm_bo_driver_init(drm_device_t * dev)
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
*/
-
- ret = drm_bo_init_mm(dev, 0, 0, 0);
+ ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
if (ret)
goto out_unlock;
@@ -2175,81 +2451,150 @@ int drm_bo_driver_init(drm_device_t * dev)
EXPORT_SYMBOL(drm_bo_driver_init);
-int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
+int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
+ struct drm_mm_init_arg *arg = data;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
+ int ret;
- int ret = 0;
- drm_mm_init_arg_t arg;
- drm_buffer_manager_t *bm = &dev->bm;
- drm_bo_driver_t *driver = dev->driver->bo_driver;
+ DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n",
+ arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4));
if (!driver) {
DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
+ ret = -EINVAL;
+ if (arg->magic != DRM_BO_INIT_MAGIC) {
+ DRM_ERROR("You are using an old libdrm that is not compatible with\n"
+ "\tthe kernel DRM module. Please upgrade your libdrm.\n");
+ return -EINVAL;
+ }
+ if (arg->major != DRM_BO_INIT_MAJOR) {
+ DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
+ "\tversion don't match. Got %d, expected %d,\n",
+ arg->major, DRM_BO_INIT_MAJOR);
+ return -EINVAL;
+ }
+ if (arg->minor > DRM_BO_INIT_MINOR) {
+ DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n"
+ "\tlibdrm buffer object interface version is %d.%d.\n"
+ "\tkernel DRM buffer object interface version is %d.%d\n",
+ arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR);
+ return -EINVAL;
+ }
- switch (arg.req.op) {
- case mm_init:
- ret = -EINVAL;
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- if (!bm->initialized) {
- DRM_ERROR("DRM memory manager was not initialized.\n");
- break;
- }
- if (arg.req.mem_type == 0) {
- DRM_ERROR
- ("System memory buffers already initialized.\n");
- break;
- }
- ret = drm_bo_init_mm(dev, arg.req.mem_type,
- arg.req.p_offset, arg.req.p_size);
- break;
- case mm_takedown:
- LOCK_TEST_WITH_RETURN(dev, filp);
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- ret = -EINVAL;
- if (!bm->initialized) {
- DRM_ERROR("DRM memory manager was not initialized\n");
- break;
- }
- if (arg.req.mem_type == 0) {
- DRM_ERROR("No takedown for System memory buffers.\n");
- break;
- }
- ret = 0;
- if (drm_bo_clean_mm(dev, arg.req.mem_type)) {
- DRM_ERROR("Memory manager type %d not clean. "
- "Delaying takedown\n", arg.req.mem_type);
- }
- break;
- case mm_lock:
- LOCK_TEST_WITH_RETURN(dev, filp);
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- ret = drm_bo_lock_mm(dev, arg.req.mem_type);
- break;
- case mm_unlock:
- LOCK_TEST_WITH_RETURN(dev, filp);
- mutex_lock(&dev->bm.init_mutex);
- mutex_lock(&dev->struct_mutex);
- ret = 0;
- break;
- default:
- DRM_ERROR("Function not implemented yet\n");
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized.\n");
+ goto out;
+ }
+ if (arg->mem_type == 0) {
+ DRM_ERROR("System memory buffers already initialized.\n");
+ goto out;
+ }
+ ret = drm_bo_init_mm(dev, arg->mem_type,
+ arg->p_offset, arg->p_size);
+
+out:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_mm_type_arg *arg = data;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
+ int ret;
+
+ DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type);
+
+ if (!driver) {
+ DRM_ERROR("Buffer objects are not supported by this driver\n");
+ return -EINVAL;
+ }
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = -EINVAL;
+ if (!bm->initialized) {
+ DRM_ERROR("DRM memory manager was not initialized\n");
+ goto out;
+ }
+ if (arg->mem_type == 0) {
+ DRM_ERROR("No takedown for System memory buffers.\n");
+ goto out;
+ }
+ ret = 0;
+ if (drm_bo_clean_mm(dev, arg->mem_type)) {
+ DRM_ERROR("Memory manager type %d not clean. "
+ "Delaying takedown\n", arg->mem_type);
+ }
+out:
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_mm_type_arg *arg = data;
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
+ int ret;
+
+ DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type);
+
+ if (!driver) {
+ DRM_ERROR("Buffer objects are not supported by this driver\n");
return -EINVAL;
}
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = drm_bo_lock_mm(dev, arg->mem_type);
+ mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->bm.init_mutex);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_bo_driver *driver = dev->driver->bo_driver;
+ int ret;
+
+ DRM_DEBUG("drm_mm_unlock_ioctl\n");
+
+ if (!driver) {
+ DRM_ERROR("Buffer objects are not supported by this driver\n");
+ return -EINVAL;
+ }
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+ mutex_lock(&dev->bm.init_mutex);
+ mutex_lock(&dev->struct_mutex);
+ ret = 0;
+
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->bm.init_mutex);
if (ret)
return ret;
- DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return 0;
}
@@ -2257,10 +2602,10 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS)
* buffer object vm functions.
*/
-int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem)
+int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
{
- drm_buffer_manager_t *bm = &dev->bm;
- drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
if (mem->mem_type == DRM_BO_MEM_LOCAL)
@@ -2291,13 +2636,13 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci);
* Otherwise returns zero.
*/
-int drm_bo_pci_offset(drm_device_t * dev,
- drm_bo_mem_reg_t * mem,
+int drm_bo_pci_offset(struct drm_device *dev,
+ struct drm_bo_mem_reg *mem,
unsigned long *bus_base,
unsigned long *bus_offset, unsigned long *bus_size)
{
- drm_buffer_manager_t *bm = &dev->bm;
- drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
*bus_size = 0;
if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
@@ -2320,9 +2665,9 @@ int drm_bo_pci_offset(drm_device_t * dev,
* Call bo->mutex locked.
*/
-void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
+void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
{
- drm_device_t *dev = bo->dev;
+ struct drm_device *dev = bo->dev;
loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
@@ -2332,11 +2677,11 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo)
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
-static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
{
- drm_map_list_t *list = &bo->map_list;
+ struct drm_map_list *list = &bo->map_list;
drm_local_map_t *map;
- drm_device_t *dev = bo->dev;
+ struct drm_device *dev = bo->dev;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
if (list->user_token) {
@@ -2358,11 +2703,11 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
drm_bo_usage_deref_locked(&bo);
}
-static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
+static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
{
- drm_map_list_t *list = &bo->map_list;
+ struct drm_map_list *list = &bo->map_list;
drm_local_map_t *map;
- drm_device_t *dev = bo->dev;
+ struct drm_device *dev = bo->dev;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
@@ -2394,7 +2739,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
return -ENOMEM;
}
- list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT;
+ list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
return 0;
}
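(Illustrative sketch, not part of the patch: the drm_bo.c hunks above convert every handler from the old DRM_IOCTL_ARGS / DRM_COPY_FROM_USER_IOCTL pattern to the new convention in which the handler receives the device, a pointer to an argument block the DRM core has already copied in from user space, and the caller's struct drm_file. A minimal hypothetical handler in that style — the name foo_example_ioctl and the choice of drm_bo_handle_arg are assumptions for illustration only:)

int foo_example_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_bo_handle_arg *arg = data;	/* already copied in by the DRM core */

	if (!dev->bm.initialized) {
		DRM_ERROR("Buffer object manager is not initialized.\n");
		return -EINVAL;
	}

	DRM_DEBUG("foo_example_ioctl: buffer %d\n", arg->handle);

	/* Anything written back through *arg is copied out again by the core. */
	return 0;
}

(Compare drm_bo_unreference_ioctl and drm_bo_info_ioctl above, which follow exactly this shape.)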
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 28c1509e..21ab4bbb 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -35,9 +35,9 @@
* have not been requested to free also pinned regions.
*/
-static void drm_bo_free_old_node(drm_buffer_object_t * bo)
+static void drm_bo_free_old_node(struct drm_buffer_object * bo)
{
- drm_bo_mem_reg_t *old_mem = &bo->mem;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
mutex_lock(&bo->dev->struct_mutex);
@@ -48,11 +48,11 @@ static void drm_bo_free_old_node(drm_buffer_object_t * bo)
old_mem->mm_node = NULL;
}
-int drm_bo_move_ttm(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+int drm_bo_move_ttm(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
- drm_ttm_t *ttm = bo->ttm;
- drm_bo_mem_reg_t *old_mem = &bo->mem;
+ struct drm_ttm *ttm = bo->ttm;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
int ret;
@@ -102,11 +102,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm);
* Call bo->mutex locked.
*/
-int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
void **virtual)
{
- drm_buffer_manager_t *bm = &dev->bm;
- drm_mem_type_manager_t *man = &bm->man[mem->mem_type];
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
unsigned long bus_offset;
unsigned long bus_size;
unsigned long bus_base;
@@ -138,11 +138,11 @@ EXPORT_SYMBOL(drm_mem_reg_ioremap);
* Call bo->mutex locked.
*/
-void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem,
+void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
void *virtual)
{
- drm_buffer_manager_t *bm;
- drm_mem_type_manager_t *man;
+ struct drm_buffer_manager *bm;
+ struct drm_mem_type_manager *man;
bm = &dev->bm;
man = &bm->man[mem->mem_type];
@@ -166,7 +166,7 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
-static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
+static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page)
{
struct page *d = drm_ttm_get_page(ttm, page);
void *dst;
@@ -184,7 +184,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page)
return 0;
}
-static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
+static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page)
{
struct page *s = drm_ttm_get_page(ttm, page);
void *src;
@@ -202,14 +202,14 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page)
return 0;
}
-int drm_bo_move_memcpy(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+int drm_bo_move_memcpy(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
- drm_device_t *dev = bo->dev;
- drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
- drm_ttm_t *ttm = bo->ttm;
- drm_bo_mem_reg_t *old_mem = &bo->mem;
- drm_bo_mem_reg_t old_copy = *old_mem;
+ struct drm_device *dev = bo->dev;
+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
+ struct drm_ttm *ttm = bo->ttm;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+ struct drm_bo_mem_reg old_copy = *old_mem;
void *old_iomap;
void *new_iomap;
int ret;
@@ -283,12 +283,12 @@ EXPORT_SYMBOL(drm_bo_move_memcpy);
* object. Call bo->mutex locked.
*/
-int drm_buffer_object_transfer(drm_buffer_object_t * bo,
- drm_buffer_object_t ** new_obj)
+int drm_buffer_object_transfer(struct drm_buffer_object * bo,
+ struct drm_buffer_object ** new_obj)
{
- drm_buffer_object_t *fbo;
- drm_device_t *dev = bo->dev;
- drm_buffer_manager_t *bm = &dev->bm;
+ struct drm_buffer_object *fbo;
+ struct drm_device *dev = bo->dev;
+ struct drm_buffer_manager *bm = &dev->bm;
fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
if (!fbo)
@@ -325,20 +325,20 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo,
* We cannot restart until it has finished.
*/
-int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
int evict,
int no_wait,
uint32_t fence_class,
uint32_t fence_type,
- uint32_t fence_flags, drm_bo_mem_reg_t * new_mem)
+ uint32_t fence_flags, struct drm_bo_mem_reg * new_mem)
{
- drm_device_t *dev = bo->dev;
- drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type];
- drm_bo_mem_reg_t *old_mem = &bo->mem;
+ struct drm_device *dev = bo->dev;
+ struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
int ret;
uint32_t save_flags = old_mem->flags;
uint32_t save_mask = old_mem->mask;
- drm_buffer_object_t *old_obj;
+ struct drm_buffer_object *old_obj;
if (bo->fence)
drm_fence_usage_deref_unlocked(&bo->fence);
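(Illustrative sketch, not from this patch: the drm_bo_move.c hunks keep the generic move helpers — drm_bo_move_ttm, drm_bo_move_memcpy and drm_bo_move_accel_cleanup — and only switch their signatures from the removed typedefs to the struct-based types. A trivial hypothetical driver move hook against the new signatures; foo_bo_move is an assumed name, not an exported symbol:)

static int foo_bo_move(struct drm_buffer_object *bo,
		       int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
	/*
	 * A real driver would pick drm_bo_move_ttm() for moves that only
	 * need the TTM bound or unbound, or issue a hardware blit and then
	 * call drm_bo_move_accel_cleanup(); the plain CPU copy below works
	 * as a catch-all.
	 */
	return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}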
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 38f0dd3a..dc7f342a 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -36,22 +36,21 @@
#include <linux/vmalloc.h>
#include "drmP.h"
-unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource)
{
return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);
-unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
+unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource)
{
return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);
-static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
- drm_local_map_t *map)
+struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map)
{
- drm_map_list_t *entry;
+ struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
@@ -62,8 +61,9 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
return NULL;
}
+EXPORT_SYMBOL(drm_find_matching_map);
-static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
+static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
unsigned long user_token, int hashed_handle)
{
int use_hashed_handle;
@@ -92,7 +92,7 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
* Ioctl to specify a range of memory that is available for mapping by a non-root process.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_map structure.
* \return zero on success or a negative value on error.
@@ -101,12 +101,13 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
* type. Adds the map to the map list drm_device::maplist. Adds MTRR's where
* applicable and if supported by the kernel.
*/
-static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
- unsigned int size, drm_map_type_t type,
- drm_map_flags_t flags, drm_map_list_t ** maplist)
+static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags,
+ struct drm_map_list **maplist)
{
- drm_map_t *map;
- drm_map_list_t *list;
+ struct drm_map *map;
+ struct drm_map_list *list;
drm_dma_handle_t *dmah;
unsigned long user_token;
int ret;
@@ -212,7 +213,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
}
break;
case _DRM_AGP: {
- drm_agp_mem_t *entry;
+ struct drm_agp_mem *entry;
int valid = 0;
if (!drm_core_has_AGP(dev)) {
@@ -222,11 +223,17 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
#ifdef __alpha__
map->offset += dev->hose->mem_space->start;
#endif
- /* Note: dev->agp->base may actually be 0 when the DRM
- * is not in control of AGP space. But if user space is
- * it should already have added the AGP base itself.
+ /* In some cases (i810 driver), user space may have already
+ * added the AGP base itself, because dev->agp->base previously
+ * only got set during AGP enable. So, only add the base
+ * address if the map's offset isn't already within the
+ * aperture.
*/
- map->offset += dev->agp->base;
+ if (map->offset < dev->agp->base ||
+ map->offset > dev->agp->base +
+ dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
+ map->offset += dev->agp->base;
+ }
map->mtrr = dev->agp->agp_mtrr; /* for getmap */
/* This assumes the DRM is in total control of AGP space.
@@ -310,11 +317,11 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
return 0;
}
-int drm_addmap(drm_device_t * dev, unsigned int offset,
- unsigned int size, drm_map_type_t type,
- drm_map_flags_t flags, drm_local_map_t ** map_ptr)
+int drm_addmap(struct drm_device *dev, unsigned int offset,
+ unsigned int size, enum drm_map_type type,
+ enum drm_map_flags flags, drm_local_map_t ** map_ptr)
{
- drm_map_list_t *list;
+ struct drm_map_list *list;
int rc;
rc = drm_addmap_core(dev, offset, size, type, flags, &list);
@@ -325,38 +332,24 @@ int drm_addmap(drm_device_t * dev, unsigned int offset,
EXPORT_SYMBOL(drm_addmap);
-int drm_addmap_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_addmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t map;
- drm_map_list_t *maplist;
- drm_map_t __user *argp = (void __user *)arg;
+ struct drm_map *map = data;
+ struct drm_map_list *maplist;
int err;
- if (!(filp->f_mode & 3))
- return -EACCES; /* Require read/write */
-
- if (copy_from_user(&map, argp, sizeof(map))) {
- return -EFAULT;
- }
-
- if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
+ if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP))
return -EPERM;
- err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
- &maplist);
+ err = drm_addmap_core(dev, map->offset, map->size, map->type,
+ map->flags, &maplist);
if (err)
return err;
- if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
- return -EFAULT;
-
/* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */
- if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
- return -EFAULT;
+ map->handle = (void *)(unsigned long)maplist->user_token;
return 0;
}
@@ -365,9 +358,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
* isn't in use.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
- * \param arg pointer to a drm_map_t structure.
+ * \param arg pointer to a struct drm_map structure.
* \return zero on success or a negative value on error.
*
* Searches the map on drm_device::maplist, removes it from the list, see if
@@ -376,9 +369,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
*
* \sa drm_addmap
*/
-int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
{
- drm_map_list_t *r_list = NULL, *list_t;
+ struct drm_map_list *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
int found = 0;
@@ -434,7 +427,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
}
EXPORT_SYMBOL(drm_rmmap_locked);
-int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
+int drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
int ret;
@@ -455,24 +448,18 @@ EXPORT_SYMBOL(drm_rmmap);
* gets used by drivers that the server doesn't need to care about. This seems
* unlikely.
*/
-int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_rmmap_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t request;
+ struct drm_map *request = data;
drm_local_map_t *map = NULL;
- drm_map_list_t *r_list;
+ struct drm_map_list *r_list;
int ret;
- if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
- return -EFAULT;
- }
-
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
- r_list->user_token == (unsigned long)request.handle &&
+ r_list->user_token == (unsigned long)request->handle &&
r_list->map->flags & _DRM_REMOVABLE) {
map = r_list->map;
break;
@@ -487,11 +474,6 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
return -EINVAL;
}
- if (!map) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
-
/* Register and framebuffer maps are permanent */
if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
mutex_unlock(&dev->struct_mutex);
@@ -513,7 +495,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
*
* Frees any pages and buffers associated with the given entry.
*/
-static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
+static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry)
{
int i;
@@ -550,20 +532,20 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry)
/**
* Add AGP buffers for DMA transfers
*
- * \param dev drm_device_t to which the buffers are to be added.
- * \param request pointer to a drm_buf_desc_t describing the request.
+ * \param dev struct drm_device to which the buffers are to be added.
+ * \param request pointer to a struct drm_buf_desc describing the request.
* \return zero on success or a negative number on failure.
*
* After some sanity checks creates a drm_buf structure for each buffer and
* reallocates the buffer list of the same size order to accommodate the new
* buffers.
*/
-int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_agp_mem_t *agp_entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_agp_mem *agp_entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -574,7 +556,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
int total;
int byte_count;
int i, valid;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!dma)
return -EINVAL;
@@ -667,7 +649,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -728,24 +710,24 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
-int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int count;
int order;
int size;
int total;
int page_order;
- drm_buf_entry_t *entry;
+ struct drm_buf_entry *entry;
drm_dma_handle_t *dmah;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int alignment;
unsigned long offset;
int i;
int byte_count;
int page_count;
unsigned long *temp_pagelist;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
return -EINVAL;
@@ -878,7 +860,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = drm_alloc(buf->dev_priv_size,
@@ -954,11 +936,11 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request)
}
EXPORT_SYMBOL(drm_addbufs_pci);
-static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
+static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -969,7 +951,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
int total;
int byte_count;
int i;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -1056,7 +1038,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -1116,11 +1098,11 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request)
return 0;
}
-int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
+int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_entry_t *entry;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_entry *entry;
+ struct drm_buf *buf;
unsigned long offset;
unsigned long agp_offset;
int count;
@@ -1131,7 +1113,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
int total;
int byte_count;
int i;
- drm_buf_t **temp_buflist;
+ struct drm_buf **temp_buflist;
if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
return -EINVAL;
@@ -1217,7 +1199,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request)
buf->waiting = 0;
buf->pending = 0;
init_waitqueue_head(&buf->dma_wait);
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->dev_priv_size = dev->driver->dev_priv_size;
buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
@@ -1282,9 +1264,9 @@ EXPORT_SYMBOL(drm_addbufs_fb);
* Add buffers for DMA transfers (ioctl).
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
- * \param arg pointer to a drm_buf_desc_t request.
+ * \param arg pointer to a struct drm_buf_desc request.
* \return zero on success or a negative number on failure.
*
* According with the memory type specified in drm_buf_desc::flags and the
@@ -1292,39 +1274,27 @@ EXPORT_SYMBOL(drm_addbufs_fb);
* addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
* PCI memory respectively.
*/
-int drm_addbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_addbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_buf_desc_t request;
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_buf_desc *request = data;
int ret;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
- if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
- sizeof(request)))
- return -EFAULT;
-
#if __OS_HAS_AGP
- if (request.flags & _DRM_AGP_BUFFER)
- ret = drm_addbufs_agp(dev, &request);
+ if (request->flags & _DRM_AGP_BUFFER)
+ ret = drm_addbufs_agp(dev, request);
else
#endif
- if (request.flags & _DRM_SG_BUFFER)
- ret = drm_addbufs_sg(dev, &request);
- else if (request.flags & _DRM_FB_BUFFER)
- ret = drm_addbufs_fb(dev, &request);
+ if (request->flags & _DRM_SG_BUFFER)
+ ret = drm_addbufs_sg(dev, request);
+ else if (request->flags & _DRM_FB_BUFFER)
+ ret = drm_addbufs_fb(dev, request);
else
- ret = drm_addbufs_pci(dev, &request);
+ ret = drm_addbufs_pci(dev, request);
- if (ret == 0) {
- if (copy_to_user((void __user *) arg, &request,
- sizeof(request))) {
- ret = -EFAULT;
- }
- }
return ret;
}
@@ -1336,7 +1306,7 @@ int drm_addbufs(struct inode *inode, struct file *filp,
* large buffers can be used for image transfer).
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_info structure.
* \return zero on success or a negative number on failure.
@@ -1345,14 +1315,11 @@ int drm_addbufs(struct inode *inode, struct file *filp,
* lock, preventing of allocating more buffers after this call. Information
* about each requested buffer is then copied into user space.
*/
-int drm_infobufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_infobufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_info_t request;
- drm_buf_info_t __user *argp = (void __user *)arg;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_info *request = data;
int i;
int count;
@@ -1370,9 +1337,6 @@ int drm_infobufs(struct inode *inode, struct file *filp,
++dev->buf_use; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
-
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count)
++count;
@@ -1380,13 +1344,13 @@ int drm_infobufs(struct inode *inode, struct file *filp,
DRM_DEBUG("count = %d\n", count);
- if (request.count >= count) {
+ if (request->count >= count) {
for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
if (dma->bufs[i].buf_count) {
- drm_buf_desc_t __user *to =
- &request.list[count];
- drm_buf_entry_t *from = &dma->bufs[i];
- drm_freelist_t *list = &dma->bufs[i].freelist;
+ struct drm_buf_desc __user *to =
+ &request->list[count];
+ struct drm_buf_entry *from = &dma->bufs[i];
+ struct drm_freelist *list = &dma->bufs[i].freelist;
if (copy_to_user(&to->count,
&from->buf_count,
sizeof(from->buf_count)) ||
@@ -1411,10 +1375,7 @@ int drm_infobufs(struct inode *inode, struct file *filp,
}
}
}
- request.count = count;
-
- if (copy_to_user(argp, &request, sizeof(request)))
- return -EFAULT;
+ request->count = count;
return 0;
}
@@ -1423,7 +1384,7 @@ int drm_infobufs(struct inode *inode, struct file *filp,
* Specifies a low and high water mark for buffer allocation
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg a pointer to a drm_buf_desc structure.
* \return zero on success or a negative number on failure.
@@ -1433,15 +1394,13 @@ int drm_infobufs(struct inode *inode, struct file *filp,
*
* \note This ioctl is deprecated and mostly never used.
*/
-int drm_markbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_markbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_desc_t request;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_desc *request = data;
int order;
- drm_buf_entry_t *entry;
+ struct drm_buf_entry *entry;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1449,24 +1408,20 @@ int drm_markbufs(struct inode *inode, struct file *filp,
if (!dma)
return -EINVAL;
- if (copy_from_user(&request,
- (drm_buf_desc_t __user *) arg, sizeof(request)))
- return -EFAULT;
-
DRM_DEBUG("%d, %d, %d\n",
- request.size, request.low_mark, request.high_mark);
- order = drm_order(request.size);
+ request->size, request->low_mark, request->high_mark);
+ order = drm_order(request->size);
if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
return -EINVAL;
entry = &dma->bufs[order];
- if (request.low_mark < 0 || request.low_mark > entry->buf_count)
+ if (request->low_mark < 0 || request->low_mark > entry->buf_count)
return -EINVAL;
- if (request.high_mark < 0 || request.high_mark > entry->buf_count)
+ if (request->high_mark < 0 || request->high_mark > entry->buf_count)
return -EINVAL;
- entry->freelist.low_mark = request.low_mark;
- entry->freelist.high_mark = request.high_mark;
+ entry->freelist.low_mark = request->low_mark;
+ entry->freelist.high_mark = request->high_mark;
return 0;
}
@@ -1475,7 +1430,7 @@ int drm_markbufs(struct inode *inode, struct file *filp,
* Unreserve the buffers in list, previously reserved using drmDMA.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_free structure.
* \return zero on success or a negative number on failure.
@@ -1483,16 +1438,14 @@ int drm_markbufs(struct inode *inode, struct file *filp,
* Calls free_buffer() for each used buffer.
* This function is primarily used for debugging.
*/
-int drm_freebufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_freebufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_free_t request;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf_free *request = data;
int i;
int idx;
- drm_buf_t *buf;
+ struct drm_buf *buf;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
return -EINVAL;
@@ -1500,13 +1453,9 @@ int drm_freebufs(struct inode *inode, struct file *filp,
if (!dma)
return -EINVAL;
- if (copy_from_user(&request,
- (drm_buf_free_t __user *) arg, sizeof(request)))
- return -EFAULT;
-
- DRM_DEBUG("%d\n", request.count);
- for (i = 0; i < request.count; i++) {
- if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
+ DRM_DEBUG("%d\n", request->count);
+ for (i = 0; i < request->count; i++) {
+ if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
return -EFAULT;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("Index %d (of %d max)\n",
@@ -1514,7 +1463,7 @@ int drm_freebufs(struct inode *inode, struct file *filp,
return -EINVAL;
}
buf = dma->buflist[idx];
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("Process %d freeing buffer not owned\n",
current->pid);
return -EINVAL;
@@ -1529,7 +1478,7 @@ int drm_freebufs(struct inode *inode, struct file *filp,
* Maps all of the DMA buffers into client-virtual space (ioctl).
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg pointer to a drm_buf_map structure.
* \return zero on success or a negative number on failure.
@@ -1539,18 +1488,15 @@ int drm_freebufs(struct inode *inode, struct file *filp,
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
* drm_mmap_dma().
*/
-int drm_mapbufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mapbufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_map_t __user *argp = (void __user *)arg;
+ struct drm_device_dma *dma = dev->dma;
int retcode = 0;
const int zero = 0;
unsigned long virtual;
unsigned long address;
- drm_buf_map_t request;
+ struct drm_buf_map *request = data;
int i;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
@@ -1567,16 +1513,13 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
dev->buf_use++; /* Can't allocate more after this call */
spin_unlock(&dev->count_lock);
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
-
- if (request.count >= dma->buf_count) {
+ if (request->count >= dma->buf_count) {
if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
|| (drm_core_check_feature(dev, DRIVER_SG)
&& (dma->flags & _DRM_DMA_USE_SG))
|| (drm_core_check_feature(dev, DRIVER_FB_DMA)
&& (dma->flags & _DRM_DMA_USE_FB))) {
- drm_map_t *map = dev->agp_buffer_map;
+ struct drm_map *map = dev->agp_buffer_map;
unsigned long token = dev->agp_buffer_token;
if (!map) {
@@ -1584,14 +1527,14 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
goto done;
}
down_write(&current->mm->mmap_sem);
- virtual = do_mmap(filp, 0, map->size,
+ virtual = do_mmap(file_priv->filp, 0, map->size,
PROT_READ | PROT_WRITE,
MAP_SHARED,
token);
up_write(&current->mm->mmap_sem);
} else {
down_write(&current->mm->mmap_sem);
- virtual = do_mmap(filp, 0, dma->byte_count,
+ virtual = do_mmap(file_priv->filp, 0, dma->byte_count,
PROT_READ | PROT_WRITE,
MAP_SHARED, 0);
up_write(&current->mm->mmap_sem);
@@ -1601,28 +1544,28 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
retcode = (signed long)virtual;
goto done;
}
- request.virtual = (void __user *)virtual;
+ request->virtual = (void __user *)virtual;
for (i = 0; i < dma->buf_count; i++) {
- if (copy_to_user(&request.list[i].idx,
+ if (copy_to_user(&request->list[i].idx,
&dma->buflist[i]->idx,
- sizeof(request.list[0].idx))) {
+ sizeof(request->list[0].idx))) {
retcode = -EFAULT;
goto done;
}
- if (copy_to_user(&request.list[i].total,
+ if (copy_to_user(&request->list[i].total,
&dma->buflist[i]->total,
- sizeof(request.list[0].total))) {
+ sizeof(request->list[0].total))) {
retcode = -EFAULT;
goto done;
}
- if (copy_to_user(&request.list[i].used,
+ if (copy_to_user(&request->list[i].used,
&zero, sizeof(zero))) {
retcode = -EFAULT;
goto done;
}
address = virtual + dma->buflist[i]->offset; /* *** */
- if (copy_to_user(&request.list[i].address,
+ if (copy_to_user(&request->list[i].address,
&address, sizeof(address))) {
retcode = -EFAULT;
goto done;
@@ -1630,11 +1573,8 @@ int drm_mapbufs(struct inode *inode, struct file *filp,
}
}
done:
- request.count = dma->buf_count;
- DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
-
- if (copy_to_user(argp, &request, sizeof(request)))
- return -EFAULT;
+ request->count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);
return retcode;
}
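
The hunks above all follow the same conversion: each handler loses the old (inode, filp, cmd, arg) prototype together with its copy_from_user()/copy_to_user() pairs, and instead receives the argument struct through the new (dev, data, file_priv) prototype. A minimal sketch of the resulting handler shape, assuming the DRM core copies the ioctl argument in and out around the call; struct drm_foo and drm_foo_ioctl() are hypothetical stand-ins, not part of this patch:

struct drm_foo {
	int count;
};

int drm_foo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_foo *request = data;	/* already copied in from userspace */

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	request->count = 0;		/* copied back out by the core on return */
	return 0;
}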
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 08d20d06..e51aedb7 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -196,15 +196,16 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
return ret;
}
-static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data)
{
unsigned long address = data->address;
- drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
- drm_ttm_t *ttm;
- drm_device_t *dev;
+ struct drm_ttm *ttm;
+ struct drm_device *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
@@ -261,7 +262,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
- drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
@@ -350,11 +351,11 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
unsigned long address,
int *type)
{
- drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
- drm_ttm_t *ttm;
- drm_device_t *dev;
+ struct drm_ttm *ttm;
+ struct drm_device *dev;
mutex_lock(&bo->mutex);
@@ -394,7 +395,7 @@ out_unlock:
int drm_bo_map_bound(struct vm_area_struct *vma)
{
- drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data;
+ struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
int ret = 0;
unsigned long bus_base;
unsigned long bus_offset;
@@ -405,7 +406,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
BUG_ON(ret);
if (bus_size) {
- drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type];
+ struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
@@ -417,7 +418,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
}
-int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
+int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n_entry;
vma_entry_t *v_entry;
@@ -453,7 +454,7 @@ int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
return 0;
}
-void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
+void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
p_mm_entry_t *entry, *n;
vma_entry_t *v_entry, *v_n;
@@ -485,7 +486,7 @@ void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma)
-int drm_bo_lock_kmm(drm_buffer_object_t * bo)
+int drm_bo_lock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
@@ -517,7 +518,7 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo)
return -EAGAIN;
}
-void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
+void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
@@ -528,7 +529,7 @@ void drm_bo_unlock_kmm(drm_buffer_object_t * bo)
}
}
-int drm_bo_remap_bound(drm_buffer_object_t *bo)
+int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
vma_entry_t *v_entry;
int ret = 0;
@@ -544,7 +545,7 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo)
return ret;
}
-void drm_bo_finish_unmap(drm_buffer_object_t *bo)
+void drm_bo_finish_unmap(struct drm_buffer_object *bo)
{
vma_entry_t *v_entry;
@@ -677,4 +678,51 @@ void idr_remove_all(struct idr *idp)
idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
+
+#endif /* DRM_IDR_COMPAT_FN */
+
+
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+/**
+ * idr_replace - replace pointer for given id
+ * @idp: idr handle
+ * @ptr: pointer you want associated with the id
+ * @id: lookup key
+ *
+ * Replace the pointer registered with an id and return the old value.
+ * A -ENOENT return indicates that @id was not found.
+ * A -EINVAL return indicates that @id was not within valid constraints.
+ *
+ * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
+ */
+void *idr_replace(struct idr *idp, void *ptr, int id)
+{
+ int n;
+ struct idr_layer *p, *old_p;
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+
+ id &= MAX_ID_MASK;
+
+ if (id >= (1 << n))
+ return ERR_PTR(-EINVAL);
+
+ n -= IDR_BITS;
+ while ((n > 0) && p) {
+ p = p->ary[(id >> n) & IDR_MASK];
+ n -= IDR_BITS;
+ }
+
+ n = id & IDR_MASK;
+ if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
+ return ERR_PTR(-ENOENT);
+
+ old_p = p->ary[n];
+ p->ary[n] = ptr;
+
+ return (void *)old_p;
+}
+EXPORT_SYMBOL(idr_replace);
#endif
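
The compat idr_replace() added above mirrors the upstream helper so that callers converted later in this patch (drm_setsareactx(), drm_update_drawable_info()) can swap the pointer stored for an already-allocated id. A sketch of the intended calling pattern, assuming an idr populated with NULL placeholders; example_attach(), my_idr and obj are hypothetical names:

static int example_attach(struct idr *my_idr, void *obj, int *id_out)
{
	int id, ret;

	if (idr_pre_get(my_idr, GFP_KERNEL) == 0)
		return -ENOMEM;
	ret = idr_get_new_above(my_idr, NULL, 1, &id);	/* reserve id, NULL payload */
	if (ret)
		return ret;	/* -EAGAIN would normally be retried, as the callers above do */

	/* Later: attach the real object; an ERR_PTR means the id was never allocated. */
	if (IS_ERR(idr_replace(my_idr, obj, id)))
		return -EINVAL;

	*id_out = id;
	return 0;
}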
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index 1cf7f165..e906a539 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -313,12 +313,23 @@ extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
-/* fixme when functions are upstreamed */
+/* fixme when functions are upstreamed - upstreamed for 2.6.23 */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
#define DRM_IDR_COMPAT_FN
+#endif
#ifdef DRM_IDR_COMPAT_FN
int idr_for_each(struct idr *idp,
int (*fn)(int id, void *p, void *data), void *data);
void idr_remove_all(struct idr *idp);
#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+void *idr_replace(struct idr *idp, void *ptr, int id);
+#endif
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
+typedef _Bool bool;
+#endif
+
#endif
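
The pattern used throughout drm_compat.h is to key each shim off LINUX_VERSION_CODE so it disappears once the running kernel provides the symbol itself. A sketch of that idiom with a hypothetical helper name, not taken from the patch:

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23))
#define DRM_EXAMPLE_COMPAT_FN		/* kernel lacks the helper, build our own copy */
#endif

#ifdef DRM_EXAMPLE_COMPAT_FN
int drm_example_helper(void);		/* implemented in drm_compat.c on old kernels */
#endif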
diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c
index 101a298c..7854e89c 100644
--- a/linux-core/drm_context.c
+++ b/linux-core/drm_context.c
@@ -56,19 +56,11 @@
* in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
-void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
+void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle)
{
- struct drm_ctx_sarea_list *ctx;
-
mutex_lock(&dev->struct_mutex);
- ctx = idr_find(&dev->ctx_idr, ctx_handle);
- if (ctx) {
- idr_remove(&dev->ctx_idr, ctx_handle);
- drm_free(ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
- } else
- DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
+ idr_remove(&dev->ctx_idr, ctx_handle);
mutex_unlock(&dev->struct_mutex);
- return;
}
/**
@@ -80,24 +72,19 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
* Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
-static int drm_ctxbitmap_next(drm_device_t * dev)
+static int drm_ctxbitmap_next(struct drm_device *dev)
{
int new_id;
int ret;
- struct drm_ctx_sarea_list *new_ctx;
-
- new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
- if (!new_ctx)
- return -1;
again:
if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
- drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
return -ENOMEM;
}
mutex_lock(&dev->struct_mutex);
- ret = idr_get_new_above(&dev->ctx_idr, new_ctx, DRM_RESERVED_CONTEXTS, &new_id);
+ ret = idr_get_new_above(&dev->ctx_idr, NULL,
+ DRM_RESERVED_CONTEXTS, &new_id);
if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
goto again;
@@ -114,21 +101,12 @@ again:
*
* Initialise the drm_device::ctx_idr
*/
-int drm_ctxbitmap_init(drm_device_t * dev)
+int drm_ctxbitmap_init(struct drm_device *dev)
{
idr_init(&dev->ctx_idr);
return 0;
}
-
-
-static int drm_ctx_sarea_free(int id, void *p, void *data)
-{
- struct drm_ctx_sarea_list *ctx_entry = p;
- drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
- return 0;
-}
-
/**
* Context bitmap cleanup.
*
@@ -137,10 +115,9 @@ static int drm_ctx_sarea_free(int id, void *p, void *data)
* Free all idr members using drm_ctx_sarea_free helper function
* while holding the drm_device::struct_mutex lock.
*/
-void drm_ctxbitmap_cleanup(drm_device_t * dev)
+void drm_ctxbitmap_cleanup(struct drm_device *dev)
{
mutex_lock(&dev->struct_mutex);
- idr_for_each(&dev->ctx_idr, drm_ctx_sarea_free, NULL);
idr_remove_all(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
@@ -155,7 +132,7 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev)
* Get per-context SAREA.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
@@ -163,44 +140,34 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev)
* Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
-int drm_getsareactx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_getsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_priv_map_t __user *argp = (void __user *)arg;
- drm_ctx_priv_map_t request;
- drm_map_t *map;
- drm_map_list_t *_entry;
- struct drm_ctx_sarea_list *ctx_sarea;
-
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
+ struct drm_ctx_priv_map *request = data;
+ struct drm_map *map;
+ struct drm_map_list *_entry;
mutex_lock(&dev->struct_mutex);
- ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
- if (!ctx_sarea) {
+ map = idr_find(&dev->ctx_idr, request->ctx_id);
+ if (!map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
- map = ctx_sarea->map;
mutex_unlock(&dev->struct_mutex);
- request.handle = NULL;
+ request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
- request.handle =
+ request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}
}
- if (request.handle == NULL)
+ if (request->handle == NULL)
return -EINVAL;
- if (copy_to_user(argp, &request, sizeof(request)))
- return -EFAULT;
return 0;
}
@@ -208,7 +175,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
* Set per-context SAREA.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
@@ -216,24 +183,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
 * Searches the mapping specified in \p arg and updates the entry in
* drm_device::ctx_idr with it.
*/
-int drm_setsareactx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_setsareactx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_priv_map_t request;
- drm_map_t *map = NULL;
- drm_map_list_t *r_list = NULL;
- struct drm_ctx_sarea_list *ctx_sarea;
-
- if (copy_from_user(&request,
- (drm_ctx_priv_map_t __user *) arg, sizeof(request)))
- return -EFAULT;
+ struct drm_ctx_priv_map *request = data;
+ struct drm_map *map = NULL;
+ struct drm_map_list *r_list = NULL;
mutex_lock(&dev->struct_mutex);
list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
- && r_list->user_token == (unsigned long) request.handle)
+ && r_list->user_token == (unsigned long) request->handle)
goto found;
}
bad:
@@ -245,14 +205,11 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
if (!map)
goto bad;
- mutex_lock(&dev->struct_mutex);
-
- ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
- if (!ctx_sarea)
+ if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id)))
goto bad;
- ctx_sarea->map = map;
mutex_unlock(&dev->struct_mutex);
+
return 0;
}
@@ -272,7 +229,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
*
* Attempt to set drm_device::context_flag.
*/
-static int drm_context_switch(drm_device_t * dev, int old, int new)
+static int drm_context_switch(struct drm_device *dev, int old, int new)
{
if (test_and_set_bit(0, &dev->context_flag)) {
DRM_ERROR("Reentering -- FIXME\n");
@@ -300,7 +257,7 @@ static int drm_context_switch(drm_device_t * dev, int old, int new)
* hardware lock is held, clears the drm_device::context_flag and wakes up
* drm_device::context_wait.
*/
-static int drm_context_switch_complete(drm_device_t * dev, int new)
+static int drm_context_switch_complete(struct drm_device *dev, int new)
{
dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
dev->last_switch = jiffies;
@@ -322,34 +279,28 @@ static int drm_context_switch_complete(drm_device_t * dev, int new)
* Reserve contexts.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx_res structure.
* \return zero on success or a negative number on failure.
*/
-int drm_resctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_resctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_ctx_res_t res;
- drm_ctx_t __user *argp = (void __user *)arg;
- drm_ctx_t ctx;
+ struct drm_ctx_res *res = data;
+ struct drm_ctx ctx;
int i;
- if (copy_from_user(&res, argp, sizeof(res)))
- return -EFAULT;
-
- if (res.count >= DRM_RESERVED_CONTEXTS) {
+ if (res->count >= DRM_RESERVED_CONTEXTS) {
memset(&ctx, 0, sizeof(ctx));
for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
ctx.handle = i;
- if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx)))
+ if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx)))
return -EFAULT;
}
}
- res.count = DRM_RESERVED_CONTEXTS;
+ res->count = DRM_RESERVED_CONTEXTS;
- if (copy_to_user(argp, &res, sizeof(res)))
- return -EFAULT;
return 0;
}
@@ -357,40 +308,34 @@ int drm_resctx(struct inode *inode, struct file *filp,
* Add context.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Get a new handle for the context and copy to userspace.
*/
-int drm_addctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_addctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_list_t *ctx_entry;
- drm_ctx_t __user *argp = (void __user *)arg;
- drm_ctx_t ctx;
-
- if (copy_from_user(&ctx, argp, sizeof(ctx)))
- return -EFAULT;
+ struct drm_ctx_list *ctx_entry;
+ struct drm_ctx *ctx = data;
- ctx.handle = drm_ctxbitmap_next(dev);
- if (ctx.handle == DRM_KERNEL_CONTEXT) {
+ ctx->handle = drm_ctxbitmap_next(dev);
+ if (ctx->handle == DRM_KERNEL_CONTEXT) {
/* Skip kernel's context and get a new one. */
- ctx.handle = drm_ctxbitmap_next(dev);
+ ctx->handle = drm_ctxbitmap_next(dev);
}
- DRM_DEBUG("%d\n", ctx.handle);
- if (ctx.handle == -1) {
+ DRM_DEBUG("%d\n", ctx->handle);
+ if (ctx->handle == -1) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
}
- if (ctx.handle != DRM_KERNEL_CONTEXT) {
+ if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_ctor)
- if (!dev->driver->context_ctor(dev, ctx.handle)) {
+ if (!dev->driver->context_ctor(dev, ctx->handle)) {
DRM_DEBUG("Running out of ctxs or memory.\n");
return -ENOMEM;
}
@@ -403,21 +348,18 @@ int drm_addctx(struct inode *inode, struct file *filp,
}
INIT_LIST_HEAD(&ctx_entry->head);
- ctx_entry->handle = ctx.handle;
- ctx_entry->tag = priv;
+ ctx_entry->handle = ctx->handle;
+ ctx_entry->tag = file_priv;
mutex_lock(&dev->ctxlist_mutex);
list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
- if (copy_to_user(argp, &ctx, sizeof(ctx)))
- return -EFAULT;
return 0;
}
-int drm_modctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
/* This does nothing */
return 0;
@@ -427,25 +369,18 @@ int drm_modctx(struct inode *inode, struct file *filp,
* Get context.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*/
-int drm_getctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_ctx_t __user *argp = (void __user *)arg;
- drm_ctx_t ctx;
-
- if (copy_from_user(&ctx, argp, sizeof(ctx)))
- return -EFAULT;
+ struct drm_ctx *ctx = data;
/* This is 0, because we don't handle any context flags */
- ctx.flags = 0;
+ ctx->flags = 0;
- if (copy_to_user(argp, &ctx, sizeof(ctx)))
- return -EFAULT;
return 0;
}
@@ -453,50 +388,40 @@ int drm_getctx(struct inode *inode, struct file *filp,
* Switch context.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch().
*/
-int drm_switchctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_switchctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_t ctx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
+ struct drm_ctx *ctx = data;
- DRM_DEBUG("%d\n", ctx.handle);
- return drm_context_switch(dev, dev->last_context, ctx.handle);
+ DRM_DEBUG("%d\n", ctx->handle);
+ return drm_context_switch(dev, dev->last_context, ctx->handle);
}
/**
* New context.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* Calls context_switch_complete().
*/
-int drm_newctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_newctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_t ctx;
+ struct drm_ctx *ctx = data;
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
-
- DRM_DEBUG("%d\n", ctx.handle);
- drm_context_switch_complete(dev, ctx.handle);
+ DRM_DEBUG("%d\n", ctx->handle);
+ drm_context_switch_complete(dev, ctx->handle);
return 0;
}
@@ -505,39 +430,34 @@ int drm_newctx(struct inode *inode, struct file *filp,
* Remove context.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument pointing to a drm_ctx structure.
* \return zero on success or a negative number on failure.
*
* If not the special kernel context, calls ctxbitmap_free() to free the specified context.
*/
-int drm_rmctx(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_rmctx(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ctx_t ctx;
-
- if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
- return -EFAULT;
+ struct drm_ctx *ctx = data;
- DRM_DEBUG("%d\n", ctx.handle);
- if (ctx.handle == DRM_KERNEL_CONTEXT + 1) {
- priv->remove_auth_on_close = 1;
+ DRM_DEBUG("%d\n", ctx->handle);
+ if (ctx->handle == DRM_KERNEL_CONTEXT + 1) {
+ file_priv->remove_auth_on_close = 1;
}
- if (ctx.handle != DRM_KERNEL_CONTEXT) {
+ if (ctx->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
- dev->driver->context_dtor(dev, ctx.handle);
- drm_ctxbitmap_free(dev, ctx.handle);
+ dev->driver->context_dtor(dev, ctx->handle);
+ drm_ctxbitmap_free(dev, ctx->handle);
}
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist)) {
- drm_ctx_list_t *pos, *n;
+ struct drm_ctx_list *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->handle == ctx.handle) {
+ if (pos->handle == ctx->handle) {
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
--dev->ctx_count;
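
With struct drm_ctx_sarea_list gone, drm_device::ctx_idr now maps a context handle directly to its SAREA map, or to NULL until one is attached via drm_setsareactx(). A sketch of a lookup under that scheme, assuming the caller only needs the map pointer; example_ctx_sarea() is a hypothetical helper, not part of the patch:

static struct drm_map *example_ctx_sarea(struct drm_device *dev, int ctx_id)
{
	struct drm_map *map;

	mutex_lock(&dev->struct_mutex);
	map = idr_find(&dev->ctx_idr, ctx_id);	/* NULL if no SAREA attached yet */
	mutex_unlock(&dev->struct_mutex);

	return map;
}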
diff --git a/linux-core/drm_crtc.c b/linux-core/drm_crtc.c
index df00a7d2..ed5f1dfa 100644
--- a/linux-core/drm_crtc.c
+++ b/linux-core/drm_crtc.c
@@ -117,7 +117,7 @@ struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev,
* RETURNS:
* Pointer to new framebuffer or NULL on error.
*/
-struct drm_framebuffer *drm_framebuffer_create(drm_device_t *dev)
+struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev)
{
struct drm_framebuffer *fb;
@@ -153,7 +153,7 @@ EXPORT_SYMBOL(drm_framebuffer_create);
*/
void drm_framebuffer_destroy(struct drm_framebuffer *fb)
{
- drm_device_t *dev = fb->dev;
+ struct drm_device *dev = fb->dev;
struct drm_crtc *crtc;
/* remove from any CRTC */
@@ -183,7 +183,7 @@ EXPORT_SYMBOL(drm_framebuffer_destroy);
* RETURNS:
* Pointer to new CRTC object or NULL on error.
*/
-struct drm_crtc *drm_crtc_create(drm_device_t *dev,
+struct drm_crtc *drm_crtc_create(struct drm_device *dev,
const struct drm_crtc_funcs *funcs)
{
struct drm_crtc *crtc;
@@ -216,7 +216,7 @@ EXPORT_SYMBOL(drm_crtc_create);
*/
void drm_crtc_destroy(struct drm_crtc *crtc)
{
- drm_device_t *dev = crtc->dev;
+ struct drm_device *dev = crtc->dev;
if (crtc->funcs->cleanup)
(*crtc->funcs->cleanup)(crtc);
@@ -243,7 +243,7 @@ EXPORT_SYMBOL(drm_crtc_destroy);
bool drm_crtc_in_use(struct drm_crtc *crtc)
{
struct drm_output *output;
- drm_device_t *dev = crtc->dev;
+ struct drm_device *dev = crtc->dev;
/* FIXME: Locking around list access? */
list_for_each_entry(output, &dev->mode_config.output_list, head)
if (output->crtc == crtc)
@@ -369,7 +369,7 @@ void drm_crtc_probe_output_modes(struct drm_device *dev, int maxX, int maxY)
bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode,
int x, int y)
{
- drm_device_t *dev = crtc->dev;
+ struct drm_device *dev = crtc->dev;
struct drm_display_mode *adjusted_mode, saved_mode;
int saved_x, saved_y;
bool didLock = false;
@@ -549,7 +549,7 @@ EXPORT_SYMBOL(drm_mode_remove);
* RETURNS:
* Pointer to the new output or NULL on error.
*/
-struct drm_output *drm_output_create(drm_device_t *dev,
+struct drm_output *drm_output_create(struct drm_device *dev,
const struct drm_output_funcs *funcs,
const char *name)
{
@@ -698,7 +698,7 @@ EXPORT_SYMBOL(drm_mode_destroy);
* Initialize @dev's mode_config structure, used for tracking the graphics
* configuration of @dev.
*/
-void drm_mode_config_init(drm_device_t *dev)
+void drm_mode_config_init(struct drm_device *dev)
{
mutex_init(&dev->mode_config.mutex);
INIT_LIST_HEAD(&dev->mode_config.fb_list);
@@ -724,10 +724,10 @@ EXPORT_SYMBOL(drm_mode_config_init);
* RETURNS:
* Zero on success, -EINVAL if the handle couldn't be found.
*/
-static int drm_get_buffer_object(drm_device_t *dev, struct drm_buffer_object **bo, unsigned long handle)
+static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle)
{
- drm_user_object_t *uo;
- drm_hash_item_t *hash;
+ struct drm_user_object *uo;
+ struct drm_hash_item *hash;
int ret;
*bo = NULL;
@@ -740,13 +740,13 @@ static int drm_get_buffer_object(drm_device_t *dev, struct drm_buffer_object **b
goto out_err;
}
- uo = drm_hash_entry(hash, drm_user_object_t, hash);
+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
if (uo->type != drm_buffer_type) {
ret = -EINVAL;
goto out_err;
}
- *bo = drm_user_object_entry(uo, drm_buffer_object_t, base);
+ *bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
ret = 0;
out_err:
mutex_unlock(&dev->struct_mutex);
@@ -760,7 +760,7 @@ out_err:
* LOCKING:
* Caller must hold mode config lock.
*/
-static void drm_pick_crtcs (drm_device_t *dev)
+static void drm_pick_crtcs (struct drm_device *dev)
{
int c, o;
struct drm_output *output, *output_equal;
@@ -853,7 +853,7 @@ clone:
* RETURNS:
* Zero if everything went ok, nonzero otherwise.
*/
-bool drm_initial_config(drm_device_t *dev, bool can_grow)
+bool drm_initial_config(struct drm_device *dev, bool can_grow)
{
struct drm_output *output;
int ret = false;
@@ -891,7 +891,7 @@ EXPORT_SYMBOL(drm_initial_config);
*
* FIXME: cleanup any dangling user buffer objects too
*/
-void drm_mode_config_cleanup(drm_device_t *dev)
+void drm_mode_config_cleanup(struct drm_device *dev)
{
struct drm_output *output, *ot;
struct drm_crtc *crtc, *ct;
@@ -942,7 +942,7 @@ EXPORT_SYMBOL(drm_mode_config_cleanup);
*/
int drm_crtc_set_config(struct drm_crtc *crtc, struct drm_mode_crtc *crtc_info, struct drm_display_mode *new_mode, struct drm_output **output_set, struct drm_framebuffer *fb)
{
- drm_device_t *dev = crtc->dev;
+ struct drm_device *dev = crtc->dev;
struct drm_crtc **save_crtcs, *new_crtc;
bool save_enabled = crtc->enabled;
bool changed;
@@ -1085,12 +1085,10 @@ void drm_crtc_convert_umode(struct drm_display_mode *out, struct drm_mode_modein
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_getresources(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_card_res __user *argp = (void __user *)arg;
+ struct drm_mode_card_res __user *argp = (void __user *)data;
struct drm_mode_card_res card_res;
struct list_head *lh;
struct drm_framebuffer *fb;
@@ -1236,12 +1234,10 @@ out_unlock:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_getcrtc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_crtc __user *argp = (void __user *)arg;
+ struct drm_mode_crtc __user *argp = (void __user *)data;
struct drm_mode_crtc crtc_resp;
struct drm_crtc *crtc;
struct drm_output *output;
@@ -1300,12 +1296,10 @@ out:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_getoutput(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_getoutput(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_get_output __user *argp = (void __user *)arg;
+ struct drm_mode_get_output __user *argp = (void __user *)data;
struct drm_mode_get_output out_resp;
struct drm_output *output;
struct drm_display_mode *mode;
@@ -1390,12 +1384,10 @@ out_unlock:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_setcrtc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_crtc __user *argp = (void __user *)arg;
+ struct drm_mode_crtc __user *argp = (void __user *)data;
struct drm_mode_crtc crtc_req;
struct drm_crtc *crtc;
struct drm_output **output_set = NULL, *output;
@@ -1508,12 +1500,10 @@ out:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_addfb(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- struct drm_file *priv = filp->private_data;
- struct drm_device *dev = priv->head->dev;
- struct drm_mode_fb_cmd __user *argp = (void __user *)arg;
+ struct drm_mode_fb_cmd __user *argp = (void __user *)data;
struct drm_mode_fb_cmd r;
struct drm_mode_config *config = &dev->mode_config;
struct drm_framebuffer *fb;
@@ -1560,7 +1550,7 @@ int drm_mode_addfb(struct inode *inode, struct file *filp,
r.buffer_id = fb->id;
- list_add(&fb->filp_head, &priv->fbs);
+ list_add(&fb->filp_head, &file_priv->fbs);
if (copy_to_user(argp, &r, sizeof(r))) {
ret = -EFAULT;
@@ -1595,13 +1585,11 @@ out:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_rmfb(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
struct drm_framebuffer *fb = 0;
- uint32_t id = arg;
+ uint32_t id = data;
int ret = 0;
mutex_lock(&dev->mode_config.mutex);
@@ -1644,12 +1632,10 @@ out:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_getfb(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_fb_cmd __user *argp = (void __user *)arg;
+ struct drm_mode_fb_cmd __user *argp = (void __user *)data;
struct drm_mode_fb_cmd r;
struct drm_framebuffer *fb;
int ret = 0;
@@ -1696,8 +1682,8 @@ out:
*/
void drm_fb_release(struct file *filp)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
struct drm_framebuffer *fb, *tfb;
mutex_lock(&dev->mode_config.mutex);
@@ -1724,12 +1710,10 @@ void drm_fb_release(struct file *filp)
* writes new mode id into arg.
* Zero on success, errno on failure.
*/
-int drm_mode_addmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_addmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_modeinfo __user *argp = (void __user *)arg;
+ struct drm_mode_modeinfo __user *argp = (void __user *)data;
struct drm_mode_modeinfo new_mode;
struct drm_display_mode *user_mode;
int ret = 0;
@@ -1774,12 +1758,10 @@ out:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_rmmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_rmmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- uint32_t id = arg;
+ uint32_t id = (uint32_t)data;
struct drm_display_mode *mode, *t;
int ret = -EINVAL;
@@ -1827,12 +1809,10 @@ out:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_attachmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_attachmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_mode_cmd __user *argp = (void __user *)arg;
+ struct drm_mode_mode_cmd __user *argp = (void __user *)data;
struct drm_mode_mode_cmd mode_cmd;
struct drm_output *output;
struct drm_display_mode *mode;
@@ -1884,12 +1864,10 @@ out:
* RETURNS:
* Zero on success, errno on failure.
*/
-int drm_mode_detachmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_mode_detachmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- struct drm_mode_mode_cmd __user *argp = (void __user *)arg;
+ struct drm_mode_mode_cmd __user *argp = (void __user *)data;
struct drm_mode_mode_cmd mode_cmd;
struct drm_output *output;
struct drm_display_mode *mode;
diff --git a/linux-core/drm_crtc.h b/linux-core/drm_crtc.h
index 75db5b05..d97cd196 100644
--- a/linux-core/drm_crtc.h
+++ b/linux-core/drm_crtc.h
@@ -528,29 +528,29 @@ extern bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mo
int x, int y);
/* IOCTLs */
-extern int drm_mode_getresources(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-extern int drm_mode_getcrtc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_getoutput(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_setcrtc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_addfb(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_rmfb(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_getfb(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_addmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_rmmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_attachmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
-extern int drm_mode_detachmode(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+extern int drm_mode_getresources(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+
+extern int drm_mode_getcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getoutput(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_setcrtc(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_rmfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_getfb(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_addmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_rmmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_attachmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
+extern int drm_mode_detachmode(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
#endif /* __DRM_CRTC_H__ */
diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c
index a7eee1a4..7cc44193 100644
--- a/linux-core/drm_dma.c
+++ b/linux-core/drm_dma.c
@@ -43,7 +43,7 @@
*
* Allocate and initialize a drm_device_dma structure.
*/
-int drm_dma_setup(drm_device_t * dev)
+int drm_dma_setup(struct drm_device * dev)
{
int i;
@@ -67,9 +67,9 @@ int drm_dma_setup(drm_device_t * dev)
* Free all pages associated with DMA buffers, the buffers and pages lists, and
 * finally the drm_device::dma structure itself.
*/
-void drm_dma_takedown(drm_device_t * dev)
+void drm_dma_takedown(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i, j;
if (!dma)
@@ -129,14 +129,14 @@ void drm_dma_takedown(drm_device_t * dev)
*
* Resets the fields of \p buf.
*/
-void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
+void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf)
{
if (!buf)
return;
buf->waiting = 0;
buf->pending = 0;
- buf->filp = NULL;
+ buf->file_priv = NULL;
buf->used = 0;
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)
@@ -148,19 +148,20 @@ void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf)
/**
* Reclaim the buffers.
*
- * \param filp file pointer.
+ * \param file_priv DRM file private.
*
- * Frees each buffer associated with \p filp not already on the hardware.
+ * Frees each buffer associated with \p file_priv not already on the hardware.
*/
-void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp)
+void drm_core_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
return;
for (i = 0; i < dma->buf_count; i++) {
- if (dma->buflist[i]->filp == filp) {
+ if (dma->buflist[i]->file_priv == file_priv) {
switch (dma->buflist[i]->list) {
case DRM_LIST_NONE:
drm_free_buffer(dev, dma->buflist[i]);
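
drm_buf::filp becomes drm_buf::file_priv in these hunks, so both drm_free_buffer() and the reclaim loop above now identify a buffer's owner by its DRM file private rather than by the VFS struct file. A sketch of the ownership test a driver might perform against the new field; example_owns_buffer() is a hypothetical name:

static int example_owns_buffer(struct drm_device *dev, int idx,
			       struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;

	if (!dma || idx < 0 || idx >= dma->buf_count)
		return 0;

	return dma->buflist[idx]->file_priv == file_priv;	/* owner check, no filp involved */
}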
diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c
index eb44a189..1839c576 100644
--- a/linux-core/drm_drawable.c
+++ b/linux-core/drm_drawable.c
@@ -40,28 +40,21 @@
/**
* Allocate drawable ID and memory to store information about it.
*/
-int drm_adddraw(DRM_IOCTL_ARGS)
+int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
unsigned long irqflags;
- struct drm_drawable_list *draw_info;
- drm_draw_t draw;
+ struct drm_draw *draw = data;
int new_id = 0;
int ret;
- draw_info = drm_calloc(1, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
- if (!draw_info)
- return -ENOMEM;
-
again:
if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
DRM_ERROR("Out of memory expanding drawable idr\n");
- drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
return -ENOMEM;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
- ret = idr_get_new_above(&dev->drw_idr, draw_info, 1, &new_id);
+ ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
if (ret == -EAGAIN) {
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
goto again;
@@ -69,11 +62,9 @@ again:
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- draw.handle = new_id;
-
- DRM_DEBUG("%d\n", draw.handle);
+ draw->handle = new_id;
- DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
+ DRM_DEBUG("%d\n", draw->handle);
return 0;
}
@@ -81,73 +72,64 @@ again:
/**
* Free drawable ID and memory to store information about it.
*/
-int drm_rmdraw(DRM_IOCTL_ARGS)
+int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_draw_t draw;
+ struct drm_draw *draw = data;
unsigned long irqflags;
- struct drm_drawable_list *draw_info;
-
- DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
- sizeof(draw));
-
- draw_info = idr_find(&dev->drw_idr, draw.handle);
- if (!draw_info) {
- DRM_DEBUG("No such drawable %d\n", draw.handle);
- return -EINVAL;
- }
spin_lock_irqsave(&dev->drw_lock, irqflags);
- idr_remove(&dev->drw_idr, draw.handle);
- drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
+ drm_free(drm_get_drawable_info(dev, draw->handle),
+ sizeof(struct drm_drawable_info), DRM_MEM_BUFS);
+
+ idr_remove(&dev->drw_idr, draw->handle);
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("%d\n", draw.handle);
+ DRM_DEBUG("%d\n", draw->handle);
return 0;
}
-int drm_update_drawable_info(DRM_IOCTL_ARGS) {
- DRM_DEVICE;
- drm_update_draw_t update;
+int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ struct drm_update_draw *update = data;
unsigned long irqflags;
- drm_drawable_info_t *info;
- drm_clip_rect_t *rects;
- struct drm_drawable_list *draw_info;
+ struct drm_clip_rect *rects;
+ struct drm_drawable_info *info;
int err;
- DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
- sizeof(update));
-
- draw_info = idr_find(&dev->drw_idr, update.handle);
- if (!draw_info) {
- DRM_ERROR("No such drawable %d\n", update.handle);
- return DRM_ERR(EINVAL);
+ info = idr_find(&dev->drw_idr, update->handle);
+ if (!info) {
+ info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS);
+ if (!info)
+ return -ENOMEM;
+ if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
+ DRM_ERROR("No such drawable %d\n", update->handle);
+ drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+ return -EINVAL;
+ }
}
- info = &draw_info->info;
-
- switch (update.type) {
+ switch (update->type) {
case DRM_DRAWABLE_CLIPRECTS:
- if (update.num != info->num_rects) {
- rects = drm_alloc(update.num * sizeof(drm_clip_rect_t),
+ if (update->num != info->num_rects) {
+ rects = drm_alloc(update->num * sizeof(struct drm_clip_rect),
DRM_MEM_BUFS);
} else
rects = info->rects;
- if (update.num && !rects) {
+ if (update->num && !rects) {
DRM_ERROR("Failed to allocate cliprect memory\n");
- err = DRM_ERR(ENOMEM);
+ err = -ENOMEM;
goto error;
}
- if (update.num && DRM_COPY_FROM_USER(rects,
- (drm_clip_rect_t __user *)
- (unsigned long)update.data,
- update.num *
+ if (update->num && DRM_COPY_FROM_USER(rects,
+ (struct drm_clip_rect __user *)
+ (unsigned long)update->data,
+ update->num *
sizeof(*rects))) {
DRM_ERROR("Failed to copy cliprects from userspace\n");
- err = DRM_ERR(EFAULT);
+ err = -EFAULT;
goto error;
}
@@ -155,27 +137,27 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
if (rects != info->rects) {
drm_free(info->rects, info->num_rects *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+ sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
}
info->rects = rects;
- info->num_rects = update.num;
+ info->num_rects = update->num;
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, update.handle);
+ info->num_rects, update->handle);
break;
default:
- DRM_ERROR("Invalid update type %d\n", update.type);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("Invalid update type %d\n", update->type);
+ return -EINVAL;
}
return 0;
error:
if (rects != info->rects)
- drm_free(rects, update.num * sizeof(drm_clip_rect_t),
+ drm_free(rects, update->num * sizeof(struct drm_clip_rect),
DRM_MEM_BUFS);
return err;
@@ -184,28 +166,26 @@ error:
/**
* Caller must hold the drawable spinlock!
*/
-drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
- struct drm_drawable_list *draw_info;
- draw_info = idr_find(&dev->drw_idr, id);
- if (!draw_info) {
- DRM_DEBUG("No such drawable %d\n", id);
- return NULL;
- }
-
- return &draw_info->info;
+struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
+{
+ return idr_find(&dev->drw_idr, id);
}
EXPORT_SYMBOL(drm_get_drawable_info);
static int drm_drawable_free(int idr, void *p, void *data)
{
- struct drm_drawable_list *drw_entry = p;
- drm_free(drw_entry->info.rects, drw_entry->info.num_rects *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
- drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
+ struct drm_drawable_info *info = p;
+
+ if (info) {
+ drm_free(info->rects, info->num_rects *
+ sizeof(struct drm_clip_rect), DRM_MEM_BUFS);
+ drm_free(info, sizeof(*info), DRM_MEM_BUFS);
+ }
+
return 0;
}
-void drm_drawable_free_all(drm_device_t *dev)
+void drm_drawable_free_all(struct drm_device *dev)
{
idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
idr_remove_all(&dev->drw_idr);
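
drm_drawable_free_all() above relies on the idr_for_each()/idr_remove_all() pair supplied by drm_compat.c on older kernels: walk the idr freeing whatever each id points at, then drop every id in one pass. A sketch of the same teardown idiom for a generic idr, assuming kfree()-allocated payloads; example_free_one() and example_free_all() are hypothetical:

static int example_free_one(int id, void *p, void *data)
{
	kfree(p);			/* p may be NULL for ids without a payload */
	return 0;
}

static void example_free_all(struct idr *idp)
{
	idr_for_each(idp, example_free_one, NULL);	/* free every stored pointer */
	idr_remove_all(idp);				/* then release all ids */
}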
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 0b4f8095..9004d535 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -48,92 +48,119 @@
#include "drmP.h"
#include "drm_core.h"
-static void drm_cleanup(drm_device_t * dev);
+static void drm_cleanup(struct drm_device * dev);
int drm_fb_loaded = 0;
-static int drm_version(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg);
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/** Ioctl table */
-static drm_ioctl_desc_t drm_ioctls[] = {
- [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY},
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH},
-
- [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = {drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-
- [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH},
-
- [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH},
-
- [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH},
+static struct drm_ioctl_desc drm_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH),
/* The DRM_IOCTL_DMA ioctl should be defined by the driver. */
- [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH},
+ DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH),
- [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if __OS_HAS_AGP
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
- [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
-
- [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0},
- [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl,
- DRM_AUTH },
-
- [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETRESOURCES)] = {drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETCRTC)] = {drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETOUTPUT)] = {drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_SETCRTC)] = {drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB)] = {drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_RMFB)] = {drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETFB)] = {drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDMODE)] = {drm_mode_addmode, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_RMMODE)] = {drm_mode_rmmode, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_ATTACHMODE)] = {drm_mode_attachmode, DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_IOCTL_MODE_DETACHMODE)] = {drm_mode_detachmode, DRM_MASTER|DRM_ROOT_ONLY},
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDMODE, drm_mode_addmode, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMMODE, drm_mode_rmmode, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode, DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode, DRM_MASTER|DRM_ROOT_ONLY),
+ // DRM_IOCTL_DEF(DRM_IOCTL_BUFOBJ, drm_bo_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
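For reference, the DRM_IOCTL_DEF() macro used in the new table is not shown in this hunk; it presumably expands to a designated array initializer recording the command word, handler and permission flags, matching the ioctl->cmd/func/flags fields the dispatcher below reads. A hedged sketch, assuming that layout:

/* Sketch only -- the real definition lives in the DRM headers and may differ. */
#define DRM_IOCTL_DEF(ioctl, _func, _flags)		\
	[DRM_IOCTL_NR(ioctl)] = {			\
		.cmd	= ioctl,			\
		.func	= _func,			\
		.flags	= _flags,			\
	}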
@@ -148,11 +175,11 @@ static drm_ioctl_desc_t drm_ioctls[] = {
*
* \sa drm_device
*/
-int drm_lastclose(drm_device_t * dev)
+int drm_lastclose(struct drm_device * dev)
{
- drm_magic_entry_t *pt, *next;
- drm_map_list_t *r_list, *list_t;
- drm_vma_entry_t *vma, *vma_temp;
+ struct drm_magic_entry *pt, *next;
+ struct drm_map_list *r_list, *list_t;
+ struct drm_vma_entry *vma, *vma_temp;
int i;
DRM_DEBUG("\n");
@@ -198,7 +225,7 @@ int drm_lastclose(drm_device_t * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
- drm_agp_mem_t *entry, *tempe;
+ struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
@@ -252,8 +279,9 @@ int drm_lastclose(drm_device_t * dev)
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
drm_dma_takedown(dev);
- if (dev->lock.filp) {
- dev->lock.filp = NULL;
+ if (dev->lock.hw_lock) {
+ dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.file_priv = NULL;
wake_up_interruptible(&dev->lock.lock_queue);
}
dev->dev_mapping = NULL;
@@ -265,7 +293,7 @@ int drm_lastclose(drm_device_t * dev)
void drm_cleanup_pci(struct pci_dev *pdev)
{
- drm_device_t *dev = pci_get_drvdata(pdev);
+ struct drm_device *dev = pci_get_drvdata(pdev);
pci_set_drvdata(pdev, NULL);
pci_release_regions(pdev);
@@ -319,7 +347,7 @@ int drm_init(struct drm_driver *driver,
}
if (!drm_fb_loaded)
- pci_register_driver(&driver->pci_driver);
+ return pci_register_driver(&driver->pci_driver);
else {
for (i = 0; pciidlist[i].vendor != 0; i++) {
pid = &pciidlist[i];
@@ -351,7 +379,7 @@ EXPORT_SYMBOL(drm_init);
*
* \sa drm_init
*/
-static void drm_cleanup(drm_device_t * dev)
+static void drm_cleanup(struct drm_device * dev)
{
DRM_DEBUG("\n");
@@ -400,8 +428,8 @@ static void drm_cleanup(drm_device_t * dev)
void drm_exit(struct drm_driver *driver)
{
int i;
- drm_device_t *dev = NULL;
- drm_head_t *head;
+ struct drm_device *dev = NULL;
+ struct drm_head *head;
DRM_DEBUG("\n");
if (drm_fb_loaded) {
@@ -519,34 +547,26 @@ module_exit(drm_core_exit);
* Get version information
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_version structure.
* \return zero on success or negative number on failure.
*
* Fills in the version information in \p arg.
*/
-static int drm_version(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int drm_version(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_version_t __user *argp = (void __user *)arg;
- drm_version_t version;
+ struct drm_version *version = data;
int len;
- if (copy_from_user(&version, argp, sizeof(version)))
- return -EFAULT;
+ version->version_major = dev->driver->major;
+ version->version_minor = dev->driver->minor;
+ version->version_patchlevel = dev->driver->patchlevel;
+ DRM_COPY(version->name, dev->driver->name);
+ DRM_COPY(version->date, dev->driver->date);
+ DRM_COPY(version->desc, dev->driver->desc);
- version.version_major = dev->driver->major;
- version.version_minor = dev->driver->minor;
- version.version_patchlevel = dev->driver->patchlevel;
- DRM_COPY(version.name, dev->driver->name);
- DRM_COPY(version.date, dev->driver->date);
- DRM_COPY(version.desc, dev->driver->desc);
-
- if (copy_to_user(argp, &version, sizeof(version)))
- return -EFAULT;
return 0;
}
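The rewritten drm_version() illustrates the new handler convention: the core now copies the argument block in and out around the call, so the handler only fills in a kernel-space struct drm_version. From userspace the same structure is still passed through ioctl(2); a hedged usage sketch (not part of this patch, buffer sizes arbitrary):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"

/* Hypothetical helper: query and print the driver version for an open DRM fd. */
static int print_drm_version(int fd)
{
	struct drm_version v;
	char name[64] = "", date[64] = "", desc[64] = "";

	memset(&v, 0, sizeof(v));
	v.name = name;  v.name_len = sizeof(name) - 1;
	v.date = date;  v.date_len = sizeof(date) - 1;
	v.desc = desc;  v.desc_len = sizeof(desc) - 1;

	if (ioctl(fd, DRM_IOCTL_VERSION, &v) != 0)
		return -1;

	printf("%s %d.%d.%d (%s)\n", name, v.version_major,
	       v.version_minor, v.version_patchlevel, date);
	return 0;
}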
@@ -554,31 +574,43 @@ static int drm_version(struct inode *inode, struct file *filp,
* Called whenever a process performs an ioctl on /dev/drm.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
*
* Looks up the ioctl function in the ::ioctls table, checking for root
 * privileges if so required, and dispatches to the respective function.
+ *
+ * Copies data in and out according to the size and direction given in cmd,
+ * which must match the ioctl cmd known by the kernel. The kernel uses a
+ * 512-byte stack buffer to store the ioctl arguments in kernel space. Should
+ * we ever need much larger ioctl arguments, we may need to allocate memory.
*/
int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_ioctl_desc_t *ioctl;
+ return drm_unlocked_ioctl(filp, cmd, arg);
+}
+EXPORT_SYMBOL(drm_ioctl);
+
+long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->head->dev;
+ struct drm_ioctl_desc *ioctl;
drm_ioctl_t *func;
unsigned int nr = DRM_IOCTL_NR(cmd);
int retcode = -EINVAL;
+ char kdata[512];
atomic_inc(&dev->ioctl_count);
atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
- ++priv->ioctl_count;
+ ++file_priv->ioctl_count;
DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
- current->pid, cmd, nr, (long)old_encode_dev(priv->head->device),
- priv->authenticated);
+ current->pid, cmd, nr, (long)old_encode_dev(file_priv->head->device),
+ file_priv->authenticated);
if ((nr >= DRM_CORE_IOCTL_COUNT) &&
((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END)))
@@ -588,35 +620,63 @@ int drm_ioctl(struct inode *inode, struct file *filp,
ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
ioctl = &drm_ioctls[nr];
- else
+ else {
+ retcode = -EINVAL;
goto err_i1;
-
+ }
+#if 0
+ /*
+ * This check is disabled because the driver-private ioctl->cmd
+ * values are not full ioctl commands with size and direction bits,
+ * just indices. The DRM core ioctl->cmd values are proper ioctl
+ * commands; the drivers' ioctl tables need to be fixed.
+ */
+ if (ioctl->cmd != cmd) {
+ retcode = -EINVAL;
+ goto err_i1;
+ }
+#endif
func = ioctl->func;
/* is there a local override? */
if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl)
func = dev->driver->dma_ioctl;
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg,
+ _IOC_SIZE(cmd)) != 0) {
+ retcode = -EACCES;
+ goto err_i1;
+ }
+ }
+
if (!func) {
DRM_DEBUG("no function\n");
retcode = -EINVAL;
} else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) ||
- ((ioctl->flags & DRM_AUTH) && !priv->authenticated) ||
- ((ioctl->flags & DRM_MASTER) && !priv->master)) {
+ ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) ||
+ ((ioctl->flags & DRM_MASTER) && !file_priv->master)) {
retcode = -EACCES;
} else {
- retcode = func(inode, filp, cmd, arg);
+ retcode = func(dev, kdata, file_priv);
+ }
+
+ if (cmd & IOC_OUT) {
+ if (copy_to_user((void __user *)arg, kdata,
+ _IOC_SIZE(cmd)) != 0)
+ retcode = -EACCES;
}
+
err_i1:
atomic_dec(&dev->ioctl_count);
if (retcode)
- DRM_DEBUG("ret = %x\n", retcode);
+ DRM_DEBUG("ret = %d\n", retcode);
return retcode;
}
-EXPORT_SYMBOL(drm_ioctl);
+EXPORT_SYMBOL(drm_unlocked_ioctl);
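The copy_from_user()/copy_to_user() calls in drm_unlocked_ioctl() are driven entirely by the size and direction bits encoded in the command word (assuming the usual DRM_IOWR()-style definitions, e.g. #define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)). A small illustration of how a command maps onto those checks:

/* Illustration only: decode the bits drm_unlocked_ioctl() keys off. */
static void show_cmd_encoding(unsigned int cmd)
{
	unsigned int size = _IOC_SIZE(cmd);	/* bytes staged in kdata[512] */
	int copies_in  = (cmd & IOC_IN)  != 0;	/* copy_from_user() before the handler */
	int copies_out = (cmd & IOC_OUT) != 0;	/* copy_to_user() after the handler */

	DRM_DEBUG("cmd 0x%08x: size %u, in %d, out %d\n",
		  cmd, size, copies_in, copies_out);
}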
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
- drm_map_list_t *entry;
+ struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
diff --git a/linux-core/drm_edid.c b/linux-core/drm_edid.c
index 0d067929..d7d3939a 100644
--- a/linux-core/drm_edid.c
+++ b/linux-core/drm_edid.c
@@ -94,7 +94,7 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
* if this is the display's preferred timing, and we'll use it to indicate
* to the other layers that this mode is desired.
*/
-struct drm_display_mode *drm_mode_detailed(drm_device_t *dev,
+struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
struct detailed_timing *timing)
{
struct drm_display_mode *mode;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 5215feb6..2f16f7ef 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -34,17 +34,17 @@
* Typically called by the IRQ handler.
*/
-void drm_fence_handler(drm_device_t * dev, uint32_t class,
+void drm_fence_handler(struct drm_device * dev, uint32_t class,
uint32_t sequence, uint32_t type)
{
int wake = 0;
uint32_t diff;
uint32_t relevant;
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_class_manager_t *fc = &fm->class[class];
- drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->class[class];
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
struct list_head *head;
- drm_fence_object_t *fence, *next;
+ struct drm_fence_object *fence, *next;
int found = 0;
int is_exe = (type & DRM_FENCE_TYPE_EXE);
int ge_last_exe;
@@ -114,9 +114,9 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class,
EXPORT_SYMBOL(drm_fence_handler);
-static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
+static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
{
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
unsigned long flags;
write_lock_irqsave(&fm->lock, flags);
@@ -124,11 +124,11 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
write_unlock_irqrestore(&fm->lock, flags);
}
-void drm_fence_usage_deref_locked(drm_fence_object_t ** fence)
+void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
*fence = NULL;
@@ -142,11 +142,11 @@ void drm_fence_usage_deref_locked(drm_fence_object_t ** fence)
}
}
-void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence)
+void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
*fence = NULL;
if (atomic_dec_and_test(&tmp_fence->usage)) {
@@ -180,22 +180,22 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
}
-static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base)
+static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
{
- drm_fence_object_t *fence =
- drm_user_object_entry(base, drm_fence_object_t, base);
+ struct drm_fence_object *fence =
+ drm_user_object_entry(base, struct drm_fence_object, base);
drm_fence_usage_deref_locked(&fence);
}
-int drm_fence_object_signaled(drm_fence_object_t * fence,
+int drm_fence_object_signaled(struct drm_fence_object * fence,
uint32_t mask, int poke_flush)
{
unsigned long flags;
int signaled;
struct drm_device *dev = fence->dev;
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
if (poke_flush)
driver->poke_flush(dev, fence->class);
@@ -207,8 +207,8 @@ int drm_fence_object_signaled(drm_fence_object_t * fence,
return signaled;
}
-static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
- drm_fence_driver_t * driver, uint32_t sequence)
+static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
+ struct drm_fence_driver * driver, uint32_t sequence)
{
uint32_t diff;
@@ -224,13 +224,13 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
}
}
-int drm_fence_object_flush(drm_fence_object_t * fence,
+int drm_fence_object_flush(struct drm_fence_object * fence,
uint32_t type)
{
struct drm_device *dev = fence->dev;
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_class_manager_t *fc = &fm->class[fence->class];
- drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->class[fence->class];
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
unsigned long flags;
if (type & ~fence->type) {
@@ -262,14 +262,14 @@ int drm_fence_object_flush(drm_fence_object_t * fence,
* wrapped around and reused.
*/
-void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
+void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence)
{
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_class_manager_t *fc = &fm->class[class];
- drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->class[class];
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
uint32_t old_sequence;
unsigned long flags;
- drm_fence_object_t *fence;
+ struct drm_fence_object *fence;
uint32_t diff;
write_lock_irqsave(&fm->lock, flags);
@@ -290,7 +290,7 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
mutex_unlock(&dev->struct_mutex);
return;
}
- fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring));
+ fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring));
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
read_unlock_irqrestore(&fm->lock, flags);
@@ -302,13 +302,13 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
EXPORT_SYMBOL(drm_fence_flush_old);
-static int drm_fence_lazy_wait(drm_fence_object_t *fence,
+static int drm_fence_lazy_wait(struct drm_fence_object *fence,
int ignore_signals,
uint32_t mask)
{
struct drm_device *dev = fence->dev;
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_class_manager_t *fc = &fm->class[fence->class];
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->class[fence->class];
int signaled;
unsigned long _end = jiffies + 3*DRM_HZ;
int ret = 0;
@@ -336,11 +336,11 @@ static int drm_fence_lazy_wait(drm_fence_object_t *fence,
return 0;
}
-int drm_fence_object_wait(drm_fence_object_t * fence,
+int drm_fence_object_wait(struct drm_fence_object * fence,
int lazy, int ignore_signals, uint32_t mask)
{
struct drm_device *dev = fence->dev;
- drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
int ret = 0;
unsigned long _end;
int signaled;
@@ -403,13 +403,13 @@ int drm_fence_object_wait(drm_fence_object_t * fence,
return 0;
}
-int drm_fence_object_emit(drm_fence_object_t * fence,
+int drm_fence_object_emit(struct drm_fence_object * fence,
uint32_t fence_flags, uint32_t class, uint32_t type)
{
struct drm_device *dev = fence->dev;
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_driver_t *driver = dev->driver->fence_driver;
- drm_fence_class_manager_t *fc = &fm->class[fence->class];
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
+ struct drm_fence_class_manager *fc = &fm->class[fence->class];
unsigned long flags;
uint32_t sequence;
uint32_t native_type;
@@ -435,14 +435,14 @@ int drm_fence_object_emit(drm_fence_object_t * fence,
return 0;
}
-static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
+static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
uint32_t type,
uint32_t fence_flags,
- drm_fence_object_t * fence)
+ struct drm_fence_object * fence)
{
int ret = 0;
unsigned long flags;
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
mutex_lock(&dev->struct_mutex);
atomic_set(&fence->usage, 1);
@@ -471,10 +471,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
return ret;
}
-int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
+int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
int shareable)
{
- drm_device_t *dev = priv->head->dev;
+ struct drm_device *dev = priv->head->dev;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -491,12 +491,12 @@ out:
}
EXPORT_SYMBOL(drm_fence_add_user_object);
-int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
- unsigned flags, drm_fence_object_t ** c_fence)
+int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type,
+ unsigned flags, struct drm_fence_object ** c_fence)
{
- drm_fence_object_t *fence;
+ struct drm_fence_object *fence;
int ret;
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
if (!fence)
@@ -514,15 +514,16 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
EXPORT_SYMBOL(drm_fence_object_create);
-void drm_fence_manager_init(drm_device_t * dev)
+void drm_fence_manager_init(struct drm_device * dev)
{
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_class_manager_t *class;
- drm_fence_driver_t *fed = dev->driver->fence_driver;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *class;
+ struct drm_fence_driver *fed = dev->driver->fence_driver;
int i;
+ unsigned long flags;
rwlock_init(&fm->lock);
- write_lock(&fm->lock);
+ write_lock_irqsave(&fm->lock, flags);
fm->initialized = 0;
if (!fed)
goto out_unlock;
@@ -541,18 +542,18 @@ void drm_fence_manager_init(drm_device_t * dev)
atomic_set(&fm->count, 0);
out_unlock:
- write_unlock(&fm->lock);
+ write_unlock_irqrestore(&fm->lock, flags);
}
-void drm_fence_manager_takedown(drm_device_t * dev)
+void drm_fence_manager_takedown(struct drm_device * dev)
{
}
-drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
+struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
{
- drm_device_t *dev = priv->head->dev;
- drm_user_object_t *uo;
- drm_fence_object_t *fence;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_user_object *uo;
+ struct drm_fence_object *fence;
mutex_lock(&dev->struct_mutex);
uo = drm_lookup_user_object(priv, handle);
@@ -560,19 +561,17 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
mutex_unlock(&dev->struct_mutex);
return NULL;
}
- fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base));
+ fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base));
mutex_unlock(&dev->struct_mutex);
return fence;
}
-int drm_fence_ioctl(DRM_IOCTL_ARGS)
+int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
int ret;
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_arg_t arg;
- drm_fence_object_t *fence;
- drm_user_object_t *uo;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_fence_object *fence;
unsigned long flags;
ret = 0;
@@ -581,100 +580,265 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
- switch (arg.op) {
- case drm_fence_create:
- if (arg.flags & DRM_FENCE_FLAG_EMIT)
- LOCK_TEST_WITH_RETURN(dev, filp);
- ret = drm_fence_object_create(dev, arg.class,
- arg.type, arg.flags, &fence);
- if (ret)
- return ret;
- ret = drm_fence_add_user_object(priv, fence,
- arg.flags &
- DRM_FENCE_FLAG_SHAREABLE);
- if (ret) {
- drm_fence_usage_deref_unlocked(&fence);
- return ret;
- }
- arg.handle = fence->base.hash.key;
+ if (arg->flags & DRM_FENCE_FLAG_EMIT)
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+ ret = drm_fence_object_create(dev, arg->class,
+ arg->type, arg->flags, &fence);
+ if (ret)
+ return ret;
+ ret = drm_fence_add_user_object(file_priv, fence,
+ arg->flags &
+ DRM_FENCE_FLAG_SHAREABLE);
+ if (ret) {
+ drm_fence_usage_deref_unlocked(&fence);
+ return ret;
+ }
+
+ /*
+ * usage > 0. No need to lock dev->struct_mutex;
+ */
+
+ arg->handle = fence->base.hash.key;
- break;
- case drm_fence_destroy:
- mutex_lock(&dev->struct_mutex);
- uo = drm_lookup_user_object(priv, arg.handle);
- if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
- ret = drm_remove_user_object(priv, uo);
+ read_lock_irqsave(&fm->lock, flags);
+ arg->class = fence->class;
+ arg->type = fence->type;
+ arg->signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ return ret;
+}
+
+int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_user_object *uo;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&dev->struct_mutex);
+ uo = drm_lookup_user_object(file_priv, arg->handle);
+ if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) {
mutex_unlock(&dev->struct_mutex);
+ return -EINVAL;
+ }
+ ret = drm_remove_user_object(file_priv, uo);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+}
+
+
+int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_fence_object *fence;
+ struct drm_user_object *uo;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
+ if (ret)
return ret;
- case drm_fence_reference:
- ret =
- drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo);
- if (ret)
- return ret;
- fence = drm_lookup_fence_object(priv, arg.handle);
- break;
- case drm_fence_unreference:
- ret = drm_user_object_unref(priv, arg.handle, drm_fence_type);
- return ret;
- case drm_fence_signaled:
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- break;
- case drm_fence_flush:
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- ret = drm_fence_object_flush(fence, arg.type);
- break;
- case drm_fence_wait:
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- ret =
- drm_fence_object_wait(fence,
- arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
- 0, arg.type);
- break;
- case drm_fence_emit:
- LOCK_TEST_WITH_RETURN(dev, filp);
- fence = drm_lookup_fence_object(priv, arg.handle);
- if (!fence)
- return -EINVAL;
- ret = drm_fence_object_emit(fence, arg.flags, arg.class,
- arg.type);
- break;
- case drm_fence_buffers:
- if (!dev->bm.initialized) {
- DRM_ERROR("Buffer object manager is not initialized\n");
- return -EINVAL;
- }
- LOCK_TEST_WITH_RETURN(dev, filp);
- ret = drm_fence_buffer_objects(priv, NULL, arg.flags,
- NULL, &fence);
- if (ret)
- return ret;
- ret = drm_fence_add_user_object(priv, fence,
- arg.flags &
- DRM_FENCE_FLAG_SHAREABLE);
- if (ret)
- return ret;
- arg.handle = fence->base.hash.key;
- break;
- default:
+ fence = drm_lookup_fence_object(file_priv, arg->handle);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg->class = fence->class;
+ arg->type = fence->type;
+ arg->signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ return ret;
+}
+
+
+int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ return drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
+}
+
+int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_fence_object *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ fence = drm_lookup_fence_object(file_priv, arg->handle);
+ if (!fence)
+ return -EINVAL;
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg->class = fence->class;
+ arg->type = fence->type;
+ arg->signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ return ret;
+}
+
+int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_fence_object *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
return -EINVAL;
}
+
+ fence = drm_lookup_fence_object(file_priv, arg->handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_flush(fence, arg->type);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg->class = fence->class;
+ arg->type = fence->type;
+ arg->signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ return ret;
+}
+
+
+int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_fence_object *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ fence = drm_lookup_fence_object(file_priv, arg->handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_wait(fence,
+ arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
+ 0, arg->type);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg->class = fence->class;
+ arg->type = fence->type;
+ arg->signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ return ret;
+}
+
+
+int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_fence_object *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+ fence = drm_lookup_fence_object(file_priv, arg->handle);
+ if (!fence)
+ return -EINVAL;
+ ret = drm_fence_object_emit(fence, arg->flags, arg->class,
+ arg->type);
+
+ read_lock_irqsave(&fm->lock, flags);
+ arg->class = fence->class;
+ arg->type = fence->type;
+ arg->signaled = fence->signaled;
+ read_unlock_irqrestore(&fm->lock, flags);
+ drm_fence_usage_deref_unlocked(&fence);
+
+ return ret;
+}
+
+int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+{
+ int ret;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_arg *arg = data;
+ struct drm_fence_object *fence;
+ unsigned long flags;
+ ret = 0;
+
+ if (!fm->initialized) {
+ DRM_ERROR("The DRM driver does not support fencing.\n");
+ return -EINVAL;
+ }
+
+ if (!dev->bm.initialized) {
+ DRM_ERROR("Buffer object manager is not initialized\n");
+ return -EINVAL;
+ }
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
+ ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags,
+ NULL, &fence);
+ if (ret)
+ return ret;
+ ret = drm_fence_add_user_object(file_priv, fence,
+ arg->flags &
+ DRM_FENCE_FLAG_SHAREABLE);
+ if (ret)
+ return ret;
+
+ arg->handle = fence->base.hash.key;
+
read_lock_irqsave(&fm->lock, flags);
- arg.class = fence->class;
- arg.type = fence->type;
- arg.signaled = fence->signaled;
+ arg->class = fence->class;
+ arg->type = fence->type;
+ arg->signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
drm_fence_usage_deref_unlocked(&fence);
- DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
}
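Each of the per-operation fence ioctls above ends with the same read-back of class/type/signaled under fm->lock before dropping the reference. A hedged sketch of a helper that could factor out that repetition (not part of this patch; field names as used above):

/* Hypothetical helper: report fence state back into the user argument block. */
static void drm_fence_fill_arg(struct drm_fence_object *fence,
			       struct drm_fence_arg *arg)
{
	struct drm_fence_manager *fm = &fence->dev->fm;
	unsigned long flags;

	read_lock_irqsave(&fm->lock, flags);
	arg->class = fence->class;
	arg->type = fence->type;
	arg->signaled = fence->signaled;
	read_unlock_irqrestore(&fm->lock, flags);
}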
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 9116685c..643205c8 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -39,9 +39,9 @@
#include <linux/poll.h>
static int drm_open_helper(struct inode *inode, struct file *filp,
- drm_device_t * dev);
+ struct drm_device * dev);
-static int drm_setup(drm_device_t * dev)
+static int drm_setup(struct drm_device * dev)
{
drm_local_map_t *map;
int i;
@@ -128,7 +128,7 @@ static int drm_setup(drm_device_t * dev)
*/
int drm_open(struct inode *inode, struct file *filp)
{
- drm_device_t *dev = NULL;
+ struct drm_device *dev = NULL;
int minor = iminor(inode);
int retcode = 0;
@@ -176,7 +176,7 @@ EXPORT_SYMBOL(drm_open);
*/
int drm_stub_open(struct inode *inode, struct file *filp)
{
- drm_device_t *dev = NULL;
+ struct drm_device *dev = NULL;
int minor = iminor(inode);
int err = -ENODEV;
const struct file_operations *old_fops;
@@ -232,10 +232,10 @@ static int drm_cpu_valid(void)
* filp and add it into the double linked list in \p dev.
*/
static int drm_open_helper(struct inode *inode, struct file *filp,
- drm_device_t * dev)
+ struct drm_device * dev)
{
int minor = iminor(inode);
- drm_file_t *priv;
+ struct drm_file *priv;
int ret;
int i,j;
@@ -252,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
memset(priv, 0, sizeof(*priv));
filp->private_data = priv;
+ priv->filp = filp;
priv->uid = current->euid;
priv->pid = current->pid;
priv->minor = minor;
@@ -321,8 +322,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
/** No-op. */
int drm_fasync(int fd, struct file *filp, int on)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
int retcode;
DRM_DEBUG("fd = %d, device = 0x%lx\n", fd,
@@ -336,10 +337,10 @@ EXPORT_SYMBOL(drm_fasync);
static void drm_object_release(struct file *filp) {
- drm_file_t *priv = filp->private_data;
+ struct drm_file *priv = filp->private_data;
struct list_head *head;
- drm_user_object_t *user_object;
- drm_ref_object_t *ref_object;
+ struct drm_user_object *user_object;
+ struct drm_ref_object *ref_object;
int i;
/*
@@ -352,7 +353,7 @@ static void drm_object_release(struct file *filp) {
head = &priv->refd_objects;
while (head->next != head) {
- ref_object = list_entry(head->next, drm_ref_object_t, list);
+ ref_object = list_entry(head->next, struct drm_ref_object, list);
drm_remove_ref_object(priv, ref_object);
head = &priv->refd_objects;
}
@@ -363,7 +364,7 @@ static void drm_object_release(struct file *filp) {
head = &priv->user_objects;
while (head->next != head) {
- user_object = list_entry(head->next, drm_user_object_t, list);
+ user_object = list_entry(head->next, struct drm_user_object, list);
drm_remove_user_object(priv, user_object);
head = &priv->user_objects;
}
@@ -377,7 +378,7 @@ static void drm_object_release(struct file *filp) {
* Release file.
*
* \param inode device inode
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \return zero on success or a negative number on failure.
*
* If the hardware lock is held then free it, and take it again for the kernel
@@ -387,29 +388,28 @@ static void drm_object_release(struct file *filp) {
*/
int drm_release(struct inode *inode, struct file *filp)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_device *dev = file_priv->head->dev;
int retcode = 0;
lock_kernel();
- dev = priv->head->dev;
DRM_DEBUG("open_count = %d\n", dev->open_count);
if (dev->driver->preclose)
- dev->driver->preclose(dev, filp);
+ dev->driver->preclose(dev, file_priv);
/* ========================================================
* Begin inline drm_release
*/
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
- current->pid, (long)old_encode_dev(priv->head->device),
+ current->pid, (long)old_encode_dev(file_priv->head->device),
dev->open_count);
if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) {
- if (drm_i_have_hw_lock(filp)) {
- dev->driver->reclaim_buffers_locked(dev, filp);
+ if (drm_i_have_hw_lock(dev, file_priv)) {
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
} else {
unsigned long _end=jiffies + 3*DRM_HZ;
int locked = 0;
@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
"\tI will go on reclaiming the buffers anyway.\n");
}
- dev->driver->reclaim_buffers_locked(dev, filp);
+ dev->driver->reclaim_buffers_locked(dev, file_priv);
drm_idlelock_release(&dev->lock);
}
}
@@ -443,12 +443,12 @@ int drm_release(struct inode *inode, struct file *filp)
if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) {
drm_idlelock_take(&dev->lock);
- dev->driver->reclaim_buffers_idlelocked(dev, filp);
+ dev->driver->reclaim_buffers_idlelocked(dev, file_priv);
drm_idlelock_release(&dev->lock);
}
- if (drm_i_have_hw_lock(filp)) {
+ if (drm_i_have_hw_lock(dev, file_priv)) {
DRM_DEBUG("File %p released, freeing lock for context %d\n",
filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
@@ -459,7 +459,7 @@ int drm_release(struct inode *inode, struct file *filp)
if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) &&
!dev->driver->reclaim_buffers_locked) {
- dev->driver->reclaim_buffers(dev, filp);
+ dev->driver->reclaim_buffers(dev, file_priv);
}
drm_fasync(-1, filp, 0);
@@ -467,10 +467,10 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->ctxlist_mutex);
if (!list_empty(&dev->ctxlist)) {
- drm_ctx_list_t *pos, *n;
+ struct drm_ctx_list *pos, *n;
list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
- if (pos->tag == priv &&
+ if (pos->tag == file_priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
dev->driver->context_dtor(dev,
@@ -489,18 +489,18 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->struct_mutex);
drm_fb_release(filp);
drm_object_release(filp);
- if (priv->remove_auth_on_close == 1) {
- drm_file_t *temp;
+ if (file_priv->remove_auth_on_close == 1) {
+ struct drm_file *temp;
list_for_each_entry(temp, &dev->filelist, lhead)
temp->authenticated = 0;
}
- list_del(&priv->lhead);
+ list_del(&file_priv->lhead);
mutex_unlock(&dev->struct_mutex);
if (dev->driver->postclose)
- dev->driver->postclose(dev, priv);
- drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+ dev->driver->postclose(dev, file_priv);
+ drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES);
/* ========================================================
* End inline drm_release
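drm_release() now passes (dev, file_priv) rather than the raw filp to drm_i_have_hw_lock() and the reclaim hooks. The helper itself is not shown here; a heavily hedged sketch of what it presumably checks, based only on the fields this patch touches (dev->lock.hw_lock, dev->lock.file_priv):

/* Sketch only -- the real drm_i_have_hw_lock() lives in drm_lock.c and may
 * check additional per-file state. */
static int example_i_have_hw_lock(struct drm_device *dev,
				  struct drm_file *file_priv)
{
	return dev->lock.hw_lock &&
	       _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
	       dev->lock.file_priv == file_priv;
}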
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index 6f17e114..a8ec8468 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -36,7 +36,7 @@
#include "drm_hashtab.h"
#include <linux/hash.h>
-int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
+int drm_ht_create(struct drm_open_hash * ht, unsigned int order)
{
unsigned int i;
@@ -63,9 +63,9 @@ int drm_ht_create(drm_open_hash_t * ht, unsigned int order)
return 0;
}
-void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
+void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key)
{
- drm_hash_item_t *entry;
+ struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list;
unsigned int hashed_key;
@@ -75,15 +75,15 @@ void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key)
DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key);
h_list = &ht->table[hashed_key];
hlist_for_each(list, h_list) {
- entry = hlist_entry(list, drm_hash_item_t, head);
+ entry = hlist_entry(list, struct drm_hash_item, head);
DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key);
}
}
-static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht,
unsigned long key)
{
- drm_hash_item_t *entry;
+ struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list;
unsigned int hashed_key;
@@ -91,7 +91,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
hashed_key = hash_long(key, ht->order);
h_list = &ht->table[hashed_key];
hlist_for_each(list, h_list) {
- entry = hlist_entry(list, drm_hash_item_t, head);
+ entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return list;
if (entry->key > key)
@@ -100,9 +100,9 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht,
return NULL;
}
-int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
+int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)
{
- drm_hash_item_t *entry;
+ struct drm_hash_item *entry;
struct hlist_head *h_list;
struct hlist_node *list, *parent;
unsigned int hashed_key;
@@ -112,7 +112,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
h_list = &ht->table[hashed_key];
parent = NULL;
hlist_for_each(list, h_list) {
- entry = hlist_entry(list, drm_hash_item_t, head);
+ entry = hlist_entry(list, struct drm_hash_item, head);
if (entry->key == key)
return -EINVAL;
if (entry->key > key)
@@ -131,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item)
 * Just insert an item and return a previously unused key built from
 * "bits" bits.
*/
-int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
+int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
@@ -155,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item,
return 0;
}
-int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
- drm_hash_item_t ** item)
+int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key,
+ struct drm_hash_item ** item)
{
struct hlist_node *list;
@@ -164,11 +164,11 @@ int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key,
if (!list)
return -EINVAL;
- *item = hlist_entry(list, drm_hash_item_t, head);
+ *item = hlist_entry(list, struct drm_hash_item, head);
return 0;
}
-int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
+int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key)
{
struct hlist_node *list;
@@ -181,14 +181,14 @@ int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key)
return -EINVAL;
}
-int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item)
+int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
-void drm_ht_remove(drm_open_hash_t * ht)
+void drm_ht_remove(struct drm_open_hash * ht)
{
if (ht->table) {
if (ht->use_vmalloc)
diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h
index 613091c9..0f137677 100644
--- a/linux-core/drm_hashtab.h
+++ b/linux-core/drm_hashtab.h
@@ -37,31 +37,31 @@
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-typedef struct drm_hash_item{
+struct drm_hash_item {
struct hlist_node head;
unsigned long key;
-} drm_hash_item_t;
+};
-typedef struct drm_open_hash{
+struct drm_open_hash {
unsigned int size;
unsigned int order;
unsigned int fill;
struct hlist_head *table;
int use_vmalloc;
-} drm_open_hash_t;
+};
-extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order);
-extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item);
-extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item,
+extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add);
-extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item);
+extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key);
-extern int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key);
-extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item);
-extern void drm_ht_remove(drm_open_hash_t *ht);
+extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+extern void drm_ht_remove(struct drm_open_hash *ht);
#endif
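With the typedefs gone, users of the open hash table embed a struct drm_hash_item in their own object and get back to the container with drm_hash_entry(). A hedged usage sketch of the API declared above (the container type and helpers are hypothetical):

#include "drm_hashtab.h"

struct my_object {
	struct drm_hash_item hash;	/* keyed entry embedded in the object */
	int payload;
};

static int my_insert(struct drm_open_hash *ht, struct my_object *obj,
		     unsigned long key)
{
	obj->hash.key = key;
	return drm_ht_insert_item(ht, &obj->hash);	/* -EINVAL if key exists */
}

static struct my_object *my_lookup(struct drm_open_hash *ht, unsigned long key)
{
	struct drm_hash_item *item;

	if (drm_ht_find_item(ht, key, &item))
		return NULL;
	return drm_hash_entry(item, struct my_object, hash);
}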
diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c
index bbab3ea2..0188154e 100644
--- a/linux-core/drm_ioc32.c
+++ b/linux-core/drm_ioc32.c
@@ -82,7 +82,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_version32_t v32;
- drm_version_t __user *version;
+ struct drm_version __user *version;
int err;
if (copy_from_user(&v32, (void __user *)arg, sizeof(v32)))
@@ -129,7 +129,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
- drm_unique_t __user *u;
+ struct drm_unique __user *u;
int err;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
@@ -159,7 +159,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_unique32_t uq32;
- drm_unique_t __user *u;
+ struct drm_unique __user *u;
if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32)))
return -EFAULT;
@@ -179,8 +179,8 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
typedef struct drm_map32 {
u32 offset; /**< Requested physical address (0 for SAREA)*/
u32 size; /**< Requested physical size (bytes) */
- drm_map_type_t type; /**< Type of memory to map */
- drm_map_flags_t flags; /**< Flags */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
u32 handle; /**< User-space: "Handle" to pass to mmap() */
int mtrr; /**< MTRR slot used */
} drm_map32_t;
@@ -190,7 +190,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- drm_map_t __user *map;
+ struct drm_map __user *map;
int idx, err;
void *handle;
@@ -228,7 +228,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
{
drm_map32_t __user *argp = (void __user *)arg;
drm_map32_t m32;
- drm_map_t __user *map;
+ struct drm_map __user *map;
int err;
void *handle;
@@ -270,7 +270,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_map32_t __user *argp = (void __user *)arg;
- drm_map_t __user *map;
+ struct drm_map __user *map;
u32 handle;
if (get_user(handle, &argp->handle))
@@ -300,7 +300,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
{
drm_client32_t c32;
drm_client32_t __user *argp = (void __user *)arg;
- drm_client_t __user *client;
+ struct drm_client __user *client;
int idx, err;
if (get_user(idx, &argp->idx))
@@ -333,7 +333,7 @@ typedef struct drm_stats32 {
u32 count;
struct {
u32 value;
- drm_stat_type_t type;
+ enum drm_stat_type type;
} data[15];
} drm_stats32_t;
@@ -342,7 +342,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
{
drm_stats32_t s32;
drm_stats32_t __user *argp = (void __user *)arg;
- drm_stats_t __user *stats;
+ struct drm_stats __user *stats;
int i, err;
stats = compat_alloc_user_space(sizeof(*stats));
@@ -379,7 +379,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_desc32_t __user *argp = (void __user *)arg;
- drm_buf_desc_t __user *buf;
+ struct drm_buf_desc __user *buf;
int err;
unsigned long agp_start;
@@ -411,7 +411,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
{
drm_buf_desc32_t b32;
drm_buf_desc32_t __user *argp = (void __user *)arg;
- drm_buf_desc_t __user *buf;
+ struct drm_buf_desc __user *buf;
if (copy_from_user(&b32, argp, sizeof(b32)))
return -EFAULT;
@@ -440,8 +440,8 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
drm_buf_info32_t req32;
drm_buf_info32_t __user *argp = (void __user *)arg;
drm_buf_desc32_t __user *to;
- drm_buf_info_t __user *request;
- drm_buf_desc_t __user *list;
+ struct drm_buf_info __user *request;
+ struct drm_buf_desc __user *list;
size_t nbytes;
int i, err;
int count, actual;
@@ -457,11 +457,11 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
&& !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t)))
return -EFAULT;
- nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t);
+ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc);
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
- list = (drm_buf_desc_t *) (request + 1);
+ list = (struct drm_buf_desc *) (request + 1);
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
@@ -477,7 +477,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
if (count >= actual)
for (i = 0; i < actual; ++i)
if (__copy_in_user(&to[i], &list[i],
- offsetof(drm_buf_desc_t, flags)))
+ offsetof(struct drm_buf_desc, flags)))
return -EFAULT;
if (__put_user(actual, &argp->count))
@@ -505,8 +505,8 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
drm_buf_map32_t __user *argp = (void __user *)arg;
drm_buf_map32_t req32;
drm_buf_pub32_t __user *list32;
- drm_buf_map_t __user *request;
- drm_buf_pub_t __user *list;
+ struct drm_buf_map __user *request;
+ struct drm_buf_pub __user *list;
int i, err;
int count, actual;
size_t nbytes;
@@ -519,11 +519,11 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
if (count < 0)
return -EINVAL;
- nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t);
+ nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub);
request = compat_alloc_user_space(nbytes);
if (!access_ok(VERIFY_WRITE, request, nbytes))
return -EFAULT;
- list = (drm_buf_pub_t *) (request + 1);
+ list = (struct drm_buf_pub *) (request + 1);
if (__put_user(count, &request->count)
|| __put_user(list, &request->list))
@@ -539,7 +539,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
if (count >= actual)
for (i = 0; i < actual; ++i)
if (__copy_in_user(&list32[i], &list[i],
- offsetof(drm_buf_pub_t, address))
+ offsetof(struct drm_buf_pub, address))
|| __get_user(addr, &list[i].address)
|| __put_user((unsigned long)addr,
&list32[i].address))
@@ -562,7 +562,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_buf_free32_t req32;
- drm_buf_free_t __user *request;
+ struct drm_buf_free __user *request;
drm_buf_free32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -589,7 +589,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_ctx_priv_map32_t req32;
- drm_ctx_priv_map_t __user *request;
+ struct drm_ctx_priv_map __user *request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -610,7 +610,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_ctx_priv_map_t __user *request;
+ struct drm_ctx_priv_map __user *request;
drm_ctx_priv_map32_t __user *argp = (void __user *)arg;
int err;
unsigned int ctx_id;
@@ -648,7 +648,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
{
drm_ctx_res32_t __user *argp = (void __user *)arg;
drm_ctx_res32_t res32;
- drm_ctx_res_t __user *res;
+ struct drm_ctx_res __user *res;
int err;
if (copy_from_user(&res32, argp, sizeof(res32)))
@@ -658,7 +658,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
if (!access_ok(VERIFY_WRITE, res, sizeof(*res)))
return -EFAULT;
if (__put_user(res32.count, &res->count)
- || __put_user((drm_ctx_t __user *)(unsigned long)res32.contexts,
+ || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts,
&res->contexts))
return -EFAULT;
@@ -679,7 +679,7 @@ typedef struct drm_dma32 {
int send_count; /**< Number of buffers to send */
u32 send_indices; /**< List of handles to buffers */
u32 send_sizes; /**< Lengths of data to send */
- drm_dma_flags_t flags; /**< Flags */
+ enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
u32 request_indices; /**< Buffer information */
@@ -692,7 +692,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
{
drm_dma32_t d32;
drm_dma32_t __user *argp = (void __user *)arg;
- drm_dma_t __user *d;
+ struct drm_dma __user *d;
int err;
if (copy_from_user(&d32, argp, sizeof(d32)))
@@ -740,7 +740,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
{
drm_agp_mode32_t __user *argp = (void __user *)arg;
drm_agp_mode32_t m32;
- drm_agp_mode_t __user *mode;
+ struct drm_agp_mode __user *mode;
if (get_user(m32.mode, &argp->mode))
return -EFAULT;
@@ -772,7 +772,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
{
drm_agp_info32_t __user *argp = (void __user *)arg;
drm_agp_info32_t i32;
- drm_agp_info_t __user *info;
+ struct drm_agp_info __user *info;
int err;
info = compat_alloc_user_space(sizeof(*info));
@@ -813,7 +813,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
drm_agp_buffer32_t req32;
- drm_agp_buffer_t __user *request;
+ struct drm_agp_buffer __user *request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -845,7 +845,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_buffer32_t __user *argp = (void __user *)arg;
- drm_agp_buffer_t __user *request;
+ struct drm_agp_buffer __user *request;
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
@@ -868,7 +868,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
drm_agp_binding32_t req32;
- drm_agp_binding_t __user *request;
+ struct drm_agp_binding __user *request;
if (copy_from_user(&req32, argp, sizeof(req32)))
return -EFAULT;
@@ -887,7 +887,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_agp_binding32_t __user *argp = (void __user *)arg;
- drm_agp_binding_t __user *request;
+ struct drm_agp_binding __user *request;
u32 handle;
request = compat_alloc_user_space(sizeof(*request));
@@ -910,7 +910,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- drm_scatter_gather_t __user *request;
+ struct drm_scatter_gather __user *request;
int err;
unsigned long x;
@@ -938,7 +938,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
unsigned long arg)
{
drm_scatter_gather32_t __user *argp = (void __user *)arg;
- drm_scatter_gather_t __user *request;
+ struct drm_scatter_gather __user *request;
unsigned long x;
request = compat_alloc_user_space(sizeof(*request));
@@ -953,13 +953,13 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
}
struct drm_wait_vblank_request32 {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
u32 signal;
};
struct drm_wait_vblank_reply32 {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
s32 tval_sec;
s32 tval_usec;
@@ -975,7 +975,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
{
drm_wait_vblank32_t __user *argp = (void __user *)arg;
drm_wait_vblank32_t req32;
- drm_wait_vblank_t __user *request;
+ union drm_wait_vblank __user *request;
int err;
if (copy_from_user(&req32, argp, sizeof(req32)))
@@ -1040,7 +1040,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/drm.
*
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument.
* \return zero on success or negative number on failure.
@@ -1051,8 +1051,13 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
drm_ioctl_compat_t *fn;
int ret;
+
+ /* Assume that ioctls without an explicit compat routine will "just
+ * work". This may not always be a good assumption, but it's better
+ * than always failing.
+ */
if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls))
- return -ENOTTY;
+ return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
fn = drm_compat_ioctls[nr];
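
All of the compat wrappers above follow the same shape: copy the packed 32-bit layout in, rebuild a native-sized argument with compat_alloc_user_space(), and forward it to drm_ioctl(); wrappers for ioctls that return data then copy the result back into the 32-bit layout. A minimal sketch of that pattern follows, with drm_foo32_t, struct drm_foo and DRM_IOCTL_FOO as purely illustrative names rather than real DRM interfaces:

/*
 * Illustrative only: drm_foo32_t, struct drm_foo and DRM_IOCTL_FOO do not
 * exist in the DRM interface; they stand in for the real per-ioctl types.
 */
typedef struct drm_foo32 {
	u32 handle;
	u32 ptr;			/**< 32-bit user pointer */
} drm_foo32_t;

static int compat_drm_foo(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	drm_foo32_t f32;
	drm_foo32_t __user *argp = (void __user *)arg;
	struct drm_foo __user *f;

	if (copy_from_user(&f32, argp, sizeof(f32)))
		return -EFAULT;

	/* Rebuild a native-sized argument on the user stack. */
	f = compat_alloc_user_space(sizeof(*f));
	if (!access_ok(VERIFY_WRITE, f, sizeof(*f)))
		return -EFAULT;
	if (__put_user(f32.handle, &f->handle)
	    || __put_user((void __user *)(unsigned long)f32.ptr, &f->ptr))
		return -EFAULT;

	/* Hand it to the regular ioctl path, exactly as the wrappers above do. */
	return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_FOO,
			 (unsigned long)f);
}
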
diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c
index 97df972f..9d52fd8a 100644
--- a/linux-core/drm_ioctl.c
+++ b/linux-core/drm_ioctl.c
@@ -42,30 +42,24 @@
* Get the bus id.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_unique structure.
* \return zero on success or a negative number on failure.
*
* Copies the bus id from drm_device::unique into user space.
*/
-int drm_getunique(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_getunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_unique_t __user *argp = (void __user *)arg;
- drm_unique_t u;
+ struct drm_unique *u = data;
- if (copy_from_user(&u, argp, sizeof(u)))
- return -EFAULT;
- if (u.unique_len >= dev->unique_len) {
- if (copy_to_user(u.unique, dev->unique, dev->unique_len))
+ if (u->unique_len >= dev->unique_len) {
+ if (copy_to_user(u->unique, dev->unique, dev->unique_len))
return -EFAULT;
}
- u.unique_len = dev->unique_len;
- if (copy_to_user(argp, &u, sizeof(u)))
- return -EFAULT;
+ u->unique_len = dev->unique_len;
+
return 0;
}
@@ -73,7 +67,7 @@ int drm_getunique(struct inode *inode, struct file *filp,
* Set the bus id.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_unique structure.
* \return zero on success or a negative number on failure.
@@ -83,28 +77,23 @@ int drm_getunique(struct inode *inode, struct file *filp,
* in interface version 1.1 and will return EBUSY when setversion has requested
* version 1.1 or greater.
*/
-int drm_setunique(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_setunique(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_unique_t u;
+ struct drm_unique *u = data;
int domain, bus, slot, func, ret;
if (dev->unique_len || dev->unique)
return -EBUSY;
- if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u)))
- return -EFAULT;
-
- if (!u.unique_len || u.unique_len > 1024)
+ if (!u->unique_len || u->unique_len > 1024)
return -EINVAL;
- dev->unique_len = u.unique_len;
- dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER);
+ dev->unique_len = u->unique_len;
+ dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER);
if (!dev->unique)
return -ENOMEM;
- if (copy_from_user(dev->unique, u.unique, dev->unique_len))
+ if (copy_from_user(dev->unique, u->unique, dev->unique_len))
return -EFAULT;
dev->unique[dev->unique_len] = '\0';
@@ -121,7 +110,7 @@ int drm_setunique(struct inode *inode, struct file *filp,
*/
ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func);
if (ret != 3)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
domain = bus >> 8;
bus &= 0xff;
@@ -134,7 +123,7 @@ int drm_setunique(struct inode *inode, struct file *filp,
return 0;
}
-static int drm_set_busid(drm_device_t * dev)
+static int drm_set_busid(struct drm_device * dev)
{
int len;
if (dev->unique != NULL)
@@ -167,7 +156,7 @@ static int drm_set_busid(drm_device_t * dev)
* Get a mapping information.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_map structure.
*
@@ -176,21 +165,16 @@ static int drm_set_busid(drm_device_t * dev)
* Searches for the mapping with the specified offset and copies its information
* into userspace
*/
-int drm_getmap(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_getmap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t __user *argp = (void __user *)arg;
- drm_map_t map;
- drm_map_list_t *r_list = NULL;
+ struct drm_map *map = data;
+ struct drm_map_list *r_list = NULL;
struct list_head *list;
int idx;
int i;
- if (copy_from_user(&map, argp, sizeof(map)))
- return -EFAULT;
- idx = map.offset;
+ idx = map->offset;
mutex_lock(&dev->struct_mutex);
if (idx < 0) {
@@ -201,7 +185,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
i = 0;
list_for_each(list, &dev->maplist) {
if (i == idx) {
- r_list = list_entry(list, drm_map_list_t, head);
+ r_list = list_entry(list, struct drm_map_list, head);
break;
}
i++;
@@ -211,16 +195,14 @@ int drm_getmap(struct inode *inode, struct file *filp,
return -EINVAL;
}
- map.offset = r_list->map->offset;
- map.size = r_list->map->size;
- map.type = r_list->map->type;
- map.flags = r_list->map->flags;
- map.handle = (void *)(unsigned long) r_list->user_token;
- map.mtrr = r_list->map->mtrr;
+ map->offset = r_list->map->offset;
+ map->size = r_list->map->size;
+ map->type = r_list->map->type;
+ map->flags = r_list->map->flags;
+ map->handle = (void *)(unsigned long) r_list->user_token;
+ map->mtrr = r_list->map->mtrr;
mutex_unlock(&dev->struct_mutex);
- if (copy_to_user(argp, &map, sizeof(map)))
- return -EFAULT;
return 0;
}
@@ -228,7 +210,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
* Get client information.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_client structure.
*
@@ -237,82 +219,68 @@ int drm_getmap(struct inode *inode, struct file *filp,
* Searches for the client with the specified index and copies its information
* into userspace
*/
-int drm_getclient(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_getclient(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_client_t __user *argp = (drm_client_t __user *)arg;
- drm_client_t client;
- drm_file_t *pt;
+ struct drm_client *client = data;
+ struct drm_file *pt;
int idx;
int i;
- if (copy_from_user(&client, argp, sizeof(client)))
- return -EFAULT;
- idx = client.idx;
+ idx = client->idx;
mutex_lock(&dev->struct_mutex);
-
- if (list_empty(&dev->filelist)) {
- mutex_unlock(&dev->struct_mutex);
- return -EINVAL;
- }
i = 0;
list_for_each_entry(pt, &dev->filelist, lhead) {
- if (i++ >= idx)
- break;
+ if (i++ >= idx) {
+ client->auth = pt->authenticated;
+ client->pid = pt->pid;
+ client->uid = pt->uid;
+ client->magic = pt->magic;
+ client->iocs = pt->ioctl_count;
+ mutex_unlock(&dev->struct_mutex);
+
+ return 0;
+ }
}
-
- client.auth = pt->authenticated;
- client.pid = pt->pid;
- client.uid = pt->uid;
- client.magic = pt->magic;
- client.iocs = pt->ioctl_count;
mutex_unlock(&dev->struct_mutex);
- if (copy_to_user(argp, &client, sizeof(client)))
- return -EFAULT;
- return 0;
+ return -EINVAL;
}
/**
* Get statistics information.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_stats structure.
*
* \return zero on success or a negative number on failure.
*/
-int drm_getstats(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_getstats(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_stats_t stats;
+ struct drm_stats *stats = data;
int i;
- memset(&stats, 0, sizeof(stats));
+ memset(stats, 0, sizeof(*stats));
mutex_lock(&dev->struct_mutex);
for (i = 0; i < dev->counters; i++) {
if (dev->types[i] == _DRM_STAT_LOCK)
- stats.data[i].value
- = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
+ stats->data[i].value =
+ (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0);
else
- stats.data[i].value = atomic_read(&dev->counts[i]);
- stats.data[i].type = dev->types[i];
+ stats->data[i].value = atomic_read(&dev->counts[i]);
+ stats->data[i].type = dev->types[i];
}
- stats.count = dev->counters;
+ stats->count = dev->counters;
mutex_unlock(&dev->struct_mutex);
- if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats)))
- return -EFAULT;
return 0;
}
@@ -320,39 +288,28 @@ int drm_getstats(struct inode *inode, struct file *filp,
* Setversion ioctl.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Sets the requested interface version
*/
-int drm_setversion(DRM_IOCTL_ARGS)
+int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_set_version_t sv;
- drm_set_version_t retv;
- int if_version;
- drm_set_version_t __user *argp = (void __user *)data;
-
- if (copy_from_user(&sv, argp, sizeof(sv)))
- return -EFAULT;
-
- retv.drm_di_major = DRM_IF_MAJOR;
- retv.drm_di_minor = DRM_IF_MINOR;
- retv.drm_dd_major = dev->driver->major;
- retv.drm_dd_minor = dev->driver->minor;
-
- if (copy_to_user(argp, &retv, sizeof(retv)))
- return -EFAULT;
-
- if (sv.drm_di_major != -1) {
- if (sv.drm_di_major != DRM_IF_MAJOR ||
- sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR)
- return -EINVAL;
- if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor);
+ struct drm_set_version *sv = data;
+ int if_version, retcode = 0;
+
+ if (sv->drm_di_major != -1) {
+ if (sv->drm_di_major != DRM_IF_MAJOR ||
+ sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) {
+ retcode = -EINVAL;
+ goto done;
+ }
+ if_version = DRM_IF_VERSION(sv->drm_di_major,
+ sv->drm_di_minor);
dev->if_version = max(if_version, dev->if_version);
- if (sv.drm_di_minor >= 1) {
+ if (sv->drm_di_minor >= 1) {
/*
* Version 1.1 includes tying of DRM to specific device
*/
@@ -360,20 +317,30 @@ int drm_setversion(DRM_IOCTL_ARGS)
}
}
- if (sv.drm_dd_major != -1) {
- if (sv.drm_dd_major != dev->driver->major ||
- sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor)
- return -EINVAL;
+ if (sv->drm_dd_major != -1) {
+ if (sv->drm_dd_major != dev->driver->major ||
+ sv->drm_dd_minor < 0 || sv->drm_dd_minor >
+ dev->driver->minor) {
+ retcode = -EINVAL;
+ goto done;
+ }
if (dev->driver->set_version)
- dev->driver->set_version(dev, &sv);
+ dev->driver->set_version(dev, sv);
}
- return 0;
+
+done:
+ sv->drm_di_major = DRM_IF_MAJOR;
+ sv->drm_di_minor = DRM_IF_MINOR;
+ sv->drm_dd_major = dev->driver->major;
+ sv->drm_dd_minor = dev->driver->minor;
+
+ return retcode;
}
/** No-op ioctl. */
-int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+int drm_noop(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
DRM_DEBUG("\n");
return 0;
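
The change running through drm_ioctl.c (and the rest of this patch) is the ioctl calling convention: handlers no longer take (inode, filp, cmd, arg) and perform their own user copies, they take (dev, data, file_priv) and, as the removed copy_from_user()/copy_to_user() calls suggest, rely on the core dispatcher to move the argument block in and out. A before/after sketch, using a hypothetical drm_foo ioctl:

/* Old convention: each handler fetched the device and copied its own args. */
int drm_foo_ioctl_old(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	struct drm_foo f;		/* hypothetical argument struct */

	if (copy_from_user(&f, (void __user *)arg, sizeof(f)))
		return -EFAULT;
	/* ... operate on dev and f ... */
	if (copy_to_user((void __user *)arg, &f, sizeof(f)))
		return -EFAULT;
	return 0;
}

/* New convention: the core has already copied the argument into "data"
 * and will copy it back out after the handler returns. */
int drm_foo_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	struct drm_foo *foo = data;

	/* ... operate directly on dev and foo ... */
	return 0;
}
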
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index 88716712..25166b6f 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -41,7 +41,7 @@
* Get interrupt from bus id.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_irq_busid structure.
* \return zero on success or a negative number on failure.
@@ -50,30 +50,24 @@
* This IOCTL is deprecated, and will now return EINVAL for any busid not equal
* to that of the device that this DRM instance attached to.
*/
-int drm_irq_by_busid(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_irq_by_busid(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_irq_busid_t __user *argp = (void __user *)arg;
- drm_irq_busid_t p;
+ struct drm_irq_busid *p = data;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return -EINVAL;
- if (copy_from_user(&p, argp, sizeof(p)))
- return -EFAULT;
-
- if ((p.busnum >> 8) != drm_get_pci_domain(dev) ||
- (p.busnum & 0xff) != dev->pdev->bus->number ||
- p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn))
+ if ((p->busnum >> 8) != drm_get_pci_domain(dev) ||
+ (p->busnum & 0xff) != dev->pdev->bus->number ||
+ p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
return -EINVAL;
- p.irq = dev->irq;
+ p->irq = dev->irq;
+
+ DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
+ p->irq);
- DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq);
- if (copy_to_user(argp, &p, sizeof(p)))
- return -EFAULT;
return 0;
}
@@ -86,7 +80,7 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp,
* \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
* before and after the installation.
*/
-static int drm_irq_install(drm_device_t * dev)
+int drm_irq_install(struct drm_device * dev)
{
int ret;
unsigned long sh_flags = 0;
@@ -146,6 +140,7 @@ static int drm_irq_install(drm_device_t * dev)
return 0;
}
+EXPORT_SYMBOL(drm_irq_install);
/**
* Uninstall the IRQ handler.
@@ -154,7 +149,7 @@ static int drm_irq_install(drm_device_t * dev)
*
* Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq.
*/
-int drm_irq_uninstall(drm_device_t * dev)
+int drm_irq_uninstall(struct drm_device * dev)
{
int irq_enabled;
@@ -185,31 +180,27 @@ EXPORT_SYMBOL(drm_irq_uninstall);
* IRQ control ioctl.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_control structure.
* \return zero on success or a negative number on failure.
*
* Calls irq_install() or irq_uninstall() according to \p arg.
*/
-int drm_control(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_control(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_control_t ctl;
+ struct drm_control *ctl = data;
/* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */
- if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl)))
- return -EFAULT;
- switch (ctl.func) {
+ switch (ctl->func) {
case DRM_INST_HANDLER:
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
return 0;
if (dev->if_version < DRM_IF_VERSION(1, 2) &&
- ctl.irq != dev->irq)
+ ctl->irq != dev->irq)
return -EINVAL;
return drm_irq_install(dev);
case DRM_UNINST_HANDLER:
@@ -225,7 +216,7 @@ int drm_control(struct inode *inode, struct file *filp,
* Wait for VBLANK.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param data user argument, pointing to a drm_wait_vblank structure.
* \return zero on success or a negative number on failure.
@@ -240,12 +231,9 @@ int drm_control(struct inode *inode, struct file *filp,
*
* If a signal is not requested, then calls vblank_wait().
*/
-int drm_wait_vblank(DRM_IOCTL_ARGS)
+int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_wait_vblank_t __user *argp = (void __user *)data;
- drm_wait_vblank_t vblwait;
+ union drm_wait_vblank *vblwait = data;
struct timeval now;
int ret = 0;
unsigned int flags, seq;
@@ -253,18 +241,15 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if ((!dev->irq) || (!dev->irq_enabled))
return -EINVAL;
- if (copy_from_user(&vblwait, argp, sizeof(vblwait)))
- return -EFAULT;
-
- if (vblwait.request.type &
+ if (vblwait->request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
- vblwait.request.type,
+ vblwait->request.type,
(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
return -EINVAL;
}
- flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK;
+ flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
@@ -273,10 +258,10 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
: &dev->vbl_received);
- switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) {
+ switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
- vblwait.request.sequence += seq;
- vblwait.request.type &= ~_DRM_VBLANK_RELATIVE;
+ vblwait->request.sequence += seq;
+ vblwait->request.type &= ~_DRM_VBLANK_RELATIVE;
case _DRM_VBLANK_ABSOLUTE:
break;
default:
@@ -284,15 +269,15 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
}
if ((flags & _DRM_VBLANK_NEXTONMISS) &&
- (seq - vblwait.request.sequence) <= (1<<23)) {
- vblwait.request.sequence = seq + 1;
+ (seq - vblwait->request.sequence) <= (1<<23)) {
+ vblwait->request.sequence = seq + 1;
}
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
- drm_vbl_sig_t *vbl_sig;
+ struct drm_vbl_sig *vbl_sig;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -301,12 +286,13 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
* that case
*/
list_for_each_entry(vbl_sig, vbl_sigs, head) {
- if (vbl_sig->sequence == vblwait.request.sequence
- && vbl_sig->info.si_signo == vblwait.request.signal
+ if (vbl_sig->sequence == vblwait->request.sequence
+ && vbl_sig->info.si_signo ==
+ vblwait->request.signal
&& vbl_sig->task == current) {
spin_unlock_irqrestore(&dev->vbl_lock,
irqflags);
- vblwait.reply.sequence = seq;
+ vblwait->reply.sequence = seq;
goto done;
}
}
@@ -322,14 +308,14 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (!
(vbl_sig =
- drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) {
+ drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
return -ENOMEM;
}
memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
- vbl_sig->sequence = vblwait.request.sequence;
- vbl_sig->info.si_signo = vblwait.request.signal;
+ vbl_sig->sequence = vblwait->request.sequence;
+ vbl_sig->info.si_signo = vblwait->request.signal;
vbl_sig->task = current;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -338,25 +324,22 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- vblwait.reply.sequence = seq;
+ vblwait->reply.sequence = seq;
} else {
if (flags & _DRM_VBLANK_SECONDARY) {
if (dev->driver->vblank_wait2)
- ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence);
+ ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
} else if (dev->driver->vblank_wait)
ret =
dev->driver->vblank_wait(dev,
- &vblwait.request.sequence);
+ &vblwait->request.sequence);
do_gettimeofday(&now);
- vblwait.reply.tval_sec = now.tv_sec;
- vblwait.reply.tval_usec = now.tv_usec;
+ vblwait->reply.tval_sec = now.tv_sec;
+ vblwait->reply.tval_usec = now.tv_usec;
}
done:
- if (copy_to_user(argp, &vblwait, sizeof(vblwait)))
- return -EFAULT;
-
return ret;
}
@@ -369,7 +352,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
*
* If a signal is not requested, then calls vblank_wait().
*/
-void drm_vbl_send_signals(drm_device_t * dev)
+void drm_vbl_send_signals(struct drm_device * dev)
{
unsigned long flags;
int i;
@@ -377,7 +360,7 @@ void drm_vbl_send_signals(drm_device_t * dev)
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
- drm_vbl_sig_t *vbl_sig, *tmp;
+ struct drm_vbl_sig *vbl_sig, *tmp;
struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
@@ -413,7 +396,7 @@ EXPORT_SYMBOL(drm_vbl_send_signals);
*/
static void drm_locked_tasklet_func(unsigned long data)
{
- drm_device_t *dev = (drm_device_t*)data;
+ struct drm_device *dev = (struct drm_device*)data;
unsigned long irqflags;
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
@@ -450,7 +433,7 @@ static void drm_locked_tasklet_func(unsigned long data)
* context, it must not make any assumptions about this. Also, the HW lock will
* be held with the kernel context or any client context.
*/
-void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*))
+void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*))
{
unsigned long irqflags;
static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
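
drm_irq_install() loses its static qualifier and gains an EXPORT_SYMBOL(), presumably so that modesetting paths can bring interrupts up without going through DRM_IOCTL_CONTROL. A hedged sketch of what driver-side use could look like (my_driver_enable_irqs is illustrative, not part of this patch):

/* Illustrative driver-side helper; not part of this patch. */
static int my_driver_enable_irqs(struct drm_device *dev)
{
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	ret = drm_irq_install(dev);
	if (ret)
		DRM_ERROR("IRQ handler installation failed: %d\n", ret);

	return ret;
}
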
diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c
index f02df36b..b8e4a5d9 100644
--- a/linux-core/drm_lock.c
+++ b/linux-core/drm_lock.c
@@ -41,39 +41,33 @@ static int drm_notifier(void *priv);
* Lock ioctl.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Add the current task to the lock wait queue, and attempt to take to lock.
*/
-int drm_lock(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
DECLARE_WAITQUEUE(entry, current);
- drm_lock_t lock;
+ struct drm_lock *lock = data;
int ret = 0;
- ++priv->lock_count;
+ ++file_priv->lock_count;
- if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
- return -EFAULT;
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
+ if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
- current->pid, lock.context);
+ current->pid, lock->context);
return -EINVAL;
}
DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
- lock.context, current->pid,
- dev->lock.hw_lock->lock, lock.flags);
+ lock->context, current->pid,
+ dev->lock.hw_lock->lock, lock->flags);
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
- if (lock.context < 0)
+ if (lock->context < 0)
return -EINVAL;
add_wait_queue(&dev->lock.lock_queue, &entry);
@@ -87,8 +81,8 @@ int drm_lock(struct inode *inode, struct file *filp,
ret = -EINTR;
break;
}
- if (drm_lock_take(&dev->lock, lock.context)) {
- dev->lock.filp = filp;
+ if (drm_lock_take(&dev->lock, lock->context)) {
+ dev->lock.file_priv = file_priv;
dev->lock.lock_time = jiffies;
atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
break; /* Got lock */
@@ -107,7 +101,8 @@ int drm_lock(struct inode *inode, struct file *filp,
__set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->lock.lock_queue, &entry);
- DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" );
+ DRM_DEBUG("%d %s\n", lock->context,
+ ret ? "interrupted" : "has lock");
if (ret) return ret;
sigemptyset(&dev->sigmask);
@@ -115,24 +110,26 @@ int drm_lock(struct inode *inode, struct file *filp,
sigaddset(&dev->sigmask, SIGTSTP);
sigaddset(&dev->sigmask, SIGTTIN);
sigaddset(&dev->sigmask, SIGTTOU);
- dev->sigdata.context = lock.context;
+ dev->sigdata.context = lock->context;
dev->sigdata.lock = dev->lock.hw_lock;
block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
- if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY))
+ if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
dev->driver->dma_ready(dev);
- if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) {
+ if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
+ {
if (dev->driver->dma_quiescent(dev)) {
- DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context);
- return DRM_ERR(EBUSY);
+ DRM_DEBUG("%d waiting for DMA quiescent\n",
+ lock->context);
+ return -EBUSY;
}
}
if (dev->driver->kernel_context_switch &&
- dev->last_context != lock.context) {
+ dev->last_context != lock->context) {
dev->driver->kernel_context_switch(dev, dev->last_context,
- lock.context);
+ lock->context);
}
return 0;
@@ -142,27 +139,21 @@ int drm_lock(struct inode *inode, struct file *filp,
* Unlock ioctl.
*
* \param inode device inode.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param cmd command.
* \param arg user argument, pointing to a drm_lock structure.
* \return zero on success or negative number on failure.
*
* Transfer and free the lock.
*/
-int drm_unlock(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_lock_t lock;
+ struct drm_lock *lock = data;
unsigned long irqflags;
- if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock)))
- return -EFAULT;
-
- if (lock.context == DRM_KERNEL_CONTEXT) {
+ if (lock->context == DRM_KERNEL_CONTEXT) {
DRM_ERROR("Process %d using kernel context %d\n",
- current->pid, lock.context);
+ current->pid, lock->context);
return -EINVAL;
}
@@ -184,7 +175,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
if (dev->driver->kernel_context_switch_unlock)
dev->driver->kernel_context_switch_unlock(dev);
else {
- if (drm_lock_free(&dev->lock,lock.context)) {
+ if (drm_lock_free(&dev->lock,lock->context)) {
/* FIXME: Should really bail out here. */
}
}
@@ -202,7 +193,7 @@ int drm_unlock(struct inode *inode, struct file *filp,
*
* Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/
-int drm_lock_take(drm_lock_data_t *lock_data,
+int drm_lock_take(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
@@ -252,13 +243,13 @@ int drm_lock_take(drm_lock_data_t *lock_data,
* Resets the lock file pointer.
* Marks the lock as held by the given context, via the \p cmpxchg instruction.
*/
-static int drm_lock_transfer(drm_lock_data_t *lock_data,
+static int drm_lock_transfer(struct drm_lock_data *lock_data,
unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
- lock_data->filp = NULL;
+ lock_data->file_priv = NULL;
do {
old = *lock;
new = context | _DRM_LOCK_HELD;
@@ -278,7 +269,7 @@ static int drm_lock_transfer(drm_lock_data_t *lock_data,
* Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task
* waiting on the lock queue.
*/
-int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
+int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
{
unsigned int old, new, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -320,7 +311,7 @@ int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context)
*/
static int drm_notifier(void *priv)
{
- drm_sigdata_t *s = (drm_sigdata_t *) priv;
+ struct drm_sigdata *s = (struct drm_sigdata *) priv;
unsigned int old, new, prev;
/* Allow signal delivery if lock isn't held */
@@ -351,7 +342,7 @@ static int drm_notifier(void *priv)
* having to worry about starvation.
*/
-void drm_idlelock_take(drm_lock_data_t *lock_data)
+void drm_idlelock_take(struct drm_lock_data *lock_data)
{
int ret = 0;
@@ -370,7 +361,7 @@ void drm_idlelock_take(drm_lock_data_t *lock_data)
}
EXPORT_SYMBOL(drm_idlelock_take);
-void drm_idlelock_release(drm_lock_data_t *lock_data)
+void drm_idlelock_release(struct drm_lock_data *lock_data)
{
unsigned int old, prev;
volatile unsigned int *lock = &lock_data->hw_lock->lock;
@@ -391,13 +382,12 @@ void drm_idlelock_release(drm_lock_data_t *lock_data)
EXPORT_SYMBOL(drm_idlelock_release);
-int drm_i_have_hw_lock(struct file *filp)
+int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
{
- DRM_DEVICE;
- return (priv->lock_count && dev->lock.hw_lock &&
+ return (file_priv->lock_count && dev->lock.hw_lock &&
_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
- dev->lock.filp == filp);
+ dev->lock.file_priv == file_priv);
}
EXPORT_SYMBOL(drm_i_have_hw_lock);
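
With the lock owner now recorded as a struct drm_file * instead of a struct file *, a driver path that must hold the hardware lock would check ownership roughly as in the sketch below (my_driver_do_dma is an illustrative name; real drivers usually hide this behind a helper macro):

/* Illustrative handler; shows only the new ownership check. */
static int my_driver_do_dma(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	if (!drm_i_have_hw_lock(dev, file_priv)) {
		DRM_ERROR("called without holding the hardware lock\n");
		return -EINVAL;
	}

	/* ... safe to touch the ring / queue DMA here ... */
	return 0;
}
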
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index b1423c12..f68a3a3e 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -38,9 +38,9 @@
static struct {
spinlock_t lock;
- drm_u64_t cur_used;
- drm_u64_t low_threshold;
- drm_u64_t high_threshold;
+ uint64_t cur_used;
+ uint64_t low_threshold;
+ uint64_t high_threshold;
} drm_memctl = {
.lock = SPIN_LOCK_UNLOCKED
};
@@ -82,9 +82,9 @@ void drm_free_memctl(size_t size)
}
EXPORT_SYMBOL(drm_free_memctl);
-void drm_query_memctl(drm_u64_t *cur_used,
- drm_u64_t *low_threshold,
- drm_u64_t *high_threshold)
+void drm_query_memctl(uint64_t *cur_used,
+ uint64_t *low_threshold,
+ uint64_t *high_threshold)
{
spin_lock(&drm_memctl.lock);
*cur_used = drm_memctl.cur_used;
@@ -214,7 +214,7 @@ void drm_free_pages(unsigned long address, int order, int area)
#if __OS_HAS_AGP
static void *agp_remap(unsigned long offset, unsigned long size,
- drm_device_t * dev)
+ struct drm_device * dev)
{
unsigned long *phys_addr_map, i, num_pages =
PAGE_ALIGN(size) / PAGE_SIZE;
@@ -258,12 +258,12 @@ static void *agp_remap(unsigned long offset, unsigned long size,
/** Wrapper around agp_allocate_memory() */
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11)
-DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
{
return drm_agp_allocate_memory(pages, type);
}
#else
-DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
{
return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
}
@@ -289,7 +289,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
#else /* __OS_HAS_AGP*/
static void *agp_remap(unsigned long offset, unsigned long size,
- drm_device_t * dev)
+ struct drm_device * dev)
{
return NULL;
}
diff --git a/linux-core/drm_memory_debug.c b/linux-core/drm_memory_debug.c
index c124f8f8..c196ee2b 100644
--- a/linux-core/drm_memory_debug.c
+++ b/linux-core/drm_memory_debug.c
@@ -291,7 +291,7 @@ void drm_free_pages(unsigned long address, int order, int area)
#if __OS_HAS_AGP
-DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type)
+DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type)
{
DRM_AGP_MEM *handle;
diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h
index 9d0dedfb..b055ac00 100644
--- a/linux-core/drm_memory_debug.h
+++ b/linux-core/drm_memory_debug.h
@@ -277,7 +277,7 @@ void drm_free_pages (unsigned long address, int order, int area) {
#if __OS_HAS_AGP
-DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) {
+DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) {
DRM_AGP_MEM *handle;
if (!pages) {
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index 2caf596b..cf0d92fa 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -44,26 +44,26 @@
#include "drmP.h"
#include <linux/slab.h>
-unsigned long drm_mm_tail_space(drm_mm_t *mm)
+unsigned long drm_mm_tail_space(struct drm_mm *mm)
{
struct list_head *tail_node;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return 0;
return entry->size;
}
-int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
+int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free)
return -ENOMEM;
@@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size)
}
-static int drm_mm_create_tail_node(drm_mm_t *mm,
+static int drm_mm_create_tail_node(struct drm_mm *mm,
unsigned long start,
unsigned long size)
{
- drm_mm_node_t *child;
+ struct drm_mm_node *child;
- child = (drm_mm_node_t *)
+ child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return -ENOMEM;
@@ -98,13 +98,13 @@ static int drm_mm_create_tail_node(drm_mm_t *mm,
}
-int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
{
struct list_head *tail_node;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
tail_node = mm->ml_entry.prev;
- entry = list_entry(tail_node, drm_mm_node_t, ml_entry);
+ entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
if (!entry->free) {
return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
}
@@ -112,12 +112,12 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size)
return 0;
}
-static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
+static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
unsigned long size)
{
- drm_mm_node_t *child;
+ struct drm_mm_node *child;
- child = (drm_mm_node_t *)
+ child = (struct drm_mm_node *)
drm_ctl_alloc(sizeof(*child), DRM_MEM_MM);
if (!child)
return NULL;
@@ -137,12 +137,12 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent,
return child;
}
-drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
+struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
unsigned long size, unsigned alignment)
{
- drm_mm_node_t *align_splitoff = NULL;
- drm_mm_node_t *child;
+ struct drm_mm_node *align_splitoff = NULL;
+ struct drm_mm_node *child;
unsigned tmp = 0;
if (alignment)
@@ -173,26 +173,26 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent,
* Otherwise add to the free stack.
*/
-void drm_mm_put_block(drm_mm_node_t * cur)
+void drm_mm_put_block(struct drm_mm_node * cur)
{
- drm_mm_t *mm = cur->mm;
+ struct drm_mm *mm = cur->mm;
struct list_head *cur_head = &cur->ml_entry;
struct list_head *root_head = &mm->ml_entry;
- drm_mm_node_t *prev_node = NULL;
- drm_mm_node_t *next_node;
+ struct drm_mm_node *prev_node = NULL;
+ struct drm_mm_node *next_node;
int merged = 0;
if (cur_head->prev != root_head) {
- prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry);
+ prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
if (prev_node->free) {
prev_node->size += cur->size;
merged = 1;
}
}
if (cur_head->next != root_head) {
- next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry);
+ next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
if (next_node->free) {
if (merged) {
prev_node->size += next_node->size;
@@ -217,14 +217,14 @@ void drm_mm_put_block(drm_mm_node_t * cur)
}
EXPORT_SYMBOL(drm_mm_put_block);
-drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
unsigned long size,
unsigned alignment, int best_match)
{
struct list_head *list;
const struct list_head *free_stack = &mm->fl_entry;
- drm_mm_node_t *entry;
- drm_mm_node_t *best;
+ struct drm_mm_node *entry;
+ struct drm_mm_node *best;
unsigned long best_size;
unsigned wasted;
@@ -232,7 +232,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
best_size = ~0UL;
list_for_each(list, free_stack) {
- entry = list_entry(list, drm_mm_node_t, fl_entry);
+ entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
if (entry->size < size)
@@ -258,14 +258,14 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm,
return best;
}
-int drm_mm_clean(drm_mm_t * mm)
+int drm_mm_clean(struct drm_mm * mm)
{
struct list_head *head = &mm->ml_entry;
return (head->next->next == head);
}
-int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
+int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
{
INIT_LIST_HEAD(&mm->ml_entry);
INIT_LIST_HEAD(&mm->fl_entry);
@@ -275,12 +275,12 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size)
EXPORT_SYMBOL(drm_mm_init);
-void drm_mm_takedown(drm_mm_t * mm)
+void drm_mm_takedown(struct drm_mm * mm)
{
struct list_head *bnode = mm->fl_entry.next;
- drm_mm_node_t *entry;
+ struct drm_mm_node *entry;
- entry = list_entry(bnode, drm_mm_node_t, fl_entry);
+ entry = list_entry(bnode, struct drm_mm_node, fl_entry);
if (entry->ml_entry.next != &mm->ml_entry ||
entry->fl_entry.next != &mm->fl_entry) {
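
drm_mm keeps its behaviour here; only the drm_mm_t/drm_mm_node_t typedefs go away. For reference, a minimal, hedged use of the renamed allocator API (my_mm_example and the 1024/256 figures are arbitrary illustrations):

/* Illustrative sequence; sizes and alignment are arbitrary and use the
 * same units the caller manages the range in. */
static int my_mm_example(struct drm_mm *mm)
{
	struct drm_mm_node *hole, *node;
	int ret;

	ret = drm_mm_init(mm, 0, 1024);		/* manage [0, 1024) */
	if (ret)
		return ret;

	/* Find a free hole large enough, then carve a block out of it. */
	hole = drm_mm_search_free(mm, 256, 0, 1 /* best_match */);
	node = hole ? drm_mm_get_block(hole, 256, 0) : NULL;
	if (!node) {
		drm_mm_takedown(mm);
		return -ENOMEM;
	}

	/* ... node->start .. node->start + node->size is now owned ... */

	drm_mm_put_block(node);			/* return the range */
	drm_mm_takedown(mm);
	return 0;
}
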
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index 567a7d2b..3d866333 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -30,10 +30,10 @@
#include "drmP.h"
-int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
int shareable)
{
- drm_device_t *dev = priv->head->dev;
+ struct drm_device *dev = priv->head->dev;
int ret;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
@@ -51,12 +51,12 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
return 0;
}
-drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
+struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
{
- drm_device_t *dev = priv->head->dev;
- drm_hash_item_t *hash;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_hash_item *hash;
int ret;
- drm_user_object_t *item;
+ struct drm_user_object *item;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
@@ -64,10 +64,10 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
if (ret) {
return NULL;
}
- item = drm_hash_entry(hash, drm_user_object_t, hash);
+ item = drm_hash_entry(hash, struct drm_user_object, hash);
if (priv != item->owner) {
- drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE];
+ struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE];
ret = drm_ht_find_item(ht, (unsigned long)item, &hash);
if (ret) {
DRM_ERROR("Object not registered for usage\n");
@@ -77,9 +77,9 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
return item;
}
-static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
+static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
{
- drm_device_t *dev = priv->head->dev;
+ struct drm_device *dev = priv->head->dev;
int ret;
if (atomic_dec_and_test(&item->refcount)) {
@@ -90,7 +90,7 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
}
}
-int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
+int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item)
{
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
@@ -105,8 +105,8 @@ int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
return 0;
}
-static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
- drm_ref_t action)
+static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro,
+ enum drm_ref_type action)
{
int ret = 0;
@@ -124,12 +124,12 @@ static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro,
return ret;
}
-int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
- drm_ref_t ref_action)
+int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object,
+ enum drm_ref_type ref_action)
{
int ret = 0;
- drm_ref_object_t *item;
- drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
+ struct drm_ref_object *item;
+ struct drm_open_hash *ht = &priv->refd_object_hash[ref_action];
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
if (!referenced_object->shareable && priv != referenced_object->owner) {
@@ -181,11 +181,11 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
return ret;
}
-drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
- drm_user_object_t * referenced_object,
- drm_ref_t ref_action)
+struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
+ struct drm_user_object * referenced_object,
+ enum drm_ref_type ref_action)
{
- drm_hash_item_t *hash;
+ struct drm_hash_item *hash;
int ret;
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
@@ -194,32 +194,32 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
if (ret)
return NULL;
- return drm_hash_entry(hash, drm_ref_object_t, hash);
+ return drm_hash_entry(hash, struct drm_ref_object, hash);
}
-static void drm_remove_other_references(drm_file_t * priv,
- drm_user_object_t * ro)
+static void drm_remove_other_references(struct drm_file * priv,
+ struct drm_user_object * ro)
{
int i;
- drm_open_hash_t *ht;
- drm_hash_item_t *hash;
- drm_ref_object_t *item;
+ struct drm_open_hash *ht;
+ struct drm_hash_item *hash;
+ struct drm_ref_object *item;
for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) {
ht = &priv->refd_object_hash[i];
while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) {
- item = drm_hash_entry(hash, drm_ref_object_t, hash);
+ item = drm_hash_entry(hash, struct drm_ref_object, hash);
drm_remove_ref_object(priv, item);
}
}
}
-void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
+void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item)
{
int ret;
- drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key;
- drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
- drm_ref_t unref_action;
+ struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
+ struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action];
+ enum drm_ref_type unref_action;
DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
unref_action = item->unref_action;
@@ -244,12 +244,12 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
}
-int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
- drm_object_type_t type, drm_user_object_t ** object)
+int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
+ enum drm_object_type type, struct drm_user_object ** object)
{
- drm_device_t *dev = priv->head->dev;
- drm_user_object_t *uo;
- drm_hash_item_t *hash;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_user_object *uo;
+ struct drm_hash_item *hash;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -258,7 +258,7 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
DRM_ERROR("Could not find user object to reference.\n");
goto out_err;
}
- uo = drm_hash_entry(hash, drm_user_object_t, hash);
+ uo = drm_hash_entry(hash, struct drm_user_object, hash);
if (uo->type != type) {
ret = -EINVAL;
goto out_err;
@@ -274,12 +274,12 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
return ret;
}
-int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
- drm_object_type_t type)
+int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
+ enum drm_object_type type)
{
- drm_device_t *dev = priv->head->dev;
- drm_user_object_t *uo;
- drm_ref_object_t *ro;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_user_object *uo;
+ struct drm_ref_object *ro;
int ret;
mutex_lock(&dev->struct_mutex);
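
The user-object helpers keep their locking rules (struct_mutex held) and swap drm_ref_t for enum drm_ref_type. A hedged sketch of looking up an object by handle and taking a usage reference, loosely following what drm_user_object_ref() itself does (my_ref_object is an illustrative name):

/* Illustrative helper; mirrors the lookup/ref pattern in drm_object.c. */
static int my_ref_object(struct drm_file *priv, uint32_t handle)
{
	struct drm_device *dev = priv->head->dev;
	struct drm_user_object *uo;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	uo = drm_lookup_user_object(priv, handle);
	if (!uo) {
		ret = -EINVAL;
		goto out;
	}
	ret = drm_add_ref_object(priv, uo, _DRM_REF_USE);
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
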
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index e0716aa4..2ddbe74b 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -39,14 +39,14 @@ struct drm_device;
#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
-typedef enum {
+enum drm_object_type {
drm_fence_type,
drm_buffer_type,
drm_ttm_type
/*
* Add other user space object types here.
*/
-} drm_object_type_t;
+};
/*
* A user object is a structure that helps the drm give out user handles
@@ -55,20 +55,20 @@ typedef enum {
* Designed to be accessible using a user space 32-bit handle.
*/
-typedef struct drm_user_object {
- drm_hash_item_t hash;
+struct drm_user_object {
+ struct drm_hash_item hash;
struct list_head list;
- drm_object_type_t type;
+ enum drm_object_type type;
atomic_t refcount;
int shareable;
- drm_file_t *owner;
- void (*ref_struct_locked) (drm_file_t * priv,
+ struct drm_file *owner;
+ void (*ref_struct_locked) (struct drm_file * priv,
struct drm_user_object * obj,
- drm_ref_t ref_action);
- void (*unref) (drm_file_t * priv, struct drm_user_object * obj,
- drm_ref_t unref_action);
- void (*remove) (drm_file_t * priv, struct drm_user_object * obj);
-} drm_user_object_t;
+ enum drm_ref_type ref_action);
+ void (*unref) (struct drm_file * priv, struct drm_user_object * obj,
+ enum drm_ref_type unref_action);
+ void (*remove) (struct drm_file * priv, struct drm_user_object * obj);
+};
/*
* A ref object is a structure which is used to
@@ -77,24 +77,24 @@ typedef struct drm_user_object {
* process exits. Designed to be accessible using a pointer to the _user_ object.
*/
-typedef struct drm_ref_object {
- drm_hash_item_t hash;
+struct drm_ref_object {
+ struct drm_hash_item hash;
struct list_head list;
atomic_t refcount;
- drm_ref_t unref_action;
-} drm_ref_object_t;
+ enum drm_ref_type unref_action;
+};
/**
* Must be called with the struct_mutex held.
*/
-extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
+extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
int shareable);
/**
* Must be called with the struct_mutex held.
*/
-extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
+extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv,
uint32_t key);
/*
@@ -104,23 +104,23 @@ extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv,
* This function may temporarily release the struct_mutex.
*/
-extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item);
+extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item);
/*
* Must be called with the struct_mutex held. May temporarily release it.
*/
-extern int drm_add_ref_object(drm_file_t * priv,
- drm_user_object_t * referenced_object,
- drm_ref_t ref_action);
+extern int drm_add_ref_object(struct drm_file * priv,
+ struct drm_user_object * referenced_object,
+ enum drm_ref_type ref_action);
/*
* Must be called with the struct_mutex held.
*/
-drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
- drm_user_object_t * referenced_object,
- drm_ref_t ref_action);
+struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
+ struct drm_user_object * referenced_object,
+ enum drm_ref_type ref_action);
/*
* Must be called with the struct_mutex held.
* If "item" has been obtained by a call to drm_lookup_ref_object. You may not
@@ -128,19 +128,19 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
* This function may temporarily release the struct_mutex.
*/
-extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item);
-extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token,
- drm_object_type_t type,
- drm_user_object_t ** object);
-extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
- drm_object_type_t type);
+extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item);
+extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
+ enum drm_object_type type,
+ struct drm_user_object ** object);
+extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
+ enum drm_object_type type);
/***************************************************
* Fence objects. (drm_fence.c)
*/
-typedef struct drm_fence_object {
- drm_user_object_t base;
+struct drm_fence_object {
+ struct drm_user_object base;
struct drm_device *dev;
atomic_t usage;
@@ -156,29 +156,29 @@ typedef struct drm_fence_object {
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
-} drm_fence_object_t;
+};
#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00
-typedef struct drm_fence_class_manager {
+struct drm_fence_class_manager {
struct list_head ring;
uint32_t pending_flush;
wait_queue_head_t fence_queue;
int pending_exe_flush;
uint32_t last_exe_flush;
uint32_t exe_flush_sequence;
-} drm_fence_class_manager_t;
+};
-typedef struct drm_fence_manager {
+struct drm_fence_manager {
int initialized;
rwlock_t lock;
- drm_fence_class_manager_t class[_DRM_FENCE_CLASSES];
+ struct drm_fence_class_manager class[_DRM_FENCE_CLASSES];
uint32_t num_classes;
atomic_t count;
-} drm_fence_manager_t;
+};
-typedef struct drm_fence_driver {
+struct drm_fence_driver {
uint32_t num_classes;
uint32_t wrap_diff;
uint32_t flush_diff;
@@ -189,7 +189,7 @@ typedef struct drm_fence_driver {
int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags,
uint32_t * breadcrumb, uint32_t * native_type);
void (*poke_flush) (struct drm_device * dev, uint32_t class);
-} drm_fence_driver_t;
+};
extern void drm_fence_handler(struct drm_device *dev, uint32_t class,
uint32_t sequence, uint32_t type);
@@ -197,23 +197,40 @@ extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
uint32_t sequence);
-extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type);
-extern int drm_fence_object_signaled(drm_fence_object_t * fence,
+extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type);
+extern int drm_fence_object_signaled(struct drm_fence_object * fence,
uint32_t type, int flush);
-extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence);
-extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence);
+extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence);
+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
struct drm_fence_object *src);
-extern int drm_fence_object_wait(drm_fence_object_t * fence,
+extern int drm_fence_object_wait(struct drm_fence_object * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t class,
- drm_fence_object_t ** c_fence);
-extern int drm_fence_add_user_object(drm_file_t * priv,
- drm_fence_object_t * fence, int shareable);
-extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
-
+ struct drm_fence_object ** c_fence);
+extern int drm_fence_add_user_object(struct drm_file * priv,
+ struct drm_fence_object * fence, int shareable);
+
+extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/**************************************************
*TTMs
*/
@@ -235,7 +252,7 @@ extern int drm_fence_ioctl(DRM_IOCTL_ARGS);
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
struct drm_ttm_backend;
-typedef struct drm_ttm_backend_func {
+struct drm_ttm_backend_func {
int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
int (*populate) (struct drm_ttm_backend * backend,
unsigned long num_pages, struct page ** pages);
@@ -244,16 +261,16 @@ typedef struct drm_ttm_backend_func {
unsigned long offset, int cached);
int (*unbind) (struct drm_ttm_backend * backend);
void (*destroy) (struct drm_ttm_backend * backend);
-} drm_ttm_backend_func_t;
+};
-typedef struct drm_ttm_backend {
+struct drm_ttm_backend {
uint32_t flags;
int mem_type;
- drm_ttm_backend_func_t *func;
-} drm_ttm_backend_t;
+ struct drm_ttm_backend_func *func;
+};
-typedef struct drm_ttm {
+struct drm_ttm {
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
@@ -262,7 +279,7 @@ typedef struct drm_ttm {
struct drm_device *dev;
int destroy;
uint32_t mapping_offset;
- drm_ttm_backend_t *be;
+ struct drm_ttm_backend *be;
enum {
ttm_bound,
ttm_evicted,
@@ -270,14 +287,14 @@ typedef struct drm_ttm {
ttm_unpopulated,
} state;
-} drm_ttm_t;
+};
-extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
-extern void drm_ttm_unbind(drm_ttm_t * ttm);
-extern void drm_ttm_evict(drm_ttm_t * ttm);
-extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
-extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
+extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset);
+extern void drm_ttm_unbind(struct drm_ttm * ttm);
+extern void drm_ttm_evict(struct drm_ttm * ttm);
+extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
+extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -285,7 +302,7 @@ extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
* when the last vma exits.
*/
-extern int drm_destroy_ttm(drm_ttm_t * ttm);
+extern int drm_destroy_ttm(struct drm_ttm * ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
@@ -308,19 +325,19 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm);
* Buffer objects. (drm_bo.c, drm_bo_move.c)
*/
-typedef struct drm_bo_mem_reg {
- drm_mm_node_t *mm_node;
+struct drm_bo_mem_reg {
+ struct drm_mm_node *mm_node;
unsigned long size;
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
- uint32_t flags;
- uint32_t mask;
-} drm_bo_mem_reg_t;
+ uint64_t flags;
+ uint64_t mask;
+};
-typedef struct drm_buffer_object {
+struct drm_buffer_object {
struct drm_device *dev;
- drm_user_object_t base;
+ struct drm_user_object base;
/*
* If there is a possibility that the usage variable is zero,
@@ -329,30 +346,31 @@ typedef struct drm_buffer_object {
atomic_t usage;
unsigned long buffer_start;
- drm_bo_type_t type;
+ enum drm_bo_type type;
unsigned long offset;
atomic_t mapped;
- drm_bo_mem_reg_t mem;
+ struct drm_bo_mem_reg mem;
struct list_head lru;
struct list_head ddestroy;
uint32_t fence_type;
uint32_t fence_class;
- drm_fence_object_t *fence;
+ struct drm_fence_object *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
struct mutex mutex;
/* For pinned buffers */
- drm_mm_node_t *pinned_node;
+ int pinned;
+ struct drm_mm_node *pinned_node;
uint32_t pinned_mem_type;
struct list_head pinned_lru;
/* For vm */
- drm_ttm_t *ttm;
- drm_map_list_t map_list;
+ struct drm_ttm *ttm;
+ struct drm_map_list map_list;
uint32_t memory_type;
unsigned long bus_offset;
uint32_t vm_flags;
@@ -364,15 +382,15 @@ typedef struct drm_buffer_object {
struct list_head p_mm_list;
#endif
-} drm_buffer_object_t;
+};
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED 0x00000002
-typedef struct drm_mem_type_manager {
+struct drm_mem_type_manager {
int has_type;
int use_type;
- drm_mm_t manager;
+ struct drm_mm manager;
struct list_head lru;
struct list_head pinned;
uint32_t flags;
@@ -380,7 +398,7 @@ typedef struct drm_mem_type_manager {
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
-} drm_mem_type_manager_t;
+};
#define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */
@@ -390,13 +408,13 @@ typedef struct drm_mem_type_manager {
#define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
-typedef struct drm_buffer_manager {
+struct drm_buffer_manager {
struct mutex init_mutex;
struct mutex evict_mutex;
int nice_mode;
int initialized;
- drm_file_t *last_to_validate;
- drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
+ struct drm_file *last_to_validate;
+ struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
struct list_head unfenced;
struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
@@ -407,81 +425,104 @@ typedef struct drm_buffer_manager {
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
-} drm_buffer_manager_t;
+};
-typedef struct drm_bo_driver {
+struct drm_bo_driver {
const uint32_t *mem_type_prio;
const uint32_t *mem_busy_prio;
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
- drm_ttm_backend_t *(*create_ttm_backend_entry)
+ struct drm_ttm_backend *(*create_ttm_backend_entry)
(struct drm_device * dev);
- int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type);
- int (*invalidate_caches) (struct drm_device * dev, uint32_t flags);
+ int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type);
+ int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
int (*init_mem_type) (struct drm_device * dev, uint32_t type,
- drm_mem_type_manager_t * man);
+ struct drm_mem_type_manager * man);
uint32_t(*evict_mask) (struct drm_buffer_object *bo);
int (*move) (struct drm_buffer_object * bo,
int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
-} drm_bo_driver_t;
+};
/*
* buffer objects (drm_bo.c)
*/
-extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
- unsigned long p_offset, unsigned long p_size);
-extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
- drm_bo_type_t type, uint32_t mask,
- uint32_t hint, uint32_t page_alignment,
- unsigned long buffer_start,
- drm_buffer_object_t ** buf_obj);
-extern int drm_bo_ioctl(DRM_IOCTL_ARGS);
-extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS);
+extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
- drm_bo_mem_reg_t * mem,
+ struct drm_bo_mem_reg * mem,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
-extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
-extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo);
-extern int drm_fence_buffer_objects(drm_file_t * priv,
+extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
+extern int drm_fence_buffer_objects(struct drm_file * priv,
struct list_head *list,
uint32_t fence_flags,
- drm_fence_object_t * fence,
- drm_fence_object_t ** used_fence);
-extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
-extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+ struct drm_fence_object * fence,
+ struct drm_fence_object ** used_fence);
+extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
+extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
int no_wait);
-extern int drm_bo_mem_space(drm_buffer_object_t * bo,
- drm_bo_mem_reg_t * mem, int no_wait);
-extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
+extern int drm_bo_mem_space(struct drm_buffer_object * bo,
+ struct drm_bo_mem_reg * mem, int no_wait);
+extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
int no_wait, int move_unfenced);
+extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
+ enum drm_bo_type type, uint64_t mask,
+ uint32_t hint, uint32_t page_alignment,
+ unsigned long buffer_start,
+ struct drm_buffer_object **bo);
+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
+ unsigned long p_offset, unsigned long p_size);
extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
+extern int drm_bo_add_user_object(struct drm_file *file_priv,
+ struct drm_buffer_object *bo, int sharable);
+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
+extern int drm_bo_set_pin(struct drm_device *dev,
+ struct drm_buffer_object *bo, int pin);
/*
* Buffer object memory move helpers.
* drm_bo_move.c
*/
-extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
-extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
+extern int drm_bo_move_ttm(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+extern int drm_bo_move_memcpy(struct drm_buffer_object * bo,
int evict,
- int no_wait, drm_bo_mem_reg_t * new_mem);
-extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+ int no_wait, struct drm_bo_mem_reg * new_mem);
+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
int evict,
int no_wait,
uint32_t fence_class,
uint32_t fence_type,
uint32_t fence_flags,
- drm_bo_mem_reg_t * new_mem);
+ struct drm_bo_mem_reg * new_mem);
+
+extern int drm_mem_reg_ioremap(struct drm_device *dev,
+ struct drm_bo_mem_reg *mem, void **virtual);
+extern void drm_mem_reg_iounmap(struct drm_device *dev,
+ struct drm_bo_mem_reg *mem, void *virtual);
-extern int drm_mem_reg_ioremap(struct drm_device *dev, drm_bo_mem_reg_t * mem,
+extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
void **virtual);
-extern void drm_mem_reg_iounmap(struct drm_device *dev, drm_bo_mem_reg_t * mem,
+extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem,
void *virtual);
#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex) \
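Taken together, the declarations above drop the _t typedefs in favour of plain struct tags and widen the buffer-object flags and mask fields from 32 to 64 bits. As an illustration only (not part of this patch; dev and size are assumed to be in scope, and drm_bo_type_dc / DRM_BO_FLAG_MEM_TT are assumed from the surrounding tree), driver code now spells the same objects like this:

/* Illustrative sketch of the post-typedef spelling. */
struct drm_ttm_backend *be;          /* was: drm_ttm_backend_t *be   */
struct drm_buffer_object *bo;        /* was: drm_buffer_object_t *bo */
uint64_t mask = DRM_BO_FLAG_MEM_TT;  /* flags/mask are now 64-bit    */
int ret;

ret = drm_buffer_object_create(dev, size, drm_bo_type_dc, mask,
                               0 /* hint */, 0 /* page_alignment */,
                               0 /* buffer_start */, &bo);
if (ret)
        return ret;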
diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h
index 2ea105c5..2688479a 100644
--- a/linux-core/drm_os_linux.h
+++ b/linux-core/drm_os_linux.h
@@ -6,11 +6,6 @@
#include <linux/interrupt.h> /* For task queue support */
#include <linux/delay.h>
-/** File pointer type */
-#define DRMFILE struct file *
-/** Ioctl arguments */
-#define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data
-#define DRM_ERR(d) -(d)
/** Current process ID */
#define DRM_CURRENTPID current->pid
#define DRM_SUSER(p) capable(CAP_SYS_ADMIN)
@@ -51,9 +46,6 @@
#define DRM_WRITEMEMORYBARRIER() wmb()
/** Read/write memory barrier */
#define DRM_MEMORYBARRIER() mb()
-/** DRM device local declaration */
-#define DRM_DEVICE drm_file_t *priv = filp->private_data; \
- drm_device_t *dev = priv->head->dev
/** IRQ handler arguments and return type and values */
#define DRM_IRQ_ARGS int irq, void *arg
@@ -93,14 +85,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define MTRR_TYPE_WRCOMB 1
#endif
-/** For data going into the kernel through the ioctl argument */
-#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \
- if ( copy_from_user(&arg1, arg2, arg3) ) \
- return -EFAULT
-/** For data going from the kernel through the ioctl argument */
-#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3) \
- if ( copy_to_user(arg1, &arg2, arg3) ) \
- return -EFAULT
/** Other copying of data to kernel space */
#define DRM_COPY_FROM_USER(arg1, arg2, arg3) \
copy_from_user(arg1, arg2, arg3)
@@ -117,8 +101,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_GET_USER_UNCHECKED(val, uaddr) \
__get_user(val, uaddr)
-#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data
-
#define DRM_HZ HZ
#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
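The macros deleted above (DRMFILE, DRM_IOCTL_ARGS, DRM_DEVICE, DRM_COPY_FROM_USER_IOCTL, DRM_COPY_TO_USER_IOCTL, DRM_GET_PRIV_WITH_RETURN) existed only to service the old ioctl calling convention. A before/after sketch of one handler, where foo_ioctl, struct drm_foo_arg / drm_foo_arg_t and do_something() are hypothetical names used only for contrast:

/* Old shape, built from the macros removed in this hunk. */
int foo_ioctl_old(DRM_IOCTL_ARGS)
{
        DRM_DEVICE;                     /* derives dev from filp */
        drm_foo_arg_t arg;

        DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg));
        arg.result = do_something(dev, arg.value);
        DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
        return 0;
}

/* New shape: the DRM core copies the argument block into kernel space
 * before calling the handler and back out afterwards. */
int foo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
        struct drm_foo_arg *arg = data;

        arg->result = do_something(dev, arg->value);
        return 0;
}

With the copying centralized in the core, the per-handler -EFAULT boilerplate disappears, which is what makes the macro block above removable.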
diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c
index 76252204..a608eed3 100644
--- a/linux-core/drm_pci.c
+++ b/linux-core/drm_pci.c
@@ -47,7 +47,7 @@
/**
* \brief Allocate a PCI consistent memory block, for DMA.
*/
-drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
+drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align,
dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
@@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
*
* This function is for internal use in the Linux-specific DRM core code.
*/
-void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
+void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
{
unsigned long addr;
size_t sz;
@@ -167,7 +167,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
/**
* \brief Free a PCI consistent memory block
*/
-void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah)
+void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
{
__drm_pci_free(dev, dmah);
kfree(dmah);
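drm_pci.c changes only in spelling (drm_device_t becomes struct drm_device); the allocator behaviour is untouched. A minimal usage sketch, assuming the usual drm_dma_handle_t layout of this tree (vaddr/busaddr fields) and a dev pointer in scope:

/* Illustrative: one page of DMA-consistent memory via the helpers above. */
drm_dma_handle_t *dmah;

dmah = drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffffUL);
if (!dmah)
        return -ENOMEM;

/* dmah->vaddr / dmah->busaddr give the kernel and bus addresses. */
drm_pci_free(dev, dmah);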
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index e93a0406..08bf99d6 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -90,7 +90,7 @@ static struct drm_proc_list {
* "/proc/dri/%minor%/", and each entry in proc_list as
* "/proc/dri/%minor%/%name%".
*/
-int drm_proc_init(drm_device_t * dev, int minor,
+int drm_proc_init(struct drm_device * dev, int minor,
struct proc_dir_entry *root, struct proc_dir_entry **dev_root)
{
struct proc_dir_entry *ent;
@@ -165,7 +165,7 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root,
static int drm_name_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
@@ -207,10 +207,10 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request,
static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_map_t *map;
- drm_map_list_t *r_list;
+ struct drm_map *map;
+ struct drm_map_list *r_list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
@@ -264,7 +264,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
static int drm_vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -287,10 +287,10 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request,
static int drm__queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
int i;
- drm_queue_t *q;
+ struct drm_queue *q;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@@ -337,7 +337,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset,
static int drm_queues_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -360,9 +360,9 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request,
static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma || offset > DRM_PROC_LIMIT) {
@@ -409,7 +409,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -432,13 +432,13 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
static int drm__objects_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_buffer_manager_t *bm = &dev->bm;
- drm_fence_manager_t *fm = &dev->fm;
- drm_u64_t used_mem;
- drm_u64_t low_mem;
- drm_u64_t high_mem;
+ struct drm_buffer_manager *bm = &dev->bm;
+ struct drm_fence_manager *fm = &dev->fm;
+ uint64_t used_mem;
+ uint64_t low_mem;
+ uint64_t high_mem;
if (offset > DRM_PROC_LIMIT) {
@@ -496,7 +496,7 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
static int drm_objects_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -519,9 +519,9 @@ static int drm_objects_info(char *buf, char **start, off_t offset, int request,
static int drm__clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_file_t *priv;
+ struct drm_file *priv;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
@@ -552,7 +552,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -566,9 +566,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int len = 0;
- drm_vma_entry_t *pt;
+ struct drm_vma_entry *pt;
struct vm_area_struct *vma;
#if defined(__i386__)
unsigned int pgprot;
@@ -625,7 +625,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
static int drm_vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
int ret;
mutex_lock(&dev->struct_mutex);
diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c
index e5c9f877..3c0f672e 100644
--- a/linux-core/drm_scatter.c
+++ b/linux-core/drm_scatter.c
@@ -36,7 +36,7 @@
#define DEBUG_SCATTER 0
-void drm_sg_cleanup(drm_sg_mem_t * entry)
+void drm_sg_cleanup(struct drm_sg_mem *entry)
{
struct page *page;
int i;
@@ -55,6 +55,7 @@ void drm_sg_cleanup(drm_sg_mem_t * entry)
entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES);
drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS);
}
+EXPORT_SYMBOL(drm_sg_cleanup);
#ifdef _LP64
# define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1)))
@@ -62,14 +63,9 @@ void drm_sg_cleanup(drm_sg_mem_t * entry)
# define ScatterHandle(x) (unsigned int)(x)
#endif
-int drm_sg_alloc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_scatter_gather_t __user *argp = (void __user *)arg;
- drm_scatter_gather_t request;
- drm_sg_mem_t *entry;
+ struct drm_sg_mem *entry;
unsigned long pages, i, j;
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -80,17 +76,13 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
if (dev->sg)
return -EINVAL;
- if (copy_from_user(&request, argp, sizeof(request)))
- return -EFAULT;
-
entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS);
if (!entry)
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
-
- pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages);
+ pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
+ DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@@ -142,12 +134,7 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
SetPageReserved(entry->pagelist[j]);
}
- request.handle = entry->handle;
-
- if (copy_to_user(argp, &request, sizeof(request))) {
- drm_sg_cleanup(entry);
- return -EFAULT;
- }
+ request->handle = entry->handle;
dev->sg = entry;
@@ -196,28 +183,32 @@ int drm_sg_alloc(struct inode *inode, struct file *filp,
failed:
drm_sg_cleanup(entry);
return -ENOMEM;
+
}
+EXPORT_SYMBOL(drm_sg_alloc);
-int drm_sg_free(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_scatter_gather_t request;
- drm_sg_mem_t *entry;
+ struct drm_scatter_gather *request = data;
+
+ return drm_sg_alloc(dev, request);
+
+}
+
+int drm_sg_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_scatter_gather *request = data;
+ struct drm_sg_mem *entry;
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
- if (copy_from_user(&request,
- (drm_scatter_gather_t __user *) arg,
- sizeof(request)))
- return -EFAULT;
-
entry = dev->sg;
dev->sg = NULL;
- if (!entry || entry->handle != request.handle)
+ if (!entry || entry->handle != request->handle)
return -EINVAL;
DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index e15db6d6..118e82ae 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -38,13 +38,13 @@
#include "drm_sman.h"
-typedef struct drm_owner_item {
- drm_hash_item_t owner_hash;
+struct drm_owner_item {
+ struct drm_hash_item owner_hash;
struct list_head sman_list;
struct list_head mem_blocks;
-} drm_owner_item_t;
+};
-void drm_sman_takedown(drm_sman_t * sman)
+void drm_sman_takedown(struct drm_sman * sman)
{
drm_ht_remove(&sman->user_hash_tab);
drm_ht_remove(&sman->owner_hash_tab);
@@ -56,12 +56,12 @@ void drm_sman_takedown(drm_sman_t * sman)
EXPORT_SYMBOL(drm_sman_takedown);
int
-drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
+drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order)
{
int ret = 0;
- sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm),
+ sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm),
DRM_MEM_MM);
if (!sman->mm) {
ret = -ENOMEM;
@@ -88,8 +88,8 @@ EXPORT_SYMBOL(drm_sman_init);
static void *drm_sman_mm_allocate(void *private, unsigned long size,
unsigned alignment)
{
- drm_mm_t *mm = (drm_mm_t *) private;
- drm_mm_node_t *tmp;
+ struct drm_mm *mm = (struct drm_mm *) private;
+ struct drm_mm_node *tmp;
tmp = drm_mm_search_free(mm, size, alignment, 1);
if (!tmp) {
@@ -101,30 +101,30 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size,
static void drm_sman_mm_free(void *private, void *ref)
{
- drm_mm_node_t *node = (drm_mm_node_t *) ref;
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
drm_mm_put_block(node);
}
static void drm_sman_mm_destroy(void *private)
{
- drm_mm_t *mm = (drm_mm_t *) private;
+ struct drm_mm *mm = (struct drm_mm *) private;
drm_mm_takedown(mm);
drm_free(mm, sizeof(*mm), DRM_MEM_MM);
}
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
- drm_mm_node_t *node = (drm_mm_node_t *) ref;
+ struct drm_mm_node *node = (struct drm_mm_node *) ref;
return node->start;
}
int
-drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
+drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size)
{
- drm_sman_mm_t *sman_mm;
- drm_mm_t *mm;
+ struct drm_sman_mm *sman_mm;
+ struct drm_mm *mm;
int ret;
BUG_ON(manager >= sman->num_managers);
@@ -153,8 +153,8 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
EXPORT_SYMBOL(drm_sman_set_range);
int
-drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
- drm_sman_mm_t * allocator)
+drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
+ struct drm_sman_mm * allocator)
{
BUG_ON(manager >= sman->num_managers);
sman->mm[manager] = *allocator;
@@ -163,16 +163,16 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager,
}
EXPORT_SYMBOL(drm_sman_set_manager);
-static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman,
+static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
unsigned long owner)
{
int ret;
- drm_hash_item_t *owner_hash_item;
- drm_owner_item_t *owner_item;
+ struct drm_hash_item *owner_hash_item;
+ struct drm_owner_item *owner_item;
ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
if (!ret) {
- return drm_hash_entry(owner_hash_item, drm_owner_item_t,
+ return drm_hash_entry(owner_hash_item, struct drm_owner_item,
owner_hash);
}
@@ -194,14 +194,14 @@ out:
return NULL;
}
-drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager,
+struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
unsigned long size, unsigned alignment,
unsigned long owner)
{
void *tmp;
- drm_sman_mm_t *sman_mm;
- drm_owner_item_t *owner_item;
- drm_memblock_item_t *memblock;
+ struct drm_sman_mm *sman_mm;
+ struct drm_owner_item *owner_item;
+ struct drm_memblock_item *memblock;
BUG_ON(manager >= sman->num_managers);
@@ -246,9 +246,9 @@ out:
EXPORT_SYMBOL(drm_sman_alloc);
-static void drm_sman_free(drm_memblock_item_t *item)
+static void drm_sman_free(struct drm_memblock_item *item)
{
- drm_sman_t *sman = item->sman;
+ struct drm_sman *sman = item->sman;
list_del(&item->owner_list);
drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
@@ -256,40 +256,40 @@ static void drm_sman_free(drm_memblock_item_t *item)
drm_free(item, sizeof(*item), DRM_MEM_MM);
}
-int drm_sman_free_key(drm_sman_t *sman, unsigned int key)
+int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
{
- drm_hash_item_t *hash_item;
- drm_memblock_item_t *memblock_item;
+ struct drm_hash_item *hash_item;
+ struct drm_memblock_item *memblock_item;
if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
return -EINVAL;
- memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash);
+ memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, user_hash);
drm_sman_free(memblock_item);
return 0;
}
EXPORT_SYMBOL(drm_sman_free_key);
-static void drm_sman_remove_owner(drm_sman_t *sman,
- drm_owner_item_t *owner_item)
+static void drm_sman_remove_owner(struct drm_sman *sman,
+ struct drm_owner_item *owner_item)
{
list_del(&owner_item->sman_list);
drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM);
}
-int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner)
+int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
{
- drm_hash_item_t *hash_item;
- drm_owner_item_t *owner_item;
+ struct drm_hash_item *hash_item;
+ struct drm_owner_item *owner_item;
if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
return -1;
}
- owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
+ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
if (owner_item->mem_blocks.next == &owner_item->mem_blocks) {
drm_sman_remove_owner(sman, owner_item);
return -1;
@@ -300,10 +300,10 @@ int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner)
EXPORT_SYMBOL(drm_sman_owner_clean);
-static void drm_sman_do_owner_cleanup(drm_sman_t *sman,
- drm_owner_item_t *owner_item)
+static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
+ struct drm_owner_item *owner_item)
{
- drm_memblock_item_t *entry, *next;
+ struct drm_memblock_item *entry, *next;
list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
owner_list) {
@@ -312,28 +312,28 @@ static void drm_sman_do_owner_cleanup(drm_sman_t *sman,
drm_sman_remove_owner(sman, owner_item);
}
-void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner)
+void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
{
- drm_hash_item_t *hash_item;
- drm_owner_item_t *owner_item;
+ struct drm_hash_item *hash_item;
+ struct drm_owner_item *owner_item;
if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
return;
}
- owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash);
+ owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
drm_sman_do_owner_cleanup(sman, owner_item);
}
EXPORT_SYMBOL(drm_sman_owner_cleanup);
-void drm_sman_cleanup(drm_sman_t *sman)
+void drm_sman_cleanup(struct drm_sman *sman)
{
- drm_owner_item_t *entry, *next;
+ struct drm_owner_item *entry, *next;
unsigned int i;
- drm_sman_mm_t *sman_mm;
+ struct drm_sman_mm *sman_mm;
list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
drm_sman_do_owner_cleanup(sman, entry);
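The simple memory manager keeps its open-hash lookups; only the type names change, so drm_hash_entry() now takes a struct tag instead of a typedef. The lookup pattern used by drm_sman_free_key() and drm_sman_owner_clean(), reduced to a sketch:

/* Resolve a user key to its memory block, or NULL if it is unknown. */
static struct drm_memblock_item *
sman_lookup(struct drm_sman *sman, unsigned int key)
{
        struct drm_hash_item *hash_item;

        if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
                return NULL;

        return drm_hash_entry(hash_item, struct drm_memblock_item, user_hash);
}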
diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h
index ddc732a1..39a39fef 100644
--- a/linux-core/drm_sman.h
+++ b/linux-core/drm_sman.h
@@ -50,7 +50,7 @@
* for memory management.
*/
-typedef struct drm_sman_mm {
+struct drm_sman_mm {
/* private info. If allocated, needs to be destroyed by the destroy
function */
void *private;
@@ -74,30 +74,30 @@ typedef struct drm_sman_mm {
"alloc" function */
unsigned long (*offset) (void *private, void *ref);
-} drm_sman_mm_t;
+};
-typedef struct drm_memblock_item {
+struct drm_memblock_item {
struct list_head owner_list;
- drm_hash_item_t user_hash;
+ struct drm_hash_item user_hash;
void *mm_info;
- drm_sman_mm_t *mm;
+ struct drm_sman_mm *mm;
struct drm_sman *sman;
-} drm_memblock_item_t;
+};
-typedef struct drm_sman {
- drm_sman_mm_t *mm;
+struct drm_sman {
+ struct drm_sman_mm *mm;
int num_managers;
- drm_open_hash_t owner_hash_tab;
- drm_open_hash_t user_hash_tab;
+ struct drm_open_hash owner_hash_tab;
+ struct drm_open_hash user_hash_tab;
struct list_head owner_items;
-} drm_sman_t;
+};
/*
* Take down a memory manager. This function should only be called after a
* successful init and after a call to drm_sman_cleanup.
*/
-extern void drm_sman_takedown(drm_sman_t * sman);
+extern void drm_sman_takedown(struct drm_sman * sman);
/*
* Allocate structures for a manager.
@@ -112,7 +112,7 @@ extern void drm_sman_takedown(drm_sman_t * sman);
*
*/
-extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
+extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
unsigned int user_order, unsigned int owner_order);
/*
@@ -120,7 +120,7 @@ extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers,
 * manager unless a customized allocator is used.
*/
-extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
+extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
unsigned long start, unsigned long size);
/*
@@ -129,23 +129,23 @@ extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager,
* so it can be destroyed after this call.
*/
-extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int mananger,
- drm_sman_mm_t * allocator);
+extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger,
+ struct drm_sman_mm * allocator);
/*
 * Allocate a memory block. Alignment is not implemented yet.
*/
-extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman,
- unsigned int manager,
- unsigned long size,
- unsigned alignment,
- unsigned long owner);
+extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman,
+ unsigned int manager,
+ unsigned long size,
+ unsigned alignment,
+ unsigned long owner);
/*
* Free a memory block identified by its user hash key.
*/
-extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key);
+extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key);
/*
* returns 1 iff there are no stale memory blocks associated with this owner.
@@ -154,7 +154,7 @@ extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key);
* resources associated with owner.
*/
-extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner);
+extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with this owner. Note that this
@@ -164,13 +164,13 @@ extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner);
* is not going to be referenced anymore.
*/
-extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner);
+extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner);
/*
* Frees all stale memory blocks associated with the memory manager.
* See idling above.
*/
-extern void drm_sman_cleanup(drm_sman_t * sman);
+extern void drm_sman_cleanup(struct drm_sman * sman);
#endif
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 0cc8a105..db8c0bd8 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -50,11 +50,11 @@ MODULE_PARM_DESC(debug, "Enable debug output");
module_param_named(cards_limit, drm_cards_limit, int, 0444);
module_param_named(debug, drm_debug, int, 0600);
-drm_head_t **drm_heads;
-struct drm_sysfs_class *drm_class;
+struct drm_head **drm_heads;
+struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
-static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
+static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
@@ -160,9 +160,9 @@ error_out_unreg:
 * create the proc init entry via proc_init(). This routine assigns
* minor numbers to secondary heads of multi-headed cards
*/
-static int drm_get_head(drm_device_t * dev, drm_head_t * head)
+static int drm_get_head(struct drm_device * dev, struct drm_head * head)
{
- drm_head_t **heads = drm_heads;
+ struct drm_head **heads = drm_heads;
int ret;
int minor;
@@ -171,7 +171,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
for (minor = 0; minor < drm_cards_limit; minor++, heads++) {
if (!*heads) {
- *head = (drm_head_t) {
+ *head = (struct drm_head) {
.dev = dev,
.device = MKDEV(DRM_MAJOR, minor),
.minor = minor,
@@ -202,7 +202,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head)
err_g2:
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
err_g1:
- *head = (drm_head_t) {
+ *head = (struct drm_head) {
.dev = NULL};
return ret;
}
@@ -221,7 +221,7 @@ err_g1:
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver)
{
- drm_device_t *dev;
+ struct drm_device *dev;
int ret;
DRM_DEBUG("\n");
@@ -232,18 +232,22 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
if (!drm_fb_loaded) {
pci_set_drvdata(pdev, dev);
- pci_request_regions(pdev, driver->pci_driver.name);
+ ret = pci_request_regions(pdev, driver->pci_driver.name);
+ if (ret)
+ goto err_g1;
}
- pci_enable_device(pdev);
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_g2;
pci_set_master(pdev);
if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) {
printk(KERN_ERR "DRM: fill_in_dev failed\n");
- goto err_g1;
+ goto err_g3;
}
if ((ret = drm_get_head(dev, &dev->primary)))
- goto err_g1;
+ goto err_g3;
DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
driver->name, driver->major, driver->minor, driver->patchlevel,
@@ -251,12 +255,16 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
return 0;
-err_g1:
- if (!drm_fb_loaded) {
- pci_set_drvdata(pdev, NULL);
- pci_release_regions(pdev);
+ err_g3:
+ if (!drm_fb_loaded)
pci_disable_device(pdev);
- }
+ err_g2:
+ if (!drm_fb_loaded)
+ pci_release_regions(pdev);
+ err_g1:
+ if (!drm_fb_loaded)
+ pci_set_drvdata(pdev, NULL);
+
drm_free(dev, sizeof(*dev), DRM_MEM_STUB);
printk(KERN_ERR "DRM: drm_get_dev failed.\n");
return ret;
@@ -274,7 +282,7 @@ EXPORT_SYMBOL(drm_get_dev);
* "drm" data, otherwise unregisters the "drm" data, frees the dev list and
* unregisters the character device.
*/
-int drm_put_dev(drm_device_t * dev)
+int drm_put_dev(struct drm_device * dev)
{
DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name);
@@ -302,7 +310,7 @@ int drm_put_dev(drm_device_t * dev)
* last minor released.
*
*/
-int drm_put_head(drm_head_t * head)
+int drm_put_head(struct drm_head * head)
{
int minor = head->minor;
@@ -311,7 +319,7 @@ int drm_put_head(drm_head_t * head)
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
drm_sysfs_device_remove(head->dev_class);
- *head = (drm_head_t){.dev = NULL};
+ *head = (struct drm_head){.dev = NULL};
drm_heads[minor] = NULL;
return 0;
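Besides the renames, drm_get_dev() now checks the return values of pci_request_regions() and pci_enable_device() and unwinds through err_g1/err_g2/err_g3 in reverse acquisition order. The same idiom in isolation, with acquire_*/release_* as placeholders rather than real DRM calls:

/* Generic sketch of the unwind pattern adopted above. */
int setup_example(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                goto err_a;
        ret = acquire_b();
        if (ret)
                goto err_b;
        ret = acquire_c();
        if (ret)
                goto err_c;
        return 0;

err_c:
        release_b();
err_b:
        release_a();
err_a:
        return ret;
}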
diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c
index ace0778b..cf4349b0 100644
--- a/linux-core/drm_sysfs.c
+++ b/linux-core/drm_sysfs.c
@@ -1,3 +1,4 @@
+
/*
* drm_sysfs.c - Modifications to drm_sysfs_class.c to support
* extra sysfs attribute from DRM. Normal drm_sysfs_class
@@ -15,38 +16,8 @@
#include <linux/kdev_t.h>
#include <linux/err.h>
-#include "drmP.h"
#include "drm_core.h"
-
-struct drm_sysfs_class {
- struct class_device_attribute attr;
- struct class class;
-};
-#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class)
-
-struct simple_dev {
- dev_t dev;
- struct class_device class_dev;
-};
-#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev)
-
-static void release_simple_dev(struct class_device *class_dev)
-{
- struct simple_dev *s_dev = to_simple_dev(class_dev);
- kfree(s_dev);
-}
-
-static ssize_t show_dev(struct class_device *class_dev, char *buf)
-{
- struct simple_dev *s_dev = to_simple_dev(class_dev);
- return print_dev_t(buf, s_dev->dev);
-}
-
-static void drm_sysfs_class_release(struct class *class)
-{
- struct drm_sysfs_class *cs = to_drm_sysfs_class(class);
- kfree(cs);
-}
+#include "drmP.h"
/* Display the version of drm_core. This doesn't work right in current design */
static ssize_t version_show(struct class *dev, char *buf)
@@ -68,38 +39,27 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL);
* Note, the pointer created here is to be destroyed when finished by making a
* call to drm_sysfs_destroy().
*/
-struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name)
+struct class *drm_sysfs_create(struct module *owner, char *name)
{
- struct drm_sysfs_class *cs;
- int retval;
+ struct class *class;
+ int err;
- cs = kmalloc(sizeof(*cs), GFP_KERNEL);
- if (!cs) {
- retval = -ENOMEM;
- goto error;
+ class = class_create(owner, name);
+ if (IS_ERR(class)) {
+ err = PTR_ERR(class);
+ goto err_out;
}
- memset(cs, 0x00, sizeof(*cs));
-
- cs->class.name = name;
- cs->class.class_release = drm_sysfs_class_release;
- cs->class.release = release_simple_dev;
- cs->attr.attr.name = "dev";
- cs->attr.attr.mode = S_IRUGO;
- cs->attr.attr.owner = owner;
- cs->attr.show = show_dev;
- cs->attr.store = NULL;
+ err = class_create_file(class, &class_attr_version);
+ if (err)
+ goto err_out_class;
- retval = class_register(&cs->class);
- if (retval)
- goto error;
- class_create_file(&cs->class, &class_attr_version);
+ return class;
- return cs;
-
- error:
- kfree(cs);
- return ERR_PTR(retval);
+err_out_class:
+ class_destroy(class);
+err_out:
+ return ERR_PTR(err);
}
/**
@@ -109,17 +69,18 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name)
* Note, the pointer to be destroyed must have been created with a call to
* drm_sysfs_create().
*/
-void drm_sysfs_destroy(struct drm_sysfs_class *cs)
+void drm_sysfs_destroy(struct class *class)
{
- if ((cs == NULL) || (IS_ERR(cs)))
+ if ((class == NULL) || (IS_ERR(class)))
return;
- class_unregister(&cs->class);
+ class_remove_file(class, &class_attr_version);
+ class_destroy(class);
}
static ssize_t show_dri(struct class_device *class_device, char *buf)
{
- drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev;
+ struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev;
if (dev->driver->dri_library_name)
return dev->driver->dri_library_name(dev, buf);
return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name);
@@ -131,7 +92,7 @@ static struct class_device_attribute class_device_attrs[] = {
/**
* drm_sysfs_device_add - adds a class device to sysfs for a character driver
- * @cs: pointer to the struct drm_sysfs_class that this device should be registered to.
+ * @cs: pointer to the struct class that this device should be registered to.
* @dev: the dev_t for the device to be added.
 * @device: a pointer to a struct device that is associated with this class device.
* @fmt: string for the class device's name
@@ -140,47 +101,42 @@ static struct class_device_attribute class_device_attrs[] = {
* class. A "dev" file will be created, showing the dev_t for the device. The
* pointer to the struct class_device will be returned from the call. Any further
* sysfs files that might be required can be created using this pointer.
- * Note: the struct drm_sysfs_class passed to this function must have previously been
+ * Note: the struct class passed to this function must have previously been
* created with a call to drm_sysfs_create().
*/
-struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs,
- drm_head_t * head)
+struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head)
{
- struct simple_dev *s_dev = NULL;
- int i, retval;
-
- if ((cs == NULL) || (IS_ERR(cs))) {
- retval = -ENODEV;
- goto error;
+ struct class_device *class_dev;
+ int i, j, err;
+
+ class_dev = class_device_create(cs, NULL,
+ MKDEV(DRM_MAJOR, head->minor),
+ &(head->dev->pdev)->dev,
+ "card%d", head->minor);
+ if (IS_ERR(class_dev)) {
+ err = PTR_ERR(class_dev);
+ goto err_out;
}
- s_dev = kmalloc(sizeof(*s_dev), GFP_KERNEL);
- if (!s_dev) {
- retval = -ENOMEM;
- goto error;
- }
- memset(s_dev, 0x00, sizeof(*s_dev));
+ class_set_devdata(class_dev, head);
- s_dev->dev = MKDEV(DRM_MAJOR, head->minor);
- s_dev->class_dev.dev = &head->dev->pdev->dev;
- s_dev->class_dev.class = &cs->class;
-
- snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor);
- retval = class_device_register(&s_dev->class_dev);
- if (retval)
- goto error;
-
- class_device_create_file(&s_dev->class_dev, &cs->attr);
- class_set_devdata(&s_dev->class_dev, head);
-
- for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
- class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]);
+ for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) {
+ err = class_device_create_file(class_dev,
+ &class_device_attrs[i]);
+ if (err)
+ goto err_out_files;
+ }
- return &s_dev->class_dev;
+ return class_dev;
-error:
- kfree(s_dev);
- return ERR_PTR(retval);
+err_out_files:
+ if (i > 0)
+ for (j = 0; j < i; j++)
+ class_device_remove_file(class_dev,
+ &class_device_attrs[i]);
+ class_device_unregister(class_dev);
+err_out:
+ return ERR_PTR(err);
}
/**
@@ -192,11 +148,9 @@ error:
*/
void drm_sysfs_device_remove(struct class_device *class_dev)
{
- struct simple_dev *s_dev = to_simple_dev(class_dev);
int i;
for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++)
- class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]);
-
- class_device_unregister(&s_dev->class_dev);
+ class_device_remove_file(class_dev, &class_device_attrs[i]);
+ class_device_unregister(class_dev);
}
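drm_sysfs.c drops its hand-rolled struct drm_sysfs_class wrapper in favour of the stock class_create()/class_device_create() interfaces, which supply the dev attribute and release handling themselves. The registration path condensed to a sketch (minor, pdev and head stand in for the real call sites; error handling is as in the functions above):

/* Illustrative registration flow using the same driver-core calls. */
struct class *cls;
struct class_device *cd;

cls = class_create(THIS_MODULE, "drm");
if (IS_ERR(cls))
        return PTR_ERR(cls);
class_create_file(cls, &class_attr_version); /* checked in drm_sysfs_create() proper */

cd = class_device_create(cls, NULL, MKDEV(DRM_MAJOR, minor),
                         &pdev->dev, "card%d", minor);
if (IS_ERR(cd))
        return PTR_ERR(cd);
class_set_devdata(cd, head);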
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 31503c9c..60c64cba 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -45,7 +45,7 @@ static void drm_ttm_cache_flush(void)
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
-static void ttm_alloc_pages(drm_ttm_t * ttm)
+static void ttm_alloc_pages(struct drm_ttm * ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
@@ -66,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t * ttm)
}
}
-static void ttm_free_pages(drm_ttm_t * ttm)
+static void ttm_free_pages(struct drm_ttm * ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
@@ -105,7 +105,7 @@ static struct page *drm_ttm_alloc_page(void)
* for range of pages in a ttm.
*/
-static int drm_set_caching(drm_ttm_t * ttm, int noncached)
+static int drm_set_caching(struct drm_ttm * ttm, int noncached)
{
int i;
struct page **cur_page;
@@ -142,12 +142,12 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
* Free all resources associated with a ttm.
*/
-int drm_destroy_ttm(drm_ttm_t * ttm)
+int drm_destroy_ttm(struct drm_ttm * ttm)
{
int i;
struct page **cur_page;
- drm_ttm_backend_t *be;
+ struct drm_ttm_backend *be;
if (!ttm)
return 0;
@@ -159,7 +159,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
}
if (ttm->pages) {
- drm_buffer_manager_t *bm = &ttm->dev->bm;
+ struct drm_buffer_manager *bm = &ttm->dev->bm;
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
drm_set_caching(ttm, 0);
@@ -191,10 +191,10 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
return 0;
}
-struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
+struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
{
struct page *p;
- drm_buffer_manager_t *bm = &ttm->dev->bm;
+ struct drm_buffer_manager *bm = &ttm->dev->bm;
p = ttm->pages[index];
if (!p) {
@@ -207,11 +207,11 @@ struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
return p;
}
-static int drm_ttm_populate(drm_ttm_t * ttm)
+static int drm_ttm_populate(struct drm_ttm * ttm)
{
struct page *page;
unsigned long i;
- drm_ttm_backend_t *be;
+ struct drm_ttm_backend *be;
if (ttm->state != ttm_unpopulated)
return 0;
@@ -231,10 +231,10 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
* Initialize a ttm.
*/
-drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
+struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)
{
- drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
- drm_ttm_t *ttm;
+ struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
+ struct drm_ttm *ttm;
if (!bo_driver)
return NULL;
@@ -275,9 +275,9 @@ drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
* Unbind a ttm region from the aperture.
*/
-void drm_ttm_evict(drm_ttm_t * ttm)
+void drm_ttm_evict(struct drm_ttm * ttm)
{
- drm_ttm_backend_t *be = ttm->be;
+ struct drm_ttm_backend *be = ttm->be;
int ret;
if (ttm->state == ttm_bound) {
@@ -288,11 +288,11 @@ void drm_ttm_evict(drm_ttm_t * ttm)
ttm->state = ttm_evicted;
}
-void drm_ttm_fixup_caching(drm_ttm_t * ttm)
+void drm_ttm_fixup_caching(struct drm_ttm * ttm)
{
if (ttm->state == ttm_evicted) {
- drm_ttm_backend_t *be = ttm->be;
+ struct drm_ttm_backend *be = ttm->be;
if (be->func->needs_ub_cache_adjust(be)) {
drm_set_caching(ttm, 0);
}
@@ -300,7 +300,7 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm)
}
}
-void drm_ttm_unbind(drm_ttm_t * ttm)
+void drm_ttm_unbind(struct drm_ttm * ttm)
{
if (ttm->state == ttm_bound)
drm_ttm_evict(ttm);
@@ -308,11 +308,11 @@ void drm_ttm_unbind(drm_ttm_t * ttm)
drm_ttm_fixup_caching(ttm);
}
-int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
+int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
{
int ret = 0;
- drm_ttm_backend_t *be;
+ struct drm_ttm_backend *be;
if (!ttm)
return -EINVAL;
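drm_ttm.c is a pure rename of the TTM types; the bound/evicted/unpopulated state handling is untouched. A typical lifetime against the prototypes declared earlier in this patch (dev, size and aper_offset are assumed in scope):

/* Illustrative TTM lifetime. */
struct drm_ttm *ttm;
int ret;

ttm = drm_ttm_init(dev, size);          /* size in bytes */
if (!ttm)
        return -ENOMEM;

ret = drm_bind_ttm(ttm, 1 /* cached */, aper_offset);
if (ret) {
        drm_destroy_ttm(ttm);
        return ret;
}

drm_ttm_unbind(ttm);                    /* detach from the aperture */
drm_destroy_ttm(ttm);                   /* frees pages and the backend */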
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index 72d63c10..c4e790ef 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -85,11 +85,11 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t *map = NULL;
- drm_map_list_t *r_list;
- drm_hash_item_t *hash;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_map *map = NULL;
+ struct drm_map_list *r_list;
+ struct drm_hash_item *hash;
/*
* Find the right map
@@ -103,7 +103,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
goto vm_nopage_error;
- r_list = drm_hash_entry(hash, drm_map_list_t, hash);
+ r_list = drm_hash_entry(hash, struct drm_map_list, hash);
map = r_list->map;
if (map && map->type == _DRM_AGP) {
@@ -172,7 +172,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_map_t *map = (drm_map_t *) vma->vm_private_data;
+ struct drm_map *map = (struct drm_map *) vma->vm_private_data;
unsigned long offset;
unsigned long i;
struct page *page;
@@ -203,11 +203,11 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
*/
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *pt, *temp;
- drm_map_t *map;
- drm_map_list_t *r_list;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_vma_entry *pt, *temp;
+ struct drm_map *map;
+ struct drm_map_list *r_list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -285,9 +285,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_device_dma *dma = dev->dma;
unsigned long offset;
unsigned long page_nr;
struct page *page;
@@ -321,10 +321,10 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
unsigned long address)
{
- drm_map_t *map = (drm_map_t *) vma->vm_private_data;
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_map *map = (struct drm_map *) vma->vm_private_data;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_sg_mem *entry = dev->sg;
unsigned long offset;
unsigned long map_offset;
unsigned long page_offset;
@@ -418,9 +418,9 @@ static struct vm_operations_struct drm_vm_sg_ops = {
*/
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *vma_entry;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_vma_entry *vma_entry;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@@ -436,8 +436,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
static void drm_vm_open(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
mutex_lock(&dev->struct_mutex);
drm_vm_open_locked(vma);
@@ -454,9 +454,9 @@ static void drm_vm_open(struct vm_area_struct *vma)
*/
static void drm_vm_close(struct vm_area_struct *vma)
{
- drm_file_t *priv = vma->vm_file->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *pt, *temp;
+ struct drm_file *priv = vma->vm_file->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_vma_entry *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
@@ -477,7 +477,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
/**
* mmap DMA memory.
*
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param vma virtual memory area.
* \return zero on success or a negative number on failure.
*
@@ -486,9 +486,9 @@ static void drm_vm_close(struct vm_area_struct *vma)
*/
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
- drm_device_dma_t *dma;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev;
+ struct drm_device_dma *dma;
unsigned long length = vma->vm_end - vma->vm_start;
dev = priv->head->dev;
@@ -524,7 +524,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
return 0;
}
-unsigned long drm_core_get_map_ofs(drm_map_t * map)
+unsigned long drm_core_get_map_ofs(struct drm_map * map)
{
return map->offset;
}
@@ -543,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
/**
* mmap DMA memory.
*
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param vma virtual memory area.
* \return zero on success or a negative number on failure.
*
@@ -555,11 +555,11 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs);
*/
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_map_t *map = NULL;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
+ struct drm_map *map = NULL;
unsigned long offset = 0;
- drm_hash_item_t *hash;
+ struct drm_hash_item *hash;
DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
vma->vm_start, vma->vm_end, vma->vm_pgoff);
@@ -585,7 +585,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
- map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
+ map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
return -EPERM;
@@ -676,8 +676,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev = priv->head->dev;
int ret;
mutex_lock(&dev->struct_mutex);
@@ -713,11 +713,11 @@ EXPORT_SYMBOL(drm_mmap);
static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
unsigned long address)
{
- drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
- drm_ttm_t *ttm;
- drm_device_t *dev;
+ struct drm_ttm *ttm;
+ struct drm_device *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
@@ -766,7 +766,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
if (bus_size) {
- drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+ struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
@@ -798,7 +798,7 @@ out_unlock:
static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
- drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
drm_vm_open_locked(vma);
atomic_inc(&bo->usage);
@@ -815,8 +815,8 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
static void drm_bo_vm_open(struct vm_area_struct *vma)
{
- drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
- drm_device_t *dev = bo->dev;
+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+ struct drm_device *dev = bo->dev;
mutex_lock(&dev->struct_mutex);
drm_bo_vm_open_locked(vma);
@@ -831,8 +831,8 @@ static void drm_bo_vm_open(struct vm_area_struct *vma)
static void drm_bo_vm_close(struct vm_area_struct *vma)
{
- drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
- drm_device_t *dev = bo->dev;
+ struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
+ struct drm_device *dev = bo->dev;
drm_vm_close(vma);
if (bo) {
@@ -865,7 +865,7 @@ static struct vm_operations_struct drm_bo_vm_ops = {
* mmap buffer object memory.
*
* \param vma virtual memory area.
- * \param filp file pointer.
+ * \param file_priv DRM file private.
* \param map The buffer object drm map.
* \return zero on success or a negative number on failure.
*/
diff --git a/linux-core/ffb_context.c b/linux-core/ffb_context.c
index e6ae60c3..586c3503 100644
--- a/linux-core/ffb_context.c
+++ b/linux-core/ffb_context.c
@@ -13,7 +13,7 @@
#include "drmP.h"
#include "ffb_drv.h"
-static int ffb_alloc_queue(drm_device_t * dev, int is_2d_only) {
+static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) {
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int i;
@@ -351,7 +351,7 @@ static void FFBWait(ffb_fbcPtr ffb)
} while (--limit);
}
-int ffb_context_switch(drm_device_t * dev, int old, int new) {
+int ffb_context_switch(struct drm_device * dev, int old, int new) {
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
#if DRM_DMA_HISTOGRAM
@@ -401,7 +401,7 @@ int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd,
int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
+ struct drm_device *dev = priv->dev;
drm_ctx_t ctx;
int idx;
@@ -421,7 +421,7 @@ int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd,
int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
+ struct drm_device *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
struct ffb_hw_context *hwctx;
drm_ctx_t ctx;
@@ -449,7 +449,7 @@ int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd,
int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
+ struct drm_device *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
struct ffb_hw_context *hwctx;
drm_ctx_t ctx;
@@ -480,7 +480,7 @@ int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd,
int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
+ struct drm_device *dev = priv->dev;
drm_ctx_t ctx;
if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx)))
@@ -504,7 +504,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd,
unsigned long arg) {
drm_ctx_t ctx;
drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->dev;
+ struct drm_device *dev = priv->dev;
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int idx;
@@ -523,7 +523,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd,
return 0;
}
-static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev)
+static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev)
{
ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private;
int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock);
@@ -537,13 +537,13 @@ static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev)
}
}
-static void ffb_driver_lastclose(drm_device_t * dev)
+static void ffb_driver_lastclose(struct drm_device * dev)
{
if (dev->dev_private)
kfree(dev->dev_private);
}
-static void ffb_driver_unload(drm_device_t * dev)
+static void ffb_driver_unload(struct drm_device * dev)
{
if (ffb_position != NULL)
kfree(ffb_position);
@@ -571,7 +571,7 @@ unsigned long ffb_driver_get_map_ofs(drm_map_t * map)
return (map->offset & 0xffffffff);
}
-unsigned long ffb_driver_get_reg_ofs(drm_device_t * dev)
+unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev)
{
ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private;
diff --git a/linux-core/ffb_drv.c b/linux-core/ffb_drv.c
index 9c88f061..f2b4cc7f 100644
--- a/linux-core/ffb_drv.c
+++ b/linux-core/ffb_drv.c
@@ -114,7 +114,7 @@ static void ffb_apply_upa_parent_ranges(int parent,
return;
}
-static int ffb_init_one(drm_device_t *dev, int prom_node, int parent_node,
+static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node,
int instance)
{
struct linux_prom64_registers regs[2*PROMREG_MAX];
@@ -167,7 +167,7 @@ static int __init ffb_scan_siblings(int root, int instance)
static drm_map_t *ffb_find_map(struct file *filp, unsigned long off)
{
drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
+ struct drm_device *dev;
drm_map_list_t *r_list;
struct list_head *list;
drm_map_t *map;
@@ -237,10 +237,10 @@ unsigned long ffb_get_unmapped_area(struct file *filp,
/* This function must be here since it references drm_numdevs,
* which drm_drv.h declares.
*/
-static int ffb_driver_firstopen(drm_device_t *dev)
+static int ffb_driver_firstopen(struct drm_device *dev)
{
ffb_dev_priv_t *ffb_priv;
- drm_device_t *temp_dev;
+ struct drm_device *temp_dev;
int ret = 0;
int i;
diff --git a/linux-core/ffb_drv.h b/linux-core/ffb_drv.h
index f76b0d92..bad3c94d 100644
--- a/linux-core/ffb_drv.h
+++ b/linux-core/ffb_drv.h
@@ -281,4 +281,4 @@ extern unsigned long ffb_get_unmapped_area(struct file *filp,
unsigned long pgoff,
unsigned long flags);
extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map)
-extern unsigned long ffb_driver_get_reg_ofs(drm_device_t *dev)
+extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev)
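The ffb changes above are dominated by the mechanical pattern this merge applies across the tree: the drm_device_t, drm_file_t and related typedefs are replaced by plain struct tags, which can be forward-declared without pulling in the old drmP.h typedefs. A minimal sketch of the resulting declaration style (the helper name is made up for illustration, not from the tree):

struct drm_device;
struct drm_file;

/* old: static int example_helper(drm_device_t *dev, drm_file_t *priv); */
static int example_helper(struct drm_device *dev, struct drm_file *file_priv);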
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 49379434..7c37b4bb 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -46,9 +46,9 @@
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
-static inline void i810_print_status_page(drm_device_t * dev)
+static inline void i810_print_status_page(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_i810_private_t *dev_priv = dev->dev_private;
u32 *temp = dev_priv->hw_status_page;
int i;
@@ -64,16 +64,16 @@ static inline void i810_print_status_page(drm_device_t * dev)
}
}
-static drm_buf_t *i810_freelist_get(drm_device_t * dev)
+static struct drm_buf *i810_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
int used;
/* Linear search might not be the best solution */
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
/* In use is already a pointer */
used = cmpxchg(buf_priv->in_use, I810_BUF_FREE,
@@ -89,7 +89,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t * dev)
 * yet, the hardware updates in use for us once it's on the ring buffer.
*/
-static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used;
@@ -106,10 +106,10 @@ static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf)
static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev;
+ struct drm_file *priv = filp->private_data;
+ struct drm_device *dev;
drm_i810_private_t *dev_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
lock_kernel();
@@ -139,10 +139,9 @@ static const struct file_operations i810_buffer_fops = {
.fasync = drm_fasync,
};
-static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
+static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
+ struct drm_device *dev = file_priv->head->dev;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_private_t *dev_priv = dev->dev_private;
const struct file_operations *old_fops;
@@ -152,14 +151,14 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
return -EINVAL;
down_write(&current->mm->mmap_sem);
- old_fops = filp->f_op;
- filp->f_op = &i810_buffer_fops;
+ old_fops = file_priv->filp->f_op;
+ file_priv->filp->f_op = &i810_buffer_fops;
dev_priv->mmap_buffer = buf;
- buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total,
+ buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total,
PROT_READ | PROT_WRITE,
MAP_SHARED, buf->bus_address);
dev_priv->mmap_buffer = NULL;
- filp->f_op = old_fops;
+ file_priv->filp->f_op = old_fops;
if (IS_ERR(buf_priv->virtual)) {
/* Real error */
DRM_ERROR("mmap error\n");
@@ -171,7 +170,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp)
return retcode;
}
-static int i810_unmap_buffer(drm_buf_t * buf)
+static int i810_unmap_buffer(struct drm_buf * buf)
{
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int retcode = 0;
@@ -191,10 +190,10 @@ static int i810_unmap_buffer(drm_buf_t * buf)
return retcode;
}
-static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d,
- struct file *filp)
+static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d,
+ struct drm_file *file_priv)
{
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_i810_buf_priv_t *buf_priv;
int retcode = 0;
@@ -205,13 +204,13 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d,
return retcode;
}
- retcode = i810_map_buffer(buf, filp);
+ retcode = i810_map_buffer(buf, file_priv);
if (retcode) {
i810_freelist_put(dev, buf);
DRM_ERROR("mapbuf failed, retcode %d\n", retcode);
return retcode;
}
- buf->filp = filp;
+ buf->file_priv = file_priv;
buf_priv = buf->dev_private;
d->granted = 1;
d->request_idx = buf->idx;
@@ -221,9 +220,9 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d,
return retcode;
}
-static int i810_dma_cleanup(drm_device_t * dev)
+static int i810_dma_cleanup(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
@@ -252,7 +251,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
dev->dev_private = NULL;
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total)
@@ -262,7 +261,7 @@ static int i810_dma_cleanup(drm_device_t * dev)
return 0;
}
-static int i810_wait_ring(drm_device_t * dev, int n)
+static int i810_wait_ring(struct drm_device * dev, int n)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -295,7 +294,7 @@ static int i810_wait_ring(drm_device_t * dev, int n)
return iters;
}
-static void i810_kernel_lost_context(drm_device_t * dev)
+static void i810_kernel_lost_context(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_ring_buffer_t *ring = &(dev_priv->ring);
@@ -307,9 +306,9 @@ static void i810_kernel_lost_context(drm_device_t * dev)
ring->space += ring->Size;
}
-static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
+static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int my_idx = 24;
u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx);
int i;
@@ -320,7 +319,7 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
}
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->in_use = hw_status++;
@@ -342,11 +341,11 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv)
return 0;
}
-static int i810_dma_initialize(drm_device_t * dev,
+static int i810_dma_initialize(struct drm_device * dev,
drm_i810_private_t * dev_priv,
drm_i810_init_t * init)
{
- drm_map_list_t *r_list;
+ struct drm_map_list *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
list_for_each_entry(r_list, &dev->maplist, head) {
@@ -399,7 +398,7 @@ static int i810_dma_initialize(drm_device_t * dev,
i810_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -449,99 +448,29 @@ static int i810_dma_initialize(drm_device_t * dev,
return 0;
}
-/* i810 DRM version 1.1 used a smaller init structure with different
- * ordering of values than is currently used (drm >= 1.2). There is
- * no defined way to detect the XFree version to correct this problem,
- * however by checking using this procedure we can detect the correct
- * thing to do.
- *
- * #1 Read the Smaller init structure from user-space
- * #2 Verify the overlay_physical is a valid physical address, or NULL
- * If it isn't then we have a v1.1 client. Fix up params.
- * If it is, then we have a 1.2 client... get the rest of the data.
- */
-static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg)
+static int i810_dma_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
-
- /* Get v1.1 init data */
- if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg,
- sizeof(drm_i810_pre12_init_t))) {
- return -EFAULT;
- }
-
- if ((!init->overlay_physical) || (init->overlay_physical > 4096)) {
-
- /* This is a v1.2 client, just get the v1.2 init data */
- DRM_INFO("Using POST v1.2 init.\n");
- if (copy_from_user(init, (drm_i810_init_t __user *) arg,
- sizeof(drm_i810_init_t))) {
- return -EFAULT;
- }
- } else {
-
- /* This is a v1.1 client, fix the params */
- DRM_INFO("Using PRE v1.2 init.\n");
- init->pitch_bits = init->h;
- init->pitch = init->w;
- init->h = init->overlay_physical;
- init->w = init->overlay_offset;
- init->overlay_physical = 0;
- init->overlay_offset = 0;
- }
-
- return 0;
-}
-
-static int i810_dma_init(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
drm_i810_private_t *dev_priv;
- drm_i810_init_t init;
+ drm_i810_init_t *init = data;
int retcode = 0;
- /* Get only the init func */
- if (copy_from_user
- (&init, (void __user *)arg, sizeof(drm_i810_init_func_t)))
- return -EFAULT;
-
- switch (init.func) {
- case I810_INIT_DMA:
- /* This case is for backward compatibility. It
- * handles XFree 4.1.0 and 4.2.0, and has to
- * do some parameter checking as described below.
- * It will someday go away.
- */
- retcode = i810_dma_init_compat(&init, arg);
- if (retcode)
- return retcode;
-
- dev_priv = drm_alloc(sizeof(drm_i810_private_t),
- DRM_MEM_DRIVER);
- if (dev_priv == NULL)
- return -ENOMEM;
- retcode = i810_dma_initialize(dev, dev_priv, &init);
- break;
-
- default:
+ switch (init->func) {
case I810_INIT_DMA_1_4:
DRM_INFO("Using v1.4 init.\n");
- if (copy_from_user(&init, (drm_i810_init_t __user *) arg,
- sizeof(drm_i810_init_t))) {
- return -EFAULT;
- }
dev_priv = drm_alloc(sizeof(drm_i810_private_t),
DRM_MEM_DRIVER);
if (dev_priv == NULL)
return -ENOMEM;
- retcode = i810_dma_initialize(dev, dev_priv, &init);
+ retcode = i810_dma_initialize(dev, dev_priv, init);
break;
case I810_CLEANUP_DMA:
DRM_INFO("DMA Cleanup\n");
retcode = i810_dma_cleanup(dev);
break;
+ default:
+ return -EINVAL;
}
return retcode;
@@ -553,7 +482,7 @@ static int i810_dma_init(struct inode *inode, struct file *filp,
* Use 'volatile' & local var tmp to force the emitted values to be
* identical to the verified ones.
*/
-static void i810EmitContextVerified(drm_device_t * dev,
+static void i810EmitContextVerified(struct drm_device * dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -586,7 +515,7 @@ static void i810EmitContextVerified(drm_device_t * dev,
ADVANCE_LP_RING();
}
-static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code)
+static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int i, j = 0;
@@ -619,7 +548,7 @@ static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code)
/* Need to do some additional checking when setting the dest buffer.
*/
-static void i810EmitDestVerified(drm_device_t * dev,
+static void i810EmitDestVerified(struct drm_device * dev,
volatile unsigned int *code)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -654,7 +583,7 @@ static void i810EmitDestVerified(drm_device_t * dev,
ADVANCE_LP_RING();
}
-static void i810EmitState(drm_device_t * dev)
+static void i810EmitState(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -685,14 +614,14 @@ static void i810EmitState(drm_device_t * dev)
/* need to verify
*/
-static void i810_dma_dispatch_clear(drm_device_t * dev, int flags,
+static void i810_dma_dispatch_clear(struct drm_device * dev, int flags,
unsigned int clear_color,
unsigned int clear_zval)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
@@ -760,12 +689,12 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags,
}
}
-static void i810_dma_dispatch_swap(drm_device_t * dev)
+static void i810_dma_dispatch_swap(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int pitch = dev_priv->pitch;
int cpp = 2;
int i;
@@ -806,13 +735,13 @@ static void i810_dma_dispatch_swap(drm_device_t * dev)
}
}
-static void i810_dma_dispatch_vertex(drm_device_t * dev,
- drm_buf_t * buf, int discard, int used)
+static void i810_dma_dispatch_vertex(struct drm_device * dev,
+ struct drm_buf * buf, int discard, int used)
{
drm_i810_private_t *dev_priv = dev->dev_private;
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_clip_rect_t *box = sarea_priv->boxes;
+ struct drm_clip_rect *box = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
unsigned long address = (unsigned long)buf->bus_address;
unsigned long start = address - dev->agp->base;
@@ -886,7 +815,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev,
}
}
-static void i810_dma_dispatch_flip(drm_device_t * dev)
+static void i810_dma_dispatch_flip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
int pitch = dev_priv->pitch;
@@ -933,7 +862,7 @@ static void i810_dma_dispatch_flip(drm_device_t * dev)
}
-static void i810_dma_quiescent(drm_device_t * dev)
+static void i810_dma_quiescent(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -952,10 +881,10 @@ static void i810_dma_quiescent(drm_device_t * dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
}
-static int i810_flush_queue(drm_device_t * dev)
+static int i810_flush_queue(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i, ret = 0;
RING_LOCALS;
@@ -971,7 +900,7 @@ static int i810_flush_queue(drm_device_t * dev)
i810_wait_ring(dev, dev_priv->ring.Size - 8);
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE,
@@ -987,9 +916,10 @@ static int i810_flush_queue(drm_device_t * dev)
}
/* Must be called with the lock held */
-static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp)
+static void i810_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
if (!dma)
@@ -1002,10 +932,10 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp)
i810_flush_queue(dev);
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_i810_buf_priv_t *buf_priv = buf->dev_private;
- if (buf->filp == filp && buf_priv) {
+ if (buf->file_priv == file_priv && buf_priv) {
int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT,
I810_BUF_FREE);
@@ -1017,47 +947,38 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp)
}
}
-static int i810_flush_ioctl(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_flush_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
-
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
i810_flush_queue(dev);
return 0;
}
-static int i810_dma_vertex(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_dma_vertex(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
- drm_i810_vertex_t vertex;
+ drm_i810_vertex_t *vertex = data;
- if (copy_from_user
- (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex)))
- return -EFAULT;
-
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
- vertex.idx, vertex.used, vertex.discard);
+ vertex->idx, vertex->used, vertex->discard);
- if (vertex.idx < 0 || vertex.idx > dma->buf_count)
+ if (vertex->idx < 0 || vertex->idx > dma->buf_count)
return -EINVAL;
i810_dma_dispatch_vertex(dev,
- dma->buflist[vertex.idx],
- vertex.discard, vertex.used);
+ dma->buflist[vertex->idx],
+ vertex->discard, vertex->used);
- atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]);
+ atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1065,48 +986,37 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp,
return 0;
}
-static int i810_clear_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_clear_bufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_i810_clear_t clear;
-
- if (copy_from_user
- (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear)))
- return -EFAULT;
+ drm_i810_clear_t *clear = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
/* GH: Someone's doing nasty things... */
if (!dev->dev_private) {
return -EINVAL;
}
- i810_dma_dispatch_clear(dev, clear.flags,
- clear.clear_color, clear.clear_depth);
+ i810_dma_dispatch_clear(dev, clear->flags,
+ clear->clear_color, clear->clear_depth);
return 0;
}
-static int i810_swap_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_swap_bufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
-
DRM_DEBUG("i810_swap_bufs\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
i810_dma_dispatch_swap(dev);
return 0;
}
-static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int i810_getage(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
@@ -1116,52 +1026,45 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd,
return 0;
}
-static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int i810_getbuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
int retcode = 0;
- drm_i810_dma_t d;
+ drm_i810_dma_t *d = data;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
- if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d)))
- return -EFAULT;
-
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- d.granted = 0;
+ d->granted = 0;
- retcode = i810_dma_get_buffer(dev, &d, filp);
+ retcode = i810_dma_get_buffer(dev, d, file_priv);
DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n",
- current->pid, retcode, d.granted);
+ current->pid, retcode, d->granted);
- if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d)))
- return -EFAULT;
sarea_priv->last_dispatch = (int)hw_status[5];
return retcode;
}
-static int i810_copybuf(struct inode *inode,
- struct file *filp, unsigned int cmd, unsigned long arg)
+static int i810_copybuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
/* Never copy - 2.4.x doesn't need it */
return 0;
}
-static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd,
- unsigned long arg)
+static int i810_docopy(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
/* Never copy - 2.4.x doesn't need it */
return 0;
}
-static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,
+static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used,
unsigned int last_render)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1221,30 +1124,25 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used,
ADVANCE_LP_RING();
}
-static int i810_dma_mc(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_dma_mc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
u32 *hw_status = dev_priv->hw_status_page;
drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *)
dev_priv->sarea_priv;
- drm_i810_mc_t mc;
+ drm_i810_mc_t *mc = data;
- if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc)))
- return -EFAULT;
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- if (mc.idx >= dma->buf_count || mc.idx < 0)
+ if (mc->idx >= dma->buf_count || mc->idx < 0)
return -EINVAL;
- i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used,
- mc.last_render);
+ i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
+ mc->last_render);
- atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]);
+ atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
atomic_inc(&dev->counts[_DRM_STAT_DMA]);
sarea_priv->last_enqueue = dev_priv->counter - 1;
sarea_priv->last_dispatch = (int)hw_status[5];
@@ -1252,51 +1150,41 @@ static int i810_dma_mc(struct inode *inode, struct file *filp,
return 0;
}
-static int i810_rstatus(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_rstatus(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
return (int)(((u32 *) (dev_priv->hw_status_page))[4]);
}
-static int i810_ov0_info(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_ov0_info(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- drm_i810_overlay_t data;
+ drm_i810_overlay_t *ov = data;
+
+ ov->offset = dev_priv->overlay_offset;
+ ov->physical = dev_priv->overlay_physical;
- data.offset = dev_priv->overlay_offset;
- data.physical = dev_priv->overlay_physical;
- if (copy_to_user
- ((drm_i810_overlay_t __user *) arg, &data, sizeof(data)))
- return -EFAULT;
return 0;
}
-static int i810_fstatus(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_fstatus(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return I810_READ(0x30008);
}
-static int i810_ov0_flip(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_ov0_flip(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
//Tell the overlay to update
I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000);
@@ -1305,7 +1193,7 @@ static int i810_ov0_flip(struct inode *inode, struct file *filp,
/* Not sure why this isn't set all the time:
*/
-static void i810_do_init_pageflip(drm_device_t * dev)
+static void i810_do_init_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1315,7 +1203,7 @@ static void i810_do_init_pageflip(drm_device_t * dev)
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
}
-static int i810_do_cleanup_pageflip(drm_device_t * dev)
+static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1327,16 +1215,14 @@ static int i810_do_cleanup_pageflip(drm_device_t * dev)
return 0;
}
-static int i810_flip_bufs(struct inode *inode, struct file *filp,
- unsigned int cmd, unsigned long arg)
+static int i810_flip_bufs(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- drm_file_t *priv = filp->private_data;
- drm_device_t *dev = priv->head->dev;
drm_i810_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv->page_flipping)
i810_do_init_pageflip(dev);
@@ -1345,7 +1231,7 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp,
return 0;
}
-int i810_driver_load(drm_device_t *dev, unsigned long flags)
+int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
/* i810 has 4 more counters */
dev->counters += 4;
@@ -1357,12 +1243,12 @@ int i810_driver_load(drm_device_t *dev, unsigned long flags)
return 0;
}
-void i810_driver_lastclose(drm_device_t * dev)
+void i810_driver_lastclose(struct drm_device * dev)
{
i810_dma_cleanup(dev);
}
-void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_i810_private_t *dev_priv = dev->dev_private;
@@ -1372,33 +1258,34 @@ void i810_driver_preclose(drm_device_t * dev, DRMFILE filp)
}
}
-void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+ struct drm_file *file_priv)
{
- i810_reclaim_buffers(dev, filp);
+ i810_reclaim_buffers(dev, file_priv);
}
-int i810_driver_dma_quiescent(drm_device_t * dev)
+int i810_driver_dma_quiescent(struct drm_device * dev)
{
i810_dma_quiescent(dev);
return 0;
}
-drm_ioctl_desc_t i810_ioctls[] = {
- [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH}
+struct drm_ioctl_desc i810_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_DOCOPY, i810_docopy, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH)
};
int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
@@ -1414,7 +1301,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls);
* \returns
 * A value of 1 is always returned to indicate every i810 is AGP.
*/
-int i810_driver_device_is_agp(drm_device_t * dev)
+int i810_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}
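The i810 conversion above also shows the new ioctl calling convention adopted in this merge: the DRM core resolves the device and file for the handler and copies the user argument into a kernel buffer passed as data, so the inode/filp/cmd/arg boilerplate and the per-handler copy_from_user()/copy_to_user() calls disappear, and registration moves from bare designated initializers to the DRM_IOCTL_DEF() macro. A minimal sketch of a handler in that style; the "example" names and argument struct are hypothetical, not part of the driver:

static int i810_example(struct drm_device *dev, void *data,
                        struct drm_file *file_priv)
{
        drm_i810_example_t *arg = data; /* already copied in by the DRM core */

        LOCK_TEST_WITH_RETURN(dev, file_priv);
        if (!dev->dev_private)
                return -EINVAL;

        /* operate on *arg; for _IOWR ioctls the core copies the result back */
        return 0;
}

/* table entry, replacing the old [DRM_IOCTL_NR(...)] = { ... } initializer: */
/* DRM_IOCTL_DEF(DRM_I810_EXAMPLE, i810_example, DRM_AUTH) */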
diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h
index beec4a2a..d803aeca 100644
--- a/linux-core/i810_drm.h
+++ b/linux-core/i810_drm.h
@@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func {
/* This is the init structure after v1.2 */
typedef struct _drm_i810_init {
drm_i810_init_func_t func;
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
- int ring_map_idx;
- int buffer_map_idx;
-#else
unsigned int mmio_offset;
unsigned int buffers_offset;
-#endif
int sarea_priv_offset;
unsigned int ring_start;
unsigned int ring_end;
@@ -124,29 +119,6 @@ typedef struct _drm_i810_init {
unsigned int pitch_bits;
} drm_i810_init_t;
-/* This is the init structure prior to v1.2 */
-typedef struct _drm_i810_pre12_init {
- drm_i810_init_func_t func;
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
- int ring_map_idx;
- int buffer_map_idx;
-#else
- unsigned int mmio_offset;
- unsigned int buffers_offset;
-#endif
- int sarea_priv_offset;
- unsigned int ring_start;
- unsigned int ring_end;
- unsigned int ring_size;
- unsigned int front_offset;
- unsigned int back_offset;
- unsigned int depth_offset;
- unsigned int w;
- unsigned int h;
- unsigned int pitch;
- unsigned int pitch_bits;
-} drm_i810_pre12_init_t;
-
/* Warning: If you change the SAREA structure you must change the Xserver
* structure as well */
@@ -163,7 +135,7 @@ typedef struct _drm_i810_sarea {
unsigned int dirty;
unsigned int nbox;
- drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS];
/* Maintain an LRU of contiguous regions of texture space. If
* you think you own a region of texture memory, and it has an
diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h
index 69d79499..c525e165 100644
--- a/linux-core/i810_drv.h
+++ b/linux-core/i810_drv.h
@@ -77,8 +77,8 @@ typedef struct _drm_i810_ring_buffer {
} drm_i810_ring_buffer_t;
typedef struct drm_i810_private {
- drm_map_t *sarea_map;
- drm_map_t *mmio_map;
+ struct drm_map *sarea_map;
+ struct drm_map *mmio_map;
drm_i810_sarea_t *sarea_priv;
drm_i810_ring_buffer_t ring;
@@ -88,7 +88,7 @@ typedef struct drm_i810_private {
dma_addr_t dma_status_page;
- drm_buf_t *mmap_buffer;
+ struct drm_buf *mmap_buffer;
u32 front_di1, back_di1, zi1;
@@ -115,17 +115,18 @@ typedef struct drm_i810_private {
} drm_i810_private_t;
/* i810_dma.c */
-extern int i810_driver_dma_quiescent(drm_device_t * dev);
-extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
- struct file *filp);
+extern int i810_driver_dma_quiescent(struct drm_device * dev);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+ struct drm_file *file_priv);
extern int i810_driver_load(struct drm_device *, unsigned long flags);
-extern void i810_driver_lastclose(drm_device_t * dev);
-extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev,
- struct file *filp);
-extern int i810_driver_device_is_agp(drm_device_t * dev);
-
-extern drm_ioctl_desc_t i810_ioctls[];
+extern void i810_driver_lastclose(struct drm_device * dev);
+extern void i810_driver_preclose(struct drm_device * dev,
+ struct drm_file *file_priv);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev,
+ struct drm_file *file_priv);
+extern int i810_driver_device_is_agp(struct drm_device * dev);
+
+extern struct drm_ioctl_desc i810_ioctls[];
extern int i810_max_ioctl;
#define I810_BASE(reg) ((unsigned long) \
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 6e93ae21..69a1aab7 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -33,14 +33,13 @@
#include "i915_drm.h"
#include "i915_drv.h"
-drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev)
+struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
{
return drm_agp_init_ttm(dev);
}
-int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
+int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type)
{
- *class = 0;
if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
@@ -48,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
return 0;
}
-int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
+int i915_invalidate_caches(struct drm_device * dev, uint64_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
@@ -64,8 +63,8 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags)
return i915_emit_mi_flush(dev, flush_cmd);
}
-int i915_init_mem_type(drm_device_t * dev, uint32_t type,
- drm_mem_type_manager_t * man)
+int i915_init_mem_type(struct drm_device * dev, uint32_t type,
+ struct drm_mem_type_manager * man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
@@ -109,7 +108,7 @@ int i915_init_mem_type(drm_device_t * dev, uint32_t type,
return 0;
}
-uint32_t i915_evict_mask(drm_buffer_object_t *bo)
+uint32_t i915_evict_mask(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
@@ -120,14 +119,14 @@ uint32_t i915_evict_mask(drm_buffer_object_t *bo)
}
}
-static void i915_emit_copy_blit(drm_device_t * dev,
+static void i915_emit_copy_blit(struct drm_device * dev,
uint32_t src_offset,
uint32_t dst_offset,
uint32_t pages, int direction)
{
uint32_t cur_pages;
uint32_t stride = PAGE_SIZE;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
RING_LOCALS;
if (!dev_priv)
@@ -154,10 +153,10 @@ static void i915_emit_copy_blit(drm_device_t * dev,
return;
}
-static int i915_move_blit(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+static int i915_move_blit(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
- drm_bo_mem_reg_t *old_mem = &bo->mem;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
int dir = 0;
if ((old_mem->mem_type == new_mem->mem_type) &&
@@ -184,11 +183,11 @@ static int i915_move_blit(drm_buffer_object_t * bo,
* then blit and subsequently move out again.
*/
-static int i915_move_flip(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+static int i915_move_flip(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
- drm_device_t *dev = bo->dev;
- drm_bo_mem_reg_t tmp_mem;
+ struct drm_device *dev = bo->dev;
+ struct drm_bo_mem_reg tmp_mem;
int ret;
tmp_mem = *new_mem;
@@ -220,10 +219,10 @@ out_cleanup:
return ret;
}
-int i915_move(drm_buffer_object_t * bo,
- int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+int i915_move(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
{
- drm_bo_mem_reg_t *old_mem = &bo->mem;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index e612a46c..27e95d65 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -40,7 +40,7 @@ static struct pci_device_id pciidlist[] = {
};
#ifdef I915_HAVE_FENCE
-static drm_fence_driver_t i915_fence_driver = {
+static struct drm_fence_driver i915_fence_driver = {
.num_classes = 1,
.wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
.flush_diff = (1U << (BREADCRUMB_BITS - 2)),
@@ -56,7 +56,7 @@ static drm_fence_driver_t i915_fence_driver = {
static uint32_t i915_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
-static drm_bo_driver_t i915_bo_driver = {
+static struct drm_bo_driver i915_bo_driver = {
.mem_type_prio = i915_mem_prios,
.mem_busy_prio = i915_busy_prios,
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 00873485..abea8ee1 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -38,12 +38,12 @@
* Implements an intel sync flush operation.
*/
-static void i915_perform_flush(drm_device_t * dev)
+static void i915_perform_flush(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- drm_fence_manager_t *fm = &dev->fm;
- drm_fence_class_manager_t *fc = &fm->class[0];
- drm_fence_driver_t *driver = dev->driver->fence_driver;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &fm->class[0];
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
uint32_t flush_flags = 0;
uint32_t flush_sequence = 0;
uint32_t i_status;
@@ -109,9 +109,9 @@ static void i915_perform_flush(drm_device_t * dev)
}
-void i915_poke_flush(drm_device_t * dev, uint32_t class)
+void i915_poke_flush(struct drm_device * dev, uint32_t class)
{
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
unsigned long flags;
write_lock_irqsave(&fm->lock, flags);
@@ -119,10 +119,10 @@ void i915_poke_flush(drm_device_t * dev, uint32_t class)
write_unlock_irqrestore(&fm->lock, flags);
}
-int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
+int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
if (!dev_priv)
return -EINVAL;
@@ -135,16 +135,16 @@ int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
return 0;
}
-void i915_fence_handler(drm_device_t * dev)
+void i915_fence_handler(struct drm_device * dev)
{
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
write_lock(&fm->lock);
i915_perform_flush(dev);
write_unlock(&fm->lock);
}
-int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags)
+int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
{
/*
* We have an irq that tells us when we have a new breadcrumb.
diff --git a/linux-core/intel_crt.c b/linux-core/intel_crt.c
index cdfb314f..d2e1f95c 100644
--- a/linux-core/intel_crt.c
+++ b/linux-core/intel_crt.c
@@ -34,8 +34,8 @@
static void intel_crt_dpms(struct drm_output *output, int mode)
{
- drm_device_t *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = output->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
temp = I915_READ(ADPA);
@@ -93,10 +93,10 @@ static void intel_crt_mode_set(struct drm_output *output,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- drm_device_t *dev = output->dev;
+ struct drm_device *dev = output->dev;
struct drm_crtc *crtc = output->crtc;
struct intel_crtc *intel_crtc = crtc->driver_private;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_md_reg;
u32 adpa, dpll_md;
@@ -139,8 +139,8 @@ static void intel_crt_mode_set(struct drm_output *output,
*/
static bool intel_crt_detect_hotplug(struct drm_output *output)
{
- drm_device_t *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = output->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
@@ -175,7 +175,7 @@ static bool intel_crt_detect_ddc(struct drm_output *output)
static enum drm_output_status intel_crt_detect(struct drm_output *output)
{
- drm_device_t *dev = output->dev;
+ struct drm_device *dev = output->dev;
	if (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev)) {
if (intel_crt_detect_hotplug(output))
@@ -221,7 +221,7 @@ static const struct drm_output_funcs intel_crt_output_funcs = {
.cleanup = intel_crt_destroy,
};
-void intel_crt_init(drm_device_t *dev)
+void intel_crt_init(struct drm_device *dev)
{
struct drm_output *output;
struct intel_output *intel_output;
diff --git a/linux-core/intel_display.c b/linux-core/intel_display.c
index 3fce0941..3ff9f822 100644
--- a/linux-core/intel_display.c
+++ b/linux-core/intel_display.c
@@ -170,7 +170,7 @@ static const intel_limit_t intel_limits[] = {
static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
{
- drm_device_t *dev = crtc->dev;
+ struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
if (IS_I9XX(dev)) {
@@ -278,8 +278,8 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
int refclk, intel_clock_t *best_clock)
{
- drm_device_t *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
const intel_limit_t *limit = intel_limit(crtc);
int err = target;
@@ -334,9 +334,9 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
}
void
-intel_set_vblank(drm_device_t *dev)
+intel_set_vblank(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
int vbl_pipe = 0;
@@ -352,7 +352,7 @@ intel_set_vblank(drm_device_t *dev)
i915_enable_interrupt(dev);
}
void
-intel_wait_for_vblank(drm_device_t *dev)
+intel_wait_for_vblank(struct drm_device *dev)
{
/* Wait for 20ms, i.e. one cycle at 50hz. */
udelay(20000);
@@ -361,8 +361,8 @@ intel_wait_for_vblank(drm_device_t *dev)
void
intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
{
- drm_device_t *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = crtc->driver_private;
int pipe = intel_crtc->pipe;
unsigned long Start, Offset;
@@ -389,12 +389,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
switch (pipe) {
case 0:
- dev_priv->sarea_priv->pipeA_x = x;
- dev_priv->sarea_priv->pipeA_y = y;
+ dev_priv->sarea_priv->planeA_x = x;
+ dev_priv->sarea_priv->planeA_y = y;
break;
case 1:
- dev_priv->sarea_priv->pipeB_x = x;
- dev_priv->sarea_priv->pipeB_y = y;
+ dev_priv->sarea_priv->planeB_x = x;
+ dev_priv->sarea_priv->planeB_y = y;
break;
default:
DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
@@ -410,8 +410,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y)
*/
static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
{
- drm_device_t *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = crtc->driver_private;
int pipe = intel_crtc->pipe;
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
@@ -513,12 +513,12 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (pipe) {
case 0:
- dev_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
- dev_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
+ dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0;
+ dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0;
break;
case 1:
- dev_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
- dev_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
+ dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0;
+ dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0;
break;
default:
DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
@@ -576,7 +576,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
/** Returns the core display clock speed for i830 - i945 */
-static int intel_get_core_clock_speed(drm_device_t *dev)
+static int intel_get_core_clock_speed(struct drm_device *dev)
{
/* Core clock values taken from the published datasheets.
@@ -636,9 +636,9 @@ static int intel_get_core_clock_speed(drm_device_t *dev)
* Return the pipe currently connected to the panel fitter,
* or -1 if the panel fitter is not present or not in use
*/
-static int intel_panel_fitter_pipe (drm_device_t *dev)
+static int intel_panel_fitter_pipe (struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 pfit_control;
/* i830 doesn't have a panel fitter */
@@ -664,8 +664,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode,
int x, int y)
{
- drm_device_t *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = crtc->driver_private;
int pipe = intel_crtc->pipe;
int fp_reg = (pipe == 0) ? FPA0 : FPB0;
@@ -938,8 +938,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
/** Loads the palette/gamma unit for the CRTC with the prepared values */
void intel_crtc_load_lut(struct drm_crtc *crtc)
{
- drm_device_t *dev = crtc->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = crtc->driver_private;
int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B;
int i;
@@ -970,7 +970,7 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
/* Returns the clock of the currently programmed mode of the given pipe. */
static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = crtc->driver_private;
int pipe = intel_crtc->pipe;
u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B);
@@ -1045,10 +1045,10 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
}
/** Returns the currently programmed mode of the given pipe. */
-struct drm_display_mode *intel_crtc_mode_get(drm_device_t *dev,
+struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = crtc->driver_private;
int pipe = intel_crtc->pipe;
struct drm_display_mode *mode;
@@ -1089,7 +1089,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
};
-void intel_crtc_init(drm_device_t *dev, int pipe)
+void intel_crtc_init(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc;
struct intel_crtc *intel_crtc;
@@ -1115,7 +1115,7 @@ void intel_crtc_init(drm_device_t *dev, int pipe)
crtc->driver_private = intel_crtc;
}
-struct drm_crtc *intel_get_crtc_from_pipe(drm_device_t *dev, int pipe)
+struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe)
{
struct drm_crtc *crtc = NULL;
@@ -1127,7 +1127,7 @@ struct drm_crtc *intel_get_crtc_from_pipe(drm_device_t *dev, int pipe)
return crtc;
}
-int intel_output_clones(drm_device_t *dev, int type_mask)
+int intel_output_clones(struct drm_device *dev, int type_mask)
{
int index_mask = 0;
struct drm_output *output;
@@ -1143,7 +1143,7 @@ int intel_output_clones(drm_device_t *dev, int type_mask)
}
-static void intel_setup_outputs(drm_device_t *dev)
+static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_output *output;
@@ -1194,7 +1194,7 @@ static void intel_setup_outputs(drm_device_t *dev)
}
}
-void intel_modeset_init(drm_device_t *dev)
+void intel_modeset_init(struct drm_device *dev)
{
int num_pipe;
int i;
@@ -1223,7 +1223,7 @@ void intel_modeset_init(drm_device_t *dev)
//drm_initial_config(dev, false);
}
-void intel_modeset_cleanup(drm_device_t *dev)
+void intel_modeset_cleanup(struct drm_device *dev)
{
drm_mode_config_cleanup(dev);
}
diff --git a/linux-core/intel_drv.h b/linux-core/intel_drv.h
index 0a03e37b..7de4b924 100644
--- a/linux-core/intel_drv.h
+++ b/linux-core/intel_drv.h
@@ -38,7 +38,7 @@
#define INTEL_DVO_CHIP_TVOUT 4
struct intel_i2c_chan {
- drm_device_t *drm_dev; /* for getting at dev. private (mmio etc.) */
+ struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
u32 reg; /* GPIO reg */
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo;
@@ -58,23 +58,23 @@ struct intel_crtc {
u8 lut_r[256], lut_g[256], lut_b[256];
};
-struct intel_i2c_chan *intel_i2c_create(drm_device_t *dev, const u32 reg,
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name);
void intel_i2c_destroy(struct intel_i2c_chan *chan);
int intel_ddc_get_modes(struct drm_output *output);
extern bool intel_ddc_probe(struct drm_output *output);
-extern void intel_crt_init(drm_device_t *dev);
-extern void intel_sdvo_init(drm_device_t *dev, int output_device);
-extern void intel_lvds_init(drm_device_t *dev);
+extern void intel_crt_init(struct drm_device *dev);
+extern void intel_sdvo_init(struct drm_device *dev, int output_device);
+extern void intel_lvds_init(struct drm_device *dev);
extern void intel_crtc_load_lut(struct drm_crtc *crtc);
extern void intel_output_prepare (struct drm_output *output);
extern void intel_output_commit (struct drm_output *output);
-extern struct drm_display_mode *intel_crtc_mode_get(drm_device_t *dev,
+extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
struct drm_crtc *crtc);
-extern void intel_wait_for_vblank(drm_device_t *dev);
-extern struct drm_crtc *intel_get_crtc_from_pipe(drm_device_t *dev, int pipe);
+extern void intel_wait_for_vblank(struct drm_device *dev);
+extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
extern int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc);
extern int intelfb_remove(struct drm_device *dev, struct drm_crtc *crtc);
diff --git a/linux-core/intel_fb.c b/linux-core/intel_fb.c
index 3bfbcda3..048295ab 100644
--- a/linux-core/intel_fb.c
+++ b/linux-core/intel_fb.c
@@ -446,7 +446,7 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
struct device *device = &dev->pdev->dev;
struct drm_framebuffer *fb;
struct drm_display_mode *mode = crtc->desired_mode;
- drm_buffer_object_t *fbo = NULL;
+ struct drm_buffer_object *fbo = NULL;
int ret;
info = framebuffer_alloc(sizeof(struct intelfb_par), device);
@@ -468,13 +468,11 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
fb->bits_per_pixel = 32;
fb->pitch = fb->width * ((fb->bits_per_pixel + 1) / 8);
fb->depth = 24;
- ret = drm_buffer_object_create(dev,
- fb->width * fb->height * 4,
+ ret = drm_buffer_object_create(dev, fb->width * fb->height * 4,
drm_bo_type_kernel,
DRM_BO_FLAG_READ |
DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_MEM_VRAM | /* FIXME! */
- DRM_BO_FLAG_NO_MOVE,
+ DRM_BO_FLAG_MEM_VRAM,
0, 0, 0,
&fbo);
if (ret || !fbo) {
@@ -483,6 +481,15 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc)
framebuffer_release(info);
return -EINVAL;
}
+
+ ret = drm_bo_set_pin(dev, fbo, 1);
+ if (ret) {
+ printk(KERN_ERR "failed to pin framebuffer, aborting\n");
+ drm_framebuffer_destroy(fb);
+ framebuffer_release(info);
+ return -EINVAL;
+ }
+
fb->offset = fbo->offset;
fb->bo = fbo;
printk("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width,
diff --git a/linux-core/intel_i2c.c b/linux-core/intel_i2c.c
index d4cf7eef..b512e592 100644
--- a/linux-core/intel_i2c.c
+++ b/linux-core/intel_i2c.c
@@ -46,7 +46,7 @@
static int get_clock(void *data)
{
struct intel_i2c_chan *chan = data;
- drm_i915_private_t *dev_priv = chan->drm_dev->dev_private;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 val;
val = I915_READ(chan->reg);
@@ -56,7 +56,7 @@ static int get_clock(void *data)
static int get_data(void *data)
{
struct intel_i2c_chan *chan = data;
- drm_i915_private_t *dev_priv = chan->drm_dev->dev_private;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 val;
val = I915_READ(chan->reg);
@@ -66,8 +66,8 @@ static int get_data(void *data)
static void set_clock(void *data, int state_high)
{
struct intel_i2c_chan *chan = data;
- drm_device_t *dev = chan->drm_dev;
- drm_i915_private_t *dev_priv = chan->drm_dev->dev_private;
+ struct drm_device *dev = chan->drm_dev;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 reserved = 0, clock_bits;
/* On most chips, these bits must be preserved in software. */
@@ -87,8 +87,8 @@ static void set_clock(void *data, int state_high)
static void set_data(void *data, int state_high)
{
struct intel_i2c_chan *chan = data;
- drm_device_t *dev = chan->drm_dev;
- drm_i915_private_t *dev_priv = chan->drm_dev->dev_private;
+ struct drm_device *dev = chan->drm_dev;
+ struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
u32 reserved = 0, data_bits;
/* On most chips, these bits must be preserved in software. */
@@ -127,7 +127,7 @@ static void set_data(void *data, int state_high)
* %GPIOH
* see PRM for details on how these different busses are used.
*/
-struct intel_i2c_chan *intel_i2c_create(drm_device_t *dev, const u32 reg,
+struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg,
const char *name)
{
struct intel_i2c_chan *chan;
diff --git a/linux-core/intel_lvds.c b/linux-core/intel_lvds.c
index 942eb2ab..4f15c13a 100644
--- a/linux-core/intel_lvds.c
+++ b/linux-core/intel_lvds.c
@@ -43,7 +43,7 @@
*/
static void intel_lvds_set_backlight(struct drm_device *dev, int level)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl;
blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
*/
static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >>
BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
@@ -67,7 +67,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
*/
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 pp_status;
if (on) {
@@ -104,7 +104,7 @@ static void intel_lvds_dpms(struct drm_output *output, int mode)
static void intel_lvds_save(struct drm_output *output)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->savePP_ON = I915_READ(LVDSPP_ON);
dev_priv->savePP_OFF = I915_READ(LVDSPP_OFF);
@@ -125,7 +125,7 @@ static void intel_lvds_save(struct drm_output *output)
static void intel_lvds_restore(struct drm_output *output)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
I915_WRITE(LVDSPP_ON, dev_priv->savePP_ON);
@@ -142,7 +142,7 @@ static int intel_lvds_mode_valid(struct drm_output *output,
struct drm_display_mode *mode)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
if (fixed_mode) {
@@ -160,7 +160,7 @@ static bool intel_lvds_mode_fixup(struct drm_output *output,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = output->crtc->driver_private;
struct drm_output *tmp_output;
@@ -214,7 +214,7 @@ static bool intel_lvds_mode_fixup(struct drm_output *output,
static void intel_lvds_prepare(struct drm_output *output)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
@@ -226,7 +226,7 @@ static void intel_lvds_prepare(struct drm_output *output)
static void intel_lvds_commit( struct drm_output *output)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->backlight_duty_cycle == 0)
dev_priv->backlight_duty_cycle =
@@ -240,7 +240,7 @@ static void intel_lvds_mode_set(struct drm_output *output,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = output->crtc->driver_private;
u32 pfit_control;
@@ -285,7 +285,7 @@ static enum drm_output_status intel_lvds_detect(struct drm_output *output)
static int intel_lvds_get_modes(struct drm_output *output)
{
struct drm_device *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
ret = intel_ddc_get_modes(output);
@@ -359,7 +359,7 @@ static const struct drm_output_funcs intel_lvds_output_funcs = {
*/
void intel_lvds_init(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_output *output;
struct intel_output *intel_output;
struct drm_display_mode *scan; /* *modes, *bios_mode; */
diff --git a/linux-core/intel_sdvo.c b/linux-core/intel_sdvo.c
index 196298ff..11f3a0a6 100644
--- a/linux-core/intel_sdvo.c
+++ b/linux-core/intel_sdvo.c
@@ -62,8 +62,8 @@ struct intel_sdvo_priv {
*/
static void intel_sdvo_write_sdvox(struct drm_output *output, u32 val)
{
- drm_device_t *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = output->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output = output->driver_private;
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
u32 bval = val, cval = val;
@@ -567,8 +567,8 @@ static void intel_sdvo_mode_set(struct drm_output *output,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- drm_device_t *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = output->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_crtc *crtc = output->crtc;
struct intel_crtc *intel_crtc = crtc->driver_private;
struct intel_output *intel_output = output->driver_private;
@@ -698,8 +698,8 @@ static void intel_sdvo_mode_set(struct drm_output *output,
static void intel_sdvo_dpms(struct drm_output *output, int mode)
{
- drm_device_t *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = output->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output = output->driver_private;
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
u32 temp;
@@ -745,8 +745,8 @@ static void intel_sdvo_dpms(struct drm_output *output, int mode)
static void intel_sdvo_save(struct drm_output *output)
{
- drm_device_t *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = output->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output = output->driver_private;
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
int o;
@@ -782,8 +782,8 @@ static void intel_sdvo_save(struct drm_output *output)
static void intel_sdvo_restore(struct drm_output *output)
{
- drm_device_t *dev = output->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = output->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_output *intel_output = output->driver_private;
struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
int o;
@@ -941,7 +941,7 @@ static const struct drm_output_funcs intel_sdvo_output_funcs = {
.cleanup = intel_sdvo_destroy
};
-void intel_sdvo_init(drm_device_t *dev, int output_device)
+void intel_sdvo_init(struct drm_device *dev, int output_device)
{
struct drm_output *output;
struct intel_output *intel_output;
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index ef6f1e44..1eb6d9e6 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -36,7 +36,7 @@
#include "drm_pciids.h"
-static int mga_driver_device_is_agp(drm_device_t * dev);
+static int mga_driver_device_is_agp(struct drm_device * dev);
static struct pci_device_id pciidlist[] = {
mga_PCI_IDS
@@ -127,7 +127,7 @@ MODULE_LICENSE("GPL and additional rights");
* \returns
* If the device is a PCI G450, zero is returned. Otherwise 2 is returned.
*/
-static int mga_driver_device_is_agp(drm_device_t * dev)
+static int mga_driver_device_is_agp(struct drm_device * dev)
{
const struct pci_dev * const pdev = dev->pdev;
diff --git a/linux-core/nouveau_dma.c b/linux-core/nouveau_dma.c
new file mode 120000
index 00000000..f8e0bdc3
--- /dev/null
+++ b/linux-core/nouveau_dma.c
@@ -0,0 +1 @@
+../shared-core/nouveau_dma.c
\ No newline at end of file
diff --git a/linux-core/nouveau_dma.h b/linux-core/nouveau_dma.h
new file mode 120000
index 00000000..a545e387
--- /dev/null
+++ b/linux-core/nouveau_dma.h
@@ -0,0 +1 @@
+../shared-core/nouveau_dma.h
\ No newline at end of file
diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c
index ac030d89..6c73b0d3 100644
--- a/linux-core/nouveau_drv.c
+++ b/linux-core/nouveau_drv.c
@@ -32,7 +32,7 @@ static struct pci_device_id pciidlist[] = {
nouveau_PCI_IDS
};
-extern drm_ioctl_desc_t nouveau_ioctls[];
+extern struct drm_ioctl_desc nouveau_ioctls[];
extern int nouveau_max_ioctl;
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
new file mode 100644
index 00000000..97d5330b
--- /dev/null
+++ b/linux-core/nouveau_sgdma.c
@@ -0,0 +1,335 @@
+#include "drmP.h"
+#include "nouveau_drv.h"
+
+#define NV_CTXDMA_PAGE_SHIFT 12
+#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT)
+#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
+
+struct nouveau_sgdma_be {
+ struct drm_ttm_backend backend;
+ struct drm_device *dev;
+
+ int pages;
+ int pages_populated;
+ dma_addr_t *pagelist;
+ int is_bound;
+
+ unsigned int pte_start;
+};
+
+static int
+nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
+{
+ return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+static int
+nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
+ struct page **pages)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ int p, d, o;
+
+ DRM_DEBUG("num_pages = %ld\n", num_pages);
+
+ if (nvbe->pagelist)
+ return -EINVAL;
+ nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT;
+ nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t),
+ DRM_MEM_PAGES);
+
+ nvbe->pages_populated = d = 0;
+ for (p = 0; p < num_pages; p++) {
+ for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
+ nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
+ pages[p], o,
+ NV_CTXDMA_PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(nvbe->pagelist[d])) {
+ be->func->clear(be);
+ DRM_ERROR("pci_map_page failed\n");
+ return -EINVAL;
+ }
+ nvbe->pages_populated = ++d;
+ }
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sgdma_clear(struct drm_ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ int d;
+
+ DRM_DEBUG("\n");
+
+ if (nvbe && nvbe->pagelist) {
+ if (nvbe->is_bound)
+ be->func->unbind(be);
+
+ for (d = 0; d < nvbe->pages_populated; d++) {
+ pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d],
+ NV_CTXDMA_PAGE_SIZE,
+ PCI_DMA_BIDIRECTIONAL);
+ }
+ drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t),
+ DRM_MEM_PAGES);
+ }
+}
+
+static int
+nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start,
+ int cached)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ uint64_t offset = (pg_start << PAGE_SHIFT);
+ uint32_t i;
+
+ DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached);
+
+ if (offset & NV_CTXDMA_PAGE_MASK)
+ return -EINVAL;
+ nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ if (dev_priv->card_type < NV_50)
+ nvbe->pte_start += 2; /* skip ctxdma header */
+
+ for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) {
+ uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start];
+
+ if (pteval & NV_CTXDMA_PAGE_MASK) {
+ DRM_ERROR("Bad pteval 0x%llx\n", pteval);
+ return -EINVAL;
+ }
+
+ if (dev_priv->card_type < NV_50) {
+ INSTANCE_WR(gpuobj, i, pteval | 3);
+ } else {
+ INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21);
+ INSTANCE_WR(gpuobj, (i<<1)+1, 0x00000000);
+ }
+ }
+
+ nvbe->is_bound = 1;
+ return 0;
+}
+
+static int
+nouveau_sgdma_unbind(struct drm_ttm_backend *be)
+{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ if (nvbe->is_bound) {
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ unsigned int pte;
+
+ pte = nvbe->pte_start;
+ while (pte < (nvbe->pte_start + nvbe->pages)) {
+ uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;
+
+ if (dev_priv->card_type < NV_50) {
+ INSTANCE_WR(gpuobj, pte, pteval | 3);
+ } else {
+ INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010);
+ INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004);
+ }
+
+ pte++;
+ }
+
+ nvbe->is_bound = 0;
+ }
+
+ return 0;
+}
+
+static void
+nouveau_sgdma_destroy(struct drm_ttm_backend *be)
+{
+ DRM_DEBUG("\n");
+ if (be) {
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ if (nvbe) {
+ if (nvbe->pagelist)
+ be->func->clear(be);
+ drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM);
+ }
+ }
+}
+
+static struct drm_ttm_backend_func nouveau_sgdma_backend = {
+ .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust,
+ .populate = nouveau_sgdma_populate,
+ .clear = nouveau_sgdma_clear,
+ .bind = nouveau_sgdma_bind,
+ .unbind = nouveau_sgdma_unbind,
+ .destroy = nouveau_sgdma_destroy
+};
+
+struct drm_ttm_backend *
+nouveau_sgdma_init_ttm(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_sgdma_be *nvbe;
+
+ if (!dev_priv->gart_info.sg_ctxdma)
+ return NULL;
+
+ nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM);
+ if (!nvbe)
+ return NULL;
+
+ nvbe->dev = dev;
+
+ nvbe->backend.func = &nouveau_sgdma_backend;
+ nvbe->backend.mem_type = DRM_BO_MEM_TT;
+
+ return &nvbe->backend;
+}
+
+int
+nouveau_sgdma_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ uint32_t aper_size, obj_size;
+ int i, ret;
+
+ if (dev_priv->card_type < NV_50) {
+ aper_size = (64 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4;
+ obj_size += 8; /* ctxdma header */
+ } else {
+ /* 1 entire VM page table */
+ aper_size = (512 * 1024 * 1024);
+ obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8;
+ }
+
+ if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
+ NVOBJ_FLAG_ALLOW_NO_REFS |
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE, &gpuobj))) {
+ DRM_ERROR("Error creating sgdma object: %d\n", ret);
+ return ret;
+ }
+
+ if (dev_priv->card_type < NV_50) {
+ dev_priv->gart_info.sg_dummy_page =
+ alloc_page(GFP_KERNEL|__GFP_DMA32);
+ SetPageLocked(dev_priv->gart_info.sg_dummy_page);
+ dev_priv->gart_info.sg_dummy_bus =
+ pci_map_page(dev->pdev,
+ dev_priv->gart_info.sg_dummy_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+
+ /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
+ * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
+ * on those cards? */
+ INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+ (1 << 12) /* PT present */ |
+ (0 << 13) /* PT *not* linear */ |
+ (NV_DMA_ACCESS_RW << 14) |
+ (NV_DMA_TARGET_PCI << 16));
+ INSTANCE_WR(gpuobj, 1, aper_size - 1);
+ for (i=2; i<2+(aper_size>>12); i++) {
+ INSTANCE_WR(gpuobj, i,
+ dev_priv->gart_info.sg_dummy_bus | 3);
+ }
+ } else {
+ for (i=0; i<obj_size; i+=8) {
+ INSTANCE_WR(gpuobj, (i+0)/4, 0); //x00000010);
+ INSTANCE_WR(gpuobj, (i+4)/4, 0); //0x00000004);
+ }
+ }
+
+ dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
+ dev_priv->gart_info.aper_base = 0;
+ dev_priv->gart_info.aper_size = aper_size;
+ dev_priv->gart_info.sg_ctxdma = gpuobj;
+ return 0;
+}
+
+void
+nouveau_sgdma_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->gart_info.sg_dummy_page) {
+ pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus,
+ NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ unlock_page(dev_priv->gart_info.sg_dummy_page);
+ __free_page(dev_priv->gart_info.sg_dummy_page);
+ dev_priv->gart_info.sg_dummy_page = NULL;
+ dev_priv->gart_info.sg_dummy_bus = 0;
+ }
+
+ nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+}
+
+int
+nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_ttm_backend *be;
+ struct drm_scatter_gather sgreq;
+ int ret;
+
+ dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
+ if (!dev_priv->gart_info.sg_be)
+ return -ENOMEM;
+ be = dev_priv->gart_info.sg_be;
+
+ /* Hack the aperture size down to the amount of system memory
+ * we're going to bind into it.
+ */
+ if (dev_priv->gart_info.aper_size > 32*1024*1024)
+ dev_priv->gart_info.aper_size = 32*1024*1024;
+
+ sgreq.size = dev_priv->gart_info.aper_size;
+ if ((ret = drm_sg_alloc(dev, &sgreq))) {
+ DRM_ERROR("drm_sg_alloc failed: %d\n", ret);
+ return ret;
+ }
+ dev_priv->gart_info.sg_handle = sgreq.handle;
+
+ if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) {
+ DRM_ERROR("failed populate: %d\n", ret);
+ return ret;
+ }
+
+ if ((ret = be->func->bind(be, 0, 0))) {
+ DRM_ERROR("failed bind: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev)
+{
+}
+
+int
+nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ int pte;
+
+ pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ if (dev_priv->card_type < NV_50) {
+ *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+ return 0;
+ }
+
+ DRM_ERROR("Unimplemented on NV50\n");
+ return -EINVAL;
+}
+
diff --git a/linux-core/nv04_instmem.c b/linux-core/nv04_instmem.c
new file mode 120000
index 00000000..e720fb5b
--- /dev/null
+++ b/linux-core/nv04_instmem.c
@@ -0,0 +1 @@
+../shared-core/nv04_instmem.c
\ No newline at end of file
diff --git a/linux-core/nv50_instmem.c b/linux-core/nv50_instmem.c
new file mode 120000
index 00000000..4e45344a
--- /dev/null
+++ b/linux-core/nv50_instmem.c
@@ -0,0 +1 @@
+../shared-core/nv50_instmem.c
\ No newline at end of file
diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c
index 114ec8f9..c9112c63 100644
--- a/linux-core/sis_drv.c
+++ b/linux-core/sis_drv.c
@@ -36,14 +36,14 @@ static struct pci_device_id pciidlist[] = {
};
-static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
+static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_sis_private_t *dev_priv;
int ret;
dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
dev->dev_private = (void *)dev_priv;
dev_priv->chipset = chipset;
@@ -55,7 +55,7 @@ static int sis_driver_load(drm_device_t *dev, unsigned long chipset)
return ret;
}
-static int sis_driver_unload(drm_device_t *dev)
+static int sis_driver_unload(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c
index 21c1f2d7..7e162a8e 100644
--- a/linux-core/sis_mm.c
+++ b/linux-core/sis_mm.c
@@ -81,19 +81,16 @@ unsigned long sis_sman_mm_offset(void *private, void *ref)
#endif
-static int sis_fb_init(DRM_IOCTL_ARGS)
+static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_fb_t fb;
+ drm_sis_fb_t *fb = data;
int ret;
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));
-
mutex_lock(&dev->struct_mutex);
#if defined(__linux__) && defined(CONFIG_FB_SIS)
{
- drm_sman_mm_t sman_mm;
+ struct drm_sman_mm sman_mm;
sman_mm.private = (void *)0xFFFFFFFF;
sman_mm.allocate = sis_sman_mm_allocate;
sman_mm.free = sis_sman_mm_free;
@@ -104,7 +101,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
}
#else
ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0,
- fb.size >> SIS_MM_ALIGN_SHIFT);
+ fb->size >> SIS_MM_ALIGN_SHIFT);
#endif
if (ret) {
@@ -114,24 +111,21 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
}
dev_priv->vram_initialized = 1;
- dev_priv->vram_offset = fb.offset;
+ dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
+ DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
return 0;
}
-static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv,
- unsigned long data, int pool)
+static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv,
+ void *data, int pool)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data;
- drm_sis_mem_t mem;
+ drm_sis_mem_t *mem = data;
int retval = 0;
- drm_memblock_item_t *item;
-
- DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem));
+ struct drm_memblock_item *item;
mutex_lock(&dev->struct_mutex);
@@ -139,73 +133,65 @@ static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv,
dev_priv->agp_initialized)) {
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0,
- (unsigned long)priv);
+ mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT;
+ item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0,
+ (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
if (item) {
- mem.offset = ((pool == 0) ?
+ mem->offset = ((pool == 0) ?
dev_priv->vram_offset : dev_priv->agp_offset) +
(item->mm->
offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT);
- mem.free = item->user_hash.key;
- mem.size = mem.size << SIS_MM_ALIGN_SHIFT;
+ mem->free = item->user_hash.key;
+ mem->size = mem->size << SIS_MM_ALIGN_SHIFT;
} else {
- mem.offset = 0;
- mem.size = 0;
- mem.free = 0;
- retval = DRM_ERR(ENOMEM);
+ mem->offset = 0;
+ mem->size = 0;
+ mem->free = 0;
+ retval = -ENOMEM;
}
- DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem));
-
- DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size,
- mem.offset);
+ DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size,
+ mem->offset);
return retval;
}
-static int sis_drm_free(DRM_IOCTL_ARGS)
+static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t mem;
+ drm_sis_mem_t *mem = data;
int ret;
- DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data,
- sizeof(mem));
-
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem.free);
+ ret = drm_sman_free_key(&dev_priv->sman, mem->free);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("free = 0x%lx\n", mem.free);
+ DRM_DEBUG("free = 0x%lx\n", mem->free);
return ret;
}
-static int sis_fb_alloc(DRM_IOCTL_ARGS)
+static int sis_fb_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- return sis_drm_alloc(dev, priv, data, VIDEO_TYPE);
+ return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE);
}
-static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
+static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_agp_t agp;
+ drm_sis_agp_t *agp = data;
int ret;
dev_priv = dev->dev_private;
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
- sizeof(agp));
mutex_lock(&dev->struct_mutex);
ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0,
- agp.size >> SIS_MM_ALIGN_SHIFT);
+ agp->size >> SIS_MM_ALIGN_SHIFT);
if (ret) {
DRM_ERROR("AGP memory manager initialisation error\n");
@@ -214,23 +200,23 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
}
dev_priv->agp_initialized = 1;
- dev_priv->agp_offset = agp.offset;
+ dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
+ DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
return 0;
}
-static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
+static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- return sis_drm_alloc(dev, priv, data, AGP_TYPE);
+ return sis_drm_alloc(dev, file_priv, data, AGP_TYPE);
}
-static drm_local_map_t *sis_reg_init(drm_device_t *dev)
+static drm_local_map_t *sis_reg_init(struct drm_device *dev)
{
- drm_map_list_t *entry;
+ struct drm_map_list *entry;
drm_local_map_t *map;
list_for_each_entry(entry, &dev->maplist, head) {
@@ -245,7 +231,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev)
}
int
-sis_idle(drm_device_t *dev)
+sis_idle(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
uint32_t idle_reg;
@@ -314,13 +300,13 @@ void sis_lastclose(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void sis_reclaim_buffers_locked(struct drm_device * dev,
+ struct drm_file *file_priv)
{
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_file_t *priv = filp->private_data;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
+ if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -329,20 +315,18 @@ void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
dev->driver->dma_quiescent(dev);
}
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv);
+ drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
return;
}
-drm_ioctl_desc_t sis_ioctls[] = {
- [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] =
- {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_drm_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] =
- {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}
+struct drm_ioctl_desc sis_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index ebc8c371..eb5ea826 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -32,19 +32,18 @@
#include "via_drm.h"
#include "via_drv.h"
-drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev)
+struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
{
return drm_agp_init_ttm(dev);
}
-int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type)
+int via_fence_types(struct drm_buffer_object *bo, uint32_t * type)
{
- *class = 0;
*type = 3;
return 0;
}
-int via_invalidate_caches(drm_device_t * dev, uint32_t flags)
+int via_invalidate_caches(struct drm_device * dev, uint64_t flags)
{
/*
* FIXME: Invalidate texture caches here.
@@ -54,14 +53,14 @@ int via_invalidate_caches(drm_device_t * dev, uint32_t flags)
}
-static int via_vram_info(drm_device_t *dev,
+static int via_vram_info(struct drm_device *dev,
unsigned long *offset,
unsigned long *size)
{
struct pci_dev *pdev = dev->pdev;
unsigned long flags;
- int ret = DRM_ERR(EINVAL);
+ int ret = -EINVAL;
int i;
for (i=0; i<6; ++i) {
flags = pci_resource_flags(pdev, i);
@@ -82,8 +81,8 @@ static int via_vram_info(drm_device_t *dev,
return 0;
}
-int via_init_mem_type(drm_device_t * dev, uint32_t type,
- drm_mem_type_manager_t * man)
+int via_init_mem_type(struct drm_device * dev, uint32_t type,
+ struct drm_mem_type_manager * man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
@@ -144,7 +143,7 @@ int via_init_mem_type(drm_device_t * dev, uint32_t type,
return 0;
}
-uint32_t via_evict_mask(drm_buffer_object_t *bo)
+uint32_t via_evict_mask(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c
index 2f508374..d44c26f4 100644
--- a/linux-core/via_dmablit.c
+++ b/linux-core/via_dmablit.c
@@ -206,7 +206,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
*/
static void
-via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
+via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -236,7 +236,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
first_pfn + 1;
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
@@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
vsg->state = dr_via_pages_locked;
DRM_DEBUG("DMA pages locked\n");
@@ -271,14 +271,14 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->descriptors_per_page;
if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
vsg->num_desc);
@@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
}
static void
-via_abort_dmablit(drm_device_t *dev, int engine)
+via_abort_dmablit(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -294,7 +294,7 @@ via_abort_dmablit(drm_device_t *dev, int engine)
}
static void
-via_dmablit_engine_off(drm_device_t *dev, int engine)
+via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -311,7 +311,7 @@ via_dmablit_engine_off(drm_device_t *dev, int engine)
*/
void
-via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
+via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
@@ -432,7 +432,7 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
*/
static int
-via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine)
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -465,7 +465,7 @@ static void
via_dmablit_timer(unsigned long data)
{
drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
- drm_device_t *dev = blitq->dev;
+ struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
@@ -509,7 +509,7 @@ via_dmablit_workqueue(struct work_struct *work)
#else
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
#endif
- drm_device_t *dev = blitq->dev;
+ struct drm_device *dev = blitq->dev;
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
@@ -552,7 +552,7 @@ via_dmablit_workqueue(struct work_struct *work)
void
-via_init_dmablit(drm_device_t *dev)
+via_init_dmablit(struct drm_device *dev)
{
int i,j;
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -568,7 +568,7 @@ via_init_dmablit(drm_device_t *dev)
blitq->head = 0;
blitq->cur = 0;
blitq->serviced = 0;
- blitq->num_free = VIA_NUM_BLIT_SLOTS;
+ blitq->num_free = VIA_NUM_BLIT_SLOTS - 1;
blitq->num_outstanding = 0;
blitq->is_active = 0;
blitq->aborting = 0;
@@ -594,7 +594,7 @@ via_init_dmablit(drm_device_t *dev)
static int
-via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
+via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
@@ -606,7 +606,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
DRM_ERROR("Zero size bitblt.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/*
@@ -619,7 +619,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
DRM_ERROR("Too large system memory stride. Stride: %d, "
"Length: %d\n", xfer->mem_stride, xfer->line_length);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if ((xfer->mem_stride == xfer->line_length) &&
@@ -637,7 +637,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/*
@@ -648,7 +648,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
if (xfer->mem_stride < xfer->line_length ||
abs(xfer->fb_stride) < xfer->line_length) {
DRM_ERROR("Invalid frame-buffer / memory stride.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/*
@@ -661,13 +661,13 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *
if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
#else
if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) ||
((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
#endif
@@ -707,7 +707,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
if (ret) {
- return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
+ return (-EINTR == ret) ? -EAGAIN : ret;
}
spin_lock_irqsave(&blitq->blit_lock, irqsave);
@@ -740,7 +740,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
static int
-via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
@@ -751,7 +751,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
if (dev_priv == NULL) {
DRM_ERROR("Called without initialization.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
engine = (xfer->to_fb) ? 0 : 1;
@@ -761,7 +761,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
}
if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
via_dmablit_release_slot(blitq);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
via_dmablit_release_slot(blitq);
@@ -792,21 +792,18 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)
*/
int
-via_dma_blit_sync( DRM_IOCTL_ARGS )
+via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
- drm_via_blitsync_t sync;
+ drm_via_blitsync_t *sync = data;
int err;
- DRM_DEVICE;
- DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
-
- if (sync.engine >= VIA_NUM_BLIT_ENGINES)
- return DRM_ERR(EINVAL);
+ if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+ return -EINVAL;
- err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);
+ err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
- if (DRM_ERR(EINTR) == err)
- err = DRM_ERR(EAGAIN);
+ if (-EINTR == err)
+ err = -EAGAIN;
return err;
}
@@ -819,17 +816,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS )
*/
int
-via_dma_blit( DRM_IOCTL_ARGS )
+via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
- drm_via_dmablit_t xfer;
+ drm_via_dmablit_t *xfer = data;
int err;
- DRM_DEVICE;
-
- DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));
-
- err = via_dmablit(dev, &xfer);
- DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));
+ err = via_dmablit(dev, xfer);
return err;
}
diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h
index f6ae03ec..726ad25d 100644
--- a/linux-core/via_dmablit.h
+++ b/linux-core/via_dmablit.h
@@ -59,7 +59,7 @@ typedef struct _drm_via_sg_info {
} drm_via_sg_info_t;
typedef struct _drm_via_blitq {
- drm_device_t *dev;
+ struct drm_device *dev;
uint32_t cur_blit_handle;
uint32_t done_blit_handle;
unsigned serviced;
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
index 02249939..a6d4ece9 100644
--- a/linux-core/via_fence.c
+++ b/linux-core/via_fence.c
@@ -39,10 +39,10 @@
*/
-static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class)
+static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_fence_class_manager_t *fc = &dev->fm.class[class];
+ struct drm_fence_class_manager *fc = &dev->fm.class[class];
uint32_t pending_flush_types = 0;
uint32_t signaled_flush_types = 0;
uint32_t status;
@@ -113,7 +113,7 @@ static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class)
* Emit a fence sequence.
*/
-int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
+int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
uint32_t * sequence, uint32_t * native_type)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -142,7 +142,7 @@ int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
*native_type = DRM_FENCE_TYPE_EXE;
break;
default:
- ret = DRM_ERR(EINVAL);
+ ret = -EINVAL;
break;
}
return ret;
@@ -152,10 +152,10 @@ int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags,
* Manual poll (from the fence manager).
*/
-void via_poke_flush(drm_device_t * dev, uint32_t class)
+void via_poke_flush(struct drm_device * dev, uint32_t class)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
unsigned long flags;
uint32_t pending_flush;
@@ -200,11 +200,11 @@ int via_fence_has_irq(struct drm_device * dev, uint32_t class,
void via_fence_timer(unsigned long data)
{
- drm_device_t *dev = (drm_device_t *) data;
+ struct drm_device *dev = (struct drm_device *) data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_fence_manager_t *fm = &dev->fm;
+ struct drm_fence_manager *fm = &dev->fm;
uint32_t pending_flush;
- drm_fence_class_manager_t *fc = &dev->fm.class[0];
+ struct drm_fence_class_manager *fc = &dev->fm.class[0];
if (!dev_priv)
return;
diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c
index d97269f5..35ca6bfc 100644
--- a/linux-core/via_mm.c
+++ b/linux-core/via_mm.c
@@ -33,18 +33,15 @@
#define VIA_MM_ALIGN_SHIFT 4
#define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1)
-int via_agp_init(DRM_IOCTL_ARGS)
+int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_agp_t agp;
+ drm_via_agp_t *agp = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
int ret;
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data,
- sizeof(agp));
mutex_lock(&dev->struct_mutex);
ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0,
- agp.size >> VIA_MM_ALIGN_SHIFT);
+ agp->size >> VIA_MM_ALIGN_SHIFT);
if (ret) {
DRM_ERROR("AGP memory manager initialisation error\n");
@@ -53,25 +50,22 @@ int via_agp_init(DRM_IOCTL_ARGS)
}
dev_priv->agp_initialized = 1;
- dev_priv->agp_offset = agp.offset;
+ dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
+ DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
return 0;
}
-int via_fb_init(DRM_IOCTL_ARGS)
+int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_fb_t fb;
+ drm_via_fb_t *fb = data;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
int ret;
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb));
-
mutex_lock(&dev->struct_mutex);
ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0,
- fb.size >> VIA_MM_ALIGN_SHIFT);
+ fb->size >> VIA_MM_ALIGN_SHIFT);
if (ret) {
DRM_ERROR("VRAM memory manager initialisation error\n");
@@ -80,10 +74,10 @@ int via_fb_init(DRM_IOCTL_ARGS)
}
dev_priv->vram_initialized = 1;
- dev_priv->vram_offset = fb.offset;
+ dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
+ DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
return 0;
@@ -123,80 +117,71 @@ void via_lastclose(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
}
-int via_mem_alloc(DRM_IOCTL_ARGS)
+int via_mem_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
-
- drm_via_mem_t mem;
+ drm_via_mem_t *mem = data;
int retval = 0;
- drm_memblock_item_t *item;
+ struct drm_memblock_item *item;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned long tmpSize;
- DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
- sizeof(mem));
-
- if (mem.type > VIA_MEM_AGP) {
+ if (mem->type > VIA_MEM_AGP) {
DRM_ERROR("Unknown memory type allocation\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
mutex_lock(&dev->struct_mutex);
- if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
+ if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized :
dev_priv->agp_initialized)) {
DRM_ERROR
("Attempt to allocate from uninitialized memory manager.\n");
mutex_unlock(&dev->struct_mutex);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
- item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0,
- (unsigned long)priv);
+ tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT;
+ item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0,
+ (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
if (item) {
- mem.offset = ((mem.type == VIA_MEM_VIDEO) ?
+ mem->offset = ((mem->type == VIA_MEM_VIDEO) ?
dev_priv->vram_offset : dev_priv->agp_offset) +
(item->mm->
offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT);
- mem.index = item->user_hash.key;
+ mem->index = item->user_hash.key;
} else {
- mem.offset = 0;
- mem.size = 0;
- mem.index = 0;
+ mem->offset = 0;
+ mem->size = 0;
+ mem->index = 0;
DRM_DEBUG("Video memory allocation failed\n");
- retval = DRM_ERR(ENOMEM);
+ retval = -ENOMEM;
}
- DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem));
return retval;
}
-int via_mem_free(DRM_IOCTL_ARGS)
+int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_via_private_t *dev_priv = dev->dev_private;
- drm_via_mem_t mem;
+ drm_via_mem_t *mem = data;
int ret;
- DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
- sizeof(mem));
-
mutex_lock(&dev->struct_mutex);
- ret = drm_sman_free_key(&dev_priv->sman, mem.index);
+ ret = drm_sman_free_key(&dev_priv->sman, mem->index);
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("free = 0x%lx\n", mem.index);
+ DRM_DEBUG("free = 0x%lx\n", mem->index);
return ret;
}
-void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
+void via_reclaim_buffers_locked(struct drm_device * dev,
+ struct drm_file *file_priv)
{
drm_via_private_t *dev_priv = dev->dev_private;
- drm_file_t *priv = filp->private_data;
mutex_lock(&dev->struct_mutex);
- if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) {
+ if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) {
mutex_unlock(&dev->struct_mutex);
return;
}
@@ -205,7 +190,7 @@ void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp)
dev->driver->dma_quiescent(dev);
}
- drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv);
+ drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv);
mutex_unlock(&dev->struct_mutex);
return;
}
diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c
new file mode 100644
index 00000000..261f4e13
--- /dev/null
+++ b/linux-core/xgi_cmdlist.c
@@ -0,0 +1,322 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+#include "xgi_cmdlist.h"
+
+static void xgi_emit_flush(struct xgi_info * info, bool stop);
+static void xgi_emit_nop(struct xgi_info * info);
+static unsigned int get_batch_command(enum xgi_batch_type type);
+static void triggerHWCommandList(struct xgi_info * info);
+static void xgi_cmdlist_reset(struct xgi_info * info);
+
+
+/**
+ * Graphic engine register (2d/3d) accessing interface
+ */
+static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
+{
+#ifdef XGI_MMIO_DEBUG
+ DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
+ map->handle, addr, data);
+#endif
+ DRM_WRITE32(map, addr, data);
+}
+
+
+int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
+ struct drm_file * filp)
+{
+ struct xgi_mem_alloc mem_alloc = {
+ .location = XGI_MEMLOC_NON_LOCAL,
+ .size = size,
+ };
+ int err;
+
+ err = xgi_alloc(info, &mem_alloc, filp);
+ if (err) {
+ return err;
+ }
+
+ info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr);
+ info->cmdring.size = mem_alloc.size;
+ info->cmdring.ring_hw_base = mem_alloc.hw_addr;
+ info->cmdring.last_ptr = NULL;
+ info->cmdring.ring_offset = 0;
+
+ return 0;
+}
+
+
+/**
+ * get_batch_command - Get the command ID for the current begin type.
+ * @type: Type of the current batch
+ *
+ * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
+ *
+ * This function assumes that @type is on the range [0,3].
+ */
+unsigned int get_batch_command(enum xgi_batch_type type)
+{
+ static const unsigned int ports[4] = {
+ 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
+ };
+
+ return ports[type];
+}
+
+
+int xgi_submit_cmdlist(struct drm_device * dev, void * data,
+ struct drm_file * filp)
+{
+ struct xgi_info *const info = dev->dev_private;
+ const struct xgi_cmd_info *const pCmdInfo =
+ (struct xgi_cmd_info *) data;
+ const unsigned int cmd = get_batch_command(pCmdInfo->type);
+ u32 begin[4];
+
+
+ begin[0] = (cmd << 24) | BEGIN_VALID_MASK
+ | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
+ begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
+ begin[2] = pCmdInfo->hw_addr >> 4;
+ begin[3] = 0;
+
+ if (info->cmdring.last_ptr == NULL) {
+ const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
+
+
+ /* Enable PCI Trigger Mode
+ */
+ dwWriteReg(info->mmio_map,
+ BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
+ M2REG_CLEAR_COUNTERS_MASK | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+
+ dwWriteReg(info->mmio_map,
+ BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+
+
+ /* Send PCI begin command
+ */
+ dwWriteReg(info->mmio_map, portOffset, begin[0]);
+ dwWriteReg(info->mmio_map, portOffset + 4, begin[1]);
+ dwWriteReg(info->mmio_map, portOffset + 8, begin[2]);
+ dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
+ } else {
+ DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
+
+ if (pCmdInfo->type == BTYPE_3D) {
+ xgi_emit_flush(info, FALSE);
+ }
+
+ info->cmdring.last_ptr[1] = begin[1];
+ info->cmdring.last_ptr[2] = begin[2];
+ info->cmdring.last_ptr[3] = begin[3];
+ DRM_WRITEMEMORYBARRIER();
+ info->cmdring.last_ptr[0] = begin[0];
+
+ triggerHWCommandList(info);
+ }
+
+ info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
+ drm_fence_flush_old(info->dev, 0, info->next_sequence);
+ return 0;
+}
+
+
+/*
+ state: 0 - console
+ 1 - graphic
+ 2 - fb
+ 3 - logout
+*/
+int xgi_state_change(struct xgi_info * info, unsigned int to,
+ unsigned int from)
+{
+#define STATE_CONSOLE 0
+#define STATE_GRAPHIC 1
+#define STATE_FBTERM 2
+#define STATE_LOGOUT 3
+#define STATE_REBOOT 4
+#define STATE_SHUTDOWN 5
+
+ if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
+ DRM_INFO("Leaving graphical mode (probably VT switch)\n");
+ } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
+ DRM_INFO("Entering graphical mode (probably VT switch)\n");
+ xgi_cmdlist_reset(info);
+ } else if ((from == STATE_GRAPHIC)
+ && ((to == STATE_LOGOUT)
+ || (to == STATE_REBOOT)
+ || (to == STATE_SHUTDOWN))) {
+ DRM_INFO("Leaving graphical mode (probably X shutting down)\n");
+ } else {
+ DRM_ERROR("Invalid state change.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+
+int xgi_state_change_ioctl(struct drm_device * dev, void * data,
+ struct drm_file * filp)
+{
+ struct xgi_state_info *const state =
+ (struct xgi_state_info *) data;
+ struct xgi_info *info = dev->dev_private;
+
+
+ return xgi_state_change(info, state->_toState, state->_fromState);
+}
+
+
+void xgi_cmdlist_reset(struct xgi_info * info)
+{
+ info->cmdring.last_ptr = NULL;
+ info->cmdring.ring_offset = 0;
+}
+
+
+void xgi_cmdlist_cleanup(struct xgi_info * info)
+{
+ if (info->cmdring.ring_hw_base != 0) {
+ /* If command lists have been issued, terminate the command
+ * list chain with a flush command.
+ */
+ if (info->cmdring.last_ptr != NULL) {
+ xgi_emit_flush(info, FALSE);
+ xgi_emit_nop(info);
+ }
+
+ xgi_waitfor_pci_idle(info);
+
+ (void) memset(&info->cmdring, 0, sizeof(info->cmdring));
+ }
+}
+
+static void triggerHWCommandList(struct xgi_info * info)
+{
+ static unsigned int s_triggerID = 1;
+
+ dwWriteReg(info->mmio_map,
+ BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
+ 0x05000000 + (0x0ffff & s_triggerID++));
+}
+
+
+/**
+ * Emit a flush to the CRTL command stream.
+ * @info XGI info structure
+ *
+ * This function assumes info->cmdring.ptr is non-NULL.
+ */
+void xgi_emit_flush(struct xgi_info * info, bool stop)
+{
+ const u32 flush_command[8] = {
+ ((0x10 << 24)
+ | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
+ BEGIN_LINK_ENABLE_MASK | (0x00004),
+ 0x00000000, 0x00000000,
+
+ /* Flush the 2D engine with the default 32 clock delay.
+ */
+ M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
+ M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
+ M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
+ M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK,
+ };
+ const unsigned int flush_size = sizeof(flush_command);
+ u32 *batch_addr;
+ u32 hw_addr;
+
+ /* check buf is large enough to contain a new flush batch */
+ if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
+ info->cmdring.ring_offset = 0;
+ }
+
+ hw_addr = info->cmdring.ring_hw_base
+ + info->cmdring.ring_offset;
+ batch_addr = info->cmdring.ptr
+ + (info->cmdring.ring_offset / 4);
+
+ (void) memcpy(batch_addr, flush_command, flush_size);
+
+ if (stop) {
+ *batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK;
+ }
+
+ info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4);
+ info->cmdring.last_ptr[2] = hw_addr >> 4;
+ info->cmdring.last_ptr[3] = 0;
+ DRM_WRITEMEMORYBARRIER();
+ info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
+ | (BEGIN_VALID_MASK);
+
+ triggerHWCommandList(info);
+
+ info->cmdring.ring_offset += flush_size;
+ info->cmdring.last_ptr = batch_addr;
+}
+
+
+/**
+ * Emit an empty command to the CRTL command stream.
+ * @info XGI info structure
+ *
+ * This function assumes info->cmdring.ptr is non-NULL. In addition, since
+ * this function emits a command that does not have linkage information,
+ * it sets info->cmdring.ptr to NULL.
+ */
+void xgi_emit_nop(struct xgi_info * info)
+{
+ info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK
+ | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
+ info->cmdring.last_ptr[2] = 0;
+ info->cmdring.last_ptr[3] = 0;
+ DRM_WRITEMEMORYBARRIER();
+ info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
+ | (BEGIN_VALID_MASK);
+
+ triggerHWCommandList(info);
+
+ info->cmdring.last_ptr = NULL;
+}
+
+
+void xgi_emit_irq(struct xgi_info * info)
+{
+ if (info->cmdring.last_ptr == NULL)
+ return;
+
+ xgi_emit_flush(info, TRUE);
+}
diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h
new file mode 100644
index 00000000..f6f1c1ef
--- /dev/null
+++ b/linux-core/xgi_cmdlist.h
@@ -0,0 +1,66 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_CMDLIST_H_
+#define _XGI_CMDLIST_H_
+
+struct xgi_cmdring_info {
+ /**
+ * Kernel space pointer to the base of the command ring.
+ */
+ u32 * ptr;
+
+ /**
+ * Size, in bytes, of the command ring.
+ */
+ unsigned int size;
+
+ /**
+ * Base address of the command ring from the hardware's PoV.
+ */
+ unsigned int ring_hw_base;
+
+ u32 * last_ptr;
+
+ /**
+ * Offset, in bytes, from the start of the ring to the next available
+ * location to store a command.
+ */
+ unsigned int ring_offset;
+};
+
+struct xgi_info;
+extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
+ struct drm_file * filp);
+
+extern int xgi_state_change(struct xgi_info * info, unsigned int to,
+ unsigned int from);
+
+extern void xgi_cmdlist_cleanup(struct xgi_info * info);
+
+extern void xgi_emit_irq(struct xgi_info * info);
+
+#endif /* _XGI_CMDLIST_H_ */
diff --git a/linux-core/xgi_drm.h b/linux-core/xgi_drm.h
new file mode 120000
index 00000000..677586d7
--- /dev/null
+++ b/linux-core/xgi_drm.h
@@ -0,0 +1 @@
+../shared-core/xgi_drm.h
\ No newline at end of file
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
new file mode 100644
index 00000000..bc6873a9
--- /dev/null
+++ b/linux-core/xgi_drv.c
@@ -0,0 +1,431 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "drmP.h"
+#include "drm.h"
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+#include "xgi_cmdlist.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+ xgi_PCI_IDS
+};
+
+static struct drm_fence_driver xgi_fence_driver = {
+ .num_classes = 1,
+ .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK,
+ .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1,
+ .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK,
+ .lazy_capable = 1,
+ .emit = xgi_fence_emit_sequence,
+ .poke_flush = xgi_poke_flush,
+ .has_irq = xgi_fence_has_irq
+};
+
+int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
+
+static struct drm_ioctl_desc xgi_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER),
+};
+
+static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
+static int xgi_driver_unload(struct drm_device *dev);
+static void xgi_driver_lastclose(struct drm_device * dev);
+static void xgi_reclaim_buffers_locked(struct drm_device * dev,
+ struct drm_file * filp);
+static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
+
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
+ DRIVER_IRQ_SHARED | DRIVER_SG,
+ .dev_priv_size = sizeof(struct xgi_info),
+ .load = xgi_driver_load,
+ .unload = xgi_driver_unload,
+ .lastclose = xgi_driver_lastclose,
+ .dma_quiescent = NULL,
+ .irq_preinstall = NULL,
+ .irq_postinstall = NULL,
+ .irq_uninstall = NULL,
+ .irq_handler = xgi_kern_isr,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = xgi_ioctls,
+ .dma_ioctl = NULL,
+
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9)
+ .compat_ioctl = xgi_compat_ioctl,
+#endif
+ },
+
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = probe,
+ .remove = __devexit_p(drm_cleanup_pci),
+ },
+
+ .fence_driver = &xgi_fence_driver,
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+};
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &driver);
+}
+
+
+static int __init xgi_init(void)
+{
+ driver.num_ioctls = xgi_max_ioctl;
+ return drm_init(&driver, pciidlist);
+}
+
+static void __exit xgi_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(xgi_init);
+module_exit(xgi_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
+
+void xgi_engine_init(struct xgi_info * info)
+{
+ u8 temp;
+
+
+ OUT3C5B(info->mmio_map, 0x11, 0x92);
+
+ /* -------> copy from OT2D
+ * PCI Retry Control Register.
+ * disable PCI read retry & enable write retry in mem. (10xx xxxx)b
+ */
+ temp = IN3X5B(info->mmio_map, 0x55);
+ OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80);
+
+ xgi_enable_ge(info);
+
+ /* Enable linear addressing of the card. */
+ temp = IN3X5B(info->mmio_map, 0x21);
+ OUT3X5B(info->mmio_map, 0x21, temp | 0x20);
+
+ /* Enable 32-bit internal data path */
+ temp = IN3X5B(info->mmio_map, 0x2A);
+ OUT3X5B(info->mmio_map, 0x2A, temp | 0x40);
+
+	/* Enable PCI burst write, disable burst read, and enable MMIO. */
+ /*
+ * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO.
+ * 7 ---- Pixel Data Format 1: big endian 0: little endian
+ * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]# with Big Endian Format
+ * 2 ---- PCI Burst Write Enable
+ * 1 ---- PCI Burst Read Enable
+ * 0 ---- MMIO Control
+ */
+ temp = IN3X5B(info->mmio_map, 0x39);
+ OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd);
+
+ /* enable GEIO decode */
+ /* temp = IN3X5B(info->mmio_map, 0x29);
+ * OUT3X5B(info->mmio_map, 0x29, temp | 0x08);
+ */
+
+ /* Enable graphic engine I/O PCI retry function*/
+ /* temp = IN3X5B(info->mmio_map, 0x62);
+ * OUT3X5B(info->mmio_map, 0x62, temp | 0x50);
+ */
+
+	/* Protect all registers except those protected by 3c5.0e.7 */
+ /* OUT3C5B(info->mmio_map, 0x11, 0x87); */
+}
+
+
+int xgi_bootstrap(struct drm_device * dev, void * data,
+ struct drm_file * filp)
+{
+ struct xgi_info *info = dev->dev_private;
+ struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data;
+ struct drm_map_list *maplist;
+ int err;
+
+
+ DRM_SPININIT(&info->fence_lock, "fence lock");
+ info->next_sequence = 0;
+ info->complete_sequence = 0;
+
+ if (info->mmio_map == NULL) {
+ err = drm_addmap(dev, info->mmio.base, info->mmio.size,
+ _DRM_REGISTERS, _DRM_KERNEL,
+ &info->mmio_map);
+ if (err) {
+ DRM_ERROR("Unable to map MMIO region: %d\n", err);
+ return err;
+ }
+
+ xgi_enable_mmio(info);
+ xgi_engine_init(info);
+ }
+
+
+ info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;
+
+ DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n",
+ (unsigned long) info->fb.base, info->fb.size);
+
+
+ if ((info->fb.base == 0) || (info->fb.size == 0)) {
+ DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
+ (unsigned long) info->fb.base, info->fb.size);
+ return -EINVAL;
+ }
+
+
+ /* Init the resource manager */
+ if (!info->fb_heap_initialized) {
+ err = xgi_fb_heap_init(info);
+ if (err) {
+ DRM_ERROR("Unable to initialize FB heap.\n");
+ return err;
+ }
+ }
+
+
+ info->pcie.size = bs->gart.size;
+
+ /* Init the resource manager */
+ if (!info->pcie_heap_initialized) {
+ err = xgi_pcie_heap_init(info);
+ if (err) {
+ DRM_ERROR("Unable to initialize GART heap.\n");
+ return err;
+ }
+
+		/* Allocate 1 MB for the command buffer, which is the flush2D batch array. */
+ err = xgi_cmdlist_initialize(info, 0x100000, filp);
+ if (err) {
+ DRM_ERROR("xgi_cmdlist_initialize() failed\n");
+ return err;
+ }
+ }
+
+
+ if (info->pcie_map == NULL) {
+ err = drm_addmap(info->dev, 0, info->pcie.size,
+ _DRM_SCATTER_GATHER, _DRM_LOCKED,
+ & info->pcie_map);
+ if (err) {
+ DRM_ERROR("Could not add map for GART backing "
+ "store.\n");
+ return err;
+ }
+ }
+
+
+ maplist = drm_find_matching_map(dev, info->pcie_map);
+ if (maplist == NULL) {
+ DRM_ERROR("Could not find GART backing store map.\n");
+ return -EINVAL;
+ }
+
+ bs->gart = *info->pcie_map;
+ bs->gart.handle = (void *)(unsigned long) maplist->user_token;
+ return 0;
+}
+
+
+void xgi_driver_lastclose(struct drm_device * dev)
+{
+ struct xgi_info * info = dev->dev_private;
+
+ if (info != NULL) {
+ if (info->mmio_map != NULL) {
+ xgi_cmdlist_cleanup(info);
+ xgi_disable_ge(info);
+ xgi_disable_mmio(info);
+ }
+
+ /* The core DRM lastclose routine will destroy all of our
+ * mappings for us. NULL out the pointers here so that
+ * xgi_bootstrap can do the right thing.
+ */
+ info->pcie_map = NULL;
+ info->mmio_map = NULL;
+ info->fb_map = NULL;
+
+ if (info->pcie_heap_initialized) {
+ drm_ati_pcigart_cleanup(dev, &info->gart_info);
+ }
+
+ if (info->fb_heap_initialized
+ || info->pcie_heap_initialized) {
+ drm_sman_cleanup(&info->sman);
+
+ info->fb_heap_initialized = FALSE;
+ info->pcie_heap_initialized = FALSE;
+ }
+ }
+}
+
+
+void xgi_reclaim_buffers_locked(struct drm_device * dev,
+ struct drm_file * filp)
+{
+ struct xgi_info * info = dev->dev_private;
+
+ mutex_lock(&info->dev->struct_mutex);
+ if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) {
+ mutex_unlock(&info->dev->struct_mutex);
+ return;
+ }
+
+ if (dev->driver->dma_quiescent) {
+ dev->driver->dma_quiescent(dev);
+ }
+
+ drm_sman_owner_cleanup(&info->sman, (unsigned long) filp);
+ mutex_unlock(&info->dev->struct_mutex);
+ return;
+}
+
+
+/*
+ * The driver receives an interrupt; if someone is waiting on it, hand it off.
+ */
+irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct xgi_info *info = dev->dev_private;
+ const u32 irq_bits = DRM_READ32(info->mmio_map,
+ (0x2800
+ + M2REG_AUTO_LINK_STATUS_ADDRESS))
+ & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
+ | M2REG_ACTIVE_INTERRUPT_0_MASK
+ | M2REG_ACTIVE_INTERRUPT_2_MASK
+ | M2REG_ACTIVE_INTERRUPT_3_MASK);
+
+
+ if (irq_bits != 0) {
+ DRM_WRITE32(info->mmio_map,
+ 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
+ M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits);
+ xgi_fence_handler(dev);
+ return IRQ_HANDLED;
+ } else {
+ return IRQ_NONE;
+ }
+}
+
+
+int xgi_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
+ int err;
+
+ if (!info)
+ return -ENOMEM;
+
+ (void) memset(info, 0, sizeof(*info));
+ dev->dev_private = info;
+ info->dev = dev;
+
+ info->mmio.base = drm_get_resource_start(dev, 1);
+ info->mmio.size = drm_get_resource_len(dev, 1);
+
+ DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
+ (unsigned long) info->mmio.base, info->mmio.size);
+
+
+ if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
+ DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
+ (unsigned long) info->mmio.base, info->mmio.size);
+ err = -EINVAL;
+ goto fail;
+ }
+
+
+ info->fb.base = drm_get_resource_start(dev, 0);
+ info->fb.size = drm_get_resource_len(dev, 0);
+
+ DRM_INFO("fb base: 0x%lx, size: 0x%x\n",
+ (unsigned long) info->fb.base, info->fb.size);
+
+
+ err = drm_sman_init(&info->sman, 2, 12, 8);
+ if (err) {
+ goto fail;
+ }
+
+
+ return 0;
+
+fail:
+ drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
+ return err;
+}
+
+int xgi_driver_unload(struct drm_device *dev)
+{
+ struct xgi_info * info = dev->dev_private;
+
+ drm_sman_takedown(&info->sman);
+ drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+
+ return 0;
+}
diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h
new file mode 100644
index 00000000..a68dc03b
--- /dev/null
+++ b/linux-core/xgi_drv.h
@@ -0,0 +1,117 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_DRV_H_
+#define _XGI_DRV_H_
+
+#include "drmP.h"
+#include "drm.h"
+#include "drm_sman.h"
+
+#define DRIVER_AUTHOR "Andrea Zhang <andrea_zhang@macrosynergy.com>"
+
+#define DRIVER_NAME "xgi"
+#define DRIVER_DESC "XGI XP5 / XP10 / XG47"
+#define DRIVER_DATE "20070918"
+
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 1
+#define DRIVER_PATCHLEVEL 0
+
+#include "xgi_cmdlist.h"
+#include "xgi_drm.h"
+
+struct xgi_aperture {
+ dma_addr_t base;
+ unsigned int size;
+};
+
+struct xgi_info {
+ struct drm_device *dev;
+
+ bool bootstrap_done;
+
+ /* physical characteristics */
+ struct xgi_aperture mmio;
+ struct xgi_aperture fb;
+ struct xgi_aperture pcie;
+
+ struct drm_map *mmio_map;
+ struct drm_map *pcie_map;
+ struct drm_map *fb_map;
+
+ /* look up table parameters */
+ struct ati_pcigart_info gart_info;
+ unsigned int lutPageSize;
+
+ struct drm_sman sman;
+ bool fb_heap_initialized;
+ bool pcie_heap_initialized;
+
+ struct xgi_cmdring_info cmdring;
+
+ DRM_SPINTYPE fence_lock;
+ unsigned complete_sequence;
+ unsigned next_sequence;
+};
+
+extern long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+extern int xgi_fb_heap_init(struct xgi_info * info);
+
+extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+ struct drm_file * filp);
+
+extern int xgi_free(struct xgi_info * info, unsigned long index,
+ struct drm_file * filp);
+
+extern int xgi_pcie_heap_init(struct xgi_info * info);
+
+extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
+
+extern void xgi_enable_mmio(struct xgi_info * info);
+extern void xgi_disable_mmio(struct xgi_info * info);
+extern void xgi_enable_ge(struct xgi_info * info);
+extern void xgi_disable_ge(struct xgi_info * info);
+
+extern void xgi_poke_flush(struct drm_device * dev, uint32_t class);
+extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
+ uint32_t flags, uint32_t * sequence, uint32_t * native_type);
+extern void xgi_fence_handler(struct drm_device * dev);
+extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class,
+ uint32_t flags);
+
+extern int xgi_alloc_ioctl(struct drm_device * dev, void * data,
+ struct drm_file * filp);
+extern int xgi_free_ioctl(struct drm_device * dev, void * data,
+ struct drm_file * filp);
+extern int xgi_submit_cmdlist(struct drm_device * dev, void * data,
+ struct drm_file * filp);
+extern int xgi_state_change_ioctl(struct drm_device * dev, void * data,
+ struct drm_file * filp);
+
+#endif
diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c
new file mode 100644
index 00000000..2e2d0094
--- /dev/null
+++ b/linux-core/xgi_fb.c
@@ -0,0 +1,130 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+
+#define XGI_FB_HEAP_START 0x1000000
+
+int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+ struct drm_file * filp)
+{
+ struct drm_memblock_item *block;
+ const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
+ ? "on-card" : "GART";
+
+
+ if ((alloc->location != XGI_MEMLOC_LOCAL)
+ && (alloc->location != XGI_MEMLOC_NON_LOCAL)) {
+ DRM_ERROR("Invalid memory pool (0x%08x) specified.\n",
+ alloc->location);
+ return -EINVAL;
+ }
+
+ if ((alloc->location == XGI_MEMLOC_LOCAL)
+ ? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
+ DRM_ERROR("Attempt to allocate from uninitialized memory "
+ "pool (0x%08x).\n", alloc->location);
+ return -EINVAL;
+ }
+
+ mutex_lock(&info->dev->struct_mutex);
+ block = drm_sman_alloc(&info->sman, alloc->location, alloc->size,
+ 0, (unsigned long) filp);
+ mutex_unlock(&info->dev->struct_mutex);
+
+ if (block == NULL) {
+ alloc->size = 0;
+ DRM_ERROR("%s memory allocation failed\n", mem_name);
+ return -ENOMEM;
+ } else {
+ alloc->offset = (*block->mm->offset)(block->mm,
+ block->mm_info);
+ alloc->hw_addr = alloc->offset;
+ alloc->index = block->user_hash.key;
+
+ if (block->user_hash.key != (unsigned long) alloc->index) {
+ DRM_ERROR("%s truncated handle %lx for pool %d "
+ "offset %x\n",
+ __func__, block->user_hash.key,
+ alloc->location, alloc->offset);
+ }
+
+ if (alloc->location == XGI_MEMLOC_NON_LOCAL) {
+ alloc->hw_addr += info->pcie.base;
+ }
+
+ DRM_DEBUG("%s memory allocation succeeded: 0x%x\n",
+ mem_name, alloc->offset);
+ }
+
+ return 0;
+}
+
+
+int xgi_alloc_ioctl(struct drm_device * dev, void * data,
+ struct drm_file * filp)
+{
+ struct xgi_info *info = dev->dev_private;
+
+ return xgi_alloc(info, (struct xgi_mem_alloc *) data, filp);
+}
+
+
+int xgi_free(struct xgi_info * info, unsigned long index,
+ struct drm_file * filp)
+{
+ int err;
+
+ mutex_lock(&info->dev->struct_mutex);
+ err = drm_sman_free_key(&info->sman, index);
+ mutex_unlock(&info->dev->struct_mutex);
+
+ return err;
+}
+
+
+int xgi_free_ioctl(struct drm_device * dev, void * data,
+ struct drm_file * filp)
+{
+ struct xgi_info *info = dev->dev_private;
+
+ return xgi_free(info, *(unsigned long *) data, filp);
+}
+
+
+int xgi_fb_heap_init(struct xgi_info * info)
+{
+ int err;
+
+ mutex_lock(&info->dev->struct_mutex);
+ err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
+ XGI_FB_HEAP_START,
+ info->fb.size - XGI_FB_HEAP_START);
+ mutex_unlock(&info->dev->struct_mutex);
+
+ info->fb_heap_initialized = (err == 0);
+ return err;
+}
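A hedged kernel-side sketch of the allocation path above. struct xgi_mem_alloc is defined in the shared xgi_drm.h header (not shown here), so only the fields actually referenced by xgi_alloc() are assumed:

/* Sketch: allocate 64 KiB from the on-card heap and free it again.
 * alloc.index is the handle that xgi_free() expects.
 */
static int xgi_fb_alloc_example(struct xgi_info *info, struct drm_file *filp)
{
	struct xgi_mem_alloc alloc = {
		.location = XGI_MEMLOC_LOCAL,
		.size = 64 * 1024,
	};
	int err = xgi_alloc(info, &alloc, filp);

	if (err)
		return err;

	/* alloc.offset / alloc.hw_addr now describe the block. */
	return xgi_free(info, alloc.index, filp);
}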
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
new file mode 100644
index 00000000..adedf300
--- /dev/null
+++ b/linux-core/xgi_fence.c
@@ -0,0 +1,127 @@
+/*
+ * (C) Copyright IBM Corporation 2007
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ian Romanick <idr@us.ibm.com>
+ */
+
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+#include "xgi_cmdlist.h"
+
+static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
+{
+ struct xgi_info * info = dev->dev_private;
+ struct drm_fence_class_manager * fc = &dev->fm.class[class];
+ uint32_t pending_flush_types = 0;
+ uint32_t signaled_flush_types = 0;
+
+
+ if ((info == NULL) || (class != 0))
+ return 0;
+
+ DRM_SPINLOCK(&info->fence_lock);
+
+ pending_flush_types = fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+
+ if (pending_flush_types) {
+ if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
+ const u32 begin_id = DRM_READ32(info->mmio_map,
+ 0x2820)
+ & BEGIN_BEGIN_IDENTIFICATION_MASK;
+
+ if (begin_id != info->complete_sequence) {
+ info->complete_sequence = begin_id;
+ signaled_flush_types |= DRM_FENCE_TYPE_EXE;
+ }
+ }
+
+ if (signaled_flush_types) {
+ drm_fence_handler(dev, 0, info->complete_sequence,
+ signaled_flush_types);
+ }
+ }
+
+ DRM_SPINUNLOCK(&info->fence_lock);
+
+ return fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+}
+
+
+int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
+ uint32_t flags, uint32_t * sequence,
+ uint32_t * native_type)
+{
+ struct xgi_info * info = dev->dev_private;
+
+ if ((info == NULL) || (class != 0))
+ return -EINVAL;
+
+
+ DRM_SPINLOCK(&info->fence_lock);
+ info->next_sequence++;
+ if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) {
+ info->next_sequence = 1;
+ }
+ DRM_SPINUNLOCK(&info->fence_lock);
+
+
+ xgi_emit_irq(info);
+
+ *sequence = (uint32_t) info->next_sequence;
+ *native_type = DRM_FENCE_TYPE_EXE;
+
+ return 0;
+}
+
+
+void xgi_poke_flush(struct drm_device * dev, uint32_t class)
+{
+ struct drm_fence_manager * fm = &dev->fm;
+ unsigned long flags;
+
+
+ write_lock_irqsave(&fm->lock, flags);
+ xgi_do_flush(dev, class);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+
+void xgi_fence_handler(struct drm_device * dev)
+{
+ struct drm_fence_manager * fm = &dev->fm;
+
+
+ write_lock(&fm->lock);
+ xgi_do_flush(dev, 0);
+ write_unlock(&fm->lock);
+}
+
+
+int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+{
+ return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0;
+}
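The completion test in xgi_do_flush() boils down to a single register read: the low bits of MMIO offset 0x2820 carry the identification of the most recently begun batch. A sketch of that readback in isolation (offset and mask taken from the function above):

/* Sketch only: raw completion readback, outside the fence manager. */
static u32 xgi_read_completed_sequence(struct xgi_info *info)
{
	return DRM_READ32(info->mmio_map, 0x2820)
		& BEGIN_BEGIN_IDENTIFICATION_MASK;
}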
diff --git a/linux-core/xgi_ioc32.c b/linux-core/xgi_ioc32.c
new file mode 100644
index 00000000..c54044fa
--- /dev/null
+++ b/linux-core/xgi_ioc32.c
@@ -0,0 +1,140 @@
+/*
+ * (C) Copyright IBM Corporation 2007
+ * Copyright (C) Paul Mackerras 2005.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * on the rights to use, copy, modify, merge, publish, distribute, sub
+ * license, and/or sell copies of the Software, and to permit persons to whom
+ * the Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Ian Romanick <idr@us.ibm.com>
+ */
+
+#include <linux/compat.h>
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "xgi_drm.h"
+
+/* This is copied from drm_ioc32.c.
+ */
+struct drm_map32 {
+ u32 offset; /**< Requested physical address (0 for SAREA)*/
+ u32 size; /**< Requested physical size (bytes) */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
+ u32 handle; /**< User-space: "Handle" to pass to mmap() */
+ int mtrr; /**< MTRR slot used */
+};
+
+struct drm32_xgi_bootstrap {
+ struct drm_map32 gart;
+};
+
+
+extern int xgi_bootstrap(struct drm_device *, void *, struct drm_file *);
+
+static int compat_xgi_bootstrap(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct drm32_xgi_bootstrap __user *const argp = (void __user *)arg;
+ struct drm32_xgi_bootstrap bs32;
+ struct xgi_bootstrap __user *bs;
+ int err;
+ void *handle;
+
+
+ if (copy_from_user(&bs32, argp, sizeof(bs32))) {
+ return -EFAULT;
+ }
+
+ bs = compat_alloc_user_space(sizeof(*bs));
+ if (!access_ok(VERIFY_WRITE, bs, sizeof(*bs))) {
+ return -EFAULT;
+ }
+
+ if (__put_user(bs32.gart.offset, &bs->gart.offset)
+ || __put_user(bs32.gart.size, &bs->gart.size)
+ || __put_user(bs32.gart.type, &bs->gart.type)
+ || __put_user(bs32.gart.flags, &bs->gart.flags)) {
+ return -EFAULT;
+ }
+
+ err = drm_ioctl(filp->f_dentry->d_inode, filp, XGI_IOCTL_BOOTSTRAP,
+ (unsigned long)bs);
+ if (err) {
+ return err;
+ }
+
+ if (__get_user(bs32.gart.offset, &bs->gart.offset)
+ || __get_user(bs32.gart.mtrr, &bs->gart.mtrr)
+ || __get_user(handle, &bs->gart.handle)) {
+ return -EFAULT;
+ }
+
+ bs32.gart.handle = (unsigned long)handle;
+ if (bs32.gart.handle != (unsigned long)handle && printk_ratelimit()) {
+ printk(KERN_ERR "%s truncated handle %p for type %d "
+ "offset %x\n",
+ __func__, handle, bs32.gart.type, bs32.gart.offset);
+ }
+
+ if (copy_to_user(argp, &bs32, sizeof(bs32))) {
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+
+drm_ioctl_compat_t *xgi_compat_ioctls[] = {
+ [DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap,
+};
+
+/**
+ * Called whenever a 32-bit process running under a 64-bit kernel
+ * performs an ioctl on /dev/dri/card<n>.
+ *
+ * \param filp file pointer.
+ * \param cmd command.
+ * \param arg user argument.
+ * \return zero on success or negative number on failure.
+ */
+long xgi_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ const unsigned int nr = DRM_IOCTL_NR(cmd);
+ drm_ioctl_compat_t *fn = NULL;
+ int ret;
+
+ if (nr < DRM_COMMAND_BASE)
+ return drm_compat_ioctl(filp, cmd, arg);
+
+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(xgi_compat_ioctls))
+ fn = xgi_compat_ioctls[nr - DRM_COMMAND_BASE];
+
+ lock_kernel();
+ ret = (fn != NULL)
+ ? (*fn)(filp, cmd, arg)
+ : drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+ unlock_kernel();
+
+ return ret;
+}
diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c
new file mode 100644
index 00000000..50a721c0
--- /dev/null
+++ b/linux-core/xgi_misc.c
@@ -0,0 +1,477 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+
+#include <linux/delay.h>
+
+/*
+ * irq functions
+ */
+#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
+
+static unsigned int s_invalid_begin = 0;
+
+static bool xgi_validate_signal(struct drm_map * map)
+{
+ if (DRM_READ32(map, 0x2800) & 0x001c0000) {
+ u16 check;
+
+ /* Check Read back status */
+ DRM_WRITE8(map, 0x235c, 0x80);
+ check = DRM_READ16(map, 0x2360);
+
+ if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
+ return FALSE;
+ }
+
+ /* Check RO channel */
+ DRM_WRITE8(map, 0x235c, 0x83);
+ check = DRM_READ16(map, 0x2360);
+ if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
+ return FALSE;
+ }
+
+ /* Check RW channel */
+ DRM_WRITE8(map, 0x235c, 0x88);
+ check = DRM_READ16(map, 0x2360);
+ if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
+ return FALSE;
+ }
+
+ /* Check RO channel outstanding */
+ DRM_WRITE8(map, 0x235c, 0x8f);
+ check = DRM_READ16(map, 0x2360);
+ if (0 != (check & 0x3ff)) {
+ return FALSE;
+ }
+
+ /* Check RW channel outstanding */
+ DRM_WRITE8(map, 0x235c, 0x90);
+ check = DRM_READ16(map, 0x2360);
+ if (0 != (check & 0x3ff)) {
+ return FALSE;
+ }
+
+ /* No pending PCIE request. GE stall. */
+ }
+
+ return TRUE;
+}
+
+
+static void xgi_ge_hang_reset(struct drm_map * map)
+{
+ int time_out = 0xffff;
+
+ DRM_WRITE8(map, 0xb057, 8);
+ while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) {
+ while (0 != ((--time_out) & 0xfff))
+ /* empty */ ;
+
+ if (0 == time_out) {
+ u8 old_3ce;
+ u8 old_3cf;
+ u8 old_index;
+ u8 old_36;
+
+ DRM_INFO("Can not reset back 0x%x!\n",
+ DRM_READ32(map, 0x2800));
+
+ DRM_WRITE8(map, 0xb057, 0);
+
+ /* Have to use 3x5.36 to reset. */
+ /* Save and close dynamic gating */
+
+ old_3ce = DRM_READ8(map, 0x3ce);
+ DRM_WRITE8(map, 0x3ce, 0x2a);
+ old_3cf = DRM_READ8(map, 0x3cf);
+ DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe);
+
+ /* Reset GE */
+ old_index = DRM_READ8(map, 0x3d4);
+ DRM_WRITE8(map, 0x3d4, 0x36);
+ old_36 = DRM_READ8(map, 0x3d5);
+ DRM_WRITE8(map, 0x3d5, old_36 | 0x10);
+
+ while (0 != ((--time_out) & 0xfff))
+ /* empty */ ;
+
+ DRM_WRITE8(map, 0x3d5, old_36);
+ DRM_WRITE8(map, 0x3d4, old_index);
+
+ /* Restore dynamic gating */
+ DRM_WRITE8(map, 0x3cf, old_3cf);
+ DRM_WRITE8(map, 0x3ce, old_3ce);
+ break;
+ }
+ }
+
+ DRM_WRITE8(map, 0xb057, 0);
+}
+
+
+bool xgi_ge_irq_handler(struct xgi_info * info)
+{
+ const u32 int_status = DRM_READ32(info->mmio_map, 0x2810);
+ bool is_support_auto_reset = FALSE;
+
+ /* Check GE on/off */
+ if (0 == (0xffffc0f0 & int_status)) {
+ if (0 != (0x1000 & int_status)) {
+ /* We got GE stall interrupt.
+ */
+ DRM_WRITE32(info->mmio_map, 0x2810,
+ int_status | 0x04000000);
+
+ if (is_support_auto_reset) {
+ static cycles_t last_tick;
+ static unsigned continue_int_count = 0;
+
+ /* OE II is busy. */
+
+ if (!xgi_validate_signal(info->mmio_map)) {
+ /* Nothing but skip. */
+ } else if (0 == continue_int_count++) {
+ last_tick = get_cycles();
+ } else {
+ const cycles_t new_tick = get_cycles();
+ if ((new_tick - last_tick) >
+ STALL_INTERRUPT_RESET_THRESHOLD) {
+ continue_int_count = 0;
+ } else if (continue_int_count >= 3) {
+ continue_int_count = 0;
+
+ /* GE Hung up, need reset. */
+ DRM_INFO("Reset GE!\n");
+
+ xgi_ge_hang_reset(info->mmio_map);
+ }
+ }
+ }
+ } else if (0 != (0x1 & int_status)) {
+ s_invalid_begin++;
+ DRM_WRITE32(info->mmio_map, 0x2810,
+ (int_status & ~0x01) | 0x04000000);
+ }
+
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+bool xgi_crt_irq_handler(struct xgi_info * info)
+{
+ bool ret = FALSE;
+ u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
+
+ /* CRT1 interrupt just happened
+ */
+ if (IN3CFB(info->mmio_map, 0x37) & 0x01) {
+ u8 op3cf_3d;
+ u8 op3cf_37;
+
+ /* What happened?
+ */
+ op3cf_37 = IN3CFB(info->mmio_map, 0x37);
+
+ /* Clear CRT interrupt
+ */
+ op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
+ OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
+ OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
+ ret = TRUE;
+ }
+ DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
+
+ return (ret);
+}
+
+bool xgi_dvi_irq_handler(struct xgi_info * info)
+{
+ bool ret = FALSE;
+ const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
+
+ /* DVI interrupt just happened
+ */
+ if (IN3CFB(info->mmio_map, 0x38) & 0x20) {
+ const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
+ u8 op3cf_39;
+ u8 op3cf_37;
+ u8 op3x5_5a;
+
+ /* What happened?
+ */
+ op3cf_37 = IN3CFB(info->mmio_map, 0x37);
+
+ /* Notify BIOS that DVI plug/unplug happened
+ */
+ op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
+ OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);
+
+ DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);
+
+ /* Clear DVI interrupt
+ */
+ op3cf_39 = IN3CFB(info->mmio_map, 0x39);
+ OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));
+ OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));
+
+ ret = TRUE;
+ }
+ DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
+
+ return (ret);
+}
+
+
+static void dump_reg_header(unsigned regbase)
+{
+ printk("\n=====xgi_dump_register========0x%x===============\n",
+ regbase);
+ printk(" 0 1 2 3 4 5 6 7 8 9 a b c d e f\n");
+}
+
+
+static void dump_indexed_reg(struct xgi_info * info, unsigned regbase)
+{
+ unsigned i, j;
+ u8 temp;
+
+
+ dump_reg_header(regbase);
+ for (i = 0; i < 0x10; i++) {
+ printk("%1x ", i);
+
+ for (j = 0; j < 0x10; j++) {
+ DRM_WRITE8(info->mmio_map, regbase - 1,
+ (i * 0x10) + j);
+ temp = DRM_READ8(info->mmio_map, regbase);
+ printk("%3x", temp);
+ }
+ printk("\n");
+ }
+}
+
+
+static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
+{
+ unsigned i, j;
+
+
+ dump_reg_header(regbase);
+ for (i = 0; i < range; i++) {
+ printk("%1x ", i);
+
+ for (j = 0; j < 0x10; j++) {
+ u8 temp = DRM_READ8(info->mmio_map,
+ regbase + (i * 0x10) + j);
+ printk("%3x", temp);
+ }
+ printk("\n");
+ }
+}
+
+
+void xgi_dump_register(struct xgi_info * info)
+{
+ dump_indexed_reg(info, 0x3c5);
+ dump_indexed_reg(info, 0x3d5);
+ dump_indexed_reg(info, 0x3cf);
+
+ dump_reg(info, 0xB000, 0x05);
+ dump_reg(info, 0x2200, 0x0B);
+ dump_reg(info, 0x2300, 0x07);
+ dump_reg(info, 0x2400, 0x10);
+ dump_reg(info, 0x2800, 0x10);
+}
+
+
+#define WHOLD_GE_STATUS 0x2800
+
+/* Test everything except the "whole GE busy" bit, the "master engine busy"
+ * bit, and the reserved bits [26:21].
+ */
+#define IDLE_MASK ~((1U<<31) | (1U<<28) | (0x3f<<21))
+
+void xgi_waitfor_pci_idle(struct xgi_info * info)
+{
+ unsigned int idleCount = 0;
+ u32 old_status = 0;
+ unsigned int same_count = 0;
+
+ while (idleCount < 5) {
+ const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
+ & IDLE_MASK;
+
+ if (status == old_status) {
+ same_count++;
+
+ if ((same_count % 100) == 0) {
+ DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n",
+ old_status, same_count);
+ }
+ } else {
+ old_status = status;
+ same_count = 0;
+ }
+
+ if (status != 0) {
+ msleep(1);
+ idleCount = 0;
+ } else {
+ idleCount++;
+ }
+ }
+}
+
+
+void xgi_enable_mmio(struct xgi_info * info)
+{
+ u8 protect = 0;
+ u8 temp;
+
+ /* Unprotect registers */
+ DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+ protect = DRM_READ8(info->mmio_map, 0x3C5);
+ DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
+
+ DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
+ temp = DRM_READ8(info->mmio_map, 0x3D5);
+ DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);
+
+ /* Enable MMIO */
+ DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+ temp = DRM_READ8(info->mmio_map, 0x3D5);
+ DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);
+
+ /* Protect registers */
+ OUT3C5B(info->mmio_map, 0x11, protect);
+}
+
+
+void xgi_disable_mmio(struct xgi_info * info)
+{
+ u8 protect = 0;
+ u8 temp;
+
+ /* Unprotect registers */
+ DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+ protect = DRM_READ8(info->mmio_map, 0x3C5);
+ DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
+
+ /* Disable MMIO access */
+ DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+ temp = DRM_READ8(info->mmio_map, 0x3D5);
+ DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);
+
+ /* Protect registers */
+ OUT3C5B(info->mmio_map, 0x11, protect);
+}
+
+
+void xgi_enable_ge(struct xgi_info * info)
+{
+ u8 bOld3cf2a;
+ int wait = 0;
+
+ OUT3C5B(info->mmio_map, 0x11, 0x92);
+
+ /* Save and close dynamic gating
+ */
+ bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL);
+ OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM);
+
+ /* Enable 2D and 3D GE
+ */
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
+ wait = 10;
+ while (wait--) {
+ DRM_READ8(info->mmio_map, 0x36);
+ }
+
+ /* Reset both 3D and 2D engine
+ */
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL,
+ (GE_ENABLE | GE_RESET | GE_ENABLE_3D));
+ wait = 10;
+ while (wait--) {
+ DRM_READ8(info->mmio_map, 0x36);
+ }
+
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
+ wait = 10;
+ while (wait--) {
+ DRM_READ8(info->mmio_map, 0x36);
+ }
+
+ /* Enable 2D engine only
+ */
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE);
+
+ /* Enable 2D+3D engine
+ */
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
+
+ /* Restore dynamic gating
+ */
+ OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a);
+}
+
+
+void xgi_disable_ge(struct xgi_info * info)
+{
+ int wait = 0;
+
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
+
+ wait = 10;
+ while (wait--) {
+ DRM_READ8(info->mmio_map, 0x36);
+ }
+
+ /* Reset both 3D and 2D engine
+ */
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL,
+ (GE_ENABLE | GE_RESET | GE_ENABLE_3D));
+
+ wait = 10;
+ while (wait--) {
+ DRM_READ8(info->mmio_map, 0x36);
+ }
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D));
+
+ wait = 10;
+ while (wait--) {
+ DRM_READ8(info->mmio_map, 0x36);
+ }
+
+ /* Disable 2D engine and 3D engine.
+ */
+ OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0);
+}
diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h
new file mode 100644
index 00000000..af19a11a
--- /dev/null
+++ b/linux-core/xgi_misc.h
@@ -0,0 +1,37 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_MISC_H_
+#define _XGI_MISC_H_
+
+extern void xgi_dump_register(struct xgi_info * info);
+
+extern bool xgi_ge_irq_handler(struct xgi_info * info);
+extern bool xgi_crt_irq_handler(struct xgi_info * info);
+extern bool xgi_dvi_irq_handler(struct xgi_info * info);
+extern void xgi_waitfor_pci_idle(struct xgi_info * info);
+
+#endif
diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c
new file mode 100644
index 00000000..a7d3ea24
--- /dev/null
+++ b/linux-core/xgi_pcie.c
@@ -0,0 +1,126 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+
+void xgi_gart_flush(struct drm_device *dev)
+{
+ struct xgi_info *const info = dev->dev_private;
+ u8 temp;
+
+ DRM_MEMORYBARRIER();
+
+ /* Set GART in SFB */
+ temp = DRM_READ8(info->mmio_map, 0xB00C);
+ DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);
+
+ /* Set GART base address to HW */
+ DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr);
+
+ /* Flush GART table. */
+ DRM_WRITE8(info->mmio_map, 0xB03F, 0x40);
+ DRM_WRITE8(info->mmio_map, 0xB03F, 0x00);
+}
+
+
+int xgi_pcie_heap_init(struct xgi_info * info)
+{
+ u8 temp = 0;
+ int err;
+ struct drm_scatter_gather request;
+
+ /* Get current FB aperture size */
+ temp = IN3X5B(info->mmio_map, 0x27);
+ DRM_INFO("In3x5(0x27): 0x%x \n", temp);
+
+ if (temp & 0x01) { /* 256MB; Jong 06/05/2006; 0x10000000 */
+ info->pcie.base = 256 * 1024 * 1024;
+ } else { /* 128MB; Jong 06/05/2006; 0x08000000 */
+ info->pcie.base = 128 * 1024 * 1024;
+ }
+
+
+ DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);
+
+ /* Get current lookup table page size */
+ temp = DRM_READ8(info->mmio_map, 0xB00C);
+ if (temp & 0x04) { /* 8KB */
+ info->lutPageSize = 8 * 1024;
+ } else { /* 4KB */
+ info->lutPageSize = 4 * 1024;
+ }
+
+ DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);
+
+
+ request.size = info->pcie.size;
+ err = drm_sg_alloc(info->dev, & request);
+ if (err) {
+ DRM_ERROR("cannot allocate PCIE GART backing store! "
+ "size = %d\n", info->pcie.size);
+ return err;
+ }
+
+ info->gart_info.gart_table_location = DRM_ATI_GART_MAIN;
+ info->gart_info.gart_reg_if = DRM_ATI_GART_PCI;
+ info->gart_info.table_size = info->dev->sg->pages * sizeof(u32);
+
+ if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) {
+ DRM_ERROR("failed to init PCI GART!\n");
+ return -ENOMEM;
+ }
+
+
+ xgi_gart_flush(info->dev);
+
+ mutex_lock(&info->dev->struct_mutex);
+ err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL,
+ 0, info->pcie.size);
+ mutex_unlock(&info->dev->struct_mutex);
+ if (err) {
+ drm_ati_pcigart_cleanup(info->dev, &info->gart_info);
+ }
+
+ info->pcie_heap_initialized = (err == 0);
+ return err;
+}
+
+
+/**
+ * xgi_find_pcie_virt
+ * @address: GE HW address
+ *
+ * Returns the CPU virtual address. Assumes the CPU virtual address is
+ * contiguous even when not in the same block.
+ */
+void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
+{
+ const unsigned long offset = address - info->pcie.base;
+
+ return ((u8 *) info->dev->sg->virtual) + offset;
+}
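A small sketch tying xgi_find_pcie_virt() back to the allocator in xgi_fb.c: for a non-local allocation, xgi_alloc() sets hw_addr = offset + info->pcie.base, and this function subtracts pcie.base again, so the round trip lands on sg->virtual + offset. Assumes an alloc filled in with location == XGI_MEMLOC_NON_LOCAL:

/* Sketch: recover the CPU mapping of a GART allocation. */
static void *xgi_gart_alloc_to_cpu(struct xgi_info *info,
				   const struct xgi_mem_alloc *alloc)
{
	return xgi_find_pcie_virt(info, alloc->hw_addr);
}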
diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h
new file mode 100644
index 00000000..5c0100a0
--- /dev/null
+++ b/linux-core/xgi_regs.h
@@ -0,0 +1,169 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_REGS_H_
+#define _XGI_REGS_H_
+
+#include "drmP.h"
+#include "drm.h"
+
+#define MAKE_MASK(bits) ((1U << (bits)) - 1)
+
+#define ONE_BIT_MASK MAKE_MASK(1)
+#define TWENTY_BIT_MASK MAKE_MASK(20)
+#define TWENTYONE_BIT_MASK MAKE_MASK(21)
+#define TWENTYTWO_BIT_MASK MAKE_MASK(22)
+
+
+/* Port 0x3d4/0x3d5, index 0x2a */
+#define XGI_INTERFACE_SEL 0x2a
+#define DUAL_64BIT (1U<<7)
+#define INTERNAL_32BIT (1U<<6)
+#define EN_SEP_WR (1U<<5)
+#define POWER_DOWN_SEL (1U<<4)
+/*#define RESERVED_3 (1U<<3) */
+#define SUBS_MCLK_PCICLK (1U<<2)
+#define MEM_SIZE_MASK (3<<0)
+#define MEM_SIZE_32MB (0<<0)
+#define MEM_SIZE_64MB (1<<0)
+#define MEM_SIZE_128MB (2<<0)
+#define MEM_SIZE_256MB (3<<0)
+
+/* Port 0x3d4/0x3d5, index 0x36 */
+#define XGI_GE_CNTL 0x36
+#define GE_ENABLE (1U<<7)
+/*#define RESERVED_6 (1U<<6) */
+/*#define RESERVED_5 (1U<<5) */
+#define GE_RESET (1U<<4)
+/*#define RESERVED_3 (1U<<3) */
+#define GE_ENABLE_3D (1U<<2)
+/*#define RESERVED_1 (1U<<1) */
+/*#define RESERVED_0 (1U<<0) */
+
+/* Port 0x3ce/0x3cf, index 0x2a */
+#define XGI_MISC_CTRL 0x2a
+#define MOTION_VID_SUSPEND (1U<<7)
+#define DVI_CRTC_TIMING_SEL (1U<<6)
+#define LCD_SEL_CTL_NEW (1U<<5)
+#define LCD_SEL_EXT_DELYCTRL (1U<<4)
+#define REG_LCDDPARST (1U<<3)
+#define LCD2DPAOFF (1U<<2)
+/*#define RESERVED_1 (1U<<1) */
+#define EN_GEPWM (1U<<0) /* Enable GE power management */
+
+
+#define BASE_3D_ENG 0x2800
+
+#define M2REG_FLUSH_ENGINE_ADDRESS 0x000
+#define M2REG_FLUSH_ENGINE_COMMAND 0x00
+#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21)
+#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20)
+#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK
+
+#define M2REG_RESET_ADDRESS 0x004
+#define M2REG_RESET_COMMAND 0x01
+#define M2REG_RESET_STATUS2_MASK (ONE_BIT_MASK<<10)
+#define M2REG_RESET_STATUS1_MASK (ONE_BIT_MASK<<9)
+#define M2REG_RESET_STATUS0_MASK (ONE_BIT_MASK<<8)
+#define M2REG_RESET_3DENG_MASK (ONE_BIT_MASK<<4)
+#define M2REG_RESET_2DENG_MASK (ONE_BIT_MASK<<2)
+
+/* Write register */
+#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010
+#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04
+#define M2REG_CLEAR_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11)
+#define M2REG_CLEAR_INTERRUPT_3_MASK (ONE_BIT_MASK<<10)
+#define M2REG_CLEAR_INTERRUPT_2_MASK (ONE_BIT_MASK<<9)
+#define M2REG_CLEAR_INTERRUPT_0_MASK (ONE_BIT_MASK<<8)
+#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4)
+#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1)
+#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK (ONE_BIT_MASK<<0)
+
+/* Read register */
+#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010
+#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04
+#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11)
+#define M2REG_ACTIVE_INTERRUPT_3_MASK (ONE_BIT_MASK<<10)
+#define M2REG_ACTIVE_INTERRUPT_2_MASK (ONE_BIT_MASK<<9)
+#define M2REG_ACTIVE_INTERRUPT_0_MASK (ONE_BIT_MASK<<8)
+#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK (ONE_BIT_MASK<<0)
+
+#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014
+#define M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05
+
+
+/**
+ * Begin instruction, double-word 0
+ */
+#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK (ONE_BIT_MASK<<22)
+#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20)
+#define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK
+
+/**
+ * Begin instruction, double-word 1
+ */
+#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31)
+#define BEGIN_COMMAND_LIST_LENGTH_MASK TWENTYTWO_BIT_MASK
+
+
+/* Hardware access functions */
+static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
+{
+ DRM_WRITE8(map, 0x3C4, index);
+ DRM_WRITE8(map, 0x3C5, data);
+}
+
+static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
+{
+ DRM_WRITE8(map, 0x3D4, index);
+ DRM_WRITE8(map, 0x3D5, data);
+}
+
+static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
+{
+ DRM_WRITE8(map, 0x3CE, index);
+ DRM_WRITE8(map, 0x3CF, data);
+}
+
+static inline u8 IN3C5B(struct drm_map * map, u8 index)
+{
+ DRM_WRITE8(map, 0x3C4, index);
+ return DRM_READ8(map, 0x3C5);
+}
+
+static inline u8 IN3X5B(struct drm_map * map, u8 index)
+{
+ DRM_WRITE8(map, 0x3D4, index);
+ return DRM_READ8(map, 0x3D5);
+}
+
+static inline u8 IN3CFB(struct drm_map * map, u8 index)
+{
+ DRM_WRITE8(map, 0x3CE, index);
+ return DRM_READ8(map, 0x3CF);
+}
+
+#endif
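These helpers wrap the usual VGA indexed-register protocol: write the index to the even port, then access the data byte at the odd port. A hedged read-modify-write sketch in the same style xgi_enable_ge() uses:

/* Sketch: set a bit in a 3D4/3D5-indexed register without touching
 * its neighbours, e.g. xgi_set_3x5_bit(map, XGI_GE_CNTL, GE_ENABLE).
 */
static inline void xgi_set_3x5_bit(struct drm_map *map, u8 index, u8 bit)
{
	const u8 old = IN3X5B(map, index);

	OUT3X5B(map, index, old | bit);
}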
diff --git a/shared-core/Makefile.am b/shared-core/Makefile.am
index f0ebf2a3..7193e527 100644
--- a/shared-core/Makefile.am
+++ b/shared-core/Makefile.am
@@ -36,4 +36,5 @@ klibdrminclude_HEADERS = \
sis_drm.h \
via_drm.h \
r300_reg.h \
- via_3d_reg.h
+ via_3d_reg.h \
+ xgi_drm.h
diff --git a/shared-core/drm.h b/shared-core/drm.h
index 0f4a878b..ab05ffc1 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -89,24 +89,6 @@
#define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size)
#endif
-#define XFREE86_VERSION(major,minor,patch,snap) \
- ((major << 16) | (minor << 8) | patch)
-
-#ifndef CONFIG_XFREE86_VERSION
-#define CONFIG_XFREE86_VERSION XFREE86_VERSION(4,1,0,0)
-#endif
-
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
-#define DRM_PROC_DEVICES "/proc/devices"
-#define DRM_PROC_MISC "/proc/misc"
-#define DRM_PROC_DRM "/proc/drm"
-#define DRM_DEV_DRM "/dev/drm"
-#define DRM_DEV_MODE (S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
-#define DRM_DEV_UID 0
-#define DRM_DEV_GID 0
-#endif
-
-#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
#ifdef __OpenBSD__
#define DRM_MAJOR 81
#endif
@@ -114,7 +96,7 @@
#define DRM_MAJOR 226
#endif
#define DRM_MAX_MINOR 15
-#endif
+
#define DRM_NAME "drm" /**< Name in kernel, /dev, and /proc */
#define DRM_MIN_ORDER 5 /**< At least 2^5 bytes = 32 bytes */
#define DRM_MAX_ORDER 22 /**< Up to 2^22 bytes = 4MB */
@@ -127,16 +109,9 @@
#define _DRM_LOCKING_CONTEXT(lock) ((lock) & ~(_DRM_LOCK_HELD|_DRM_LOCK_CONT))
#if defined(__linux__)
-#if defined(__KERNEL__)
-typedef __u64 drm_u64_t;
-#else
-typedef unsigned long long drm_u64_t;
-#endif
-
typedef unsigned int drm_handle_t;
#else
#include <sys/types.h>
-typedef u_int64_t drm_u64_t;
typedef unsigned long drm_handle_t; /**< To mapped regions */
#endif
typedef unsigned int drm_context_t; /**< GLXContext handle */
@@ -152,31 +127,31 @@ typedef unsigned int drm_magic_t; /**< Magic for authentication */
* \note KW: Actually it's illegal to change either for
* backwards-compatibility reasons.
*/
-typedef struct drm_clip_rect {
+struct drm_clip_rect {
unsigned short x1;
unsigned short y1;
unsigned short x2;
unsigned short y2;
-} drm_clip_rect_t;
+};
/**
* Drawable information.
*/
-typedef struct drm_drawable_info {
+struct drm_drawable_info {
unsigned int num_rects;
- drm_clip_rect_t *rects;
-} drm_drawable_info_t;
+ struct drm_clip_rect *rects;
+};
/**
* Texture region,
*/
-typedef struct drm_tex_region {
+struct drm_tex_region {
unsigned char next;
unsigned char prev;
unsigned char in_use;
unsigned char padding;
unsigned int age;
-} drm_tex_region_t;
+};
/**
* Hardware lock.
@@ -185,10 +160,10 @@ typedef struct drm_tex_region {
* processor bus contention on a multiprocessor system, there should not be any
* other data stored in the same cache line.
*/
-typedef struct drm_hw_lock {
+struct drm_hw_lock {
__volatile__ unsigned int lock; /**< lock variable */
char padding[60]; /**< Pad to cache line */
-} drm_hw_lock_t;
+};
/* This is beyond ugly, and only works on GCC. However, it allows me to use
* drm.h in places (i.e., in the X-server) where I can't use size_t. The real
@@ -211,7 +186,7 @@ typedef struct drm_hw_lock {
*
* \sa drmGetVersion().
*/
-typedef struct drm_version {
+struct drm_version {
int version_major; /**< Major version */
int version_minor; /**< Minor version */
int version_patchlevel; /**< Patch level */
@@ -221,35 +196,35 @@ typedef struct drm_version {
char __user *date; /**< User-space buffer to hold date */
DRM_SIZE_T desc_len; /**< Length of desc buffer */
char __user *desc; /**< User-space buffer to hold desc */
-} drm_version_t;
+};
/**
* DRM_IOCTL_GET_UNIQUE ioctl argument type.
*
* \sa drmGetBusid() and drmSetBusId().
*/
-typedef struct drm_unique {
+struct drm_unique {
DRM_SIZE_T unique_len; /**< Length of unique */
char __user *unique; /**< Unique name for driver instantiation */
-} drm_unique_t;
+};
#undef DRM_SIZE_T
-typedef struct drm_list {
+struct drm_list {
int count; /**< Length of user-space structures */
- drm_version_t __user *version;
-} drm_list_t;
+ struct drm_version __user *version;
+};
-typedef struct drm_block {
+struct drm_block {
int unused;
-} drm_block_t;
+};
/**
* DRM_IOCTL_CONTROL ioctl argument type.
*
* \sa drmCtlInstHandler() and drmCtlUninstHandler().
*/
-typedef struct drm_control {
+struct drm_control {
enum {
DRM_ADD_COMMAND,
DRM_RM_COMMAND,
@@ -257,12 +232,12 @@ typedef struct drm_control {
DRM_UNINST_HANDLER
} func;
int irq;
-} drm_control_t;
+};
/**
* Type of memory to map.
*/
-typedef enum drm_map_type {
+enum drm_map_type {
_DRM_FRAME_BUFFER = 0, /**< WC (no caching), no core dump */
_DRM_REGISTERS = 1, /**< no caching, no core dump */
_DRM_SHM = 2, /**< shared, cached */
@@ -270,12 +245,12 @@ typedef enum drm_map_type {
_DRM_SCATTER_GATHER = 4, /**< Scatter/gather memory for PCI DMA */
_DRM_CONSISTENT = 5, /**< Consistent memory for PCI DMA */
_DRM_TTM = 6
-} drm_map_type_t;
+};
/**
* Memory mapping flags.
*/
-typedef enum drm_map_flags {
+enum drm_map_flags {
_DRM_RESTRICTED = 0x01, /**< Cannot be mapped to user-virtual */
_DRM_READ_ONLY = 0x02,
_DRM_LOCKED = 0x04, /**< shared, cached, locked */
@@ -284,12 +259,12 @@ typedef enum drm_map_flags {
_DRM_CONTAINS_LOCK = 0x20, /**< SHM page that contains lock */
_DRM_REMOVABLE = 0x40, /**< Removable mapping */
_DRM_DRIVER = 0x80 /**< Driver will take care of it */
-} drm_map_flags_t;
+};
-typedef struct drm_ctx_priv_map {
+struct drm_ctx_priv_map {
unsigned int ctx_id; /**< Context requesting private mapping */
void *handle; /**< Handle of map */
-} drm_ctx_priv_map_t;
+};
/**
* DRM_IOCTL_GET_MAP, DRM_IOCTL_ADD_MAP and DRM_IOCTL_RM_MAP ioctls
@@ -297,30 +272,30 @@ typedef struct drm_ctx_priv_map {
*
* \sa drmAddMap().
*/
-typedef struct drm_map {
+struct drm_map {
unsigned long offset; /**< Requested physical address (0 for SAREA)*/
unsigned long size; /**< Requested physical size (bytes) */
- drm_map_type_t type; /**< Type of memory to map */
- drm_map_flags_t flags; /**< Flags */
+ enum drm_map_type type; /**< Type of memory to map */
+ enum drm_map_flags flags; /**< Flags */
void *handle; /**< User-space: "Handle" to pass to mmap() */
/**< Kernel-space: kernel-virtual address */
int mtrr; /**< MTRR slot used */
/* Private data */
-} drm_map_t;
+};
/**
* DRM_IOCTL_GET_CLIENT ioctl argument type.
*/
-typedef struct drm_client {
+struct drm_client {
int idx; /**< Which client desired? */
int auth; /**< Is client authenticated? */
unsigned long pid; /**< Process ID */
unsigned long uid; /**< User ID */
unsigned long magic; /**< Magic */
unsigned long iocs; /**< Ioctl count */
-} drm_client_t;
+};
-typedef enum {
+enum drm_stat_type {
_DRM_STAT_LOCK,
_DRM_STAT_OPENS,
_DRM_STAT_CLOSES,
@@ -338,23 +313,23 @@ typedef enum {
_DRM_STAT_SPECIAL, /**< Special DMA (e.g., priority or polled) */
_DRM_STAT_MISSED /**< Missed DMA opportunity */
/* Add to the *END* of the list */
-} drm_stat_type_t;
+};
/**
* DRM_IOCTL_GET_STATS ioctl argument type.
*/
-typedef struct drm_stats {
+struct drm_stats {
unsigned long count;
struct {
unsigned long value;
- drm_stat_type_t type;
+ enum drm_stat_type type;
} data[15];
-} drm_stats_t;
+};
/**
* Hardware locking flags.
*/
-typedef enum drm_lock_flags {
+enum drm_lock_flags {
_DRM_LOCK_READY = 0x01, /**< Wait until hardware is ready for DMA */
_DRM_LOCK_QUIESCENT = 0x02, /**< Wait until hardware quiescent */
_DRM_LOCK_FLUSH = 0x04, /**< Flush this context's DMA queue first */
@@ -364,17 +339,17 @@ typedef enum drm_lock_flags {
full-screen DGA-like mode. */
_DRM_HALT_ALL_QUEUES = 0x10, /**< Halt all current and future queues */
_DRM_HALT_CUR_QUEUES = 0x20 /**< Halt all current queues */
-} drm_lock_flags_t;
+};
/**
* DRM_IOCTL_LOCK, DRM_IOCTL_UNLOCK and DRM_IOCTL_FINISH ioctl argument type.
*
* \sa drmGetLock() and drmUnlock().
*/
-typedef struct drm_lock {
+struct drm_lock {
int context;
- drm_lock_flags_t flags;
-} drm_lock_t;
+ enum drm_lock_flags flags;
+};
/**
* DMA flags
@@ -384,7 +359,7 @@ typedef struct drm_lock {
*
* \sa drm_dma.
*/
-typedef enum drm_dma_flags {
+enum drm_dma_flags {
/* Flags for DMA buffer dispatch */
_DRM_DMA_BLOCK = 0x01, /**<
* Block until buffer dispatched.
@@ -403,14 +378,14 @@ typedef enum drm_dma_flags {
_DRM_DMA_WAIT = 0x10, /**< Wait for free buffers */
_DRM_DMA_SMALLER_OK = 0x20, /**< Smaller-than-requested buffers OK */
_DRM_DMA_LARGER_OK = 0x40 /**< Larger-than-requested buffers OK */
-} drm_dma_flags_t;
+};
/**
* DRM_IOCTL_ADD_BUFS and DRM_IOCTL_MARK_BUFS ioctl argument type.
*
* \sa drmAddBufs().
*/
-typedef struct drm_buf_desc {
+struct drm_buf_desc {
int count; /**< Number of buffers of this size */
int size; /**< Size in bytes */
int low_mark; /**< Low water mark */
@@ -426,48 +401,48 @@ typedef struct drm_buf_desc {
* Start address of where the AGP buffers are
* in the AGP aperture
*/
-} drm_buf_desc_t;
+};
/**
* DRM_IOCTL_INFO_BUFS ioctl argument type.
*/
-typedef struct drm_buf_info {
+struct drm_buf_info {
int count; /**< Number of buffers described in list */
- drm_buf_desc_t __user *list; /**< List of buffer descriptions */
-} drm_buf_info_t;
+ struct drm_buf_desc __user *list; /**< List of buffer descriptions */
+};
/**
* DRM_IOCTL_FREE_BUFS ioctl argument type.
*/
-typedef struct drm_buf_free {
+struct drm_buf_free {
int count;
int __user *list;
-} drm_buf_free_t;
+};
/**
* Buffer information
*
* \sa drm_buf_map.
*/
-typedef struct drm_buf_pub {
+struct drm_buf_pub {
int idx; /**< Index into the master buffer list */
int total; /**< Buffer size */
int used; /**< Amount of buffer in use (for DMA) */
void __user *address; /**< Address of buffer */
-} drm_buf_pub_t;
+};
/**
* DRM_IOCTL_MAP_BUFS ioctl argument type.
*/
-typedef struct drm_buf_map {
+struct drm_buf_map {
int count; /**< Length of the buffer list */
#if defined(__cplusplus)
void __user *c_virtual;
#else
void __user *virtual; /**< Mmap'd area in user-virtual */
#endif
- drm_buf_pub_t __user *list; /**< Buffer information */
-} drm_buf_map_t;
+ struct drm_buf_pub __user *list; /**< Buffer information */
+};
/**
* DRM_IOCTL_DMA ioctl argument type.
@@ -476,48 +451,48 @@ typedef struct drm_buf_map {
*
* \sa drmDMA().
*/
-typedef struct drm_dma {
+struct drm_dma {
int context; /**< Context handle */
int send_count; /**< Number of buffers to send */
int __user *send_indices; /**< List of handles to buffers */
int __user *send_sizes; /**< Lengths of data to send */
- drm_dma_flags_t flags; /**< Flags */
+ enum drm_dma_flags flags; /**< Flags */
int request_count; /**< Number of buffers requested */
int request_size; /**< Desired size for buffers */
int __user *request_indices; /**< Buffer information */
int __user *request_sizes;
int granted_count; /**< Number of buffers granted */
-} drm_dma_t;
+};
-typedef enum {
+enum drm_ctx_flags {
_DRM_CONTEXT_PRESERVED = 0x01,
_DRM_CONTEXT_2DONLY = 0x02
-} drm_ctx_flags_t;
+};
/**
* DRM_IOCTL_ADD_CTX ioctl argument type.
*
* \sa drmCreateContext() and drmDestroyContext().
*/
-typedef struct drm_ctx {
+struct drm_ctx {
drm_context_t handle;
- drm_ctx_flags_t flags;
-} drm_ctx_t;
+ enum drm_ctx_flags flags;
+};
/**
* DRM_IOCTL_RES_CTX ioctl argument type.
*/
-typedef struct drm_ctx_res {
+struct drm_ctx_res {
int count;
- drm_ctx_t __user *contexts;
-} drm_ctx_res_t;
+ struct drm_ctx __user *contexts;
+};
/**
* DRM_IOCTL_ADD_DRAW and DRM_IOCTL_RM_DRAW ioctl argument type.
*/
-typedef struct drm_draw {
+struct drm_draw {
drm_drawable_t handle;
-} drm_draw_t;
+};
/**
* DRM_IOCTL_UPDATE_DRAW ioctl argument type.
@@ -526,53 +501,53 @@ typedef enum {
DRM_DRAWABLE_CLIPRECTS,
} drm_drawable_info_type_t;
-typedef struct drm_update_draw {
+struct drm_update_draw {
drm_drawable_t handle;
unsigned int type;
unsigned int num;
unsigned long long data;
-} drm_update_draw_t;
+};
/**
* DRM_IOCTL_GET_MAGIC and DRM_IOCTL_AUTH_MAGIC ioctl argument type.
*/
-typedef struct drm_auth {
+struct drm_auth {
drm_magic_t magic;
-} drm_auth_t;
+};
/**
* DRM_IOCTL_IRQ_BUSID ioctl argument type.
*
* \sa drmGetInterruptFromBusID().
*/
-typedef struct drm_irq_busid {
+struct drm_irq_busid {
int irq; /**< IRQ number */
int busnum; /**< bus number */
int devnum; /**< device number */
int funcnum; /**< function number */
-} drm_irq_busid_t;
+};
-typedef enum {
+enum drm_vblank_seq_type {
_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */
-} drm_vblank_seq_type_t;
+};
#define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
#define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_SIGNAL | _DRM_VBLANK_SECONDARY | \
_DRM_VBLANK_NEXTONMISS)
struct drm_wait_vblank_request {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
unsigned long signal;
};
struct drm_wait_vblank_reply {
- drm_vblank_seq_type_t type;
+ enum drm_vblank_seq_type type;
unsigned int sequence;
long tval_sec;
long tval_usec;
@@ -583,41 +558,41 @@ struct drm_wait_vblank_reply {
*
* \sa drmWaitVBlank().
*/
-typedef union drm_wait_vblank {
+union drm_wait_vblank {
struct drm_wait_vblank_request request;
struct drm_wait_vblank_reply reply;
-} drm_wait_vblank_t;
+};
/**
* DRM_IOCTL_AGP_ENABLE ioctl argument type.
*
* \sa drmAgpEnable().
*/
-typedef struct drm_agp_mode {
+struct drm_agp_mode {
unsigned long mode; /**< AGP mode */
-} drm_agp_mode_t;
+};
/**
* DRM_IOCTL_AGP_ALLOC and DRM_IOCTL_AGP_FREE ioctls argument type.
*
* \sa drmAgpAlloc() and drmAgpFree().
*/
-typedef struct drm_agp_buffer {
+struct drm_agp_buffer {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for binding / unbinding */
unsigned long type; /**< Type of memory to allocate */
unsigned long physical; /**< Physical used by i810 */
-} drm_agp_buffer_t;
+};
/**
* DRM_IOCTL_AGP_BIND and DRM_IOCTL_AGP_UNBIND ioctls argument type.
*
* \sa drmAgpBind() and drmAgpUnbind().
*/
-typedef struct drm_agp_binding {
+struct drm_agp_binding {
unsigned long handle; /**< From drm_agp_buffer */
unsigned long offset; /**< In bytes -- will round to page boundary */
-} drm_agp_binding_t;
+};
/**
* DRM_IOCTL_AGP_INFO ioctl argument type.
@@ -626,7 +601,7 @@ typedef struct drm_agp_binding {
* drmAgpBase(), drmAgpSize(), drmAgpMemoryUsed(), drmAgpMemoryAvail(),
* drmAgpVendorId() and drmAgpDeviceId().
*/
-typedef struct drm_agp_info {
+struct drm_agp_info {
int agp_version_major;
int agp_version_minor;
unsigned long mode;
@@ -640,25 +615,25 @@ typedef struct drm_agp_info {
unsigned short id_vendor;
unsigned short id_device;
/*@} */
-} drm_agp_info_t;
+};
/**
* DRM_IOCTL_SG_ALLOC ioctl argument type.
*/
-typedef struct drm_scatter_gather {
+struct drm_scatter_gather {
unsigned long size; /**< In bytes -- will round to page boundary */
unsigned long handle; /**< Used for mapping / unmapping */
-} drm_scatter_gather_t;
+};
/**
* DRM_IOCTL_SET_VERSION ioctl argument type.
*/
-typedef struct drm_set_version {
+struct drm_set_version {
int drm_di_major;
int drm_di_minor;
int drm_dd_major;
int drm_dd_minor;
-} drm_set_version_t;
+};
#define DRM_FENCE_FLAG_EMIT 0x00000001
@@ -671,25 +646,15 @@ typedef struct drm_set_version {
#define DRM_FENCE_TYPE_EXE 0x00000001
-typedef struct drm_fence_arg {
- unsigned handle;
- int class;
- unsigned type;
- unsigned flags;
- unsigned signaled;
- unsigned expand_pad[4]; /*Future expansion */
- enum {
- drm_fence_create,
- drm_fence_destroy,
- drm_fence_reference,
- drm_fence_unreference,
- drm_fence_signaled,
- drm_fence_flush,
- drm_fence_wait,
- drm_fence_emit,
- drm_fence_buffers
- } op;
-} drm_fence_arg_t;
+struct drm_fence_arg {
+ unsigned int handle;
+ unsigned int class;
+ unsigned int type;
+ unsigned int flags;
+ unsigned int signaled;
+ unsigned int pad64;
+ uint64_t expand_pad[3]; /*Future expansion */
+};
/* Buffer permissions, referring to how the GPU uses the buffers.
* these translate to fence types used for the buffers.
@@ -697,9 +662,9 @@ typedef struct drm_fence_arg {
* a command (batch-) buffer is exe. Can be or-ed together.
*/
-#define DRM_BO_FLAG_READ 0x00000001
-#define DRM_BO_FLAG_WRITE 0x00000002
-#define DRM_BO_FLAG_EXE 0x00000004
+#define DRM_BO_FLAG_READ (1ULL << 0)
+#define DRM_BO_FLAG_WRITE (1ULL << 1)
+#define DRM_BO_FLAG_EXE (1ULL << 2)
/*
* Status flags. Can be read to determine the actual state of a buffer.
@@ -707,30 +672,22 @@ typedef struct drm_fence_arg {
*/
/*
- * Mask: Never evict this buffer. Not even with force. This type of buffer is only
- * available to root and must be manually removed before buffer manager shutdown
- * or lock.
- * Flags: Acknowledge
- */
-#define DRM_BO_FLAG_NO_EVICT 0x00000010
-
-/*
* Mask: Require that the buffer is placed in mappable memory when validated.
* If not set the buffer may or may not be in mappable memory when validated.
* Flags: If set, the buffer is in mappable memory.
*/
-#define DRM_BO_FLAG_MAPPABLE 0x00000020
+#define DRM_BO_FLAG_MAPPABLE (1ULL << 5)
/* Mask: The buffer should be shareable with other processes.
* Flags: The buffer is shareable with other processes.
*/
-#define DRM_BO_FLAG_SHAREABLE 0x00000040
+#define DRM_BO_FLAG_SHAREABLE (1ULL << 6)
/* Mask: If set, place the buffer in cache-coherent memory if available.
* If clear, never place the buffer in cache coherent memory if validated.
* Flags: The buffer is currently in cache-coherent memory.
*/
-#define DRM_BO_FLAG_CACHED 0x00000080
+#define DRM_BO_FLAG_CACHED (1ULL << 7)
/* Mask: Make sure that every time this buffer is validated,
* it ends up on the same location provided that the memory mask is the same.
@@ -739,23 +696,23 @@ typedef struct drm_fence_arg {
* part of buffer manager shutdown or locking.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_NO_MOVE 0x00000100
+#define DRM_BO_FLAG_NO_MOVE (1ULL << 8)
/* Mask: Make sure the buffer is in cached memory when mapped for reading.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_READ_CACHED 0x00080000
+#define DRM_BO_FLAG_READ_CACHED (1ULL << 19)
/* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_FORCE_CACHING 0x00002000
+#define DRM_BO_FLAG_FORCE_CACHING (1ULL << 13)
/*
* Mask: Force DRM_BO_FLAG_MAPPABLE flag strictly also if it is clear.
* Flags: Acknowledge.
*/
-#define DRM_BO_FLAG_FORCE_MAPPABLE 0x00004000
+#define DRM_BO_FLAG_FORCE_MAPPABLE (1ULL << 14)
/*
* Memory type flags that can be or'ed together in the mask, but only
@@ -763,21 +720,25 @@ typedef struct drm_fence_arg {
*/
/* System memory */
-#define DRM_BO_FLAG_MEM_LOCAL 0x01000000
+#define DRM_BO_FLAG_MEM_LOCAL (1ULL << 24)
/* Translation table memory */
-#define DRM_BO_FLAG_MEM_TT 0x02000000
+#define DRM_BO_FLAG_MEM_TT (1ULL << 25)
/* Vram memory */
-#define DRM_BO_FLAG_MEM_VRAM 0x04000000
+#define DRM_BO_FLAG_MEM_VRAM (1ULL << 26)
/* Up to the driver to define. */
-#define DRM_BO_FLAG_MEM_PRIV0 0x08000000
-#define DRM_BO_FLAG_MEM_PRIV1 0x10000000
-#define DRM_BO_FLAG_MEM_PRIV2 0x20000000
-#define DRM_BO_FLAG_MEM_PRIV3 0x40000000
-#define DRM_BO_FLAG_MEM_PRIV4 0x80000000
+#define DRM_BO_FLAG_MEM_PRIV0 (1ULL << 27)
+#define DRM_BO_FLAG_MEM_PRIV1 (1ULL << 28)
+#define DRM_BO_FLAG_MEM_PRIV2 (1ULL << 29)
+#define DRM_BO_FLAG_MEM_PRIV3 (1ULL << 30)
+#define DRM_BO_FLAG_MEM_PRIV4 (1ULL << 31)
+/* We can add more of these now with a 64-bit flag type */
/* Memory flag mask */
-#define DRM_BO_MASK_MEM 0xFF000000
-#define DRM_BO_MASK_MEMTYPE 0xFF0000A0
+#define DRM_BO_MASK_MEM 0x00000000FF000000ULL
+#define DRM_BO_MASK_MEMTYPE 0x00000000FF0000A0ULL
+
+/* Driver-private flags */
+#define DRM_BO_MASK_DRIVER 0xFFFF000000000000ULL
/* Don't block on validate and map */
#define DRM_BO_HINT_DONT_BLOCK 0x00000002
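The buffer-object flag block above switches from 32-bit literals to 1ULL shifts and gives the masks explicit 64-bit widths, which is what makes room for the new driver-private range in DRM_BO_MASK_DRIVER. A minimal sketch of the mask-versus-flags convention, assuming the patched shared-core/drm.h is reachable as "drm.h" and using only constants defined in this hunk:

    /* Sketch only: mask = placements/permissions the caller will accept,
     * flags = what the kernel actually reports back for the buffer. */
    #include <stdint.h>
    #include "drm.h"

    static uint64_t default_request_mask(void)
    {
        /* GPU-readable and -writable, allowed to live in TT or VRAM. */
        return DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
               DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM;
    }

    static int placed_in_vram(uint64_t reported_flags)
    {
        /* The memory-type bits live in DRM_BO_MASK_MEM (bits 24-31). */
        return (reported_flags & DRM_BO_MASK_MEM) == DRM_BO_FLAG_MEM_VRAM;
    }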
@@ -786,40 +747,58 @@ typedef struct drm_fence_arg {
#define DRM_BO_HINT_WAIT_LAZY 0x00000008
#define DRM_BO_HINT_ALLOW_UNFENCED_MAP 0x00000010
+#define DRM_BO_INIT_MAGIC 0xfe769812
+#define DRM_BO_INIT_MAJOR 0
+#define DRM_BO_INIT_MINOR 1
-typedef enum {
+
+enum drm_bo_type {
drm_bo_type_dc,
drm_bo_type_user,
drm_bo_type_fake,
drm_bo_type_kernel, /* for initial kernel allocations */
-}drm_bo_type_t;
-
-
-typedef struct drm_bo_arg_request {
- unsigned handle; /* User space handle */
- unsigned mask;
- unsigned hint;
- drm_u64_t size;
- drm_bo_type_t type;
- unsigned arg_handle;
- drm_u64_t buffer_start;
- unsigned page_alignment;
- unsigned expand_pad[4]; /*Future expansion */
+};
+
+struct drm_bo_info_req {
+ uint64_t mask;
+ uint64_t flags;
+ unsigned int handle;
+ unsigned int hint;
+ unsigned int fence_class;
+ unsigned int pad64;
+};
+
+struct drm_bo_create_req {
+ uint64_t mask;
+ uint64_t size;
+ uint64_t buffer_start;
+ unsigned int hint;
+ unsigned int page_alignment;
+ enum drm_bo_type type;
+ unsigned int pad64;
+};
+
+struct drm_bo_op_req {
enum {
- drm_bo_create,
drm_bo_validate,
- drm_bo_map,
- drm_bo_unmap,
drm_bo_fence,
- drm_bo_destroy,
- drm_bo_reference,
- drm_bo_unreference,
- drm_bo_info,
- drm_bo_wait_idle,
- drm_bo_ref_fence
+ drm_bo_ref_fence,
} op;
-} drm_bo_arg_request_t;
+ unsigned int arg_handle;
+ struct drm_bo_info_req bo_req;
+};
+struct drm_bo_set_pin_req {
+ /** Buffer object ID */
+ unsigned int handle;
+ /**
+ * - 0: Unpin the given buffer object.
+ * - 1: Pin the given buffer object, requiring that its offset and
+ * memory area stay constant until unpin. The intended use is for
+ * scanout buffers.
+ */
+ unsigned int pin;
+};
/*
* Reply flags
@@ -827,30 +806,71 @@ typedef struct drm_bo_arg_request {
#define DRM_BO_REP_BUSY 0x00000001
-typedef struct drm_bo_arg_reply {
+struct drm_bo_info_rep {
+ uint64_t flags;
+ uint64_t mask;
+ uint64_t size;
+ uint64_t offset;
+ uint64_t arg_handle;
+ uint64_t buffer_start;
+ unsigned int handle;
+ unsigned int fence_flags;
+ unsigned int rep_flags;
+ unsigned int page_alignment;
+ unsigned int desired_tile_stride;
+ unsigned int hw_tile_stride;
+ unsigned int tile_info;
+ unsigned int pad64;
+ uint64_t expand_pad[4]; /*Future expansion */
+};
+
+struct drm_bo_arg_rep {
+ struct drm_bo_info_rep bo_info;
int ret;
- unsigned handle;
- unsigned flags;
- drm_u64_t size;
- drm_u64_t offset;
- drm_u64_t arg_handle;
- unsigned mask;
- drm_u64_t buffer_start;
- unsigned fence_flags;
- unsigned rep_flags;
- unsigned page_alignment;
- unsigned expand_pad[4]; /*Future expansion */
-}drm_bo_arg_reply_t;
-
-
-typedef struct drm_bo_arg{
+ unsigned int pad64;
+};
+
+struct drm_bo_create_arg {
+ union {
+ struct drm_bo_create_req req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_handle_arg {
+ unsigned int handle;
+};
+
+struct drm_bo_reference_info_arg {
+ union {
+ struct drm_bo_handle_arg req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_map_wait_idle_arg {
+ union {
+ struct drm_bo_info_req req;
+ struct drm_bo_info_rep rep;
+ } d;
+};
+
+struct drm_bo_op_arg {
+ uint64_t next;
+ union {
+ struct drm_bo_op_req req;
+ struct drm_bo_arg_rep rep;
+ } d;
int handled;
- drm_u64_t next;
+ unsigned int pad64;
+};
+
+struct drm_bo_set_pin_arg {
union {
- drm_bo_arg_request_t req;
- drm_bo_arg_reply_t rep;
+ struct drm_bo_set_pin_req req;
+ struct drm_bo_info_rep rep;
} d;
-} drm_bo_arg_t;
+};
#define DRM_BO_MEM_LOCAL 0
#define DRM_BO_MEM_TT 1
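The new buffer-object arguments above share one calling convention: each ioctl argument is a union of a request and a reply, userspace fills d.req, and the kernel overwrites the same storage with d.rep before returning. A hedged sketch of creating a buffer object this way, assuming the patched drm.h (DRM_IOCTL_BO_CREATE is defined later in this header), an open DRM file descriptor, and no libdrm wrapper; error handling is omitted:

    #include <string.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "drm.h"

    static int bo_create(int fd, uint64_t size, uint64_t mask,
                         unsigned int *handle_out)
    {
        struct drm_bo_create_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.d.req.size = size;          /* rounded up to a page by the kernel */
        arg.d.req.mask = mask;          /* DRM_BO_FLAG_* placement/permission bits */
        arg.d.req.type = drm_bo_type_dc;
        arg.d.req.page_alignment = 0;   /* left at zero in this sketch */

        if (ioctl(fd, DRM_IOCTL_BO_CREATE, &arg) != 0)
            return -1;

        /* The union now holds the reply written by the kernel. */
        *handle_out = arg.d.rep.handle;
        return 0;
    }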
@@ -863,25 +883,18 @@ typedef struct drm_bo_arg{
#define DRM_BO_MEM_TYPES 8 /* For now. */
-typedef union drm_mm_init_arg{
- struct {
- enum {
- mm_init,
- mm_takedown,
- mm_query,
- mm_lock,
- mm_unlock
- } op;
- drm_u64_t p_offset;
- drm_u64_t p_size;
- unsigned mem_type;
- unsigned expand_pad[8]; /*Future expansion */
- } req;
- struct {
- drm_handle_t mm_sarea;
- unsigned expand_pad[8]; /*Future expansion */
- } rep;
-} drm_mm_init_arg_t;
+struct drm_mm_type_arg {
+ unsigned int mem_type;
+};
+
+struct drm_mm_init_arg {
+ unsigned int magic;
+ unsigned int major;
+ unsigned int minor;
+ unsigned int mem_type;
+ uint64_t p_offset;
+ uint64_t p_size;
+};
/*
* Drm mode setting
@@ -987,65 +1000,87 @@ struct drm_mode_mode_cmd {
#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
-#define DRM_IOCTL_VERSION DRM_IOWR(0x00, drm_version_t)
-#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, drm_unique_t)
-#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, drm_auth_t)
-#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, drm_irq_busid_t)
-#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, drm_map_t)
-#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, drm_client_t)
-#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, drm_stats_t)
-#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, drm_set_version_t)
-
-#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, drm_unique_t)
-#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, drm_auth_t)
-#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, drm_block_t)
-#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, drm_block_t)
-#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, drm_control_t)
-#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, drm_map_t)
-#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, drm_buf_desc_t)
-#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, drm_buf_desc_t)
-#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, drm_buf_info_t)
-#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, drm_buf_map_t)
-#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, drm_buf_free_t)
-
-#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, drm_map_t)
-
-#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, drm_ctx_priv_map_t)
-#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, drm_ctx_priv_map_t)
-
-#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, drm_ctx_t)
-#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, drm_ctx_t)
-#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, drm_ctx_t)
-#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, drm_ctx_t)
-#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, drm_ctx_t)
-#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, drm_ctx_t)
-#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, drm_ctx_res_t)
-#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, drm_draw_t)
-#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, drm_draw_t)
-#define DRM_IOCTL_DMA DRM_IOWR(0x29, drm_dma_t)
-#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, drm_lock_t)
-#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, drm_lock_t)
-#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, drm_lock_t)
+#define DRM_IOCTL_VERSION DRM_IOWR(0x00, struct drm_version)
+#define DRM_IOCTL_GET_UNIQUE DRM_IOWR(0x01, struct drm_unique)
+#define DRM_IOCTL_GET_MAGIC DRM_IOR( 0x02, struct drm_auth)
+#define DRM_IOCTL_IRQ_BUSID DRM_IOWR(0x03, struct drm_irq_busid)
+#define DRM_IOCTL_GET_MAP DRM_IOWR(0x04, struct drm_map)
+#define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client)
+#define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats)
+#define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version)
+
+#define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique)
+#define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
+#define DRM_IOCTL_BLOCK DRM_IOWR(0x12, struct drm_block)
+#define DRM_IOCTL_UNBLOCK DRM_IOWR(0x13, struct drm_block)
+#define DRM_IOCTL_CONTROL DRM_IOW( 0x14, struct drm_control)
+#define DRM_IOCTL_ADD_MAP DRM_IOWR(0x15, struct drm_map)
+#define DRM_IOCTL_ADD_BUFS DRM_IOWR(0x16, struct drm_buf_desc)
+#define DRM_IOCTL_MARK_BUFS DRM_IOW( 0x17, struct drm_buf_desc)
+#define DRM_IOCTL_INFO_BUFS DRM_IOWR(0x18, struct drm_buf_info)
+#define DRM_IOCTL_MAP_BUFS DRM_IOWR(0x19, struct drm_buf_map)
+#define DRM_IOCTL_FREE_BUFS DRM_IOW( 0x1a, struct drm_buf_free)
+
+#define DRM_IOCTL_RM_MAP DRM_IOW( 0x1b, struct drm_map)
+
+#define DRM_IOCTL_SET_SAREA_CTX DRM_IOW( 0x1c, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+
+#define DRM_IOCTL_ADD_CTX DRM_IOWR(0x20, struct drm_ctx)
+#define DRM_IOCTL_RM_CTX DRM_IOWR(0x21, struct drm_ctx)
+#define DRM_IOCTL_MOD_CTX DRM_IOW( 0x22, struct drm_ctx)
+#define DRM_IOCTL_GET_CTX DRM_IOWR(0x23, struct drm_ctx)
+#define DRM_IOCTL_SWITCH_CTX DRM_IOW( 0x24, struct drm_ctx)
+#define DRM_IOCTL_NEW_CTX DRM_IOW( 0x25, struct drm_ctx)
+#define DRM_IOCTL_RES_CTX DRM_IOWR(0x26, struct drm_ctx_res)
+#define DRM_IOCTL_ADD_DRAW DRM_IOWR(0x27, struct drm_draw)
+#define DRM_IOCTL_RM_DRAW DRM_IOWR(0x28, struct drm_draw)
+#define DRM_IOCTL_DMA DRM_IOWR(0x29, struct drm_dma)
+#define DRM_IOCTL_LOCK DRM_IOW( 0x2a, struct drm_lock)
+#define DRM_IOCTL_UNLOCK DRM_IOW( 0x2b, struct drm_lock)
+#define DRM_IOCTL_FINISH DRM_IOW( 0x2c, struct drm_lock)
#define DRM_IOCTL_AGP_ACQUIRE DRM_IO( 0x30)
#define DRM_IOCTL_AGP_RELEASE DRM_IO( 0x31)
-#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, drm_agp_mode_t)
-#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, drm_agp_info_t)
-#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, drm_agp_buffer_t)
-#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, drm_agp_binding_t)
-#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, drm_agp_binding_t)
-
-#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, drm_scatter_gather_t)
-#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, drm_scatter_gather_t)
-
-#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, drm_wait_vblank_t)
-
-#define DRM_IOCTL_FENCE DRM_IOWR(0x3b, drm_fence_arg_t)
-#define DRM_IOCTL_BUFOBJ DRM_IOWR(0x3d, drm_bo_arg_t)
-#define DRM_IOCTL_MM_INIT DRM_IOWR(0x3e, drm_mm_init_arg_t)
-
-#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, drm_update_draw_t)
+#define DRM_IOCTL_AGP_ENABLE DRM_IOW( 0x32, struct drm_agp_mode)
+#define DRM_IOCTL_AGP_INFO DRM_IOR( 0x33, struct drm_agp_info)
+#define DRM_IOCTL_AGP_ALLOC DRM_IOWR(0x34, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_FREE DRM_IOW( 0x35, struct drm_agp_buffer)
+#define DRM_IOCTL_AGP_BIND DRM_IOW( 0x36, struct drm_agp_binding)
+#define DRM_IOCTL_AGP_UNBIND DRM_IOW( 0x37, struct drm_agp_binding)
+
+#define DRM_IOCTL_SG_ALLOC DRM_IOW( 0x38, struct drm_scatter_gather)
+#define DRM_IOCTL_SG_FREE DRM_IOW( 0x39, struct drm_scatter_gather)
+
+#define DRM_IOCTL_WAIT_VBLANK DRM_IOWR(0x3a, union drm_wait_vblank)
+
+#define DRM_IOCTL_UPDATE_DRAW DRM_IOW(0x3f, struct drm_update_draw)
+
+#define DRM_IOCTL_MM_INIT DRM_IOWR(0xc0, struct drm_mm_init_arg)
+#define DRM_IOCTL_MM_TAKEDOWN DRM_IOWR(0xc1, struct drm_mm_type_arg)
+#define DRM_IOCTL_MM_LOCK DRM_IOWR(0xc2, struct drm_mm_type_arg)
+#define DRM_IOCTL_MM_UNLOCK DRM_IOWR(0xc3, struct drm_mm_type_arg)
+
+#define DRM_IOCTL_FENCE_CREATE DRM_IOWR(0xc4, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_DESTROY DRM_IOWR(0xc5, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_REFERENCE DRM_IOWR(0xc6, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_UNREFERENCE DRM_IOWR(0xc7, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_SIGNALED DRM_IOWR(0xc8, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_FLUSH DRM_IOWR(0xc9, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_WAIT DRM_IOWR(0xca, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_EMIT DRM_IOWR(0xcb, struct drm_fence_arg)
+#define DRM_IOCTL_FENCE_BUFFERS DRM_IOWR(0xcc, struct drm_fence_arg)
+
+#define DRM_IOCTL_BO_CREATE DRM_IOWR(0xcd, struct drm_bo_create_arg)
+#define DRM_IOCTL_BO_DESTROY DRM_IOWR(0xce, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_MAP DRM_IOWR(0xcf, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_UNMAP DRM_IOWR(0xd0, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_REFERENCE DRM_IOWR(0xd1, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_UNREFERENCE DRM_IOWR(0xd2, struct drm_bo_handle_arg)
+#define DRM_IOCTL_BO_OP DRM_IOWR(0xd3, struct drm_bo_op_arg)
+#define DRM_IOCTL_BO_INFO DRM_IOWR(0xd4, struct drm_bo_reference_info_arg)
+#define DRM_IOCTL_BO_WAIT_IDLE DRM_IOWR(0xd5, struct drm_bo_map_wait_idle_arg)
+#define DRM_IOCTL_BO_SET_PIN DRM_IOWR(0xd6, struct drm_bo_set_pin_arg)
#define DRM_IOCTL_MODE_GETRESOURCES DRM_IOWR(0xA0, struct drm_mode_card_res)
#define DRM_IOCTL_MODE_GETCRTC DRM_IOWR(0xA1, struct drm_mode_crtc)
@@ -1072,4 +1107,53 @@ struct drm_mode_mode_cmd {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
+/* typedef area */
+#if !defined(__KERNEL__) || defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
+typedef struct drm_clip_rect drm_clip_rect_t;
+typedef struct drm_drawable_info drm_drawable_info_t;
+typedef struct drm_tex_region drm_tex_region_t;
+typedef struct drm_hw_lock drm_hw_lock_t;
+typedef struct drm_version drm_version_t;
+typedef struct drm_unique drm_unique_t;
+typedef struct drm_list drm_list_t;
+typedef struct drm_block drm_block_t;
+typedef struct drm_control drm_control_t;
+typedef enum drm_map_type drm_map_type_t;
+typedef enum drm_map_flags drm_map_flags_t;
+typedef struct drm_ctx_priv_map drm_ctx_priv_map_t;
+typedef struct drm_map drm_map_t;
+typedef struct drm_client drm_client_t;
+typedef enum drm_stat_type drm_stat_type_t;
+typedef struct drm_stats drm_stats_t;
+typedef enum drm_lock_flags drm_lock_flags_t;
+typedef struct drm_lock drm_lock_t;
+typedef enum drm_dma_flags drm_dma_flags_t;
+typedef struct drm_buf_desc drm_buf_desc_t;
+typedef struct drm_buf_info drm_buf_info_t;
+typedef struct drm_buf_free drm_buf_free_t;
+typedef struct drm_buf_pub drm_buf_pub_t;
+typedef struct drm_buf_map drm_buf_map_t;
+typedef struct drm_dma drm_dma_t;
+typedef union drm_wait_vblank drm_wait_vblank_t;
+typedef struct drm_agp_mode drm_agp_mode_t;
+typedef enum drm_ctx_flags drm_ctx_flags_t;
+typedef struct drm_ctx drm_ctx_t;
+typedef struct drm_ctx_res drm_ctx_res_t;
+typedef struct drm_draw drm_draw_t;
+typedef struct drm_update_draw drm_update_draw_t;
+typedef struct drm_auth drm_auth_t;
+typedef struct drm_irq_busid drm_irq_busid_t;
+typedef enum drm_vblank_seq_type drm_vblank_seq_type_t;
+typedef struct drm_agp_buffer drm_agp_buffer_t;
+typedef struct drm_agp_binding drm_agp_binding_t;
+typedef struct drm_agp_info drm_agp_info_t;
+typedef struct drm_scatter_gather drm_scatter_gather_t;
+typedef struct drm_set_version drm_set_version_t;
+
+typedef struct drm_fence_arg drm_fence_arg_t;
+typedef struct drm_mm_type_arg drm_mm_type_arg_t;
+typedef struct drm_mm_init_arg drm_mm_init_arg_t;
+typedef enum drm_bo_type drm_bo_type_t;
+#endif
+
#endif
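The ioctl table above also splits the old multiplexed fence and buffer-object ioctls (the removed DRM_IOCTL_FENCE and DRM_IOCTL_BUFOBJ with their op enums) into one command per operation in the 0xc0-0xd6 range. A small sketch of waiting on a fence with the new numbers, assuming the patched drm.h and an already-created fence handle; error paths are left out:

    #include <string.h>
    #include <sys/ioctl.h>
    #include "drm.h"

    static int fence_wait(int fd, unsigned int handle, unsigned int wait_flags)
    {
        struct drm_fence_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.handle = handle;
        arg.flags = wait_flags;   /* optional DRM_FENCE_FLAG_* bits, 0 for a plain wait */

        /* One ioctl per operation: the op member of the old drm_fence_arg_t
         * is gone, the operation is encoded in the ioctl number instead. */
        if (ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg) != 0)
            return -1;

        return arg.signaled;      /* fence types that have signaled so far */
    }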
diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt
index 126974d0..74e7e75a 100644
--- a/shared-core/drm_pciids.txt
+++ b/shared-core/drm_pciids.txt
@@ -482,9 +482,6 @@
0x10DE 0x009E NV40 "NVidia 0x009E"
[nouveau]
-0x10de 0x0008 NV_03 "EDGE 3D"
-0x10de 0x0009 NV_03 "EDGE 3D"
-0x10de 0x0010 NV_03 "Mutara V08"
0x10de 0x0020 NV_04 "RIVA TNT"
0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro"
0x10de 0x0029 NV_04 "RIVA TNT2 Ultra"
@@ -510,8 +507,9 @@
0x10de 0x0091 NV_40 "GeForce 7800 GTX"
0x10de 0x0092 NV_40 "GeForce 7800 GT"
0x10de 0x0093 NV_40 "GeForce 7800 GS"
+0x10de 0x0095 NV_40 "GeForce 7800 SLI"
0x10de 0x0098 NV_40 "GeForce Go 7800"
-0x10de 0x0099 NV_40 "GE Force Go 7800 GTX"
+0x10de 0x0099 NV_40 "GeForce Go 7800 GTX"
0x10de 0x009d NV_40 "Quadro FX4500"
0x10de 0x00a0 NV_04 "Aladdin TNT2"
0x10de 0x00c0 NV_40 "GeForce 6800 GS"
@@ -547,13 +545,16 @@
0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go"
0x10de 0x0140 NV_40 "GeForce 6600 GT"
0x10de 0x0141 NV_40 "GeForce 6600"
-0x10de 0x0142 NV_40 "GeForce 6600 PCIe"
+0x10de 0x0142 NV_40 "GeForce 6600 LE"
+0x10de 0x0143 NV_40 "GeForce 6600 VE"
0x10de 0x0144 NV_40 "GeForce Go 6600"
0x10de 0x0145 NV_40 "GeForce 6610 XL"
0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE"
+0x10de 0x0147 NV_40 "GeForce 6700 XL"
0x10de 0x0148 NV_40 "GeForce Go 6600"
0x10de 0x0149 NV_40 "GeForce Go 6600 GT"
0x10de 0x014a NV_40 "Quadro NVS 440"
+0x10de 0x014c NV_40 "Quadro FX 550"
0x10de 0x014d NV_17 "Quadro FX 550"
0x10de 0x014e NV_40 "Quadro FX 540"
0x10de 0x014f NV_40 "GeForce 6200"
@@ -561,6 +562,7 @@
0x10de 0x0151 NV_15 "GeForce2 Ti"
0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner"
0x10de 0x0153 NV_15 "Quadro2 Pro"
+0x10de 0x0160 NV_44 "GeForce 6500"
0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)"
0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)"
0x10de 0x0163 NV_44 "GeForce 6200 LE"
@@ -569,6 +571,7 @@
0x10de 0x0166 NV_44 "GeForce Go 6400"
0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache"
0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache"
+0x10de 0x0169 NV_44 "GeForce 6250"
0x10de 0x0170 NV_17 "GeForce4 MX 460"
0x10de 0x0171 NV_17 "GeForce4 MX 440"
0x10de 0x0172 NV_17 "GeForce4 MX 420"
@@ -601,11 +604,16 @@
0x10de 0x019e NV_50 "Quadro FX 4600"
0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics"
0x10de 0x01d1 NV_44 "GeForce 7300 LE"
+0x10de 0x01d3 NV_44 "Geforce 7300 SE"
0x10de 0x01d6 NV_44 "GeForce Go 7200"
0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300"
0x10de 0x01d8 NV_44 "GeForce Go 7400"
+0x10de 0x01d9 NV_44 "GeForce Go 7400 GS"
0x10de 0x01da NV_44 "Quadro NVS 110M"
+0x10de 0x01db NV_44 "Quadro NVS 120M"
0x10de 0x01dc NV_44 "Quadro FX 350M"
+0x10de 0x01dd NV_44 "GeForce 7500 LE"
+0x10de 0x01de NV_44 "Quadro FX 350"
0x10de 0x01df NV_44 "GeForce 7300 GS"
0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU"
0x10de 0x0200 NV_20 "GeForce3"
@@ -617,9 +625,12 @@
0x10de 0x0215 NV_40 "GeForce 6800 GT"
0x10de 0x0218 NV_40 "GeForce 6800 XT"
0x10de 0x0221 NV_44 "GeForce 6200"
+0x10de 0x0222 NV_44 "GeForce 6200 A-LE"
0x10de 0x0240 NV_44 "GeForce 6150"
+0x10de 0x0241 NV_44 "GeForce 6150 LE"
0x10de 0x0242 NV_44 "GeForce 6100"
-0x10de 0x0244 NV_44 "GeForce 6150 Go"
+0x10de 0x0244 NV_44 "GeForce Go 6150"
+0x10de 0x0247 NV_44 "GeForce Go 6100"
0x10de 0x0250 NV_25 "GeForce4 Ti 4600"
0x10de 0x0251 NV_25 "GeForce4 Ti 4400"
0x10de 0x0252 NV_25 "GeForce4 Ti"
@@ -700,7 +711,15 @@
0x10de 0x0391 NV_40 "GeForce 7600 GT"
0x10de 0x0392 NV_40 "GeForce 7600 GS"
0x10de 0x0393 NV_40 "GeForce 7300 GT"
+0x10de 0x0394 NV_40 "GeForce 7600 LE"
+0x10de 0x0395 NV_40 "GeForce 7300 GT"
+0x10de 0x0397 NV_40 "GeForce Go 7700"
0x10de 0x0398 NV_40 "GeForce Go 7600"
+0x10de 0x0399 NV_40 "GeForce Go 7600 GT"
+0x10de 0x039a NV_40 "Quadro NVS 300M"
+0x10de 0x039b NV_40 "GeForce Go 7900 SE"
+0x10de 0x039c NV_40 "Quadro FX 550M"
+0x10de 0x039e NV_40 "Quadro FX 560"
0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430"
0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405"
0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400"
@@ -710,13 +729,13 @@
0x10de 0x0421 NV_50 "GeForce 8500 GT"
0x10de 0x0422 NV_50 "GeForce 8400 GS"
0x10de 0x0423 NV_50 "GeForce 8300 GS"
-0x12d2 0x0008 NV_03 "NV1"
-0x12d2 0x0009 NV_03 "DAC64"
-0x12d2 0x0018 NV_03 "Riva128"
-0x12d2 0x0019 NV_03 "Riva128ZX"
+0x10de 0x0429 NV_50 "Quadro NVS 140"
0x12d2 0x0020 NV_04 "TNT"
0x12d2 0x0028 NV_04 "TNT2"
0x12d2 0x0029 NV_04 "UTNT2"
0x12d2 0x002c NV_04 "VTNT2"
0x12d2 0x00a0 NV_04 "ITNT2"
+[xgi]
+0x18ca 0x2200 0 "XP5"
+0x18ca 0x0047 0 "XP10 / XG47"
diff --git a/shared-core/drm_sarea.h b/shared-core/drm_sarea.h
index 43d1114f..34050a6d 100644
--- a/shared-core/drm_sarea.h
+++ b/shared-core/drm_sarea.h
@@ -50,29 +50,35 @@
#define SAREA_DRAWABLE_CLAIMED_ENTRY 0x80000000
/** SAREA drawable */
-typedef struct drm_sarea_drawable {
+struct drm_sarea_drawable {
unsigned int stamp;
unsigned int flags;
-} drm_sarea_drawable_t;
+};
/** SAREA frame */
-typedef struct drm_sarea_frame {
+struct drm_sarea_frame {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int fullscreen;
-} drm_sarea_frame_t;
+};
/** SAREA */
-typedef struct drm_sarea {
+struct drm_sarea {
/** first thing is always the DRM locking structure */
- drm_hw_lock_t lock;
+ struct drm_hw_lock lock;
/** \todo Use readers/writer lock for drm_sarea::drawable_lock */
- drm_hw_lock_t drawable_lock;
- drm_sarea_drawable_t drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
- drm_sarea_frame_t frame; /**< frame */
+ struct drm_hw_lock drawable_lock;
+ struct drm_sarea_drawable drawableTable[SAREA_MAX_DRAWABLES]; /**< drawables */
+ struct drm_sarea_frame frame; /**< frame */
drm_context_t dummy_context;
-} drm_sarea_t;
+};
+
+#ifndef __KERNEL__
+typedef struct drm_sarea_drawable drm_sarea_drawable_t;
+typedef struct drm_sarea_frame drm_sarea_frame_t;
+typedef struct drm_sarea drm_sarea_t;
+#endif
#endif /* _DRM_SAREA_H_ */
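The #ifndef __KERNEL__ block at the end of the new drm_sarea.h keeps legacy userspace building: outside the kernel the old typedef names remain available, while kernel code now has to use the struct tags directly. A small, hypothetical userspace snippet illustrating that the old spelling still compiles against the patched header:

    /* Old-style userspace code keeps building, because drm_sarea_t is
     * still typedef'd whenever __KERNEL__ is not defined. */
    #include "drm_sarea.h"

    static unsigned int first_drawable_stamp(drm_sarea_t *sarea)
    {
        /* Same layout as struct drm_sarea; only the spelling differs. */
        return sarea->drawableTable[0].stamp;
    }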
diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c
index 7dd68954..a653a06b 100644
--- a/shared-core/i915_dma.c
+++ b/shared-core/i915_dma.c
@@ -36,10 +36,10 @@
* the head pointer changes, so that EBUSY only happens if the ring
* actually stalls for (eg) 3 seconds.
*/
-int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
+int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
int i;
@@ -60,13 +60,13 @@ int i915_wait_ring(drm_device_t * dev, int n, const char *caller)
DRM_UDELAY(1);
}
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
-void i915_kernel_lost_context(drm_device_t * dev)
+void i915_kernel_lost_context(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_ring_buffer *ring = &(dev_priv->ring);
ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
@@ -78,9 +78,9 @@ void i915_kernel_lost_context(drm_device_t * dev)
dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
-int i915_dma_cleanup(drm_device_t * dev)
+int i915_dma_cleanup(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
@@ -94,18 +94,18 @@ int i915_dma_cleanup(drm_device_t * dev)
return 0;
}
-static int i915_initialize(drm_device_t * dev,
- drm_i915_private_t * dev_priv,
- drm_i915_init_t * init)
+static int i915_initialize(struct drm_device * dev,
+ struct drm_i915_private * dev_priv,
+ struct drm_i915_init * init)
{
- memset(dev_priv, 0, sizeof(drm_i915_private_t));
+ memset(dev_priv, 0, sizeof(struct drm_i915_private));
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
@@ -113,9 +113,35 @@ static int i915_initialize(drm_device_t * dev,
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("can not find mmio map!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
+ dev_priv->sarea_priv = (drm_i915_sarea_t *)
+ ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
+
+ dev_priv->ring.Start = init->ring_start;
+ dev_priv->ring.End = init->ring_end;
+ dev_priv->ring.Size = init->ring_size;
+ dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+
+ dev_priv->ring.map.offset = init->ring_start;
+ dev_priv->ring.map.size = init->ring_size;
+ dev_priv->ring.map.type = 0;
+ dev_priv->ring.map.flags = 0;
+ dev_priv->ring.map.mtrr = 0;
+
+ drm_core_ioremap(&dev_priv->ring.map, dev);
+
+ if (dev_priv->ring.map.handle == NULL) {
+ dev->dev_private = (void *)dev_priv;
+ i915_dma_cleanup(dev);
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
dev_priv->cpp = init->cpp;
dev_priv->sarea_priv->pf_current_page = 0;
@@ -132,42 +158,81 @@ static int i915_initialize(drm_device_t * dev,
*/
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
+ /* Program Hardware Status Page */
+ if (!IS_G33(dev)) {
+ dev_priv->status_page_dmah =
+ drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+ if (!dev_priv->status_page_dmah) {
+ dev->dev_private = (void *)dev_priv;
+ i915_dma_cleanup(dev);
+ DRM_ERROR("Can not allocate hardware status page\n");
+ return -ENOMEM;
+ }
+ dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+ dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+ memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+ I915_WRITE(0x02080, dev_priv->dma_status_page);
+ }
+ DRM_DEBUG("Enabled hardware status page\n");
dev->dev_private = (void *)dev_priv;
return 0;
}
-static int i915_dma_resume(drm_device_t * dev)
+static int i915_dma_resume(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
if (!dev_priv->sarea) {
DRM_ERROR("can not find sarea!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (!dev_priv->mmio_map) {
DRM_ERROR("can not find mmio map!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
+ if (dev_priv->ring.map.handle == NULL) {
+ DRM_ERROR("can not ioremap virtual address for"
+ " ring buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Program Hardware Status Page */
+ if (!dev_priv->hw_status_page) {
+ DRM_ERROR("Can not find hardware status page\n");
+ return -EINVAL;
+ }
+ DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+
+ if (dev_priv->status_gfx_addr != 0)
+ I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+ else
+ I915_WRITE(0x02080, dev_priv->dma_status_page);
+ DRM_DEBUG("Enabled hardware status page\n");
+
return 0;
}
-static int i915_dma_init(DRM_IOCTL_ARGS)
+static int i915_dma_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- drm_i915_init_t init;
+ struct drm_i915_private *dev_priv;
+ struct drm_i915_init *init = data;
int retcode = 0;
- DRM_COPY_FROM_USER_IOCTL(init, (drm_i915_init_t __user *) data,
- sizeof(init));
-
- switch (init.func) {
+ switch (init->func) {
case I915_INIT_DMA:
- retcode = i915_initialize(dev, dev_priv, &init);
+ dev_priv = drm_alloc(sizeof(struct drm_i915_private),
+ DRM_MEM_DRIVER);
+ if (dev_priv == NULL)
+ return -ENOMEM;
+ retcode = i915_initialize(dev, dev_priv, init);
break;
case I915_CLEANUP_DMA:
retcode = i915_dma_cleanup(dev);
@@ -176,7 +241,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS)
retcode = i915_dma_resume(dev);
break;
default:
- retcode = DRM_ERR(EINVAL);
+ retcode = -EINVAL;
break;
}
@@ -259,14 +324,15 @@ static int validate_cmd(int cmd)
return ret;
}
-static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
+static int i915_emit_cmds(struct drm_device * dev, int __user * buffer,
+ int dwords)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i;
RING_LOCALS;
if ((dwords+1) * sizeof(int) >= dev_priv->ring.Size - 8)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
BEGIN_LP_RING((dwords+1)&~1);
@@ -274,17 +340,17 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
int cmd, sz;
if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
- return DRM_ERR(EINVAL);
+ return -EINVAL;
if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
OUT_RING(cmd);
while (++i, --sz) {
if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
sizeof(cmd))) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
OUT_RING(cmd);
}
@@ -298,22 +364,22 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords)
return 0;
}
-static int i915_emit_box(drm_device_t * dev,
- drm_clip_rect_t __user * boxes,
+static int i915_emit_box(struct drm_device * dev,
+ struct drm_clip_rect __user * boxes,
int i, int DR1, int DR4)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t box;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_clip_rect box;
RING_LOCALS;
if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box.x1, box.y1, box.x2, box.y2);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (IS_I965G(dev)) {
@@ -341,9 +407,9 @@ static int i915_emit_box(drm_device_t * dev,
* emit. For now, do it in both places:
*/
-void i915_emit_breadcrumb(drm_device_t *dev)
+void i915_emit_breadcrumb(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
RING_LOCALS;
if (++dev_priv->counter > BREADCRUMB_MASK) {
@@ -362,9 +428,9 @@ void i915_emit_breadcrumb(drm_device_t *dev)
}
-int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
+int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t flush_cmd = CMD_MI_FLUSH;
RING_LOCALS;
@@ -383,18 +449,18 @@ int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush)
}
-static int i915_dispatch_cmdbuffer(drm_device_t * dev,
- drm_i915_cmdbuffer_t * cmd)
+static int i915_dispatch_cmdbuffer(struct drm_device * dev,
+ struct drm_i915_cmdbuffer * cmd)
{
#ifdef I915_HAVE_FENCE
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
#endif
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
if (cmd->sz & 0x3) {
DRM_ERROR("alignment");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
i915_kernel_lost_context(dev);
@@ -421,18 +487,18 @@ static int i915_dispatch_cmdbuffer(drm_device_t * dev,
return 0;
}
-static int i915_dispatch_batchbuffer(drm_device_t * dev,
+static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t __user *boxes = batch->cliprects;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_clip_rect __user *boxes = batch->cliprects;
int nbox = batch->num_cliprects;
int i = 0, count;
RING_LOCALS;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
i915_kernel_lost_context(dev);
@@ -449,9 +515,15 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
if (dev_priv->use_mi_batchbuffer_start) {
BEGIN_LP_RING(2);
- OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
- OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ if (IS_I965G(dev)) {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
+ OUT_RING(batch->start);
+ } else {
+ OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
+ OUT_RING(batch->start | MI_BATCH_NON_SECURE);
+ }
ADVANCE_LP_RING();
+
} else {
BEGIN_LP_RING(4);
OUT_RING(MI_BATCH_BUFFER);
@@ -469,11 +541,11 @@ static int i915_dispatch_batchbuffer(drm_device_t * dev,
return 0;
}
-static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync)
+static void i915_do_dispatch_flip(struct drm_device * dev, int plane, int sync)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 num_pages, current_page, next_page, dspbase;
- int shift = 2 * pipe, x, y;
+ int shift = 2 * plane, x, y;
RING_LOCALS;
/* Calculate display base offset */
@@ -494,25 +566,25 @@ static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync)
break;
}
- if (pipe == 0) {
- x = dev_priv->sarea_priv->pipeA_x;
- y = dev_priv->sarea_priv->pipeA_y;
+ if (plane == 0) {
+ x = dev_priv->sarea_priv->planeA_x;
+ y = dev_priv->sarea_priv->planeA_y;
} else {
- x = dev_priv->sarea_priv->pipeB_x;
- y = dev_priv->sarea_priv->pipeB_y;
+ x = dev_priv->sarea_priv->planeB_x;
+ y = dev_priv->sarea_priv->planeB_y;
}
dspbase += (y * dev_priv->sarea_priv->pitch + x) * dev_priv->cpp;
- DRM_DEBUG("pipe=%d current_page=%d dspbase=0x%x\n", pipe, current_page,
+ DRM_DEBUG("plane=%d current_page=%d dspbase=0x%x\n", plane, current_page,
dspbase);
BEGIN_LP_RING(4);
OUT_RING(sync ? 0 :
- (MI_WAIT_FOR_EVENT | (pipe ? MI_WAIT_FOR_PLANE_B_FLIP :
+ (MI_WAIT_FOR_EVENT | (plane ? MI_WAIT_FOR_PLANE_B_FLIP :
MI_WAIT_FOR_PLANE_A_FLIP)));
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | (sync ? 0 : ASYNC_FLIP) |
- (pipe ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
+ (plane ? DISPLAY_PLANE_B : DISPLAY_PLANE_A));
OUT_RING(dev_priv->sarea_priv->pitch * dev_priv->cpp);
OUT_RING(dspbase);
ADVANCE_LP_RING();
@@ -521,19 +593,19 @@ static void i915_do_dispatch_flip(drm_device_t * dev, int pipe, int sync)
dev_priv->sarea_priv->pf_current_page |= next_page << shift;
}
-void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync)
+void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
int i;
- DRM_DEBUG("%s: pipes=0x%x pfCurrentPage=%d\n",
+ DRM_DEBUG("%s: planes=0x%x pfCurrentPage=%d\n",
__FUNCTION__,
- pipes, dev_priv->sarea_priv->pf_current_page);
+ planes, dev_priv->sarea_priv->pf_current_page);
i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
for (i = 0; i < 2; i++)
- if (pipes & (1 << i))
+ if (planes & (1 << i))
i915_do_dispatch_flip(dev, i, sync);
i915_emit_breadcrumb(dev);
@@ -543,82 +615,76 @@ void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync)
#endif
}
-static int i915_quiescent(drm_device_t * dev)
+static int i915_quiescent(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
i915_kernel_lost_context(dev);
return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__);
}
-static int i915_flush_ioctl(DRM_IOCTL_ARGS)
+static int i915_flush_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return i915_quiescent(dev);
}
-static int i915_batchbuffer(DRM_IOCTL_ARGS)
+static int i915_batchbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
dev_priv->sarea_priv;
- drm_i915_batchbuffer_t batch;
+ drm_i915_batchbuffer_t *batch = data;
int ret;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(batch, (drm_i915_batchbuffer_t __user *) data,
- sizeof(batch));
-
DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
- batch.start, batch.used, batch.num_cliprects);
+ batch->start, batch->used, batch->num_cliprects);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (batch.num_cliprects && DRM_VERIFYAREA_READ(batch.cliprects,
- batch.num_cliprects *
- sizeof(drm_clip_rect_t)))
- return DRM_ERR(EFAULT);
+ if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
+ batch->num_cliprects *
+ sizeof(struct drm_clip_rect)))
+ return -EFAULT;
- ret = i915_dispatch_batchbuffer(dev, &batch);
+ ret = i915_dispatch_batchbuffer(dev, batch);
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return ret;
}
-static int i915_cmdbuffer(DRM_IOCTL_ARGS)
+static int i915_cmdbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_i915_sarea *sarea_priv = (struct drm_i915_sarea *)
dev_priv->sarea_priv;
- drm_i915_cmdbuffer_t cmdbuf;
+ struct drm_i915_cmdbuffer *cmdbuf = data;
int ret;
- DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_i915_cmdbuffer_t __user *) data,
- sizeof(cmdbuf));
-
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
- cmdbuf.buf, cmdbuf.sz, cmdbuf.num_cliprects);
+ cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (cmdbuf.num_cliprects &&
- DRM_VERIFYAREA_READ(cmdbuf.cliprects,
- cmdbuf.num_cliprects *
- sizeof(drm_clip_rect_t))) {
+ if (cmdbuf->num_cliprects &&
+ DRM_VERIFYAREA_READ(cmdbuf->cliprects,
+ cmdbuf->num_cliprects *
+ sizeof(struct drm_clip_rect))) {
DRM_ERROR("Fault accessing cliprects\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
- ret = i915_dispatch_cmdbuffer(dev, &cmdbuf);
+ ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
return ret;
@@ -628,68 +694,61 @@ static int i915_cmdbuffer(DRM_IOCTL_ARGS)
return 0;
}
-static int i915_do_cleanup_pageflip(drm_device_t * dev)
+int i915_do_cleanup_pageflip(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int i, pipes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
DRM_DEBUG("%s\n", __FUNCTION__);
- for (i = 0, pipes = 0; i < 2; i++)
+ for (i = 0, planes = 0; i < 2; i++)
if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
dev_priv->sarea_priv->pf_current_page =
(dev_priv->sarea_priv->pf_current_page &
~(0x3 << (2 * i))) | (num_pages - 1) << (2 * i);
- pipes |= 1 << i;
+ planes |= 1 << i;
}
- if (pipes)
- i915_dispatch_flip(dev, pipes, 0);
+ if (planes)
+ i915_dispatch_flip(dev, planes, 0);
return 0;
}
-static int i915_flip_bufs(DRM_IOCTL_ARGS)
+static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_flip_t param;
+ struct drm_i915_flip *param = data;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_flip_t __user *) data,
- sizeof(param));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (param.pipes & ~0x3) {
- DRM_ERROR("Invalid pipes 0x%x, only <= 0x3 is valid\n",
- param.pipes);
- return DRM_ERR(EINVAL);
+ if (param->planes & ~0x3) {
+ DRM_ERROR("Invalid planes 0x%x, only <= 0x3 is valid\n",
+ param->planes);
+ return -EINVAL;
}
- i915_dispatch_flip(dev, param.pipes, 0);
+ i915_dispatch_flip(dev, param->planes, 0);
return 0;
}
-static int i915_getparam(DRM_IOCTL_ARGS)
+static int i915_getparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_getparam_t param;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_getparam *param = data;
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_getparam_t __user *) data,
- sizeof(param));
-
- switch (param.param) {
+ switch (param->param) {
case I915_PARAM_IRQ_ACTIVE:
value = dev->irq ? 1 : 0;
break;
@@ -700,45 +759,42 @@ static int i915_getparam(DRM_IOCTL_ARGS)
value = READ_BREADCRUMB(dev_priv);
break;
default:
- DRM_ERROR("Unknown parameter %d\n", param.param);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("Unknown parameter %d\n", param->param);
+ return -EINVAL;
}
- if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("DRM_COPY_TO_USER failed\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
}
-static int i915_setparam(DRM_IOCTL_ARGS)
+static int i915_setparam(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_setparam_t param;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_setparam_t *param = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(param, (drm_i915_setparam_t __user *) data,
- sizeof(param));
-
- switch (param.param) {
+ switch (param->param) {
case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
- dev_priv->use_mi_batchbuffer_start = param.value;
+ dev_priv->use_mi_batchbuffer_start = param->value;
break;
case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
- dev_priv->tex_lru_log_granularity = param.value;
+ dev_priv->tex_lru_log_granularity = param->value;
break;
case I915_SETPARAM_ALLOW_BATCHBUFFER:
- dev_priv->allow_batchbuffer = param.value;
+ dev_priv->allow_batchbuffer = param->value;
break;
default:
- DRM_ERROR("unknown parameter %d\n", param.param);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("unknown parameter %d\n", param->param);
+ return -EINVAL;
}
return 0;
@@ -754,68 +810,68 @@ drm_i915_mmio_entry_t mmio_table[] = {
static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
-static int i915_mmio(DRM_IOCTL_ARGS)
+static int i915_mmio(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- char buf[32];
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
+ uint32_t buf[8];
+ struct drm_i915_private *dev_priv = dev->dev_private;
drm_i915_mmio_entry_t *e;
- drm_i915_mmio_t mmio;
+ drm_i915_mmio_t *mmio = data;
void __iomem *base;
+ int i;
+
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(mmio, (drm_i915_mmio_t __user *) data,
- sizeof(mmio));
- if (mmio.reg >= mmio_table_size)
- return DRM_ERR(EINVAL);
+ if (mmio->reg >= mmio_table_size)
+ return -EINVAL;
- e = &mmio_table[mmio.reg];
+ e = &mmio_table[mmio->reg];
base = (u8 *) dev_priv->mmio_map->handle + e->offset;
- switch (mmio.read_write) {
+ switch (mmio->read_write) {
case I915_MMIO_READ:
if (!(e->flag & I915_MMIO_MAY_READ))
- return DRM_ERR(EINVAL);
- memcpy_fromio(buf, base, e->size);
- if (DRM_COPY_TO_USER(mmio.data, buf, e->size)) {
+ return -EINVAL;
+ for (i = 0; i < e->size / 4; i++)
+ buf[i] = I915_READ(e->offset + i * 4);
+ if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
DRM_ERROR("DRM_COPY_TO_USER failed\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
break;
case I915_MMIO_WRITE:
if (!(e->flag & I915_MMIO_MAY_WRITE))
- return DRM_ERR(EINVAL);
- if(DRM_COPY_FROM_USER(buf, mmio.data, e->size)) {
+ return -EINVAL;
+ if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
DRM_ERROR("DRM_COPY_TO_USER failed\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
- memcpy_toio(base, buf, e->size);
+ for (i = 0; i < e->size / 4; i++)
+ I915_WRITE(e->offset + i * 4, buf[i]);
break;
}
return 0;
}
-static int i915_set_status_page(DRM_IOCTL_ARGS)
+static int i915_set_status_page(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_hws_addr_t hws;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ drm_i915_hws_addr_t *hws = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(hws, (drm_i915_hws_addr_t __user *) data,
- sizeof(hws));
- printk(KERN_DEBUG "set status page addr 0x%08x\n", (u32)hws.addr);
+ DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr);
- dev_priv->status_gfx_addr = hws.addr & (0x1ffff<<12);
+ dev_priv->status_gfx_addr = hws->addr & (0x1ffff<<12);
- dev_priv->hws_map.offset = dev->agp->agp_info.aper_base + hws.addr;
+ dev_priv->hws_map.offset = dev->agp->base + hws->addr;
dev_priv->hws_map.size = 4*1024;
dev_priv->hws_map.type = 0;
dev_priv->hws_map.flags = 0;
@@ -828,7 +884,7 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
dev_priv->status_gfx_addr = 0;
DRM_ERROR("can not ioremap virtual address for"
" G33 hw status page\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->hws_map.handle;
@@ -840,25 +896,25 @@ static int i915_set_status_page(DRM_IOCTL_ARGS)
return 0;
}
-drm_ioctl_desc_t i915_ioctls[] = {
- [DRM_IOCTL_NR(DRM_I915_INIT)] = {i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_FLUSH)] = {i915_flush_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_FLIP)] = {i915_flip_bufs, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_BATCHBUFFER)] = {i915_batchbuffer, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_IRQ_EMIT)] = {i915_irq_emit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_IRQ_WAIT)] = {i915_irq_wait, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_GETPARAM)] = {i915_getparam, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_SETPARAM)] = {i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_ALLOC)] = {i915_mem_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_FREE)] = {i915_mem_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_INIT_HEAP)] = {i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_I915_CMDBUFFER)] = {i915_cmdbuffer, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_DESTROY_HEAP)] = { i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_I915_SET_VBLANK_PIPE)] = { i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY },
- [DRM_IOCTL_NR(DRM_I915_GET_VBLANK_PIPE)] = { i915_vblank_pipe_get, DRM_AUTH },
- [DRM_IOCTL_NR(DRM_I915_VBLANK_SWAP)] = {i915_vblank_swap, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_MMIO)] = {i915_mmio, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_I915_HWS_ADDR)] = {i915_set_status_page, DRM_AUTH},
+struct drm_ioctl_desc i915_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_FLIP, i915_flip_bufs, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_GETPARAM, i915_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_ALLOC, i915_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_FREE, i915_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+ DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
+ DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ),
+ DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_MMIO, i915_mmio, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
@@ -874,7 +930,7 @@ int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
* \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
*/
-int i915_driver_device_is_agp(drm_device_t * dev)
+int i915_driver_device_is_agp(struct drm_device * dev)
{
return 1;
}
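
The conversions in i915_dma.c above all follow one pattern: the DRM_IOCTL_ARGS/DRM_DEVICE boilerplate is gone, handlers take an explicit (dev, data, file_priv) triple, errors are returned as negative errno values instead of DRM_ERR(), and the DRM core now copies the ioctl payload in (and back out, for ioctls declared with a read direction), so the DRM_COPY_FROM_USER_IOCTL/DRM_COPY_TO_USER_IOCTL calls disappear. A minimal sketch of a handler in the new style; i915_example, struct drm_i915_example and DRM_I915_EXAMPLE are hypothetical names used only to show the shape, not code from this tree:

    static int i915_example(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            struct drm_i915_example *args = data;    /* already copied in by the core */

            if (!dev_priv)
                    return -EINVAL;                  /* negative errno, not DRM_ERR(EINVAL) */

            args->value = READ_BREADCRUMB(dev_priv); /* copied back out by the core */
            return 0;
    }

    /* ...and the matching table entry: */
    DRM_IOCTL_DEF(DRM_I915_EXAMPLE, i915_example, DRM_AUTH),
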
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index 1c6ff4d3..8022bc91 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -39,7 +39,7 @@
* of chars for next/prev indices */
#define I915_LOG_MIN_TEX_REGION_SIZE 14
-typedef struct _drm_i915_init {
+typedef struct drm_i915_init {
enum {
I915_INIT_DMA = 0x01,
I915_CLEANUP_DMA = 0x02,
@@ -63,8 +63,8 @@ typedef struct _drm_i915_init {
unsigned int chipset;
} drm_i915_init_t;
-typedef struct _drm_i915_sarea {
- drm_tex_region_t texList[I915_NR_TEX_REGIONS + 1];
+typedef struct drm_i915_sarea {
+ struct drm_tex_region texList[I915_NR_TEX_REGIONS + 1];
int last_upload; /* last time texture was uploaded */
int last_enqueue; /* last time a buffer was enqueued */
int last_dispatch; /* age of the most recently dispatched buffer */
@@ -105,14 +105,14 @@ typedef struct _drm_i915_sarea {
unsigned int rotated_tiled;
unsigned int rotated2_tiled;
- int pipeA_x;
- int pipeA_y;
- int pipeA_w;
- int pipeA_h;
- int pipeB_x;
- int pipeB_y;
- int pipeB_w;
- int pipeB_h;
+ int planeA_x;
+ int planeA_y;
+ int planeA_w;
+ int planeA_h;
+ int planeB_x;
+ int planeB_y;
+ int planeB_w;
+ int planeB_h;
/* Triple buffering */
drm_handle_t third_handle;
@@ -182,31 +182,31 @@ typedef struct _drm_i915_sarea {
/* Asynchronous page flipping:
*/
typedef struct drm_i915_flip {
- int pipes;
+ int planes;
} drm_i915_flip_t;
/* Allow drivers to submit batchbuffers directly to hardware, relying
* on the security mechanisms provided by hardware.
*/
-typedef struct _drm_i915_batchbuffer {
+typedef struct drm_i915_batchbuffer {
int start; /* agp offset */
int used; /* nr bytes in use */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
 int num_cliprects; /* multipass with multiple cliprects? */
- drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_batchbuffer_t;
/* As above, but pass a pointer to userspace buffer which can be
* validated by the kernel prior to sending to hardware.
*/
-typedef struct _drm_i915_cmdbuffer {
+typedef struct drm_i915_cmdbuffer {
char __user *buf; /* pointer to userspace command buffer */
int sz; /* nr bytes in buf */
int DR1; /* hw flags for GFX_OP_DRAWRECT_INFO */
int DR4; /* window origin for GFX_OP_DRAWRECT_INFO */
 int num_cliprects; /* multipass with multiple cliprects? */
- drm_clip_rect_t __user *cliprects; /* pointer to userspace cliprects */
+ struct drm_clip_rect __user *cliprects; /* pointer to userspace cliprects */
} drm_i915_cmdbuffer_t;
/* Userspace can request & wait on irq's:
@@ -283,7 +283,7 @@ typedef struct drm_i915_vblank_pipe {
*/
typedef struct drm_i915_vblank_swap {
drm_drawable_t drawable;
- drm_vblank_seq_type_t seqtype;
+ enum drm_vblank_seq_type seqtype;
unsigned int sequence;
} drm_i915_vblank_swap_t;
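
The renames above (pipeA_*/pipeB_* to planeA_*/planeB_* in the SAREA, and drm_i915_flip.pipes to planes) make explicit that userspace addresses display planes, while the kernel now resolves which pipe a plane is actually wired to. The vblank-swap ioctl keeps its layout, so clients call it exactly as before. A rough userspace sketch, assuming an already-open DRM file descriptor and a valid drawable ID (both placeholders here):

    #include <stdio.h>
    #include <xf86drm.h>
    #include "i915_drm.h"   /* include path depends on the build setup */

    static int schedule_swap(int fd, unsigned int drawable_id)
    {
            drm_i915_vblank_swap_t swap = {
                    .drawable = drawable_id,        /* placeholder drawable */
                    .seqtype  = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_SECONDARY, /* plane B */
                    .sequence = 1,                  /* one vblank from now */
            };

            /* drmCommandWriteRead() copies the struct in and back out. */
            if (drmCommandWriteRead(fd, DRM_I915_VBLANK_SWAP, &swap, sizeof(swap)) != 0) {
                    fprintf(stderr, "DRM_I915_VBLANK_SWAP failed\n");
                    return -1;
            }
            return 0;       /* swap.sequence now holds the scheduled vblank */
    }
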
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 35e5be1c..d6b64b61 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -39,6 +39,11 @@
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20070209"
+#if defined(__linux__)
+#define I915_HAVE_FENCE
+#define I915_HAVE_BUFFER
+#endif
+
/* Interface history:
*
* 1.1: Original.
@@ -50,17 +55,17 @@
* - Support vertical blank on secondary display pipe
* 1.8: New ioctl for ARB_Occlusion_Query
* 1.9: Usable page flipping and triple buffering
+ * 1.10: Plane/pipe disentangling
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 9
-#define DRIVER_PATCHLEVEL 0
-
-#if defined(__linux__)
-#define I915_HAVE_FENCE
-#define I915_HAVE_BUFFER
+#if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER)
+#define DRIVER_MINOR 10
+#else
+#define DRIVER_MINOR 6
#endif
+#define DRIVER_PATCHLEVEL 0
-typedef struct _drm_i915_ring_buffer {
+struct drm_i915_ring_buffer {
int tail_mask;
unsigned long Start;
unsigned long End;
@@ -70,36 +75,36 @@ typedef struct _drm_i915_ring_buffer {
int tail;
int space;
drm_local_map_t map;
-} drm_i915_ring_buffer_t;
+};
struct mem_block {
struct mem_block *next;
struct mem_block *prev;
int start;
int size;
- DRMFILE filp; /* 0: free, -1: heap, other: real files */
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
-typedef struct _drm_i915_vbl_swap {
+struct drm_i915_vbl_swap {
struct list_head head;
drm_drawable_t drw_id;
- unsigned int pipe;
+ unsigned int plane;
unsigned int sequence;
int flip;
-} drm_i915_vbl_swap_t;
+};
-typedef struct drm_i915_private {
- drm_buffer_object_t *ring_buffer;
+struct drm_i915_private {
+ struct drm_buffer_object *ring_buffer;
drm_local_map_t *sarea;
drm_local_map_t *mmio_map;
unsigned long mmiobase;
unsigned long mmiolen;
- drm_i915_sarea_t *sarea_priv;
- drm_i915_ring_buffer_t ring;
+ struct drm_i915_sarea *sarea_priv;
+ struct drm_i915_ring_buffer ring;
- drm_dma_handle_t *status_page_dmah;
+ struct drm_dma_handle *status_page_dmah;
void *hw_status_page;
dma_addr_t dma_status_page;
uint32_t counter;
@@ -118,7 +123,7 @@ typedef struct drm_i915_private {
struct mem_block *agp_heap;
unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
int vblank_pipe;
- spinlock_t user_irq_lock;
+ DRM_SPINTYPE user_irq_lock;
int user_irq_refcount;
int fence_irq_on;
uint32_t irq_enable_reg;
@@ -133,8 +138,8 @@ typedef struct drm_i915_private {
#ifdef I915_HAVE_BUFFER
void *agp_iomap;
#endif
- spinlock_t swaps_lock;
- drm_i915_vbl_swap_t vbl_swaps;
+ DRM_SPINTYPE swaps_lock;
+ struct drm_i915_vbl_swap vbl_swaps;
unsigned int swaps_pending;
/* LVDS info */
@@ -197,7 +202,7 @@ typedef struct drm_i915_private {
u32 savePaletteB[256];
u32 saveSWF[17];
u32 saveBLC_PWM_CTL;
-} drm_i915_private_t;
+};
enum intel_chip_family {
CHIP_I8XX = 0x01,
@@ -206,80 +211,93 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
-extern drm_ioctl_desc_t i915_ioctls[];
+extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
/* i915_dma.c */
-extern void i915_kernel_lost_context(drm_device_t * dev);
+extern void i915_kernel_lost_context(struct drm_device * dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
-extern int i915_driver_unload(drm_device_t *dev);
-extern void i915_driver_lastclose(drm_device_t * dev);
-extern void i915_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern int i915_driver_device_is_agp(drm_device_t * dev);
+extern int i915_driver_unload(struct drm_device *dev);
+extern void i915_driver_lastclose(struct drm_device * dev);
+extern void i915_driver_preclose(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
-extern void i915_emit_breadcrumb(drm_device_t *dev);
-extern void i915_dispatch_flip(drm_device_t * dev, int pipes, int sync);
-extern int i915_emit_mi_flush(drm_device_t *dev, uint32_t flush);
-extern int i915_dma_cleanup(drm_device_t * dev);
+extern void i915_emit_breadcrumb(struct drm_device *dev);
+extern void i915_dispatch_flip(struct drm_device * dev, int pipes, int sync);
+extern int i915_emit_mi_flush(struct drm_device *dev, uint32_t flush);
+extern int i915_driver_firstopen(struct drm_device *dev);
+extern int i915_do_cleanup_pageflip(struct drm_device *dev);
+extern int i915_dma_cleanup(struct drm_device *dev);
/* i915_irq.c */
-extern int i915_irq_emit(DRM_IOCTL_ARGS);
-extern int i915_irq_wait(DRM_IOCTL_ARGS);
-
-extern void i915_driver_wait_next_vblank(drm_device_t *dev, int pipe);
-extern int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence);
-extern int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence);
+extern int i915_irq_emit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+extern void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe);
+extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence);
+extern int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
-extern void i915_driver_irq_preinstall(drm_device_t * dev);
-extern void i915_driver_irq_postinstall(drm_device_t * dev);
-extern void i915_driver_irq_uninstall(drm_device_t * dev);
-extern int i915_vblank_pipe_set(DRM_IOCTL_ARGS);
-extern int i915_vblank_pipe_get(DRM_IOCTL_ARGS);
-extern int i915_emit_irq(drm_device_t * dev);
-extern void i915_user_irq_on(drm_i915_private_t *dev_priv);
-extern void i915_user_irq_off(drm_i915_private_t *dev_priv);
-extern void i915_enable_interrupt (drm_device_t *dev);
-extern int i915_vblank_swap(DRM_IOCTL_ARGS);
+extern void i915_driver_irq_preinstall(struct drm_device * dev);
+extern void i915_driver_irq_postinstall(struct drm_device * dev);
+extern void i915_driver_irq_uninstall(struct drm_device * dev);
+extern int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_emit_irq(struct drm_device * dev);
+extern void i915_user_irq_on(struct drm_i915_private *dev_priv);
+extern void i915_user_irq_off(struct drm_i915_private *dev_priv);
+extern void i915_enable_interrupt (struct drm_device *dev);
+extern int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* i915_mem.c */
-extern int i915_mem_alloc(DRM_IOCTL_ARGS);
-extern int i915_mem_free(DRM_IOCTL_ARGS);
-extern int i915_mem_init_heap(DRM_IOCTL_ARGS);
-extern int i915_mem_destroy_heap(DRM_IOCTL_ARGS);
+extern int i915_mem_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_mem_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_mem_init_heap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
extern void i915_mem_takedown(struct mem_block **heap);
-extern void i915_mem_release(drm_device_t * dev,
- DRMFILE filp, struct mem_block *heap);
+extern void i915_mem_release(struct drm_device * dev,
+ struct drm_file *file_priv,
+ struct mem_block *heap);
#ifdef I915_HAVE_FENCE
/* i915_fence.c */
-extern void i915_fence_handler(drm_device_t *dev);
-extern int i915_fence_emit_sequence(drm_device_t *dev, uint32_t class,
+extern void i915_fence_handler(struct drm_device *dev);
+extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
uint32_t flags,
uint32_t *sequence,
uint32_t *native_type);
-extern void i915_poke_flush(drm_device_t *dev, uint32_t class);
-extern int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags);
+extern void i915_poke_flush(struct drm_device *dev, uint32_t class);
+extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags);
#endif
#ifdef I915_HAVE_BUFFER
/* i915_buffer.c */
-extern drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t *dev);
-extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
-extern int i915_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
-extern int i915_init_mem_type(drm_device_t *dev, uint32_t type,
- drm_mem_type_manager_t *man);
-extern uint32_t i915_evict_mask(drm_buffer_object_t *bo);
-extern int i915_move(drm_buffer_object_t *bo, int evict,
- int no_wait, drm_bo_mem_reg_t *new_mem);
+extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
+extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
+ struct drm_mem_type_manager *man);
+extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
+extern int i915_move(struct drm_buffer_object *bo, int evict,
+ int no_wait, struct drm_bo_mem_reg *new_mem);
#endif
/* modesetting */
-extern void intel_modeset_init(drm_device_t *dev);
-extern void intel_modeset_cleanup(drm_device_t *dev);
+extern void intel_modeset_init(struct drm_device *dev);
+extern void intel_modeset_cleanup(struct drm_device *dev);
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
@@ -321,7 +339,7 @@ extern void intel_modeset_cleanup(drm_device_t *dev);
#define MI_NOOP (0x00 << 23)
-extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
+extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
/*
* The Bridge device's PCI config space has information about the
@@ -354,6 +372,12 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define MI_NO_WRITE_FLUSH (1 << 2)
#define MI_READ_FLUSH (1 << 0)
#define MI_EXE_FLUSH (1 << 1)
+#define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */
+#define MI_SCENE_COUNT (1 << 3) /* just increment scene count */
+
+/* Packet to load a register value from the ring/batch command stream:
+ */
+#define CMD_MI_LOAD_REGISTER_IMM ((0x22 << 23)|0x1)
#define BB1_START_ADDR_MASK (~0x7)
#define BB1_PROTECTED (1<<0)
@@ -361,6 +385,14 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define BB2_END_ADDR_MASK (~0x7)
#define I915REG_HWS_PGA 0x02080
+
+/* Interrupt bits:
+ */
+#define USER_INT_FLAG (1<<1)
+#define VSYNC_PIPEB_FLAG (1<<5)
+#define VSYNC_PIPEA_FLAG (1<<7)
+#define HWB_OOM_FLAG (1<<13) /* binner out of memory */
+
#define I915REG_HWSTAM 0x02098
#define I915REG_INT_IDENTITY_R 0x020a4
#define I915REG_INT_MASK_R 0x020a8
@@ -457,6 +489,10 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define NOPID 0x2094
#define LP_RING 0x2030
#define HP_RING 0x2040
+/* The binner has its own ring buffer:
+ */
+#define HWB_RING 0x2400
+
#define RING_TAIL 0x00
#define TAIL_ADDR 0x001FFFF8
#define RING_HEAD 0x04
@@ -475,11 +511,105 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define RING_VALID 0x00000001
#define RING_INVALID 0x00000000
+/* Instruction parser error reg:
+ */
+#define IPEIR 0x2088
+
+/* Scratch pad debug 0 reg:
+ */
+#define SCPD0 0x209c
+
+/* Error status reg:
+ */
+#define ESR 0x20b8
+
+/* Secondary DMA fetch address debug reg:
+ */
+#define DMA_FADD_S 0x20d4
+
+/* Cache mode 0 reg.
+ * - Manipulating render cache behaviour is central
+ * to the concept of zone rendering; tuning this reg can help avoid
+ * unnecessary render cache reads and even writes (for z/stencil)
+ * at beginning and end of scene.
+ *
+ * - To change a bit, write to this reg with a mask bit set and the
+ * bit of interest either set or cleared. E.g.: (BIT<<16) | BIT to set.
+ */
+#define Cache_Mode_0 0x2120
+#define CM0_MASK_SHIFT 16
+#define CM0_IZ_OPT_DISABLE (1<<6)
+#define CM0_ZR_OPT_DISABLE (1<<5)
+#define CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define CM0_COLOR_EVICT_DISABLE (1<<3)
+#define CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+
+
+/* Graphics flush control. A CPU write flushes the GWB of all writes.
+ * The data is discarded.
+ */
+#define GFX_FLSH_CNTL 0x2170
+
+/* Binner control. Defines the location of the bin pointer list:
+ */
+#define BINCTL 0x2420
+#define BC_MASK (1 << 9)
+
+/* Binned scene info.
+ */
+#define BINSCENE 0x2428
+#define BS_OP_LOAD (1 << 8)
+#define BS_MASK (1 << 22)
+
+/* Bin command parser debug reg:
+ */
+#define BCPD 0x2480
+
+/* Bin memory control debug reg:
+ */
+#define BMCD 0x2484
+
+/* Bin data cache debug reg:
+ */
+#define BDCD 0x2488
+
+/* Binner pointer cache debug reg:
+ */
+#define BPCD 0x248c
+
+/* Binner scratch pad debug reg:
+ */
+#define BINSKPD 0x24f0
+
+/* HWB scratch pad debug reg:
+ */
+#define HWBSKPD 0x24f4
+
+/* Binner memory pool reg:
+ */
+#define BMP_BUFFER 0x2430
+#define BMP_PAGE_SIZE_4K (0 << 10)
+#define BMP_BUFFER_SIZE_SHIFT 1
+#define BMP_ENABLE (1 << 0)
+
+/* Get/put memory from the binner memory pool:
+ */
+#define BMP_GET 0x2438
+#define BMP_PUT 0x2440
+#define BMP_OFFSET_SHIFT 5
+
+/* 3D state packets:
+ */
+#define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24))
+
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
#define SC_ENABLE_MASK (0x1<<0)
#define SC_ENABLE (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+
#define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
#define SCI_YMIN_MASK (0xffff<<16)
#define SCI_XMIN_MASK (0xffff<<0)
@@ -512,6 +642,8 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define MI_BATCH_BUFFER_END (0xA<<23)
#define MI_BATCH_NON_SECURE (1)
+#define MI_BATCH_NON_SECURE_I965 (1<<8)
+
#define MI_WAIT_FOR_EVENT ((0x3<<23))
#define MI_WAIT_FOR_PLANE_B_FLIP (1<<6)
#define MI_WAIT_FOR_PLANE_A_FLIP (1<<2)
@@ -524,6 +656,15 @@ extern int i915_wait_ring(drm_device_t * dev, int n, const char *caller);
#define DISPLAY_PLANE_A (0<<20)
#define DISPLAY_PLANE_B (1<<20)
+/* Display regs */
+#define DSPACNTR 0x70180
+#define DSPBCNTR 0x71180
+#define DISPPLANE_SEL_PIPE_MASK (1<<24)
+
+/* Define the region of interest for the binner:
+ */
+#define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4)
+
#define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
#define BREADCRUMB_BITS 31
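
The Cache_Mode_0 comment above documents the masked-write convention: the upper 16 bits of the value select which bits the write affects, the lower 16 bits carry the new state, and unselected bits are left untouched. A minimal sketch using the I915_READ/I915_WRITE accessors defined in this header (dev_priv assumed in scope, as in the driver):

    /* Disable the IZ optimization: mask bit 6 and set it. */
    I915_WRITE(Cache_Mode_0,
               (CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT) | CM0_IZ_OPT_DISABLE);

    /* Re-enable it: mask bit 6 but leave the value bit clear. */
    I915_WRITE(Cache_Mode_0, CM0_IZ_OPT_DISABLE << CM0_MASK_SHIFT);

    /* Reads are ordinary 32-bit register reads. */
    uint32_t cm0 = I915_READ(Cache_Mode_0);
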
diff --git a/shared-core/i915_init.c b/shared-core/i915_init.c
index fd102b35..065afcdf 100644
--- a/shared-core/i915_init.c
+++ b/shared-core/i915_init.c
@@ -109,18 +109,18 @@ int i915_probe_agp(struct pci_dev *pdev, unsigned long *aperture_size,
* - allocate initial config memory
* - setup the DRM framebuffer with the allocated memory
*/
-int i915_driver_load(drm_device_t *dev, unsigned long flags)
+int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
- drm_i915_private_t *dev_priv;
+ struct drm_i915_private *dev_priv;
unsigned long agp_size, prealloc_size;
unsigned long sareapage;
int size, ret;
- dev_priv = drm_alloc(sizeof(drm_i915_private_t), DRM_MEM_DRIVER);
+ dev_priv = drm_alloc(sizeof(struct drm_i915_private), DRM_MEM_DRIVER);
if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
- memset(dev_priv, 0, sizeof(drm_i915_private_t));
+ memset(dev_priv, 0, sizeof(struct drm_i915_private));
dev->dev_private = (void *)dev_priv;
// dev_priv->flags = flags;
@@ -167,7 +167,7 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
init_waitqueue_head(&dev->lock.lock_queue);
/* FIXME: assume sarea_priv is right after SAREA */
- dev_priv->sarea_priv = dev_priv->sarea->handle + sizeof(drm_sarea_t);
+ dev_priv->sarea_priv = dev_priv->sarea->handle + sizeof(struct drm_sarea);
/*
* Initialize the memory manager for local and AGP space
@@ -186,14 +186,19 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
DRM_BO_FLAG_MEM_VRAM |
- DRM_BO_FLAG_NO_EVICT,
- DRM_BO_HINT_DONT_FENCE, 0x1, 0,
+ DRM_BO_HINT_DONT_FENCE, 0, 0x1, 0,
&dev_priv->ring_buffer);
if (ret < 0) {
DRM_ERROR("Unable to allocate ring buffer\n");
return -EINVAL;
}
+ ret = drm_bo_set_pin(dev, dev_priv->ring_buffer, 1);
+ if (ret < 0) {
+ DRM_ERROR("Unable to pin ring buffer\n");
+ return -EINVAL;
+ }
+
/* remap the buffer object properly */
dev_priv->ring.Start = dev_priv->ring_buffer->offset;
dev_priv->ring.End = dev_priv->ring.Start + size;
@@ -236,7 +241,7 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
dev->dev_private = (void *)dev_priv;
i915_dma_cleanup(dev);
DRM_ERROR("Can not allocate hardware status page\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
@@ -253,9 +258,9 @@ int i915_driver_load(drm_device_t *dev, unsigned long flags)
return 0;
}
-int i915_driver_unload(drm_device_t *dev)
+int i915_driver_unload(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
if (dev_priv->ring.virtual_start) {
drm_core_ioremapfree(&dev_priv->ring.map, dev);
@@ -305,18 +310,20 @@ int i915_driver_unload(drm_device_t *dev)
return 0;
}
-void i915_driver_lastclose(drm_device_t * dev)
+void i915_driver_lastclose(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ i915_do_cleanup_pageflip(dev);
i915_mem_takedown(&(dev_priv->agp_heap));
i915_dma_cleanup(dev);
}
-void i915_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void i915_driver_preclose(struct drm_device *dev, struct drm_file *filp)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
i915_mem_release(dev, filp, dev_priv->agp_heap);
}
diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c
index 98f51b2b..b280aa9d 100644
--- a/shared-core/i915_irq.c
+++ b/shared-core/i915_irq.c
@@ -38,47 +38,70 @@
#define MAX_NOPID ((u32)~0)
/**
+ * i915_get_pipe - return the pipe associated with a given plane
+ * @dev: DRM device
+ * @plane: plane to look for
+ *
+ * We need to get the pipe associated with a given plane to correctly perform
+ * vblank driven swapping, and they may not always be equal. So look up the
+ * pipe associated with @plane here.
+ */
+static int
+i915_get_pipe(struct drm_device *dev, int plane)
+{
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ u32 dspcntr;
+
+ dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
+
+ return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
+}
+
+/**
* Emit a synchronous flip.
*
* This function must be called with the drawable spinlock held.
*/
static void
-i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe)
+i915_dispatch_vsync_flip(struct drm_device *dev, struct drm_drawable_info *drw,
+ int plane)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
+ struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
u16 x1, y1, x2, y2;
- int pf_pipes = 1 << pipe;
+ int pf_planes = 1 << plane;
- /* If the window is visible on the other pipe, we have to flip on that
- * pipe as well.
+ DRM_SPINLOCK_ASSERT(&dev->drw_lock);
+
+ /* If the window is visible on the other plane, we have to flip on that
+ * plane as well.
*/
- if (pipe == 1) {
- x1 = sarea_priv->pipeA_x;
- y1 = sarea_priv->pipeA_y;
- x2 = x1 + sarea_priv->pipeA_w;
- y2 = y1 + sarea_priv->pipeA_h;
+ if (plane == 1) {
+ x1 = sarea_priv->planeA_x;
+ y1 = sarea_priv->planeA_y;
+ x2 = x1 + sarea_priv->planeA_w;
+ y2 = y1 + sarea_priv->planeA_h;
} else {
- x1 = sarea_priv->pipeB_x;
- y1 = sarea_priv->pipeB_y;
- x2 = x1 + sarea_priv->pipeB_w;
- y2 = y1 + sarea_priv->pipeB_h;
+ x1 = sarea_priv->planeB_x;
+ y1 = sarea_priv->planeB_y;
+ x2 = x1 + sarea_priv->planeB_w;
+ y2 = y1 + sarea_priv->planeB_h;
}
if (x2 > 0 && y2 > 0) {
int i, num_rects = drw->num_rects;
- drm_clip_rect_t *rect = drw->rects;
+ struct drm_clip_rect *rect = drw->rects;
for (i = 0; i < num_rects; i++)
if (!(rect[i].x1 >= x2 || rect[i].y1 >= y2 ||
rect[i].x2 <= x1 || rect[i].y2 <= y1)) {
- pf_pipes = 0x3;
+ pf_planes = 0x3;
break;
}
}
- i915_dispatch_flip(dev, pf_pipes, 1);
+ i915_dispatch_flip(dev, pf_planes, 1);
}
/**
@@ -86,16 +109,15 @@ i915_dispatch_vsync_flip(drm_device_t *dev, drm_drawable_info_t *drw, int pipe)
*
* This function will be called with the HW lock held.
*/
-static void i915_vblank_tasklet(drm_device_t *dev)
+static void i915_vblank_tasklet(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
struct list_head *list, *tmp, hits, *hit;
int nhits, nrects, slice[2], upper[2], lower[2], i, num_pages;
unsigned counter[2] = { atomic_read(&dev->vbl_received),
atomic_read(&dev->vbl_received2) };
- drm_drawable_info_t *drw;
- drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ struct drm_drawable_info *drw;
+ struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
u32 cpp = dev_priv->cpp, offsets[3];
u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
XY_SRC_COPY_BLT_WRITE_ALPHA |
@@ -111,35 +133,41 @@ static void i915_vblank_tasklet(drm_device_t *dev)
nhits = nrects = 0;
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+ /* No irqsave/restore necessary. This tasklet may be run in an
+ * interrupt context or normal context, but we don't have to worry
+ * about getting interrupted by something acquiring the lock, because
+ * we are the interrupt context thing that acquires the lock.
+ */
+ DRM_SPINLOCK(&dev_priv->swaps_lock);
/* Find buffer swaps scheduled for this vertical blank */
list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
- drm_i915_vbl_swap_t *vbl_swap =
- list_entry(list, drm_i915_vbl_swap_t, head);
+ struct drm_i915_vbl_swap *vbl_swap =
+ list_entry(list, struct drm_i915_vbl_swap, head);
+ int pipe = i915_get_pipe(dev, vbl_swap->plane);
- if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
+ if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
continue;
list_del(list);
dev_priv->swaps_pending--;
- spin_unlock(&dev_priv->swaps_lock);
- spin_lock(&dev->drw_lock);
+ DRM_SPINUNLOCK(&dev_priv->swaps_lock);
+ DRM_SPINLOCK(&dev->drw_lock);
drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
if (!drw) {
- spin_unlock(&dev->drw_lock);
+ DRM_SPINUNLOCK(&dev->drw_lock);
drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
- spin_lock(&dev_priv->swaps_lock);
+ DRM_SPINLOCK(&dev_priv->swaps_lock);
continue;
}
list_for_each(hit, &hits) {
- drm_i915_vbl_swap_t *swap_cmp =
- list_entry(hit, drm_i915_vbl_swap_t, head);
- drm_drawable_info_t *drw_cmp =
+ struct drm_i915_vbl_swap *swap_cmp =
+ list_entry(hit, struct drm_i915_vbl_swap, head);
+ struct drm_drawable_info *drw_cmp =
drm_get_drawable_info(dev, swap_cmp->drw_id);
if (drw_cmp &&
@@ -149,7 +177,7 @@ static void i915_vblank_tasklet(drm_device_t *dev)
}
}
- spin_unlock(&dev->drw_lock);
+ DRM_SPINUNLOCK(&dev->drw_lock);
/* List of hits was empty, or we reached the end of it */
if (hit == &hits)
@@ -157,30 +185,29 @@ static void i915_vblank_tasklet(drm_device_t *dev)
nhits++;
- spin_lock(&dev_priv->swaps_lock);
+ DRM_SPINLOCK(&dev_priv->swaps_lock);
}
+ DRM_SPINUNLOCK(&dev_priv->swaps_lock);
+
if (nhits == 0) {
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
return;
}
- spin_unlock(&dev_priv->swaps_lock);
-
i915_kernel_lost_context(dev);
upper[0] = upper[1] = 0;
- slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
- slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
- lower[0] = sarea_priv->pipeA_y + slice[0];
- lower[1] = sarea_priv->pipeB_y + slice[0];
+ slice[0] = max(sarea_priv->planeA_h / nhits, 1);
+ slice[1] = max(sarea_priv->planeB_h / nhits, 1);
+ lower[0] = sarea_priv->planeA_y + slice[0];
+ lower[1] = sarea_priv->planeB_y + slice[0];
offsets[0] = sarea_priv->front_offset;
offsets[1] = sarea_priv->back_offset;
offsets[2] = sarea_priv->third_offset;
num_pages = sarea_priv->third_handle ? 3 : 2;
- spin_lock(&dev->drw_lock);
+ DRM_SPINLOCK(&dev->drw_lock);
/* Emit blits for buffer swaps, partitioning both outputs into as many
* slices as there are buffer swaps scheduled in order to avoid tearing
@@ -196,10 +223,10 @@ static void i915_vblank_tasklet(drm_device_t *dev)
lower[0] = lower[1] = sarea_priv->height;
list_for_each(hit, &hits) {
- drm_i915_vbl_swap_t *swap_hit =
- list_entry(hit, drm_i915_vbl_swap_t, head);
- drm_clip_rect_t *rect;
- int num_rects, pipe, front, back;
+ struct drm_i915_vbl_swap *swap_hit =
+ list_entry(hit, struct drm_i915_vbl_swap, head);
+ struct drm_clip_rect *rect;
+ int num_rects, plane, front, back;
unsigned short top, bottom;
drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -207,10 +234,10 @@ static void i915_vblank_tasklet(drm_device_t *dev)
if (!drw)
continue;
- pipe = swap_hit->pipe;
+ plane = swap_hit->plane;
if (swap_hit->flip) {
- i915_dispatch_vsync_flip(dev, drw, pipe);
+ i915_dispatch_vsync_flip(dev, drw, plane);
continue;
}
@@ -232,11 +259,11 @@ static void i915_vblank_tasklet(drm_device_t *dev)
}
rect = drw->rects;
- top = upper[pipe];
- bottom = lower[pipe];
+ top = upper[plane];
+ bottom = lower[plane];
front = (dev_priv->sarea_priv->pf_current_page >>
- (2 * pipe)) & 0x3;
+ (2 * plane)) & 0x3;
back = (front + 1) % num_pages;
for (num_rects = drw->num_rects; num_rects--; rect++) {
@@ -262,11 +289,11 @@ static void i915_vblank_tasklet(drm_device_t *dev)
}
}
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ DRM_SPINUNLOCK(&dev->drw_lock);
list_for_each_safe(hit, tmp, &hits) {
- drm_i915_vbl_swap_t *swap_hit =
- list_entry(hit, drm_i915_vbl_swap_t, head);
+ struct drm_i915_vbl_swap *swap_hit =
+ list_entry(hit, struct drm_i915_vbl_swap, head);
list_del(hit);
@@ -276,8 +303,8 @@ static void i915_vblank_tasklet(drm_device_t *dev)
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_device *dev = (struct drm_device *) arg;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
u16 temp;
u32 pipea_stats, pipeb_stats;
@@ -338,10 +365,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-int i915_emit_irq(drm_device_t * dev)
+int i915_emit_irq(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
RING_LOCALS;
i915_kernel_lost_context(dev);
@@ -360,31 +387,31 @@ int i915_emit_irq(drm_device_t * dev)
}
-void i915_user_irq_on(drm_i915_private_t *dev_priv)
+void i915_user_irq_on(struct drm_i915_private *dev_priv)
{
- spin_lock(&dev_priv->user_irq_lock);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (++dev_priv->user_irq_refcount == 1)){
dev_priv->irq_enable_reg |= USER_INT_FLAG;
I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
}
- spin_unlock(&dev_priv->user_irq_lock);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
-void i915_user_irq_off(drm_i915_private_t *dev_priv)
+void i915_user_irq_off(struct drm_i915_private *dev_priv)
{
- spin_lock(&dev_priv->user_irq_lock);
+ DRM_SPINLOCK(&dev_priv->user_irq_lock);
if (dev_priv->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
// dev_priv->irq_enable_reg &= ~USER_INT_FLAG;
// I915_WRITE16(I915REG_INT_ENABLE_R, dev_priv->irq_enable_reg);
}
- spin_unlock(&dev_priv->user_irq_lock);
+ DRM_SPINUNLOCK(&dev_priv->user_irq_lock);
}
-static int i915_wait_irq(drm_device_t * dev, int irq_nr)
+static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
int ret = 0;
DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
@@ -400,7 +427,7 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
READ_BREADCRUMB(dev_priv) >= irq_nr);
i915_user_irq_off(dev_priv);
- if (ret == DRM_ERR(EBUSY)) {
+ if (ret == -EBUSY) {
DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
__FUNCTION__,
READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
@@ -410,16 +437,17 @@ static int i915_wait_irq(drm_device_t * dev, int irq_nr)
return ret;
}
-static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
+static int i915_driver_vblank_do_wait(struct drm_device *dev,
+ unsigned int *sequence,
atomic_t *counter)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int cur_vblank;
int ret = 0;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
@@ -431,7 +459,7 @@ static int i915_driver_vblank_do_wait(drm_device_t *dev, unsigned int *sequence,
return ret;
}
-void i915_driver_wait_next_vblank(drm_device_t *dev, int pipe)
+void i915_driver_wait_next_vblank(struct drm_device *dev, int pipe)
{
unsigned int seq;
@@ -444,40 +472,36 @@ void i915_driver_wait_next_vblank(drm_device_t *dev, int pipe)
i915_driver_vblank_do_wait(dev, &seq, &dev->vbl_received2);
}
-int i915_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}
-int i915_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
+int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}
/* Needs the lock as it touches the ring.
*/
-int i915_irq_emit(DRM_IOCTL_ARGS)
+int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_emit_t emit;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_irq_emit *emit = data;
int result;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
- sizeof(emit));
-
result = i915_emit_irq(dev);
- if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
@@ -485,26 +509,23 @@ int i915_irq_emit(DRM_IOCTL_ARGS)
/* Doesn't need the hardware lock.
*/
-int i915_irq_wait(DRM_IOCTL_ARGS)
+int i915_irq_wait(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_irq_wait_t irqwait;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_irq_wait *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
- sizeof(irqwait));
-
- return i915_wait_irq(dev, irqwait.irq_seq);
+ return i915_wait_irq(dev, irqwait->irq_seq);
}
-void i915_enable_interrupt (drm_device_t *dev)
+void i915_enable_interrupt (struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
dev_priv->irq_enable_reg = USER_INT_FLAG;
if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
@@ -518,221 +539,218 @@ void i915_enable_interrupt (drm_device_t *dev)
/* Set the vblank monitor pipe
*/
-int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
+int i915_vblank_pipe_set(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t pipe;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_vblank_pipe *pipe = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
- sizeof(pipe));
-
- if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
+ if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
DRM_ERROR("%s called with invalid pipe 0x%x\n",
- __FUNCTION__, pipe.pipe);
- return DRM_ERR(EINVAL);
+ __FUNCTION__, pipe->pipe);
+ return -EINVAL;
}
- dev_priv->vblank_pipe = pipe.pipe;
+ dev_priv->vblank_pipe = pipe->pipe;
i915_enable_interrupt (dev);
return 0;
}
-int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
+int i915_vblank_pipe_get(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_pipe_t pipe;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_vblank_pipe *pipe = data;
u16 flag;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
flag = I915_READ(I915REG_INT_ENABLE_R);
- pipe.pipe = 0;
+ pipe->pipe = 0;
if (flag & VSYNC_PIPEA_FLAG)
- pipe.pipe |= DRM_I915_VBLANK_PIPE_A;
+ pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
if (flag & VSYNC_PIPEB_FLAG)
- pipe.pipe |= DRM_I915_VBLANK_PIPE_B;
- DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe,
- sizeof(pipe));
+ pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+
return 0;
}
/**
* Schedule buffer swap at given vertical blank.
*/
-int i915_vblank_swap(DRM_IOCTL_ARGS)
+int i915_vblank_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_vblank_swap_t swap;
- drm_i915_vbl_swap_t *vbl_swap;
- unsigned int pipe, seqtype, curseq;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_vblank_swap *swap = data;
+ struct drm_i915_vbl_swap *vbl_swap;
+ unsigned int pipe, seqtype, curseq, plane;
unsigned long irqflags;
struct list_head *list;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __func__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (dev_priv->sarea_priv->rotation) {
DRM_DEBUG("Rotation not supported\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
- sizeof(swap));
-
- if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
+ if (swap->seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
_DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS |
_DRM_VBLANK_FLIP)) {
- DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("Invalid sequence type 0x%x\n", swap->seqtype);
+ return -EINVAL;
}
- pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+ plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+ pipe = i915_get_pipe(dev, plane);
- seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
+ seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
if (!(dev_priv->vblank_pipe & (1 << pipe))) {
DRM_ERROR("Invalid pipe %d\n", pipe);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- spin_lock_irqsave(&dev->drw_lock, irqflags);
+ DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
- if (!drm_get_drawable_info(dev, swap.drawable)) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
- return DRM_ERR(EINVAL);
+ /* It makes no sense to schedule a swap for a drawable that doesn't have
+ * valid information at this point. E.g. this could mean that the X
+ * server is too old to push drawable information to the DRM, in which
+ * case all such swaps would become ineffective.
+ */
+ if (!drm_get_drawable_info(dev, swap->drawable)) {
+ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
+ DRM_DEBUG("Invalid drawable ID %d\n", swap->drawable);
+ return -EINVAL;
}
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
if (seqtype == _DRM_VBLANK_RELATIVE)
- swap.sequence += curseq;
+ swap->sequence += curseq;
- if ((curseq - swap.sequence) <= (1<<23)) {
- if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
- swap.sequence = curseq + 1;
+ if ((curseq - swap->sequence) <= (1<<23)) {
+ if (swap->seqtype & _DRM_VBLANK_NEXTONMISS) {
+ swap->sequence = curseq + 1;
} else {
DRM_DEBUG("Missed target sequence\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
- if (swap.seqtype & _DRM_VBLANK_FLIP) {
- swap.sequence--;
+ if (swap->seqtype & _DRM_VBLANK_FLIP) {
+ swap->sequence--;
- if ((curseq - swap.sequence) <= (1<<23)) {
- drm_drawable_info_t *drw;
+ if ((curseq - swap->sequence) <= (1<<23)) {
+ struct drm_drawable_info *drw;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- spin_lock_irqsave(&dev->drw_lock, irqflags);
+ DRM_SPINLOCK_IRQSAVE(&dev->drw_lock, irqflags);
- drw = drm_get_drawable_info(dev, swap.drawable);
+ drw = drm_get_drawable_info(dev, swap->drawable);
if (!drw) {
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock,
+ irqflags);
DRM_DEBUG("Invalid drawable ID %d\n",
- swap.drawable);
- return DRM_ERR(EINVAL);
+ swap->drawable);
+ return -EINVAL;
}
- i915_dispatch_vsync_flip(dev, drw, pipe);
+ i915_dispatch_vsync_flip(dev, drw, plane);
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev->drw_lock, irqflags);
return 0;
}
}
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
list_for_each(list, &dev_priv->vbl_swaps.head) {
- vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
+ vbl_swap = list_entry(list, struct drm_i915_vbl_swap, head);
- if (vbl_swap->drw_id == swap.drawable &&
- vbl_swap->pipe == pipe &&
- vbl_swap->sequence == swap.sequence) {
- vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP);
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ if (vbl_swap->drw_id == swap->drawable &&
+ vbl_swap->plane == plane &&
+ vbl_swap->sequence == swap->sequence) {
+ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
DRM_DEBUG("Already scheduled\n");
return 0;
}
}
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
if (dev_priv->swaps_pending >= 100) {
DRM_DEBUG("Too many swaps queued\n");
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
 vbl_swap = drm_calloc(1, sizeof(*vbl_swap), DRM_MEM_DRIVER);
if (!vbl_swap) {
DRM_ERROR("Failed to allocate memory to queue swap\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
DRM_DEBUG("\n");
- vbl_swap->drw_id = swap.drawable;
- vbl_swap->pipe = pipe;
- vbl_swap->sequence = swap.sequence;
- vbl_swap->flip = (swap.seqtype & _DRM_VBLANK_FLIP);
+ vbl_swap->drw_id = swap->drawable;
+ vbl_swap->plane = plane;
+ vbl_swap->sequence = swap->sequence;
+ vbl_swap->flip = (swap->seqtype & _DRM_VBLANK_FLIP);
if (vbl_swap->flip)
- swap.sequence++;
+ swap->sequence++;
- spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
+ DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags);
list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
dev_priv->swaps_pending++;
- spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
-
- DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
- sizeof(swap));
+ DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags);
return 0;
}
/* drm_dma.h hooks
*/
-void i915_driver_irq_preinstall(drm_device_t * dev)
+void i915_driver_irq_preinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
I915_WRITE16(I915REG_HWSTAM, 0xeffe);
I915_WRITE16(I915REG_INT_MASK_R, 0x0);
I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
-void i915_driver_irq_postinstall(drm_device_t * dev)
+void i915_driver_irq_postinstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
- spin_lock_init(&dev_priv->swaps_lock);
+ DRM_SPININIT(&dev_priv->swaps_lock, "swap");
INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
dev_priv->swaps_pending = 0;
- dev_priv->user_irq_lock = SPIN_LOCK_UNLOCKED;
+ DRM_SPININIT(&dev_priv->user_irq_lock, "userirq");
dev_priv->user_irq_refcount = 0;
i915_enable_interrupt(dev);
@@ -745,9 +763,9 @@ void i915_driver_irq_postinstall(drm_device_t * dev)
I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21));
}
-void i915_driver_irq_uninstall(drm_device_t * dev)
+void i915_driver_irq_uninstall(struct drm_device * dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
u16 temp;
if (!dev_priv)
return;
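
Two details in the i915_irq.c hunks above are easy to misread. First, i915_get_pipe() exists because, after the modesetting rework, plane A is not guaranteed to scan out on pipe A; the DISPPLANE_SEL_PIPE_MASK bit of DSPACNTR/DSPBCNTR says which pipe a plane is currently routed to, and the vblank counters are per pipe, not per plane. Second, the scheduling code compares sequence numbers with unsigned subtraction, (curseq - target) <= (1<<23), so the test still works when the 32-bit counter wraps: the subtraction reads as "target is at most 2^23 counts in the past". A tiny standalone sketch of that arithmetic, outside the driver, just to illustrate the behaviour:

    #include <stdio.h>
    #include <stdint.h>

    /* Same 2^23 window test as i915_vblank_swap()/i915_vblank_tasklet():
     * returns 1 if 'target' has already been reached relative to 'cur'. */
    static int seq_passed(uint32_t cur, uint32_t target)
    {
            return (cur - target) <= (1u << 23);
    }

    int main(void)
    {
            printf("%d\n", seq_passed(100, 90));          /* 1: already passed      */
            printf("%d\n", seq_passed(100, 110));         /* 0: still in the future */
            printf("%d\n", seq_passed(5, 0xfffffff0u));   /* 1: passed across wrap  */
            return 0;
    }
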
diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c
index 13f19f3a..2916d5a1 100644
--- a/shared-core/i915_mem.c
+++ b/shared-core/i915_mem.c
@@ -43,11 +43,11 @@
* block to allocate, and the ring is drained prior to allocations --
* in other words allocation is expensive.
*/
-static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
+static void mark_block(struct drm_device * dev, struct mem_block *p, int in_use)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_tex_region_t *list;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_sarea *sarea_priv = dev_priv->sarea_priv;
+ struct drm_tex_region *list;
unsigned shift, nr;
unsigned start;
unsigned end;
@@ -89,7 +89,7 @@ static void mark_block(drm_device_t * dev, struct mem_block *p, int in_use)
*/
static struct mem_block *split_block(struct mem_block *p, int start, int size,
- DRMFILE filp)
+ struct drm_file *file_priv)
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
@@ -99,7 +99,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start;
newblock->size = p->size - (start - p->start);
- newblock->filp = NULL;
+ newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@@ -116,7 +116,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start + size;
newblock->size = p->size - size;
- newblock->filp = NULL;
+ newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@@ -126,20 +126,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
out:
/* Our block is in the middle */
- p->filp = filp;
+ p->file_priv = file_priv;
return p;
}
static struct mem_block *alloc_block(struct mem_block *heap, int size,
- int align2, DRMFILE filp)
+ int align2, struct drm_file *file_priv)
{
struct mem_block *p;
int mask = (1 << align2) - 1;
for (p = heap->next; p != heap; p = p->next) {
int start = (p->start + mask) & ~mask;
- if (p->filp == NULL && start + size <= p->start + p->size)
- return split_block(p, start, size, filp);
+ if (p->file_priv == NULL && start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
}
return NULL;
@@ -158,12 +158,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
static void free_block(struct mem_block *p)
{
- p->filp = NULL;
+ p->file_priv = NULL;
- /* Assumes a single contiguous range. Needs a special filp in
+ /* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
- if (p->next->filp == NULL) {
+ if (p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@@ -171,7 +171,7 @@ static void free_block(struct mem_block *p)
drm_free(q, sizeof(*q), DRM_MEM_BUFLISTS);
}
- if (p->prev->filp == NULL) {
+ if (p->prev->file_priv == NULL) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;
@@ -197,18 +197,19 @@ static int init_heap(struct mem_block **heap, int start, int size)
blocks->start = start;
blocks->size = size;
- blocks->filp = NULL;
+ blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
memset(*heap, 0, sizeof(**heap));
- (*heap)->filp = (DRMFILE) - 1;
+ (*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
/* Free all blocks associated with the releasing file.
*/
-void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
+void i915_mem_release(struct drm_device * dev, struct drm_file *file_priv,
+ struct mem_block *heap)
{
struct mem_block *p;
@@ -216,17 +217,17 @@ void i915_mem_release(drm_device_t * dev, DRMFILE filp, struct mem_block *heap)
return;
for (p = heap->next; p != heap; p = p->next) {
- if (p->filp == filp) {
- p->filp = NULL;
+ if (p->file_priv == file_priv) {
+ p->file_priv = NULL;
mark_block(dev, p, 0);
}
}
- /* Assumes a single contiguous range. Needs a special filp in
+ /* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
for (p = heap->next; p != heap; p = p->next) {
- while (p->filp == NULL && p->next->filp == NULL) {
+ while (p->file_priv == NULL && p->next->file_priv == NULL) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@@ -255,7 +256,7 @@ void i915_mem_takedown(struct mem_block **heap)
*heap = NULL;
}
-static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
+static struct mem_block **get_heap(struct drm_i915_private * dev_priv, int region)
{
switch (region) {
case I915_MEM_REGION_AGP:
@@ -267,129 +268,117 @@ static struct mem_block **get_heap(drm_i915_private_t * dev_priv, int region)
/* IOCTL HANDLERS */
-int i915_mem_alloc(DRM_IOCTL_ARGS)
+int i915_mem_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_alloc_t alloc;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_mem_alloc *alloc = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(alloc, (drm_i915_mem_alloc_t __user *) data,
- sizeof(alloc));
-
- heap = get_heap(dev_priv, alloc.region);
+ heap = get_heap(dev_priv, alloc->region);
if (!heap || !*heap)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
/* Make things easier on ourselves: all allocations at least
* 4k aligned.
*/
- if (alloc.alignment < 12)
- alloc.alignment = 12;
+ if (alloc->alignment < 12)
+ alloc->alignment = 12;
- block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
+ block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
if (!block)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
mark_block(dev, block, 1);
- if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
+ if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+ sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
}
-int i915_mem_free(DRM_IOCTL_ARGS)
+int i915_mem_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_free_t memfree;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_mem_free *memfree = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(memfree, (drm_i915_mem_free_t __user *) data,
- sizeof(memfree));
-
- heap = get_heap(dev_priv, memfree.region);
+ heap = get_heap(dev_priv, memfree->region);
if (!heap || !*heap)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
- block = find_block(*heap, memfree.region_offset);
+ block = find_block(*heap, memfree->region_offset);
if (!block)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
- if (block->filp != filp)
- return DRM_ERR(EPERM);
+ if (block->file_priv != file_priv)
+ return -EPERM;
mark_block(dev, block, 0);
free_block(block);
return 0;
}
-int i915_mem_init_heap(DRM_IOCTL_ARGS)
+int i915_mem_init_heap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_init_heap_t initheap;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_mem_init_heap *initheap = data;
struct mem_block **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(initheap,
- (drm_i915_mem_init_heap_t __user *) data,
- sizeof(initheap));
-
- heap = get_heap(dev_priv, initheap.region);
+ heap = get_heap(dev_priv, initheap->region);
if (!heap)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if (*heap) {
DRM_ERROR("heap already initialized?");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
- return init_heap(heap, initheap.start, initheap.size);
+ return init_heap(heap, initheap->start, initheap->size);
}
-int i915_mem_destroy_heap( DRM_IOCTL_ARGS )
+int i915_mem_destroy_heap( struct drm_device *dev, void *data,
+ struct drm_file *file_priv )
{
- DRM_DEVICE;
- drm_i915_private_t *dev_priv = dev->dev_private;
- drm_i915_mem_destroy_heap_t destroyheap;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_mem_destroy_heap *destroyheap = data;
struct mem_block **heap;
if ( !dev_priv ) {
DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ );
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL( destroyheap, (drm_i915_mem_destroy_heap_t *)data,
- sizeof(destroyheap) );
-
- heap = get_heap( dev_priv, destroyheap.region );
+ heap = get_heap( dev_priv, destroyheap->region );
if (!heap) {
DRM_ERROR("get_heap failed");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (!*heap) {
DRM_ERROR("heap not initialized?");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
i915_mem_takedown( heap );
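
The mem_block changes above swap the DRMFILE handle for a struct drm_file pointer but keep the same sentinel convention: NULL marks a free block, the all-ones pointer (struct drm_file *)-1 marks the heap header itself, and any other value is the file that owns the block (this is what i915_mem_free checks before allowing a free, and what i915_mem_release matches on at file close). A small set of hypothetical helpers, not present in the tree, just to spell the encoding out:

    /* Hypothetical helpers illustrating the mem_block ownership encoding. */
    static int block_is_free(const struct mem_block *p)
    {
            return p->file_priv == NULL;
    }

    static int block_is_heap_header(const struct mem_block *p)
    {
            return p->file_priv == (struct drm_file *) -1;
    }

    static int block_owned_by(const struct mem_block *p,
                              const struct drm_file *file_priv)
    {
            return !block_is_free(p) && !block_is_heap_header(p) &&
                   p->file_priv == file_priv;
    }
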
diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c
index 60f55900..e0a67458 100644
--- a/shared-core/mach64_dma.c
+++ b/shared-core/mach64_dma.c
@@ -70,7 +70,7 @@ int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries)
DRM_INFO("%s failed! slots=%d entries=%d\n", __FUNCTION__, slots,
entries);
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/**
@@ -94,7 +94,7 @@ int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv)
DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__,
MACH64_READ(MACH64_GUI_STAT));
mach64_dump_ring_info(dev_priv);
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/**
@@ -135,7 +135,7 @@ int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n)
/* FIXME: This is being ignored... */
DRM_ERROR("failed!\n");
mach64_dump_ring_info(dev_priv);
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/**
@@ -172,7 +172,7 @@ static int mach64_ring_idle(drm_mach64_private_t * dev_priv)
DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__,
MACH64_READ(MACH64_GUI_STAT));
mach64_dump_ring_info(dev_priv);
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/**
@@ -418,7 +418,7 @@ void mach64_dump_engine_info(drm_mach64_private_t * dev_priv)
* pointed by the ring head.
*/
static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv,
- drm_buf_t * buf)
+ struct drm_buf * buf)
{
u32 addr = GETBUFADDR(buf);
u32 used = buf->used >> 2;
@@ -522,7 +522,7 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv)
list_for_each(ptr, &dev_priv->pending) {
drm_mach64_freelist_t *entry =
list_entry(ptr, drm_mach64_freelist_t, list);
- drm_buf_t *buf = entry->buf;
+ struct drm_buf *buf = entry->buf;
u32 buf_addr = GETBUFADDR(buf);
@@ -572,7 +572,7 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv)
 * DMA operation. It is left here since it is so tricky to get DMA operating
* properly in some architectures and hardware.
*/
-static int mach64_bm_dma_test(drm_device_t * dev)
+static int mach64_bm_dma_test(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_dma_handle_t *cpu_addr_dmah;
@@ -592,7 +592,7 @@ static int mach64_bm_dma_test(drm_device_t * dev)
drm_pci_alloc(dev, 0x1000, 0x1000, 0xfffffffful);
if (!cpu_addr_dmah) {
DRM_INFO("data-memory allocation failed!\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
} else {
data = (u32 *) cpu_addr_dmah->vaddr;
data_addr = (u32) cpu_addr_dmah->busaddr;
@@ -624,7 +624,7 @@ static int mach64_bm_dma_test(drm_device_t * dev)
mach64_do_engine_reset(dev_priv);
DRM_INFO("freeing data buffer memory.\n");
drm_pci_free(dev, cpu_addr_dmah);
- return DRM_ERR(EIO);
+ return -EIO;
}
}
@@ -752,7 +752,7 @@ static int mach64_bm_dma_test(drm_device_t * dev)
* Called during the DMA initialization ioctl to initialize all the necessary
* software and hardware state for DMA operation.
*/
-static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
+static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)
{
drm_mach64_private_t *dev_priv;
u32 tmp;
@@ -762,7 +762,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_mach64_private_t));
@@ -797,21 +797,21 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
DRM_ERROR("can not find sarea!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->fb = drm_core_findmap(dev, init->fb_offset);
if (!dev_priv->fb) {
DRM_ERROR("can not find frame buffer map!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("can not find mmio map!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->ring_map = drm_core_findmap(dev, init->ring_offset);
@@ -819,7 +819,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
DRM_ERROR("can not find ring map!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->sarea_priv = (drm_mach64_sarea_t *)
@@ -832,7 +832,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
" descriptor ring\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map =
@@ -841,7 +841,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
DRM_ERROR("can not find dma buffer map!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* there might be a nicer way to do this -
   dev isn't passed all the way through the mach64 - DA */
@@ -853,7 +853,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
" dma buffer\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev_priv->agp_textures =
drm_core_findmap(dev, init->agp_textures_offset);
@@ -861,7 +861,7 @@ static int mach64_do_dma_init(drm_device_t * dev, drm_mach64_init_t * init)
DRM_ERROR("can not find agp texture region!\n");
dev->dev_private = (void *)dev_priv;
mach64_do_cleanup_dma(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -974,7 +974,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)
volatile u32 *ring_read;
struct list_head *ptr;
drm_mach64_freelist_t *entry;
- drm_buf_t *buf = NULL;
+ struct drm_buf *buf = NULL;
u32 *buf_ptr;
u32 used, reg, target;
int fifo, count, found, ret, no_idle_wait;
@@ -1035,7 +1035,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)
head, ring->tail, buf_addr, (eol ? "eol" : ""));
mach64_dump_ring_info(dev_priv);
mach64_do_engine_reset(dev_priv);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* Hand feed the buffer to the card via MMIO, waiting for the fifo
@@ -1117,7 +1117,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)
/** \name DMA cleanup */
/*@{*/
-int mach64_do_cleanup_dma(drm_device_t * dev)
+int mach64_do_cleanup_dma(struct drm_device * dev)
{
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -1158,60 +1158,57 @@ int mach64_do_cleanup_dma(drm_device_t * dev)
/** \name IOCTL handlers */
/*@{*/
-int mach64_dma_init(DRM_IOCTL_ARGS)
+int mach64_dma_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_mach64_init_t init;
+ drm_mach64_init_t *init = data;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(init, (drm_mach64_init_t *) data,
- sizeof(init));
-
- switch (init.func) {
+ switch (init->func) {
case DRM_MACH64_INIT_DMA:
- return mach64_do_dma_init(dev, &init);
+ return mach64_do_dma_init(dev, init);
case DRM_MACH64_CLEANUP_DMA:
return mach64_do_cleanup_dma(dev);
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
-int mach64_dma_idle(DRM_IOCTL_ARGS)
+int mach64_dma_idle(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return mach64_do_dma_idle(dev_priv);
}
-int mach64_dma_flush(DRM_IOCTL_ARGS)
+int mach64_dma_flush(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return mach64_do_dma_flush(dev_priv);
}
-int mach64_engine_reset(DRM_IOCTL_ARGS)
+int mach64_engine_reset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return mach64_do_engine_reset(dev_priv);
}
@@ -1223,9 +1220,9 @@ int mach64_engine_reset(DRM_IOCTL_ARGS)
/** \name Freelist management */
/*@{*/
-int mach64_init_freelist(drm_device_t * dev)
+int mach64_init_freelist(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_freelist_t *entry;
struct list_head *ptr;
@@ -1239,7 +1236,7 @@ int mach64_init_freelist(drm_device_t * dev)
(drm_mach64_freelist_t *)
drm_alloc(sizeof(drm_mach64_freelist_t),
DRM_MEM_BUFLISTS)) == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(entry, 0, sizeof(drm_mach64_freelist_t));
entry->buf = dma->buflist[i];
ptr = &entry->list;
@@ -1249,7 +1246,7 @@ int mach64_init_freelist(drm_device_t * dev)
return 0;
}
-void mach64_destroy_freelist(drm_device_t * dev)
+void mach64_destroy_freelist(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_freelist_t *entry;
@@ -1381,7 +1378,7 @@ static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv)
return 1;
}
-drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv)
+struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv)
{
drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;
drm_mach64_freelist_t *entry;
@@ -1427,7 +1424,7 @@ drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv)
return entry->buf;
}
-int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf)
+int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_buf)
{
struct list_head *ptr;
drm_mach64_freelist_t *entry;
@@ -1438,7 +1435,7 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf)
if (copy_buf == entry->buf) {
DRM_ERROR("%s: Trying to release a pending buf\n",
__FUNCTION__);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
}
#endif
@@ -1461,76 +1458,73 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, drm_buf_t * copy_buf)
/** \name DMA buffer request and submission IOCTL handler */
/*@{*/
-static int mach64_dma_get_buffers(DRMFILE filp, drm_device_t * dev,
- drm_dma_t * d)
+static int mach64_dma_get_buffers(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_dma * d)
{
int i;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_mach64_private_t *dev_priv = dev->dev_private;
for (i = d->granted_count; i < d->request_count; i++) {
buf = mach64_freelist_get(dev_priv);
#if MACH64_EXTRA_CHECKING
if (!buf)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
#else
if (!buf)
- return DRM_ERR(EAGAIN);
+ return -EAGAIN;
#endif
- buf->filp = filp;
+ buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
d->granted_count++;
}
return 0;
}
-int mach64_dma_buffers(DRM_IOCTL_ARGS)
+int mach64_dma_buffers(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
- drm_dma_t d;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_dma *d = data;
int ret = 0;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t *) data, sizeof(d));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
- if (d.send_count != 0) {
+ if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d.send_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->send_count);
+ return -EINVAL;
}
/* We'll send you buffers.
*/
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d.request_count, dma->buf_count);
- ret = DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->request_count, dma->buf_count);
+ ret = -EINVAL;
}
- d.granted_count = 0;
+ d->granted_count = 0;
- if (d.request_count) {
- ret = mach64_dma_get_buffers(filp, dev, &d);
+ if (d->request_count) {
+ ret = mach64_dma_get_buffers(dev, file_priv, d);
}
- DRM_COPY_TO_USER_IOCTL((drm_dma_t *) data, d, sizeof(d));
-
return ret;
}
-void mach64_driver_lastclose(drm_device_t * dev)
+void mach64_driver_lastclose(struct drm_device * dev)
{
mach64_do_cleanup_dma(dev);
}
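Note that mach64_dma_buffers() loses its DRM_COPY_FROM_USER_IOCTL/DRM_COPY_TO_USER_IOCTL pair -- struct drm_dma itself is now copied in and back out by the DRM core -- while the DRM_COPY_TO_USER() calls inside mach64_dma_get_buffers() remain, because request_indices and request_sizes are user-space pointers stored inside that struct. A short sketch of the distinction, with a hypothetical struct foo_args and foo_ioctl() that are not part of this tree:

    struct foo_args {
            int count;              /* plain value: copied by the DRM core */
            int __user *results;    /* embedded pointer: still needs copy_to_user */
    };

    int foo_ioctl(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
    {
            struct foo_args *args = data;   /* kernel copy of the struct */
            int value = 42;                 /* hypothetical result */

            /* the user pointer inside the struct must still be written safely */
            if (DRM_COPY_TO_USER(args->results, &value, sizeof(value)))
                    return -EFAULT;
            args->count = 1;                /* copied back out by the core */
            return 0;
    }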
diff --git a/shared-core/mach64_drm.h b/shared-core/mach64_drm.h
index 083f959d..1f5fd842 100644
--- a/shared-core/mach64_drm.h
+++ b/shared-core/mach64_drm.h
@@ -130,7 +130,7 @@ typedef struct drm_mach64_sarea {
/* The current cliprects, or a subset thereof.
*/
- drm_clip_rect_t boxes[MACH64_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[MACH64_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@@ -139,7 +139,7 @@ typedef struct drm_mach64_sarea {
/* Texture memory LRU.
*/
- drm_tex_region_t tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
+ struct drm_tex_region tex_list[MACH64_NR_TEX_HEAPS][MACH64_NR_TEX_REGIONS +
1];
unsigned int tex_age[MACH64_NR_TEX_HEAPS];
int ctx_owner;
diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h
index bb8b309e..cebd4c6e 100644
--- a/shared-core/mach64_drv.h
+++ b/shared-core/mach64_drv.h
@@ -55,7 +55,7 @@
typedef struct drm_mach64_freelist {
struct list_head list; /* List pointers for free_list, placeholders, or pending list */
- drm_buf_t *buf; /* Pointer to the buffer */
+ struct drm_buf *buf; /* Pointer to the buffer */
int discard; /* This flag is set when we're done (re)using a buffer */
u32 ring_ofs; /* dword offset in ring of last descriptor for this buffer */
} drm_mach64_freelist_t;
@@ -108,22 +108,27 @@ typedef struct drm_mach64_private {
drm_local_map_t *agp_textures;
} drm_mach64_private_t;
-extern drm_ioctl_desc_t mach64_ioctls[];
+extern struct drm_ioctl_desc mach64_ioctls[];
extern int mach64_max_ioctl;
/* mach64_dma.c */
-extern int mach64_dma_init(DRM_IOCTL_ARGS);
-extern int mach64_dma_idle(DRM_IOCTL_ARGS);
-extern int mach64_dma_flush(DRM_IOCTL_ARGS);
-extern int mach64_engine_reset(DRM_IOCTL_ARGS);
-extern int mach64_dma_buffers(DRM_IOCTL_ARGS);
-extern void mach64_driver_lastclose(drm_device_t * dev);
-
-extern int mach64_init_freelist(drm_device_t * dev);
-extern void mach64_destroy_freelist(drm_device_t * dev);
-extern drm_buf_t *mach64_freelist_get(drm_mach64_private_t * dev_priv);
+extern int mach64_dma_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_dma_idle(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_dma_flush(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_engine_reset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_dma_buffers(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern void mach64_driver_lastclose(struct drm_device * dev);
+
+extern int mach64_init_freelist(struct drm_device * dev);
+extern void mach64_destroy_freelist(struct drm_device * dev);
+extern struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv);
extern int mach64_freelist_put(drm_mach64_private_t * dev_priv,
- drm_buf_t * copy_buf);
+ struct drm_buf * copy_buf);
extern int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv,
int entries);
@@ -137,21 +142,26 @@ extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv);
extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);
extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);
-extern int mach64_do_cleanup_dma(drm_device_t * dev);
+extern int mach64_do_cleanup_dma(struct drm_device * dev);
/* mach64_state.c */
-extern int mach64_dma_clear(DRM_IOCTL_ARGS);
-extern int mach64_dma_swap(DRM_IOCTL_ARGS);
-extern int mach64_dma_vertex(DRM_IOCTL_ARGS);
-extern int mach64_dma_blit(DRM_IOCTL_ARGS);
-extern int mach64_get_param(DRM_IOCTL_ARGS);
-extern int mach64_driver_vblank_wait(drm_device_t * dev,
+extern int mach64_dma_clear(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_dma_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_dma_vertex(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_dma_blit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mach64_driver_vblank_wait(struct drm_device * dev,
unsigned int *sequence);
extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS);
-extern void mach64_driver_irq_preinstall(drm_device_t * dev);
-extern void mach64_driver_irq_postinstall(drm_device_t * dev);
-extern void mach64_driver_irq_uninstall(drm_device_t * dev);
+extern void mach64_driver_irq_preinstall(struct drm_device * dev);
+extern void mach64_driver_irq_postinstall(struct drm_device * dev);
+extern void mach64_driver_irq_uninstall(struct drm_device * dev);
/* ================================================================
* Registers
@@ -798,7 +808,7 @@ do { \
#define DMALOCALS \
drm_mach64_freelist_t *_entry = NULL; \
- drm_buf_t *_buf = NULL; \
+ struct drm_buf *_buf = NULL; \
u32 *_buf_wptr; int _outcount
#define GETBUFPTR( __buf ) \
@@ -813,20 +823,20 @@ do { \
static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *
dev_priv,
drm_mach64_freelist_t **
- entry, drm_buf_t * buf)
+ entry, struct drm_buf * buf)
{
struct list_head *ptr;
#if MACH64_EXTRA_CHECKING
if (list_empty(&dev_priv->pending)) {
DRM_ERROR("Empty pending list in %s\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
#endif
ptr = dev_priv->pending.prev;
*entry = list_entry(ptr, drm_mach64_freelist_t, list);
while ((*entry)->buf != buf) {
if (ptr == &dev_priv->pending) {
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
ptr = ptr->prev;
*entry = list_entry(ptr, drm_mach64_freelist_t, list);
@@ -842,7 +852,7 @@ do { \
} while(0)
/* FIXME: use a private set of smaller buffers for state emits, clears, and swaps? */
-#define DMAGETPTR( filp, dev_priv, n ) \
+#define DMAGETPTR( file_priv, dev_priv, n ) \
do { \
if ( MACH64_VERBOSE ) { \
DRM_INFO( "DMAGETPTR( %d ) in %s\n", \
@@ -852,14 +862,14 @@ do { \
if (_buf == NULL) { \
DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n", \
__FUNCTION__ ); \
- return DRM_ERR(EAGAIN); \
+ return -EAGAIN; \
} \
if (_buf->pending) { \
DRM_ERROR("%s: pending buf in DMAGETPTR\n", \
__FUNCTION__ ); \
- return DRM_ERR(EFAULT); \
+ return -EFAULT; \
} \
- _buf->filp = filp; \
+ _buf->file_priv = file_priv; \
_outcount = 0; \
\
_buf_wptr = GETBUFPTR( _buf ); \
@@ -888,7 +898,7 @@ do { \
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCE() in %s: sending empty buf %d\n", \
__FUNCTION__, _buf->idx ); \
- return DRM_ERR(EFAULT); \
+ return -EFAULT; \
} \
if (_buf->pending) { \
		/* This is a reused buffer, so we need to find it in the pending list */ \
@@ -901,13 +911,13 @@ do { \
if (_entry->discard) { \
DRM_ERROR( "DMAADVANCE() in %s: sending discarded pending buf %d\n", \
__FUNCTION__, _buf->idx ); \
- return DRM_ERR(EFAULT); \
+ return -EFAULT; \
} \
} else { \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "DMAADVANCE() in %s: empty placeholder list\n", \
__FUNCTION__ ); \
- return DRM_ERR(EFAULT); \
+ return -EFAULT; \
} \
ptr = dev_priv->placeholders.next; \
list_del(ptr); \
@@ -983,12 +993,12 @@ do { \
if (_buf->used <= 0) { \
DRM_ERROR( "DMAADVANCEHOSTDATA() in %s: sending empty buf %d\n", \
__FUNCTION__, _buf->idx ); \
- return DRM_ERR(EFAULT); \
+ return -EFAULT; \
} \
if (list_empty(&dev_priv->placeholders)) { \
DRM_ERROR( "%s: empty placeholder list in DMAADVANCEHOSTDATA()\n", \
__FUNCTION__ ); \
- return DRM_ERR(EFAULT); \
+ return -EFAULT; \
} \
\
ptr = dev_priv->placeholders.next; \
diff --git a/shared-core/mach64_irq.c b/shared-core/mach64_irq.c
index 663642db..4122dd91 100644
--- a/shared-core/mach64_irq.c
+++ b/shared-core/mach64_irq.c
@@ -42,7 +42,7 @@
irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
int status;
@@ -70,7 +70,7 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int mach64_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@@ -90,7 +90,7 @@ int mach64_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
/* drm_dma.h hooks
*/
-void mach64_driver_irq_preinstall(drm_device_t * dev)
+void mach64_driver_irq_preinstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
@@ -104,7 +104,7 @@ void mach64_driver_irq_preinstall(drm_device_t * dev)
| MACH64_CRTC_VBLANK_INT);
}
-void mach64_driver_irq_postinstall(drm_device_t * dev)
+void mach64_driver_irq_postinstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
@@ -118,7 +118,7 @@ void mach64_driver_irq_postinstall(drm_device_t * dev)
}
-void mach64_driver_irq_uninstall(drm_device_t * dev)
+void mach64_driver_irq_uninstall(struct drm_device * dev)
{
drm_mach64_private_t *dev_priv =
(drm_mach64_private_t *) dev->dev_private;
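The interrupt paths in mach64_irq.c keep the DRM_IRQ_ARGS macro in their signatures; only the cast of the opaque arg pointer and the helper prototypes move from the drm_device_t typedef to struct drm_device. A sketch of the handler shape, with hypothetical foo_* names that are not part of this tree:

    irqreturn_t foo_irq_handler(DRM_IRQ_ARGS)
    {
            /* arg is the opaque pointer registered with the IRQ core */
            struct drm_device *dev = (struct drm_device *) arg;
            drm_foo_private_t *dev_priv = dev->dev_private;

            if (!dev_priv)
                    return IRQ_NONE;        /* nothing for us to handle */
            return IRQ_HANDLED;
    }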
diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c
index 38cefca9..89b6c6ce 100644
--- a/shared-core/mach64_state.c
+++ b/shared-core/mach64_state.c
@@ -40,16 +40,16 @@
* 1.0 - Initial mach64 DRM
*
*/
-drm_ioctl_desc_t mach64_ioctls[] = {
- [DRM_IOCTL_NR(DRM_MACH64_INIT)] = {mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_MACH64_CLEAR)] = {mach64_dma_clear, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MACH64_SWAP)] = {mach64_dma_swap, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MACH64_IDLE)] = {mach64_dma_idle, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MACH64_RESET)] = {mach64_engine_reset, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MACH64_VERTEX)] = {mach64_dma_vertex, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MACH64_BLIT)] = {mach64_dma_blit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MACH64_FLUSH)] = {mach64_dma_flush, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MACH64_GETPARAM)] = {mach64_get_param, DRM_AUTH},
+struct drm_ioctl_desc mach64_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_MACH64_INIT, mach64_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_MACH64_CLEAR, mach64_dma_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MACH64_SWAP, mach64_dma_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MACH64_IDLE, mach64_dma_idle, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MACH64_RESET, mach64_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MACH64_VERTEX, mach64_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MACH64_BLIT, mach64_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MACH64_FLUSH, mach64_dma_flush, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MACH64_GETPARAM, mach64_get_param, DRM_AUTH),
};
int mach64_max_ioctl = DRM_ARRAY_SIZE(mach64_ioctls);
@@ -85,11 +85,12 @@ static void mach64_print_dirty(const char *msg, unsigned int flags)
/* This function returns 0 on success, 1 for no intersection, and
* negative for an error
*/
-static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv,
- drm_clip_rect_t * box)
+static int mach64_emit_cliprect(struct drm_file *file_priv,
+ drm_mach64_private_t * dev_priv,
+ struct drm_clip_rect * box)
{
u32 sc_left_right, sc_top_bottom;
- drm_clip_rect_t scissor;
+ struct drm_clip_rect scissor;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_context_regs_t *regs = &sarea_priv->context_state;
DMALOCALS;
@@ -120,7 +121,7 @@ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv,
if (scissor.y1 >= scissor.y2)
return 1;
- DMAGETPTR(filp, dev_priv, 2); /* returns on failure to get buffer */
+ DMAGETPTR(file_priv, dev_priv, 2); /* returns on failure to get buffer */
sc_left_right = ((scissor.x1 << 0) | (scissor.x2 << 16));
sc_top_bottom = ((scissor.y1 << 0) | (scissor.y2 << 16));
@@ -133,7 +134,7 @@ static int mach64_emit_cliprect(DRMFILE filp, drm_mach64_private_t * dev_priv,
return 0;
}
-static __inline__ int mach64_emit_state(DRMFILE filp,
+static __inline__ int mach64_emit_state(struct drm_file *file_priv,
drm_mach64_private_t * dev_priv)
{
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
@@ -148,7 +149,7 @@ static __inline__ int mach64_emit_state(DRMFILE filp,
DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty);
}
- DMAGETPTR(filp, dev_priv, 17); /* returns on failure to get buffer */
+ DMAGETPTR(file_priv, dev_priv, 17); /* returns on failure to get buffer */
if (dirty & MACH64_UPLOAD_MISC) {
DMAOUTREG(MACH64_DP_MIX, regs->dp_mix);
@@ -212,7 +213,8 @@ static __inline__ int mach64_emit_state(DRMFILE filp,
* DMA command dispatch functions
*/
-static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
+static int mach64_dma_dispatch_clear(struct drm_device * dev,
+ struct drm_file *file_priv,
unsigned int flags,
int cx, int cy, int cw, int ch,
unsigned int clear_color,
@@ -222,7 +224,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mach64_context_regs_t *ctx = &sarea_priv->context_state;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
u32 fb_bpp, depth_bpp;
int i;
DMALOCALS;
@@ -237,7 +239,7 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
fb_bpp = MACH64_DATATYPE_ARGB8888;
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
switch (dev_priv->depth_bpp) {
case 16:
@@ -248,13 +250,13 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
depth_bpp = MACH64_DATATYPE_ARGB8888;
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (!nbox)
return 0;
- DMAGETPTR(filp, dev_priv, nbox * 31); /* returns on failure to get buffer */
+ DMAGETPTR(file_priv, dev_priv, nbox * 31); /* returns on failure to get buffer */
for (i = 0; i < nbox; i++) {
int x = pbox[i].x1;
@@ -355,12 +357,13 @@ static int mach64_dma_dispatch_clear(DRMFILE filp, drm_device_t * dev,
return 0;
}
-static int mach64_dma_dispatch_swap(DRMFILE filp, drm_device_t * dev)
+static int mach64_dma_dispatch_swap(struct drm_device * dev,
+ struct drm_file *file_priv)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
u32 fb_bpp;
int i;
DMALOCALS;
@@ -380,7 +383,7 @@ static int mach64_dma_dispatch_swap(DRMFILE filp, drm_device_t * dev)
if (!nbox)
return 0;
- DMAGETPTR(filp, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */
+ DMAGETPTR(file_priv, dev_priv, 13 + nbox * 4); /* returns on failure to get buffer */
DMAOUTREG(MACH64_Z_CNTL, 0);
DMAOUTREG(MACH64_SCALE_3D_CNTL, 0);
@@ -489,11 +492,11 @@ static __inline__ int copy_from_user_vertex(u32 *to,
from = drm_alloc(bytes, DRM_MEM_DRIVER);
if (from == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
if (DRM_COPY_FROM_USER(from, ufrom, bytes)) {
drm_free(from, bytes, DRM_MEM_DRIVER);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
orig_from = from; /* we'll be modifying the "from" ptr, so save it */
@@ -525,14 +528,14 @@ static __inline__ int copy_from_user_vertex(u32 *to,
DRM_ERROR("%s: Got bad command: 0x%04x\n",
__FUNCTION__, reg);
drm_free(orig_from, bytes, DRM_MEM_DRIVER);
- return DRM_ERR(EACCES);
+ return -EACCES;
}
} else {
DRM_ERROR
("%s: Got bad command count(=%u) dwords remaining=%lu\n",
__FUNCTION__, count, n);
drm_free(orig_from, bytes, DRM_MEM_DRIVER);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -541,16 +544,17 @@ static __inline__ int copy_from_user_vertex(u32 *to,
return 0;
else {
DRM_ERROR("%s: Bad buf->used(=%lu)\n", __FUNCTION__, bytes);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
-static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
+static int mach64_dma_dispatch_vertex(struct drm_device * dev,
+ struct drm_file *file_priv,
drm_mach64_vertex_t * vertex)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_buf_t *copy_buf;
+ struct drm_buf *copy_buf;
void *buf = vertex->buf;
unsigned long used = vertex->used;
int ret = 0;
@@ -568,7 +572,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__);
- return DRM_ERR(EAGAIN);
+ return -EAGAIN;
}
verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);
@@ -583,7 +587,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
DMASETPTR(copy_buf);
if (sarea_priv->dirty & ~MACH64_UPLOAD_CLIPRECTS) {
- ret = mach64_emit_state(filp, dev_priv);
+ ret = mach64_emit_state(file_priv, dev_priv);
if (ret < 0)
return ret;
}
@@ -591,7 +595,7 @@ static int mach64_dma_dispatch_vertex(DRMFILE filp, drm_device_t * dev,
do {
/* Emit the next cliprect */
if (i < sarea_priv->nbox) {
- ret = mach64_emit_cliprect(filp, dev_priv,
+ ret = mach64_emit_cliprect(file_priv, dev_priv,
&sarea_priv->boxes[i]);
if (ret < 0) {
/* failed to get buffer */
@@ -634,19 +638,20 @@ static __inline__ int copy_from_user_blit(u32 *to,
to = (u32 *)((char *)to + MACH64_HOSTDATA_BLIT_OFFSET);
if (DRM_COPY_FROM_USER(to, ufrom, bytes)) {
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
}
-static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
+static int mach64_dma_dispatch_blit(struct drm_device * dev,
+ struct drm_file *file_priv,
drm_mach64_blit_t * blit)
{
drm_mach64_private_t *dev_priv = dev->dev_private;
int dword_shift, dwords;
unsigned long used;
- drm_buf_t *copy_buf;
+ struct drm_buf *copy_buf;
int verify_ret = 0;
DMALOCALS;
@@ -671,7 +676,7 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
break;
default:
DRM_ERROR("invalid blit format %d\n", blit->format);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* Set buf->used to the bytes of blit data based on the blit dimensions
@@ -684,13 +689,13 @@ static int mach64_dma_dispatch_blit(DRMFILE filp, drm_device_t * dev,
if (used <= 0 ||
used > MACH64_BUFFER_SIZE - MACH64_HOSTDATA_BLIT_OFFSET) {
DRM_ERROR("Invalid blit size: %lu bytes\n", used);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
copy_buf = mach64_freelist_get(dev_priv);
if (copy_buf == NULL) {
DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__);
- return DRM_ERR(EAGAIN);
+ return -EAGAIN;
}
verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);
@@ -753,27 +758,25 @@ _blit_done:
* IOCTL functions
*/
-int mach64_dma_clear(DRM_IOCTL_ARGS)
+int mach64_dma_clear(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mach64_clear_t clear;
+ drm_mach64_clear_t *clear = data;
int ret;
DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID);
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(clear, (drm_mach64_clear_t *) data,
- sizeof(clear));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
- ret = mach64_dma_dispatch_clear(filp, dev, clear.flags,
- clear.x, clear.y, clear.w, clear.h,
- clear.clear_color, clear.clear_depth);
+ ret = mach64_dma_dispatch_clear(dev, file_priv, clear->flags,
+ clear->x, clear->y, clear->w, clear->h,
+ clear->clear_color,
+ clear->clear_depth);
/* Make sure we restore the 3D state next time.
*/
@@ -781,21 +784,21 @@ int mach64_dma_clear(DRM_IOCTL_ARGS)
return ret;
}
-int mach64_dma_swap(DRM_IOCTL_ARGS)
+int mach64_dma_swap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
int ret;
DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
- ret = mach64_dma_dispatch_swap(filp, dev);
+ ret = mach64_dma_dispatch_swap(dev, file_priv);
/* Make sure we restore the 3D state next time.
*/
@@ -803,58 +806,52 @@ int mach64_dma_swap(DRM_IOCTL_ARGS)
return ret;
}
-int mach64_dma_vertex(DRM_IOCTL_ARGS)
+int mach64_dma_vertex(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mach64_vertex_t vertex;
+ drm_mach64_vertex_t *vertex = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(vertex, (drm_mach64_vertex_t *) data,
- sizeof(vertex));
-
DRM_DEBUG("%s: pid=%d buf=%p used=%lu discard=%d\n",
__FUNCTION__, DRM_CURRENTPID,
- vertex.buf, vertex.used, vertex.discard);
+ vertex->buf, vertex->used, vertex->discard);
- if (vertex.prim < 0 || vertex.prim > MACH64_PRIM_POLYGON) {
- DRM_ERROR("buffer prim %d\n", vertex.prim);
- return DRM_ERR(EINVAL);
+ if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) {
+ DRM_ERROR("buffer prim %d\n", vertex->prim);
+ return -EINVAL;
}
- if (vertex.used > MACH64_BUFFER_SIZE || (vertex.used & 3) != 0) {
+ if (vertex->used > MACH64_BUFFER_SIZE || (vertex->used & 3) != 0) {
DRM_ERROR("Invalid vertex buffer size: %lu bytes\n",
- vertex.used);
- return DRM_ERR(EINVAL);
+ vertex->used);
+ return -EINVAL;
}
if (sarea_priv->nbox > MACH64_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MACH64_NR_SAREA_CLIPRECTS;
- return mach64_dma_dispatch_vertex(filp, dev, &vertex);
+ return mach64_dma_dispatch_vertex(dev, file_priv, vertex);
}
-int mach64_dma_blit(DRM_IOCTL_ARGS)
+int mach64_dma_blit(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mach64_blit_t blit;
+ drm_mach64_blit_t *blit = data;
int ret;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(blit, (drm_mach64_blit_t *) data,
- sizeof(blit));
-
- ret = mach64_dma_dispatch_blit(filp, dev, &blit);
+ ret = mach64_dma_dispatch_blit(dev, file_priv, blit);
/* Make sure we restore the 3D state next time.
*/
@@ -864,39 +861,36 @@ int mach64_dma_blit(DRM_IOCTL_ARGS)
return ret;
}
-int mach64_get_param(DRM_IOCTL_ARGS)
+int mach64_get_param(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mach64_private_t *dev_priv = dev->dev_private;
- drm_mach64_getparam_t param;
+ drm_mach64_getparam_t *param = data;
int value;
DRM_DEBUG("%s\n", __FUNCTION__);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(param, (drm_mach64_getparam_t *) data,
- sizeof(param));
-
- switch (param.param) {
+ switch (param->param) {
case MACH64_PARAM_FRAMES_QUEUED:
/* Needs lock since it calls mach64_ring_tick() */
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
value = mach64_do_get_frames_queued(dev_priv);
break;
case MACH64_PARAM_IRQ_NR:
value = dev->irq;
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
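The mach64_ioctls[] hunk above is the table rewrite applied to every driver in this merge: the designated-initializer array indexed by DRM_IOCTL_NR() becomes a list of DRM_IOCTL_DEF() entries, and the typedef'd drm_ioctl_desc_t becomes struct drm_ioctl_desc. A sketch of one entry in both forms, with a hypothetical DRM_FOO_BAR ioctl and foo_bar_ioctl() handler that are not part of this tree:

    /* before: index computed by hand from the ioctl number */
    drm_ioctl_desc_t foo_ioctls[] = {
            [DRM_IOCTL_NR(DRM_FOO_BAR)] = {foo_bar_ioctl, DRM_AUTH},
    };

    /* after: DRM_IOCTL_DEF pairs the number, handler and flags itself */
    struct drm_ioctl_desc foo_ioctls[] = {
            DRM_IOCTL_DEF(DRM_FOO_BAR, foo_bar_ioctl, DRM_AUTH),
    };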
diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c
index d48313c7..a86dd31c 100644
--- a/shared-core/mga_dma.c
+++ b/shared-core/mga_dma.c
@@ -46,7 +46,7 @@
#define MINIMAL_CLEANUP 0
#define FULL_CLEANUP 1
-static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup);
+static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup);
/* ================================================================
* Engine control
@@ -71,7 +71,7 @@ int mga_do_wait_for_idle(drm_mga_private_t * dev_priv)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int mga_do_dma_reset(drm_mga_private_t * dev_priv)
@@ -224,7 +224,7 @@ void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv)
#define MGA_BUFFER_FREE 0
#if MGA_FREELIST_DEBUG
-static void mga_freelist_print(drm_device_t * dev)
+static void mga_freelist_print(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -245,10 +245,10 @@ static void mga_freelist_print(drm_device_t * dev)
}
#endif
-static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
+static int mga_freelist_init(struct drm_device * dev, drm_mga_private_t * dev_priv)
{
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
drm_mga_freelist_t *entry;
int i;
@@ -256,7 +256,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
if (dev_priv->head == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
SET_AGE(&dev_priv->head->age, MGA_BUFFER_USED, 0);
@@ -267,7 +267,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
entry = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
if (entry == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(entry, 0, sizeof(drm_mga_freelist_t));
@@ -291,7 +291,7 @@ static int mga_freelist_init(drm_device_t * dev, drm_mga_private_t * dev_priv)
return 0;
}
-static void mga_freelist_cleanup(drm_device_t * dev)
+static void mga_freelist_cleanup(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *entry;
@@ -311,10 +311,10 @@ static void mga_freelist_cleanup(drm_device_t * dev)
#if 0
/* FIXME: Still needed?
*/
-static void mga_freelist_reset(drm_device_t * dev)
+static void mga_freelist_reset(struct drm_device * dev)
{
drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
int i;
@@ -326,7 +326,7 @@ static void mga_freelist_reset(drm_device_t * dev)
}
#endif
-static drm_buf_t *mga_freelist_get(drm_device_t * dev)
+static struct drm_buf *mga_freelist_get(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_freelist_t *next;
@@ -359,7 +359,7 @@ static drm_buf_t *mga_freelist_get(drm_device_t * dev)
return NULL;
}
-int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
+int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -393,13 +393,13 @@ int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf)
* DMA initialization, cleanup
*/
-int mga_driver_load(drm_device_t *dev, unsigned long flags)
+int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
drm_mga_private_t * dev_priv;
dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
if (!dev_priv)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
dev->dev_private = (void *)dev_priv;
memset(dev_priv, 0, sizeof(drm_mga_private_t));
@@ -433,7 +433,7 @@ int mga_driver_load(drm_device_t *dev, unsigned long flags)
*
* \sa mga_do_dma_bootstrap, mga_do_pci_dma_bootstrap
*/
-static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
+static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -443,11 +443,11 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
const unsigned secondary_size = dma_bs->secondary_bin_count
* dma_bs->secondary_bin_size;
const unsigned agp_size = (dma_bs->agp_size << 20);
- drm_buf_desc_t req;
- drm_agp_mode_t mode;
- drm_agp_info_t info;
- drm_agp_buffer_t agp_req;
- drm_agp_binding_t bind_req;
+ struct drm_buf_desc req;
+ struct drm_agp_mode mode;
+ struct drm_agp_info info;
+ struct drm_agp_buffer agp_req;
+ struct drm_agp_binding bind_req;
/* Acquire AGP. */
err = drm_agp_acquire(dev);
@@ -548,7 +548,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
#ifdef __linux__
{
- drm_map_list_t *_entry;
+ struct drm_map_list *_entry;
unsigned long agp_token = 0;
list_for_each_entry(_entry, &dev->maplist, head) {
@@ -579,7 +579,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
DRM_ERROR("failed to ioremap agp regions! (%p, %p, %p)\n",
dev_priv->warp->handle, dev_priv->primary->handle,
dev->agp_buffer_map->handle);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev_priv->dma_access = MGA_PAGPXFER;
@@ -603,7 +603,7 @@ static int mga_do_agp_dma_bootstrap(drm_device_t * dev,
*
* \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap
*/
-static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
+static int mga_do_pci_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -611,12 +611,12 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
unsigned int primary_size;
unsigned int bin_count;
int err;
- drm_buf_desc_t req;
+ struct drm_buf_desc req;
if (dev->dma == NULL) {
DRM_ERROR("dev->dma is NULL\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
/* Make drm_addbufs happy by not trying to create a mapping for less
@@ -651,7 +651,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
if (err != 0) {
DRM_ERROR("Unable to allocate primary DMA region: %d\n", err);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (dev_priv->primary->size != dma_bs->primary_size) {
@@ -696,7 +696,7 @@ static int mga_do_pci_dma_bootstrap(drm_device_t * dev,
}
-static int mga_do_dma_bootstrap(drm_device_t * dev,
+static int mga_do_dma_bootstrap(struct drm_device * dev,
drm_mga_dma_bootstrap_t * dma_bs)
{
const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);
@@ -762,44 +762,37 @@ static int mga_do_dma_bootstrap(drm_device_t * dev,
return err;
}
-int mga_dma_bootstrap(DRM_IOCTL_ARGS)
+int mga_dma_bootstrap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_mga_dma_bootstrap_t bootstrap;
+ drm_mga_dma_bootstrap_t *bootstrap = data;
int err;
static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 };
const drm_mga_private_t * const dev_priv =
(drm_mga_private_t *) dev->dev_private;
- DRM_COPY_FROM_USER_IOCTL(bootstrap,
- (drm_mga_dma_bootstrap_t __user *) data,
- sizeof(bootstrap));
-
- err = mga_do_dma_bootstrap(dev, & bootstrap);
+ err = mga_do_dma_bootstrap(dev, bootstrap);
if (err) {
mga_do_cleanup_dma(dev, FULL_CLEANUP);
return err;
}
if (dev_priv->agp_textures != NULL) {
- bootstrap.texture_handle = dev_priv->agp_textures->offset;
- bootstrap.texture_size = dev_priv->agp_textures->size;
+ bootstrap->texture_handle = dev_priv->agp_textures->offset;
+ bootstrap->texture_size = dev_priv->agp_textures->size;
} else {
- bootstrap.texture_handle = 0;
- bootstrap.texture_size = 0;
+ bootstrap->texture_handle = 0;
+ bootstrap->texture_size = 0;
}
- bootstrap.agp_mode = modes[bootstrap.agp_mode & 0x07];
-
- DRM_COPY_TO_USER_IOCTL((drm_mga_dma_bootstrap_t __user *)data,
- bootstrap, sizeof(bootstrap));
+ bootstrap->agp_mode = modes[bootstrap->agp_mode & 0x07];
return 0;
}
-static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
+static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)
{
drm_mga_private_t *dev_priv;
int ret;
@@ -833,7 +826,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
dev_priv->sarea = drm_getsarea(dev);
if (!dev_priv->sarea) {
DRM_ERROR("failed to find sarea!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (! dev_priv->used_new_dma_init) {
@@ -844,28 +837,28 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
dev_priv->status = drm_core_findmap(dev, init->status_offset);
if (!dev_priv->status) {
DRM_ERROR("failed to find status page!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
if (!dev_priv->mmio) {
DRM_ERROR("failed to find mmio region!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->warp = drm_core_findmap(dev, init->warp_offset);
if (!dev_priv->warp) {
DRM_ERROR("failed to find warp microcode region!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->primary = drm_core_findmap(dev, init->primary_offset);
if (!dev_priv->primary) {
DRM_ERROR("failed to find primary dma region!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("failed to find dma buffer region!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
drm_core_ioremap(dev_priv->warp, dev);
@@ -883,7 +876,7 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
((dev->agp_buffer_map == NULL) ||
(dev->agp_buffer_map->handle == NULL)))) {
DRM_ERROR("failed to ioremap agp regions!\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
ret = mga_warp_install_microcode(dev_priv);
@@ -933,13 +926,13 @@ static int mga_do_init_dma(drm_device_t * dev, drm_mga_init_t * init)
if (mga_freelist_init(dev, dev_priv) < 0) {
DRM_ERROR("could not initialize freelist\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
return 0;
}
-static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup)
+static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup)
{
int err = 0;
DRM_DEBUG("\n");
@@ -967,8 +960,8 @@ static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup)
if (dev_priv->used_new_dma_init) {
if (dev_priv->agp_handle != 0) {
- drm_agp_binding_t unbind_req;
- drm_agp_buffer_t free_req;
+ struct drm_agp_binding unbind_req;
+ struct drm_agp_buffer free_req;
unbind_req.handle = dev_priv->agp_handle;
drm_agp_unbind(dev, &unbind_req);
@@ -1010,20 +1003,17 @@ static int mga_do_cleanup_dma(drm_device_t * dev, int full_cleanup)
return 0;
}
-int mga_dma_init(DRM_IOCTL_ARGS)
+int mga_dma_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_mga_init_t init;
+ drm_mga_init_t *init = data;
int err;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(init, (drm_mga_init_t __user *) data,
- sizeof(init));
-
- switch (init.func) {
+ switch (init->func) {
case MGA_INIT_DMA:
- err = mga_do_init_dma(dev, &init);
+ err = mga_do_init_dma(dev, init);
if (err) {
(void) mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
@@ -1032,36 +1022,33 @@ int mga_dma_init(DRM_IOCTL_ARGS)
return mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* ================================================================
* Primary DMA stream management
*/
-int mga_dma_flush(DRM_IOCTL_ARGS)
+int mga_dma_flush(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- drm_lock_t lock;
-
- LOCK_TEST_WITH_RETURN(dev, filp);
+ struct drm_lock *lock = data;
- DRM_COPY_FROM_USER_IOCTL(lock, (drm_lock_t __user *) data,
- sizeof(lock));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
DRM_DEBUG("%s%s%s\n",
- (lock.flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
- (lock.flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
- (lock.flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
+ (lock->flags & _DRM_LOCK_FLUSH) ? "flush, " : "",
+ (lock->flags & _DRM_LOCK_FLUSH_ALL) ? "flush all, " : "",
+ (lock->flags & _DRM_LOCK_QUIESCENT) ? "idle, " : "");
WRAP_WAIT_WITH_RETURN(dev_priv);
- if (lock.flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
+ if (lock->flags & (_DRM_LOCK_FLUSH | _DRM_LOCK_FLUSH_ALL)) {
mga_do_dma_flush(dev_priv);
}
- if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ if (lock->flags & _DRM_LOCK_QUIESCENT) {
#if MGA_DMA_DEBUG
int ret = mga_do_wait_for_idle(dev_priv);
if (ret < 0)
@@ -1075,12 +1062,12 @@ int mga_dma_flush(DRM_IOCTL_ARGS)
}
}
-int mga_dma_reset(DRM_IOCTL_ARGS)
+int mga_dma_reset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return mga_do_dma_reset(dev_priv);
}
@@ -1089,76 +1076,72 @@ int mga_dma_reset(DRM_IOCTL_ARGS)
* DMA buffer management
*/
-static int mga_dma_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
+static int mga_dma_get_buffers(struct drm_device * dev,
+ struct drm_file *file_priv, struct drm_dma * d)
{
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
buf = mga_freelist_get(dev);
if (!buf)
- return DRM_ERR(EAGAIN);
+ return -EAGAIN;
- buf->filp = filp;
+ buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
d->granted_count++;
}
return 0;
}
-int mga_dma_buffers(DRM_IOCTL_ARGS)
+int mga_dma_buffers(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
- drm_dma_t __user *argp = (void __user *)data;
- drm_dma_t d;
+ struct drm_dma *d = data;
int ret = 0;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
- if (d.send_count != 0) {
+ if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d.send_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->send_count);
+ return -EINVAL;
}
/* We'll send you buffers.
*/
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d.request_count, dma->buf_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->request_count, dma->buf_count);
+ return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
- d.granted_count = 0;
+ d->granted_count = 0;
- if (d.request_count) {
- ret = mga_dma_get_buffers(filp, dev, &d);
+ if (d->request_count) {
+ ret = mga_dma_get_buffers(dev, file_priv, d);
}
- DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
-
return ret;
}
/**
* Called just before the module is unloaded.
*/
-int mga_driver_unload(drm_device_t * dev)
+int mga_driver_unload(struct drm_device * dev)
{
drm_free(dev->dev_private, sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
dev->dev_private = NULL;
@@ -1169,12 +1152,12 @@ int mga_driver_unload(drm_device_t * dev)
/**
* Called when the last opener of the device is closed.
*/
-void mga_driver_lastclose(drm_device_t * dev)
+void mga_driver_lastclose(struct drm_device * dev)
{
mga_do_cleanup_dma(dev, FULL_CLEANUP);
}
-int mga_driver_dma_quiescent(drm_device_t * dev)
+int mga_driver_dma_quiescent(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
return mga_do_wait_for_idle(dev_priv);
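mga_dma.c also shows the other half of the cleanup this merge performs: the drm_device_t, drm_buf_t, drm_device_dma_t and drm_agp_* typedefs are spelled out as their struct tags, with no behavioural change. A sketch of the mechanical pattern on a hypothetical helper that is not part of this tree:

    /* before: typedef'd core types */
    static int foo_count_buffers(drm_device_t *dev)
    {
            drm_device_dma_t *dma = dev->dma;
            drm_buf_t *buf = dma->buflist[0];
            return buf ? dma->buf_count : 0;
    }

    /* after: explicit struct tags, identical body */
    static int foo_count_buffers(struct drm_device *dev)
    {
            struct drm_device_dma *dma = dev->dma;
            struct drm_buf *buf = dma->buflist[0];
            return buf ? dma->buf_count : 0;
    }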
diff --git a/shared-core/mga_drm.h b/shared-core/mga_drm.h
index 5bcdbfab..15c2dea2 100644
--- a/shared-core/mga_drm.h
+++ b/shared-core/mga_drm.h
@@ -181,7 +181,7 @@ typedef struct _drm_mga_sarea {
/* The current cliprects, or a subset thereof.
*/
- drm_clip_rect_t boxes[MGA_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[MGA_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Information about the most recently used 3d drawable. The
@@ -202,7 +202,7 @@ typedef struct _drm_mga_sarea {
unsigned int exported_nback;
int exported_back_x, exported_front_x, exported_w;
int exported_back_y, exported_front_y, exported_h;
- drm_clip_rect_t exported_boxes[MGA_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect exported_boxes[MGA_NR_SAREA_CLIPRECTS];
/* Counters for aging textures and for client-side throttling.
*/
@@ -216,7 +216,7 @@ typedef struct _drm_mga_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
- drm_tex_region_t texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
+ struct drm_tex_region texList[MGA_NR_TEX_HEAPS][MGA_NR_TEX_REGIONS + 1];
unsigned int texAge[MGA_NR_TEX_HEAPS];
/* Mechanism to validate card state.
diff --git a/shared-core/mga_drv.h b/shared-core/mga_drv.h
index bce82135..8254c3f1 100644
--- a/shared-core/mga_drv.h
+++ b/shared-core/mga_drv.h
@@ -65,7 +65,7 @@ typedef struct drm_mga_freelist {
struct drm_mga_freelist *next;
struct drm_mga_freelist *prev;
drm_mga_age_t age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
} drm_mga_freelist_t;
typedef struct {
@@ -148,19 +148,24 @@ typedef struct drm_mga_private {
unsigned int agp_size;
} drm_mga_private_t;
-extern drm_ioctl_desc_t mga_ioctls[];
+extern struct drm_ioctl_desc mga_ioctls[];
extern int mga_max_ioctl;
/* mga_dma.c */
-extern int mga_dma_bootstrap(DRM_IOCTL_ARGS);
-extern int mga_dma_init(DRM_IOCTL_ARGS);
-extern int mga_dma_flush(DRM_IOCTL_ARGS);
-extern int mga_dma_reset(DRM_IOCTL_ARGS);
-extern int mga_dma_buffers(DRM_IOCTL_ARGS);
-extern int mga_driver_load(drm_device_t *dev, unsigned long flags);
-extern int mga_driver_unload(drm_device_t * dev);
-extern void mga_driver_lastclose(drm_device_t * dev);
-extern int mga_driver_dma_quiescent(drm_device_t * dev);
+extern int mga_dma_bootstrap(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mga_dma_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mga_dma_flush(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mga_dma_reset(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mga_dma_buffers(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+extern int mga_driver_load(struct drm_device *dev, unsigned long flags);
+extern int mga_driver_unload(struct drm_device * dev);
+extern void mga_driver_lastclose(struct drm_device * dev);
+extern int mga_driver_dma_quiescent(struct drm_device * dev);
extern int mga_do_wait_for_idle(drm_mga_private_t * dev_priv);
@@ -168,7 +173,7 @@ extern void mga_do_dma_flush(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_start(drm_mga_private_t * dev_priv);
extern void mga_do_dma_wrap_end(drm_mga_private_t * dev_priv);
-extern int mga_freelist_put(drm_device_t * dev, drm_buf_t * buf);
+extern int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf);
/* mga_warp.c */
extern unsigned int mga_warp_microcode_size(const drm_mga_private_t * dev_priv);
@@ -176,12 +181,12 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
extern int mga_warp_init(drm_mga_private_t * dev_priv);
/* mga_irq.c */
-extern int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence);
-extern int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
+extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
-extern void mga_driver_irq_preinstall(drm_device_t * dev);
-extern void mga_driver_irq_postinstall(drm_device_t * dev);
-extern void mga_driver_irq_uninstall(drm_device_t * dev);
+extern void mga_driver_irq_preinstall(struct drm_device * dev);
+extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern void mga_driver_irq_uninstall(struct drm_device * dev);
extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -245,7 +250,7 @@ do { \
dev_priv->prim.high_mark ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
- return DRM_ERR(EBUSY); \
+ return -EBUSY; \
} \
} \
} while (0)
@@ -256,7 +261,7 @@ do { \
if ( mga_do_wait_for_idle( dev_priv ) < 0 ) { \
if ( MGA_DMA_DEBUG ) \
DRM_INFO( "%s: wrap...\n", __FUNCTION__ ); \
- return DRM_ERR(EBUSY); \
+ return -EBUSY; \
} \
mga_do_dma_wrap_end( dev_priv ); \
} \
diff --git a/shared-core/mga_irq.c b/shared-core/mga_irq.c
index 490d1fbb..8b555e2e 100644
--- a/shared-core/mga_irq.c
+++ b/shared-core/mga_irq.c
@@ -38,7 +38,7 @@
irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
int status;
int handled = 0;
@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@@ -98,7 +98,7 @@ int mga_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
-int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
+int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
unsigned int cur_fence;
@@ -117,7 +117,7 @@ int mga_driver_fence_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
-void mga_driver_irq_preinstall(drm_device_t * dev)
+void mga_driver_irq_preinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -127,7 +127,7 @@ void mga_driver_irq_preinstall(drm_device_t * dev)
MGA_WRITE(MGA_ICLEAR, ~0);
}
-void mga_driver_irq_postinstall(drm_device_t * dev)
+void mga_driver_irq_postinstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
@@ -137,7 +137,7 @@ void mga_driver_irq_postinstall(drm_device_t * dev)
MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
}
-void mga_driver_irq_uninstall(drm_device_t * dev)
+void mga_driver_irq_uninstall(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c
index 8e5cb334..70b7caa0 100644
--- a/shared-core/mga_state.c
+++ b/shared-core/mga_state.c
@@ -43,7 +43,7 @@
*/
static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,
- drm_clip_rect_t * box)
+ struct drm_clip_rect * box)
{
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
@@ -416,7 +416,7 @@ static int mga_verify_context(drm_mga_private_t * dev_priv)
ctx->dstorg, dev_priv->front_offset,
dev_priv->back_offset);
ctx->dstorg = 0;
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
@@ -435,7 +435,7 @@ static int mga_verify_tex(drm_mga_private_t * dev_priv, int unit)
if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
tex->texorg = 0;
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
@@ -477,13 +477,13 @@ static int mga_verify_iload(drm_mga_private_t * dev_priv,
dstorg + length > (dev_priv->texture_offset +
dev_priv->texture_size)) {
DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (length & MGA_ILOAD_MASK) {
DRM_ERROR("*** bad iload length: 0x%x\n",
length & MGA_ILOAD_MASK);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
@@ -495,7 +495,7 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
(dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
}
@@ -504,12 +504,12 @@ static int mga_verify_blit(drm_mga_private_t * dev_priv,
*
*/
-static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
+static void mga_dma_dispatch_clear(struct drm_device * dev, drm_mga_clear_t * clear)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
int i;
DMA_LOCALS;
@@ -525,7 +525,7 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
ADVANCE_DMA();
for (i = 0; i < nbox; i++) {
- drm_clip_rect_t *box = &pbox[i];
+ struct drm_clip_rect *box = &pbox[i];
u32 height = box->y2 - box->y1;
DRM_DEBUG(" from=%d,%d to=%d,%d\n",
@@ -594,12 +594,12 @@ static void mga_dma_dispatch_clear(drm_device_t * dev, drm_mga_clear_t * clear)
FLUSH_DMA();
}
-static void mga_dma_dispatch_swap(drm_device_t * dev)
+static void mga_dma_dispatch_swap(struct drm_device * dev)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
int i;
DMA_LOCALS;
@@ -626,7 +626,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev)
MGA_DWGCTL, MGA_DWGCTL_COPY);
for (i = 0; i < nbox; i++) {
- drm_clip_rect_t *box = &pbox[i];
+ struct drm_clip_rect *box = &pbox[i];
u32 height = box->y2 - box->y1;
u32 start = box->y1 * dev_priv->front_pitch;
@@ -651,7 +651,7 @@ static void mga_dma_dispatch_swap(drm_device_t * dev)
DRM_DEBUG("%s... done.\n", __FUNCTION__);
}
-static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
+static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_buf_priv_t *buf_priv = buf->dev_private;
@@ -698,7 +698,7 @@ static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
FLUSH_DMA();
}
-static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
+static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * buf,
unsigned int start, unsigned int end)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -747,7 +747,7 @@ static void mga_dma_dispatch_indices(drm_device_t * dev, drm_buf_t * buf,
/* This copies a 64 byte aligned agp region to the framebuffer with a
* standard blit, the ioctl needs to do checking.
*/
-static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf,
+static void mga_dma_dispatch_iload(struct drm_device * dev, struct drm_buf * buf,
unsigned int dstorg, unsigned int length)
{
drm_mga_private_t *dev_priv = dev->dev_private;
@@ -800,12 +800,12 @@ static void mga_dma_dispatch_iload(drm_device_t * dev, drm_buf_t * buf,
FLUSH_DMA();
}
-static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit)
+static void mga_dma_dispatch_blit(struct drm_device * dev, drm_mga_blit_t * blit)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int nbox = sarea_priv->nbox;
u32 scandir = 0, i;
DMA_LOCALS;
@@ -865,24 +865,20 @@ static void mga_dma_dispatch_blit(drm_device_t * dev, drm_mga_blit_t * blit)
*
*/
-static int mga_dma_clear(DRM_IOCTL_ARGS)
+static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_clear_t clear;
+ drm_mga_clear_t *clear = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(clear, (drm_mga_clear_t __user *) data,
- sizeof(clear));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
WRAP_TEST_WITH_RETURN(dev_priv);
- mga_dma_dispatch_clear(dev, &clear);
+ mga_dma_dispatch_clear(dev, clear);
/* Make sure we restore the 3D state next time.
*/
@@ -891,13 +887,12 @@ static int mga_dma_clear(DRM_IOCTL_ARGS)
return 0;
}
-static int mga_dma_swap(DRM_IOCTL_ARGS)
+static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
@@ -913,37 +908,32 @@ static int mga_dma_swap(DRM_IOCTL_ARGS)
return 0;
}
-static int mga_dma_vertex(DRM_IOCTL_ARGS)
+static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
- drm_mga_vertex_t vertex;
-
- LOCK_TEST_WITH_RETURN(dev, filp);
+ drm_mga_vertex_t *vertex = data;
- DRM_COPY_FROM_USER_IOCTL(vertex,
- (drm_mga_vertex_t __user *) data,
- sizeof(vertex));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (vertex.idx < 0 || vertex.idx > dma->buf_count)
- return DRM_ERR(EINVAL);
- buf = dma->buflist[vertex.idx];
+ if (vertex->idx < 0 || vertex->idx > dma->buf_count)
+ return -EINVAL;
+ buf = dma->buflist[vertex->idx];
buf_priv = buf->dev_private;
- buf->used = vertex.used;
- buf_priv->discard = vertex.discard;
+ buf->used = vertex->used;
+ buf_priv->discard = vertex->discard;
if (!mga_verify_state(dev_priv)) {
- if (vertex.discard) {
+ if (vertex->discard) {
if (buf_priv->dispatched == 1)
AGE_BUFFER(buf_priv);
buf_priv->dispatched = 0;
mga_freelist_put(dev, buf);
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
@@ -953,82 +943,73 @@ static int mga_dma_vertex(DRM_IOCTL_ARGS)
return 0;
}
-static int mga_dma_indices(DRM_IOCTL_ARGS)
+static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
- drm_mga_indices_t indices;
-
- LOCK_TEST_WITH_RETURN(dev, filp);
+ drm_mga_indices_t *indices = data;
- DRM_COPY_FROM_USER_IOCTL(indices,
- (drm_mga_indices_t __user *) data,
- sizeof(indices));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (indices.idx < 0 || indices.idx > dma->buf_count)
- return DRM_ERR(EINVAL);
+ if (indices->idx < 0 || indices->idx > dma->buf_count)
+ return -EINVAL;
- buf = dma->buflist[indices.idx];
+ buf = dma->buflist[indices->idx];
buf_priv = buf->dev_private;
- buf_priv->discard = indices.discard;
+ buf_priv->discard = indices->discard;
if (!mga_verify_state(dev_priv)) {
- if (indices.discard) {
+ if (indices->discard) {
if (buf_priv->dispatched == 1)
AGE_BUFFER(buf_priv);
buf_priv->dispatched = 0;
mga_freelist_put(dev, buf);
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
- mga_dma_dispatch_indices(dev, buf, indices.start, indices.end);
+ mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);
return 0;
}
-static int mga_dma_iload(DRM_IOCTL_ARGS)
+static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_mga_buf_priv_t *buf_priv;
- drm_mga_iload_t iload;
+ drm_mga_iload_t *iload = data;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(iload, (drm_mga_iload_t __user *) data,
- sizeof(iload));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
#if 0
if (mga_do_wait_for_idle(dev_priv) < 0) {
if (MGA_DMA_DEBUG)
DRM_INFO("%s: -EBUSY\n", __FUNCTION__);
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
#endif
- if (iload.idx < 0 || iload.idx > dma->buf_count)
- return DRM_ERR(EINVAL);
+ if (iload->idx < 0 || iload->idx > dma->buf_count)
+ return -EINVAL;
- buf = dma->buflist[iload.idx];
+ buf = dma->buflist[iload->idx];
buf_priv = buf->dev_private;
- if (mga_verify_iload(dev_priv, iload.dstorg, iload.length)) {
+ if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
mga_freelist_put(dev, buf);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
WRAP_TEST_WITH_RETURN(dev_priv);
- mga_dma_dispatch_iload(dev, buf, iload.dstorg, iload.length);
+ mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);
/* Make sure we restore the 3D state next time.
*/
@@ -1037,28 +1018,24 @@ static int mga_dma_iload(DRM_IOCTL_ARGS)
return 0;
}
-static int mga_dma_blit(DRM_IOCTL_ARGS)
+static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_mga_blit_t blit;
+ drm_mga_blit_t *blit = data;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(blit, (drm_mga_blit_t __user *) data,
- sizeof(blit));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
- if (mga_verify_blit(dev_priv, blit.srcorg, blit.dstorg))
- return DRM_ERR(EINVAL);
+ if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
+ return -EINVAL;
WRAP_TEST_WITH_RETURN(dev_priv);
- mga_dma_dispatch_blit(dev, &blit);
+ mga_dma_dispatch_blit(dev, blit);
/* Make sure we restore the 3D state next time.
*/
@@ -1067,24 +1044,20 @@ static int mga_dma_blit(DRM_IOCTL_ARGS)
return 0;
}
-static int mga_getparam(DRM_IOCTL_ARGS)
+static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
- drm_mga_getparam_t param;
+ drm_mga_getparam_t *param = data;
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(param, (drm_mga_getparam_t __user *) data,
- sizeof(param));
-
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
- switch (param.param) {
+ switch (param->param) {
case MGA_PARAM_IRQ_NR:
value = dev->irq;
break;
@@ -1092,36 +1065,35 @@ static int mga_getparam(DRM_IOCTL_ARGS)
value = dev_priv->chipset;
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
}
-static int mga_set_fence(DRM_IOCTL_ARGS)
+static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
- u32 temp;
+ u32 *fence = data;
DMA_LOCALS;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
- /* I would normal do this assignment in the declaration of temp,
+ /* I would normally do this assignment in the declaration of fence,
* but dev_priv may be NULL.
*/
- temp = dev_priv->next_fence_to_post;
+ *fence = dev_priv->next_fence_to_post;
dev_priv->next_fence_to_post++;
BEGIN_DMA(1);
@@ -1131,47 +1103,40 @@ static int mga_set_fence(DRM_IOCTL_ARGS)
MGA_SOFTRAP, 0x00000000);
ADVANCE_DMA();
- DRM_COPY_TO_USER_IOCTL((u32 __user *)data, temp, sizeof(u32));
-
return 0;
}
-static int mga_wait_fence(DRM_IOCTL_ARGS)
+static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_mga_private_t *dev_priv = dev->dev_private;
- u32 fence;
+ u32 *fence = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(fence, (u32 __user *) data, sizeof(u32));
-
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
- mga_driver_fence_wait(dev, & fence);
-
- DRM_COPY_TO_USER_IOCTL((u32 __user *)data, fence, sizeof(u32));
+ mga_driver_fence_wait(dev, fence);
return 0;
}
-drm_ioctl_desc_t mga_ioctls[] = {
- [DRM_IOCTL_NR(DRM_MGA_INIT)] = {mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_MGA_FLUSH)] = {mga_dma_flush, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_RESET)] = {mga_dma_reset, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_SWAP)] = {mga_dma_swap, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_CLEAR)] = {mga_dma_clear, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_VERTEX)] = {mga_dma_vertex, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_INDICES)] = {mga_dma_indices, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_ILOAD)] = {mga_dma_iload, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_BLIT)] = {mga_dma_blit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_GETPARAM)] = {mga_getparam, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_SET_FENCE)] = {mga_set_fence, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_WAIT_FENCE)] = {mga_wait_fence, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_MGA_DMA_BOOTSTRAP)] = {mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+struct drm_ioctl_desc mga_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_MGA_FLUSH, mga_dma_flush, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_RESET, mga_dma_reset, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_SWAP, mga_dma_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_CLEAR, mga_dma_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_INDICES, mga_dma_indices, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_ILOAD, mga_dma_iload, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_BLIT, mga_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_GETPARAM, mga_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};
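
The handlers above illustrate the conversion this merge applies throughout the MGA driver: ioctl functions now receive (struct drm_device *, void *data, struct drm_file *) with the argument already copied in (and back out) by the DRM core, lock checks take file_priv instead of filp, and failures return negative errno values rather than DRM_ERR(). A minimal sketch of the resulting shape; mga_example and DRM_MGA_EXAMPLE are purely hypothetical names used only to show the convention:

static int mga_example(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return -EINVAL;			/* was DRM_ERR(EINVAL) */
	}

	/* verify the caller holds the HW lock; was LOCK_TEST_WITH_RETURN(dev, filp) */
	LOCK_TEST_WITH_RETURN(dev, file_priv);

	/* *data is the ioctl argument, copied in and back out by the DRM core,
	 * so the DRM_COPY_{FROM,TO}_USER_IOCTL calls disappear entirely. */
	return 0;
}

Registration likewise moves from an indexed array initializer to the table macro, e.g. DRM_IOCTL_DEF(DRM_MGA_EXAMPLE, mga_example, DRM_AUTH) in mga_ioctls[].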
diff --git a/shared-core/mga_warp.c b/shared-core/mga_warp.c
index 05ed8058..9a44bddb 100644
--- a/shared-core/mga_warp.c
+++ b/shared-core/mga_warp.c
@@ -146,7 +146,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
if (size > dev_priv->warp->size) {
DRM_ERROR("microcode too large! (%u > %lu)\n",
size, dev_priv->warp->size);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
switch (dev_priv->chipset) {
@@ -156,7 +156,7 @@ int mga_warp_install_microcode(drm_mga_private_t * dev_priv)
case MGA_CARD_TYPE_G200:
return mga_warp_install_g200_microcode(dev_priv);
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -182,7 +182,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
MGA_WRITE(MGA_WVRTXSZ, 7);
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE |
@@ -191,7 +191,7 @@ int mga_warp_init(drm_mga_private_t * dev_priv)
if (wmisc != WMISC_EXPECTED) {
DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n",
wmisc, WMISC_EXPECTED);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
diff --git a/shared-core/nouveau_dma.c b/shared-core/nouveau_dma.c
new file mode 100644
index 00000000..ab502e6a
--- /dev/null
+++ b/shared-core/nouveau_dma.c
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+#define SKIPS 8
+
+int
+nouveau_dma_channel_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ struct mem_block *pushbuf;
+ int grclass, ret, i;
+
+ DRM_DEBUG("\n");
+
+ pushbuf = nouveau_mem_alloc(dev, 0, 0x8000,
+ NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED,
+ (struct drm_file *)-2);
+ if (!pushbuf) {
+ DRM_ERROR("Failed to allocate DMA push buffer\n");
+ return -ENOMEM;
+ }
+
+ /* Allocate channel */
+ ret = nouveau_fifo_alloc(dev, &dchan->chan, (struct drm_file *)-2,
+ pushbuf, NvDmaFB, NvDmaTT);
+ if (ret) {
+ DRM_ERROR("Error allocating GPU channel: %d\n", ret);
+ return ret;
+ }
+ DRM_DEBUG("Using FIFO channel %d\n", dchan->chan->id);
+
+ /* Map push buffer */
+ drm_core_ioremap(dchan->chan->pushbuf_mem->map, dev);
+ if (!dchan->chan->pushbuf_mem->map->handle) {
+ DRM_ERROR("Failed to ioremap push buffer\n");
+ return -EINVAL;
+ }
+ dchan->pushbuf = (void*)dchan->chan->pushbuf_mem->map->handle;
+
+ /* Initialise DMA vars */
+ dchan->max = (dchan->chan->pushbuf_mem->size >> 2) - 2;
+ dchan->put = dchan->chan->pushbuf_base >> 2;
+ dchan->cur = dchan->put;
+ dchan->free = dchan->max - dchan->cur;
+
+ /* Insert NOPS for SKIPS */
+ dchan->free -= SKIPS;
+ dchan->push_free = SKIPS;
+ for (i=0; i<SKIPS; i++)
+ OUT_RING(0);
+
+ /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier */
+ if ((ret = nouveau_notifier_alloc(dchan->chan, NvNotify0, 1,
+ &dchan->notify0_offset))) {
+ DRM_ERROR("Error allocating NvNotify0: %d\n", ret);
+ return ret;
+ }
+
+ /* We use NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
+ if (dev_priv->card_type < NV_50) grclass = NV_MEMORY_TO_MEMORY_FORMAT;
+ else grclass = NV50_MEMORY_TO_MEMORY_FORMAT;
+ if ((ret = nouveau_gpuobj_gr_new(dchan->chan, grclass, &gpuobj))) {
+ DRM_ERROR("Error creating NvM2MF: %d\n", ret);
+ return ret;
+ }
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, dchan->chan, NvM2MF,
+ gpuobj, NULL))) {
+ DRM_ERROR("Error referencing NvM2MF: %d\n", ret);
+ return ret;
+ }
+ dchan->m2mf_dma_source = NvDmaFB;
+ dchan->m2mf_dma_destin = NvDmaFB;
+
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
+ OUT_RING (NvM2MF);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY, 1);
+ OUT_RING (NvNotify0);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
+ OUT_RING (dchan->m2mf_dma_source);
+ OUT_RING (dchan->m2mf_dma_destin);
+ FIRE_RING();
+
+ return 0;
+}
+
+void
+nouveau_dma_channel_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+
+ DRM_DEBUG("\n");
+
+ if (dchan->chan) {
+ nouveau_fifo_free(dchan->chan);
+ dchan->chan = NULL;
+ }
+}
+
+#define RING_SKIPS 8
+
+#define READ_GET() ((NV_READ(NV03_FIFO_REGS_DMAGET(dchan->chan->id)) - \
+ dchan->chan->pushbuf_base) >> 2)
+#define WRITE_PUT(val) do { \
+ NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id), \
+ ((val) << 2) + dchan->chan->pushbuf_base); \
+} while(0)
+
+int
+nouveau_dma_wait(struct drm_device *dev, int size)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+ uint32_t get;
+
+ while (dchan->free < size) {
+ get = READ_GET();
+
+ if (dchan->put >= get) {
+ dchan->free = dchan->max - dchan->cur;
+
+ if (dchan->free < size) {
+ dchan->push_free = 1;
+ OUT_RING(0x20000000|dchan->chan->pushbuf_base);
+ if (get <= RING_SKIPS) {
+ /*corner case - will be idle*/
+ if (dchan->put <= RING_SKIPS)
+ WRITE_PUT(RING_SKIPS + 1);
+
+ do {
+ get = READ_GET();
+ } while (get <= RING_SKIPS);
+ }
+
+ WRITE_PUT(RING_SKIPS);
+ dchan->cur = dchan->put = RING_SKIPS;
+ dchan->free = get - (RING_SKIPS + 1);
+ }
+ } else {
+ dchan->free = get - dchan->cur - 1;
+ }
+ }
+
+ return 0;
+}
+
diff --git a/shared-core/nouveau_dma.h b/shared-core/nouveau_dma.h
new file mode 100644
index 00000000..5e51c1c4
--- /dev/null
+++ b/shared-core/nouveau_dma.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __NOUVEAU_DMA_H__
+#define __NOUVEAU_DMA_H__
+
+typedef enum {
+ NvSubM2MF = 0,
+} nouveau_subchannel_id_t;
+
+typedef enum {
+ NvM2MF = 0x80039001,
+ NvDmaFB = 0x8003d001,
+ NvDmaTT = 0x8003d002,
+ NvNotify0 = 0x8003d003
+} nouveau_object_handle_t;
+
+#define NV_MEMORY_TO_MEMORY_FORMAT 0x00000039
+#define NV_MEMORY_TO_MEMORY_FORMAT_NAME 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_REF 0x00000050
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOP 0x00000100
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY 0x00000104
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE 0x00000000
+#define NV_MEMORY_TO_MEMORY_FORMAT_NOTIFY_STYLE_WRITE_LE_AWAKEN 0x00000001
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_NOTIFY 0x00000180
+#define NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE 0x00000184
+#define NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN 0x0000030c
+
+#define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK200 0x00000200
+#define NV50_MEMORY_TO_MEMORY_FORMAT_UNK21C 0x0000021c
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN_HIGH 0x00000238
+#define NV50_MEMORY_TO_MEMORY_FORMAT_OFFSET_OUT_HIGH 0x0000023c
+
+#define BEGIN_RING(subc, mthd, cnt) do { \
+ int push_size = (cnt) + 1; \
+ if (dchan->push_free) { \
+ DRM_ERROR("prior packet incomplete: %d\n", dchan->push_free); \
+ break; \
+ } \
+ if (dchan->free < push_size) { \
+ if (nouveau_dma_wait(dev, push_size)) { \
+ DRM_ERROR("FIFO timeout\n"); \
+ break; \
+ } \
+ } \
+ dchan->free -= push_size; \
+ dchan->push_free = push_size; \
+ OUT_RING(((cnt)<<18) | ((subc)<<15) | mthd); \
+} while(0)
+
+#define OUT_RING(data) do { \
+ if (dchan->push_free == 0) { \
+ DRM_ERROR("no space left in packet\n"); \
+ break; \
+ } \
+ dchan->pushbuf[dchan->cur++] = (data); \
+ dchan->push_free--; \
+} while(0)
+
+#define FIRE_RING() do { \
+ if (dchan->push_free) { \
+ DRM_ERROR("packet incomplete: %d\n", dchan->push_free); \
+ break; \
+ } \
+ if (dchan->cur != dchan->put) { \
+ DRM_MEMORYBARRIER(); \
+ dchan->put = dchan->cur; \
+ NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id), \
+ (dchan->put<<2)); \
+ } \
+} while(0)
+
+#endif
+
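
nouveau_dma_channel_init() in nouveau_dma.c already shows the intended pattern for these macros: BEGIN_RING() reserves space (calling nouveau_dma_wait() if the push buffer is short on room) and emits the method header, OUT_RING() appends the data words, and FIRE_RING() issues a memory barrier and publishes the new PUT pointer. A minimal sketch; nouveau_dma_example_nop is a hypothetical function, and the local variables dev, dev_priv and dchan are required because the macros expand references to them exactly as in nouveau_dma.c:

static void nouveau_dma_example_nop(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;

	/* header word encodes (count << 18) | (subchannel << 15) | method */
	BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
	OUT_RING  (0);		/* the single data word declared above */
	FIRE_RING();		/* barrier, then bump NV03_FIFO_REGS_DMAPUT */
}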
diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h
index 0758991a..c4f1e9a4 100644
--- a/shared-core/nouveau_drm.h
+++ b/shared-core/nouveau_drm.h
@@ -25,9 +25,9 @@
#ifndef __NOUVEAU_DRM_H__
#define __NOUVEAU_DRM_H__
-#define NOUVEAU_DRM_HEADER_PATCHLEVEL 7
+#define NOUVEAU_DRM_HEADER_PATCHLEVEL 10
-typedef struct drm_nouveau_fifo_alloc {
+struct drm_nouveau_channel_alloc {
uint32_t fb_ctxdma_handle;
uint32_t tt_ctxdma_handle;
@@ -42,50 +42,58 @@ typedef struct drm_nouveau_fifo_alloc {
/* Notifier memory */
drm_handle_t notifier;
int notifier_size;
-}
-drm_nouveau_fifo_alloc_t;
+};
+
+struct drm_nouveau_channel_free {
+ int channel;
+};
-typedef struct drm_nouveau_grobj_alloc {
+struct drm_nouveau_grobj_alloc {
int channel;
uint32_t handle;
int class;
-}
-drm_nouveau_grobj_alloc_t;
+};
#define NOUVEAU_MEM_ACCESS_RO 1
#define NOUVEAU_MEM_ACCESS_WO 2
#define NOUVEAU_MEM_ACCESS_RW 3
-typedef struct drm_nouveau_notifier_alloc {
+struct drm_nouveau_notifierobj_alloc {
int channel;
uint32_t handle;
int count;
uint32_t offset;
-}
-drm_nouveau_notifier_alloc_t;
+};
+
+struct drm_nouveau_gpuobj_free {
+ int channel;
+ uint32_t handle;
+};
#define NOUVEAU_MEM_FB 0x00000001
#define NOUVEAU_MEM_AGP 0x00000002
#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004
#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008
-#define NOUVEAU_MEM_PINNED 0x00000010
-#define NOUVEAU_MEM_USER_BACKED 0x00000020
-#define NOUVEAU_MEM_MAPPED 0x00000040
-#define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */
-#define NOUVEAU_MEM_NOTIFIER 0x00000100 /* internal */
-typedef struct drm_nouveau_mem_alloc {
+#define NOUVEAU_MEM_PCI 0x00000010
+#define NOUVEAU_MEM_PCI_ACCEPTABLE 0x00000020
+#define NOUVEAU_MEM_PINNED 0x00000040
+#define NOUVEAU_MEM_USER_BACKED 0x00000080
+#define NOUVEAU_MEM_MAPPED 0x00000100
+#define NOUVEAU_MEM_INSTANCE 0x00000200 /* internal */
+#define NOUVEAU_MEM_NOTIFIER 0x00000400 /* internal */
+
+struct drm_nouveau_mem_alloc {
int flags;
int alignment;
uint64_t size; // in bytes
- uint64_t region_offset;
-}
-drm_nouveau_mem_alloc_t;
+ uint64_t offset;
+ drm_handle_t map_handle;
+};
-typedef struct drm_nouveau_mem_free {
- uint64_t region_offset;
+struct drm_nouveau_mem_free {
+ uint64_t offset;
int flags;
-}
-drm_nouveau_mem_free_t;
+};
/* FIXME : maybe unify {GET,SET}PARAMs */
#define NOUVEAU_GETPARAM_PCI_VENDOR 3
@@ -95,29 +103,27 @@ drm_nouveau_mem_free_t;
#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7
#define NOUVEAU_GETPARAM_FB_SIZE 8
#define NOUVEAU_GETPARAM_AGP_SIZE 9
-typedef struct drm_nouveau_getparam {
+#define NOUVEAU_GETPARAM_PCI_PHYSICAL 10
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+struct drm_nouveau_getparam {
uint64_t param;
uint64_t value;
-}
-drm_nouveau_getparam_t;
+};
#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1
#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2
-typedef struct drm_nouveau_setparam {
+struct drm_nouveau_setparam {
uint64_t param;
uint64_t value;
-}
-drm_nouveau_setparam_t;
+};
enum nouveau_card_type {
NV_UNKNOWN =0,
- NV_01 =1,
- NV_03 =3,
NV_04 =4,
NV_05 =5,
NV_10 =10,
- NV_11 =10,
- NV_15 =10,
+ NV_11 =11,
+ NV_15 =11,
NV_17 =17,
NV_20 =20,
NV_25 =20,
@@ -137,20 +143,22 @@ enum nouveau_bus_type {
#define NOUVEAU_MAX_SAREA_CLIPRECTS 16
-typedef struct drm_nouveau_sarea {
+struct drm_nouveau_sarea {
/* the cliprects */
- drm_clip_rect_t boxes[NOUVEAU_MAX_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[NOUVEAU_MAX_SAREA_CLIPRECTS];
unsigned int nbox;
-}
-drm_nouveau_sarea_t;
-
-#define DRM_NOUVEAU_FIFO_ALLOC 0x00
-#define DRM_NOUVEAU_GROBJ_ALLOC 0x01
-#define DRM_NOUVEAU_NOTIFIER_ALLOC 0x02
-#define DRM_NOUVEAU_MEM_ALLOC 0x03
-#define DRM_NOUVEAU_MEM_FREE 0x04
-#define DRM_NOUVEAU_GETPARAM 0x05
-#define DRM_NOUVEAU_SETPARAM 0x06
+};
+
+#define DRM_NOUVEAU_CARD_INIT 0x00
+#define DRM_NOUVEAU_GETPARAM 0x01
+#define DRM_NOUVEAU_SETPARAM 0x02
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x03
+#define DRM_NOUVEAU_CHANNEL_FREE 0x04
+#define DRM_NOUVEAU_GROBJ_ALLOC 0x05
+#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x06
+#define DRM_NOUVEAU_GPUOBJ_FREE 0x07
+#define DRM_NOUVEAU_MEM_ALLOC 0x08
+#define DRM_NOUVEAU_MEM_FREE 0x09
#endif /* __NOUVEAU_DRM_H__ */
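
On the user-space side, the renamed structures change how allocations are requested: struct drm_nouveau_mem_alloc now returns offset plus a map_handle for mappable allocations instead of the old region_offset. A hedged sketch of the expected call, assuming the usual libdrm drmCommandWriteRead() wrapper, an already-open DRM fd, and xf86drm.h/nouveau_drm.h included (all outside this patch):

static int nouveau_example_alloc(int fd, uint64_t *offset)
{
	struct drm_nouveau_mem_alloc req = {
		.flags     = NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED,
		.alignment = 0,
		.size      = 64 * 1024,	/* in bytes, per the field comment */
	};
	int ret;

	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_MEM_ALLOC, &req, sizeof(req));
	if (ret)
		return ret;

	/* the kernel fills req.offset; for MAPPED allocations it also fills
	 * req.map_handle, suitable for a later drmMap() of the region */
	*offset = req.offset;
	return 0;
}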
diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h
index 3cca07fc..e96c8fad 100644
--- a/shared-core/nouveau_drv.h
+++ b/shared-core/nouveau_drv.h
@@ -34,7 +34,7 @@
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 7
+#define DRIVER_PATCHLEVEL 10
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000
@@ -47,9 +47,10 @@ struct mem_block {
struct mem_block *prev;
uint64_t start;
uint64_t size;
- DRMFILE filp; /* 0: free, -1: heap, other: real files */
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
int flags;
drm_local_map_t *map;
+ drm_handle_t map_handle;
};
enum nouveau_flags {
@@ -57,43 +58,96 @@ enum nouveau_flags {
NV_NFORCE2 =0x20000000
};
-struct nouveau_object
-{
- struct nouveau_object *next;
- struct nouveau_object *prev;
- int channel;
+#define NVOBJ_ENGINE_SW 0
+#define NVOBJ_ENGINE_GR 1
+#define NVOBJ_ENGINE_INT 0xdeadbeef
+
+#define NVOBJ_FLAG_ALLOW_NO_REFS (1 << 0)
+#define NVOBJ_FLAG_ZERO_ALLOC (1 << 1)
+#define NVOBJ_FLAG_ZERO_FREE (1 << 2)
+#define NVOBJ_FLAG_FAKE (1 << 3)
+struct nouveau_gpuobj {
+ struct list_head list;
- struct mem_block *instance;
+ int im_channel;
+ struct mem_block *im_pramin;
+ struct mem_block *im_backing;
+ int im_bound;
- uint32_t handle;
- int class;
- int engine;
+ uint32_t flags;
+ int refcount;
+
+ uint32_t engine;
+ uint32_t class;
+
+ void (*dtor)(struct drm_device *, struct nouveau_gpuobj *);
+ void *priv;
};
-struct nouveau_fifo
+struct nouveau_gpuobj_ref {
+ struct list_head list;
+
+ struct nouveau_gpuobj *gpuobj;
+ uint32_t instance;
+
+ int channel;
+ int handle;
+};
+
+struct nouveau_channel
{
- int used;
+ struct drm_device *dev;
+ int id;
+
/* owner of this fifo */
- DRMFILE filp;
+ struct drm_file *file_priv;
/* mapping of the fifo itself */
drm_local_map_t *map;
/* mapping of the regs controlling the fifo */
drm_local_map_t *regs;
- /* dma object for the command buffer itself */
- struct mem_block *cmdbuf_mem;
- struct nouveau_object *cmdbuf_obj;
- uint32_t pushbuf_base;
- /* notifier memory */
+
+ /* DMA push buffer */
+ struct nouveau_gpuobj_ref *pushbuf;
+ struct mem_block *pushbuf_mem;
+ uint32_t pushbuf_base;
+
+ /* Notifier memory */
struct mem_block *notifier_block;
struct mem_block *notifier_heap;
drm_local_map_t *notifier_map;
- /* PGRAPH context, for cards that keep it in RAMIN */
- struct mem_block *ramin_grctx;
- /* objects belonging to this fifo */
- struct nouveau_object *objs;
- /* XXX dynamic alloc ? */
- uint32_t pgraph_ctx [340];
+ /* PFIFO context */
+ struct nouveau_gpuobj_ref *ramfc;
+
+ /* PGRAPH context */
+ struct nouveau_gpuobj_ref *ramin_grctx;
+ uint32_t pgraph_ctx [340]; /* XXX dynamic alloc ? */
+
+ /* NV50 VM */
+ struct nouveau_gpuobj *vm_pd;
+ struct nouveau_gpuobj_ref *vm_gart_pt;
+
+ /* Objects */
+ struct nouveau_gpuobj_ref *ramin; /* Private instmem */
+ struct mem_block *ramin_heap; /* Private PRAMIN heap */
+ struct nouveau_gpuobj_ref *ramht; /* Hash table */
+ struct list_head ramht_refs; /* Objects referenced by RAMHT */
+};
+
+struct nouveau_drm_channel {
+ struct nouveau_channel *chan;
+
+ /* DMA state */
+ int max, put, cur, free;
+ int push_free;
+ volatile uint32_t *pushbuf;
+
+ /* Notifiers */
+ uint32_t notify0_offset;
+
+ /* Buffer moves */
+ uint32_t m2mf_dma_source;
+ uint32_t m2mf_dma_destin;
};
struct nouveau_config {
@@ -103,44 +157,73 @@ struct nouveau_config {
} cmdbuf;
};
-typedef struct nouveau_engine_func {
- struct {
- int (*init)(drm_device_t *dev);
- void (*takedown)(drm_device_t *dev);
- } mc;
+struct nouveau_instmem_engine {
+ void *priv;
- struct {
- int (*init)(drm_device_t *dev);
- void (*takedown)(drm_device_t *dev);
- } timer;
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
- struct {
- int (*init)(drm_device_t *dev);
- void (*takedown)(drm_device_t *dev);
- } fb;
+ int (*populate)(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+ void (*clear)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*bind)(struct drm_device *, struct nouveau_gpuobj *);
+ int (*unbind)(struct drm_device *, struct nouveau_gpuobj *);
+};
- struct {
- int (*init)(drm_device_t *);
- void (*takedown)(drm_device_t *);
+struct nouveau_mc_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
- int (*create_context)(drm_device_t *, int channel);
- void (*destroy_context)(drm_device_t *, int channel);
- int (*load_context)(drm_device_t *, int channel);
- int (*save_context)(drm_device_t *, int channel);
- } graph;
+struct nouveau_timer_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+ uint64_t (*read)(struct drm_device *dev);
+};
- struct {
- int (*init)(drm_device_t *);
- void (*takedown)(drm_device_t *);
+struct nouveau_fb_engine {
+ int (*init)(struct drm_device *dev);
+ void (*takedown)(struct drm_device *dev);
+};
+
+struct nouveau_fifo_engine {
+ void *priv;
+
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*save_context)(struct nouveau_channel *);
+};
+
+struct nouveau_pgraph_engine {
+ int (*init)(struct drm_device *);
+ void (*takedown)(struct drm_device *);
+
+ int (*create_context)(struct nouveau_channel *);
+ void (*destroy_context)(struct nouveau_channel *);
+ int (*load_context)(struct nouveau_channel *);
+ int (*save_context)(struct nouveau_channel *);
+};
+
+struct nouveau_engine {
+ struct nouveau_instmem_engine instmem;
+ struct nouveau_mc_engine mc;
+ struct nouveau_timer_engine timer;
+ struct nouveau_fb_engine fb;
+ struct nouveau_pgraph_engine graph;
+ struct nouveau_fifo_engine fifo;
+};
- int (*create_context)(drm_device_t *, int channel);
- void (*destroy_context)(drm_device_t *, int channel);
- int (*load_context)(drm_device_t *, int channel);
- int (*save_context)(drm_device_t *, int channel);
- } fifo;
-} nouveau_engine_func_t;
+struct drm_nouveau_private {
+ enum {
+ NOUVEAU_CARD_INIT_DOWN,
+ NOUVEAU_CARD_INIT_DONE,
+ NOUVEAU_CARD_INIT_FAILED
+ } init_state;
-typedef struct drm_nouveau_private {
/* the card type, takes NV_* as values */
int card_type;
/* exact chipset, derived from NV_PMC_BOOT_0 */
@@ -152,12 +235,14 @@ typedef struct drm_nouveau_private {
drm_local_map_t *ramin; /* NV40 onwards */
int fifo_alloc_count;
- struct nouveau_fifo fifos[NV_MAX_FIFO_NUMBER];
+ struct nouveau_channel *fifos[NV_MAX_FIFO_NUMBER];
- struct nouveau_engine_func Engine;
+ struct nouveau_engine Engine;
+ struct nouveau_drm_channel channel;
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
- uint32_t ramin_size;
+ struct nouveau_gpuobj *ramht;
+ uint32_t ramin_rsvd_vram;
uint32_t ramht_offset;
uint32_t ramht_size;
uint32_t ramht_bits;
@@ -169,8 +254,24 @@ typedef struct drm_nouveau_private {
/* base physical addresses */
uint64_t fb_phys;
uint64_t fb_available_size;
- uint64_t agp_phys;
- uint64_t agp_available_size;
+
+ struct {
+ enum {
+ NOUVEAU_GART_NONE = 0,
+ NOUVEAU_GART_AGP,
+ NOUVEAU_GART_SGDMA
+ } type;
+ uint64_t aper_base;
+ uint64_t aper_size;
+
+ struct nouveau_gpuobj *sg_ctxdma;
+ struct page *sg_dummy_page;
+ dma_addr_t sg_dummy_bus;
+
+ /* nottm hack */
+ struct drm_ttm_backend *sg_be;
+ unsigned long sg_handle;
+ } gart_info;
/* the mtrr covering the FB */
int fb_mtrr;
@@ -179,193 +280,285 @@ typedef struct drm_nouveau_private {
struct mem_block *fb_heap;
struct mem_block *fb_nomap_heap;
struct mem_block *ramin_heap;
+ struct mem_block *pci_heap;
/* context table pointed to by NV_PGRAPH_CHANNEL_CTX_TABLE (0x400780) */
uint32_t ctx_table_size;
- struct mem_block *ctx_table;
+ struct nouveau_gpuobj_ref *ctx_table;
struct nouveau_config config;
-}
-drm_nouveau_private_t;
+
+ struct list_head gpuobj_list;
+};
+
+#define NOUVEAU_CHECK_INITIALISED_WITH_RETURN do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (nv->init_state != NOUVEAU_CARD_INIT_DONE) { \
+ DRM_ERROR("called without init\n"); \
+ return -EINVAL; \
+ } \
+} while(0)
+
+#define NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(id,cl,ch) do { \
+ struct drm_nouveau_private *nv = dev->dev_private; \
+ if (!nouveau_fifo_owner(dev, (cl), (id))) { \
+ DRM_ERROR("pid %d doesn't own channel %d\n", \
+ DRM_CURRENTPID, (id)); \
+ return -EPERM; \
+ } \
+ (ch) = nv->fifos[(id)]; \
+} while(0)
/* nouveau_state.c */
-extern void nouveau_preclose(drm_device_t * dev, DRMFILE filp);
-extern int nouveau_load(struct drm_device *dev, unsigned long flags);
-extern int nouveau_firstopen(struct drm_device *dev);
-extern void nouveau_lastclose(struct drm_device *dev);
-extern int nouveau_unload(struct drm_device *dev);
-extern int nouveau_ioctl_getparam(DRM_IOCTL_ARGS);
-extern int nouveau_ioctl_setparam(DRM_IOCTL_ARGS);
-extern void nouveau_wait_for_idle(struct drm_device *dev);
-extern int nouveau_ioctl_card_init(DRM_IOCTL_ARGS);
+extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
+extern int nouveau_load(struct drm_device *, unsigned long flags);
+extern int nouveau_firstopen(struct drm_device *);
+extern void nouveau_lastclose(struct drm_device *);
+extern int nouveau_unload(struct drm_device *);
+extern int nouveau_ioctl_getparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_setparam(struct drm_device *, void *data,
+ struct drm_file *);
+extern void nouveau_wait_for_idle(struct drm_device *);
+extern int nouveau_card_init(struct drm_device *);
+extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
+ struct drm_file *);
/* nouveau_mem.c */
-extern int nouveau_mem_init_heap(struct mem_block **,
- uint64_t start, uint64_t size);
+extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
+ uint64_t size);
extern struct mem_block *nouveau_mem_alloc_block(struct mem_block *,
uint64_t size, int align2,
- DRMFILE);
-extern void nouveau_mem_free_block(struct mem_block *);
-extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev);
-extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap);
-extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS);
-extern int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS);
-extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp);
-extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*);
-extern int nouveau_mem_init(struct drm_device *dev);
-extern void nouveau_mem_close(struct drm_device *dev);
-extern int nouveau_instmem_init(struct drm_device *dev);
-extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev,
- uint32_t size, uint32_t align);
-extern void nouveau_instmem_free(struct drm_device *dev,
- struct mem_block *block);
+ struct drm_file *);
+extern void nouveau_mem_takedown(struct mem_block **heap);
+extern void nouveau_mem_free_block(struct mem_block *);
+extern uint64_t nouveau_mem_fb_amount(struct drm_device *);
+extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
+extern int nouveau_ioctl_mem_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_mem_free(struct drm_device *, void *data,
+ struct drm_file *);
+extern struct mem_block* nouveau_mem_alloc(struct drm_device *,
+ int alignment, uint64_t size,
+ int flags, struct drm_file *);
+extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);
+extern int nouveau_mem_init(struct drm_device *);
+extern void nouveau_mem_close(struct drm_device *);
/* nouveau_notifier.c */
-extern int nouveau_notifier_init_channel(drm_device_t *, int channel, DRMFILE);
-extern void nouveau_notifier_takedown_channel(drm_device_t *, int channel);
-extern int nouveau_notifier_alloc(drm_device_t *, int channel,
- uint32_t handle, int cout, uint32_t *offset);
-extern int nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS);
+extern int nouveau_notifier_init_channel(struct nouveau_channel *);
+extern void nouveau_notifier_takedown_channel(struct nouveau_channel *);
+extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle,
+ int cout, uint32_t *offset);
+extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_notifier_free(struct drm_device *, void *data,
+ struct drm_file *);
/* nouveau_fifo.c */
-extern int nouveau_fifo_init(drm_device_t *dev);
-extern int nouveau_fifo_number(drm_device_t *dev);
-extern int nouveau_fifo_ctx_size(drm_device_t *dev);
-extern void nouveau_fifo_cleanup(drm_device_t *dev, DRMFILE filp);
-extern int nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel);
-extern void nouveau_fifo_free(drm_device_t *dev, int channel);
+extern int nouveau_fifo_init(struct drm_device *);
+extern int nouveau_fifo_number(struct drm_device *);
+extern int nouveau_fifo_ctx_size(struct drm_device *);
+extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *);
+extern int nouveau_fifo_owner(struct drm_device *, struct drm_file *,
+ int channel);
+extern int nouveau_fifo_alloc(struct drm_device *dev,
+ struct nouveau_channel **chan,
+ struct drm_file *file_priv,
+ struct mem_block *pushbuf,
+ uint32_t fb_ctxdma, uint32_t tt_ctxdma);
+extern void nouveau_fifo_free(struct nouveau_channel *);
/* nouveau_object.c */
-extern int nouveau_object_init_channel(drm_device_t *, int channel,
- uint32_t vram_handle,
- uint32_t tt_handle);
-extern void nouveau_object_takedown_channel(drm_device_t *dev, int channel);
-extern void nouveau_object_cleanup(drm_device_t *dev, int channel);
-extern int nouveau_ramht_insert(drm_device_t *, int channel,
- uint32_t handle, struct nouveau_object *);
-extern struct nouveau_object *
-nouveau_object_gr_create(drm_device_t *dev, int channel, int class);
-extern struct nouveau_object *
-nouveau_object_dma_create(drm_device_t *dev, int channel, int class,
- uint32_t offset, uint32_t size,
- int access, int target);
-extern void nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj);
-extern int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS);
-extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem);
+extern int nouveau_gpuobj_early_init(struct drm_device *);
+extern int nouveau_gpuobj_init(struct drm_device *);
+extern void nouveau_gpuobj_takedown(struct drm_device *);
+extern void nouveau_gpuobj_late_takedown(struct drm_device *);
+extern int nouveau_gpuobj_channel_init(struct nouveau_channel *,
+ uint32_t vram_h, uint32_t tt_h);
+extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
+extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
+ int size, int align, uint32_t flags,
+ struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
+ uint32_t handle, struct nouveau_gpuobj *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_del(struct drm_device *,
+ struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret);
+extern int nouveau_gpuobj_new_ref(struct drm_device *,
+ struct nouveau_channel *alloc_chan,
+ struct nouveau_channel *ref_chan,
+ uint32_t handle, int size, int align,
+ uint32_t flags, struct nouveau_gpuobj_ref **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *,
+ uint32_t p_offset, uint32_t b_offset,
+ uint32_t size, uint32_t flags,
+ struct nouveau_gpuobj **,
+ struct nouveau_gpuobj_ref**);
+extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
+ uint64_t offset, uint64_t size,
+ int access, struct nouveau_gpuobj **,
+ uint32_t *o_ret);
+extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
+ struct nouveau_gpuobj **);
+extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
+ struct drm_file *);
+extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
+ struct drm_file *);
/* nouveau_irq.c */
extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS);
-extern void nouveau_irq_preinstall(drm_device_t*);
-extern void nouveau_irq_postinstall(drm_device_t*);
-extern void nouveau_irq_uninstall(drm_device_t*);
+extern void nouveau_irq_preinstall(struct drm_device *);
+extern void nouveau_irq_postinstall(struct drm_device *);
+extern void nouveau_irq_uninstall(struct drm_device *);
+
+/* nouveau_sgdma.c */
+extern int nouveau_sgdma_init(struct drm_device *);
+extern void nouveau_sgdma_takedown(struct drm_device *);
+extern int nouveau_sgdma_get_page(struct drm_device *, uint32_t offset,
+ uint32_t *page);
+extern struct drm_ttm_backend *nouveau_sgdma_init_ttm(struct drm_device *);
+extern int nouveau_sgdma_nottm_hack_init(struct drm_device *);
+extern void nouveau_sgdma_nottm_hack_takedown(struct drm_device *);
+
+/* nouveau_dma.c */
+extern int nouveau_dma_channel_init(struct drm_device *);
+extern void nouveau_dma_channel_takedown(struct drm_device *);
+extern int nouveau_dma_wait(struct drm_device *, int size);
/* nv04_fb.c */
-extern int nv04_fb_init(drm_device_t *dev);
-extern void nv04_fb_takedown(drm_device_t *dev);
+extern int nv04_fb_init(struct drm_device *);
+extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
-extern int nv10_fb_init(drm_device_t *dev);
-extern void nv10_fb_takedown(drm_device_t *dev);
+extern int nv10_fb_init(struct drm_device *);
+extern void nv10_fb_takedown(struct drm_device *);
/* nv40_fb.c */
-extern int nv40_fb_init(drm_device_t *dev);
-extern void nv40_fb_takedown(drm_device_t *dev);
+extern int nv40_fb_init(struct drm_device *);
+extern void nv40_fb_takedown(struct drm_device *);
/* nv04_fifo.c */
-extern int nv04_fifo_create_context(drm_device_t *dev, int channel);
-extern void nv04_fifo_destroy_context(drm_device_t *dev, int channel);
-extern int nv04_fifo_load_context(drm_device_t *dev, int channel);
-extern int nv04_fifo_save_context(drm_device_t *dev, int channel);
+extern int nv04_fifo_create_context(struct nouveau_channel *);
+extern void nv04_fifo_destroy_context(struct nouveau_channel *);
+extern int nv04_fifo_load_context(struct nouveau_channel *);
+extern int nv04_fifo_save_context(struct nouveau_channel *);
/* nv10_fifo.c */
-extern int nv10_fifo_create_context(drm_device_t *dev, int channel);
-extern void nv10_fifo_destroy_context(drm_device_t *dev, int channel);
-extern int nv10_fifo_load_context(drm_device_t *dev, int channel);
-extern int nv10_fifo_save_context(drm_device_t *dev, int channel);
+extern int nv10_fifo_create_context(struct nouveau_channel *);
+extern void nv10_fifo_destroy_context(struct nouveau_channel *);
+extern int nv10_fifo_load_context(struct nouveau_channel *);
+extern int nv10_fifo_save_context(struct nouveau_channel *);
/* nv40_fifo.c */
-extern int nv40_fifo_create_context(drm_device_t *, int channel);
-extern void nv40_fifo_destroy_context(drm_device_t *, int channel);
-extern int nv40_fifo_load_context(drm_device_t *, int channel);
-extern int nv40_fifo_save_context(drm_device_t *, int channel);
+extern int nv40_fifo_init(struct drm_device *);
+extern int nv40_fifo_create_context(struct nouveau_channel *);
+extern void nv40_fifo_destroy_context(struct nouveau_channel *);
+extern int nv40_fifo_load_context(struct nouveau_channel *);
+extern int nv40_fifo_save_context(struct nouveau_channel *);
/* nv50_fifo.c */
-extern int nv50_fifo_init(drm_device_t *);
-extern void nv50_fifo_takedown(drm_device_t *);
-extern int nv50_fifo_create_context(drm_device_t *, int channel);
-extern void nv50_fifo_destroy_context(drm_device_t *, int channel);
-extern int nv50_fifo_load_context(drm_device_t *, int channel);
-extern int nv50_fifo_save_context(drm_device_t *, int channel);
+extern int nv50_fifo_init(struct drm_device *);
+extern void nv50_fifo_takedown(struct drm_device *);
+extern int nv50_fifo_create_context(struct nouveau_channel *);
+extern void nv50_fifo_destroy_context(struct nouveau_channel *);
+extern int nv50_fifo_load_context(struct nouveau_channel *);
+extern int nv50_fifo_save_context(struct nouveau_channel *);
/* nv04_graph.c */
-extern void nouveau_nv04_context_switch(drm_device_t *dev);
-extern int nv04_graph_init(drm_device_t *dev);
-extern void nv04_graph_takedown(drm_device_t *dev);
-extern int nv04_graph_create_context(drm_device_t *dev, int channel);
-extern void nv04_graph_destroy_context(drm_device_t *dev, int channel);
-extern int nv04_graph_load_context(drm_device_t *dev, int channel);
-extern int nv04_graph_save_context(drm_device_t *dev, int channel);
+extern void nouveau_nv04_context_switch(struct drm_device *);
+extern int nv04_graph_init(struct drm_device *);
+extern void nv04_graph_takedown(struct drm_device *);
+extern int nv04_graph_create_context(struct nouveau_channel *);
+extern void nv04_graph_destroy_context(struct nouveau_channel *);
+extern int nv04_graph_load_context(struct nouveau_channel *);
+extern int nv04_graph_save_context(struct nouveau_channel *);
/* nv10_graph.c */
-extern void nouveau_nv10_context_switch(drm_device_t *dev);
-extern int nv10_graph_init(drm_device_t *dev);
-extern void nv10_graph_takedown(drm_device_t *dev);
-extern int nv10_graph_create_context(drm_device_t *dev, int channel);
-extern void nv10_graph_destroy_context(drm_device_t *dev, int channel);
-extern int nv10_graph_load_context(drm_device_t *dev, int channel);
-extern int nv10_graph_save_context(drm_device_t *dev, int channel);
+extern void nouveau_nv10_context_switch(struct drm_device *);
+extern int nv10_graph_init(struct drm_device *);
+extern void nv10_graph_takedown(struct drm_device *);
+extern int nv10_graph_create_context(struct nouveau_channel *);
+extern void nv10_graph_destroy_context(struct nouveau_channel *);
+extern int nv10_graph_load_context(struct nouveau_channel *);
+extern int nv10_graph_save_context(struct nouveau_channel *);
/* nv20_graph.c */
-extern void nouveau_nv20_context_switch(drm_device_t *dev);
-extern int nv20_graph_init(drm_device_t *dev);
-extern void nv20_graph_takedown(drm_device_t *dev);
-extern int nv20_graph_create_context(drm_device_t *dev, int channel);
-extern void nv20_graph_destroy_context(drm_device_t *dev, int channel);
-extern int nv20_graph_load_context(drm_device_t *dev, int channel);
-extern int nv20_graph_save_context(drm_device_t *dev, int channel);
+extern void nouveau_nv20_context_switch(struct drm_device *);
+extern int nv20_graph_init(struct drm_device *);
+extern void nv20_graph_takedown(struct drm_device *);
+extern int nv20_graph_create_context(struct nouveau_channel *);
+extern void nv20_graph_destroy_context(struct nouveau_channel *);
+extern int nv20_graph_load_context(struct nouveau_channel *);
+extern int nv20_graph_save_context(struct nouveau_channel *);
/* nv30_graph.c */
-extern int nv30_graph_init(drm_device_t *dev);
-extern void nv30_graph_takedown(drm_device_t *dev);
-extern int nv30_graph_create_context(drm_device_t *, int channel);
-extern void nv30_graph_destroy_context(drm_device_t *, int channel);
-extern int nv30_graph_load_context(drm_device_t *, int channel);
-extern int nv30_graph_save_context(drm_device_t *, int channel);
+extern int nv30_graph_init(struct drm_device *);
+extern void nv30_graph_takedown(struct drm_device *);
+extern int nv30_graph_create_context(struct nouveau_channel *);
+extern void nv30_graph_destroy_context(struct nouveau_channel *);
+extern int nv30_graph_load_context(struct nouveau_channel *);
+extern int nv30_graph_save_context(struct nouveau_channel *);
/* nv40_graph.c */
-extern int nv40_graph_init(drm_device_t *);
-extern void nv40_graph_takedown(drm_device_t *);
-extern int nv40_graph_create_context(drm_device_t *, int channel);
-extern void nv40_graph_destroy_context(drm_device_t *, int channel);
-extern int nv40_graph_load_context(drm_device_t *, int channel);
-extern int nv40_graph_save_context(drm_device_t *, int channel);
+extern int nv40_graph_init(struct drm_device *);
+extern void nv40_graph_takedown(struct drm_device *);
+extern int nv40_graph_create_context(struct nouveau_channel *);
+extern void nv40_graph_destroy_context(struct nouveau_channel *);
+extern int nv40_graph_load_context(struct nouveau_channel *);
+extern int nv40_graph_save_context(struct nouveau_channel *);
/* nv50_graph.c */
-extern int nv50_graph_init(drm_device_t *);
-extern void nv50_graph_takedown(drm_device_t *);
-extern int nv50_graph_create_context(drm_device_t *, int channel);
-extern void nv50_graph_destroy_context(drm_device_t *, int channel);
-extern int nv50_graph_load_context(drm_device_t *, int channel);
-extern int nv50_graph_save_context(drm_device_t *, int channel);
+extern int nv50_graph_init(struct drm_device *);
+extern void nv50_graph_takedown(struct drm_device *);
+extern int nv50_graph_create_context(struct nouveau_channel *);
+extern void nv50_graph_destroy_context(struct nouveau_channel *);
+extern int nv50_graph_load_context(struct nouveau_channel *);
+extern int nv50_graph_save_context(struct nouveau_channel *);
+
+/* nv04_instmem.c */
+extern int nv04_instmem_init(struct drm_device *);
+extern void nv04_instmem_takedown(struct drm_device *);
+extern int nv04_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv04_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv04_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
+
+/* nv50_instmem.c */
+extern int nv50_instmem_init(struct drm_device *);
+extern void nv50_instmem_takedown(struct drm_device *);
+extern int nv50_instmem_populate(struct drm_device *, struct nouveau_gpuobj *,
+ uint32_t *size);
+extern void nv50_instmem_clear(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_bind(struct drm_device *, struct nouveau_gpuobj *);
+extern int nv50_instmem_unbind(struct drm_device *, struct nouveau_gpuobj *);
/* nv04_mc.c */
-extern int nv04_mc_init(drm_device_t *dev);
-extern void nv04_mc_takedown(drm_device_t *dev);
+extern int nv04_mc_init(struct drm_device *);
+extern void nv04_mc_takedown(struct drm_device *);
/* nv40_mc.c */
-extern int nv40_mc_init(drm_device_t *dev);
-extern void nv40_mc_takedown(drm_device_t *dev);
+extern int nv40_mc_init(struct drm_device *);
+extern void nv40_mc_takedown(struct drm_device *);
/* nv50_mc.c */
-extern int nv50_mc_init(drm_device_t *dev);
-extern void nv50_mc_takedown(drm_device_t *dev);
+extern int nv50_mc_init(struct drm_device *);
+extern void nv50_mc_takedown(struct drm_device *);
/* nv04_timer.c */
-extern int nv04_timer_init(drm_device_t *dev);
-extern void nv04_timer_takedown(drm_device_t *dev);
+extern int nv04_timer_init(struct drm_device *);
+extern uint64_t nv04_timer_read(struct drm_device *);
+extern void nv04_timer_takedown(struct drm_device *);
-extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
- unsigned long arg);
+extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
#if defined(__powerpc__)
#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )
@@ -384,8 +577,8 @@ extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
#define NV_WI32(o,v) DRM_WRITE32(dev_priv->ramin, (o), (v))
#endif
-#define INSTANCE_RD(o,i) NV_RI32((o)->start + ((i)<<2))
-#define INSTANCE_WR(o,i,v) NV_WI32((o)->start + ((i)<<2), (v))
+#define INSTANCE_RD(o,i) NV_RI32((o)->im_pramin->start + ((i)<<2))
+#define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))
#endif /* __NOUVEAU_DRV_H__ */
diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c
index 81dbfcda..437c84f2 100644
--- a/shared-core/nouveau_fifo.c
+++ b/shared-core/nouveau_fifo.c
@@ -29,25 +29,25 @@
/* returns the number of hw fifos */
-int nouveau_fifo_number(drm_device_t* dev)
+int nouveau_fifo_number(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type)
{
- case NV_03:
- return 8;
case NV_04:
case NV_05:
return 16;
+ case NV_50:
+ return 128;
default:
return 32;
}
}
/* returns the size of fifo context */
-int nouveau_fifo_ctx_size(drm_device_t* dev)
+int nouveau_fifo_ctx_size(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
if (dev_priv->card_type >= NV_40)
return 128;
@@ -66,9 +66,9 @@ int nouveau_fifo_ctx_size(drm_device_t* dev)
 * see nv_driver.c : NVPreInit
*/
-static int nouveau_fifo_instmem_configure(drm_device_t *dev)
+static int nouveau_fifo_instmem_configure(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
NV_WRITE(NV03_PFIFO_RAMHT,
(0x03 << 24) /* search 128 */ |
@@ -82,9 +82,16 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev)
{
case NV_50:
case NV_40:
+ switch (dev_priv->chipset) {
+ case 0x47:
+ case 0x49:
+ case 0x4b:
+ NV_WRITE(0x2230, 1);
+ break;
+ default:
+ break;
+ }
NV_WRITE(NV40_PFIFO_RAMFC, 0x30002);
- if((dev_priv->chipset == 0x49) || (dev_priv->chipset == 0x4b))
- NV_WRITE(0x2230,0x00000001);
break;
case NV_44:
NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) |
@@ -97,9 +104,9 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev)
(1 << 16) /* 64 Bytes entry*/);
/* XXX nvidia blob set bit 18, 21,23 for nv20 & nv30 */
break;
+ case NV_11:
case NV_10:
case NV_04:
- case NV_03:
NV_WRITE(NV03_PFIFO_RAMFC, dev_priv->ramfc_offset>>8);
break;
}
@@ -107,9 +114,9 @@ static int nouveau_fifo_instmem_configure(drm_device_t *dev)
return 0;
}
-int nouveau_fifo_init(drm_device_t *dev)
+int nouveau_fifo_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
@@ -117,6 +124,10 @@ int nouveau_fifo_init(drm_device_t *dev)
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PFIFO);
+ /* Enable PFIFO error reporting */
+ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
+ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+
NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
ret = nouveau_fifo_instmem_configure(dev);
@@ -183,71 +194,90 @@ int nouveau_fifo_init(drm_device_t *dev)
}
static int
-nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel)
+nouveau_fifo_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_config *config = &dev_priv->config;
- struct mem_block *cb;
- struct nouveau_object *cb_dma = NULL;
- int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
-
- /* Defaults for unconfigured values */
- if (!config->cmdbuf.location)
- config->cmdbuf.location = NOUVEAU_MEM_FB;
- if (!config->cmdbuf.size || config->cmdbuf.size < cb_min_size)
- config->cmdbuf.size = cb_min_size;
-
- cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
- config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
- (DRMFILE)-2);
- if (!cb) {
- DRM_ERROR("Couldn't allocate DMA command buffer.\n");
- return DRM_ERR(ENOMEM);
- }
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct mem_block *pb = chan->pushbuf_mem;
+ struct nouveau_gpuobj *pushbuf = NULL;
+ int ret;
- if (cb->flags & NOUVEAU_MEM_AGP) {
- cb_dma = nouveau_object_dma_create(dev, channel,
- NV_CLASS_DMA_IN_MEMORY,
- cb->start - dev_priv->agp_phys,
- cb->size,
- NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP);
+ if (pb->flags & NOUVEAU_MEM_AGP) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, pb->start, pb->size,
+ NV_DMA_ACCESS_RO,
+ &pushbuf,
+ &chan->pushbuf_base);
+ } else
+ if (pb->flags & NOUVEAU_MEM_PCI) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ pb->start, pb->size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_PCI_NONLINEAR,
+ &pushbuf);
+ chan->pushbuf_base = 0;
} else if (dev_priv->card_type != NV_04) {
- cb_dma = nouveau_object_dma_create(dev, channel,
- NV_CLASS_DMA_IN_MEMORY,
- cb->start - drm_get_resource_start(dev, 1),
- cb->size,
- NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM);
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ pb->start, pb->size,
+ NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_VIDMEM, &pushbuf);
+ chan->pushbuf_base = 0;
} else {
		/* NV04 cmdbuf hack, from the original ddx... not sure of its
* exact reason for existing :) PCI access to cmdbuf in
* VRAM.
*/
- cb_dma = nouveau_object_dma_create(dev, channel,
- NV_CLASS_DMA_IN_MEMORY,
- cb->start, cb->size,
- NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI);
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ pb->start +
+ drm_get_resource_start(dev, 1),
+ pb->size, NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_PCI, &pushbuf);
+ chan->pushbuf_base = 0;
}
- if (!cb_dma) {
- nouveau_mem_free(dev, cb);
- DRM_ERROR("Failed to alloc DMA object for command buffer\n");
- return DRM_ERR(ENOMEM);
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf,
+ &chan->pushbuf))) {
+ DRM_ERROR("Error referencing push buffer ctxdma: %d\n", ret);
+ if (pushbuf != dev_priv->gart_info.sg_ctxdma)
+ nouveau_gpuobj_del(dev, &pushbuf);
+ return ret;
}
- dev_priv->fifos[channel].pushbuf_base = 0;
- dev_priv->fifos[channel].cmdbuf_mem = cb;
- dev_priv->fifos[channel].cmdbuf_obj = cb_dma;
return 0;
}
+static struct mem_block *
+nouveau_fifo_user_pushbuf_alloc(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_config *config = &dev_priv->config;
+ struct mem_block *pb;
+ int pb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE);
+
+ /* Defaults for unconfigured values */
+ if (!config->cmdbuf.location)
+ config->cmdbuf.location = NOUVEAU_MEM_FB;
+ if (!config->cmdbuf.size || config->cmdbuf.size < pb_min_size)
+ config->cmdbuf.size = pb_min_size;
+
+ pb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size,
+ config->cmdbuf.location | NOUVEAU_MEM_MAPPED,
+ (struct drm_file *)-2);
+ if (!pb)
+ DRM_ERROR("Couldn't allocate DMA push buffer.\n");
+
+ return pb;
+}
+
/* allocates and initializes a fifo for user space consumption */
-int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
- uint32_t vram_handle, uint32_t tt_handle)
+int
+nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
+ struct drm_file *file_priv, struct mem_block *pushbuf,
+ uint32_t vram_handle, uint32_t tt_handle)
{
int ret;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- nouveau_engine_func_t *engine = &dev_priv->Engine;
- struct nouveau_fifo *chan;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct nouveau_channel *chan;
int channel;
/*
@@ -258,42 +288,45 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
* (woo, full userspace command submission !)
* When there are no more contexts, you lost
*/
- for(channel=0; channel<nouveau_fifo_number(dev); channel++)
- if (dev_priv->fifos[channel].used==0)
+ for(channel=0; channel<nouveau_fifo_number(dev); channel++) {
+ if (dev_priv->fifos[channel] == NULL)
break;
+ }
/* no more fifos. you lost. */
if (channel==nouveau_fifo_number(dev))
- return DRM_ERR(EINVAL);
- (*chan_ret) = channel;
- chan = &dev_priv->fifos[channel];
+ return -EINVAL;
- DRM_INFO("Allocating FIFO number %d\n", channel);
-
- /* that fifo is used */
- chan->used = 1;
- chan->filp = filp;
+ dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel),
+ DRM_MEM_DRIVER);
+ if (!dev_priv->fifos[channel])
+ return -ENOMEM;
+ dev_priv->fifo_alloc_count++;
+ chan = dev_priv->fifos[channel];
+ chan->dev = dev;
+ chan->id = channel;
+ chan->file_priv = file_priv;
+ chan->pushbuf_mem = pushbuf;
- /* FIFO has no objects yet */
- chan->objs = NULL;
+ DRM_INFO("Allocating FIFO number %d\n", channel);
- /* allocate a command buffer, and create a dma object for the gpu */
- ret = nouveau_fifo_cmdbuf_alloc(dev, channel);
+ /* Allocate space for per-channel fixed notifier memory */
+ ret = nouveau_notifier_init_channel(chan);
if (ret) {
- nouveau_fifo_free(dev, channel);
+ nouveau_fifo_free(chan);
return ret;
}
/* Setup channel's default objects */
- ret = nouveau_object_init_channel(dev, channel, vram_handle, tt_handle);
+ ret = nouveau_gpuobj_channel_init(chan, vram_handle, tt_handle);
if (ret) {
- nouveau_fifo_free(dev, channel);
+ nouveau_fifo_free(chan);
return ret;
}
- /* Allocate space for per-channel fixed notifier memory */
- ret = nouveau_notifier_init_channel(dev, channel, filp);
+ /* Create a dma object for the push buffer */
+ ret = nouveau_fifo_pushbuf_ctxdma_init(chan);
if (ret) {
- nouveau_fifo_free(dev, channel);
+ nouveau_fifo_free(chan);
return ret;
}
@@ -306,177 +339,198 @@ int nouveau_fifo_alloc(drm_device_t* dev, int *chan_ret, DRMFILE filp,
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);
/* Create a graphics context for new channel */
- ret = engine->graph.create_context(dev, channel);
+ ret = engine->graph.create_context(chan);
if (ret) {
- nouveau_fifo_free(dev, channel);
+ nouveau_fifo_free(chan);
return ret;
}
	/* Construct initial RAMFC for new channel */
- ret = engine->fifo.create_context(dev, channel);
+ ret = engine->fifo.create_context(chan);
if (ret) {
- nouveau_fifo_free(dev, channel);
+ nouveau_fifo_free(chan);
return ret;
}
- /* enable the fifo dma operation */
- NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<channel));
-
/* setup channel's default get/put values */
- NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
- NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
+ if (dev_priv->card_type < NV_50) {
+ NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
+ NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
+ } else {
+ NV_WRITE(NV50_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base);
+ NV_WRITE(NV50_FIFO_REGS_DMAGET(channel), chan->pushbuf_base);
+ }
/* If this is the first channel, setup PFIFO ourselves. For any
* other case, the GPU will handle this when it switches contexts.
*/
- if (dev_priv->fifo_alloc_count == 0) {
- ret = engine->fifo.load_context(dev, channel);
+ if (dev_priv->fifo_alloc_count == 1) {
+ ret = engine->fifo.load_context(chan);
if (ret) {
- nouveau_fifo_free(dev, channel);
+ nouveau_fifo_free(chan);
return ret;
}
- ret = engine->graph.load_context(dev, channel);
+ ret = engine->graph.load_context(chan);
if (ret) {
- nouveau_fifo_free(dev, channel);
+ nouveau_fifo_free(chan);
return ret;
}
-
- /* Temporary hack, to avoid breaking Xv on cards where the
- * initial context value for 0x400710 doesn't have these bits
- * set. Proper fix would be to find which object+method is
- * responsible for modifying this state.
- */
- if (dev_priv->chipset >= 0x10) {
- uint32_t tmp;
- tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
- NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
- tmp = NV_READ(NV10_PGRAPH_SURFACE) | 0x00020100;
- NV_WRITE(NV10_PGRAPH_SURFACE, tmp);
- }
}
- NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001);
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH,
+ NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
NV_WRITE(NV03_PFIFO_CACHE1_PUSH0, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000001);
NV_WRITE(NV04_PFIFO_CACHE1_PULL1, 0x00000001);
/* reenable the fifo caches */
- NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
-
- dev_priv->fifo_alloc_count++;
+ NV_WRITE(NV03_PFIFO_CACHES, 1);
DRM_INFO("%s: initialised FIFO %d\n", __func__, channel);
+ *chan_ret = chan;
return 0;
}
/* stops a fifo */
-void nouveau_fifo_free(drm_device_t* dev, int channel)
+void nouveau_fifo_free(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- nouveau_engine_func_t *engine = &dev_priv->Engine;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
- chan->used = 0;
- DRM_INFO("%s: freeing fifo %d\n", __func__, channel);
+ DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id);
/* disable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);
- NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<channel));
// FIXME XXX needs more code
-
- engine->fifo.destroy_context(dev, channel);
+
+ engine->fifo.destroy_context(chan);
/* Cleanup PGRAPH state */
- engine->graph.destroy_context(dev, channel);
+ engine->graph.destroy_context(chan);
/* reenable the fifo caches */
NV_WRITE(NV03_PFIFO_CACHES, 0x00000001);
- /* Deallocate command buffer */
- if (chan->cmdbuf_mem)
- nouveau_mem_free(dev, chan->cmdbuf_mem);
-
- nouveau_notifier_takedown_channel(dev, channel);
+ /* Deallocate push buffer */
+ nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+ if (chan->pushbuf_mem) {
+ nouveau_mem_free(dev, chan->pushbuf_mem);
+ chan->pushbuf_mem = NULL;
+ }
/* Destroy objects belonging to the channel */
- nouveau_object_cleanup(dev, channel);
+ nouveau_gpuobj_channel_takedown(chan);
+ nouveau_notifier_takedown_channel(chan);
+
+ dev_priv->fifos[chan->id] = NULL;
dev_priv->fifo_alloc_count--;
+ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
}
-/* cleanups all the fifos from filp */
-void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp)
+/* cleans up all the fifos from file_priv */
+void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("clearing FIFO enables from filp\n");
- for(i=0;i<nouveau_fifo_number(dev);i++)
- if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp==filp)
- nouveau_fifo_free(dev,i);
+ DRM_DEBUG("clearing FIFO enables from file_priv\n");
+ for(i = 0; i < nouveau_fifo_number(dev); i++) {
+ struct nouveau_channel *chan = dev_priv->fifos[i];
+
+ if (chan && chan->file_priv == file_priv)
+ nouveau_fifo_free(chan);
+ }
}
int
-nouveau_fifo_owner(drm_device_t *dev, DRMFILE filp, int channel)
+nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,
+ int channel)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
if (channel >= nouveau_fifo_number(dev))
return 0;
- if (dev_priv->fifos[channel].used == 0)
+ if (dev_priv->fifos[channel] == NULL)
return 0;
- return (dev_priv->fifos[channel].filp == filp);
+ return (dev_priv->fifos[channel]->file_priv == file_priv);
}
/***********************************
* ioctls wrapping the functions
***********************************/
-static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS)
+static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan;
- drm_nouveau_fifo_alloc_t init;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_channel_alloc *init = data;
+ struct drm_map_list *entry;
+ struct nouveau_channel *chan;
+ struct mem_block *pushbuf;
int res;
- DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data,
- sizeof(init));
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
- res = nouveau_fifo_alloc(dev, &init.channel, filp,
- init.fb_ctxdma_handle,
- init.tt_ctxdma_handle);
+ if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
+ return -EINVAL;
+
+ pushbuf = nouveau_fifo_user_pushbuf_alloc(dev);
+ if (!pushbuf)
+ return -ENOMEM;
+
+ res = nouveau_fifo_alloc(dev, &chan, file_priv, pushbuf,
+ init->fb_ctxdma_handle,
+ init->tt_ctxdma_handle);
if (res)
return res;
- chan = &dev_priv->fifos[init.channel];
-
- init.put_base = chan->pushbuf_base;
+ init->channel = chan->id;
+ init->put_base = chan->pushbuf_base;
/* make the fifo available to user space */
/* first, the fifo control regs */
- init.ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init.channel);
- init.ctrl_size = NV03_FIFO_REGS_SIZE;
- res = drm_addmap(dev, init.ctrl, init.ctrl_size, _DRM_REGISTERS,
+ init->ctrl = dev_priv->mmio->offset;
+ if (dev_priv->card_type < NV_50) {
+ init->ctrl += NV03_FIFO_REGS(init->channel);
+ init->ctrl_size = NV03_FIFO_REGS_SIZE;
+ } else {
+ init->ctrl += NV50_FIFO_REGS(init->channel);
+ init->ctrl_size = NV50_FIFO_REGS_SIZE;
+ }
+ res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,
0, &chan->regs);
if (res != 0)
return res;
+ entry = drm_find_matching_map(dev, chan->regs);
+ if (!entry)
+ return -EINVAL;
+ init->ctrl = entry->user_token;
+
/* pass back FIFO map info to the caller */
- init.cmdbuf = chan->cmdbuf_mem->start;
- init.cmdbuf_size = chan->cmdbuf_mem->size;
+ init->cmdbuf = chan->pushbuf_mem->map_handle;
+ init->cmdbuf_size = chan->pushbuf_mem->size;
/* and the notifier block */
- init.notifier = chan->notifier_block->start;
- init.notifier_size = chan->notifier_block->size;
- res = drm_addmap(dev, init.notifier, init.notifier_size, _DRM_REGISTERS,
- 0, &chan->notifier_map);
- if (res != 0)
- return res;
+ init->notifier = chan->notifier_block->map_handle;
+ init->notifier_size = chan->notifier_block->size;
+
+ return 0;
+}
+
+static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_channel_free *cfree = data;
+ struct nouveau_channel *chan;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(cfree->channel, file_priv, chan);
- DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data,
- init, sizeof(init));
+ nouveau_fifo_free(chan);
return 0;
}
@@ -484,14 +538,17 @@ static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS)
* finally, the ioctl table
***********************************/
-drm_ioctl_desc_t nouveau_ioctls[] = {
- [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_NOUVEAU_GROBJ_ALLOC)] = {nouveau_ioctl_grobj_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_NOUVEAU_NOTIFIER_ALLOC)] = {nouveau_ioctl_notifier_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_NOUVEAU_SETPARAM)] = {nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+struct drm_ioctl_desc nouveau_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_ALLOC, nouveau_ioctl_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_NOUVEAU_MEM_FREE, nouveau_ioctl_mem_free, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
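
The ioctl table above switches to DRM_IOCTL_DEF and the new (dev, data, file_priv) dispatch, and the channel-allocation path now hands the control-register map handle, push buffer and notifier block back through struct drm_nouveau_channel_alloc. Below is a minimal user-space sketch of how a client might drive the reworked DRM_NOUVEAU_CHANNEL_ALLOC / DRM_NOUVEAU_CHANNEL_FREE pair through libdrm; the header locations, the ctxdma handle values and the minimal error handling are assumptions for illustration, not part of this patch.

/* Hedged sketch, not part of the patch: user-space use of the reworked
 * channel ioctls.  Header paths and the ctxdma handles are assumed. */
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>		/* drmOpen, drmCommandWriteRead, drmClose */
#include "nouveau_drm.h"	/* drm_nouveau_channel_alloc/free, DRM_NOUVEAU_* */

int main(void)
{
	struct drm_nouveau_channel_alloc ca;
	struct drm_nouveau_channel_free cf;
	int fd, ret;

	fd = drmOpen("nouveau", NULL);
	if (fd < 0)
		return 1;

	memset(&ca, 0, sizeof(ca));
	ca.fb_ctxdma_handle = 0xd8000001;	/* assumed: created beforehand */
	ca.tt_ctxdma_handle = 0xd8000002;	/* assumed: created beforehand */

	ret = drmCommandWriteRead(fd, DRM_NOUVEAU_CHANNEL_ALLOC,
				  &ca, sizeof(ca));
	if (ret == 0) {
		printf("channel %d, put_base 0x%x, ctrl_size %d\n",
		       ca.channel, ca.put_base, ca.ctrl_size);

		cf.channel = ca.channel;
		drmCommandWrite(fd, DRM_NOUVEAU_CHANNEL_FREE, &cf, sizeof(cf));
	}

	drmClose(fd);
	return ret ? 1 : 0;
}
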
diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c
index 8de6e705..e64677ed 100644
--- a/shared-core/nouveau_irq.c
+++ b/shared-core/nouveau_irq.c
@@ -36,132 +36,34 @@
#include "nouveau_drv.h"
#include "nouveau_reg.h"
-void nouveau_irq_preinstall(drm_device_t *dev)
+void nouveau_irq_preinstall(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- DRM_DEBUG("IRQ: preinst\n");
-
- if (!dev_priv) {
- DRM_ERROR("AIII, no dev_priv\n");
- return;
- }
- if (!dev_priv->mmio) {
- DRM_ERROR("AIII, no dev_priv->mmio\n");
- return;
- }
-
- /* Disable/Clear PFIFO interrupts */
- NV_WRITE(NV03_PFIFO_INTR_EN_0, 0);
- NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
- /* Disable/Clear PGRAPH interrupts */
- if (dev_priv->card_type<NV_40)
- NV_WRITE(NV03_PGRAPH_INTR_EN, 0);
- else
- NV_WRITE(NV40_PGRAPH_INTR_EN, 0);
- NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
-#if 0
- /* Disable/Clear CRTC0/1 interrupts */
- NV_WRITE(NV_CRTC0_INTEN, 0);
- NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
- NV_WRITE(NV_CRTC1_INTEN, 0);
- NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
-#endif
/* Master disable */
NV_WRITE(NV03_PMC_INTR_EN_0, 0);
}
-void nouveau_irq_postinstall(drm_device_t *dev)
+void nouveau_irq_postinstall(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv) {
- DRM_ERROR("AIII, no dev_priv\n");
- return;
- }
- if (!dev_priv->mmio) {
- DRM_ERROR("AIII, no dev_priv->mmio\n");
- return;
- }
-
- DRM_DEBUG("IRQ: postinst\n");
-
- /* Enable PFIFO error reporting */
- NV_WRITE(NV03_PFIFO_INTR_EN_0 ,
- NV_PFIFO_INTR_CACHE_ERROR |
- NV_PFIFO_INTR_RUNOUT |
- NV_PFIFO_INTR_RUNOUT_OVERFLOW |
- NV_PFIFO_INTR_DMA_PUSHER |
- NV_PFIFO_INTR_DMA_PT |
- NV_PFIFO_INTR_SEMAPHORE |
- NV_PFIFO_INTR_ACQUIRE_TIMEOUT
- );
- NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
-
- /* Enable PGRAPH interrupts */
- if (dev_priv->card_type<NV_40)
- NV_WRITE(NV03_PGRAPH_INTR_EN,
- NV_PGRAPH_INTR_NOTIFY |
- NV_PGRAPH_INTR_MISSING_HW |
- NV_PGRAPH_INTR_CONTEXT_SWITCH |
- NV_PGRAPH_INTR_BUFFER_NOTIFY |
- NV_PGRAPH_INTR_ERROR
- );
- else
- NV_WRITE(NV40_PGRAPH_INTR_EN,
- NV_PGRAPH_INTR_NOTIFY |
- NV_PGRAPH_INTR_MISSING_HW |
- NV_PGRAPH_INTR_CONTEXT_SWITCH |
- NV_PGRAPH_INTR_BUFFER_NOTIFY |
- NV_PGRAPH_INTR_ERROR
- );
- NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
-
-#if 0
- /* Enable CRTC0/1 interrupts */
- NV_WRITE(NV_CRTC0_INTEN, NV_CRTC_INTR_VBLANK);
- NV_WRITE(NV_CRTC1_INTEN, NV_CRTC_INTR_VBLANK);
-#endif
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
/* Master enable */
NV_WRITE(NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
}
-void nouveau_irq_uninstall(drm_device_t *dev)
+void nouveau_irq_uninstall(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
-
- if (!dev_priv) {
- DRM_ERROR("AIII, no dev_priv\n");
- return;
- }
- if (!dev_priv->mmio) {
- DRM_ERROR("AIII, no dev_priv->mmio\n");
- return;
- }
-
- DRM_DEBUG("IRQ: uninst\n");
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- /* Disable PFIFO interrupts */
- NV_WRITE(NV03_PFIFO_INTR_EN_0, 0);
- /* Disable PGRAPH interrupts */
- if (dev_priv->card_type<NV_40)
- NV_WRITE(NV03_PGRAPH_INTR_EN, 0);
- else
- NV_WRITE(NV40_PGRAPH_INTR_EN, 0);
-#if 0
- /* Disable CRTC0/1 interrupts */
- NV_WRITE(NV_CRTC0_INTEN, 0);
- NV_WRITE(NV_CRTC1_INTEN, 0);
-#endif
/* Master disable */
NV_WRITE(NV03_PMC_INTR_EN_0, 0);
}
-static void nouveau_fifo_irq_handler(drm_device_t *dev)
+static void nouveau_fifo_irq_handler(struct drm_device *dev)
{
uint32_t status, chmode, chstat, channel;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
status = NV_READ(NV03_PFIFO_INTR_0);
if (!status)
@@ -170,12 +72,10 @@ static void nouveau_fifo_irq_handler(drm_device_t *dev)
chstat = NV_READ(NV04_PFIFO_DMA);
channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
- DRM_DEBUG("NV: PFIFO interrupt! Channel=%d, INTSTAT=0x%08x/MODE=0x%08x/PEND=0x%08x\n", channel, status, chmode, chstat);
-
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
uint32_t c1get, c1method, c1data;
- DRM_ERROR("NV: PFIFO error interrupt\n");
+ DRM_ERROR("PFIFO error interrupt\n");
c1get = NV_READ(NV03_PFIFO_CACHE1_GET) >> 2;
if (dev_priv->card_type < NV_40) {
@@ -187,17 +87,17 @@ static void nouveau_fifo_irq_handler(drm_device_t *dev)
c1data = NV_READ(NV40_PFIFO_CACHE1_DATA(c1get));
}
- DRM_ERROR("NV: Channel %d/%d - Method 0x%04x, Data 0x%08x\n",
- channel, (c1method >> 13) & 7,
- c1method & 0x1ffc, c1data
- );
+ DRM_ERROR("Channel %d/%d - Method 0x%04x, Data 0x%08x\n",
+ channel, (c1method >> 13) & 7, c1method & 0x1ffc,
+ c1data);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_CACHE_ERROR);
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
- DRM_INFO("NV: PFIFO DMA pusher interrupt\n");
+ DRM_ERROR("PFIFO DMA pusher interrupt: ch%d, 0x%08x\n",
+ channel, NV_READ(NV04_PFIFO_CACHE1_DMA_GET));
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
NV_WRITE(NV03_PFIFO_INTR_0, NV_PFIFO_INTR_DMA_PUSHER);
@@ -211,7 +111,7 @@ static void nouveau_fifo_irq_handler(drm_device_t *dev)
}
if (status) {
- DRM_INFO("NV: unknown PFIFO interrupt. status=0x%08x\n", status);
+ DRM_ERROR("Unhandled PFIFO interrupt: status=0x%08x\n", status);
NV_WRITE(NV03_PFIFO_INTR_0, status);
}
@@ -220,9 +120,9 @@ static void nouveau_fifo_irq_handler(drm_device_t *dev)
}
#if 0
-static void nouveau_nv04_context_switch(drm_device_t *dev)
+static void nouveau_nv04_context_switch(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t channel,i;
uint32_t max=0;
NV_WRITE(NV04_PGRAPH_FIFO,0x0);
@@ -266,95 +166,182 @@ static void nouveau_nv04_context_switch(drm_device_t *dev)
}
#endif
+
+struct nouveau_bitfield_names
+{
+ uint32_t mask;
+ const char * name;
+};
+
+static struct nouveau_bitfield_names nouveau_nstatus_names[] =
+{
+ { NV03_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
+ { NV03_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
+ { NV03_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
+ { NV03_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
+};
+
+static struct nouveau_bitfield_names nouveau_nsource_names[] =
+{
+ { NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
+ { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
+ { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
+ { NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
+ { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
+ { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
+ { NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
+ { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
+ { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
+ { NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
+ { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
+ { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
+ { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
+};
+
static void
-nouveau_graph_dump_trap_info(drm_device_t *dev)
+nouveau_print_bitfield_names(uint32_t value,
+ const struct nouveau_bitfield_names *namelist,
+ const int namelist_len)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ int i;
+ for(i=0; i<namelist_len; ++i) {
+ uint32_t mask = namelist[i].mask;
+ if(value & mask) {
+ printk(" %s", namelist[i].name);
+ value &= ~mask;
+ }
+ }
+ if(value)
+ printk(" (unknown bits 0x%08x)", value);
+}
+
+static int
+nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int channel;
+
+ if (dev_priv->card_type < NV_10) {
+ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
+ } else if (dev_priv->card_type < NV_40) {
+ channel = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 4;
+
+ /* 0x400704 *sometimes* contains a sensible channel ID, but
+		 * mostly not... for now look up which channel owns the active
+ * PGRAPH context. Probably a better way, but this'll do
+ * for now.
+ */
+ for (channel = 0; channel < 32; channel++) {
+ if (dev_priv->fifos[channel] == NULL)
+ continue;
+ if (cur_grctx ==
+ dev_priv->fifos[channel]->ramin_grctx->instance)
+ break;
+ }
+ if (channel == 32) {
+ DRM_ERROR("AIII, unable to determine active channel "
+ "from PGRAPH context 0x%08x\n", cur_grctx);
+ return -EINVAL;
+ }
+ } else {
+ uint32_t cur_grctx = (NV_READ(0x40032C) & 0xfffff) << 12;
+
+ for (channel = 0; channel < 128; channel++) {
+ if (dev_priv->fifos[channel] == NULL)
+ continue;
+ if (cur_grctx ==
+ dev_priv->fifos[channel]->ramin_grctx->instance)
+ break;
+ }
+ if (channel == 128) {
+ DRM_ERROR("AIII, unable to determine active channel "
+ "from PGRAPH context 0x%08x\n", cur_grctx);
+ return -EINVAL;
+ }
+ }
+
+ if (channel > nouveau_fifo_number(dev) ||
+ dev_priv->fifos[channel] == NULL) {
+ DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel);
+ return -EINVAL;
+ }
+
+ *channel_ret = channel;
+ return 0;
+}
+
+static void
+nouveau_graph_dump_trap_info(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t address;
- uint32_t channel;
- uint32_t method, subc, data;
+ uint32_t channel, class;
+ uint32_t method, subc, data, data2;
+ uint32_t nsource, nstatus;
- address = NV_READ(0x400704);
- data = NV_READ(0x400708);
- channel = (address >> 20) & 0x1F;
- subc = (address >> 16) & 0x7;
+ if (nouveau_graph_trapped_channel(dev, &channel))
+ channel = -1;
+
+ data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);
+ address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR);
method = address & 0x1FFC;
+ if (dev_priv->card_type < NV_10) {
+ subc = (address >> 13) & 0x7;
+ data2= 0;
+ } else {
+ subc = (address >> 16) & 0x7;
+ data2= NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);
+ }
+ nsource = NV_READ(NV03_PGRAPH_NSOURCE);
+ nstatus = NV_READ(NV03_PGRAPH_NSTATUS);
+ if (dev_priv->card_type < NV_50) {
+ class = NV_READ(0x400160 + subc*4) & 0xFFFF;
+ } else {
+ class = NV_READ(0x400814);
+ }
- DRM_ERROR("NV: nSource: 0x%08x, nStatus: 0x%08x\n",
- NV_READ(0x400108), NV_READ(0x400104));
- DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -"
- "Method 0x%04x, Data 0x%08x\n",
- channel, subc,
- NV_READ(0x400160+subc*4) & 0xFFFF,
- method, data
- );
+ DRM_ERROR("nSource:");
+ nouveau_print_bitfield_names(nsource, nouveau_nsource_names,
+ ARRAY_SIZE(nouveau_nsource_names));
+ printk(", nStatus:");
+ nouveau_print_bitfield_names(nstatus, nouveau_nstatus_names,
+ ARRAY_SIZE(nouveau_nstatus_names));
+ printk("\n");
+
+ DRM_ERROR("Channel %d/%d (class 0x%04x) - Method 0x%04x, Data 0x%08x:0x%08x\n",
+ channel, subc, class, method, data2, data);
}
-static void nouveau_pgraph_irq_handler(drm_device_t *dev)
+static void nouveau_pgraph_irq_handler(struct drm_device *dev)
{
- uint32_t status;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t status, nsource;
status = NV_READ(NV03_PGRAPH_INTR);
if (!status)
return;
+ nsource = NV_READ(NV03_PGRAPH_NSOURCE);
if (status & NV_PGRAPH_INTR_NOTIFY) {
- uint32_t nsource, nstatus, instance, notify;
- DRM_DEBUG("NV: PGRAPH notify interrupt\n");
+ DRM_DEBUG("PGRAPH notify interrupt\n");
- nstatus = NV_READ(0x00400104);
- nsource = NV_READ(0x00400108);
- DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
-
- /* if this wasn't NOTIFICATION_PENDING, dump extra trap info */
- if (nsource & ~(1<<0)) {
- nouveau_graph_dump_trap_info(dev);
- } else {
- instance = NV_READ(0x00400158);
- notify = NV_READ(0x00400150) >> 16;
- DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n",
- nsource, nstatus);
- }
+ nouveau_graph_dump_trap_info(dev);
status &= ~NV_PGRAPH_INTR_NOTIFY;
NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
}
- if (status & NV_PGRAPH_INTR_BUFFER_NOTIFY) {
- uint32_t nsource, nstatus, instance, notify;
- DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n");
-
- nstatus = NV_READ(0x00400104);
- nsource = NV_READ(0x00400108);
- DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
-
- instance = NV_READ(0x00400158);
- notify = NV_READ(0x00400150) >> 16;
- DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify);
-
- status &= ~NV_PGRAPH_INTR_BUFFER_NOTIFY;
- NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_BUFFER_NOTIFY);
- }
-
- if (status & NV_PGRAPH_INTR_MISSING_HW) {
- DRM_ERROR("NV: PGRAPH missing hw interrupt\n");
-
- status &= ~NV_PGRAPH_INTR_MISSING_HW;
- NV_WRITE(NV03_PGRAPH_INTR, NV_PGRAPH_INTR_MISSING_HW);
- }
-
if (status & NV_PGRAPH_INTR_ERROR) {
- uint32_t nsource, nstatus, instance;
-
- DRM_ERROR("NV: PGRAPH error interrupt\n");
-
- nstatus = NV_READ(0x00400104);
- nsource = NV_READ(0x00400108);
- DRM_ERROR("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus);
-
- instance = NV_READ(0x00400158);
- DRM_ERROR("instance:0x%08x\n", instance);
+ DRM_ERROR("PGRAPH error interrupt\n");
nouveau_graph_dump_trap_info(dev);
@@ -364,7 +351,7 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev)
if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
uint32_t channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
- DRM_INFO("NV: PGRAPH context switch interrupt channel %x\n",channel);
+ DRM_DEBUG("PGRAPH context switch interrupt channel %x\n",channel);
switch(dev_priv->card_type)
{
case NV_04:
@@ -372,6 +359,7 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev)
nouveau_nv04_context_switch(dev);
break;
case NV_10:
+ case NV_11:
case NV_17:
nouveau_nv10_context_switch(dev);
break;
@@ -380,7 +368,7 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev)
nouveau_nv20_context_switch(dev);
break;
default:
- DRM_INFO("NV: Context switch not implemented\n");
+ DRM_ERROR("Context switch not implemented\n");
break;
}
@@ -389,16 +377,17 @@ static void nouveau_pgraph_irq_handler(drm_device_t *dev)
}
if (status) {
- DRM_INFO("NV: Unknown PGRAPH interrupt! STAT=0x%08x\n", status);
+ DRM_ERROR("Unhandled PGRAPH interrupt: STAT=0x%08x\n", status);
NV_WRITE(NV03_PGRAPH_INTR, status);
}
NV_WRITE(NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
-static void nouveau_crtc_irq_handler(drm_device_t *dev, int crtc)
+static void nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
if (crtc&1) {
NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
}
@@ -410,24 +399,24 @@ static void nouveau_crtc_irq_handler(drm_device_t *dev, int crtc)
irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t*)arg;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_device *dev = (struct drm_device*)arg;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t status;
status = NV_READ(NV03_PMC_INTR_0);
if (!status)
return IRQ_NONE;
- DRM_DEBUG("PMC INTSTAT: 0x%08x\n", status);
-
if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
nouveau_fifo_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
}
+
if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
nouveau_pgraph_irq_handler(dev);
status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
}
+
if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
nouveau_crtc_irq_handler(dev, (status>>24)&3);
status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
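
The nouveau_irq.c changes above replace the raw nSource/nStatus register dumps with small mask/name tables decoded by nouveau_print_bitfield_names(). The same pattern, reduced to a standalone user-space sketch with made-up mask values (the real ones live in nouveau_reg.h), looks like this:

/* Hedged sketch, not from the patch: the mask->name decode pattern used
 * above for NSOURCE/NSTATUS, as a standalone program.  Masks are made up. */
#include <stdio.h>
#include <stdint.h>

struct bitfield_name {
	uint32_t mask;
	const char *name;
};

static const struct bitfield_name example_names[] = {
	{ 0x00000001, "NOTIFICATION" },
	{ 0x00000002, "DATA_ERROR" },
	{ 0x00000010, "PROTECTION_ERROR" },
};

/* Print a name for every set bit we know about, then whatever is left. */
static void print_bitfield_names(uint32_t value,
				 const struct bitfield_name *names, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		if (value & names[i].mask) {
			printf(" %s", names[i].name);
			value &= ~names[i].mask;
		}
	}
	if (value)
		printf(" (unknown bits 0x%08x)", value);
}

int main(void)
{
	printf("nsource:");
	print_bitfield_names(0x00000013, example_names,
			     sizeof(example_names) / sizeof(example_names[0]));
	printf("\n");
	return 0;
}
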
diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c
index d8ae52b7..dbfba351 100644
--- a/shared-core/nouveau_mem.c
+++ b/shared-core/nouveau_mem.c
@@ -36,7 +36,7 @@
#include "nouveau_drv.h"
static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size,
- DRMFILE filp)
+ struct drm_file *file_priv)
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
@@ -46,7 +46,7 @@ static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64
goto out;
newblock->start = start;
newblock->size = p->size - (start - p->start);
- newblock->filp = NULL;
+ newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@@ -63,7 +63,7 @@ static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64
goto out;
newblock->start = start + size;
newblock->size = p->size - size;
- newblock->filp = NULL;
+ newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@@ -73,12 +73,14 @@ static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64
out:
/* Our block is in the middle */
- p->filp = filp;
+ p->file_priv = file_priv;
return p;
}
-struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
- int align2, DRMFILE filp)
+struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap,
+ uint64_t size,
+ int align2,
+ struct drm_file *file_priv)
{
struct mem_block *p;
uint64_t mask = (1 << align2) - 1;
@@ -88,8 +90,8 @@ struct mem_block *nouveau_mem_alloc_block(struct mem_block *heap, uint64_t size,
list_for_each(p, heap) {
uint64_t start = (p->start + mask) & ~mask;
- if (p->filp == 0 && start + size <= p->start + p->size)
- return split_block(p, start, size, filp);
+ if (p->file_priv == 0 && start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
}
return NULL;
@@ -108,12 +110,12 @@ static struct mem_block *find_block(struct mem_block *heap, uint64_t start)
void nouveau_mem_free_block(struct mem_block *p)
{
- p->filp = NULL;
+ p->file_priv = NULL;
- /* Assumes a single contiguous range. Needs a special filp in
+ /* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
- if (p->next->filp == 0) {
+ if (p->next->file_priv == 0) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@@ -121,7 +123,7 @@ void nouveau_mem_free_block(struct mem_block *p)
drm_free(q, sizeof(*q), DRM_MEM_BUFS);
}
- if (p->prev->filp == 0) {
+ if (p->prev->file_priv == 0) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;
@@ -138,29 +140,29 @@ int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,
struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
if (!blocks)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
if (!*heap) {
drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
blocks->start = start;
blocks->size = size;
- blocks->filp = NULL;
+ blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
memset(*heap, 0, sizeof(**heap));
- (*heap)->filp = (DRMFILE) - 1;
+ (*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
/*
- * Free all blocks associated with the releasing filp
+ * Free all blocks associated with the releasing file_priv
*/
-void nouveau_mem_release(DRMFILE filp, struct mem_block *heap)
+void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
struct mem_block *p;
@@ -168,15 +170,16 @@ void nouveau_mem_release(DRMFILE filp, struct mem_block *heap)
return;
list_for_each(p, heap) {
- if (p->filp == filp)
- p->filp = NULL;
+ if (p->file_priv == file_priv)
+ p->file_priv = NULL;
}
- /* Assumes a single contiguous range. Needs a special filp in
+ /* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
list_for_each(p, heap) {
- while ((p->filp == 0) && (p->next->filp == 0) && (p->next!=heap)) {
+ while ((p->file_priv == 0) && (p->next->file_priv == 0) &&
+ (p->next!=heap)) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@@ -189,7 +192,7 @@ void nouveau_mem_release(DRMFILE filp, struct mem_block *heap)
/*
* Cleanup everything
*/
-static void nouveau_mem_takedown(struct mem_block **heap)
+void nouveau_mem_takedown(struct mem_block **heap)
{
struct mem_block *p;
@@ -208,29 +211,52 @@ static void nouveau_mem_takedown(struct mem_block **heap)
void nouveau_mem_close(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
nouveau_mem_takedown(&dev_priv->agp_heap);
nouveau_mem_takedown(&dev_priv->fb_heap);
+ if (dev_priv->pci_heap)
+ nouveau_mem_takedown(&dev_priv->pci_heap);
+}
+
+/* XXX won't work on BSD because of pci_read_config_dword */
+static uint32_t
+nouveau_mem_fb_amount_igp(struct drm_device *dev)
+{
+#if defined(LINUX) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pci_dev *bridge;
+ uint32_t mem;
+
+ bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0,1));
+ if (!bridge) {
+ DRM_ERROR("no bridge device\n");
+ return 0;
+ }
+
+ if (dev_priv->flags&NV_NFORCE) {
+ pci_read_config_dword(bridge, 0x7C, &mem);
+ return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
+ } else
+ if(dev_priv->flags&NV_NFORCE2) {
+ pci_read_config_dword(bridge, 0x84, &mem);
+ return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+ }
+
+ DRM_ERROR("impossible!\n");
+#else
+ DRM_ERROR("Linux kernel >= 2.6.19 required to check for igp memory amount\n");
+#endif
+
+ return 0;
}
/* returns the amount of FB ram in bytes */
uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
switch(dev_priv->card_type)
{
- case NV_03:
- switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT)
- {
- case NV03_BOOT_0_RAM_AMOUNT_8MB:
- case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM:
- return 8*1024*1024;
- case NV03_BOOT_0_RAM_AMOUNT_4MB:
- return 4*1024*1024;
- case NV03_BOOT_0_RAM_AMOUNT_2MB:
- return 2*1024*1024;
- }
- break;
case NV_04:
case NV_05:
if (NV_READ(NV03_BOOT_0) & 0x00000100) {
@@ -249,6 +275,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
}
break;
case NV_10:
+ case NV_11:
case NV_17:
case NV_20:
case NV_30:
@@ -256,18 +283,14 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
case NV_44:
case NV_50:
default:
- // XXX won't work on BSD because of pci_read_config_dword
- if (dev_priv->flags&NV_NFORCE) {
- uint32_t mem;
- pci_read_config_dword(dev->pdev, 0x7C, &mem);
- return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024;
- } else if(dev_priv->flags&NV_NFORCE2) {
- uint32_t mem;
- pci_read_config_dword(dev->pdev, 0x84, &mem);
- return (uint64_t)(((mem >> 4) & 127) + 1)*1024*1024;
+ if (dev_priv->flags & (NV_NFORCE | NV_NFORCE2)) {
+ return nouveau_mem_fb_amount_igp(dev);
} else {
uint64_t mem;
- mem=(NV_READ(NV04_FIFO_DATA)&NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
+
+ mem = (NV_READ(NV04_FIFO_DATA) &
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>
+ NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;
return mem*1024*1024;
}
break;
@@ -277,70 +300,67 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)
return 0;
}
-
-
-int nouveau_mem_init(struct drm_device *dev)
+static int
+nouveau_mem_init_agp(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fb_size;
- dev_priv->agp_phys=0;
- dev_priv->fb_phys=0;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_agp_info info;
+ struct drm_agp_mode mode;
+ struct drm_agp_buffer agp_req;
+ struct drm_agp_binding bind_req;
+ int ret;
+
+ ret = drm_agp_acquire(dev);
+ if (ret) {
+ DRM_ERROR("Unable to acquire AGP: %d\n", ret);
+ return ret;
+ }
- /* init AGP */
- dev_priv->agp_heap=NULL;
- if (drm_device_is_agp(dev))
- {
- int err;
- drm_agp_info_t info;
- drm_agp_mode_t mode;
- drm_agp_buffer_t agp_req;
- drm_agp_binding_t bind_req;
-
- err = drm_agp_acquire(dev);
- if (err) {
- DRM_ERROR("Unable to acquire AGP: %d\n", err);
- goto no_agp;
- }
+ ret = drm_agp_info(dev, &info);
+ if (ret) {
+ DRM_ERROR("Unable to get AGP info: %d\n", ret);
+ return ret;
+ }
- err = drm_agp_info(dev, &info);
- if (err) {
- DRM_ERROR("Unable to get AGP info: %d\n", err);
- goto no_agp;
- }
+ /* see agp.h for the AGPSTAT_* modes available */
+ mode.mode = info.mode;
+ ret = drm_agp_enable(dev, mode);
+ if (ret) {
+ DRM_ERROR("Unable to enable AGP: %d\n", ret);
+ return ret;
+ }
- /* see agp.h for the AGPSTAT_* modes available */
- mode.mode = info.mode;
- err = drm_agp_enable(dev, mode);
- if (err) {
- DRM_ERROR("Unable to enable AGP: %d\n", err);
- goto no_agp;
- }
+ agp_req.size = info.aperture_size;
+ agp_req.type = 0;
+ ret = drm_agp_alloc(dev, &agp_req);
+ if (ret) {
+ DRM_ERROR("Unable to alloc AGP: %d\n", ret);
+ return ret;
+ }
- agp_req.size = info.aperture_size;
- agp_req.type = 0;
- err = drm_agp_alloc(dev, &agp_req);
- if (err) {
- DRM_ERROR("Unable to alloc AGP: %d\n", err);
- goto no_agp;
- }
+ bind_req.handle = agp_req.handle;
+ bind_req.offset = 0;
+ ret = drm_agp_bind(dev, &bind_req);
+ if (ret) {
+ DRM_ERROR("Unable to bind AGP: %d\n", ret);
+ return ret;
+ }
- bind_req.handle = agp_req.handle;
- bind_req.offset = 0;
- err = drm_agp_bind(dev, &bind_req);
- if (err) {
- DRM_ERROR("Unable to bind AGP: %d\n", err);
- goto no_agp;
- }
+ dev_priv->gart_info.type = NOUVEAU_GART_AGP;
+ dev_priv->gart_info.aper_base = info.aperture_base;
+ dev_priv->gart_info.aper_size = info.aperture_size;
+ return 0;
+}
- if (nouveau_mem_init_heap(&dev_priv->agp_heap,
- info.aperture_base,
- info.aperture_size))
- goto no_agp;
+int nouveau_mem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t fb_size;
+ int ret = 0;
- dev_priv->agp_phys = info.aperture_base;
- dev_priv->agp_available_size = info.aperture_size;
- }
-no_agp:
+ dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL;
+ dev_priv->fb_phys = 0;
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
/* setup a mtrr over the FB */
dev_priv->fb_mtrr = drm_mtrr_add(drm_get_resource_start(dev, 1),
@@ -353,7 +373,7 @@ no_agp:
/* On at least NV40, RAMIN is actually at the end of vram.
* We don't want to allocate this... */
if (dev_priv->card_type >= NV_40)
- fb_size -= dev_priv->ramin_size;
+ fb_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_available_size = fb_size;
DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);
@@ -361,30 +381,75 @@ no_agp:
/* On cards with > 256Mb, you can't map everything.
* So we create a second FB heap for that type of memory */
if (nouveau_mem_init_heap(&dev_priv->fb_heap,
- drm_get_resource_start(dev,1),
- 256*1024*1024))
- return DRM_ERR(ENOMEM);
+ 0, 256*1024*1024))
+ return -ENOMEM;
if (nouveau_mem_init_heap(&dev_priv->fb_nomap_heap,
- drm_get_resource_start(dev,1) +
- 256*1024*1024,
- fb_size-256*1024*1024))
- return DRM_ERR(ENOMEM);
+ 256*1024*1024, fb_size-256*1024*1024))
+ return -ENOMEM;
} else {
- if (nouveau_mem_init_heap(&dev_priv->fb_heap,
- drm_get_resource_start(dev,1),
- fb_size))
- return DRM_ERR(ENOMEM);
+ if (nouveau_mem_init_heap(&dev_priv->fb_heap, 0, fb_size))
+ return -ENOMEM;
dev_priv->fb_nomap_heap=NULL;
}
+ /* Init AGP / NV50 PCIEGART */
+ if (drm_device_is_agp(dev) && dev->agp) {
+ if ((ret = nouveau_mem_init_agp(dev)))
+ DRM_ERROR("Error initialising AGP: %d\n", ret);
+ }
+
+	/* Note: this is *not* just NV50 code, but only used on NV50 for now */
+ if (dev_priv->gart_info.type == NOUVEAU_GART_NONE &&
+ dev_priv->card_type >= NV_50) {
+ ret = nouveau_sgdma_init(dev);
+ if (!ret) {
+ ret = nouveau_sgdma_nottm_hack_init(dev);
+ if (ret)
+ nouveau_sgdma_takedown(dev);
+ }
+
+ if (ret)
+ DRM_ERROR("Error initialising SG DMA: %d\n", ret);
+ }
+
+ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+ if (nouveau_mem_init_heap(&dev_priv->agp_heap,
+ 0, dev_priv->gart_info.aper_size)) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+ nouveau_sgdma_nottm_hack_takedown(dev);
+ nouveau_sgdma_takedown(dev);
+ }
+ }
+ }
+
+ /* NV04-NV40 PCIEGART */
+ if (!dev_priv->agp_heap && dev_priv->card_type < NV_50) {
+ struct drm_scatter_gather sgreq;
+
+ DRM_DEBUG("Allocating sg memory for PCI DMA\n");
+ sgreq.size = 16 << 20; //16MB of PCI scatter-gather zone
+
+ if (drm_sg_alloc(dev, &sgreq)) {
+ DRM_ERROR("Unable to allocate %dMB of scatter-gather"
+ " pages for PCI DMA!",sgreq.size>>20);
+ } else {
+ if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,
+ dev->sg->pages * PAGE_SIZE)) {
+ DRM_ERROR("Unable to initialize pci_heap!");
+ }
+ }
+ }
+
return 0;
}
-struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp)
+struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,
+ uint64_t size, int flags,
+ struct drm_file *file_priv)
{
struct mem_block *block;
int type;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
/*
* Make things easier on ourselves: all allocations are page-aligned.
@@ -405,29 +470,42 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint6
if (size & (~PAGE_MASK))
size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE;
- if (flags&NOUVEAU_MEM_AGP) {
- type=NOUVEAU_MEM_AGP;
- block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,
- alignment, filp);
- if (block) goto alloc_ok;
- }
- if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) {
- type=NOUVEAU_MEM_FB;
- if (!(flags&NOUVEAU_MEM_MAPPED)) {
- block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,
- size, alignment, filp);
- if (block) goto alloc_ok;
- }
- block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,
- alignment, filp);
- if (block) goto alloc_ok;
- }
- if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) {
- type=NOUVEAU_MEM_AGP;
- block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,
- alignment, filp);
- if (block) goto alloc_ok;
- }
+
+#define NOUVEAU_MEM_ALLOC_AGP {\
+ type=NOUVEAU_MEM_AGP;\
+ block = nouveau_mem_alloc_block(dev_priv->agp_heap, size,\
+ alignment, file_priv); \
+ if (block) goto alloc_ok;\
+ }
+
+#define NOUVEAU_MEM_ALLOC_PCI {\
+ type = NOUVEAU_MEM_PCI;\
+ block = nouveau_mem_alloc_block(dev_priv->pci_heap, size, \
+ alignment, file_priv); \
+ if ( block ) goto alloc_ok;\
+ }
+
+#define NOUVEAU_MEM_ALLOC_FB {\
+ type=NOUVEAU_MEM_FB;\
+ if (!(flags&NOUVEAU_MEM_MAPPED)) {\
+ block = nouveau_mem_alloc_block(dev_priv->fb_nomap_heap,\
+ size, alignment, \
+ file_priv); \
+ if (block) goto alloc_ok;\
+ }\
+ block = nouveau_mem_alloc_block(dev_priv->fb_heap, size,\
+ alignment, file_priv);\
+ if (block) goto alloc_ok;\
+ }
+
+
+ if (flags&NOUVEAU_MEM_FB) NOUVEAU_MEM_ALLOC_FB
+ if (flags&NOUVEAU_MEM_AGP) NOUVEAU_MEM_ALLOC_AGP
+ if (flags&NOUVEAU_MEM_PCI) NOUVEAU_MEM_ALLOC_PCI
+ if (flags&NOUVEAU_MEM_FB_ACCEPTABLE) NOUVEAU_MEM_ALLOC_FB
+ if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) NOUVEAU_MEM_ALLOC_AGP
+ if (flags&NOUVEAU_MEM_PCI_ACCEPTABLE) NOUVEAU_MEM_ALLOC_PCI
+
return NULL;
@@ -436,216 +514,92 @@ alloc_ok:
if (flags&NOUVEAU_MEM_MAPPED)
{
- int ret;
+ struct drm_map_list *entry;
+ int ret = 0;
block->flags|=NOUVEAU_MEM_MAPPED;
- if (type == NOUVEAU_MEM_AGP)
- ret = drm_addmap(dev, block->start - dev->agp->base, block->size,
- _DRM_AGP, 0, &block->map);
- else
+ if (type == NOUVEAU_MEM_AGP) {
+ if (dev_priv->gart_info.type != NOUVEAU_GART_SGDMA)
+ ret = drm_addmap(dev, block->start, block->size,
+ _DRM_AGP, 0, &block->map);
+ else
ret = drm_addmap(dev, block->start, block->size,
- _DRM_FRAME_BUFFER, 0, &block->map);
+ _DRM_SCATTER_GATHER, 0, &block->map);
+ }
+ else if (type == NOUVEAU_MEM_FB)
+ ret = drm_addmap(dev, block->start + dev_priv->fb_phys,
+ block->size, _DRM_FRAME_BUFFER,
+ 0, &block->map);
+ else if (type == NOUVEAU_MEM_PCI)
+ ret = drm_addmap(dev, block->start, block->size,
+ _DRM_SCATTER_GATHER, 0, &block->map);
+
if (ret) {
nouveau_mem_free_block(block);
return NULL;
}
+
+ entry = drm_find_matching_map(dev, block->map);
+ if (!entry) {
+ nouveau_mem_free_block(block);
+ return NULL;
+ }
+ block->map_handle = entry->user_token;
}
- DRM_INFO("allocated 0x%llx\n", block->start);
+ DRM_DEBUG("allocated 0x%llx type=0x%08x\n", block->start, block->flags);
return block;
}
void nouveau_mem_free(struct drm_device* dev, struct mem_block* block)
{
- DRM_INFO("freeing 0x%llx\n", block->start);
+ DRM_DEBUG("freeing 0x%llx type=0x%08x\n", block->start, block->flags);
if (block->flags&NOUVEAU_MEM_MAPPED)
drm_rmmap(dev, block->map);
nouveau_mem_free_block(block);
}
-static void
-nouveau_instmem_determine_amount(struct drm_device *dev)
-{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- int i;
-
- /* Figure out how much instance memory we need */
- switch (dev_priv->card_type) {
- case NV_40:
- /* We'll want more instance memory than this on some NV4x cards.
- * There's a 16MB aperture to play with that maps onto the end
- * of vram. For now, only reserve a small piece until we know
- * more about what each chipset requires.
- */
- dev_priv->ramin_size = (1*1024* 1024);
- break;
- default:
- /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN
- * exist in vram on those cards as well?
- */
- dev_priv->ramin_size = (512*1024);
- break;
- }
- DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size>>10);
-
- /* Clear all of it, except the BIOS image that's in the first 64KiB */
- for (i=(64*1024); i<dev_priv->ramin_size; i+=4)
- NV_WI32(i, 0x00000000);
-}
-
-static void
-nouveau_instmem_configure_fixed_tables(struct drm_device *dev)
-{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
-
- /* FIFO hash table (RAMHT)
- * use 4k hash table at RAMIN+0x10000
- * TODO: extend the hash table
- */
- dev_priv->ramht_offset = 0x10000;
- dev_priv->ramht_bits = 9;
- dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
- DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
- dev_priv->ramht_size);
-
- /* FIFO runout table (RAMRO) - 512k at 0x11200 */
- dev_priv->ramro_offset = 0x11200;
- dev_priv->ramro_size = 512;
- DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
- dev_priv->ramro_size);
-
- /* FIFO context table (RAMFC)
- * NV40 : Not sure exactly how to position RAMFC on some cards,
- * 0x30002 seems to position it at RAMIN+0x20000 on these
- * cards. RAMFC is 4kb (32 fifos, 128byte entries).
- * Others: Position RAMFC at RAMIN+0x11400
- */
- switch(dev_priv->card_type)
- {
- case NV_50:
- case NV_40:
- case NV_44:
- dev_priv->ramfc_offset = 0x20000;
- dev_priv->ramfc_size = nouveau_fifo_number(dev) *
- nouveau_fifo_ctx_size(dev);
- break;
- case NV_30:
- case NV_20:
- case NV_17:
- case NV_10:
- case NV_04:
- case NV_03:
- default:
- dev_priv->ramfc_offset = 0x11400;
- dev_priv->ramfc_size = nouveau_fifo_number(dev) *
- nouveau_fifo_ctx_size(dev);
- break;
- }
- DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
- dev_priv->ramfc_size);
-}
-
-int nouveau_instmem_init(struct drm_device *dev)
-{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t offset;
- int ret = 0;
-
- nouveau_instmem_determine_amount(dev);
- nouveau_instmem_configure_fixed_tables(dev);
-
- /* Create a heap to manage RAMIN allocations, we don't allocate
- * the space that was reserved for RAMHT/FC/RO.
- */
- offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
- ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
- offset, dev_priv->ramin_size - offset);
- if (ret) {
- dev_priv->ramin_heap = NULL;
- DRM_ERROR("Failed to init RAMIN heap\n");
- }
-
- return ret;
-}
-
-struct mem_block *nouveau_instmem_alloc(struct drm_device *dev,
- uint32_t size, uint32_t align)
-{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct mem_block *block;
-
- if (!dev_priv->ramin_heap) {
- DRM_ERROR("instmem alloc called without init\n");
- return NULL;
- }
-
- block = nouveau_mem_alloc_block(dev_priv->ramin_heap, size, align,
- (DRMFILE)-2);
- if (block) {
- block->flags = NOUVEAU_MEM_INSTANCE;
- DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n",
- size, (1<<align), (uint32_t)block->start);
- }
-
- return block;
-}
-
-void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block)
-{
- if (dev && block) {
- nouveau_mem_free_block(block);
- }
-}
-
/*
* Ioctls
*/
-int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS)
+int nouveau_ioctl_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- drm_nouveau_mem_alloc_t alloc;
+ struct drm_nouveau_mem_alloc *alloc = data;
struct mem_block *block;
- if (!dev_priv) {
- DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
- }
-
- DRM_COPY_FROM_USER_IOCTL(alloc, (drm_nouveau_mem_alloc_t __user *) data,
- sizeof(alloc));
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
- block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp);
+ block=nouveau_mem_alloc(dev, alloc->alignment, alloc->size,
+ alloc->flags, file_priv);
if (!block)
- return DRM_ERR(ENOMEM);
- alloc.region_offset=block->start;
- alloc.flags=block->flags;
-
- DRM_COPY_TO_USER_IOCTL((drm_nouveau_mem_alloc_t __user *) data, alloc, sizeof(alloc));
+ return -ENOMEM;
+ alloc->map_handle=block->map_handle;
+ alloc->offset=block->start;
+ alloc->flags=block->flags;
return 0;
}
-int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS)
+int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- drm_nouveau_mem_free_t memfree;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_mem_free *memfree = data;
struct mem_block *block;
- DRM_COPY_FROM_USER_IOCTL(memfree, (drm_nouveau_mem_free_t __user *) data,
- sizeof(memfree));
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
block=NULL;
- if (memfree.flags&NOUVEAU_MEM_FB)
- block = find_block(dev_priv->fb_heap, memfree.region_offset);
- else if (memfree.flags&NOUVEAU_MEM_AGP)
- block = find_block(dev_priv->agp_heap, memfree.region_offset);
+ if (memfree->flags & NOUVEAU_MEM_FB)
+ block = find_block(dev_priv->fb_heap, memfree->offset);
+ else if (memfree->flags & NOUVEAU_MEM_AGP)
+ block = find_block(dev_priv->agp_heap, memfree->offset);
+ else if (memfree->flags & NOUVEAU_MEM_PCI)
+ block = find_block(dev_priv->pci_heap, memfree->offset);
if (!block)
- return DRM_ERR(EFAULT);
- if (block->filp != filp)
- return DRM_ERR(EPERM);
+ return -EFAULT;
+ if (block->file_priv != file_priv)
+ return -EPERM;
nouveau_mem_free(dev, block);
return 0;
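The ioctl conversion above follows the new DRM calling convention: the core copies the argument struct between user and kernel space around the handler call, so the handler simply casts the data pointer and returns a plain negative errno. A minimal sketch of a handler under that convention, using a hypothetical argument struct and handler name (not part of this patch):

struct drm_example_args {
	uint32_t size;    /* in  */
	uint32_t handle;  /* out */
};

static int example_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
	struct drm_example_args *args = data;  /* already copied in by the core */

	if (args->size == 0)
		return -EINVAL;                /* plain errno, no DRM_ERR() */

	args->handle = 0x1;                    /* copied back out by the core */
	return 0;
}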
diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c
index 0cfe733e..d3b79683 100644
--- a/shared-core/nouveau_notifier.c
+++ b/shared-core/nouveau_notifier.c
@@ -30,21 +30,30 @@
#include "nouveau_drv.h"
int
-nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp)
+nouveau_notifier_init_channel(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int flags, ret;
/*TODO: PCI notifier blocks */
+#ifndef __powerpc__
if (dev_priv->agp_heap)
- flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE;
+ flags = NOUVEAU_MEM_AGP;
+ else
+#endif
+ if (dev_priv->pci_heap)
+ flags = NOUVEAU_MEM_PCI;
else
flags = NOUVEAU_MEM_FB;
+ flags |= (NOUVEAU_MEM_MAPPED | NOUVEAU_MEM_FB_ACCEPTABLE);
- chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,filp);
+ chan->notifier_block = nouveau_mem_alloc(dev, 0, PAGE_SIZE, flags,
+ (struct drm_file *)-2);
if (!chan->notifier_block)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
+ DRM_DEBUG("Allocated notifier block in 0x%08x\n",
+ chan->notifier_block->flags);
ret = nouveau_mem_init_heap(&chan->notifier_heap,
0, chan->notifier_block->size);
@@ -55,71 +64,92 @@ nouveau_notifier_init_channel(drm_device_t *dev, int channel, DRMFILE filp)
}
void
-nouveau_notifier_takedown_channel(drm_device_t *dev, int channel)
+nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
if (chan->notifier_block) {
nouveau_mem_free(dev, chan->notifier_block);
chan->notifier_block = NULL;
}
- /*XXX: heap destroy */
+ nouveau_mem_takedown(&chan->notifier_heap);
+}
+
+static void
+nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
+ struct nouveau_gpuobj *gpuobj)
+{
+ DRM_DEBUG("\n");
+
+ if (gpuobj->priv)
+ nouveau_mem_free_block(gpuobj->priv);
}
int
-nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle,
+nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
int count, uint32_t *b_offset)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
- struct nouveau_object *obj;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *nobj = NULL;
struct mem_block *mem;
uint32_t offset;
- int target;
+ int target, ret;
if (!chan->notifier_heap) {
DRM_ERROR("Channel %d doesn't have a notifier heap!\n",
- channel);
- return DRM_ERR(EINVAL);
+ chan->id);
+ return -EINVAL;
}
- mem = nouveau_mem_alloc_block(chan->notifier_heap, 32, 0, chan->filp);
+ mem = nouveau_mem_alloc_block(chan->notifier_heap, count*32, 0,
+ (struct drm_file *)-2);
if (!mem) {
- DRM_ERROR("Channel %d notifier block full\n", channel);
- return DRM_ERR(ENOMEM);
+ DRM_ERROR("Channel %d notifier block full\n", chan->id);
+ return -ENOMEM;
}
mem->flags = NOUVEAU_MEM_NOTIFIER;
- offset = chan->notifier_block->start + mem->start;
+ offset = chan->notifier_block->start;
if (chan->notifier_block->flags & NOUVEAU_MEM_FB) {
- offset -= drm_get_resource_start(dev, 1);
target = NV_DMA_TARGET_VIDMEM;
- } else if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
- offset -= dev_priv->agp_phys;
- target = NV_DMA_TARGET_AGP;
+ } else
+ if (chan->notifier_block->flags & NOUVEAU_MEM_AGP) {
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
+ dev_priv->card_type < NV_50) {
+ ret = nouveau_sgdma_get_page(dev, offset, &offset);
+ if (ret)
+ return ret;
+ target = NV_DMA_TARGET_PCI;
+ } else {
+ target = NV_DMA_TARGET_AGP;
+ }
+ } else
+ if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {
+ target = NV_DMA_TARGET_PCI_NONLINEAR;
} else {
DRM_ERROR("Bad DMA target, flags 0x%08x!\n",
chan->notifier_block->flags);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
+ offset += mem->start;
- obj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY,
- offset, mem->size, NV_DMA_ACCESS_RW,
- target);
- if (!obj) {
+ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ offset, mem->size,
+ NV_DMA_ACCESS_RW, target, &nobj))) {
nouveau_mem_free_block(mem);
- DRM_ERROR("Error creating notifier ctxdma\n");
- return DRM_ERR(ENOMEM);
+ DRM_ERROR("Error creating notifier ctxdma: %d\n", ret);
+ return ret;
}
+ nobj->dtor = nouveau_notifier_gpuobj_dtor;
+ nobj->priv = mem;
- obj->handle = handle;
- if (nouveau_ramht_insert(dev, channel, handle, obj)) {
- nouveau_object_free(dev, obj);
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL))) {
+ nouveau_gpuobj_del(dev, &nobj);
nouveau_mem_free_block(mem);
- DRM_ERROR("Error inserting notifier ctxdma into RAMHT\n");
- return DRM_ERR(ENOMEM);
+ DRM_ERROR("Error referencing notifier ctxdma: %d\n", ret);
+ return ret;
}
*b_offset = mem->start;
@@ -127,28 +157,20 @@ nouveau_notifier_alloc(drm_device_t *dev, int channel, uint32_t handle,
}
int
-nouveau_ioctl_notifier_alloc(DRM_IOCTL_ARGS)
+nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_nouveau_notifier_alloc_t na;
+ struct drm_nouveau_notifierobj_alloc *na = data;
+ struct nouveau_channel *chan;
int ret;
- DRM_COPY_FROM_USER_IOCTL(na, (drm_nouveau_notifier_alloc_t __user*)data,
- sizeof(na));
-
- if (!nouveau_fifo_owner(dev, filp, na.channel)) {
- DRM_ERROR("pid %d doesn't own channel %d\n",
- DRM_CURRENTPID, na.channel);
- return DRM_ERR(EPERM);
- }
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(na->channel, file_priv, chan);
- ret = nouveau_notifier_alloc(dev, na.channel, na.handle,
- na.count, &na.offset);
+ ret = nouveau_notifier_alloc(chan, na->handle, na->count, &na->offset);
if (ret)
return ret;
- DRM_COPY_TO_USER_IOCTL((drm_nouveau_notifier_alloc_t __user*)data,
- na, sizeof(na));
return 0;
}
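The notifier path above picks the ctxdma target from the flags of the backing allocation (FB, AGP or PCI), with an extra SGDMA translation step on pre-NV50 AGP setups. A hedged sketch of that selection, with a hypothetical helper name and without the SGDMA special case:

static int example_pick_notifier_target(uint32_t mem_flags)
{
	if (mem_flags & NOUVEAU_MEM_FB)
		return NV_DMA_TARGET_VIDMEM;
	if (mem_flags & NOUVEAU_MEM_AGP)
		return NV_DMA_TARGET_AGP;           /* may become PCI via SGDMA, see above */
	if (mem_flags & NOUVEAU_MEM_PCI)
		return NV_DMA_TARGET_PCI_NONLINEAR;
	return -EINVAL;                             /* unknown backing type */
}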
diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c
index dac08df4..fbce7702 100644
--- a/shared-core/nouveau_object.c
+++ b/shared-core/nouveau_object.c
@@ -35,79 +35,6 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
-/* TODO
- * - Check object class, deny unsafe objects (add card-specific versioning?)
- * - Get rid of DMA object creation, this should be wrapped by MM routines.
- */
-
-/* Translate a RAMIN offset into a value the card understands, will be useful
- * in the future when we can access more instance ram which isn't mapped into
- * the PRAMIN aperture
- */
-uint32_t
-nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem)
-{
- uint32_t inst = (uint32_t)mem->start >> 4;
- DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n",
- mem->start, inst);
- return inst;
-}
-
-static void
-nouveau_object_link(drm_device_t *dev, struct nouveau_object *obj)
-{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel];
-
- if (!chan->objs) {
- chan->objs = obj;
- return;
- }
-
- obj->prev = NULL;
- obj->next = chan->objs;
-
- chan->objs->prev = obj;
- chan->objs = obj;
-}
-
-static void
-nouveau_object_unlink(drm_device_t *dev, struct nouveau_object *obj)
-{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[obj->channel];
-
- if (obj->prev == NULL) {
- if (obj->next)
- obj->next->prev = NULL;
- chan->objs = obj->next;
- } else if (obj->next == NULL) {
- if (obj->prev)
- obj->prev->next = NULL;
- } else {
- obj->prev->next = obj->next;
- obj->next->prev = obj->prev;
- }
-}
-
-static struct nouveau_object *
-nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle)
-{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
- struct nouveau_object *obj = chan->objs;
-
- DRM_DEBUG("Looking for handle 0x%08x\n", handle);
- while (obj) {
- if (obj->handle == handle)
- return obj;
- obj = obj->next;
- }
-
- DRM_DEBUG("...couldn't find handle\n");
- return NULL;
-}
-
/* NVidia uses context objects to drive drawing operations.
Context objects can be selected into 8 subchannels in the FIFO,
@@ -139,157 +66,550 @@ nouveau_object_handle_find(drm_device_t *dev, int channel, uint32_t handle)
is given as:
*/
static uint32_t
-nouveau_ramht_hash_handle(drm_device_t *dev, int channel, uint32_t handle)
+nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
uint32_t hash = 0;
int i;
+ DRM_DEBUG("ch%d handle=0x%08x\n", channel, handle);
+
for (i=32;i>0;i-=dev_priv->ramht_bits) {
hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
handle >>= dev_priv->ramht_bits;
}
- hash ^= channel << (dev_priv->ramht_bits - 4);
- return hash << 3;
+ if (dev_priv->card_type < NV_50)
+ hash ^= channel << (dev_priv->ramht_bits - 4);
+ hash <<= 3;
+
+ DRM_DEBUG("hash=0x%08x\n", hash);
+ return hash;
}
static int
-nouveau_ramht_entry_valid(drm_device_t *dev, uint32_t ramht, uint32_t offset)
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+ uint32_t offset)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- uint32_t ctx = NV_RI32(ramht + offset + 4);
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ uint32_t ctx = INSTANCE_RD(ramht, (offset + 4)/4);
if (dev_priv->card_type < NV_40)
return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
return (ctx != 0);
}
-int
-nouveau_ramht_insert(drm_device_t* dev, int channel, uint32_t handle,
- struct nouveau_object *obj)
+static int
+nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- uint32_t ramht = dev_priv->ramht_offset;
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
+ struct nouveau_gpuobj *gpuobj = ref->gpuobj;
uint32_t ctx, co, ho;
- uint32_t inst;
- inst = nouveau_chip_instance_get(dev, obj->instance);
+ if (!ramht) {
+ DRM_ERROR("No hash table!\n");
+ return -EINVAL;
+ }
+
if (dev_priv->card_type < NV_40) {
- ctx = NV_RAMHT_CONTEXT_VALID | inst |
- (channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+ ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
+ (ref->channel << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
} else
if (dev_priv->card_type < NV_50) {
- ctx = inst |
- (channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
- (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ ctx = (ref->instance >> 4) |
+ (ref->channel << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
} else {
- ctx = inst |
- (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+ ctx = (ref->instance >> 4) |
+ (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
}
- co = ho = nouveau_ramht_hash_handle(dev, channel, handle);
+ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
do {
if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
DRM_DEBUG("insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- channel, co, handle, ctx);
- NV_WI32(ramht + co + 0, handle);
- NV_WI32(ramht + co + 4, ctx);
- obj->handle = handle;
+ ref->channel, co, ref->handle, ctx);
+ INSTANCE_WR(ramht, (co + 0)/4, ref->handle);
+ INSTANCE_WR(ramht, (co + 4)/4, ctx);
+
+ list_add_tail(&ref->list, &chan->ramht_refs);
return 0;
}
DRM_DEBUG("collision ch%d 0x%08x: h=0x%08x\n",
- channel, co, NV_RI32(ramht + co));
+ ref->channel, co, INSTANCE_RD(ramht, co/4));
co += 8;
- if (co == dev_priv->ramht_size)
+ if (co >= dev_priv->ramht_size) {
+ DRM_INFO("no space left after collision\n");
co = 0;
+ /* exit, as wrapping seems to cause a crash with nouveau_demo
+ * and the 0xdead0001 object */
+ break;
+ }
} while (co != ho);
- DRM_ERROR("RAMHT space exhausted. ch=%d\n", channel);
- return DRM_ERR(ENOMEM);
+ DRM_ERROR("RAMHT space exhausted. ch=%d\n", ref->channel);
+ return -ENOMEM;
}
static void
-nouveau_ramht_remove(drm_device_t* dev, struct nouveau_object *obj)
+nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t ramht = dev_priv->ramht_offset;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[ref->channel];
+ struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
uint32_t co, ho;
- co = ho = nouveau_ramht_hash_handle(dev, obj->channel, obj->handle);
+ if (!ramht) {
+ DRM_ERROR("No hash table!\n");
+ return;
+ }
+
+ co = ho = nouveau_ramht_hash_handle(dev, ref->channel, ref->handle);
do {
if (nouveau_ramht_entry_valid(dev, ramht, co) &&
- (obj->handle == NV_RI32(ramht + co))) {
+ (ref->handle == INSTANCE_RD(ramht, (co/4)))) {
DRM_DEBUG("remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
- obj->channel, co, obj->handle,
- NV_RI32(ramht + co + 4));
- NV_WI32(ramht + co + 0, 0x00000000);
- NV_WI32(ramht + co + 4, 0x00000000);
- obj->handle = ~0;
+ ref->channel, co, ref->handle,
+ INSTANCE_RD(ramht, (co + 4)));
+ INSTANCE_WR(ramht, (co + 0)/4, 0x00000000);
+ INSTANCE_WR(ramht, (co + 4)/4, 0x00000000);
+
+ list_del(&ref->list);
return;
}
co += 8;
- if (co == dev_priv->ramht_size)
+ if (co >= dev_priv->ramht_size)
co = 0;
} while (co != ho);
DRM_ERROR("RAMHT entry not found. ch=%d, handle=0x%08x\n",
- obj->channel, obj->handle);
+ ref->channel, ref->handle);
}
-static struct nouveau_object *
-nouveau_object_instance_alloc(drm_device_t* dev, int channel)
+int
+nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
+ int size, int align, uint32_t flags,
+ struct nouveau_gpuobj **gpuobj_ret)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- struct nouveau_object *obj;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct nouveau_gpuobj *gpuobj;
+ struct mem_block *pramin = NULL;
+ int ret;
+
+ DRM_DEBUG("ch%d size=%d align=%d flags=0x%08x\n",
+ chan ? chan->id : -1, size, align, flags);
+
+ if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
+ return -EINVAL;
+
+ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
+ if (!gpuobj)
+ return -ENOMEM;
+ DRM_DEBUG("gpuobj %p\n", gpuobj);
+ gpuobj->flags = flags;
+ gpuobj->im_channel = chan ? chan->id : -1;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ /* Choose between global instmem heap, and per-channel private
+ * instmem heap. On <NV50 allow requests for private instmem
+ * to be satisfied from global heap if no per-channel area
+ * available.
+ */
+ if (chan) {
+ if (chan->ramin_heap) {
+ DRM_DEBUG("private heap\n");
+ pramin = chan->ramin_heap;
+ } else
+ if (dev_priv->card_type < NV_50) {
+ DRM_DEBUG("global heap fallback\n");
+ pramin = dev_priv->ramin_heap;
+ }
+ } else {
+ DRM_DEBUG("global heap\n");
+ pramin = dev_priv->ramin_heap;
+ }
- /* Create object struct */
- obj = drm_calloc(1, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
- if (!obj) {
- DRM_ERROR("couldn't alloc memory for object\n");
- return NULL;
+ if (!pramin) {
+ DRM_ERROR("No PRAMIN heap!\n");
+ return -EINVAL;
}
- /* Allocate instance memory */
- obj->instance = nouveau_instmem_alloc(dev,
- (dev_priv->card_type >= NV_40 ? 32 : 16), 4);
- if (!obj->instance) {
- DRM_ERROR("couldn't alloc RAMIN for object\n");
- drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
- return NULL;
+ if (!chan && (ret = engine->instmem.populate(dev, gpuobj, &size))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ /* Allocate a chunk of the PRAMIN aperture */
+ gpuobj->im_pramin = nouveau_mem_alloc_block(pramin, size,
+ drm_order(align),
+ (struct drm_file *)-2);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_pramin->flags = NOUVEAU_MEM_INSTANCE;
+
+ if (!chan && (ret = engine->instmem.bind(dev, gpuobj))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ int i;
+
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ INSTANCE_WR(gpuobj, i/4, 0);
}
- /* Bind object to channel */
- obj->channel = channel;
- obj->handle = ~0;
- nouveau_object_link(dev, obj);
+ *gpuobj_ret = gpuobj;
+ return 0;
+}
+
+int
+nouveau_gpuobj_early_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- return obj;
+ DRM_DEBUG("\n");
+
+ INIT_LIST_HEAD(&dev_priv->gpuobj_list);
+
+ return 0;
}
-static void
-nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj)
+int
+nouveau_gpuobj_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ if (dev_priv->card_type < NV_50) {
+ if ((ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramht_offset,
+ ~0, dev_priv->ramht_size,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ALLOW_NO_REFS,
+ &dev_priv->ramht, NULL)))
+ return ret;
+ }
+
+ return 0;
+}
+
+void
+nouveau_gpuobj_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ nouveau_gpuobj_del(dev, &dev_priv->ramht);
+}
+
+void
+nouveau_gpuobj_late_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
+ struct list_head *entry, *tmp;
+
+ DRM_DEBUG("\n");
+
+ list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
+ gpuobj = list_entry(entry, struct nouveau_gpuobj, list);
+
+ DRM_ERROR("gpuobj %p still exists at takedown, refs=%d\n",
+ gpuobj, gpuobj->refcount);
+ gpuobj->refcount = 0;
+ nouveau_gpuobj_del(dev, &gpuobj);
+ }
+}
+
+int
+nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct nouveau_gpuobj *gpuobj;
+
+ DRM_DEBUG("gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);
+
+ if (!dev_priv || !pgpuobj || !(*pgpuobj))
+ return -EINVAL;
+ gpuobj = *pgpuobj;
+
+ if (gpuobj->refcount != 0) {
+ DRM_ERROR("gpuobj refcount is %d\n", gpuobj->refcount);
+ return -EINVAL;
+ }
+
+ if (gpuobj->dtor)
+ gpuobj->dtor(dev, gpuobj);
+
+ if (gpuobj->im_backing) {
+ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+ drm_free(gpuobj->im_backing,
+ sizeof(*gpuobj->im_backing), DRM_MEM_DRIVER);
+ else
+ engine->instmem.clear(dev, gpuobj);
+ }
+
+ if (gpuobj->im_pramin) {
+ if (gpuobj->flags & NVOBJ_FLAG_FAKE)
+ drm_free(gpuobj->im_pramin, sizeof(*gpuobj->im_pramin),
+ DRM_MEM_DRIVER);
+ else
+ nouveau_mem_free_block(gpuobj->im_pramin);
+ }
+
+ list_del(&gpuobj->list);
+
+ *pgpuobj = NULL;
+ drm_free(gpuobj, sizeof(*gpuobj), DRM_MEM_DRIVER);
+ return 0;
+}
+
+static int
+nouveau_gpuobj_instance_get(struct drm_device *dev,
+ struct nouveau_channel *chan,
+ struct nouveau_gpuobj *gpuobj, uint32_t *inst)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *cpramin;
+
+ /* <NV50 use PRAMIN address everywhere */
+ if (dev_priv->card_type < NV_50) {
+ *inst = gpuobj->im_pramin->start;
+ return 0;
+ }
+
+ if (chan && gpuobj->im_channel != chan->id) {
+ DRM_ERROR("Channel mismatch: obj %d, ref %d\n",
+ gpuobj->im_channel, chan->id);
+ return -EINVAL;
+ }
+
+ /* NV50 channel-local instance */
+ if (chan) {
+ cpramin = chan->ramin->gpuobj;
+ *inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
+ return 0;
+ }
+
+ /* NV50 global (VRAM) instance */
+ if (gpuobj->im_channel < 0) {
+ /* ...from global heap */
+ if (!gpuobj->im_backing) {
+ DRM_ERROR("AII, no VRAM backing gpuobj\n");
+ return -EINVAL;
+ }
+ *inst = gpuobj->im_backing->start;
+ return 0;
+ } else {
+ /* ...from local heap */
+ cpramin = dev_priv->fifos[gpuobj->im_channel]->ramin->gpuobj;
+ *inst = cpramin->im_backing->start +
+ (gpuobj->im_pramin->start - cpramin->im_pramin->start);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
+ uint32_t handle, struct nouveau_gpuobj *gpuobj,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj_ref *ref;
+ uint32_t instance;
+ int ret;
+
+ DRM_DEBUG("ch%d h=0x%08x gpuobj=%p\n",
+ chan ? chan->id : -1, handle, gpuobj);
+
+ if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
+ return -EINVAL;
+
+ if (!chan && !ref_ret)
+ return -EINVAL;
+
+ ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
+ if (ret)
+ return ret;
+
+ ref = drm_calloc(1, sizeof(*ref), DRM_MEM_DRIVER);
+ if (!ref)
+ return -ENOMEM;
+ ref->gpuobj = gpuobj;
+ ref->channel = chan ? chan->id : -1;
+ ref->instance = instance;
+
+ if (!ref_ret) {
+ ref->handle = handle;
+
+ ret = nouveau_ramht_insert(dev, ref);
+ if (ret) {
+ drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
+ return ret;
+ }
+ } else {
+ ref->handle = ~0;
+ *ref_ret = ref;
+ }
+
+ ref->gpuobj->refcount++;
+ return 0;
+}
+
+int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct nouveau_gpuobj_ref *ref;
+
+ DRM_DEBUG("ref %p\n", pref ? *pref : NULL);
+
+ if (!dev || !pref || *pref == NULL)
+ return -EINVAL;
+ ref = *pref;
+
+ if (ref->handle != ~0)
+ nouveau_ramht_remove(dev, ref);
+
+ if (ref->gpuobj) {
+ ref->gpuobj->refcount--;
+
+ if (ref->gpuobj->refcount == 0) {
+ if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
+ nouveau_gpuobj_del(dev, &ref->gpuobj);
+ }
+ }
+
+ *pref = NULL;
+ drm_free(ref, sizeof(*ref), DRM_MEM_DRIVER);
+ return 0;
+}
+
+int
+nouveau_gpuobj_new_ref(struct drm_device *dev,
+ struct nouveau_channel *oc, struct nouveau_channel *rc,
+ uint32_t handle, int size, int align, uint32_t flags,
+ struct nouveau_gpuobj_ref **ref)
+{
+ struct nouveau_gpuobj *gpuobj = NULL;
+ int ret;
+
+ if ((ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj)))
+ return ret;
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+int
+nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
+ struct nouveau_gpuobj_ref **ref_ret)
+{
+ struct nouveau_gpuobj_ref *ref;
+ struct list_head *entry, *tmp;
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
+
+ if (ref->handle == handle) {
+ if (ref_ret)
+ *ref_ret = ref;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+int
+nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
+ uint32_t b_offset, uint32_t size,
+ uint32_t flags, struct nouveau_gpuobj **pgpuobj,
+ struct nouveau_gpuobj_ref **pref)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *gpuobj = NULL;
int i;
- /* Unbind object from channel */
- nouveau_object_unlink(dev, obj);
+ DRM_DEBUG("p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
+ p_offset, b_offset, size, flags);
+
+ gpuobj = drm_calloc(1, sizeof(*gpuobj), DRM_MEM_DRIVER);
+ if (!gpuobj)
+ return -ENOMEM;
+ DRM_DEBUG("gpuobj %p\n", gpuobj);
+ gpuobj->im_channel = -1;
+ gpuobj->flags = flags | NVOBJ_FLAG_FAKE;
+
+ list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
+
+ if (p_offset != ~0) {
+ gpuobj->im_pramin = drm_calloc(1, sizeof(struct mem_block),
+ DRM_MEM_DRIVER);
+ if (!gpuobj->im_pramin) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_pramin->start = p_offset;
+ gpuobj->im_pramin->size = size;
+ }
+
+ if (b_offset != ~0) {
+ gpuobj->im_backing = drm_calloc(1, sizeof(struct mem_block),
+ DRM_MEM_DRIVER);
+ if (!gpuobj->im_backing) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return -ENOMEM;
+ }
+ gpuobj->im_backing->start = b_offset;
+ gpuobj->im_backing->size = size;
+ }
- /* Clean RAMIN entry */
- DRM_DEBUG("Instance entry for 0x%08x"
- "(engine %d, class 0x%x) before destroy:\n",
- obj->handle, obj->engine, obj->class);
- for (i=0; i<(obj->instance->size/4); i++) {
- DRM_DEBUG(" +0x%02x: 0x%08x\n", (i*4),
- INSTANCE_RD(obj->instance, i));
- INSTANCE_WR(obj->instance, i, 0x00000000);
+ if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
+ for (i = 0; i < gpuobj->im_pramin->size; i += 4)
+ INSTANCE_WR(gpuobj, i/4, 0);
}
- /* Free RAMIN */
- nouveau_instmem_free(dev, obj->instance);
+ if (pref) {
+ if ((i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref))) {
+ nouveau_gpuobj_del(dev, &gpuobj);
+ return i;
+ }
+ }
+
+ if (pgpuobj)
+ *pgpuobj = gpuobj;
+ return 0;
+}
+
+
+static int
+nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*XXX: dodgy hack for now */
+ if (dev_priv->card_type >= NV_50)
+ return 24;
+ if (dev_priv->card_type >= NV_40)
+ return 32;
+ return 16;
}
/*
@@ -305,75 +625,190 @@ nouveau_object_instance_free(drm_device_t *dev, struct nouveau_object *obj)
17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
31:20 dma adjust (bits 0-11 of the address)
entry[1]
- dma limit
- entry[2]
+ dma limit (size of transfer)
+ entry[X]
1 0 readonly, 1 readwrite
- 31:12 dma frame address (bits 12-31 of the address)
+ 31:12 dma frame address of the page (bits 12-31 of the address)
+ entry[N]
+ page table terminator, same value as the first pte (as nvidia does);
+ rivatv uses 0xffffffff
- Non linear page tables seem to need a list of frame addresses afterwards,
- the rivatv project has some info on this.
+ Non linear page tables need a list of frame addresses afterwards;
+ the rivatv project has some info on this.
The method below creates a DMA object in instance RAM and returns a handle
to it that can be used to set up context objects.
*/
-
-struct nouveau_object *
-nouveau_object_dma_create(drm_device_t* dev, int channel, int class,
- uint32_t offset, uint32_t size,
- int access, int target)
+int
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
+ uint64_t offset, uint64_t size, int access,
+ int target, struct nouveau_gpuobj **gpuobj)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- struct nouveau_object *obj;
- uint32_t frame, adjust;
- uint32_t pte_flags = 0;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+ uint32_t is_scatter_gather = 0;
+
+ /* Total number of pages covered by the request.
+ */
+ const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("offset:0x%08x, size:0x%08x, target:%d, access:%d\n",
- offset, size, target, access);
+
+ DRM_DEBUG("ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
+ chan->id, class, offset, size);
+ DRM_DEBUG("access=%d target=%d\n", access, target);
switch (target) {
- case NV_DMA_TARGET_AGP:
- offset += dev_priv->agp_phys;
- break;
- default:
- break;
+ case NV_DMA_TARGET_AGP:
+ offset += dev_priv->gart_info.aper_base;
+ break;
+ case NV_DMA_TARGET_PCI_NONLINEAR:
+ /*assume the "offset" is a virtual memory address*/
+ is_scatter_gather = 1;
+ /*put back the right value*/
+ target = NV_DMA_TARGET_PCI;
+ break;
+ default:
+ break;
+ }
+
+ ret = nouveau_gpuobj_new(dev, chan,
+ is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class),
+ 16,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ gpuobj);
+ if (ret) {
+ DRM_ERROR("Error creating gpuobj: %d\n", ret);
+ return ret;
}
- switch (access) {
- case NV_DMA_ACCESS_RO:
- break;
- case NV_DMA_ACCESS_WO:
- case NV_DMA_ACCESS_RW:
- pte_flags |= (1 << 1);
- break;
- default:
- DRM_ERROR("invalid access mode=%d\n", access);
- return NULL;
- }
+ if (dev_priv->card_type < NV_50) {
+ uint32_t frame, adjust, pte_flags = 0;
+ adjust = offset & 0x00000fff;
+ if (access != NV_DMA_ACCESS_RO)
+ pte_flags |= (1<<1);
+
+ if ( ! is_scatter_gather )
+ {
+ frame = offset & ~0x00000fff;
+
+ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |
+ (adjust << 20) |
+ (access << 14) |
+ (target << 16) |
+ class));
+ INSTANCE_WR(*gpuobj, 1, size - 1);
+ INSTANCE_WR(*gpuobj, 2, frame | pte_flags);
+ INSTANCE_WR(*gpuobj, 3, frame | pte_flags);
+ }
+ else
+ {
+ /* Initial page entry in the scatter-gather area that
+ * corresponds to the base offset
+ */
+ unsigned int idx = offset / PAGE_SIZE;
+
+ uint32_t instance_offset;
+ unsigned int i;
+
+ if ((idx + page_count) > dev->sg->pages) {
+ DRM_ERROR("Requested page range exceedes "
+ "allocated scatter-gather range!");
+ return -E2BIG;
+ }
+
+ DRM_DEBUG("Creating PCI DMA object using virtual zone starting at %#llx, size %d\n", offset, (uint32_t)size);
+ INSTANCE_WR(*gpuobj, 0, ((1<<12) | (0<<13) |
+ (adjust << 20) |
+ (access << 14) |
+ (target << 16) |
+ class));
+ INSTANCE_WR(*gpuobj, 1, (uint32_t) size-1);
+
+
+ /*write starting at the third dword*/
+ instance_offset = 2;
+
+ /*for each PAGE, get its bus address, fill in the page table entry, and advance*/
+ for (i = 0; i < page_count; i++) {
+ if (dev->sg->busaddr[idx] == 0) {
+ dev->sg->busaddr[idx] =
+ pci_map_page(dev->pdev,
+ dev->sg->pagelist[idx],
+ 0,
+ PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev->sg->busaddr[idx])) {
+ return -ENOMEM;
+ }
+ }
+
+ frame = (uint32_t) dev->sg->busaddr[idx];
+ INSTANCE_WR(*gpuobj, instance_offset,
+ frame | pte_flags);
+
+ idx++;
+ instance_offset ++;
+ }
+ }
+ } else {
+ uint32_t flags0, flags5;
- frame = offset & ~0x00000FFF;
- adjust = offset & 0x00000FFF;
+ if (target == NV_DMA_TARGET_VIDMEM) {
+ flags0 = 0x00190000;
+ flags5 = 0x00010000;
+ } else {
+ flags0 = 0x7fc00000;
+ flags5 = 0x00080000;
+ }
- obj = nouveau_object_instance_alloc(dev, channel);
- if (!obj) {
- DRM_ERROR("couldn't allocate DMA object\n");
- return obj;
+ INSTANCE_WR(*gpuobj, 0, flags0 | class);
+ INSTANCE_WR(*gpuobj, 1, offset + size - 1);
+ INSTANCE_WR(*gpuobj, 2, offset);
+ INSTANCE_WR(*gpuobj, 5, flags5);
}
- obj->engine = 0;
- obj->class = class;
+ (*gpuobj)->engine = NVOBJ_ENGINE_SW;
+ (*gpuobj)->class = class;
+ return 0;
+}
- INSTANCE_WR(obj->instance, 0, ((1<<12) | (1<<13) |
- (adjust << 20) |
- (access << 14) |
- (target << 16) |
- class));
- INSTANCE_WR(obj->instance, 1, size-1);
- INSTANCE_WR(obj->instance, 2, frame | pte_flags);
- INSTANCE_WR(obj->instance, 3, frame | pte_flags);
+int
+nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
+ uint64_t offset, uint64_t size, int access,
+ struct nouveau_gpuobj **gpuobj,
+ uint32_t *o_ret)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
- return obj;
-}
+ if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
+ (dev_priv->card_type >= NV_50 &&
+ dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ offset, size, access,
+ NV_DMA_TARGET_AGP, gpuobj);
+ if (o_ret)
+ *o_ret = 0;
+ } else
+ if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
+ *gpuobj = dev_priv->gart_info.sg_ctxdma;
+ if (offset & ~0xffffffffULL) {
+ DRM_ERROR("obj offset exceeds 32-bits\n");
+ return -EINVAL;
+ }
+ if (o_ret)
+ *o_ret = (uint32_t)offset;
+ ret = (*gpuobj != NULL) ? 0 : -EINVAL;
+ } else {
+ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+ return ret;
+}
/* Context objects in the instance RAM have the following structure.
* On NV40 they are 32 byte long, on NV30 and smaller 16 bytes.
@@ -426,108 +861,202 @@ nouveau_object_dma_create(drm_device_t* dev, int channel, int class,
entry[5]:
set to 0?
*/
-struct nouveau_object *
-nouveau_object_gr_create(drm_device_t* dev, int channel, int class)
+int
+nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
+ struct nouveau_gpuobj **gpuobj)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- struct nouveau_object *obj;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
- DRM_DEBUG("class=%x\n", class);
+ DRM_DEBUG("ch%d class=0x%04x\n", chan->id, class);
- obj = nouveau_object_instance_alloc(dev, channel);
- if (!obj) {
- DRM_ERROR("couldn't allocate context object\n");
- return obj;
+ ret = nouveau_gpuobj_new(dev, chan,
+ nouveau_gpuobj_class_instmem_size(dev, class),
+ 16,
+ NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
+ gpuobj);
+ if (ret) {
+ DRM_ERROR("Error creating gpuobj: %d\n", ret);
+ return ret;
}
- obj->engine = 1;
- obj->class = class;
-
+ if (dev_priv->card_type >= NV_50) {
+ INSTANCE_WR(*gpuobj, 0, class);
+ INSTANCE_WR(*gpuobj, 5, 0x00010000);
+ } else {
switch (class) {
case NV_CLASS_NULL:
- INSTANCE_WR(obj->instance, 0, 0x00001030);
- INSTANCE_WR(obj->instance, 1, 0xFFFFFFFF);
- INSTANCE_WR(obj->instance, 2, 0x00000000);
- INSTANCE_WR(obj->instance, 2, 0x00000000);
+ INSTANCE_WR(*gpuobj, 0, 0x00001030);
+ INSTANCE_WR(*gpuobj, 1, 0xFFFFFFFF);
break;
default:
if (dev_priv->card_type >= NV_40) {
- INSTANCE_WR(obj->instance, 0, obj->class);
- INSTANCE_WR(obj->instance, 1, 0x00000000);
+ INSTANCE_WR(*gpuobj, 0, class);
#ifdef __BIG_ENDIAN
- INSTANCE_WR(obj->instance, 2, 0x01000000);
-#else
- INSTANCE_WR(obj->instance, 2, 0x00000000);
+ INSTANCE_WR(*gpuobj, 2, 0x01000000);
#endif
- INSTANCE_WR(obj->instance, 3, 0x00000000);
- INSTANCE_WR(obj->instance, 4, 0x00000000);
- INSTANCE_WR(obj->instance, 5, 0x00000000);
- INSTANCE_WR(obj->instance, 6, 0x00000000);
- INSTANCE_WR(obj->instance, 7, 0x00000000);
} else {
#ifdef __BIG_ENDIAN
- INSTANCE_WR(obj->instance, 0, obj->class | 0x00080000);
+ INSTANCE_WR(*gpuobj, 0, class | 0x00080000);
#else
- INSTANCE_WR(obj->instance, 0, obj->class);
+ INSTANCE_WR(*gpuobj, 0, class);
#endif
- INSTANCE_WR(obj->instance, 1, 0x00000000);
- INSTANCE_WR(obj->instance, 2, 0x00000000);
- INSTANCE_WR(obj->instance, 3, 0x00000000);
}
}
+ }
- return obj;
+ (*gpuobj)->engine = NVOBJ_ENGINE_GR;
+ (*gpuobj)->class = class;
+ return 0;
}
-void
-nouveau_object_free(drm_device_t *dev, struct nouveau_object *obj)
+static int
+nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
- nouveau_object_instance_free(dev, obj);
- if (obj->handle != ~0)
- nouveau_ramht_remove(dev, obj);
- drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *pramin = NULL;
+ int size, base, ret;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ /* Base amount for object storage (4KiB enough?) */
+ size = 0x1000;
+ base = 0;
+
+ /* PGRAPH context */
+
+ if (dev_priv->card_type == NV_50) {
+ /* Various fixed table thingos */
+ size += 0x1400; /* mostly unknown stuff */
+ size += 0x4000; /* vm pd */
+ base = 0x6000;
+ /* RAMHT, not sure about setting size yet, 32KiB to be safe */
+ size += 0x8000;
+ /* RAMFC */
+ size += 0x1000;
+ /* PGRAPH context */
+ size += 0x60000;
+ }
+
+ DRM_DEBUG("ch%d PRAMIN size: 0x%08x bytes, base alloc=0x%08x\n",
+ chan->id, size, base);
+ ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
+ &chan->ramin);
+ if (ret) {
+ DRM_ERROR("Error allocating channel PRAMIN: %d\n", ret);
+ return ret;
+ }
+ pramin = chan->ramin->gpuobj;
+
+ ret = nouveau_mem_init_heap(&chan->ramin_heap,
+ pramin->im_pramin->start + base, size);
+ if (ret) {
+ DRM_ERROR("Error creating PRAMIN heap: %d\n", ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ return ret;
+ }
+
+ return 0;
}
int
-nouveau_object_init_channel(drm_device_t *dev, int channel,
- uint32_t vram_handle,
- uint32_t tt_handle)
+nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
+ uint32_t vram_h, uint32_t tt_h)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_object *gpuobj;
- int ret;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *vram = NULL, *tt = NULL;
+ int ret, i;
+
+ INIT_LIST_HEAD(&chan->ramht_refs);
+
+ DRM_DEBUG("ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
+
+ /* Reserve a block of PRAMIN for the channel
+ *XXX: maybe on <NV50 too at some point
+ */
+ if (0 || dev_priv->card_type == NV_50) {
+ ret = nouveau_gpuobj_channel_init_pramin(chan);
+ if (ret)
+ return ret;
+ }
+
+ /* NV50 VM, point offset 0-512MiB at shared PCIEGART table */
+ if (dev_priv->card_type >= NV_50) {
+ uint32_t vm_offset;
+
+ vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
+ vm_offset += chan->ramin->gpuobj->im_pramin->start;
+ if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
+ 0, &chan->vm_pd, NULL)))
+ return ret;
+ for (i=0; i<0x4000; i+=8) {
+ INSTANCE_WR(chan->vm_pd, (i+0)/4, 0x00000000);
+ INSTANCE_WR(chan->vm_pd, (i+4)/4, 0xdeadcafe);
+ }
+
+ if ((ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
+ dev_priv->gart_info.sg_ctxdma,
+ &chan->vm_gart_pt)))
+ return ret;
+ INSTANCE_WR(chan->vm_pd, (0+0)/4,
+ chan->vm_gart_pt->instance | 0x03);
+ INSTANCE_WR(chan->vm_pd, (0+4)/4, 0x00000000);
+ }
+
+ /* RAMHT */
+ if (dev_priv->card_type < NV_50) {
+ ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ } else {
+ ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
+ 0x8000, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramht);
+ if (ret)
+ return ret;
+ }
/* VRAM ctxdma */
- gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->fb_available_size,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_VIDMEM);
- if (!gpuobj) {
- DRM_ERROR("Error creating VRAM ctxdma: %d\n", DRM_ERR(ENOMEM));
- return DRM_ERR(ENOMEM);
+ if ((ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev_priv->fb_available_size,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_VIDMEM, &vram))) {
+ DRM_ERROR("Error creating VRAM ctxdma: %d\n", ret);
+ return ret;
}
- ret = nouveau_ramht_insert(dev, channel, vram_handle, gpuobj);
- if (ret) {
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL))) {
DRM_ERROR("Error referencing VRAM ctxdma: %d\n", ret);
return ret;
}
- /* non-AGP unimplemented */
- if (dev_priv->agp_heap == NULL)
- return 0;
+ /* TT memory ctxdma */
+ if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
+ ret = nouveau_gpuobj_gart_dma_new(chan, 0,
+ dev_priv->gart_info.aper_size,
+ NV_DMA_ACCESS_RW, &tt, NULL);
+ } else
+ if (dev_priv->pci_heap) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+ 0, dev->sg->pages * PAGE_SIZE,
+ NV_DMA_ACCESS_RW,
+ NV_DMA_TARGET_PCI_NONLINEAR, &tt);
+ } else {
+ DRM_ERROR("Invalid GART type %d\n", dev_priv->gart_info.type);
+ ret = -EINVAL;
+ }
- /* GART ctxdma */
- gpuobj = nouveau_object_dma_create(dev, channel, NV_CLASS_DMA_IN_MEMORY,
- 0, dev_priv->agp_available_size,
- NV_DMA_ACCESS_RW,
- NV_DMA_TARGET_AGP);
- if (!gpuobj) {
- DRM_ERROR("Error creating TT ctxdma: %d\n", DRM_ERR(ENOMEM));
- return DRM_ERR(ENOMEM);
+ if (ret) {
+ DRM_ERROR("Error creating TT ctxdma: %d\n", ret);
+ return ret;
}
- ret = nouveau_ramht_insert(dev, channel, tt_handle, gpuobj);
+ ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
if (ret) {
DRM_ERROR("Error referencing TT ctxdma: %d\n", ret);
return ret;
@@ -536,47 +1065,84 @@ nouveau_object_init_channel(drm_device_t *dev, int channel,
return 0;
}
-void nouveau_object_cleanup(drm_device_t *dev, int channel)
+void
+nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
+ struct drm_device *dev = chan->dev;
+ struct list_head *entry, *tmp;
+ struct nouveau_gpuobj_ref *ref;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ list_for_each_safe(entry, tmp, &chan->ramht_refs) {
+ ref = list_entry(entry, struct nouveau_gpuobj_ref, list);
- while (dev_priv->fifos[channel].objs) {
- nouveau_object_free(dev, dev_priv->fifos[channel].objs);
+ nouveau_gpuobj_ref_del(dev, &ref);
}
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramht);
+
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
+
+ if (chan->ramin_heap)
+ nouveau_mem_takedown(&chan->ramin_heap);
+ if (chan->ramin)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+
}
-int nouveau_ioctl_grobj_alloc(DRM_IOCTL_ARGS)
+int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_nouveau_grobj_alloc_t init;
- struct nouveau_object *obj;
-
- DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_grobj_alloc_t __user *)
- data, sizeof(init));
+ struct nouveau_channel *chan;
+ struct drm_nouveau_grobj_alloc *init = data;
+ struct nouveau_gpuobj *gr = NULL;
+ int ret;
- if (!nouveau_fifo_owner(dev, filp, init.channel)) {
- DRM_ERROR("pid %d doesn't own channel %d\n",
- DRM_CURRENTPID, init.channel);
- return DRM_ERR(EINVAL);
- }
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);
//FIXME: check args, only allow trusted objects to be created
+
+ if (init->handle == ~0)
+ return -EINVAL;
- if (nouveau_object_handle_find(dev, init.channel, init.handle)) {
- DRM_ERROR("Channel %d: handle 0x%08x already exists\n",
- init.channel, init.handle);
- return DRM_ERR(EINVAL);
- }
+ if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
+ return -EEXIST;
- obj = nouveau_object_gr_create(dev, init.channel, init.class);
- if (!obj)
- return DRM_ERR(ENOMEM);
+ ret = nouveau_gpuobj_gr_new(chan, init->class, &gr);
+ if (ret) {
+ DRM_ERROR("Error creating gr object: %d (%d/0x%08x)\n",
+ ret, init->channel, init->handle);
+ return ret;
+ }
- if (nouveau_ramht_insert(dev, init.channel, init.handle, obj)) {
- nouveau_object_free(dev, obj);
- return DRM_ERR(ENOMEM);
+ if ((ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL))) {
+ DRM_ERROR("Error referencing gr object: %d (%d/0x%08x\n)",
+ ret, init->channel, init->handle);
+ nouveau_gpuobj_del(dev, &gr);
+ return ret;
}
return 0;
}
+int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_nouveau_gpuobj_free *objfree = data;
+ struct nouveau_gpuobj_ref *ref;
+ struct nouveau_channel *chan;
+ int ret;
+
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
+ NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);
+
+ if ((ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref)))
+ return ret;
+ nouveau_gpuobj_ref_del(dev, &ref);
+
+ return 0;
+}
+
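For reference, the RAMHT hashing scheme used by nouveau_ramht_insert()/nouveau_ramht_remove() above can be written as a standalone function; this is a sketch with hypothetical parameter names, the arithmetic is copied from the code in this file:

#include <stdint.h>

static uint32_t example_ramht_hash(uint32_t handle, int channel,
                                   int ramht_bits, int pre_nv50)
{
	uint32_t hash = 0;
	int i;

	/* fold the 32-bit handle down to ramht_bits by XOR */
	for (i = 32; i > 0; i -= ramht_bits) {
		hash ^= handle & ((1 << ramht_bits) - 1);
		handle >>= ramht_bits;
	}
	/* pre-NV50 chips mix the channel id into the hash */
	if (pre_nv50)
		hash ^= channel << (ramht_bits - 4);

	/* each RAMHT entry is two 32-bit words (handle, context) */
	return hash << 3;
}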
diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h
index 4c013c53..21133d98 100644
--- a/shared-core/nouveau_reg.h
+++ b/shared-core/nouveau_reg.h
@@ -15,9 +15,6 @@
# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000
# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20
-#define NV03_PGRAPH_STATUS 0x004006b0
-#define NV04_PGRAPH_STATUS 0x00400700
-
#define NV_RAMIN 0x00700000
#define NV_RAMHT_HANDLE_OFFSET 0
@@ -39,6 +36,8 @@
#define NV_DMA_TARGET_VIDMEM 0
#define NV_DMA_TARGET_PCI 2
#define NV_DMA_TARGET_AGP 3
+/* The following is not a real value used by nvidia cards; it is translated by nouveau_gpuobj_dma_new */
+#define NV_DMA_TARGET_PCI_NONLINEAR 8
/* Some object classes we care about in the drm */
#define NV_CLASS_DMA_FROM_MEMORY 0x00000002
@@ -47,11 +46,15 @@
#define NV_CLASS_DMA_IN_MEMORY 0x0000003D
#define NV03_FIFO_SIZE 0x8000UL
-#define NV_MAX_FIFO_NUMBER 32
+#define NV_MAX_FIFO_NUMBER 128
#define NV03_FIFO_REGS_SIZE 0x10000
#define NV03_FIFO_REGS(i) (0x00800000+i*NV03_FIFO_REGS_SIZE)
# define NV03_FIFO_REGS_DMAPUT(i) (NV03_FIFO_REGS(i)+0x40)
# define NV03_FIFO_REGS_DMAGET(i) (NV03_FIFO_REGS(i)+0x44)
+#define NV50_FIFO_REGS_SIZE 0x2000
+#define NV50_FIFO_REGS(i) (0x00c00000+i*NV50_FIFO_REGS_SIZE)
+# define NV50_FIFO_REGS_DMAPUT(i) (NV50_FIFO_REGS(i)+0x40)
+# define NV50_FIFO_REGS_DMAGET(i) (NV50_FIFO_REGS(i)+0x44)
#define NV03_PMC_BOOT_0 0x00000000
#define NV03_PMC_INTR_0 0x00000100
@@ -74,6 +77,16 @@
#define NV40_PMC_1708 0x00001708
#define NV40_PMC_170C 0x0000170C
+/* probably PMC ? */
+#define NV50_PUNK_BAR0_PRAMIN 0x00001700
+#define NV50_PUNK_BAR_CFG_BASE 0x00001704
+#define NV50_PUNK_BAR_CFG_BASE_VALID (1<<30)
+#define NV50_PUNK_BAR1_CTXDMA 0x00001708
+#define NV50_PUNK_BAR1_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_BAR3_CTXDMA 0x0000170C
+#define NV50_PUNK_BAR3_CTXDMA_VALID (1<<31)
+#define NV50_PUNK_UNK1710 0x00001710
+
#define NV04_PTIMER_INTR_0 0x00009100
#define NV04_PTIMER_INTR_EN_0 0x00009140
#define NV04_PTIMER_NUMERATOR 0x00009200
@@ -104,6 +117,31 @@
#define NV04_PGRAPH_DEBUG_3 0x0040008c
#define NV10_PGRAPH_DEBUG_4 0x00400090
#define NV03_PGRAPH_INTR 0x00400100
+#define NV03_PGRAPH_NSTATUS 0x00400104
+# define NV03_PGRAPH_NSTATUS_STATE_IN_USE (1<<23)
+# define NV03_PGRAPH_NSTATUS_INVALID_STATE (1<<24)
+# define NV03_PGRAPH_NSTATUS_BAD_ARGUMENT (1<<25)
+# define NV03_PGRAPH_NSTATUS_PROTECTION_FAULT (1<<26)
+#define NV03_PGRAPH_NSOURCE 0x00400108
+# define NV03_PGRAPH_NSOURCE_NOTIFICATION (1<< 0)
+# define NV03_PGRAPH_NSOURCE_DATA_ERROR (1<< 1)
+# define NV03_PGRAPH_NSOURCE_PROTECTION_ERROR (1<< 2)
+# define NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION (1<< 3)
+# define NV03_PGRAPH_NSOURCE_LIMIT_COLOR (1<< 4)
+# define NV03_PGRAPH_NSOURCE_LIMIT_ZETA (1<< 5)
+# define NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD (1<< 6)
+# define NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION (1<< 7)
+# define NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION (1<< 8)
+# define NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION (1<< 9)
+# define NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION (1<<10)
+# define NV03_PGRAPH_NSOURCE_STATE_INVALID (1<<11)
+# define NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY (1<<12)
+# define NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE (1<<13)
+# define NV03_PGRAPH_NSOURCE_METHOD_CNT (1<<14)
+# define NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION (1<<15)
+# define NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION (1<<16)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_A (1<<17)
+# define NV03_PGRAPH_NSOURCE_DMA_WIDTH_B (1<<18)
#define NV03_PGRAPH_INTR_EN 0x00400140
#define NV40_PGRAPH_INTR_EN 0x0040013C
# define NV_PGRAPH_INTR_NOTIFY (1<< 0)
@@ -137,6 +175,10 @@
#define NV10_PGRAPH_CTX_CACHE5 0x004001E0
#define NV40_PGRAPH_CTXCTL_0304 0x00400304
#define NV40_PGRAPH_CTXCTL_0304_XFER_CTX 0x00000001
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT 0x00400308
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_MASK 0xff000000
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT 24
+#define NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK 0x00ffffff
#define NV40_PGRAPH_CTXCTL_0310 0x00400310
#define NV40_PGRAPH_CTXCTL_0310_XFER_SAVE 0x00000020
#define NV40_PGRAPH_CTXCTL_0310_XFER_LOAD 0x00000040
@@ -219,7 +261,12 @@
#define NV04_PGRAPH_BLIMIT5 0x00400698
#define NV04_PGRAPH_BSWIZZLE2 0x0040069C
#define NV04_PGRAPH_BSWIZZLE5 0x004006A0
+#define NV03_PGRAPH_STATUS 0x004006B0
+#define NV04_PGRAPH_STATUS 0x00400700
+#define NV04_PGRAPH_TRAPPED_ADDR 0x00400704
+#define NV04_PGRAPH_TRAPPED_DATA 0x00400708
#define NV04_PGRAPH_SURFACE 0x0040070C
+#define NV10_PGRAPH_TRAPPED_DATA_HIGH 0x0040070C
#define NV04_PGRAPH_STATE 0x00400710
#define NV10_PGRAPH_SURFACE 0x00400710
#define NV04_PGRAPH_NOTIFY 0x00400714
@@ -272,6 +319,18 @@
#define NV47_PGRAPH_TSTATUS0(i) 0x00400D0C
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
+#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40
+#define NV10_PGRAPH_COMBINER1_IN_ALPHA 0x00400E44
+#define NV10_PGRAPH_COMBINER0_IN_RGB 0x00400E48
+#define NV10_PGRAPH_COMBINER1_IN_RGB 0x00400E4C
+#define NV10_PGRAPH_COMBINER_COLOR0 0x00400E50
+#define NV10_PGRAPH_COMBINER_COLOR1 0x00400E54
+#define NV10_PGRAPH_COMBINER0_OUT_ALPHA 0x00400E58
+#define NV10_PGRAPH_COMBINER1_OUT_ALPHA 0x00400E5C
+#define NV10_PGRAPH_COMBINER0_OUT_RGB 0x00400E60
+#define NV10_PGRAPH_COMBINER1_OUT_RGB 0x00400E64
+#define NV10_PGRAPH_COMBINER_FINAL0 0x00400E68
+#define NV10_PGRAPH_COMBINER_FINAL1 0x00400E6C
#define NV10_PGRAPH_WINDOWCLIP_HORIZONTAL 0x00400F00
#define NV10_PGRAPH_WINDOWCLIP_VERTICAL 0x00400F20
#define NV10_PGRAPH_XFMODE0 0x00400F40
@@ -332,6 +391,12 @@
#define NV04_PFIFO_MODE 0x00002504
#define NV04_PFIFO_DMA 0x00002508
#define NV04_PFIFO_SIZE 0x0000250c
+#define NV50_PFIFO_CTX_TABLE(c) (0x2600+(c)*4)
+#define NV50_PFIFO_CTX_TABLE__SIZE 128
+#define NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED (1<<31)
+#define NV50_PFIFO_CTX_TABLE_UNK30_BAD (1<<30)
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80 0x0FFFFFFF
+#define NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84 0x00FFFFFF
#define NV03_PFIFO_CACHE0_PUSH0 0x00003000
#define NV03_PFIFO_CACHE0_PULL0 0x00003040
#define NV04_PFIFO_CACHE0_PULL0 0x00003050
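The per-channel register macros added here are plain base-plus-stride arithmetic; a quick standalone sanity check, with values copied from the #defines above:

#include <stdio.h>

#define EX_NV03_FIFO_REGS(i) (0x00800000 + (i) * 0x10000)
#define EX_NV50_FIFO_REGS(i) (0x00c00000 + (i) * 0x2000)

int main(void)
{
	/* DMAPUT sits at +0x40 in both layouts */
	printf("NV03 ch3 DMAPUT: 0x%08x\n", EX_NV03_FIFO_REGS(3) + 0x40); /* 0x00830040 */
	printf("NV50 ch3 DMAPUT: 0x%08x\n", EX_NV50_FIFO_REGS(3) + 0x40); /* 0x00c06040 */
	return 0;
}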
diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c
index fa773d28..e73b4878 100644
--- a/shared-core/nouveau_state.c
+++ b/shared-core/nouveau_state.c
@@ -28,9 +28,9 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
-static int nouveau_init_card_mappings(drm_device_t *dev)
+static int nouveau_init_card_mappings(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int ret;
/* resource 0 is mmio regs */
@@ -86,18 +86,27 @@ static int nouveau_init_card_mappings(drm_device_t *dev)
return 0;
}
-static int nouveau_stub_init(drm_device_t *dev) { return 0; }
-static void nouveau_stub_takedown(drm_device_t *dev) {}
-static int nouveau_init_engine_ptrs(drm_device_t *dev)
+static int nouveau_stub_init(struct drm_device *dev) { return 0; }
+static void nouveau_stub_takedown(struct drm_device *dev) {}
+static uint64_t nouveau_stub_timer_read(struct drm_device *dev) { return 0; }
+
+static int nouveau_init_engine_ptrs(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_engine_func *engine = &dev_priv->Engine;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
switch (dev_priv->chipset & 0xf0) {
case 0x00:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv04_fb_init;
engine->fb.takedown = nv04_fb_takedown;
@@ -115,9 +124,16 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
engine->fifo.save_context = nv04_fifo_save_context;
break;
case 0x10:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
@@ -135,9 +151,16 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
engine->fifo.save_context = nv10_fifo_save_context;
break;
case 0x20:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
@@ -155,9 +178,16 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
engine->fifo.save_context = nv10_fifo_save_context;
break;
case 0x30:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv04_mc_init;
engine->mc.takedown = nv04_mc_takedown;
engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv10_fb_init;
engine->fb.takedown = nv10_fb_takedown;
@@ -175,9 +205,16 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
engine->fifo.save_context = nv10_fifo_save_context;
break;
case 0x40:
+ engine->instmem.init = nv04_instmem_init;
+ engine->instmem.takedown= nv04_instmem_takedown;
+ engine->instmem.populate = nv04_instmem_populate;
+ engine->instmem.clear = nv04_instmem_clear;
+ engine->instmem.bind = nv04_instmem_bind;
+ engine->instmem.unbind = nv04_instmem_unbind;
engine->mc.init = nv40_mc_init;
engine->mc.takedown = nv40_mc_takedown;
engine->timer.init = nv04_timer_init;
+ engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
engine->fb.init = nv40_fb_init;
engine->fb.takedown = nv40_fb_takedown;
@@ -187,7 +224,7 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
engine->graph.destroy_context = nv40_graph_destroy_context;
engine->graph.load_context = nv40_graph_load_context;
engine->graph.save_context = nv40_graph_save_context;
- engine->fifo.init = nouveau_fifo_init;
+ engine->fifo.init = nv40_fifo_init;
engine->fifo.takedown = nouveau_stub_takedown;
engine->fifo.create_context = nv40_fifo_create_context;
engine->fifo.destroy_context = nv40_fifo_destroy_context;
@@ -196,9 +233,16 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
break;
case 0x50:
case 0x80: /* gotta love NVIDIA's consistency.. */
+ engine->instmem.init = nv50_instmem_init;
+ engine->instmem.takedown= nv50_instmem_takedown;
+ engine->instmem.populate = nv50_instmem_populate;
+ engine->instmem.clear = nv50_instmem_clear;
+ engine->instmem.bind = nv50_instmem_bind;
+ engine->instmem.unbind = nv50_instmem_unbind;
engine->mc.init = nv50_mc_init;
engine->mc.takedown = nv50_mc_takedown;
engine->timer.init = nouveau_stub_init;
+ engine->timer.read = nouveau_stub_timer_read;
engine->timer.takedown = nouveau_stub_takedown;
engine->fb.init = nouveau_stub_init;
engine->fb.takedown = nouveau_stub_takedown;
@@ -223,12 +267,18 @@ static int nouveau_init_engine_ptrs(drm_device_t *dev)
return 0;
}
-static int nouveau_card_init(drm_device_t *dev)
+int
+nouveau_card_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_engine_func *engine;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine;
int ret;
+ DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
+ return 0;
+
/* Map any PCI resources we need on the card */
ret = nouveau_init_card_mappings(dev);
if (ret) return ret;
@@ -244,18 +294,25 @@ static int nouveau_card_init(drm_device_t *dev)
ret = nouveau_init_engine_ptrs(dev);
if (ret) return ret;
engine = &dev_priv->Engine;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+
+ ret = nouveau_gpuobj_early_init(dev);
+ if (ret) return ret;
/* Initialise instance memory, must happen before mem_init so we
* know exactly how much VRAM we're able to use for "normal"
* purposes.
*/
- ret = nouveau_instmem_init(dev);
+ ret = engine->instmem.init(dev);
if (ret) return ret;
/* Setup the memory manager */
ret = nouveau_mem_init(dev);
if (ret) return ret;
+ ret = nouveau_gpuobj_init(dev);
+ if (ret) return ret;
+
/* Parse BIOS tables / Run init tables? */
/* PMC */
@@ -278,66 +335,96 @@ static int nouveau_card_init(drm_device_t *dev)
ret = engine->fifo.init(dev);
if (ret) return ret;
+ /* this calls irq_preinstall, registers the irq handler and
+ * calls irq_postinstall
+ */
+ ret = drm_irq_install(dev);
+ if (ret) return ret;
+
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
+ ret = nouveau_dma_channel_init(dev);
+ if (ret) return ret;
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DONE;
return 0;
}
-/* here a client dies, release the stuff that was allocated for its filp */
-void nouveau_preclose(drm_device_t * dev, DRMFILE filp)
+static void nouveau_card_takedown(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+
+ DRM_DEBUG("prev state = %d\n", dev_priv->init_state);
+
+ if (dev_priv->init_state != NOUVEAU_CARD_INIT_DOWN) {
+ nouveau_dma_channel_takedown(dev);
+
+ engine->fifo.takedown(dev);
+ engine->graph.takedown(dev);
+ engine->fb.takedown(dev);
+ engine->timer.takedown(dev);
+ engine->mc.takedown(dev);
+
+ nouveau_sgdma_nottm_hack_takedown(dev);
+ nouveau_sgdma_takedown(dev);
+
+ nouveau_gpuobj_takedown(dev);
- nouveau_fifo_cleanup(dev, filp);
- nouveau_mem_release(filp,dev_priv->fb_heap);
- nouveau_mem_release(filp,dev_priv->agp_heap);
+ nouveau_mem_close(dev);
+ engine->instmem.takedown(dev);
+
+ drm_irq_uninstall(dev);
+
+ nouveau_gpuobj_late_takedown(dev);
+
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
+ }
}
-/* first module load, setup the mmio/fb mapping */
-int nouveau_firstopen(struct drm_device *dev)
+/* when a client dies, release everything that was allocated for its
+ * file_priv */
+void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
- int ret;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- ret = nouveau_card_init(dev);
- if (ret) {
- DRM_ERROR("nouveau_card_init() failed! (%d)\n", ret);
- return ret;
- }
+ nouveau_fifo_cleanup(dev, file_priv);
+ nouveau_mem_release(file_priv,dev_priv->fb_heap);
+ nouveau_mem_release(file_priv,dev_priv->agp_heap);
+ nouveau_mem_release(file_priv,dev_priv->pci_heap);
+}
+/* first module load, setup the mmio/fb mapping */
+int nouveau_firstopen(struct drm_device *dev)
+{
return 0;
}
int nouveau_load(struct drm_device *dev, unsigned long flags)
{
- drm_nouveau_private_t *dev_priv;
+ struct drm_nouveau_private *dev_priv;
if (flags==NV_UNKNOWN)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
- dev_priv = drm_alloc(sizeof(drm_nouveau_private_t), DRM_MEM_DRIVER);
+ dev_priv = drm_calloc(1, sizeof(*dev_priv), DRM_MEM_DRIVER);
if (!dev_priv)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
- memset(dev_priv, 0, sizeof(drm_nouveau_private_t));
dev_priv->card_type=flags&NOUVEAU_FAMILY;
dev_priv->flags=flags&NOUVEAU_FLAGS;
+ dev_priv->init_state = NOUVEAU_CARD_INIT_DOWN;
dev->dev_private = (void *)dev_priv;
-
-#if 0
- ret = nouveau_card_init(dev);
- if (ret) {
- DRM_ERROR("nouveau_card_init() failed! (%d)\n", ret);
- return ret;
- }
-#endif
-
return 0;
}
void nouveau_lastclose(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_card_takedown(dev);
+
if(dev_priv->fb_mtrr>0)
{
drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC);
@@ -352,80 +439,95 @@ int nouveau_unload(struct drm_device *dev)
return 0;
}
-int nouveau_ioctl_getparam(DRM_IOCTL_ARGS)
+int
+nouveau_ioctl_card_init(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ return nouveau_card_init(dev);
+}
+
+int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- drm_nouveau_getparam_t getparam;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_getparam *getparam = data;
- DRM_COPY_FROM_USER_IOCTL(getparam, (drm_nouveau_getparam_t __user *)data,
- sizeof(getparam));
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
- switch (getparam.param) {
+ switch (getparam->param) {
+ case NOUVEAU_GETPARAM_CHIPSET_ID:
+ getparam->value = dev_priv->chipset;
+ break;
case NOUVEAU_GETPARAM_PCI_VENDOR:
- getparam.value=dev->pci_vendor;
+ getparam->value=dev->pci_vendor;
break;
case NOUVEAU_GETPARAM_PCI_DEVICE:
- getparam.value=dev->pci_device;
+ getparam->value=dev->pci_device;
break;
case NOUVEAU_GETPARAM_BUS_TYPE:
if (drm_device_is_agp(dev))
- getparam.value=NV_AGP;
+ getparam->value=NV_AGP;
else if (drm_device_is_pcie(dev))
- getparam.value=NV_PCIE;
+ getparam->value=NV_PCIE;
else
- getparam.value=NV_PCI;
+ getparam->value=NV_PCI;
break;
case NOUVEAU_GETPARAM_FB_PHYSICAL:
- getparam.value=dev_priv->fb_phys;
+ getparam->value=dev_priv->fb_phys;
break;
case NOUVEAU_GETPARAM_AGP_PHYSICAL:
- getparam.value=dev_priv->agp_phys;
+ getparam->value=dev_priv->gart_info.aper_base;
+ break;
+ case NOUVEAU_GETPARAM_PCI_PHYSICAL:
+ if ( dev -> sg )
+ getparam->value=(uint64_t) dev->sg->virtual;
+ else
+ {
+ DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n");
+ return -EINVAL;
+ }
break;
case NOUVEAU_GETPARAM_FB_SIZE:
- getparam.value=dev_priv->fb_available_size;
+ getparam->value=dev_priv->fb_available_size;
break;
case NOUVEAU_GETPARAM_AGP_SIZE:
- getparam.value=dev_priv->agp_available_size;
+ getparam->value=dev_priv->gart_info.aper_size;
break;
default:
- DRM_ERROR("unknown parameter %lld\n", getparam.param);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("unknown parameter %lld\n", getparam->param);
+ return -EINVAL;
}
- DRM_COPY_TO_USER_IOCTL((drm_nouveau_getparam_t __user *)data, getparam,
- sizeof(getparam));
return 0;
}
-int nouveau_ioctl_setparam(DRM_IOCTL_ARGS)
+int nouveau_ioctl_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- drm_nouveau_setparam_t setparam;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_setparam *setparam = data;
- DRM_COPY_FROM_USER_IOCTL(setparam, (drm_nouveau_setparam_t __user *)data,
- sizeof(setparam));
+ NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
- switch (setparam.param) {
+ switch (setparam->param) {
case NOUVEAU_SETPARAM_CMDBUF_LOCATION:
- switch (setparam.value) {
+ switch (setparam->value) {
case NOUVEAU_MEM_AGP:
case NOUVEAU_MEM_FB:
+ case NOUVEAU_MEM_PCI:
+ case NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI_ACCEPTABLE:
break;
default:
DRM_ERROR("invalid CMDBUF_LOCATION value=%lld\n",
- setparam.value);
- return DRM_ERR(EINVAL);
+ setparam->value);
+ return -EINVAL;
}
- dev_priv->config.cmdbuf.location = setparam.value;
+ dev_priv->config.cmdbuf.location = setparam->value;
break;
case NOUVEAU_SETPARAM_CMDBUF_SIZE:
- dev_priv->config.cmdbuf.size = setparam.value;
+ dev_priv->config.cmdbuf.size = setparam->value;
break;
default:
- DRM_ERROR("unknown parameter %lld\n", setparam.param);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("unknown parameter %lld\n", setparam->param);
+ return -EINVAL;
}
return 0;
@@ -434,15 +536,30 @@ int nouveau_ioctl_setparam(DRM_IOCTL_ARGS)
/* waits for idle */
void nouveau_wait_for_idle(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv=dev->dev_private;
- switch(dev_priv->card_type)
- {
- case NV_03:
- while(NV_READ(NV03_PGRAPH_STATUS));
- break;
- default:
- while(NV_READ(NV04_PGRAPH_STATUS));
- break;
+ struct drm_nouveau_private *dev_priv=dev->dev_private;
+ switch(dev_priv->card_type) {
+ case NV_50:
+ break;
+ default: {
+ /* This is more or less a copy of the sequence seen
+ * in an nv28 kmmio dump.
+ */
+ uint64_t started = dev_priv->Engine.timer.read(dev);
+ uint64_t stopped = started;
+ uint32_t status;
+ do {
+ uint32_t pmc_e = NV_READ(NV03_PMC_ENABLE);
+ (void)pmc_e;
+ status = NV_READ(NV04_PGRAPH_STATUS);
+ if (!status)
+ break;
+ stopped = dev_priv->Engine.timer.read(dev);
+ /* It'll never wrap anyway... */
+ } while (stopped - started < 1000000000ULL);
+ if (status)
+ DRM_ERROR("timed out with status 0x%08x\n",
+ status);
+ }
}
}
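
The reworked nouveau_wait_for_idle() above no longer spins forever on NV04_PGRAPH_STATUS; it bounds the poll at roughly one second's worth of PTIMER ticks (1000000000 in the patch) using the per-chipset timer.read() hook and logs an error on timeout. A minimal standalone sketch of that bounded-poll pattern follows; wait_until_idle(), read_timer() and read_status() are illustrative names only, not driver API.

/* Sketch only: poll a status value until it clears or a timeout
 * (expressed in timer ticks) expires.  The function pointers stand in
 * for the engine hooks the driver uses; none of this is DRM API. */
#include <stdint.h>

static int wait_until_idle(uint64_t (*read_timer)(void),
			   uint32_t (*read_status)(void),
			   uint64_t timeout_ticks)
{
	uint64_t start = read_timer();

	do {
		if (read_status() == 0)
			return 0;		/* engine went idle */
	} while (read_timer() - start < timeout_ticks);

	return -1;				/* timed out, still busy */
}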
diff --git a/shared-core/nv04_fb.c b/shared-core/nv04_fb.c
index 06b1c994..534fb50b 100644
--- a/shared-core/nv04_fb.c
+++ b/shared-core/nv04_fb.c
@@ -4,9 +4,9 @@
#include "nouveau_drm.h"
int
-nv04_fb_init(drm_device_t *dev)
+nv04_fb_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
/* This is what the DDX did for NV_ARCH_04, but a mmio-trace shows
* nvidia reading PFB_CFG_0, then writing back its original value.
@@ -18,7 +18,7 @@ nv04_fb_init(drm_device_t *dev)
}
void
-nv04_fb_takedown(drm_device_t *dev)
+nv04_fb_takedown(struct drm_device *dev)
{
}
diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c
index bfae432e..d750ced8 100644
--- a/shared-core/nv04_fifo.c
+++ b/shared-core/nv04_fifo.c
@@ -28,31 +28,31 @@
#include "drm.h"
#include "nouveau_drv.h"
-#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV04_RAMFC_##offset, (val))
-#define RAMFC_RD(offset) NV_RI32(fifoctx + NV04_RAMFC_##offset)
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
+ NV04_RAMFC_##offset/4)
#define NV04_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
int
-nv04_fifo_create_context(drm_device_t *dev, int channel)
+nv04_fifo_create_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
- struct nouveau_object *pb = chan->cmdbuf_obj;
- uint32_t fifoctx = NV04_RAMFC(channel);
- int i;
-
- if (!pb || !pb->instance)
- return DRM_ERR(EINVAL);
-
- /* Clear RAMFC */
- for (i=0; i<NV04_RAMFC__SIZE; i+=4)
- NV_WI32(fifoctx + i, 0);
-
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if ((ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
+ NV04_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc)))
+ return ret;
+
/* Setup initial state */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE, nouveau_chip_instance_get(dev, pb->instance));
+ RAMFC_WR(DMA_INSTANCE, chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -60,28 +60,31 @@ nv04_fifo_create_context(drm_device_t *dev, int channel)
NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
0));
+
+ /* enable the fifo dma operation */
+ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE) | (1<<chan->id));
return 0;
}
void
-nv04_fifo_destroy_context(drm_device_t *dev, int channel)
+nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV04_RAMFC(channel);
- int i;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
- for (i=0; i<NV04_RAMFC__SIZE; i+=4)
- NV_WI32(fifoctx + i, 0);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
-nv04_fifo_load_context(drm_device_t *dev, int channel)
+nv04_fifo_load_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV04_RAMFC(channel);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
- NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | channel);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | chan->id);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT));
@@ -103,10 +106,10 @@ nv04_fifo_load_context(drm_device_t *dev, int channel)
}
int
-nv04_fifo_save_context(drm_device_t *dev, int channel)
+nv04_fifo_save_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV04_RAMFC(channel);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
RAMFC_WR(DMA_PUT, NV04_PFIFO_CACHE1_DMA_PUT);
diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c
index 1aaae33c..2cf052cf 100644
--- a/shared-core/nv04_graph.c
+++ b/shared-core/nv04_graph.c
@@ -27,323 +27,399 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
-struct reg_interval
-{
- uint32_t reg;
- int number;
-} nv04_graph_ctx_regs [] = {
- {NV04_PGRAPH_CTX_SWITCH1,1},
- {NV04_PGRAPH_CTX_SWITCH2,1},
- {NV04_PGRAPH_CTX_SWITCH3,1},
- {NV04_PGRAPH_CTX_SWITCH4,1},
- {NV04_PGRAPH_CTX_CACHE1,1},
- {NV04_PGRAPH_CTX_CACHE2,1},
- {NV04_PGRAPH_CTX_CACHE3,1},
- {NV04_PGRAPH_CTX_CACHE4,1},
- {0x00400184,1},
- {0x004001a4,1},
- {0x004001c4,1},
- {0x004001e4,1},
- {0x00400188,1},
- {0x004001a8,1},
- {0x004001c8,1},
- {0x004001e8,1},
- {0x0040018c,1},
- {0x004001ac,1},
- {0x004001cc,1},
- {0x004001ec,1},
- {0x00400190,1},
- {0x004001b0,1},
- {0x004001d0,1},
- {0x004001f0,1},
- {0x00400194,1},
- {0x004001b4,1},
- {0x004001d4,1},
- {0x004001f4,1},
- {0x00400198,1},
- {0x004001b8,1},
- {0x004001d8,1},
- {0x004001f8,1},
- {0x0040019c,1},
- {0x004001bc,1},
- {0x004001dc,1},
- {0x004001fc,1},
- {0x00400174,1},
- {NV04_PGRAPH_DMA_START_0,1},
- {NV04_PGRAPH_DMA_START_1,1},
- {NV04_PGRAPH_DMA_LENGTH,1},
- {NV04_PGRAPH_DMA_MISC,1},
- {NV04_PGRAPH_DMA_PITCH,1},
- {NV04_PGRAPH_BOFFSET0,1},
- {NV04_PGRAPH_BBASE0,1},
- {NV04_PGRAPH_BLIMIT0,1},
- {NV04_PGRAPH_BOFFSET1,1},
- {NV04_PGRAPH_BBASE1,1},
- {NV04_PGRAPH_BLIMIT1,1},
- {NV04_PGRAPH_BOFFSET2,1},
- {NV04_PGRAPH_BBASE2,1},
- {NV04_PGRAPH_BLIMIT2,1},
- {NV04_PGRAPH_BOFFSET3,1},
- {NV04_PGRAPH_BBASE3,1},
- {NV04_PGRAPH_BLIMIT3,1},
- {NV04_PGRAPH_BOFFSET4,1},
- {NV04_PGRAPH_BBASE4,1},
- {NV04_PGRAPH_BLIMIT4,1},
- {NV04_PGRAPH_BOFFSET5,1},
- {NV04_PGRAPH_BBASE5,1},
- {NV04_PGRAPH_BLIMIT5,1},
- {NV04_PGRAPH_BPITCH0,1},
- {NV04_PGRAPH_BPITCH1,1},
- {NV04_PGRAPH_BPITCH2,1},
- {NV04_PGRAPH_BPITCH3,1},
- {NV04_PGRAPH_BPITCH4,1},
- {NV04_PGRAPH_SURFACE,1},
- {NV04_PGRAPH_STATE,1},
- {NV04_PGRAPH_BSWIZZLE2,1},
- {NV04_PGRAPH_BSWIZZLE5,1},
- {NV04_PGRAPH_BPIXEL,1},
- {NV04_PGRAPH_NOTIFY,1},
- {NV04_PGRAPH_PATT_COLOR0,1},
- {NV04_PGRAPH_PATT_COLOR1,1},
- {NV04_PGRAPH_PATT_COLORRAM,64},
- {NV04_PGRAPH_PATTERN,1},
- {0x0040080c,1},
- {NV04_PGRAPH_PATTERN_SHAPE,1},
- {0x00400600,1},
- {NV04_PGRAPH_ROP3,1},
- {NV04_PGRAPH_CHROMA,1},
- {NV04_PGRAPH_BETA_AND,1},
- {NV04_PGRAPH_BETA_PREMULT,1},
- {NV04_PGRAPH_CONTROL0,1},
- {NV04_PGRAPH_CONTROL1,1},
- {NV04_PGRAPH_CONTROL2,1},
- {NV04_PGRAPH_BLEND,1},
- {NV04_PGRAPH_STORED_FMT,1},
- {NV04_PGRAPH_SOURCE_COLOR,1},
- {0x00400560,1},
- {0x00400568,1},
- {0x00400564,1},
- {0x0040056c,1},
- {0x00400400,1},
- {0x00400480,1},
- {0x00400404,1},
- {0x00400484,1},
- {0x00400408,1},
- {0x00400488,1},
- {0x0040040c,1},
- {0x0040048c,1},
- {0x00400410,1},
- {0x00400490,1},
- {0x00400414,1},
- {0x00400494,1},
- {0x00400418,1},
- {0x00400498,1},
- {0x0040041c,1},
- {0x0040049c,1},
- {0x00400420,1},
- {0x004004a0,1},
- {0x00400424,1},
- {0x004004a4,1},
- {0x00400428,1},
- {0x004004a8,1},
- {0x0040042c,1},
- {0x004004ac,1},
- {0x00400430,1},
- {0x004004b0,1},
- {0x00400434,1},
- {0x004004b4,1},
- {0x00400438,1},
- {0x004004b8,1},
- {0x0040043c,1},
- {0x004004bc,1},
- {0x00400440,1},
- {0x004004c0,1},
- {0x00400444,1},
- {0x004004c4,1},
- {0x00400448,1},
- {0x004004c8,1},
- {0x0040044c,1},
- {0x004004cc,1},
- {0x00400450,1},
- {0x004004d0,1},
- {0x00400454,1},
- {0x004004d4,1},
- {0x00400458,1},
- {0x004004d8,1},
- {0x0040045c,1},
- {0x004004dc,1},
- {0x00400460,1},
- {0x004004e0,1},
- {0x00400464,1},
- {0x004004e4,1},
- {0x00400468,1},
- {0x004004e8,1},
- {0x0040046c,1},
- {0x004004ec,1},
- {0x00400470,1},
- {0x004004f0,1},
- {0x00400474,1},
- {0x004004f4,1},
- {0x00400478,1},
- {0x004004f8,1},
- {0x0040047c,1},
- {0x004004fc,1},
- {0x0040053c,1},
- {0x00400544,1},
- {0x00400540,1},
- {0x00400548,1},
- {0x00400560,1},
- {0x00400568,1},
- {0x00400564,1},
- {0x0040056c,1},
- {0x00400534,1},
- {0x00400538,1},
- {0x00400514,1},
- {0x00400518,1},
- {0x0040051c,1},
- {0x00400520,1},
- {0x00400524,1},
- {0x00400528,1},
- {0x0040052c,1},
- {0x00400530,1},
- {0x00400d00,1},
- {0x00400d40,1},
- {0x00400d80,1},
- {0x00400d04,1},
- {0x00400d44,1},
- {0x00400d84,1},
- {0x00400d08,1},
- {0x00400d48,1},
- {0x00400d88,1},
- {0x00400d0c,1},
- {0x00400d4c,1},
- {0x00400d8c,1},
- {0x00400d10,1},
- {0x00400d50,1},
- {0x00400d90,1},
- {0x00400d14,1},
- {0x00400d54,1},
- {0x00400d94,1},
- {0x00400d18,1},
- {0x00400d58,1},
- {0x00400d98,1},
- {0x00400d1c,1},
- {0x00400d5c,1},
- {0x00400d9c,1},
- {0x00400d20,1},
- {0x00400d60,1},
- {0x00400da0,1},
- {0x00400d24,1},
- {0x00400d64,1},
- {0x00400da4,1},
- {0x00400d28,1},
- {0x00400d68,1},
- {0x00400da8,1},
- {0x00400d2c,1},
- {0x00400d6c,1},
- {0x00400dac,1},
- {0x00400d30,1},
- {0x00400d70,1},
- {0x00400db0,1},
- {0x00400d34,1},
- {0x00400d74,1},
- {0x00400db4,1},
- {0x00400d38,1},
- {0x00400d78,1},
- {0x00400db8,1},
- {0x00400d3c,1},
- {0x00400d7c,1},
- {0x00400dbc,1},
- {0x00400590,1},
- {0x00400594,1},
- {0x00400598,1},
- {0x0040059c,1},
- {0x004005a8,1},
- {0x004005ac,1},
- {0x004005b0,1},
- {0x004005b4,1},
- {0x004005c0,1},
- {0x004005c4,1},
- {0x004005c8,1},
- {0x004005cc,1},
- {0x004005d0,1},
- {0x004005d4,1},
- {0x004005d8,1},
- {0x004005dc,1},
- {0x004005e0,1},
- {NV04_PGRAPH_PASSTHRU_0,1},
- {NV04_PGRAPH_PASSTHRU_1,1},
- {NV04_PGRAPH_PASSTHRU_2,1},
- {NV04_PGRAPH_DVD_COLORFMT,1},
- {NV04_PGRAPH_SCALED_FORMAT,1},
- {NV04_PGRAPH_MISC24_0,1},
- {NV04_PGRAPH_MISC24_1,1},
- {NV04_PGRAPH_MISC24_2,1},
- {0x00400500,1},
- {0x00400504,1},
- {NV04_PGRAPH_VALID1,1},
- {NV04_PGRAPH_VALID2,1}
+static uint32_t nv04_graph_ctx_regs [] = {
+ NV04_PGRAPH_CTX_SWITCH1,
+ NV04_PGRAPH_CTX_SWITCH2,
+ NV04_PGRAPH_CTX_SWITCH3,
+ NV04_PGRAPH_CTX_SWITCH4,
+ NV04_PGRAPH_CTX_CACHE1,
+ NV04_PGRAPH_CTX_CACHE2,
+ NV04_PGRAPH_CTX_CACHE3,
+ NV04_PGRAPH_CTX_CACHE4,
+ 0x00400184,
+ 0x004001a4,
+ 0x004001c4,
+ 0x004001e4,
+ 0x00400188,
+ 0x004001a8,
+ 0x004001c8,
+ 0x004001e8,
+ 0x0040018c,
+ 0x004001ac,
+ 0x004001cc,
+ 0x004001ec,
+ 0x00400190,
+ 0x004001b0,
+ 0x004001d0,
+ 0x004001f0,
+ 0x00400194,
+ 0x004001b4,
+ 0x004001d4,
+ 0x004001f4,
+ 0x00400198,
+ 0x004001b8,
+ 0x004001d8,
+ 0x004001f8,
+ 0x0040019c,
+ 0x004001bc,
+ 0x004001dc,
+ 0x004001fc,
+ 0x00400174,
+ NV04_PGRAPH_DMA_START_0,
+ NV04_PGRAPH_DMA_START_1,
+ NV04_PGRAPH_DMA_LENGTH,
+ NV04_PGRAPH_DMA_MISC,
+ NV04_PGRAPH_DMA_PITCH,
+ NV04_PGRAPH_BOFFSET0,
+ NV04_PGRAPH_BBASE0,
+ NV04_PGRAPH_BLIMIT0,
+ NV04_PGRAPH_BOFFSET1,
+ NV04_PGRAPH_BBASE1,
+ NV04_PGRAPH_BLIMIT1,
+ NV04_PGRAPH_BOFFSET2,
+ NV04_PGRAPH_BBASE2,
+ NV04_PGRAPH_BLIMIT2,
+ NV04_PGRAPH_BOFFSET3,
+ NV04_PGRAPH_BBASE3,
+ NV04_PGRAPH_BLIMIT3,
+ NV04_PGRAPH_BOFFSET4,
+ NV04_PGRAPH_BBASE4,
+ NV04_PGRAPH_BLIMIT4,
+ NV04_PGRAPH_BOFFSET5,
+ NV04_PGRAPH_BBASE5,
+ NV04_PGRAPH_BLIMIT5,
+ NV04_PGRAPH_BPITCH0,
+ NV04_PGRAPH_BPITCH1,
+ NV04_PGRAPH_BPITCH2,
+ NV04_PGRAPH_BPITCH3,
+ NV04_PGRAPH_BPITCH4,
+ NV04_PGRAPH_SURFACE,
+ NV04_PGRAPH_STATE,
+ NV04_PGRAPH_BSWIZZLE2,
+ NV04_PGRAPH_BSWIZZLE5,
+ NV04_PGRAPH_BPIXEL,
+ NV04_PGRAPH_NOTIFY,
+ NV04_PGRAPH_PATT_COLOR0,
+ NV04_PGRAPH_PATT_COLOR1,
+ NV04_PGRAPH_PATT_COLORRAM+0x00,
+ NV04_PGRAPH_PATT_COLORRAM+0x01,
+ NV04_PGRAPH_PATT_COLORRAM+0x02,
+ NV04_PGRAPH_PATT_COLORRAM+0x03,
+ NV04_PGRAPH_PATT_COLORRAM+0x04,
+ NV04_PGRAPH_PATT_COLORRAM+0x05,
+ NV04_PGRAPH_PATT_COLORRAM+0x06,
+ NV04_PGRAPH_PATT_COLORRAM+0x07,
+ NV04_PGRAPH_PATT_COLORRAM+0x08,
+ NV04_PGRAPH_PATT_COLORRAM+0x09,
+ NV04_PGRAPH_PATT_COLORRAM+0x0A,
+ NV04_PGRAPH_PATT_COLORRAM+0x0B,
+ NV04_PGRAPH_PATT_COLORRAM+0x0C,
+ NV04_PGRAPH_PATT_COLORRAM+0x0D,
+ NV04_PGRAPH_PATT_COLORRAM+0x0E,
+ NV04_PGRAPH_PATT_COLORRAM+0x0F,
+ NV04_PGRAPH_PATT_COLORRAM+0x10,
+ NV04_PGRAPH_PATT_COLORRAM+0x11,
+ NV04_PGRAPH_PATT_COLORRAM+0x12,
+ NV04_PGRAPH_PATT_COLORRAM+0x13,
+ NV04_PGRAPH_PATT_COLORRAM+0x14,
+ NV04_PGRAPH_PATT_COLORRAM+0x15,
+ NV04_PGRAPH_PATT_COLORRAM+0x16,
+ NV04_PGRAPH_PATT_COLORRAM+0x17,
+ NV04_PGRAPH_PATT_COLORRAM+0x18,
+ NV04_PGRAPH_PATT_COLORRAM+0x19,
+ NV04_PGRAPH_PATT_COLORRAM+0x1A,
+ NV04_PGRAPH_PATT_COLORRAM+0x1B,
+ NV04_PGRAPH_PATT_COLORRAM+0x1C,
+ NV04_PGRAPH_PATT_COLORRAM+0x1D,
+ NV04_PGRAPH_PATT_COLORRAM+0x1E,
+ NV04_PGRAPH_PATT_COLORRAM+0x1F,
+ NV04_PGRAPH_PATT_COLORRAM+0x20,
+ NV04_PGRAPH_PATT_COLORRAM+0x21,
+ NV04_PGRAPH_PATT_COLORRAM+0x22,
+ NV04_PGRAPH_PATT_COLORRAM+0x23,
+ NV04_PGRAPH_PATT_COLORRAM+0x24,
+ NV04_PGRAPH_PATT_COLORRAM+0x25,
+ NV04_PGRAPH_PATT_COLORRAM+0x26,
+ NV04_PGRAPH_PATT_COLORRAM+0x27,
+ NV04_PGRAPH_PATT_COLORRAM+0x28,
+ NV04_PGRAPH_PATT_COLORRAM+0x29,
+ NV04_PGRAPH_PATT_COLORRAM+0x2A,
+ NV04_PGRAPH_PATT_COLORRAM+0x2B,
+ NV04_PGRAPH_PATT_COLORRAM+0x2C,
+ NV04_PGRAPH_PATT_COLORRAM+0x2D,
+ NV04_PGRAPH_PATT_COLORRAM+0x2E,
+ NV04_PGRAPH_PATT_COLORRAM+0x2F,
+ NV04_PGRAPH_PATT_COLORRAM+0x30,
+ NV04_PGRAPH_PATT_COLORRAM+0x31,
+ NV04_PGRAPH_PATT_COLORRAM+0x32,
+ NV04_PGRAPH_PATT_COLORRAM+0x33,
+ NV04_PGRAPH_PATT_COLORRAM+0x34,
+ NV04_PGRAPH_PATT_COLORRAM+0x35,
+ NV04_PGRAPH_PATT_COLORRAM+0x36,
+ NV04_PGRAPH_PATT_COLORRAM+0x37,
+ NV04_PGRAPH_PATT_COLORRAM+0x38,
+ NV04_PGRAPH_PATT_COLORRAM+0x39,
+ NV04_PGRAPH_PATT_COLORRAM+0x3A,
+ NV04_PGRAPH_PATT_COLORRAM+0x3B,
+ NV04_PGRAPH_PATT_COLORRAM+0x3C,
+ NV04_PGRAPH_PATT_COLORRAM+0x3D,
+ NV04_PGRAPH_PATT_COLORRAM+0x3E,
+ NV04_PGRAPH_PATT_COLORRAM+0x3F,
+ NV04_PGRAPH_PATTERN,
+ 0x0040080c,
+ NV04_PGRAPH_PATTERN_SHAPE,
+ 0x00400600,
+ NV04_PGRAPH_ROP3,
+ NV04_PGRAPH_CHROMA,
+ NV04_PGRAPH_BETA_AND,
+ NV04_PGRAPH_BETA_PREMULT,
+ NV04_PGRAPH_CONTROL0,
+ NV04_PGRAPH_CONTROL1,
+ NV04_PGRAPH_CONTROL2,
+ NV04_PGRAPH_BLEND,
+ NV04_PGRAPH_STORED_FMT,
+ NV04_PGRAPH_SOURCE_COLOR,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400400,
+ 0x00400480,
+ 0x00400404,
+ 0x00400484,
+ 0x00400408,
+ 0x00400488,
+ 0x0040040c,
+ 0x0040048c,
+ 0x00400410,
+ 0x00400490,
+ 0x00400414,
+ 0x00400494,
+ 0x00400418,
+ 0x00400498,
+ 0x0040041c,
+ 0x0040049c,
+ 0x00400420,
+ 0x004004a0,
+ 0x00400424,
+ 0x004004a4,
+ 0x00400428,
+ 0x004004a8,
+ 0x0040042c,
+ 0x004004ac,
+ 0x00400430,
+ 0x004004b0,
+ 0x00400434,
+ 0x004004b4,
+ 0x00400438,
+ 0x004004b8,
+ 0x0040043c,
+ 0x004004bc,
+ 0x00400440,
+ 0x004004c0,
+ 0x00400444,
+ 0x004004c4,
+ 0x00400448,
+ 0x004004c8,
+ 0x0040044c,
+ 0x004004cc,
+ 0x00400450,
+ 0x004004d0,
+ 0x00400454,
+ 0x004004d4,
+ 0x00400458,
+ 0x004004d8,
+ 0x0040045c,
+ 0x004004dc,
+ 0x00400460,
+ 0x004004e0,
+ 0x00400464,
+ 0x004004e4,
+ 0x00400468,
+ 0x004004e8,
+ 0x0040046c,
+ 0x004004ec,
+ 0x00400470,
+ 0x004004f0,
+ 0x00400474,
+ 0x004004f4,
+ 0x00400478,
+ 0x004004f8,
+ 0x0040047c,
+ 0x004004fc,
+ 0x0040053c,
+ 0x00400544,
+ 0x00400540,
+ 0x00400548,
+ 0x00400560,
+ 0x00400568,
+ 0x00400564,
+ 0x0040056c,
+ 0x00400534,
+ 0x00400538,
+ 0x00400514,
+ 0x00400518,
+ 0x0040051c,
+ 0x00400520,
+ 0x00400524,
+ 0x00400528,
+ 0x0040052c,
+ 0x00400530,
+ 0x00400d00,
+ 0x00400d40,
+ 0x00400d80,
+ 0x00400d04,
+ 0x00400d44,
+ 0x00400d84,
+ 0x00400d08,
+ 0x00400d48,
+ 0x00400d88,
+ 0x00400d0c,
+ 0x00400d4c,
+ 0x00400d8c,
+ 0x00400d10,
+ 0x00400d50,
+ 0x00400d90,
+ 0x00400d14,
+ 0x00400d54,
+ 0x00400d94,
+ 0x00400d18,
+ 0x00400d58,
+ 0x00400d98,
+ 0x00400d1c,
+ 0x00400d5c,
+ 0x00400d9c,
+ 0x00400d20,
+ 0x00400d60,
+ 0x00400da0,
+ 0x00400d24,
+ 0x00400d64,
+ 0x00400da4,
+ 0x00400d28,
+ 0x00400d68,
+ 0x00400da8,
+ 0x00400d2c,
+ 0x00400d6c,
+ 0x00400dac,
+ 0x00400d30,
+ 0x00400d70,
+ 0x00400db0,
+ 0x00400d34,
+ 0x00400d74,
+ 0x00400db4,
+ 0x00400d38,
+ 0x00400d78,
+ 0x00400db8,
+ 0x00400d3c,
+ 0x00400d7c,
+ 0x00400dbc,
+ 0x00400590,
+ 0x00400594,
+ 0x00400598,
+ 0x0040059c,
+ 0x004005a8,
+ 0x004005ac,
+ 0x004005b0,
+ 0x004005b4,
+ 0x004005c0,
+ 0x004005c4,
+ 0x004005c8,
+ 0x004005cc,
+ 0x004005d0,
+ 0x004005d4,
+ 0x004005d8,
+ 0x004005dc,
+ 0x004005e0,
+ NV04_PGRAPH_PASSTHRU_0,
+ NV04_PGRAPH_PASSTHRU_1,
+ NV04_PGRAPH_PASSTHRU_2,
+ NV04_PGRAPH_DVD_COLORFMT,
+ NV04_PGRAPH_SCALED_FORMAT,
+ NV04_PGRAPH_MISC24_0,
+ NV04_PGRAPH_MISC24_1,
+ NV04_PGRAPH_MISC24_2,
+ 0x00400500,
+ 0x00400504,
+ NV04_PGRAPH_VALID1,
+ NV04_PGRAPH_VALID2
};
-void nouveau_nv04_context_switch(drm_device_t *dev)
+void nouveau_nv04_context_switch(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- int channel, channel_old, i, j, index;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *next, *last;
+ int chid;
+
+ if (!dev) {
+ DRM_DEBUG("Invalid drm_device\n");
+ return;
+ }
+ dev_priv = dev->dev_private;
+ if (!dev_priv) {
+ DRM_DEBUG("Invalid drm_nouveau_private\n");
+ return;
+ }
+ if (!dev_priv->fifos) {
+ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
+ return;
+ }
+
+ chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
+ next = dev_priv->fifos[chid];
+
+ if (!next) {
+ DRM_DEBUG("Invalid next channel\n");
+ return;
+ }
- channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
- channel_old = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
+ chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
+ last = dev_priv->fifos[chid];
- DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel);
+ if (!last) {
+ DRM_DEBUG("WARNING: Invalid last channel, switch to %x\n",
+ next->id);
+ } else {
+ DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",
+ last->id, next->id);
+ }
- NV_WRITE(NV03_PFIFO_CACHES, 0x0);
+/* NV_WRITE(NV03_PFIFO_CACHES, 0x0);
NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
- NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);
+ NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x0);*/
NV_WRITE(NV04_PGRAPH_FIFO,0x0);
- nouveau_wait_for_idle(dev);
+ if (last)
+ nv04_graph_save_context(last);
- // save PGRAPH context
- index=0;
- for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
- for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
- {
- dev_priv->fifos[channel_old].pgraph_ctx[index] = NV_READ(nv04_graph_ctx_regs[i].reg+j*4);
- index++;
- }
+ nouveau_wait_for_idle(dev);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10000000);
NV_WRITE(NV04_PGRAPH_CTX_USER, (NV_READ(NV04_PGRAPH_CTX_USER) & 0xffffff) | (0x0f << 24));
- // restore PGRAPH context
- index=0;
- for (i = 0; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
- for (j = 0; j<nv04_graph_ctx_regs[i].number; j++)
- {
- NV_WRITE(nv04_graph_ctx_regs[i].reg+j*4, dev_priv->fifos[channel].pgraph_ctx[index]);
- index++;
- }
+ nouveau_wait_for_idle(dev);
+
+ nv04_graph_load_context(next);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL, 0x10010100);
- NV_WRITE(NV04_PGRAPH_CTX_USER, channel << 24);
+ NV_WRITE(NV04_PGRAPH_CTX_USER, next->id << 24);
NV_WRITE(NV04_PGRAPH_FFINTFC_ST2, NV_READ(NV04_PGRAPH_FFINTFC_ST2)&0x000FFFFF);
- NV_WRITE(NV04_PGRAPH_FIFO,0x0);
+/* NV_WRITE(NV04_PGRAPH_FIFO,0x0);
NV_WRITE(NV04_PFIFO_CACHE0_PULL0, 0x0);
NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x1);
- NV_WRITE(NV03_PFIFO_CACHES, 0x1);
+ NV_WRITE(NV03_PFIFO_CACHES, 0x1);*/
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
-int nv04_graph_create_context(drm_device_t *dev, int channel) {
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("nv04_graph_context_create %d\n", channel);
+int nv04_graph_create_context(struct nouveau_channel *chan) {
+ DRM_DEBUG("nv04_graph_context_create %d\n", chan->id);
- memset(dev_priv->fifos[channel].pgraph_ctx, 0, sizeof(dev_priv->fifos[channel].pgraph_ctx));
+ memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx));
//dev_priv->fifos[channel].pgraph_ctx_user = channel << 24;
- dev_priv->fifos[channel].pgraph_ctx[0] = 0x0001ffff;
+ chan->pgraph_ctx[0] = 0x0001ffff;
/* is it really needed ??? */
//dev_priv->fifos[channel].pgraph_ctx[1] = NV_READ(NV_PGRAPH_DEBUG_4);
//dev_priv->fifos[channel].pgraph_ctx[2] = NV_READ(0x004006b0);
@@ -351,49 +427,60 @@ int nv04_graph_create_context(drm_device_t *dev, int channel) {
return 0;
}
-void nv04_graph_destroy_context(drm_device_t *dev, int channel)
+void nv04_graph_destroy_context(struct nouveau_channel *chan)
{
}
-int nv04_graph_load_context(drm_device_t *dev, int channel)
+int nv04_graph_load_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
+ NV_WRITE(nv04_graph_ctx_regs[i], chan->pgraph_ctx[i]);
+
return 0;
}
-int nv04_graph_save_context(drm_device_t *dev, int channel)
+int nv04_graph_save_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
+ chan->pgraph_ctx[i] = NV_READ(nv04_graph_ctx_regs[i]);
+
return 0;
}
-int nv04_graph_init(drm_device_t *dev) {
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- int i,sum=0;
+int nv04_graph_init(struct drm_device *dev) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
+ /* Enable PGRAPH interrupts */
+ NV_WRITE(NV03_PGRAPH_INTR, 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
+
// check the context is big enough
- for ( i = 0 ; i<sizeof(nv04_graph_ctx_regs)/sizeof(nv04_graph_ctx_regs[0]); i++)
- sum+=nv04_graph_ctx_regs[i].number;
- if ( sum*4>sizeof(dev_priv->fifos[0].pgraph_ctx) )
+ if ( sizeof(nv04_graph_ctx_regs)>sizeof(dev_priv->fifos[0]->pgraph_ctx) )
DRM_ERROR("pgraph_ctx too small\n");
- NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
- NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
-
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x000001FF);
- NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1230C000);
- NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x72111101);
- NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11D5F071);
+ NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x1231c000);
+ NV_WRITE(NV04_PGRAPH_DEBUG_1, 0xf2d91100);
+ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x11d5f870);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x0004FF31);
NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x4004FF31 |
(0x00D00000) |
(1<<29) |
(1<<31));
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);
NV_WRITE(NV04_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_CTX_CONTROL , 0x10010100);
@@ -406,7 +493,7 @@ int nv04_graph_init(drm_device_t *dev) {
return 0;
}
-void nv04_graph_takedown(drm_device_t *dev)
+void nv04_graph_takedown(struct drm_device *dev)
{
}
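
The nv04 PGRAPH context code above drops the old {register, count} interval table in favour of a flat nv04_graph_ctx_regs[] array that is mirrored one-to-one into chan->pgraph_ctx[] by nv04_graph_save_context() and nv04_graph_load_context(). A minimal sketch of that table-driven save/restore pattern; the register values and the mmio_read()/mmio_write() accessors below are illustrative only, not driver API.

/* Sketch only: save/restore a block of MMIO registers through a flat
 * address table.  Addresses and accessors here are placeholders. */
#include <stdint.h>
#include <stddef.h>

static const uint32_t ctx_regs[] = { 0x00400184, 0x004001a4, 0x004001c4 };

static void ctx_save(uint32_t *ctx, uint32_t (*mmio_read)(uint32_t))
{
	size_t i;

	for (i = 0; i < sizeof(ctx_regs) / sizeof(ctx_regs[0]); i++)
		ctx[i] = mmio_read(ctx_regs[i]);	/* snapshot each register */
}

static void ctx_restore(const uint32_t *ctx,
			void (*mmio_write)(uint32_t, uint32_t))
{
	size_t i;

	for (i = 0; i < sizeof(ctx_regs) / sizeof(ctx_regs[0]); i++)
		mmio_write(ctx_regs[i], ctx[i]);	/* write each register back */
}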
diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c
new file mode 100644
index 00000000..5a446450
--- /dev/null
+++ b/shared-core/nv04_instmem.c
@@ -0,0 +1,159 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+static void
+nv04_instmem_determine_amount(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Figure out how much instance memory we need */
+ if (dev_priv->card_type >= NV_40) {
+ /* We'll want more instance memory than this on some NV4x cards.
+ * There's a 16MB aperture to play with that maps onto the end
+ * of vram. For now, only reserve a small piece until we know
+ * more about what each chipset requires.
+ */
+ dev_priv->ramin_rsvd_vram = (1*1024* 1024);
+ } else {
+ /*XXX: what *are* the limits on <NV40 cards, and does RAMIN
+ * exist in vram on those cards as well?
+ */
+ dev_priv->ramin_rsvd_vram = (512*1024);
+ }
+ DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_rsvd_vram>>10);
+
+ /* Clear all of it, except the BIOS image that's in the first 64KiB */
+ for (i=(64*1024); i<dev_priv->ramin_rsvd_vram; i+=4)
+ NV_WI32(i, 0x00000000);
+}
+
+static void
+nv04_instmem_configure_fixed_tables(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* FIFO hash table (RAMHT)
+ * use 4k hash table at RAMIN+0x10000
+ * TODO: extend the hash table
+ */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", dev_priv->ramht_offset,
+ dev_priv->ramht_size);
+
+ /* FIFO runout table (RAMRO) - 512k at 0x11200 */
+ dev_priv->ramro_offset = 0x11200;
+ dev_priv->ramro_size = 512;
+ DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", dev_priv->ramro_offset,
+ dev_priv->ramro_size);
+
+ /* FIFO context table (RAMFC)
+ * NV40 : Not sure exactly how to position RAMFC on some cards,
+ * 0x30002 seems to position it at RAMIN+0x20000 on these
+ * cards. RAMFC is 4KiB (32 fifos, 128-byte entries).
+ * Others: Position RAMFC at RAMIN+0x11400
+ */
+ switch(dev_priv->card_type)
+ {
+ case NV_40:
+ case NV_44:
+ dev_priv->ramfc_offset = 0x20000;
+ dev_priv->ramfc_size = nouveau_fifo_number(dev) *
+ nouveau_fifo_ctx_size(dev);
+ break;
+ case NV_30:
+ case NV_20:
+ case NV_17:
+ case NV_11:
+ case NV_10:
+ case NV_04:
+ default:
+ dev_priv->ramfc_offset = 0x11400;
+ dev_priv->ramfc_size = nouveau_fifo_number(dev) *
+ nouveau_fifo_ctx_size(dev);
+ break;
+ }
+ DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset,
+ dev_priv->ramfc_size);
+}
+
+int nv04_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t offset;
+ int ret = 0;
+
+ nv04_instmem_determine_amount(dev);
+ nv04_instmem_configure_fixed_tables(dev);
+
+ /* Create a heap to manage RAMIN allocations, we don't allocate
+ * the space that was reserved for RAMHT/FC/RO.
+ */
+ offset = dev_priv->ramfc_offset + dev_priv->ramfc_size;
+
+ /* On my NV4E, there's *something* clobbering the 16KiB just after
+ * where we set up these fixed tables. No idea what it is just yet,
+ * so reserve this space on all NV4X cards for now.
+ */
+ if (dev_priv->card_type >= NV_40)
+ offset += 16*1024;
+
+ ret = nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ offset, dev_priv->ramin_rsvd_vram - offset);
+ if (ret) {
+ dev_priv->ramin_heap = NULL;
+ DRM_ERROR("Failed to init RAMIN heap\n");
+ }
+
+ return ret;
+}
+
+void
+nv04_instmem_takedown(struct drm_device *dev)
+{
+}
+
+int
+nv04_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+{
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ return 0;
+}
+
+void
+nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->Engine.instmem.unbind(dev, gpuobj);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv04_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (!gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
diff --git a/shared-core/nv04_mc.c b/shared-core/nv04_mc.c
index 0e23efb2..eee0c50c 100644
--- a/shared-core/nv04_mc.c
+++ b/shared-core/nv04_mc.c
@@ -4,22 +4,20 @@
#include "nouveau_drm.h"
int
-nv04_mc_init(drm_device_t *dev)
+nv04_mc_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
/* Power up everything, resetting each individual unit will
* be done later if needed.
*/
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
- NV_WRITE(NV03_PMC_INTR_EN_0, 0);
-
return 0;
}
void
-nv04_mc_takedown(drm_device_t *dev)
+nv04_mc_takedown(struct drm_device *dev)
{
}
diff --git a/shared-core/nv04_timer.c b/shared-core/nv04_timer.c
index a4b4e826..08a27f4f 100644
--- a/shared-core/nv04_timer.c
+++ b/shared-core/nv04_timer.c
@@ -4,9 +4,9 @@
#include "nouveau_drm.h"
int
-nv04_timer_init(drm_device_t *dev)
+nv04_timer_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
NV_WRITE(NV04_PTIMER_INTR_EN_0, 0x00000000);
NV_WRITE(NV04_PTIMER_INTR_0, 0xFFFFFFFF);
@@ -17,8 +17,29 @@ nv04_timer_init(drm_device_t *dev)
return 0;
}
+uint64_t
+nv04_timer_read(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t low;
+ /* From kmmio dumps on nv28 this looks like how the blob does this.
+ * It reads the high dword twice, before and after the low dword.
+ * The only explanation seems to be that the 64-bit timer counter
+ * advances between the high and low dword reads and could otherwise
+ * corrupt the result. Not confirmed.
+ */
+ uint32_t high2 = NV_READ(NV04_PTIMER_TIME_1);
+ uint32_t high1;
+ do {
+ high1 = high2;
+ low = NV_READ(NV04_PTIMER_TIME_0);
+ high2 = NV_READ(NV04_PTIMER_TIME_1);
+ } while(high1 != high2);
+ return (((uint64_t)high2) << 32) | (uint64_t)low;
+}
+
void
-nv04_timer_takedown(drm_device_t *dev)
+nv04_timer_takedown(struct drm_device *dev)
{
}
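
nv04_timer_read() above assembles the 64-bit PTIMER value from the two 32-bit TIME_0/TIME_1 registers by sampling the high dword before and after the low dword and retrying until both high samples agree, so a carry between the two reads cannot produce a torn value. A generic sketch of that pattern, with hypothetical accessors in place of the driver's NV_READ macro:

/* Sketch only: consistent read of a 64-bit counter exposed as two 32-bit
 * registers, where the low word may carry into the high word between
 * reads.  read_hi()/read_lo() are illustrative, not driver API. */
#include <stdint.h>

static uint64_t read_split_counter(uint32_t (*read_hi)(void),
				   uint32_t (*read_lo)(void))
{
	uint32_t hi = read_hi();
	uint32_t lo, hi2;

	for (;;) {
		lo  = read_lo();
		hi2 = read_hi();
		if (hi == hi2)
			break;		/* no rollover between the reads */
		hi = hi2;		/* high word changed; sample again */
	}

	return ((uint64_t)hi << 32) | lo;
}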
diff --git a/shared-core/nv10_fb.c b/shared-core/nv10_fb.c
index e8336a2d..7fff5b3f 100644
--- a/shared-core/nv10_fb.c
+++ b/shared-core/nv10_fb.c
@@ -4,9 +4,9 @@
#include "nouveau_drm.h"
int
-nv10_fb_init(drm_device_t *dev)
+nv10_fb_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fb_bar_size;
int i;
@@ -20,7 +20,7 @@ nv10_fb_init(drm_device_t *dev)
}
void
-nv10_fb_takedown(drm_device_t *dev)
+nv10_fb_takedown(struct drm_device *dev)
{
}
diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c
index b84971de..c86725d2 100644
--- a/shared-core/nv10_fifo.c
+++ b/shared-core/nv10_fifo.c
@@ -28,30 +28,34 @@
#include "drm.h"
#include "nouveau_drv.h"
-#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV10_RAMFC_##offset, (val))
-#define RAMFC_RD(offset) NV_RI32(fifoctx + NV10_RAMFC_##offset)
-#define NV10_RAMFC(c) (dev_priv->ramfc_offset + NV10_RAMFC__SIZE)
+
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+ NV10_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
+ NV10_RAMFC_##offset/4)
+#define NV10_RAMFC(c) (dev_priv->ramfc_offset + ((c) * NV10_RAMFC__SIZE))
#define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)
int
-nv10_fifo_create_context(drm_device_t *dev, int channel)
+nv10_fifo_create_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
- uint32_t fifoctx = NV10_RAMFC(channel), pushbuf;
- int i;
-
- pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
- for (i=0; i<NV10_RAMFC__SIZE; i+=4)
- NV_WI32(fifoctx + i, 0);
+ if ((ret = nouveau_gpuobj_new_fake(dev, NV10_RAMFC(chan->id), ~0,
+ NV10_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc)))
+ return ret;
/* Fill entries that are seen filled in dumps of nvidia driver just
* after channel's is put into DMA mode
*/
RAMFC_WR(DMA_PUT , chan->pushbuf_base);
RAMFC_WR(DMA_GET , chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE , pushbuf);
+ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -60,28 +64,30 @@ nv10_fifo_create_context(drm_device_t *dev, int channel)
#endif
0);
+ /* enable the fifo dma operation */
+ NV_WRITE(NV04_PFIFO_MODE,NV_READ(NV04_PFIFO_MODE)|(1<<chan->id));
return 0;
}
void
-nv10_fifo_destroy_context(drm_device_t *dev, int channel)
+nv10_fifo_destroy_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV10_RAMFC(channel);
- int i;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));
- for (i=0; i<NV10_RAMFC__SIZE; i+=4)
- NV_WI32(fifoctx + i, 0);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
-nv10_fifo_load_context(drm_device_t *dev, int channel)
+nv10_fifo_load_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV10_RAMFC(channel);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
- NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | channel);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00000100 | chan->id);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT , RAMFC_RD(DMA_PUT));
@@ -117,10 +123,10 @@ nv10_fifo_load_context(drm_device_t *dev, int channel)
}
int
-nv10_fifo_save_context(drm_device_t *dev, int channel)
+nv10_fifo_save_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV10_RAMFC(channel);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c
index d1fe0a54..1fd185a0 100644
--- a/shared-core/nv10_graph.c
+++ b/shared-core/nv10_graph.c
@@ -27,13 +27,68 @@
#include "nouveau_drm.h"
#include "nouveau_drv.h"
+#define NV10_FIFO_NUMBER 32
+
+struct pipe_state {
+ uint32_t pipe_0x0000[0x040/4];
+ uint32_t pipe_0x0040[0x010/4];
+ uint32_t pipe_0x0200[0x0c0/4];
+ uint32_t pipe_0x4400[0x080/4];
+ uint32_t pipe_0x6400[0x3b0/4];
+ uint32_t pipe_0x6800[0x2f0/4];
+ uint32_t pipe_0x6c00[0x030/4];
+ uint32_t pipe_0x7000[0x130/4];
+ uint32_t pipe_0x7400[0x0c0/4];
+ uint32_t pipe_0x7800[0x0c0/4];
+};
+
+/* TODO dynamic allocation ??? */
+static struct pipe_state pipe_state[NV10_FIFO_NUMBER];
+
+static void nv10_graph_save_pipe(struct nouveau_channel *chan) {
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pipe_state *fifo_pipe_state = pipe_state + chan->id;
+ int i;
+#define PIPE_SAVE(addr) \
+ do { \
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
+ fifo_pipe_state->pipe_##addr[i] = NV_READ(NV10_PGRAPH_PIPE_DATA); \
+ } while (0)
+
+ PIPE_SAVE(0x4400);
+ PIPE_SAVE(0x0200);
+ PIPE_SAVE(0x6400);
+ PIPE_SAVE(0x6800);
+ PIPE_SAVE(0x6c00);
+ PIPE_SAVE(0x7000);
+ PIPE_SAVE(0x7400);
+ PIPE_SAVE(0x7800);
+ PIPE_SAVE(0x0040);
+ PIPE_SAVE(0x0000);
+
+#undef PIPE_SAVE
+}
-static void nv10_praph_pipe(drm_device_t *dev) {
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+static void nv10_graph_load_pipe(struct nouveau_channel *chan) {
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct pipe_state *fifo_pipe_state = pipe_state + chan->id;
int i;
+ uint32_t xfmode0, xfmode1;
+#define PIPE_RESTORE(addr) \
+ do { \
+ NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, addr); \
+ for (i=0; i < sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]); i++) \
+ NV_WRITE(NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
+ } while (0)
+
nouveau_wait_for_idle(dev);
/* XXX check haiku comments */
+ xfmode0 = NV_READ(NV10_PGRAPH_XFMODE0);
+ xfmode1 = NV_READ(NV10_PGRAPH_XFMODE1);
NV_WRITE(NV10_PGRAPH_XFMODE0, 0x10000000);
NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
@@ -43,7 +98,6 @@ static void nv10_praph_pipe(drm_device_t *dev) {
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
-
for (i = 0; i < 3; i++)
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
@@ -54,138 +108,179 @@ static void nv10_praph_pipe(drm_device_t *dev) {
NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000008);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000200);
- for (i = 0; i < 48; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ PIPE_RESTORE(0x0200);
nouveau_wait_for_idle(dev);
- NV_WRITE(NV10_PGRAPH_XFMODE0, 0x00000000);
- NV_WRITE(NV10_PGRAPH_XFMODE1, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006400);
- for (i = 0; i < 211; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ /* restore XFMODE */
+ NV_WRITE(NV10_PGRAPH_XFMODE0, xfmode0);
+ NV_WRITE(NV10_PGRAPH_XFMODE1, xfmode1);
+ PIPE_RESTORE(0x6400);
+ PIPE_RESTORE(0x6800);
+ PIPE_RESTORE(0x6c00);
+ PIPE_RESTORE(0x7000);
+ PIPE_RESTORE(0x7400);
+ PIPE_RESTORE(0x7800);
+ PIPE_RESTORE(0x4400);
+ PIPE_RESTORE(0x0000);
+ PIPE_RESTORE(0x0040);
+ nouveau_wait_for_idle(dev);
+
+#undef PIPE_RESTORE
+}
+
+static void nv10_graph_create_pipe(struct nouveau_channel *chan) {
+ struct pipe_state *fifo_pipe_state = pipe_state + chan->id;
+ uint32_t *fifo_pipe_state_addr;
+ int i;
+#define PIPE_INIT(addr) \
+ do { \
+ fifo_pipe_state_addr = fifo_pipe_state->pipe_##addr; \
+ } while (0)
+#define PIPE_INIT_END(addr) \
+ do { \
+ if (fifo_pipe_state_addr != \
+ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr) \
+ DRM_ERROR("incomplete pipe init for 0x%x : %p/%p\n", addr, fifo_pipe_state_addr, \
+ sizeof(fifo_pipe_state->pipe_##addr)/sizeof(fifo_pipe_state->pipe_##addr[0]) + fifo_pipe_state->pipe_##addr); \
+ } while (0)
+#define NV_WRITE_PIPE_INIT(value) *(fifo_pipe_state_addr++) = value
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x40000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
-
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006800);
+ PIPE_INIT(0x0200);
+ for (i = 0; i < 48; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0200);
+
+ PIPE_INIT(0x6400);
+ for (i = 0; i < 211; i++)
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x40000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x3f000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
+ PIPE_INIT_END(0x6400);
+
+ PIPE_INIT(0x6800);
for (i = 0; i < 162; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x3f800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x3f800000);
for (i = 0; i < 25; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00006c00);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0xbf800000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6800);
+
+ PIPE_INIT(0x6c00);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0xbf800000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x6c00);
+
+ PIPE_INIT(0x7000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ NV_WRITE_PIPE_INIT(0x7149f2ca);
for (i = 0; i < 35; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
-
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7000);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007400);
+ PIPE_INIT(0x7400);
for (i = 0; i < 48; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7400);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00007800);
+ PIPE_INIT(0x7800);
for (i = 0; i < 48; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x7800);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00004400);
+ PIPE_INIT(0x4400);
for (i = 0; i < 32; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x4400);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000000);
+ PIPE_INIT(0x0000);
for (i = 0; i < 16; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0000);
- NV_WRITE(NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
+ PIPE_INIT(0x0040);
for (i = 0; i < 4; i++)
- NV_WRITE(NV10_PGRAPH_PIPE_DATA, 0x00000000);
+ NV_WRITE_PIPE_INIT(0x00000000);
+ PIPE_INIT_END(0x0040);
- nouveau_wait_for_idle(dev);
+#undef PIPE_INIT
+#undef PIPE_INIT_END
+#undef NV_WRITE_PIPE_INIT
}
-/* TODO replace address with name
- use loops */
static int nv10_graph_ctx_regs [] = {
-NV03_PGRAPH_XY_LOGIC_MISC0,
-
NV10_PGRAPH_CTX_SWITCH1,
NV10_PGRAPH_CTX_SWITCH2,
NV10_PGRAPH_CTX_SWITCH3,
@@ -455,6 +550,7 @@ NV03_PGRAPH_ABS_UCLIPA_YMIN,
NV03_PGRAPH_ABS_UCLIPA_YMAX,
NV03_PGRAPH_ABS_ICLIP_XMAX,
NV03_PGRAPH_ABS_ICLIP_YMAX,
+NV03_PGRAPH_XY_LOGIC_MISC0,
NV03_PGRAPH_XY_LOGIC_MISC1,
NV03_PGRAPH_XY_LOGIC_MISC2,
NV03_PGRAPH_XY_LOGIC_MISC3,
@@ -462,18 +558,18 @@ NV03_PGRAPH_CLIPX_0,
NV03_PGRAPH_CLIPX_1,
NV03_PGRAPH_CLIPY_0,
NV03_PGRAPH_CLIPY_1,
-0x00400e40,
-0x00400e44,
-0x00400e48,
-0x00400e4c,
-0x00400e50,
-0x00400e54,
-0x00400e58,
-0x00400e5c,
-0x00400e60,
-0x00400e64,
-0x00400e68,
-0x00400e6c,
+NV10_PGRAPH_COMBINER0_IN_ALPHA,
+NV10_PGRAPH_COMBINER1_IN_ALPHA,
+NV10_PGRAPH_COMBINER0_IN_RGB,
+NV10_PGRAPH_COMBINER1_IN_RGB,
+NV10_PGRAPH_COMBINER_COLOR0,
+NV10_PGRAPH_COMBINER_COLOR1,
+NV10_PGRAPH_COMBINER0_OUT_ALPHA,
+NV10_PGRAPH_COMBINER1_OUT_ALPHA,
+NV10_PGRAPH_COMBINER0_OUT_RGB,
+NV10_PGRAPH_COMBINER1_OUT_RGB,
+NV10_PGRAPH_COMBINER_FINAL0,
+NV10_PGRAPH_COMBINER_FINAL1,
0x00400e00,
0x00400e04,
0x00400e08,
@@ -527,9 +623,9 @@ NV10_PGRAPH_DEBUG_4,
0x00400a04,
};
-static int nv10_graph_ctx_regs_find_offset(drm_device_t *dev, int reg)
+static int nv10_graph_ctx_regs_find_offset(struct drm_device *dev, int reg)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i, j;
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++) {
if (nv10_graph_ctx_regs[i] == reg)
@@ -544,82 +640,117 @@ static int nv10_graph_ctx_regs_find_offset(drm_device_t *dev, int reg)
return -1;
}
-static void restore_ctx_regs(drm_device_t *dev, int channel)
+int nv10_graph_load_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *fifo = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i, j;
+
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
- NV_WRITE(nv10_graph_ctx_regs[i], fifo->pgraph_ctx[i]);
+ NV_WRITE(nv10_graph_ctx_regs[i], chan->pgraph_ctx[i]);
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
- NV_WRITE(nv17_graph_ctx_regs[j], fifo->pgraph_ctx[i]);
+ NV_WRITE(nv17_graph_ctx_regs[j], chan->pgraph_ctx[i]);
}
- nouveau_wait_for_idle(dev);
-}
-void nouveau_nv10_context_switch(drm_device_t *dev)
-{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- int channel, channel_old, i, j;
-
- channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
- channel_old = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
+ nv10_graph_load_pipe(chan);
- DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel);
+ return 0;
+}
- NV_WRITE(NV04_PGRAPH_FIFO,0x0);
-#if 0
- NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000);
- NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000);
- NV_WRITE(NV_PFIFO_CACHES, 0x00000000);
-#endif
+int nv10_graph_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, j;
- // save PGRAPH context
for (i = 0; i < sizeof(nv10_graph_ctx_regs)/sizeof(nv10_graph_ctx_regs[0]); i++)
- dev_priv->fifos[channel_old].pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]);
+ chan->pgraph_ctx[i] = NV_READ(nv10_graph_ctx_regs[i]);
if (dev_priv->chipset>=0x17) {
for (j = 0; j < sizeof(nv17_graph_ctx_regs)/sizeof(nv17_graph_ctx_regs[0]); i++,j++)
- dev_priv->fifos[channel_old].pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
+ chan->pgraph_ctx[i] = NV_READ(nv17_graph_ctx_regs[j]);
+ }
+
+ nv10_graph_save_pipe(chan);
+
+ return 0;
+}
+
+void nouveau_nv10_context_switch(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv;
+ struct nouveau_channel *next, *last;
+ int chid;
+
+ if (!dev) {
+ DRM_DEBUG("Invalid drm_device\n");
+ return;
+ }
+ dev_priv = dev->dev_private;
+ if (!dev_priv) {
+ DRM_DEBUG("Invalid drm_nouveau_private\n");
+ return;
}
-
+ if (!dev_priv->fifos) {
+ DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");
+ return;
+ }
+
+ chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20)&(nouveau_fifo_number(dev)-1);
+ next = dev_priv->fifos[chid];
+
+ if (!next) {
+ DRM_DEBUG("Invalid next channel\n");
+ return;
+ }
+
+ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
+ last = dev_priv->fifos[chid];
+
+ if (!last) {
+		DRM_DEBUG("WARNING: Invalid last channel, switching to %x\n",
+ next->id);
+ } else {
+ DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
+ last->id, next->id);
+ }
+
+ NV_WRITE(NV04_PGRAPH_FIFO,0x0);
+ if (last) {
+ nouveau_wait_for_idle(dev);
+ nv10_graph_save_context(last);
+ }
+
nouveau_wait_for_idle(dev);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
- NV_WRITE(NV10_PGRAPH_CTX_USER, (NV_READ(NV10_PGRAPH_CTX_USER) & 0xffffff) | (0x1f << 24));
nouveau_wait_for_idle(dev);
- // restore PGRAPH context
-#if 1
- restore_ctx_regs(dev, channel);
-#endif
-
+
+ nv10_graph_load_context(next);
+
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
- NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24);
NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
-
-#if 0
- NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001);
- NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001);
- NV_WRITE(NV_PFIFO_CACHES, 0x00000001);
-#endif
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
#define NV_WRITE_CTX(reg, val) do { \
int offset = nv10_graph_ctx_regs_find_offset(dev, reg); \
if (offset > 0) \
- fifo->pgraph_ctx[offset] = val; \
+ chan->pgraph_ctx[offset] = val; \
} while (0)
-int nv10_graph_create_context(drm_device_t *dev, int channel) {
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *fifo = &dev_priv->fifos[channel];
- uint32_t tmp, vramsz;
- DRM_DEBUG("nv10_graph_context_create %d\n", channel);
+int nv10_graph_create_context(struct nouveau_channel *chan) {
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("nv10_graph_context_create %d\n", chan->id);
- memset(fifo->pgraph_ctx, 0, sizeof(fifo->pgraph_ctx));
+ memset(chan->pgraph_ctx, 0, sizeof(chan->pgraph_ctx));
+	/* mmio trace suggests that this should be done in the ddx with methods/objects */
+#if 0
+ uint32_t tmp, vramsz;
/* per channel init from ddx */
tmp = NV_READ(NV10_PGRAPH_SURFACE) & 0x0007ff00;
/*XXX the original ddx code, does this in 2 steps :
@@ -644,43 +775,53 @@ int nv10_graph_create_context(drm_device_t *dev, int channel) {
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMIN, 0);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_XMAX, 0x7fff);
NV_WRITE_CTX(NV03_PGRAPH_ABS_UCLIP_YMAX, 0x7fff);
+#endif
+ NV_WRITE_CTX(0x00400e88, 0x08000000);
+ NV_WRITE_CTX(0x00400e9c, 0x4b7fffff);
NV_WRITE_CTX(NV03_PGRAPH_XY_LOGIC_MISC0, 0x0001ffff);
- /* is it really needed ??? */
+ NV_WRITE_CTX(0x00400e10, 0x00001000);
+ NV_WRITE_CTX(0x00400e14, 0x00001000);
+ NV_WRITE_CTX(0x00400e30, 0x00080008);
+ NV_WRITE_CTX(0x00400e34, 0x00080008);
if (dev_priv->chipset>=0x17) {
+		/* is it really needed? */
NV_WRITE_CTX(NV10_PGRAPH_DEBUG_4, NV_READ(NV10_PGRAPH_DEBUG_4));
NV_WRITE_CTX(0x004006b0, NV_READ(0x004006b0));
+ NV_WRITE_CTX(0x00400eac, 0x0fff0000);
+ NV_WRITE_CTX(0x00400eb0, 0x0fff0000);
+ NV_WRITE_CTX(0x00400ec0, 0x00000080);
+ NV_WRITE_CTX(0x00400ed0, 0x00000080);
}
+ NV_WRITE_CTX(NV10_PGRAPH_CTX_USER, chan->id << 24);
- /* for the first channel init the regs */
- if (dev_priv->fifo_alloc_count == 0)
- restore_ctx_regs(dev, channel);
-
-
- //XXX should be saved/restored for each fifo
- //we supposed here we have X fifo and only one 3D fifo.
- nv10_praph_pipe(dev);
+ nv10_graph_create_pipe(chan);
return 0;
}
-void nv10_graph_destroy_context(drm_device_t *dev, int channel)
-{
-}
-
-int nv10_graph_load_context(drm_device_t *dev, int channel)
+void nv10_graph_destroy_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
- return 0;
-}
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int chid;
+ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
-int nv10_graph_save_context(drm_device_t *dev, int channel)
-{
- DRM_ERROR("stub!\n");
- return 0;
+	/* does this avoid a potential context switch while we are writing graph
+	 * regs, or should we mask the graph interrupt instead?
+	 */
+ NV_WRITE(NV04_PGRAPH_FIFO,0x0);
+ if (chid == chan->id) {
+		DRM_INFO("cleaning a channel with graph in current context\n");
+ nouveau_wait_for_idle(dev);
+		DRM_INFO("resetting current graph context\n");
+ nv10_graph_create_context(chan);
+ nv10_graph_load_context(chan);
+ }
+ NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
-int nv10_graph_init(drm_device_t *dev) {
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+int nv10_graph_init(struct drm_device *dev) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
@@ -688,16 +829,23 @@ int nv10_graph_init(drm_device_t *dev) {
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |
NV_PMC_ENABLE_PGRAPH);
- NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700);
- NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810);
- NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0030 |
+ //NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x24E00810); /* 0x25f92ad9 */
+ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x25f92ad9);
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x55DE0830 |
(1<<29) |
(1<<31));
+ if (dev_priv->chipset>=0x17) {
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x1f000000);
+ NV_WRITE(0x004006b0, 0x40000020);
+ }
+ else
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);
/* copy tile info from PFB */
for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
@@ -707,6 +855,10 @@ int nv10_graph_init(drm_device_t *dev) {
NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));
}
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH2, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH3, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CTX_SWITCH4, 0x00000000);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_STATE , 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_FIFO , 0x00000001);
@@ -714,7 +866,7 @@ int nv10_graph_init(drm_device_t *dev) {
return 0;
}
-void nv10_graph_takedown(drm_device_t *dev)
+void nv10_graph_takedown(struct drm_device *dev)
{
}
diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c
index 1b8a6727..c163daf9 100644
--- a/shared-core/nv20_graph.c
+++ b/shared-core/nv20_graph.c
@@ -29,42 +29,36 @@
#define NV20_GRCTX_SIZE (3529*4)
-int nv20_graph_create_context(drm_device_t *dev, int channel) {
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+int nv20_graph_create_context(struct nouveau_channel *chan) {
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned int ctx_size = NV20_GRCTX_SIZE;
- int i;
+ int ret;
- /* Alloc and clear RAMIN to store the context */
- chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
- if (!chan->ramin_grctx)
- return DRM_ERR(ENOMEM);
- for (i=0; i<ctx_size; i+=4)
- INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx)))
+ return ret;
/* Initialise default context values */
- INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */
+ INSTANCE_WR(chan->ramin_grctx->gpuobj, 10, chan->id<<24); /* CTX_USER */
- INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx));
+ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id,
+ chan->ramin_grctx->instance >> 4);
return 0;
}
-void nv20_graph_destroy_context(drm_device_t *dev, int channel) {
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+void nv20_graph_destroy_context(struct nouveau_channel *chan) {
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (chan->ramin_grctx) {
- nouveau_instmem_free(dev, chan->ramin_grctx);
- chan->ramin_grctx = NULL;
- }
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
- INSTANCE_WR(dev_priv->ctx_table, channel, 0);
+ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0);
}
-static void nv20_graph_rdi(drm_device_t *dev) {
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
+static void nv20_graph_rdi(struct drm_device *dev) {
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000);
@@ -76,16 +70,16 @@ static void nv20_graph_rdi(drm_device_t *dev) {
/* Save current context (from PGRAPH) into the channel's context
*/
-int nv20_graph_save_context(drm_device_t *dev, int channel) {
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
+int nv20_graph_save_context(struct nouveau_channel *chan) {
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t instance;
- instance = INSTANCE_RD(dev_priv->ctx_table, channel);
+ instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id);
if (!instance) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx))
+ if (instance != (chan->ramin_grctx->instance >> 4))
DRM_ERROR("nv20_graph_save_context : bad instance\n");
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance);
@@ -96,48 +90,53 @@ int nv20_graph_save_context(drm_device_t *dev, int channel) {
/* Restore the context for a specific channel into PGRAPH
*/
-int nv20_graph_load_context(drm_device_t *dev, int channel) {
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
+int nv20_graph_load_context(struct nouveau_channel *chan) {
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t instance;
- instance = INSTANCE_RD(dev_priv->ctx_table, channel);
+ instance = INSTANCE_RD(dev_priv->ctx_table->gpuobj, chan->id);
if (!instance) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (instance != nouveau_chip_instance_get(dev, dev_priv->fifos[channel].ramin_grctx))
+ if (instance != (chan->ramin_grctx->instance >> 4))
DRM_ERROR("nv20_graph_load_context_current : bad instance\n");
- NV_WRITE(NV10_PGRAPH_CTX_USER, channel << 24);
+ NV_WRITE(NV10_PGRAPH_CTX_USER, chan->id << 24);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_SIZE, instance);
NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_POINTER, 1 /* restore ctx */);
return 0;
}
-void nouveau_nv20_context_switch(drm_device_t *dev)
+void nouveau_nv20_context_switch(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- int channel, channel_old;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *next, *last;
+ int chid;
+
+ chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
+ next = dev_priv->fifos[chid];
- channel=NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1);
- channel_old = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
+ chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1);
+ last = dev_priv->fifos[chid];
- DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel);
+ DRM_DEBUG("NV: PGRAPH context switch interrupt channel %x -> %x\n",
+ last->id, next->id);
NV_WRITE(NV04_PGRAPH_FIFO,0x0);
- nv20_graph_save_context(dev, channel_old);
+ nv20_graph_save_context(last);
nouveau_wait_for_idle(dev);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000000);
- nv20_graph_load_context(dev, channel);
+ nv20_graph_load_context(next);
nouveau_wait_for_idle(dev);
- if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != channel)
- DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", channel, NV_READ(NV10_PGRAPH_CTX_USER) >> 24);
+ if ((NV_READ(NV10_PGRAPH_CTX_USER) >> 24) != next->id)
+ DRM_ERROR("nouveau_nv20_context_switch : wrong channel restored %x %x!!!\n", next->id, NV_READ(NV10_PGRAPH_CTX_USER) >> 24);
NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);
NV_WRITE(NV10_PGRAPH_FFINTFC_ST2, NV_READ(NV10_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF);
@@ -145,11 +144,11 @@ void nouveau_nv20_context_switch(drm_device_t *dev)
NV_WRITE(NV04_PGRAPH_FIFO,0x1);
}
-int nv20_graph_init(drm_device_t *dev) {
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
- int i;
+int nv20_graph_init(struct drm_device *dev) {
+ struct drm_nouveau_private *dev_priv =
+ (struct drm_nouveau_private *)dev->dev_private;
uint32_t tmp, vramsz;
+ int ret, i;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@@ -158,20 +157,20 @@ int nv20_graph_init(drm_device_t *dev) {
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
- dev_priv->ctx_table = nouveau_instmem_alloc(dev, dev_priv->ctx_table_size, 4);
- if (!dev_priv->ctx_table)
- return DRM_ERR(ENOMEM);
-
- for (i=0; i< dev_priv->ctx_table_size; i+=4)
- INSTANCE_WR(dev_priv->ctx_table, i/4, 0x00000000);
+ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table)))
+ return ret;
- NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, nouveau_chip_instance_get(dev, dev_priv->ctx_table));
+ NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
//XXX need to be done and save/restore for each fifo ???
nv20_graph_rdi(dev);
- NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
@@ -241,7 +240,10 @@ int nv20_graph_init(drm_device_t *dev) {
return 0;
}
-void nv20_graph_takedown(drm_device_t *dev)
+void nv20_graph_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
}
diff --git a/shared-core/nv30_graph.c b/shared-core/nv30_graph.c
index 7a87990a..590a5c33 100644
--- a/shared-core/nv30_graph.c
+++ b/shared-core/nv30_graph.c
@@ -8,148 +8,2764 @@
#include "nouveau_drm.h"
/*
- * This is obviously not the correct size.
+ * There are 3 families:
+ * NV30 is 0x10de:0x030*
+ * NV31 is 0x10de:0x031*
+ *
+ * NV34 is 0x10de:0x032*
+ *
+ * NV35 is 0x10de:0x033* (NV35 and NV36 are the same)
+ * NV36 is 0x10de:0x034*
+ *
+ * Not seen in the wild, no dumps (probably NV35):
+ * NV37 is 0x10de:0x00fc, 0x10de:0x00fd
+ * NV38 is 0x10de:0x0333, 0x10de:0x00fe
+ *
*/
-#define NV30_GRCTX_SIZE (23840)
-/*TODO: deciper what each offset in the context represents. The below
- * contexts are taken from dumps just after the 3D object is
- * created.
- */
-static void nv30_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+
+#define NV30_31_GRCTX_SIZE (22392)
+#define NV34_GRCTX_SIZE (18140)
+#define NV35_36_GRCTX_SIZE (22396)
+
+static void nv30_31_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
-
- INSTANCE_WR(ctx, 0x28/4, 0x10000000);
- INSTANCE_WR(ctx, 0x40c/4, 0x00000101);
- INSTANCE_WR(ctx, 0x420/4, 0x00000111);
- INSTANCE_WR(ctx, 0x424/4, 0x00000060);
- INSTANCE_WR(ctx, 0x440/4, 0x00000080);
- INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
- INSTANCE_WR(ctx, 0x448/4, 0x00000001);
- INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
- INSTANCE_WR(ctx, 0x448/4, 0xffff0000);
- INSTANCE_WR(ctx, 0x4dc/4, 0xfff00000);
- INSTANCE_WR(ctx, 0x4e0/4, 0xfff00000);
- INSTANCE_WR(ctx, 0x4e8/4, 0x00011100);
-
- for (i = 0x504; i <= 0x540; i += 4)
- INSTANCE_WR(ctx, i/4, 0x7ff00000);
-
- INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff);
- INSTANCE_WR(ctx, 0x588/4, 0x00000080);
- INSTANCE_WR(ctx, 0x58c/4, 0x30201000);
- INSTANCE_WR(ctx, 0x590/4, 0x70605040);
- INSTANCE_WR(ctx, 0x594/4, 0xb8a89888);
- INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8);
- INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000);
-
- for (i = 0x604; i <= 0x640; i += 4)
- INSTANCE_WR(ctx, i/4, 0x00010588);
-
- for (i = 0x644; i <= 0x680; i += 4)
- INSTANCE_WR(ctx, i/4, 0x00030303);
-
- for (i = 0x6c4; i <= 0x700; i += 4)
- INSTANCE_WR(ctx, i/4, 0x0008aae4);
-
- for (i = 0x704; i <= 0x740; i += 4)
- INSTANCE_WR(ctx, i/4, 0x1012000);
-
- for (i = 0x744; i <= 0x780; i += 4)
- INSTANCE_WR(ctx, i/4, 0x0080008);
-
- INSTANCE_WR(ctx, 0x860/4, 0x00040000);
- INSTANCE_WR(ctx, 0x864/4, 0x00010000);
- INSTANCE_WR(ctx, 0x868/4, 0x00040000);
- INSTANCE_WR(ctx, 0x86c/4, 0x00040000);
- INSTANCE_WR(ctx, 0x870/4, 0x00040000);
- INSTANCE_WR(ctx, 0x874/4, 0x00040000);
-
- for (i = 0x00; i <= 0x1170; i += 0x10)
- {
- INSTANCE_WR(ctx, (0x1f24 + i)/4, 0x000c001b);
- INSTANCE_WR(ctx, (0x1f20 + i)/4, 0x0436086c);
- INSTANCE_WR(ctx, (0x1f1c + i)/4, 0x10700ff9);
- }
-
- INSTANCE_WR(ctx, 0x30bc/4, 0x0000ffff);
- INSTANCE_WR(ctx, 0x30c0/4, 0x0000ffff);
- INSTANCE_WR(ctx, 0x30c4/4, 0x0000ffff);
- INSTANCE_WR(ctx, 0x30c8/4, 0x0000ffff);
-
- INSTANCE_WR(ctx, 0x380c/4, 0x3f800000);
- INSTANCE_WR(ctx, 0x3450/4, 0x3f800000);
- INSTANCE_WR(ctx, 0x3820/4, 0x3f800000);
- INSTANCE_WR(ctx, 0x3854/4, 0x3f800000);
- INSTANCE_WR(ctx, 0x3850/4, 0x3f000000);
- INSTANCE_WR(ctx, 0x384c/4, 0x40000000);
- INSTANCE_WR(ctx, 0x3868/4, 0xbf800000);
- INSTANCE_WR(ctx, 0x3860/4, 0x3f800000);
- INSTANCE_WR(ctx, 0x386c/4, 0x40000000);
- INSTANCE_WR(ctx, 0x3870/4, 0xbf800000);
-
- for (i = 0x4e0; i <= 0x4e1c; i += 4)
- INSTANCE_WR(ctx, i/4, 0x001c527d);
- INSTANCE_WR(ctx, 0x4e40, 0x001c527c);
-
- INSTANCE_WR(ctx, 0x5680/4, 0x000a0000);
- INSTANCE_WR(ctx, 0x87c/4, 0x10000000);
- INSTANCE_WR(ctx, 0x28/4, 0x10000011);
+
+ INSTANCE_WR(ctx, 0x410/4, 0x00000101);
+ INSTANCE_WR(ctx, 0x424/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x428/4, 0x00000060);
+ INSTANCE_WR(ctx, 0x444/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x448/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x44c/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x460/4, 0x44400000);
+ INSTANCE_WR(ctx, 0x48c/4, 0xffff0000);
+ for(i = 0x4e0; i< 0x4e8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x4ec/4, 0x00011100);
+ for(i = 0x508; i< 0x548; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x550/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x58c/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x590/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x594/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x598/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x59c/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x5b0/4, 0xb0000000);
+ for(i = 0x600; i< 0x640; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00010588);
+ for(i = 0x640; i< 0x680; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00030303);
+ for(i = 0x6c0; i< 0x700; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0008aae4);
+ for(i = 0x700; i< 0x740; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for(i = 0x740; i< 0x780; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x85c/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x860/4, 0x00010000);
+ for(i = 0x864; i< 0x874; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00040004);
+ INSTANCE_WR(ctx, 0x1f18/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f1c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f20/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f28/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f2c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f30/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f38/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f3c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f40/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f48/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f4c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f50/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f58/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f5c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f60/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f68/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f6c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f70/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f78/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f7c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f80/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f88/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f8c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f90/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f98/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f9c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fa0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fa8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fb0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fb8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fbc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fc0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fc8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fcc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fd0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fd8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fdc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fe0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fe8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ff0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1ff8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ffc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2000/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2008/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x200c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2010/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2018/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x201c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2020/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2028/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x202c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2030/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2038/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x203c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2040/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2048/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x204c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2050/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2058/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x205c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2060/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2068/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x206c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2070/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2078/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x207c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2080/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2088/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x208c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2090/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2098/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x209c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2100/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2108/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x210c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2110/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2118/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x211c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2120/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2128/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x212c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2130/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2138/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x213c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2140/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2148/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x214c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2150/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2158/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x215c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2160/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2168/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x216c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2170/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2178/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x217c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2180/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2188/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x218c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2190/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2198/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x219c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2200/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2208/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x220c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2210/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2218/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x221c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2220/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2228/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x222c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2230/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2238/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x223c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2240/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2248/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x224c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2250/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2258/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x225c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2260/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2268/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x226c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2270/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2278/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x227c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2280/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2288/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x228c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2290/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2298/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x229c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2300/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2308/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x230c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2310/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2318/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x231c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2320/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2328/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x232c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2330/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2338/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x233c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2340/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2348/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x234c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2350/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2358/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x235c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2360/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2368/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x236c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2370/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2378/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x237c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2380/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2388/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x238c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2390/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2398/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x239c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2400/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2408/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x240c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2410/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2418/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x241c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2420/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2428/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x242c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2430/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2438/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x243c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2440/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2448/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x244c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2450/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2458/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x245c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2460/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2468/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x246c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2470/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2478/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x247c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2480/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2488/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x248c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2490/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2498/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x249c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2500/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2508/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x250c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2510/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2518/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x251c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2520/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2528/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x252c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2530/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2538/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x253c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2540/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2548/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x254c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2550/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2558/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x255c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2560/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2568/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x256c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2570/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2578/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x257c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2580/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2588/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x258c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2590/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2598/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x259c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2600/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2608/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x260c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2610/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2618/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x261c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2620/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2628/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x262c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2630/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2638/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x263c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2640/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2648/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x264c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2650/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2658/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x265c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2660/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2668/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x266c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2670/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2678/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x267c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2680/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2688/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x268c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2690/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2698/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x269c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2700/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2708/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x270c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2710/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2718/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x271c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2720/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2728/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x272c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2730/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2738/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x273c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2740/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2748/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x274c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2750/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2758/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x275c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2760/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2768/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x276c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2770/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2778/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x277c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2780/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2788/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x278c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2790/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2798/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x279c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2800/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2808/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x280c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2810/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2818/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x281c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2820/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2828/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x282c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2830/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2838/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x283c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2840/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2848/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x284c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2850/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2858/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x285c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2860/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2868/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x286c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2870/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2878/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x287c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2880/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2888/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x288c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2890/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2898/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x289c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2900/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2908/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x290c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2910/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2918/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x291c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2920/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2928/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x292c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2930/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2938/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x293c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2940/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2948/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x294c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2950/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2958/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x295c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2960/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2968/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x296c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2970/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2978/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x297c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2980/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2988/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x298c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2990/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2998/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x299c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29a0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29a8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29ac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29b0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29b8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29bc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29c0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29c8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29cc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29d0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29d8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29dc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29e0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29e8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29ec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29f0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29f8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29fc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a00/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a08/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a0c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a10/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a18/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a1c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a20/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a28/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a2c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a30/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a38/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a3c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a40/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a48/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a4c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a50/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a58/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a5c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a60/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a68/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a6c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a70/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a78/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a7c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a80/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a88/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a8c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a90/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a98/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a9c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2aa0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2aa8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2aac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ab0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ab8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2abc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ac0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ac8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2acc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ad0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ad8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2adc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ae0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ae8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2aec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2af0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2af8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2afc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b00/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b08/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b0c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b10/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b18/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b1c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b20/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b28/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b2c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b30/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b38/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b3c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b40/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b48/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b4c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b50/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b58/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b5c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b60/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b68/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b6c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b70/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b78/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b7c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b80/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b88/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b8c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b90/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b98/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b9c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ba0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ba8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bb0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bb8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bbc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bc0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bc8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bcc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bd0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bd8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bdc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2be0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2be8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bf0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bf8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bfc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c00/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c08/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c0c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c10/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c18/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c1c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c20/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c28/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c2c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c30/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c38/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c3c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c40/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c48/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c4c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c50/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c58/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c5c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c60/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c68/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c6c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c70/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c78/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c7c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c80/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c88/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c8c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c90/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c98/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c9c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ca0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ca8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cb0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cb8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cbc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cc0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cc8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ccc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cd0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cd8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cdc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ce0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ce8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cf0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cf8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cfc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d00/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d08/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d0c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d10/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d18/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d1c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d20/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d28/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d2c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d30/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d38/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d3c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d40/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d48/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d4c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d50/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d58/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d5c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d60/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d68/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d6c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d70/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d78/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d7c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d80/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d88/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d8c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d90/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d98/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d9c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2da0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2da8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2dac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2db0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2db8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2dbc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2dc0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2dc8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2dcc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2dd0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2dd8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ddc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2de0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2de8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2dec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2df0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2df8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2dfc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e00/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e08/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e0c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e10/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e18/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e1c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e20/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e28/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e2c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e30/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e38/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e3c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e40/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e48/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e4c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e50/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e58/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e5c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e60/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e68/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e6c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e70/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e78/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e7c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e80/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e88/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e8c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e90/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e98/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e9c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ea0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ea8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2eac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2eb0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2eb8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ebc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ec0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ec8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ecc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ed0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ed8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2edc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ee0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ee8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2eec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ef0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ef8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2efc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f00/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f08/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f0c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f10/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f18/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f1c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f20/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f28/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f2c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f30/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f38/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f3c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f40/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f48/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f4c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f50/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f58/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f5c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f60/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f68/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f6c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f70/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f78/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f7c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f80/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f88/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f8c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f90/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f98/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f9c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fa0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fa8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fac/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fb0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fb8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fbc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fc0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fc8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fcc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fd0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fd8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fdc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fe0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fe8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fec/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ff0/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ff8/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ffc/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3000/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3008/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x300c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3010/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3018/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x301c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3020/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3028/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x302c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3030/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3038/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x303c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3040/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3048/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x304c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3050/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3058/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x305c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3060/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3068/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x306c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3070/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3078/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x307c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3080/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x3088/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x308c/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3090/4, 0x000c001b);
+ for(i = 0x30b8; i< 0x30c8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x344c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3808/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x381c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3848/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x384c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3850/4, 0x3f000000);
+ INSTANCE_WR(ctx, 0x3858/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x385c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3864/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x386c/4, 0xbf800000);
}
+static void nv34_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
-int nv30_graph_create_context(drm_device_t *dev, int channel)
+ INSTANCE_WR(ctx, 0x40c/4, 0x01000101);
+ INSTANCE_WR(ctx, 0x420/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x424/4, 0x00000060);
+ INSTANCE_WR(ctx, 0x440/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x448/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
+ INSTANCE_WR(ctx, 0x480/4, 0xffff0000);
+ for(i = 0x4d4; i< 0x4dc; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x4e0/4, 0x00011100);
+ for(i = 0x4fc; i< 0x53c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x544/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x57c/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x580/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x584/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x588/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x58c/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x5a0/4, 0xb0000000);
+ for(i = 0x5f0; i< 0x630; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00010588);
+ for(i = 0x630; i< 0x670; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00030303);
+ for(i = 0x6b0; i< 0x6f0; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0008aae4);
+ for(i = 0x6f0; i< 0x730; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for(i = 0x730; i< 0x770; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x850/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x854/4, 0x00010000);
+ for(i = 0x858; i< 0x868; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00040004);
+ INSTANCE_WR(ctx, 0x15ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x15b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x15b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x15bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x15c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x15c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x15cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x15d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x15d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x15dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x15e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x15e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x15ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x15f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x15f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x15fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1600/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1604/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x160c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1610/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1614/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x161c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1620/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1624/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x162c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1630/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1634/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x163c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1640/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1644/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x164c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1650/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1654/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x165c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1660/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1664/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x166c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1670/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1674/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x167c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1680/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1684/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x168c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1690/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1694/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x169c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x16a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x16a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x16ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x16b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x16b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x16bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x16c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x16c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x16cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x16d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x16d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x16dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x16e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x16e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x16ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x16f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x16f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x16fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1700/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1704/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x170c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1710/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1714/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x171c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1720/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1724/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x172c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1730/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1734/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x173c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1740/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1744/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x174c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1750/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1754/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x175c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1760/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1764/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x176c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1770/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1774/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x177c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1780/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1784/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x178c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1790/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1794/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x179c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x17a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x17a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x17ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x17b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x17b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x17bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x17c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x17c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x17cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x17d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x17d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x17dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x17e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x17e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x17ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x17f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x17f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x17fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1800/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1804/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x180c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1810/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1814/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x181c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1820/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1824/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x182c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1830/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1834/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x183c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1840/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1844/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x184c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1850/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1854/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x185c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1860/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1864/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x186c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1870/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1874/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x187c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1880/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1884/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x188c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1890/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1894/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x189c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x18a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x18a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x18ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x18b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x18b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x18bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x18c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x18c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x18cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x18d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x18d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x18dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x18e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x18e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x18ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x18f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x18f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x18fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1900/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1904/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x190c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1910/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1914/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x191c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1920/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1924/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x192c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1930/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1934/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x193c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1940/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1944/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x194c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1950/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1954/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x195c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1960/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1964/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x196c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1970/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1974/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x197c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1980/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1984/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x198c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1990/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1994/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x199c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x19a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x19a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x19ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x19b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x19b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x19bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x19c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x19c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x19cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x19d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x19d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x19dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x19e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x19e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x19ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x19f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x19f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x19fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1a90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1a94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1a9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1aa0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1aa4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1aac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ab0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ab4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1abc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ac0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ac4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1acc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ad0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ad4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1adc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ae0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ae4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1aec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1af0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1af4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1afc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1b90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1b94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1b9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ba0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ba4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1bac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1bb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1bb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1bbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1bc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1bc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1bcc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1bd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1bd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1bdc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1be0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1be4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1bec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1bf0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1bf4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1bfc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1c90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1c94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1c9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ca0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ca4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1cac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1cb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1cb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1cbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1cc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1cc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1ccc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1cd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1cd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1cdc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ce0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ce4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1cec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1cf0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1cf4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1cfc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1d90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1d94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1d9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1da0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1da4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1dac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1db0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1db4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1dbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1dc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1dc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1dcc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1dd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1dd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1ddc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1de0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1de4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1dec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1df0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1df4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1dfc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1e90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1e94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1e9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ea0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ea4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1eac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1eb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1eb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1ebc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ec0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ec4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1ecc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ed0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ed4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1edc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ee0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ee4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1eec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ef0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ef4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1efc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fa0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fa4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fcc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fdc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fe0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fe4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ff0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ff4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1ffc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2000/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2004/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x200c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2010/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2014/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x201c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2020/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2024/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x202c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2030/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2034/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x203c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2040/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2044/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x204c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2050/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2054/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x205c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2060/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2064/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x206c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2070/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2074/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x207c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2080/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2084/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x208c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2090/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2094/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x209c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2100/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2104/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x210c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2110/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2114/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x211c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2120/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2124/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x212c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2130/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2134/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x213c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2140/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2144/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x214c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2150/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2154/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x215c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2160/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2164/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x216c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2170/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2174/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x217c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2180/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2184/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x218c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2190/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2194/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x219c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2200/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2204/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x220c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2210/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2214/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x221c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2220/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2224/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x222c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2230/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2234/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x223c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2240/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2244/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x224c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2250/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2254/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x225c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2260/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2264/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x226c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2270/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2274/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x227c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2280/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2284/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x228c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2290/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2294/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x229c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2300/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2304/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x230c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2310/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2314/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x231c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2320/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2324/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x232c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2330/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2334/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x233c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2340/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2344/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x234c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2350/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2354/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x235c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2360/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2364/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x236c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2370/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2374/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x237c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2380/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2384/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x238c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2390/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2394/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x239c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2400/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2404/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x240c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2410/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2414/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x241c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2420/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2424/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x242c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2430/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2434/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x243c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2440/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2444/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x244c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2450/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2454/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x245c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2460/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2464/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x246c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2470/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2474/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x247c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2480/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2484/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x248c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2490/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2494/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x249c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2500/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2504/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x250c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2510/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2514/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x251c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2520/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2524/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x252c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2530/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2534/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x253c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2540/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2544/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x254c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2550/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2554/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x255c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2560/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2564/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x256c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2570/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2574/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x257c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2580/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2584/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x258c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2590/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2594/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x259c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2600/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2604/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x260c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2610/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2614/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x261c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2620/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2624/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x262c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2630/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2634/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x263c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2640/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2644/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x264c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2650/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2654/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x265c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2660/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2664/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x266c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2670/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2674/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x267c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2680/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2684/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x268c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2690/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2694/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x269c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2700/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2704/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x270c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2710/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2714/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x271c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2720/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2724/4, 0x000c001b);
+ for(i = 0x274c; i< 0x275c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x2ae0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2e9c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2eb0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2edc/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x2ee0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2ee4/4, 0x3f000000);
+ INSTANCE_WR(ctx, 0x2eec/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x2ef0/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x2ef8/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x2f00/4, 0xbf800000);
+}
+
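The nv34 table above writes the same three dwords (0x10700ff9, 0x0436086c, 0x000c001b) at a 0x10-byte stride from offset 0x15ac through 0x2724. Purely as an illustration of what that run of writes amounts to -- this sketch is not part of the patch, the helper name is hypothetical, and it assumes INSTANCE_WR and dev_priv are used exactly as in the surrounding functions -- the same stores could be generated with a loop:

static void nv34_graph_write_repeated_block(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i;

	/* Same three values every 0x10 bytes, offsets 0x15ac..0x2724 inclusive. */
	for (i = 0x15ac; i <= 0x271c; i += 0x10) {
		INSTANCE_WR(ctx, (i + 0x0)/4, 0x10700ff9);
		INSTANCE_WR(ctx, (i + 0x4)/4, 0x0436086c);
		INSTANCE_WR(ctx, (i + 0x8)/4, 0x000c001b);
	}
}

The patch itself keeps the fully unrolled table, which presumably makes the initial context image easier to compare against dumps taken from the hardware.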
+static void nv35_36_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
- void (*ctx_init)(drm_device_t *, struct mem_block *);
- unsigned int ctx_size;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
+ INSTANCE_WR(ctx, 0x40c/4, 0x00000101);
+ INSTANCE_WR(ctx, 0x420/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x424/4, 0x00000060);
+ INSTANCE_WR(ctx, 0x440/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x444/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x448/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x45c/4, 0x44400000);
+ INSTANCE_WR(ctx, 0x488/4, 0xffff0000);
+ for(i = 0x4dc; i< 0x4e4; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x4e8/4, 0x00011100);
+ for(i = 0x504; i< 0x544; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x54c/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x588/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x58c/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x590/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x594/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x598/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x5ac/4, 0xb0000000);
+ for(i = 0x604; i< 0x644; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00010588);
+ for(i = 0x644; i< 0x684; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00030303);
+ for(i = 0x6c4; i< 0x704; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0008aae4);
+ for(i = 0x704; i< 0x744; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for(i = 0x744; i< 0x784; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x860/4, 0x00040000);
+ INSTANCE_WR(ctx, 0x864/4, 0x00010000);
+ for(i = 0x868; i< 0x878; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00040004);
+ INSTANCE_WR(ctx, 0x1f1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1f90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1f94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1f9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fa0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fa4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fcc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fdc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1fe0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1fe4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1fec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x1ff0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x1ff4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x1ffc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2000/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2004/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x200c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2010/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2014/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x201c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2020/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2024/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x202c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2030/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2034/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x203c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2040/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2044/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x204c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2050/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2054/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x205c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2060/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2064/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x206c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2070/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2074/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x207c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2080/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2084/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x208c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2090/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2094/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x209c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x20f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x20f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x20fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2100/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2104/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x210c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2110/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2114/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x211c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2120/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2124/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x212c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2130/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2134/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x213c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2140/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2144/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x214c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2150/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2154/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x215c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2160/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2164/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x216c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2170/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2174/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x217c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2180/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2184/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x218c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2190/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2194/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x219c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x21f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x21f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x21fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2200/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2204/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x220c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2210/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2214/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x221c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2220/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2224/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x222c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2230/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2234/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x223c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2240/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2244/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x224c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2250/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2254/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x225c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2260/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2264/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x226c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2270/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2274/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x227c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2280/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2284/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x228c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2290/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2294/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x229c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x22f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x22f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x22fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2300/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2304/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x230c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2310/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2314/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x231c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2320/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2324/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x232c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2330/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2334/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x233c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2340/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2344/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x234c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2350/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2354/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x235c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2360/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2364/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x236c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2370/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2374/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x237c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2380/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2384/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x238c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2390/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2394/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x239c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x23f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x23f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x23fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2400/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2404/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x240c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2410/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2414/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x241c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2420/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2424/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x242c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2430/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2434/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x243c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2440/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2444/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x244c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2450/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2454/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x245c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2460/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2464/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x246c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2470/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2474/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x247c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2480/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2484/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x248c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2490/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2494/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x249c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x24f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x24f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x24fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2500/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2504/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x250c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2510/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2514/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x251c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2520/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2524/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x252c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2530/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2534/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x253c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2540/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2544/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x254c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2550/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2554/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x255c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2560/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2564/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x256c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2570/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2574/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x257c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2580/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2584/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x258c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2590/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2594/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x259c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x25f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x25f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x25fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2600/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2604/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x260c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2610/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2614/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x261c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2620/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2624/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x262c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2630/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2634/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x263c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2640/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2644/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x264c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2650/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2654/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x265c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2660/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2664/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x266c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2670/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2674/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x267c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2680/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2684/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x268c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2690/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2694/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x269c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x26f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x26f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x26fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2700/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2704/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x270c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2710/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2714/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x271c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2720/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2724/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x272c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2730/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2734/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x273c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2740/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2744/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x274c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2750/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2754/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x275c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2760/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2764/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x276c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2770/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2774/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x277c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2780/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2784/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x278c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2790/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2794/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x279c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x27f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x27f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x27fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2800/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2804/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x280c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2810/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2814/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x281c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2820/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2824/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x282c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2830/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2834/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x283c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2840/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2844/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x284c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2850/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2854/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x285c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2860/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2864/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x286c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2870/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2874/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x287c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2880/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2884/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x288c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2890/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2894/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x289c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x28f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x28f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x28fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2900/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2904/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x290c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2910/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2914/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x291c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2920/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2924/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x292c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2930/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2934/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x293c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2940/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2944/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x294c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2950/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2954/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x295c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2960/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2964/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x296c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2970/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2974/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x297c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2980/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2984/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x298c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2990/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2994/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x299c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29a0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29a4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29ac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29b0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29b4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29bc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29c0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29c4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29cc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29d0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29d4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29dc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29e0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29e4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29ec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x29f0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x29f4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x29fc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2a90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2a94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2a9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2aa0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2aa4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2aac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ab0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ab4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2abc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ac0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ac4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2acc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ad0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ad4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2adc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ae0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ae4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2aec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2af0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2af4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2afc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2b90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2b94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2b9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ba0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ba4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bcc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bdc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2be0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2be4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2bf0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2bf4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2bfc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2c90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2c94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2c9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ca0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ca4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ccc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cdc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ce0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ce4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2cf0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2cf4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2cfc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2d90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2d94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2d9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2da0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2da4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2dac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2db0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2db4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2dbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2dc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2dc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2dcc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2dd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2dd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ddc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2de0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2de4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2dec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2df0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2df4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2dfc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2e90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2e94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2e9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ea0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ea4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2eac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2eb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2eb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ebc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ec0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ec4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ecc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ed0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ed4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2edc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ee0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ee4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2eec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ef0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ef4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2efc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f00/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f04/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f0c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f10/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f14/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f1c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f20/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f24/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f2c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f30/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f34/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f3c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f40/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f44/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f4c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f50/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f54/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f5c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f60/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f64/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f6c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f70/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f74/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f7c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f80/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f84/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f8c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2f90/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2f94/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2f9c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fa0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fa4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fac/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fb0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fb4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fbc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fc0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fc4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fcc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fd0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fd4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fdc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2fe0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2fe4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2fec/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x2ff0/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x2ff4/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x2ffc/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3000/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3004/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x300c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3010/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3014/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x301c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3020/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3024/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x302c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3030/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3034/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x303c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3040/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3044/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x304c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3050/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3054/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x305c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3060/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3064/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x306c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3070/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3074/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x307c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3080/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3084/4, 0x000c001b);
+ INSTANCE_WR(ctx, 0x308c/4, 0x10700ff9);
+ INSTANCE_WR(ctx, 0x3090/4, 0x0436086c);
+ INSTANCE_WR(ctx, 0x3094/4, 0x000c001b);
+ for (i = 0x30bc; i < 0x30cc; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x3450/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x380c/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3820/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x384c/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x3850/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3854/4, 0x3f000000);
+ INSTANCE_WR(ctx, 0x385c/4, 0x40000000);
+ INSTANCE_WR(ctx, 0x3860/4, 0x3f800000);
+ INSTANCE_WR(ctx, 0x3868/4, 0xbf800000);
+ INSTANCE_WR(ctx, 0x3870/4, 0xbf800000);
+}
+
+int nv30_graph_create_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
+ unsigned int ctx_size;
+ int ret;
+
switch (dev_priv->chipset) {
+ case 0x30:
+ case 0x31:
+ ctx_size = NV30_31_GRCTX_SIZE;
+ ctx_init = nv30_31_graph_context_init;
+ break;
+ case 0x34:
+ ctx_size = NV34_GRCTX_SIZE;
+ ctx_init = nv34_graph_context_init;
+ break;
+ case 0x35:
+ case 0x36:
+ ctx_size = NV35_36_GRCTX_SIZE;
+ ctx_init = nv35_36_graph_context_init;
+ break;
default:
- ctx_size = NV30_GRCTX_SIZE;
- ctx_init = nv30_graph_context_init;
+ ctx_size = 0;
+ ctx_init = nv35_36_graph_context_init;
+ DRM_ERROR("Please contact the devs if you want your NV%x card to work\n", dev_priv->chipset);
break;
}
- /* Alloc and clear RAMIN to store the context */
- chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
- if (!chan->ramin_grctx)
- return DRM_ERR(ENOMEM);
- for (i=0; i<ctx_size; i+=4)
- INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx)))
+ return ret;
/* Initialise default context values */
- ctx_init(dev, chan->ramin_grctx);
-
- INSTANCE_WR(chan->ramin_grctx, 10, channel << 24); /* CTX_USER */
- INSTANCE_WR(dev_priv->ctx_table, channel, nouveau_chip_instance_get(dev, chan->ramin_grctx));
+ ctx_init(dev, chan->ramin_grctx->gpuobj);
+
+ INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x28/4, (chan->id<<24)|0x1); /* CTX_USER */
+ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id,
+ chan->ramin_grctx->instance >> 4);
return 0;
}
-void nv30_graph_destroy_context(drm_device_t *dev, int channel)
+void nv30_graph_destroy_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- if (chan->ramin_grctx) {
- nouveau_instmem_free(dev, chan->ramin_grctx);
- chan->ramin_grctx = NULL;
- }
+ if (chan->ramin_grctx)
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
- INSTANCE_WR(dev_priv->ctx_table, channel, 0);
+ INSTANCE_WR(dev_priv->ctx_table->gpuobj, chan->id, 0);
}
static int
-nouveau_graph_wait_idle(drm_device_t *dev)
+nouveau_graph_wait_idle(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int tv = 1000;
while (tv--) {
@@ -159,20 +2775,20 @@ nouveau_graph_wait_idle(drm_device_t *dev)
if (NV_READ(0x400700)) {
DRM_ERROR("timeout!\n");
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
return 0;
}
-int nv30_graph_load_context(drm_device_t *dev, int channel)
+int nv30_graph_load_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
if (!chan->ramin_grctx)
- return DRM_ERR(EINVAL);
- inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -181,15 +2797,15 @@ int nv30_graph_load_context(drm_device_t *dev, int channel)
return nouveau_graph_wait_idle(dev);
}
-int nv30_graph_save_context(drm_device_t *dev, int channel)
+int nv30_graph_save_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
if (!chan->ramin_grctx)
- return DRM_ERR(EINVAL);
- inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,
@@ -198,12 +2814,11 @@ int nv30_graph_save_context(drm_device_t *dev, int channel)
return nouveau_graph_wait_idle(dev);
}
-int nv30_graph_init(drm_device_t *dev)
+int nv30_graph_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t vramsz, tmp;
- int i;
+ int ret, i;
NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) &
~NV_PMC_ENABLE_PGRAPH);
@@ -212,33 +2827,39 @@ int nv30_graph_init(drm_device_t *dev)
/* Create Context Pointer Table */
dev_priv->ctx_table_size = 32 * 4;
- dev_priv->ctx_table = nouveau_instmem_alloc(dev, dev_priv->ctx_table_size, 4);
- if (!dev_priv->ctx_table)
- return DRM_ERR(ENOMEM);
+ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0,
+ dev_priv->ctx_table_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->ctx_table)))
+ return ret;
- for (i=0; i< dev_priv->ctx_table_size; i+=4)
- INSTANCE_WR(dev_priv->ctx_table, i/4, 0x00000000);
+ NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE,
+ dev_priv->ctx_table->instance >> 4);
- NV_WRITE(NV10_PGRAPH_CHANNEL_CTX_TABLE, nouveau_chip_instance_get(dev, dev_priv->ctx_table));
-
- NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV03_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x401287c0);
- NV_WRITE(0x400890, 0x00140000);
- NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0de0475);
- NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x10008000);
- NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04b1f36);
+ NV_WRITE(0x400890, 0x01b463ff);
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf3de0471);
+ NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00008000);
+ NV_WRITE(NV04_PGRAPH_LIMIT_VIOL_PIX, 0xf04bdff6);
NV_WRITE(0x400B80, 0x1003d888);
- NV_WRITE(0x400B84, 0x0c000000);
- NV_WRITE(0x400B88, 0x62ff0f7f);
- NV_WRITE(0x400098, 0x000000c0);
- NV_WRITE(0x40009C, 0x0005dc00);
- NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f);
+ NV_WRITE(0x400098, 0x00000000);
+ NV_WRITE(0x40009C, 0x0005ad00);
+ NV_WRITE(0x400B88, 0x62ff00ff); // suspiciously like PGRAPH_DEBUG_2
NV_WRITE(0x4000a0, 0x00000000);
NV_WRITE(0x4000a4, 0x00000008);
+ NV_WRITE(0x4008a8, 0xb784a400);
+ NV_WRITE(0x400ba0, 0x002f8685);
+ NV_WRITE(0x400ba4, 0x00231f3f);
+ NV_WRITE(0x4008a4, 0x40000020);
+ NV_WRITE(0x400B84, 0x0c000000);
+ NV_WRITE(NV04_PGRAPH_DEBUG_2, 0x62ff0f7f);
+ NV_WRITE(0x4000c0, 0x00000016);
+ NV_WRITE(0x400780, 0x000014e4);
/* copy tile info from PFB */
for (i=0; i<NV10_PFB_TILE__SIZE; i++) {
@@ -282,7 +2903,10 @@ int nv30_graph_init(drm_device_t *dev)
return 0;
}
-void nv30_graph_takedown(drm_device_t *dev)
+void nv30_graph_takedown(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ nouveau_gpuobj_ref_del(dev, &dev_priv->ctx_table);
}
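The nv30 PGRAPH changes above move per-channel graphics contexts from the old instmem allocator onto nouveau_gpuobj references: create_context allocates a zeroed gpuobj, writes the channel id into the CTX_USER word, and records the context's instance address (shifted right by 4) in the 32-entry context pointer table indexed by chan->id; load and save simply point NV20_PGRAPH_CHANNEL_CTX_POINTER at that instance, trigger an XFER, and poll for idle. A minimal caller sketch of that lifecycle follows; the wrapper function and its call site are hypothetical and only illustrate how the nv30_graph_* entry points fit together.

/* Hypothetical sketch only: how the nv30_graph_* hooks above chain
 * together during a channel switch.  "outgoing"/"incoming" are assumed
 * nouveau_channel pointers set up elsewhere; this is not patch code. */
static int example_nv30_context_switch(struct nouveau_channel *outgoing,
				       struct nouveau_channel *incoming)
{
	int ret;

	/* Dump the current PGRAPH state into the outgoing channel's
	 * ramin_grctx gpuobj (returns -EINVAL if it has no context). */
	ret = nv30_graph_save_context(outgoing);
	if (ret)
		return ret;

	/* Point PGRAPH at the incoming channel's context and transfer it
	 * back in; both paths poll nouveau_graph_wait_idle() internally. */
	return nv30_graph_load_context(incoming);
}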
diff --git a/shared-core/nv40_fb.c b/shared-core/nv40_fb.c
index 83a7580e..2cbb40e4 100644
--- a/shared-core/nv40_fb.c
+++ b/shared-core/nv40_fb.c
@@ -4,9 +4,9 @@
#include "nouveau_drm.h"
int
-nv40_fb_init(drm_device_t *dev)
+nv40_fb_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fb_bar_size, tmp;
int num_tiles;
int i;
@@ -50,7 +50,7 @@ nv40_fb_init(drm_device_t *dev)
}
void
-nv40_fb_takedown(drm_device_t *dev)
+nv40_fb_takedown(struct drm_device *dev)
{
}
diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c
index 6f25349c..ce3f8fdd 100644
--- a/shared-core/nv40_fifo.c
+++ b/shared-core/nv40_fifo.c
@@ -28,31 +28,34 @@
#include "nouveau_drv.h"
#include "nouveau_drm.h"
-#define RAMFC_WR(offset, val) NV_WI32(fifoctx + NV40_RAMFC_##offset, (val))
-#define RAMFC_RD(offset) NV_RI32(fifoctx + NV40_RAMFC_##offset)
+
+#define RAMFC_WR(offset,val) INSTANCE_WR(chan->ramfc->gpuobj, \
+ NV40_RAMFC_##offset/4, (val))
+#define RAMFC_RD(offset) INSTANCE_RD(chan->ramfc->gpuobj, \
+ NV40_RAMFC_##offset/4)
#define NV40_RAMFC(c) (dev_priv->ramfc_offset + ((c)*NV40_RAMFC__SIZE))
#define NV40_RAMFC__SIZE 128
int
-nv40_fifo_create_context(drm_device_t *dev, int channel)
+nv40_fifo_create_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
- uint32_t fifoctx = NV40_RAMFC(channel), grctx, pushbuf;
- int i;
-
- for (i = 0; i < NV40_RAMFC__SIZE; i+=4)
- NV_WI32(fifoctx + i, 0);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
- grctx = nouveau_chip_instance_get(dev, chan->ramin_grctx);
- pushbuf = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance);
+ if ((ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
+ NV40_RAMFC__SIZE,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ NULL, &chan->ramfc)))
+ return ret;
/* Fill entries that are seen filled in dumps of nvidia driver just
* after the channel is put into DMA mode
*/
RAMFC_WR(DMA_PUT , chan->pushbuf_base);
RAMFC_WR(DMA_GET , chan->pushbuf_base);
- RAMFC_WR(DMA_INSTANCE , pushbuf);
+ RAMFC_WR(DMA_INSTANCE , chan->pushbuf->instance >> 4);
RAMFC_WR(DMA_FETCH , NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
@@ -61,28 +64,31 @@ nv40_fifo_create_context(drm_device_t *dev, int channel)
#endif
0x30000000 /* no idea.. */);
RAMFC_WR(DMA_SUBROUTINE, 0);
- RAMFC_WR(GRCTX_INSTANCE, grctx);
+ RAMFC_WR(GRCTX_INSTANCE, chan->ramin_grctx->instance >> 4);
RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF);
+ /* enable the fifo dma operation */
+ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE) | (1 << chan->id));
return 0;
}
void
-nv40_fifo_destroy_context(drm_device_t *dev, int channel)
+nv40_fifo_destroy_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV40_RAMFC(channel);
- int i;
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE) & ~(1 << chan->id));
- for (i = 0; i < NV40_RAMFC__SIZE; i+=4)
- NV_WI32(fifoctx + i, 0);
+ if (chan->ramfc)
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
-nv40_fifo_load_context(drm_device_t *dev, int channel)
+nv40_fifo_load_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV40_RAMFC(channel);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp, tmp2;
NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET , RAMFC_RD(DMA_GET));
@@ -129,7 +135,7 @@ nv40_fifo_load_context(drm_device_t *dev, int channel)
NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);
/* Set channel active, and in DMA mode */
- NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | channel);
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1 , 0x00010000 | chan->id);
/* Reset DMA_CTL_AT_INFO to INVALID */
tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);
NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp);
@@ -138,10 +144,10 @@ nv40_fifo_load_context(drm_device_t *dev, int channel)
}
int
-nv40_fifo_save_context(drm_device_t *dev, int channel)
+nv40_fifo_save_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t fifoctx = NV40_RAMFC(channel);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
RAMFC_WR(DMA_PUT , NV_READ(NV04_PFIFO_CACHE1_DMA_PUT));
@@ -187,3 +193,16 @@ nv40_fifo_save_context(drm_device_t *dev, int channel)
return 0;
}
+int
+nv40_fifo_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int ret;
+
+ if ((ret = nouveau_fifo_init(dev)))
+ return ret;
+
+ NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);
+ return 0;
+}
+
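The RAMFC_WR()/RAMFC_RD() macros redefined at the top of this file no longer poke RAMIN directly through NV_WI32()/NV_RI32() at a fixed per-channel offset; they now go through INSTANCE_WR()/INSTANCE_RD() on the channel's ramfc gpuobj, which nv40_fifo_create_context() places over the fixed RAMFC slot with nouveau_gpuobj_new_fake(). Since INSTANCE_WR() indexes 32-bit words, the register offset is divided by 4. Roughly, assuming NV40_RAMFC_DMA_PUT is the usual byte-offset constant, a write such as the one above expands as sketched here:

/* Illustrative expansion of the new RAMFC_WR() macro; not literal
 * preprocessor output, just the shape of the resulting call. */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
	/* ...becomes approximately... */
INSTANCE_WR(chan->ramfc->gpuobj, NV40_RAMFC_DMA_PUT / 4, chan->pushbuf_base);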
diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c
index 6fb575db..3f3df515 100644
--- a/shared-core/nv40_graph.c
+++ b/shared-core/nv40_graph.c
@@ -34,6 +34,7 @@
* between the contexts
*/
#define NV40_GRCTX_SIZE (175*1024)
+#define NV41_GRCTX_SIZE (92*1024)
#define NV43_GRCTX_SIZE (70*1024)
#define NV46_GRCTX_SIZE (70*1024) /* probably ~64KiB */
#define NV49_GRCTX_SIZE (164640)
@@ -47,13 +48,13 @@
* created.
*/
static void
-nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv40_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
/* Always has the "instance address" of itself at offset 0 */
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
/* unknown */
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
@@ -188,12 +189,122 @@ nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
static void
-nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv41_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i;
+
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
+ INSTANCE_WR(ctx, 0x00000024/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000028/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000030/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x0000011c/4, 0x20010001);
+ INSTANCE_WR(ctx, 0x00000120/4, 0x0f73ef00);
+ INSTANCE_WR(ctx, 0x00000128/4, 0x02008821);
+ for (i = 0x00000178; i <= 0x00000180; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00000040);
+ INSTANCE_WR(ctx, 0x00000188/4, 0x00000040);
+ for (i = 0x00000194; i <= 0x000001b0; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x80000000);
+ INSTANCE_WR(ctx, 0x000001d0/4, 0x0b0b0b0c);
+ INSTANCE_WR(ctx, 0x00000340/4, 0x00040000);
+ for (i = 0x00000350; i <= 0x0000035c; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x55555555);
+ INSTANCE_WR(ctx, 0x00000388/4, 0x00000008);
+ INSTANCE_WR(ctx, 0x0000039c/4, 0x00001010);
+ INSTANCE_WR(ctx, 0x000003cc/4, 0x00000111);
+ INSTANCE_WR(ctx, 0x000003d0/4, 0x00080060);
+ INSTANCE_WR(ctx, 0x000003ec/4, 0x00000080);
+ INSTANCE_WR(ctx, 0x000003f0/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x000003f4/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000408/4, 0x46400000);
+ INSTANCE_WR(ctx, 0x00000418/4, 0xffff0000);
+ INSTANCE_WR(ctx, 0x00000424/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00000428/4, 0x0fff0000);
+ INSTANCE_WR(ctx, 0x00000430/4, 0x00011100);
+ for (i = 0x0000044c; i <= 0x00000488; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x07ff0000);
+ INSTANCE_WR(ctx, 0x00000494/4, 0x4b7fffff);
+ INSTANCE_WR(ctx, 0x000004bc/4, 0x30201000);
+ INSTANCE_WR(ctx, 0x000004c0/4, 0x70605040);
+ INSTANCE_WR(ctx, 0x000004c4/4, 0xb8a89888);
+ INSTANCE_WR(ctx, 0x000004c8/4, 0xf8e8d8c8);
+ INSTANCE_WR(ctx, 0x000004dc/4, 0x40100000);
+ INSTANCE_WR(ctx, 0x000004f8/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x0000052c/4, 0x435185d6);
+ INSTANCE_WR(ctx, 0x00000530/4, 0x2155b699);
+ INSTANCE_WR(ctx, 0x00000534/4, 0xfedcba98);
+ INSTANCE_WR(ctx, 0x00000538/4, 0x00000098);
+ INSTANCE_WR(ctx, 0x00000548/4, 0xffffffff);
+ INSTANCE_WR(ctx, 0x0000054c/4, 0x00ff7000);
+ INSTANCE_WR(ctx, 0x00000550/4, 0x0000ffff);
+ INSTANCE_WR(ctx, 0x00000560/4, 0x00ff0000);
+ INSTANCE_WR(ctx, 0x00000598/4, 0x00ffff00);
+ for (i = 0x000005dc; i <= 0x00000618; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00018488);
+ for (i = 0x0000061c; i <= 0x00000658; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00028202);
+ for (i = 0x0000069c; i <= 0x000006d8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0000aae4);
+ for (i = 0x000006dc; i <= 0x00000718; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x01012000);
+ for (i = 0x0000071c; i <= 0x00000758; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ for (i = 0x0000079c; i <= 0x000007d8; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00100008);
+ for (i = 0x0000082c; i <= 0x00000838; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x0001bc80);
+ for (i = 0x0000083c; i <= 0x00000848; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00000202);
+ for (i = 0x0000085c; i <= 0x00000868; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00000008);
+ for (i = 0x0000087c; i <= 0x00000888; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00080008);
+ INSTANCE_WR(ctx, 0x0000089c/4, 0x00000002);
+ INSTANCE_WR(ctx, 0x000008d0/4, 0x00000021);
+ INSTANCE_WR(ctx, 0x000008d4/4, 0x030c30c3);
+ INSTANCE_WR(ctx, 0x000008e0/4, 0x3e020200);
+ INSTANCE_WR(ctx, 0x000008e4/4, 0x00ffffff);
+ INSTANCE_WR(ctx, 0x000008e8/4, 0x20103f00);
+ INSTANCE_WR(ctx, 0x000008f4/4, 0x00020000);
+ INSTANCE_WR(ctx, 0x0000092c/4, 0x00008100);
+ INSTANCE_WR(ctx, 0x000009b8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x000009fc/4, 0x00001001);
+ INSTANCE_WR(ctx, 0x00000a04/4, 0x00000003);
+ INSTANCE_WR(ctx, 0x00000a08/4, 0x00888001);
+ INSTANCE_WR(ctx, 0x00000aac/4, 0x00000005);
+ INSTANCE_WR(ctx, 0x00000ab8/4, 0x0000ffff);
+ for (i = 0x00000ad4; i <= 0x00000ae4; i += 4)
+ INSTANCE_WR(ctx, i/4, 0x00005555);
+ INSTANCE_WR(ctx, 0x00000ae8/4, 0x00000001);
+ INSTANCE_WR(ctx, 0x00000b20/4, 0x00000001);
+ for (i = 0x00002ee8; i <= 0x00002f60; i += 8)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x00005168; i <= 0x00007358; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x00007368; i <= 0x00007758; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x0000a068; i <= 0x0000c258; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x0000c268; i <= 0x0000c658; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x0000ef68; i <= 0x00011158; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x00011168; i <= 0x00011558; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+ for (i = 0x00013e68; i <= 0x00016058; i += 24)
+ INSTANCE_WR(ctx, i/4, 0x00000001);
+ for (i = 0x00016068; i <= 0x00016458; i += 16)
+ INSTANCE_WR(ctx, i/4, 0x3f800000);
+};
+
+static void
+nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@@ -304,12 +415,12 @@ nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
};
static void
-nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv46_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00040/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00044/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x0004c/4, 0x00000001);
@@ -455,12 +566,12 @@ nv46_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
static void
-nv49_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv49_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
@@ -678,12 +789,12 @@ nv49_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
static void
-nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv4a_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@@ -795,12 +906,12 @@ nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
static void
-nv4b_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv4b_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00004/4, 0x0000c040);
INSTANCE_WR(ctx, 0x00008/4, 0x0000c040);
INSTANCE_WR(ctx, 0x0000c/4, 0x0000c040);
@@ -1010,12 +1121,12 @@ nv4b_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
static void
-nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv4c_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@@ -1117,12 +1228,12 @@ nv4c_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
static void
-nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
+nv4e_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
int i;
- INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx));
+ INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);
INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff);
INSTANCE_WR(ctx, 0x00030/4, 0x00000001);
@@ -1224,20 +1335,24 @@ nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx)
}
int
-nv40_graph_create_context(drm_device_t *dev, int channel)
+nv40_graph_create_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
- void (*ctx_init)(drm_device_t *, struct mem_block *);
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ void (*ctx_init)(struct drm_device *, struct nouveau_gpuobj *);
unsigned int ctx_size;
- int i;
+ int ret;
switch (dev_priv->chipset) {
case 0x40:
ctx_size = NV40_GRCTX_SIZE;
ctx_init = nv40_graph_context_init;
break;
+ case 0x41:
+ case 0x42:
+ ctx_size = NV41_GRCTX_SIZE;
+ ctx_init = nv41_graph_context_init;
+ break;
case 0x43:
ctx_size = NV43_GRCTX_SIZE;
ctx_init = nv43_graph_context_init;
@@ -1250,6 +1365,7 @@ nv40_graph_create_context(drm_device_t *dev, int channel)
ctx_size = NV49_GRCTX_SIZE;
ctx_init = nv49_graph_context_init;
break;
+ case 0x44:
case 0x4a:
ctx_size = NV4A_GRCTX_SIZE;
ctx_init = nv4a_graph_context_init;
@@ -1272,56 +1388,58 @@ nv40_graph_create_context(drm_device_t *dev, int channel)
break;
}
- /* Alloc and clear RAMIN to store the context */
- chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4);
- if (!chan->ramin_grctx)
- return DRM_ERR(ENOMEM);
- for (i=0; i<ctx_size; i+=4)
- INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000);
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, ctx_size, 16,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &chan->ramin_grctx)))
+ return ret;
/* Initialise default context values */
- ctx_init(dev, chan->ramin_grctx);
+ ctx_init(dev, chan->ramin_grctx->gpuobj);
return 0;
}
void
-nv40_graph_destroy_context(drm_device_t *dev, int channel)
+nv40_graph_destroy_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
-
- if (chan->ramin_grctx) {
- nouveau_instmem_free(dev, chan->ramin_grctx);
- chan->ramin_grctx = NULL;
- }
+ nouveau_gpuobj_ref_del(chan->dev, &chan->ramin_grctx);
}
static int
-nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save)
+nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- uint32_t old_cp, tv = 1000;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t old_cp, tv = 1000, tmp;
int i;
old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
- NV_WRITE(NV40_PGRAPH_CTXCTL_0310,
- save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
- NV40_PGRAPH_CTXCTL_0310_XFER_LOAD);
- NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
+
+ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0310);
+ tmp |= save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;
+ NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp);
+
+ tmp = NV_READ(NV40_PGRAPH_CTXCTL_0304);
+ tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;
+ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp);
for (i = 0; i < tv; i++) {
if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
break;
}
+
NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
if (i == tv) {
- DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
+ uint32_t ucstat = NV_READ(NV40_PGRAPH_CTXCTL_UCODE_STAT);
+ DRM_ERROR("Failed: Instance=0x%08x Save=%d\n", inst, save);
+ DRM_ERROR("IP: 0x%02x, Opcode: 0x%08x\n",
+ ucstat >> NV40_PGRAPH_CTXCTL_UCODE_STAT_IP_SHIFT,
+ ucstat & NV40_PGRAPH_CTXCTL_UCODE_STAT_OP_MASK);
DRM_ERROR("0x40030C = 0x%08x\n",
NV_READ(NV40_PGRAPH_CTXCTL_030C));
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
return 0;
@@ -1331,15 +1449,14 @@ nv40_graph_transfer_context(drm_device_t *dev, uint32_t inst, int save)
*XXX: fails sometimes, not sure why..
*/
int
-nv40_graph_save_context(drm_device_t *dev, int channel)
+nv40_graph_save_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
uint32_t inst;
if (!chan->ramin_grctx)
- return DRM_ERR(EINVAL);
- inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
return nv40_graph_transfer_context(dev, inst, 1);
}
@@ -1348,16 +1465,16 @@ nv40_graph_save_context(drm_device_t *dev, int channel)
* XXX: fails sometimes.. not sure why
*/
int
-nv40_graph_load_context(drm_device_t *dev, int channel)
+nv40_graph_load_context(struct nouveau_channel *chan)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
- struct nouveau_fifo *chan = &dev_priv->fifos[channel];
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t inst;
int ret;
if (!chan->ramin_grctx)
- return DRM_ERR(EINVAL);
- inst = nouveau_chip_instance_get(dev, chan->ramin_grctx);
+ return -EINVAL;
+ inst = chan->ramin_grctx->instance >> 4;
ret = nv40_graph_transfer_context(dev, inst, 0);
if (ret)
@@ -1430,6 +1547,37 @@ static uint32_t nv40_ctx_voodoo[] = {
~0
};
+static uint32_t nv41_ctx_voodoo[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
+ 0x0040a068, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
+ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x001046ec, 0x00500060, 0x00404087, 0x0060000d, 0x004079e6, 0x002000f1,
+ 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,
+ 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,
+ 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,
+ 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200233, 0x0060000a, 0x00104800,
+ 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
+ 0x00108a14, 0x00200020, 0x00100b00, 0x00134b2c, 0x0010cd00, 0x0010cd04,
+ 0x00114d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06,
+ 0x002002d2, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684,
+ 0x00800001, 0x00200b1a, 0x0060000a, 0x00206380, 0x0040788a, 0x00201480,
+ 0x00800041, 0x00408900, 0x00600006, 0x004085e6, 0x00700080, 0x0020007a,
+ 0x0060000a, 0x00104280, 0x002002d2, 0x0060000a, 0x00200004, 0x00800001,
+ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a068, 0x00700000,
+ 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060,
+ 0x00600007, 0x00409388, 0x0060000f, 0x00500060, 0x00200000, 0x0060000a,
+ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x00940400, 0x00200020,
+ 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a206, 0x0040a305,
+ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
static uint32_t nv43_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
@@ -1463,6 +1611,39 @@ static uint32_t nv43_ctx_voodoo[] = {
~0
};
+static uint32_t nv44_ctx_voodoo[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409a65, 0x00409f06,
+ 0x0040ac68, 0x0040248f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x001041c6, 0x00104040, 0x00200001, 0x0060000a, 0x00700000, 0x001040c5,
+ 0x00402320, 0x00402321, 0x00402322, 0x00402324, 0x00402326, 0x0040232b,
+ 0x001040c5, 0x00402328, 0x001040c5, 0x00402320, 0x00402468, 0x0060000d,
+ 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, 0x00402be6,
+ 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, 0x00110158,
+ 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, 0x001041c9,
+ 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, 0x001242c0,
+ 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, 0x0011415f,
+ 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, 0x001046ec,
+ 0x00500060, 0x00404b87, 0x0060000d, 0x004084e6, 0x002000f1, 0x0060000a,
+ 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, 0x00168691,
+ 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, 0x001646cc,
+ 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7,
+ 0x001043e1, 0x00500060, 0x00200232, 0x0060000a, 0x00104800, 0x00108901,
+ 0x00104910, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, 0x00148a00,
+ 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, 0x0010cd08,
+ 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, 0x002002c8,
+ 0x0060000a, 0x00300000, 0x00200080, 0x00407d00, 0x00200084, 0x00800001,
+ 0x00200510, 0x0060000a, 0x002037e0, 0x0040838a, 0x00201320, 0x00800029,
+ 0x00409400, 0x00600006, 0x004090e6, 0x00700080, 0x0020007a, 0x0060000a,
+ 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,
+ 0x00200000, 0x0060000a, 0x00106002, 0x0040ac68, 0x00700000, 0x00200000,
+ 0x0060000a, 0x00106002, 0x00700080, 0x00400a68, 0x00500060, 0x00600007,
+ 0x00409e88, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a,
+ 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,
+ 0x0060000b, 0x00500069, 0x0060000c, 0x00402c68, 0x0040ae06, 0x0040af05,
+ 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
static uint32_t nv46_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409306,
@@ -1560,6 +1741,37 @@ static uint32_t nv4a_ctx_voodoo[] = {
0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
};
+static uint32_t nv4c_ctx_voodoo[] = {
+ 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
+ 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409065, 0x00409406,
+ 0x0040a168, 0x0040198f, 0x00200001, 0x0060000a, 0x00700080, 0x00104042,
+ 0x00200001, 0x0060000a, 0x00700000, 0x001040c5, 0x00401826, 0x00401968,
+ 0x0060000d, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,
+ 0x004020e6, 0x007000a0, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,
+ 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,
+ 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,
+ 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,
+ 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,
+ 0x0010427e, 0x001046ec, 0x00500060, 0x00404187, 0x0060000d, 0x00407ae6,
+ 0x002000f2, 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682,
+ 0x0011068b, 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4,
+ 0x001146c6, 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0,
+ 0x00100700, 0x0010c3d7, 0x001043e1, 0x00500060, 0x00200234, 0x0060000a,
+ 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,
+ 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00,
+ 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,
+ 0x00104f06, 0x002002c0, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,
+ 0x00200084, 0x00800001, 0x00200508, 0x0060000a, 0x00201320, 0x0040798a,
+ 0xfffffaf8, 0x00800029, 0x00408a00, 0x00600006, 0x004086e6, 0x00700080,
+ 0x0020007a, 0x0060000a, 0x00104280, 0x002002c0, 0x0060000a, 0x00200004,
+ 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a168,
+ 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, 0x00400a68,
+ 0x00500060, 0x00600007, 0x00409488, 0x0060000f, 0x00500060, 0x00200000,
+ 0x0060000a, 0x00700000, 0x00106001, 0x00910880, 0x00901ffe, 0x01940000,
+ 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00402168, 0x0040a306,
+ 0x0040a405, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0
+};
+
static uint32_t nv4e_ctx_voodoo[] = {
0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,
0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06,
@@ -1603,10 +1815,10 @@ static uint32_t nv4e_ctx_voodoo[] = {
* C51 0x4e
*/
int
-nv40_graph_init(drm_device_t *dev)
+nv40_graph_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv =
- (drm_nouveau_private_t *)dev->dev_private;
+ struct drm_nouveau_private *dev_priv =
+ (struct drm_nouveau_private *)dev->dev_private;
uint32_t *ctx_voodoo;
uint32_t vramsz, tmp;
int i, j;
@@ -1618,11 +1830,15 @@ nv40_graph_init(drm_device_t *dev)
switch (dev_priv->chipset) {
case 0x40: ctx_voodoo = nv40_ctx_voodoo; break;
+ case 0x41:
+ case 0x42: ctx_voodoo = nv41_ctx_voodoo; break;
case 0x43: ctx_voodoo = nv43_ctx_voodoo; break;
+ case 0x44: ctx_voodoo = nv44_ctx_voodoo; break;
case 0x46: ctx_voodoo = nv46_ctx_voodoo; break;
case 0x49: ctx_voodoo = nv49_4b_ctx_voodoo; break;
case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break;
case 0x4b: ctx_voodoo = nv49_4b_ctx_voodoo; break;
+ case 0x4c: ctx_voodoo = nv4c_ctx_voodoo; break;
case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break;
default:
DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n",
@@ -1646,8 +1862,8 @@ nv40_graph_init(drm_device_t *dev)
/* No context present currently */
NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
- NV_WRITE(NV03_PGRAPH_INTR_EN, 0x00000000);
NV_WRITE(NV03_PGRAPH_INTR , 0xFFFFFFFF);
+ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);
NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);
@@ -1833,7 +2049,7 @@ nv40_graph_init(drm_device_t *dev)
return 0;
}
-void nv40_graph_takedown(drm_device_t *dev)
+void nv40_graph_takedown(struct drm_device *dev)
{
}
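Two behavioural details of the reworked nv40_graph_transfer_context() above are worth noting: the save/load direction and the XFER_CTX bit are now OR'd into CTXCTL_0310 and CTXCTL_0304 with a read-modify-write instead of overwriting those registers, and a failed transfer now also reports the context-switching microcode's instruction pointer and opcode from CTXCTL_UCODE_STAT. Both the nv30 and nv40 paths rely on the same bounded polling idiom while the transfer completes; a stripped-down sketch of that idiom, with an invented register parameter, is:

/* Bounded poll-until-clear idiom used by the context transfer code above.
 * "reg" and the retry count are illustrative; the real functions poll
 * fixed registers (0x400700 on nv30, NV40_PGRAPH_CTXCTL_030C on nv40). */
static int example_wait_reg_clear(struct drm_device *dev, uint32_t reg)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tries = 1000;

	while (tries--) {
		if (NV_READ(reg) == 0)
			return 0;	/* transfer finished */
	}
	return -EBUSY;			/* caller logs diagnostics */
}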
diff --git a/shared-core/nv40_mc.c b/shared-core/nv40_mc.c
index 8dbd96fd..c7db9023 100644
--- a/shared-core/nv40_mc.c
+++ b/shared-core/nv40_mc.c
@@ -4,9 +4,9 @@
#include "nouveau_drm.h"
int
-nv40_mc_init(drm_device_t *dev)
+nv40_mc_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t tmp;
/* Power up everything, resetting each individual unit will
@@ -14,8 +14,6 @@ nv40_mc_init(drm_device_t *dev)
*/
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
- NV_WRITE(NV03_PMC_INTR_EN_0, 0);
-
switch (dev_priv->chipset) {
case 0x44:
case 0x46: /* G72 */
@@ -35,7 +33,7 @@ nv40_mc_init(drm_device_t *dev)
}
void
-nv40_mc_takedown(drm_device_t *dev)
+nv40_mc_takedown(struct drm_device *dev)
{
}
diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c
index e5d37949..7859544a 100644
--- a/shared-core/nv50_fifo.c
+++ b/shared-core/nv50_fifo.c
@@ -28,55 +28,299 @@
#include "drm.h"
#include "nouveau_drv.h"
+typedef struct {
+ struct nouveau_gpuobj_ref *thingo;
+} nv50_fifo_priv;
+
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
static void
-nv50_fifo_init_reset(drm_device_t *dev)
+nv50_fifo_init_thingo(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
+ struct nouveau_gpuobj_ref *thingo = priv->thingo;
+ int i, fi=2;
+
+ DRM_DEBUG("\n");
+
+ INSTANCE_WR(thingo->gpuobj, 0, 0x7e);
+ INSTANCE_WR(thingo->gpuobj, 1, 0x7e);
+ for (i = 1; i < 127; i++, fi) {
+ if (dev_priv->fifos[i]) {
+ INSTANCE_WR(thingo->gpuobj, fi, i);
+ fi++;
+ }
+ }
+
+ NV_WRITE(0x32f4, thingo->instance >> 12);
+ NV_WRITE(0x32ec, fi);
+ NV_WRITE(0x2500, 0x101);
+}
+
+static int
+nv50_fifo_channel_enable(struct drm_device *dev, int channel, int nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[channel];
+ uint32_t inst;
+
+ DRM_DEBUG("ch%d\n", channel);
+
+ if (!chan->ramfc)
+ return -EINVAL;
+
+ if (IS_G80) inst = chan->ramfc->instance >> 12;
+ else inst = chan->ramfc->instance >> 8;
+ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel),
+ inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);
+
+ if (!nt) nv50_fifo_init_thingo(dev);
+ return 0;
+}
+
+static void
+nv50_fifo_channel_disable(struct drm_device *dev, int channel, int nt)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst;
+
+ DRM_DEBUG("ch%d, nt=%d\n", channel, nt);
+
+ if (IS_G80) inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
+ else inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
+ NV_WRITE(NV50_PFIFO_CTX_TABLE(channel), inst);
+
+ if (!nt) nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_reset(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t pmc_e;
+ DRM_DEBUG("\n");
+
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PFIFO);
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PFIFO);
}
+static void
+nv50_fifo_init_intr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(NV03_PFIFO_INTR_0, 0xFFFFFFFF);
+ NV_WRITE(NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
+}
+
+static void
+nv50_fifo_init_context_table(struct drm_device *dev)
+{
+ int i;
+
+ DRM_DEBUG("\n");
+
+ for (i = 0; i < NV50_PFIFO_CTX_TABLE__SIZE; i++)
+ nv50_fifo_channel_disable(dev, i, 1);
+ nv50_fifo_init_thingo(dev);
+}
+
+static void
+nv50_fifo_init_regs__nv(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(0x250c, 0x6f3cfc34);
+}
+
+static int
+nv50_fifo_init_regs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(0x2500, 0);
+ NV_WRITE(0x3250, 0);
+ NV_WRITE(0x3220, 0);
+ NV_WRITE(0x3204, 0);
+ NV_WRITE(0x3210, 0);
+ NV_WRITE(0x3270, 0);
+
+ /* Enable dummy channels setup by nv50_instmem.c */
+ nv50_fifo_channel_enable(dev, 0, 1);
+ nv50_fifo_channel_enable(dev, 127, 1);
+
+ return 0;
+}
+
int
-nv50_fifo_init(drm_device_t *dev)
+nv50_fifo_init(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_fifo_priv *priv;
+ int ret;
+
+ DRM_DEBUG("\n");
+
+ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->Engine.fifo.priv = priv;
+
nv50_fifo_init_reset(dev);
+ nv50_fifo_init_intr(dev);
+
+ if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, (128+2)*4, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &priv->thingo))) {
+ DRM_ERROR("error creating thingo: %d\n", ret);
+ return ret;
+ }
+
+ nv50_fifo_init_context_table(dev);
+
+ nv50_fifo_init_regs__nv(dev);
+ if ((ret = nv50_fifo_init_regs(dev)))
+ return ret;
- DRM_ERROR("stub!\n");
return 0;
}
void
-nv50_fifo_takedown(drm_device_t *dev)
+nv50_fifo_takedown(struct drm_device *dev)
{
- DRM_ERROR("stub!\n");
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;
+
+ DRM_DEBUG("\n");
+
+ if (!priv)
+ return;
+
+ nouveau_gpuobj_ref_del(dev, &priv->thingo);
+
+ dev_priv->Engine.fifo.priv = NULL;
+ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
}
int
-nv50_fifo_create_context(drm_device_t *dev, int channel)
+nv50_fifo_create_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = NULL;
+ int ret;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ if (IS_G80) {
+ uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start;
+ uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start;
+ if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset,
+ vram_offset, 0x100,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ &ramfc, &chan->ramfc)))
+ return ret;
+ } else {
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100,
+ 256,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ &chan->ramfc)))
+ return ret;
+ ramfc = chan->ramfc->gpuobj;
+ }
+
+ INSTANCE_WR(ramfc, 0x48/4, chan->pushbuf->instance >> 4);
+ INSTANCE_WR(ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
+ INSTANCE_WR(ramfc, 0x3c/4, 0x000f0078); /* fetch? */
+ INSTANCE_WR(ramfc, 0x44/4, 0x2101ffff);
+ INSTANCE_WR(ramfc, 0x60/4, 0x7fffffff);
+ INSTANCE_WR(ramfc, 0x10/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x08/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x40/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x50/4, 0x2039b2e0);
+ INSTANCE_WR(ramfc, 0x54/4, 0x000f0000);
+ INSTANCE_WR(ramfc, 0x7c/4, 0x30000001);
+ INSTANCE_WR(ramfc, 0x78/4, 0x00000000);
+ INSTANCE_WR(ramfc, 0x4c/4, chan->pushbuf_mem->size - 1);
+
+ if (!IS_G80) {
+ INSTANCE_WR(chan->ramin->gpuobj, 0, chan->id);
+ INSTANCE_WR(chan->ramin->gpuobj, 1, chan->ramfc->instance);
+
+ INSTANCE_WR(ramfc, 0x88/4, 0x3d520); /* some vram addy >> 10 */
+ INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12);
+ }
+
+ if ((ret = nv50_fifo_channel_enable(dev, chan->id, 0))) {
+ DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ return ret;
+ }
+
return 0;
}
void
-nv50_fifo_destroy_context(drm_device_t *dev, int channel)
+nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
+ struct drm_device *dev = chan->dev;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ nv50_fifo_channel_disable(dev, chan->id, 0);
+
+ /* Dummy channel, also used on ch 127 */
+ if (chan->id == 0)
+ nv50_fifo_channel_disable(dev, 127, 0);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
}
int
-nv50_fifo_load_context(drm_device_t *dev, int channel)
+nv50_fifo_load_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ /*XXX: incomplete, only touches the regs that NV does */
+
+ NV_WRITE(0x3244, 0);
+ NV_WRITE(0x3240, 0);
+
+ NV_WRITE(0x3224, INSTANCE_RD(ramfc, 0x3c/4));
+ NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, INSTANCE_RD(ramfc, 0x48/4));
+ NV_WRITE(0x3234, INSTANCE_RD(ramfc, 0x4c/4));
+ NV_WRITE(0x3254, 1);
+ NV_WRITE(NV03_PFIFO_RAMHT, INSTANCE_RD(ramfc, 0x80/4));
+
+ if (!IS_G80) {
+ NV_WRITE(0x340c, INSTANCE_RD(ramfc, 0x88/4));
+ NV_WRITE(0x3410, INSTANCE_RD(ramfc, 0x98/4));
+ }
+
+ NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
return 0;
}
int
-nv50_fifo_save_context(drm_device_t *dev, int channel)
+nv50_fifo_save_context(struct nouveau_channel *chan)
{
+ DRM_DEBUG("ch%d\n", chan->id);
DRM_ERROR("stub!\n");
return 0;
}
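
The nv50_fifo changes above encode each PFIFO context-table entry from the channel's RAMFC instance address: shifted right by 12 bits on G80, by 8 bits on later chips, then OR'd with an "enabled" flag. A minimal standalone sketch of that encoding follows; the flag value is a placeholder, not the real NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED constant, and the instance address is a made-up example.

/*
 * Sketch of the PFIFO context-table entry built by
 * nv50_fifo_channel_enable() above.  The enabled-flag bit below is a
 * stand-in, not the real NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED value.
 */
#include <stdint.h>
#include <stdio.h>

#define CTX_TABLE_CHANNEL_ENABLED_GUESS (1u << 31)	/* placeholder bit */

static uint32_t ctx_table_entry(uint32_t ramfc_instance, int is_g80)
{
	/* G80 shifts the RAMFC instance by 12 bits, later chips by 8 */
	uint32_t inst = is_g80 ? (ramfc_instance >> 12)
			       : (ramfc_instance >> 8);
	return inst | CTX_TABLE_CHANNEL_ENABLED_GUESS;
}

int main(void)
{
	/* e.g. a RAMFC living at 0x00101400 in the instance space */
	printf("G80 entry:  0x%08x\n", ctx_table_entry(0x00101400, 1));
	printf("G84+ entry: 0x%08x\n", ctx_table_entry(0x00101400, 0));
	return 0;
}
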
diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c
index 8c3e2b9b..e5bbf65e 100644
--- a/shared-core/nv50_graph.c
+++ b/shared-core/nv50_graph.c
@@ -28,57 +28,289 @@
#include "drm.h"
#include "nouveau_drv.h"
+#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
+
static void
-nv50_graph_init_reset(drm_device_t *dev)
+nv50_graph_init_reset(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t pmc_e;
+ DRM_DEBUG("\n");
+
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PGRAPH);
pmc_e = NV_READ(NV03_PMC_ENABLE);
NV_WRITE(NV03_PMC_ENABLE, pmc_e | NV_PMC_ENABLE_PGRAPH);
}
+static void
+nv50_graph_init_intr(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+ NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff);
+ NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff);
+}
+
+static void
+nv50_graph_init_regs__nv(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(0x400804, 0xc0000000);
+ NV_WRITE(0x406800, 0xc0000000);
+ NV_WRITE(0x400c04, 0xc0000000);
+ NV_WRITE(0x401804, 0xc0000000);
+ NV_WRITE(0x405018, 0xc0000000);
+ NV_WRITE(0x402000, 0xc0000000);
+
+ NV_WRITE(0x400108, 0xffffffff);
+
+ NV_WRITE(0x400824, 0x00004000);
+ NV_WRITE(0x400500, 0x00010001);
+}
+
+static void
+nv50_graph_init_regs(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("\n");
+
+ NV_WRITE(NV04_PGRAPH_DEBUG_3, (1<<2) /* HW_CONTEXT_SWITCH_ENABLED */);
+}
+
+static uint32_t nv84_ctx_voodoo[] = {
+ 0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,
+ 0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,
+ 0x00700009, 0x0041634d, 0x00402944, 0x00402905, 0x0040290d, 0x00413e06,
+ 0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,
+ 0x00700081, 0x00600004, 0x0050004a, 0x00216f40, 0x00600007, 0x00c02801,
+ 0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,
+ 0x00600008, 0x0050004c, 0x00600009, 0x00413e45, 0x0041594d, 0x0070009d,
+ 0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,
+ 0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,
+ 0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216f40, 0x00600007,
+ 0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,
+ 0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200480, 0x00600007,
+ 0x00300000, 0x00c000ff, 0x00c800ff, 0x00414907, 0x00202916, 0x008000ff,
+ 0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,
+ 0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,
+ 0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,
+ 0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,
+ 0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,
+ 0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040798c,
+ 0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,
+ 0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,
+ 0x00131c80, 0x00121c84, 0x00141ca0, 0x00111ca5, 0x00131cc0, 0x00121cc4,
+ 0x00141ce0, 0x00111ce5, 0x00131f00, 0x00191f40, 0x0040a1e0, 0x002001ed,
+ 0x00600006, 0x00200044, 0x00102080, 0x001120c6, 0x001520c9, 0x001920d0,
+ 0x00122100, 0x00122103, 0x00162200, 0x00122207, 0x00112280, 0x00112300,
+ 0x00112302, 0x00122380, 0x0011238b, 0x00112394, 0x0011239c, 0x0040bee1,
+ 0x00200254, 0x00600006, 0x00200044, 0x00102480, 0x0040af0f, 0x0040af4b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040af8c,
+ 0x005000cb, 0x00000000, 0x001124c6, 0x001524c9, 0x001924d0, 0x00122500,
+ 0x00122503, 0x00162600, 0x00122607, 0x00112680, 0x00112700, 0x00112702,
+ 0x00122780, 0x0011278b, 0x00112794, 0x0011279c, 0x0040d1e2, 0x002002bb,
+ 0x00600006, 0x00200044, 0x00102880, 0x001128c6, 0x001528c9, 0x001928d0,
+ 0x00122900, 0x00122903, 0x00162a00, 0x00122a07, 0x00112a80, 0x00112b00,
+ 0x00112b02, 0x00122b80, 0x00112b8b, 0x00112b94, 0x00112b9c, 0x0040eee3,
+ 0x00200322, 0x00600006, 0x00200044, 0x00102c80, 0x0040df0f, 0x0040df4b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x0040df8c,
+ 0x005000cb, 0x00000000, 0x00112cc6, 0x00152cc9, 0x00192cd0, 0x00122d00,
+ 0x00122d03, 0x00162e00, 0x00122e07, 0x00112e80, 0x00112f00, 0x00112f02,
+ 0x00122f80, 0x00112f8b, 0x00112f94, 0x00112f9c, 0x004101e4, 0x00200389,
+ 0x00600006, 0x00200044, 0x00103080, 0x001130c6, 0x001530c9, 0x001930d0,
+ 0x00123100, 0x00123103, 0x00163200, 0x00123207, 0x00113280, 0x00113300,
+ 0x00113302, 0x00123380, 0x0011338b, 0x00113394, 0x0011339c, 0x00411ee5,
+ 0x002003f0, 0x00600006, 0x00200044, 0x00103480, 0x00410f0f, 0x00410f4b,
+ 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x0070008f, 0x00410f8c,
+ 0x005000cb, 0x00000000, 0x001134c6, 0x001534c9, 0x001934d0, 0x00123500,
+ 0x00123503, 0x00163600, 0x00123607, 0x00113680, 0x00113700, 0x00113702,
+ 0x00123780, 0x0011378b, 0x00113794, 0x0011379c, 0x00000000, 0x0041250f,
+ 0x005000cb, 0x00214d40, 0x00600007, 0x0020043e, 0x008800ff, 0x005000cb,
+ 0x00412887, 0x0060000a, 0x00000000, 0x00413700, 0x007000a0, 0x00700080,
+ 0x00200480, 0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb,
+ 0x00700000, 0x00200000, 0x00600006, 0x00111bfe, 0x0041594d, 0x00700000,
+ 0x00200000, 0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d,
+ 0x00700081, 0x00600004, 0x0050004a, 0x00414388, 0x0060000b, 0x00200000,
+ 0x00600006, 0x00700000, 0x0041590b, 0x00111bfd, 0x0040424d, 0x00202916,
+ 0x008000fd, 0x005000cb, 0x00c00002, 0x00200480, 0x00600007, 0x00200160,
+ 0x00800002, 0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb,
+ 0x00404e4d, 0x0060000b, 0x0041574d, 0x00700001, 0x005000cf, 0x00700003,
+ 0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006,
+ 0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0
+};
+
+static void
+nv50_graph_init_ctxctl(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t *voodoo;
+
+ DRM_DEBUG("\n");
+
+ switch (dev_priv->chipset) {
+ case 0x84:
+ voodoo = nv84_ctx_voodoo;
+ break;
+ default:
+ DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset);
+ break;
+ }
+
+ if (voodoo) {
+ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ while (*voodoo != ~0) {
+ NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo);
+ voodoo++;
+ }
+ }
+
+ NV_WRITE(0x400320, 4);
+ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0);
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0);
+}
+
int
-nv50_graph_init(drm_device_t *dev)
+nv50_graph_init(struct drm_device *dev)
{
+ DRM_DEBUG("\n");
+
nv50_graph_init_reset(dev);
+ nv50_graph_init_intr(dev);
+ nv50_graph_init_regs__nv(dev);
+ nv50_graph_init_regs(dev);
+ nv50_graph_init_ctxctl(dev);
- DRM_ERROR("stub!\n");
return 0;
}
void
-nv50_graph_takedown(drm_device_t *dev)
+nv50_graph_takedown(struct drm_device *dev)
{
- DRM_ERROR("stub!\n");
+ DRM_DEBUG("\n");
}
int
-nv50_graph_create_context(drm_device_t *dev, int channel)
+nv50_graph_create_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_engine *engine = &dev_priv->Engine;
+ struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
+ int grctx_size = 0x60000, hdr;
+ int ret;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
+ grctx_size, 0x1000,
+ NVOBJ_FLAG_ZERO_ALLOC |
+ NVOBJ_FLAG_ZERO_FREE,
+ &chan->ramin_grctx)))
+ return ret;
+
+ hdr = IS_G80 ? 0x200 : 0x20;
+ INSTANCE_WR(ramin, (hdr + 0x00)/4, 0x00190002);
+ INSTANCE_WR(ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
+ grctx_size - 1);
+ INSTANCE_WR(ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
+ INSTANCE_WR(ramin, (hdr + 0x0c)/4, 0);
+ INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);
+ INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000);
+
+ if ((ret = engine->graph.load_context(chan))) {
+ DRM_ERROR("Error hacking up initial context: %d\n", ret);
+ return ret;
+ }
+
return 0;
}
void
-nv50_graph_destroy_context(drm_device_t *dev, int channel)
+nv50_graph_destroy_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, hdr;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ hdr = IS_G80 ? 0x200 : 0x20;
+ for (i=hdr; i<hdr+24; i+=4)
+ INSTANCE_WR(chan->ramin->gpuobj, i/4, 0);
+
+ nouveau_gpuobj_ref_del(dev, &chan->ramin_grctx);
}
-int
-nv50_graph_load_context(drm_device_t *dev, int channel)
+static int
+nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)
{
- DRM_ERROR("stub!\n");
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t old_cp, tv = 20000;
+ int i;
+
+ DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save);
+
+ old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER);
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst | (1<<31));
+ NV_WRITE(0x400824, NV_READ(0x400824) |
+ (save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :
+ NV40_PGRAPH_CTXCTL_0310_XFER_LOAD));
+ NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);
+
+ for (i = 0; i < tv; i++) {
+ if (NV_READ(NV40_PGRAPH_CTXCTL_030C) == 0)
+ break;
+ }
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, old_cp);
+
+ if (i == tv) {
+ DRM_ERROR("failed: inst=0x%08x save=%d\n", inst, save);
+ DRM_ERROR("0x40030C = 0x%08x\n",
+ NV_READ(NV40_PGRAPH_CTXCTL_030C));
+ return -EBUSY;
+ }
+
return 0;
}
int
-nv50_graph_save_context(drm_device_t *dev, int channel)
+nv50_graph_load_context(struct nouveau_channel *chan)
{
- DRM_ERROR("stub!\n");
+ struct drm_device *dev = chan->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31));
+ int ret; (void)ret;
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+#if 0
+ if ((ret = nv50_graph_transfer_context(dev, inst, 0)))
+ return ret;
+#endif
+
+ NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);
+ NV_WRITE(0x400320, 4);
+ NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst);
+
return 0;
}
+int
+nv50_graph_save_context(struct nouveau_channel *chan)
+{
+ struct drm_device *dev = chan->dev;
+ uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31));
+
+ DRM_DEBUG("ch%d\n", chan->id);
+
+ return nv50_graph_transfer_context(dev, inst, 1);
+}
+
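
nv50_graph_create_context() above writes a six-word header into the channel's RAMIN (at offset 0x200 on G80, 0x20 on G84+) describing the new graphics context: the second word holds the context limit (instance + size - 1) and the third the context base. A minimal sketch of that layout follows, with a hypothetical instance address; the meaning of the two constant words is not documented in the patch, so they are copied verbatim without interpretation.

/*
 * Sketch of the PGRAPH context header written by
 * nv50_graph_create_context().  Array indices correspond to 32-bit
 * words, mirroring the (offset)/4 addressing of INSTANCE_WR().
 */
#include <stdint.h>
#include <stdio.h>

static void fill_grctx_header(uint32_t hdr[6], uint32_t grctx_inst,
			      uint32_t grctx_size)
{
	hdr[0] = 0x00190002;			/* constant from the patch */
	hdr[1] = grctx_inst + grctx_size - 1;	/* context limit */
	hdr[2] = grctx_inst;			/* context base */
	hdr[3] = 0;
	hdr[4] = 0;
	hdr[5] = 0x00010000;			/* constant from the patch */
}

int main(void)
{
	uint32_t hdr[6];
	int i;

	/* hypothetical grctx instance, size matching grctx_size = 0x60000 */
	fill_grctx_header(hdr, 0x00020000, 0x60000);
	for (i = 0; i < 6; i++)
		printf("hdr[%d] = 0x%08x\n", i, hdr[i]);
	return 0;
}
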
diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c
new file mode 100644
index 00000000..1eeb54df
--- /dev/null
+++ b/shared-core/nv50_instmem.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+
+typedef struct {
+ uint32_t save1700[5]; /* 0x1700->0x1710 */
+
+ struct nouveau_gpuobj_ref *pramin_pt;
+ struct nouveau_gpuobj_ref *pramin_bar;
+} nv50_instmem_priv;
+
+#define NV50_INSTMEM_PAGE_SHIFT 12
+#define NV50_INSTMEM_PAGE_SIZE (1 << NV50_INSTMEM_PAGE_SHIFT)
+#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
+
+/*NOTE: - Assumes 0x1700 already covers the correct MiB of PRAMIN
+ */
+#define BAR0_WI32(g,o,v) do { \
+ uint32_t offset; \
+ if ((g)->im_backing) { \
+ offset = (g)->im_backing->start; \
+ } else { \
+ offset = chan->ramin->gpuobj->im_backing->start; \
+ offset += (g)->im_pramin->start; \
+ } \
+ offset += (o); \
+ NV_WRITE(NV_RAMIN + (offset & 0xfffff), (v)); \
+} while(0)
+
+int
+nv50_instmem_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan;
+ uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
+ nv50_instmem_priv *priv;
+ int ret, i;
+ uint32_t v;
+
+ priv = drm_calloc(1, sizeof(*priv), DRM_MEM_DRIVER);
+ if (!priv)
+ return -ENOMEM;
+ dev_priv->Engine.instmem.priv = priv;
+
+ /* Reserve the last MiB of VRAM, we should probably try to avoid
+ * setting up the below tables over the top of the VBIOS image at
+ * some point.
+ */
+ dev_priv->ramin_rsvd_vram = 1 << 20;
+ c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
+ c_size = 128 << 10;
+ c_vmpd = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
+ c_ramfc = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
+ c_base = c_vmpd + 0x4000;
+ pt_size = NV50_INSTMEM_PT_SIZE(dev_priv->ramin->size);
+
+ DRM_DEBUG(" Rsvd VRAM base: 0x%08x\n", c_offset);
+ DRM_DEBUG(" VBIOS image: 0x%08x\n", (NV_READ(0x619f04)&~0xff)<<8);
+ DRM_DEBUG(" Aperture size: %d MiB\n",
+ (uint32_t)dev_priv->ramin->size >> 20);
+ DRM_DEBUG(" PT size: %d KiB\n", pt_size >> 10);
+
+ NV_WRITE(NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));
+
+ /* Create a fake channel, and use it as our "dummy" channels 0/127.
+ * The main reason for creating a channel is so we can use the gpuobj
+ * code. However, it's probably worth noting that NVIDIA also set up
+ * their channels 0/127 with the same values they configure here.
+ * So, there may be some other reason for doing this.
+ *
+ * Have to create the entire channel manually, as the real channel
+ * creation code assumes we have PRAMIN access, and we don't until
+ * we're done here.
+ */
+ chan = drm_calloc(1, sizeof(*chan), DRM_MEM_DRIVER);
+ if (!chan)
+ return -ENOMEM;
+ chan->id = 0;
+ chan->dev = dev;
+ chan->file_priv = (struct drm_file *)-2;
+ dev_priv->fifos[0] = dev_priv->fifos[127] = chan;
+
+ /* Channel's PRAMIN object + heap */
+ if ((ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, 128<<10, 0,
+ NULL, &chan->ramin)))
+ return ret;
+
+ if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
+ return -ENOMEM;
+
+ /* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
+ if ((ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
+ 0x4000, 0, NULL, &chan->ramfc)))
+ return ret;
+
+ for (i = 0; i < c_vmpd; i += 4)
+ BAR0_WI32(chan->ramin->gpuobj, i, 0);
+
+ /* VM page directory */
+ if ((ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
+ 0x4000, 0, &chan->vm_pd, NULL)))
+ return ret;
+ for (i = 0; i < 0x4000; i += 8) {
+ BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
+ BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
+ }
+
+ /* PRAMIN page table, cheat and map into VM at 0x0000000000.
+ * We map the entire fake channel into the start of the PRAMIN BAR
+ */
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
+ 0, &priv->pramin_pt)))
+ return ret;
+
+ for (i = 0, v = c_offset; i < pt_size; i+=8, v+=0x1000) {
+ if (v < (c_offset + c_size))
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);
+ else
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009);
+ BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
+ }
+
+ BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
+ BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
+
+ /* DMA object for PRAMIN BAR */
+ if ((ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
+ &priv->pramin_bar)))
+ return ret;
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin->size - 1);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
+ BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);
+
+ /* Poke the relevant regs, and pray it works :) */
+ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
+ NV_WRITE(NV50_PUNK_UNK1710, 0);
+ NV_WRITE(NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
+ NV50_PUNK_BAR_CFG_BASE_VALID);
+ NV_WRITE(NV50_PUNK_BAR1_CTXDMA, 0);
+ NV_WRITE(NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
+ NV50_PUNK_BAR3_CTXDMA_VALID);
+
+ /* Assume that praying isn't enough, check that we can re-read the
+ * entire fake channel back from the PRAMIN BAR */
+ for (i = 0; i < c_size; i+=4) {
+ if (NV_READ(NV_RAMIN + i) != NV_RI32(i)) {
+ DRM_ERROR("Error reading back PRAMIN at 0x%08x\n", i);
+ return -EINVAL;
+ }
+ }
+
+ /* Global PRAMIN heap */
+ if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
+ c_size, dev_priv->ramin->size - c_size)) {
+ dev_priv->ramin_heap = NULL;
+ DRM_ERROR("Failed to init RAMIN heap\n");
+ }
+
+ /*XXX: incorrect, but needed to make hash func "work" */
+ dev_priv->ramht_offset = 0x10000;
+ dev_priv->ramht_bits = 9;
+ dev_priv->ramht_size = (1 << dev_priv->ramht_bits);
+ return 0;
+}
+
+void
+nv50_instmem_takedown(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
+ struct nouveau_channel *chan = dev_priv->fifos[0];
+ int i;
+
+ DRM_DEBUG("\n");
+
+ if (!priv)
+ return;
+
+ /* Restore state from before init */
+ for (i = 0x1700; i <= 0x1710; i+=4)
+ NV_WRITE(i, priv->save1700[(i-0x1700)/4]);
+
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
+ nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);
+
+ /* Destroy dummy channel */
+ if (chan) {
+ nouveau_gpuobj_del(dev, &chan->vm_pd);
+ nouveau_gpuobj_ref_del(dev, &chan->ramfc);
+ nouveau_gpuobj_ref_del(dev, &chan->ramin);
+ nouveau_mem_takedown(&chan->ramin_heap);
+
+ dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
+ drm_free(chan, sizeof(*chan), DRM_MEM_DRIVER);
+ }
+
+ dev_priv->Engine.instmem.priv = NULL;
+ drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);
+}
+
+int
+nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj, uint32_t *sz)
+{
+ if (gpuobj->im_backing)
+ return -EINVAL;
+
+ *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+ if (*sz == 0)
+ return -EINVAL;
+
+ gpuobj->im_backing = nouveau_mem_alloc(dev, NV50_INSTMEM_PAGE_SIZE,
+ *sz, NOUVEAU_MEM_FB,
+ (struct drm_file *)-2);
+ if (!gpuobj->im_backing) {
+ DRM_ERROR("Couldn't allocate vram to back PRAMIN pages\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void
+nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ if (gpuobj && gpuobj->im_backing) {
+ if (gpuobj->im_bound)
+ dev_priv->Engine.instmem.unbind(dev, gpuobj);
+ nouveau_mem_free(dev, gpuobj->im_backing);
+ gpuobj->im_backing = NULL;
+ }
+}
+
+int
+nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
+ uint32_t pte, pte_end, vram;
+
+ if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
+ return -EINVAL;
+
+ DRM_DEBUG("st=0x%0llx sz=0x%0llx\n",
+ gpuobj->im_pramin->start, gpuobj->im_pramin->size);
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ vram = gpuobj->im_backing->start;
+
+ DRM_DEBUG("pramin=0x%llx, pte=%d, pte_end=%d\n",
+ gpuobj->im_pramin->start, pte, pte_end);
+ DRM_DEBUG("first vram page: 0x%llx\n",
+ gpuobj->im_backing->start);
+
+ while (pte < pte_end) {
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, vram | 1);
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+
+ pte += 8;
+ vram += NV50_INSTMEM_PAGE_SIZE;
+ }
+
+ gpuobj->im_bound = 1;
+ return 0;
+}
+
+int
+nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ nv50_instmem_priv *priv = dev_priv->Engine.instmem.priv;
+ uint32_t pte, pte_end;
+
+ if (gpuobj->im_bound == 0)
+ return -EINVAL;
+
+ pte = (gpuobj->im_pramin->start >> 12) << 3;
+ pte_end = ((gpuobj->im_pramin->size >> 12) << 3) + pte;
+ while (pte < pte_end) {
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 0)/4, 0x00000009);
+ INSTANCE_WR(priv->pramin_pt->gpuobj, (pte + 4)/4, 0x00000000);
+ pte += 8;
+ }
+
+ gpuobj->im_bound = 0;
+ return 0;
+}
+
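
nv50_instmem.c above maps PRAMIN through an 8-byte-per-page page table: NV50_INSTMEM_PT_SIZE() allots one PTE per 4 KiB page, the low word of a mapped entry is the backing VRAM address OR'd with 1, unmapped entries get 0x00000009, and the high word is zero. The sketch below reproduces only that arithmetic; the aperture size, mapped size, and VRAM base are invented example values.

/*
 * Standalone sketch of the PRAMIN page-table arithmetic used by
 * nv50_instmem_init()/nv50_instmem_bind() above.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT_NV50   12
#define PT_SIZE(aperture) (((aperture) >> PAGE_SHIFT_NV50) << 3)

int main(void)
{
	uint64_t aperture = 16ull << 20;	/* pretend 16 MiB of PRAMIN */
	uint64_t mapped   = 8ull << 10;		/* pretend 2 pages are backed */
	uint64_t vram     = 0x0ff00000;		/* example VRAM base */
	uint64_t pt_bytes = PT_SIZE(aperture);
	uint64_t pte, v;

	printf("page table: %llu bytes (%llu PTEs)\n",
	       (unsigned long long)pt_bytes,
	       (unsigned long long)(pt_bytes >> 3));

	/* dump the first few entries the init loop would write */
	for (pte = 0, v = vram; pte < 5 * 8; pte += 8, v += 0x1000) {
		uint32_t lo = (v - vram) < mapped ? (uint32_t)(v | 1)
						  : 0x00000009;
		printf("PTE[%llu] = 0x%08x 0x%08x\n",
		       (unsigned long long)(pte >> 3), lo, 0u);
	}
	return 0;
}
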
diff --git a/shared-core/nv50_mc.c b/shared-core/nv50_mc.c
index 7f7537f0..b111826b 100644
--- a/shared-core/nv50_mc.c
+++ b/shared-core/nv50_mc.c
@@ -29,14 +29,15 @@
#include "nouveau_drv.h"
int
-nv50_mc_init(drm_device_t *dev)
+nv50_mc_init(struct drm_device *dev)
{
- drm_nouveau_private_t *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF);
+
return 0;
}
-void nv50_mc_takedown(drm_device_t *dev)
+void nv50_mc_takedown(struct drm_device *dev)
{
}
diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c
index a2ee18b7..5bed45bc 100644
--- a/shared-core/r128_cce.c
+++ b/shared-core/r128_cce.c
@@ -81,7 +81,7 @@ static u32 r128_cce_microcode[] = {
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};
-static int R128_READ_PLL(drm_device_t * dev, int addr)
+static int R128_READ_PLL(struct drm_device * dev, int addr)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -129,7 +129,7 @@ static int r128_do_pixcache_flush(drm_r128_private_t * dev_priv)
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
@@ -146,7 +146,7 @@ static int r128_do_wait_for_fifo(drm_r128_private_t * dev_priv, int entries)
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
@@ -168,7 +168,7 @@ static int r128_do_wait_for_idle(drm_r128_private_t * dev_priv)
#if R128_FIFO_DEBUG
DRM_ERROR("failed!\n");
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/* ================================================================
@@ -227,7 +227,7 @@ int r128_do_cce_idle(drm_r128_private_t * dev_priv)
DRM_ERROR("failed!\n");
r128_status(dev_priv);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/* Start the Concurrent Command Engine.
@@ -271,7 +271,7 @@ static void r128_do_cce_stop(drm_r128_private_t * dev_priv)
/* Reset the engine. This will stop the CCE if it is running.
*/
-static int r128_do_engine_reset(drm_device_t * dev)
+static int r128_do_engine_reset(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, gen_reset_cntl;
@@ -308,7 +308,7 @@ static int r128_do_engine_reset(drm_device_t * dev)
return 0;
}
-static void r128_cce_init_ring_buffer(drm_device_t * dev,
+static void r128_cce_init_ring_buffer(struct drm_device * dev,
drm_r128_private_t * dev_priv)
{
u32 ring_start;
@@ -347,7 +347,7 @@ static void r128_cce_init_ring_buffer(drm_device_t * dev,
R128_WRITE(R128_BUS_CNTL, tmp);
}
-static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
+static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
{
drm_r128_private_t *dev_priv;
@@ -355,7 +355,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
dev_priv = drm_alloc(sizeof(drm_r128_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_r128_private_t));
@@ -365,7 +365,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("PCI GART memory not allocated!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->usec_timeout = init->usec_timeout;
@@ -374,7 +374,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_DEBUG("TIMEOUT problem!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->cce_mode = init->cce_mode;
@@ -394,7 +394,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_DEBUG("Bad cce_mode!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
switch (init->cce_mode) {
@@ -461,7 +461,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("could not find sarea!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->mmio = drm_core_findmap(dev, init->mmio_offset);
@@ -469,21 +469,21 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("could not find mmio region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->cce_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cce_ring) {
DRM_ERROR("could not find cce ring region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
@@ -491,7 +491,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("could not find dma buffer region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (!dev_priv->is_pci) {
@@ -501,7 +501,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("could not find agp texture region!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -520,7 +520,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("Could not ioremap agp regions!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
} else
#endif
@@ -567,7 +567,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
DRM_ERROR("failed to init PCI GART!\n");
dev->dev_private = (void *)dev_priv;
r128_do_cleanup_cce(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
R128_WRITE(R128_PCI_GART_PAGE, dev_priv->gart_info.bus_addr);
#if __OS_HAS_AGP
@@ -584,7 +584,7 @@ static int r128_do_init_cce(drm_device_t * dev, drm_r128_init_t * init)
return 0;
}
-int r128_do_cleanup_cce(drm_device_t * dev)
+int r128_do_cleanup_cce(struct drm_device * dev)
{
/* Make sure interrupts are disabled here because the uninstall ioctl
@@ -623,35 +623,30 @@ int r128_do_cleanup_cce(drm_device_t * dev)
return 0;
}
-int r128_cce_init(DRM_IOCTL_ARGS)
+int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_r128_init_t init;
+ drm_r128_init_t *init = data;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(init, (drm_r128_init_t __user *) data,
- sizeof(init));
-
- switch (init.func) {
+ switch (init->func) {
case R128_INIT_CCE:
- return r128_do_init_cce(dev, &init);
+ return r128_do_init_cce(dev, init);
case R128_CLEANUP_CCE:
return r128_do_cleanup_cce(dev);
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
-int r128_cce_start(DRM_IOCTL_ARGS)
+int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) {
DRM_DEBUG("%s while CCE running\n", __FUNCTION__);
@@ -666,30 +661,26 @@ int r128_cce_start(DRM_IOCTL_ARGS)
/* Stop the CCE. The engine must have been idled before calling this
* routine.
*/
-int r128_cce_stop(DRM_IOCTL_ARGS)
+int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_cce_stop_t stop;
+ drm_r128_cce_stop_t *stop = data;
int ret;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(stop, (drm_r128_cce_stop_t __user *) data,
- sizeof(stop));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Flush any pending CCE commands. This ensures any outstanding
* commands are executed by the engine before we turn it off.
*/
- if (stop.flush) {
+ if (stop->flush) {
r128_do_cce_flush(dev_priv);
}
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
*/
- if (stop.idle) {
+ if (stop->idle) {
ret = r128_do_cce_idle(dev_priv);
if (ret)
return ret;
@@ -709,17 +700,16 @@ int r128_cce_stop(DRM_IOCTL_ARGS)
/* Just reset the CCE ring. Called as part of an X Server engine reset.
*/
-int r128_cce_reset(DRM_IOCTL_ARGS)
+int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_DEBUG("%s called before init done\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
r128_do_cce_reset(dev_priv);
@@ -730,13 +720,12 @@ int r128_cce_reset(DRM_IOCTL_ARGS)
return 0;
}
-int r128_cce_idle(DRM_IOCTL_ARGS)
+int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cce_running) {
r128_do_cce_flush(dev_priv);
@@ -745,19 +734,18 @@ int r128_cce_idle(DRM_IOCTL_ARGS)
return r128_do_cce_idle(dev_priv);
}
-int r128_engine_reset(DRM_IOCTL_ARGS)
+int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return r128_do_engine_reset(dev);
}
-int r128_fullscreen(DRM_IOCTL_ARGS)
+int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* ================================================================
@@ -767,18 +755,18 @@ int r128_fullscreen(DRM_IOCTL_ARGS)
#define R128_BUFFER_FREE 0
#if 0
-static int r128_freelist_init(drm_device_t * dev)
+static int r128_freelist_init(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_buf_t *buf;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
drm_r128_freelist_t *entry;
int i;
dev_priv->head = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
if (dev_priv->head == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(dev_priv->head, 0, sizeof(drm_r128_freelist_t));
dev_priv->head->age = R128_BUFFER_USED;
@@ -789,7 +777,7 @@ static int r128_freelist_init(drm_device_t * dev)
entry = drm_alloc(sizeof(drm_r128_freelist_t), DRM_MEM_DRIVER);
if (!entry)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
entry->age = R128_BUFFER_FREE;
entry->buf = buf;
@@ -813,12 +801,12 @@ static int r128_freelist_init(drm_device_t * dev)
}
#endif
-static drm_buf_t *r128_freelist_get(drm_device_t * dev)
+static struct drm_buf *r128_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i, t;
/* FIXME: Optimize -- use freelist code */
@@ -826,7 +814,7 @@ static drm_buf_t *r128_freelist_get(drm_device_t * dev)
for (i = 0; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
- if (buf->filp == 0)
+ if (buf->file_priv == 0)
return buf;
}
@@ -851,13 +839,13 @@ static drm_buf_t *r128_freelist_get(drm_device_t * dev)
return NULL;
}
-void r128_freelist_reset(drm_device_t * dev)
+void r128_freelist_reset(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int i;
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
@@ -881,68 +869,64 @@ int r128_wait_ring(drm_r128_private_t * dev_priv, int n)
/* FIXME: This is being ignored... */
DRM_ERROR("failed!\n");
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
-static int r128_cce_get_buffers(DRMFILE filp, drm_device_t * dev, drm_dma_t * d)
+static int r128_cce_get_buffers(struct drm_device * dev,
+ struct drm_file *file_priv,
+ struct drm_dma * d)
{
int i;
- drm_buf_t *buf;
+ struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = r128_freelist_get(dev);
if (!buf)
- return DRM_ERR(EAGAIN);
+ return -EAGAIN;
- buf->filp = filp;
+ buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
d->granted_count++;
}
return 0;
}
-int r128_cce_buffers(DRM_IOCTL_ARGS)
+int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
- drm_dma_t d;
+ struct drm_dma *d = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
- if (d.send_count != 0) {
+ if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d.send_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->send_count);
+ return -EINVAL;
}
/* We'll send you buffers.
*/
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d.request_count, dma->buf_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->request_count, dma->buf_count);
+ return -EINVAL;
}
- d.granted_count = 0;
+ d->granted_count = 0;
- if (d.request_count) {
- ret = r128_cce_get_buffers(filp, dev, &d);
+ if (d->request_count) {
+ ret = r128_cce_get_buffers(dev, file_priv, d);
}
- DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
-
return ret;
}
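
The r128_cce.c hunks above convert every ioctl handler from the old DRM_IOCTL_ARGS / DRM_COPY_FROM_USER_IOCTL pattern to the new (dev, data, file_priv) convention, in which the DRM core has already copied the argument block into kernel space before calling the handler. The sketch below shows only the shape of that convention; the struct bodies are cut-down stand-ins for the real DRM types, and example_cce_stop is a hypothetical handler modeled on r128_cce_stop, not the driver's code.

/*
 * Minimal illustration of the new ioctl handler calling convention.
 * The dispatcher passes a pre-copied argument block as "data", so the
 * handler just casts it instead of copying from user space itself.
 */
#include <stddef.h>

struct drm_device   { void *dev_private; };	/* stub for the real type */
struct drm_file     { int unused; };		/* stub for the real type */
struct example_stop { int flush, idle; };	/* stands in for drm_r128_cce_stop_t */

static int example_cce_stop(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	struct example_stop *stop = data;

	(void)dev; (void)file_priv;
	if (stop->flush) {
		/* would flush pending CCE commands here */
	}
	if (stop->idle) {
		/* would idle the engine here; may fail with -EBUSY */
	}
	return 0;
}

int main(void)
{
	struct drm_device dev = { 0 };
	struct drm_file file = { 0 };
	struct example_stop args = { 1, 1 };

	return example_cce_stop(&dev, &args, &file);
}

The same shape applies to the other converted handlers (r128_cce_init, r128_cce_buffers, the clear/flip/swap paths below): locking still goes through LOCK_TEST_WITH_RETURN(dev, file_priv), and errors are returned as plain negative errno values instead of DRM_ERR().
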
diff --git a/shared-core/r128_drm.h b/shared-core/r128_drm.h
index 6e8af313..8d8878b5 100644
--- a/shared-core/r128_drm.h
+++ b/shared-core/r128_drm.h
@@ -153,7 +153,7 @@ typedef struct drm_r128_sarea {
/* The current cliprects, or a subset thereof.
*/
- drm_clip_rect_t boxes[R128_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[R128_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@@ -161,7 +161,7 @@ typedef struct drm_r128_sarea {
unsigned int last_frame;
unsigned int last_dispatch;
- drm_tex_region_t tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
+ struct drm_tex_region tex_list[R128_NR_TEX_HEAPS][R128_NR_TEX_REGIONS + 1];
unsigned int tex_age[R128_NR_TEX_HEAPS];
int ctx_owner;
int pfAllowPageFlip; /* number of 3d windows (0,1,2 or more) */
@@ -222,11 +222,7 @@ typedef struct drm_r128_init {
R128_INIT_CCE = 0x01,
R128_CLEANUP_CCE = 0x02
} func;
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
- int sarea_priv_offset;
-#else
unsigned long sarea_priv_offset;
-#endif
int is_pci;
int cce_mode;
int cce_secure;
@@ -240,21 +236,12 @@ typedef struct drm_r128_init {
unsigned int depth_offset, depth_pitch;
unsigned int span_offset;
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
- unsigned int fb_offset;
- unsigned int mmio_offset;
- unsigned int ring_offset;
- unsigned int ring_rptr_offset;
- unsigned int buffers_offset;
- unsigned int agp_textures_offset;
-#else
unsigned long fb_offset;
unsigned long mmio_offset;
unsigned long ring_offset;
unsigned long ring_rptr_offset;
unsigned long buffers_offset;
unsigned long agp_textures_offset;
-#endif
} drm_r128_init_t;
typedef struct drm_r128_cce_stop {
@@ -264,15 +251,10 @@ typedef struct drm_r128_cce_stop {
typedef struct drm_r128_clear {
unsigned int flags;
-#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0)
- int x, y, w, h;
-#endif
unsigned int clear_color;
unsigned int clear_depth;
-#if CONFIG_XFREE86_VERSION >= XFREE86_VERSION(4,1,0,0)
unsigned int color_mask;
unsigned int depth_mask;
-#endif
} drm_r128_clear_t;
typedef struct drm_r128_vertex {
diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h
index 90868356..abb99898 100644
--- a/shared-core/r128_drv.h
+++ b/shared-core/r128_drv.h
@@ -57,7 +57,7 @@
typedef struct drm_r128_freelist {
unsigned int age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
struct drm_r128_freelist *next;
struct drm_r128_freelist *prev;
} drm_r128_freelist_t;
@@ -118,7 +118,7 @@ typedef struct drm_r128_private {
drm_local_map_t *cce_ring;
drm_local_map_t *ring_rptr;
drm_local_map_t *agp_textures;
- drm_ati_pcigart_info gart_info;
+ struct ati_pcigart_info gart_info;
} drm_r128_private_t;
typedef struct drm_r128_buf_priv {
@@ -129,34 +129,35 @@ typedef struct drm_r128_buf_priv {
drm_r128_freelist_t *list_entry;
} drm_r128_buf_priv_t;
-extern drm_ioctl_desc_t r128_ioctls[];
+extern struct drm_ioctl_desc r128_ioctls[];
extern int r128_max_ioctl;
/* r128_cce.c */
-extern int r128_cce_init(DRM_IOCTL_ARGS);
-extern int r128_cce_start(DRM_IOCTL_ARGS);
-extern int r128_cce_stop(DRM_IOCTL_ARGS);
-extern int r128_cce_reset(DRM_IOCTL_ARGS);
-extern int r128_cce_idle(DRM_IOCTL_ARGS);
-extern int r128_engine_reset(DRM_IOCTL_ARGS);
-extern int r128_fullscreen(DRM_IOCTL_ARGS);
-extern int r128_cce_buffers(DRM_IOCTL_ARGS);
+extern int r128_cce_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int r128_cce_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern void r128_freelist_reset(drm_device_t * dev);
+extern void r128_freelist_reset(struct drm_device * dev);
extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
-extern int r128_do_cleanup_cce(drm_device_t * dev);
+extern int r128_do_cleanup_cce(struct drm_device * dev);
-extern int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
-extern void r128_driver_irq_preinstall(drm_device_t * dev);
-extern void r128_driver_irq_postinstall(drm_device_t * dev);
-extern void r128_driver_irq_uninstall(drm_device_t * dev);
-extern void r128_driver_lastclose(drm_device_t * dev);
-extern void r128_driver_preclose(drm_device_t * dev, DRMFILE filp);
+extern void r128_driver_irq_preinstall(struct drm_device * dev);
+extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern void r128_driver_irq_uninstall(struct drm_device * dev);
+extern void r128_driver_lastclose(struct drm_device * dev);
+extern void r128_driver_preclose(struct drm_device * dev,
+ struct drm_file *file_priv);
extern long r128_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
@@ -428,7 +429,7 @@ do { \
DRM_UDELAY(1); \
} \
DRM_ERROR( "ring space check failed!\n" ); \
- return DRM_ERR(EBUSY); \
+ return -EBUSY; \
} \
__ring_space_done: \
; \
diff --git a/shared-core/r128_irq.c b/shared-core/r128_irq.c
index 87f8ca2b..c76fdca7 100644
--- a/shared-core/r128_irq.c
+++ b/shared-core/r128_irq.c
@@ -37,7 +37,7 @@
irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
int status;
@@ -54,7 +54,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_NONE;
}
-int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
unsigned int cur_vblank;
int ret = 0;
@@ -72,7 +72,7 @@ int r128_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
return ret;
}
-void r128_driver_irq_preinstall(drm_device_t * dev)
+void r128_driver_irq_preinstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
@@ -82,7 +82,7 @@ void r128_driver_irq_preinstall(drm_device_t * dev)
R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
}
-void r128_driver_irq_postinstall(drm_device_t * dev)
+void r128_driver_irq_postinstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
@@ -90,7 +90,7 @@ void r128_driver_irq_postinstall(drm_device_t * dev)
R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
}
-void r128_driver_irq_uninstall(drm_device_t * dev)
+void r128_driver_irq_uninstall(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
if (!dev_priv)
diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c
index 17b11e7d..b7f483ca 100644
--- a/shared-core/r128_state.c
+++ b/shared-core/r128_state.c
@@ -38,7 +38,7 @@
*/
static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,
- drm_clip_rect_t * boxes, int count)
+ struct drm_clip_rect * boxes, int count)
{
u32 aux_sc_cntl = 0x00000000;
RING_LOCALS;
@@ -352,13 +352,13 @@ static void r128_print_dirty(const char *msg, unsigned int flags)
(flags & R128_REQUIRE_QUIESCENCE) ? "quiescence, " : "");
}
-static void r128_cce_dispatch_clear(drm_device_t * dev,
+static void r128_cce_dispatch_clear(struct drm_device * dev,
drm_r128_clear_t * clear)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
unsigned int flags = clear->flags;
int i;
RING_LOCALS;
@@ -458,12 +458,12 @@ static void r128_cce_dispatch_clear(drm_device_t * dev,
}
}
-static void r128_cce_dispatch_swap(drm_device_t * dev)
+static void r128_cce_dispatch_swap(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
DRM_DEBUG("%s\n", __FUNCTION__);
@@ -524,7 +524,7 @@ static void r128_cce_dispatch_swap(drm_device_t * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_flip(drm_device_t * dev)
+static void r128_cce_dispatch_flip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -567,7 +567,7 @@ static void r128_cce_dispatch_flip(drm_device_t * dev)
ADVANCE_RING();
}
-static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
+static void r128_cce_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -637,8 +637,8 @@ static void r128_cce_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
sarea_priv->nbox = 0;
}
-static void r128_cce_dispatch_indirect(drm_device_t * dev,
- drm_buf_t * buf, int start, int end)
+static void r128_cce_dispatch_indirect(struct drm_device * dev,
+ struct drm_buf * buf, int start, int end)
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_buf_priv_t *buf_priv = buf->dev_private;
@@ -692,8 +692,8 @@ static void r128_cce_dispatch_indirect(drm_device_t * dev,
dev_priv->sarea_priv->last_dispatch++;
}
-static void r128_cce_dispatch_indices(drm_device_t * dev,
- drm_buf_t * buf,
+static void r128_cce_dispatch_indices(struct drm_device * dev,
+ struct drm_buf * buf,
int start, int end, int count)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -776,12 +776,13 @@ static void r128_cce_dispatch_indices(drm_device_t * dev,
sarea_priv->nbox = 0;
}
-static int r128_cce_dispatch_blit(DRMFILE filp,
- drm_device_t * dev, drm_r128_blit_t * blit)
+static int r128_cce_dispatch_blit(struct drm_device * dev,
+ struct drm_file *file_priv,
+ drm_r128_blit_t * blit)
{
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
u32 *data;
int dword_shift, dwords;
@@ -809,7 +810,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
break;
default:
DRM_ERROR("invalid blit format %d\n", blit->format);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* Flush the pixel cache, and mark the contents as Read Invalid.
@@ -829,14 +830,14 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
buf = dma->buflist[blit->idx];
buf_priv = buf->dev_private;
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
DRM_ERROR("sending pending buffer %d\n", blit->idx);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
buf_priv->discard = 1;
@@ -887,7 +888,7 @@ static int r128_cce_dispatch_blit(DRMFILE filp,
* have hardware stencil support.
*/
-static int r128_cce_dispatch_write_span(drm_device_t * dev,
+static int r128_cce_dispatch_write_span(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -900,22 +901,22 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev,
count = depth->n;
if (count > 4096 || count <= 0)
- return DRM_ERR(EMSGSIZE);
+ return -EMSGSIZE;
if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
buffer_size = depth->n * sizeof(u32);
buffer = drm_alloc(buffer_size, DRM_MEM_BUFS);
if (buffer == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
mask_size = depth->n * sizeof(u8);
@@ -923,12 +924,12 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev,
mask = drm_alloc(mask_size, DRM_MEM_BUFS);
if (mask == NULL) {
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
drm_free(mask, mask_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
for (i = 0; i < count; i++, x++) {
@@ -983,7 +984,7 @@ static int r128_cce_dispatch_write_span(drm_device_t * dev,
return 0;
}
-static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
+static int r128_cce_dispatch_write_pixels(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -996,28 +997,28 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
count = depth->n;
if (count > 4096 || count <= 0)
- return DRM_ERR(EMSGSIZE);
+ return -EMSGSIZE;
xbuf_size = count * sizeof(*x);
ybuf_size = count * sizeof(*y);
x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
if (x == NULL) {
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
if (y == NULL) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (DRM_COPY_FROM_USER(y, depth->y, xbuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
buffer_size = depth->n * sizeof(u32);
@@ -1025,13 +1026,13 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
if (buffer == NULL) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (DRM_COPY_FROM_USER(buffer, depth->buffer, buffer_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (depth->mask) {
@@ -1041,14 +1042,14 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (DRM_COPY_FROM_USER(mask, depth->mask, mask_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
drm_free(buffer, buffer_size, DRM_MEM_BUFS);
drm_free(mask, mask_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
for (i = 0; i < count; i++) {
@@ -1105,7 +1106,7 @@ static int r128_cce_dispatch_write_pixels(drm_device_t * dev,
return 0;
}
-static int r128_cce_dispatch_read_span(drm_device_t * dev,
+static int r128_cce_dispatch_read_span(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1115,13 +1116,13 @@ static int r128_cce_dispatch_read_span(drm_device_t * dev,
count = depth->n;
if (count > 4096 || count <= 0)
- return DRM_ERR(EMSGSIZE);
+ return -EMSGSIZE;
if (DRM_COPY_FROM_USER(&x, depth->x, sizeof(x))) {
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (DRM_COPY_FROM_USER(&y, depth->y, sizeof(y))) {
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
BEGIN_RING(7);
@@ -1148,7 +1149,7 @@ static int r128_cce_dispatch_read_span(drm_device_t * dev,
return 0;
}
-static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
+static int r128_cce_dispatch_read_pixels(struct drm_device * dev,
drm_r128_depth_t * depth)
{
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1159,7 +1160,7 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
count = depth->n;
if (count > 4096 || count <= 0)
- return DRM_ERR(EMSGSIZE);
+ return -EMSGSIZE;
if (count > dev_priv->depth_pitch) {
count = dev_priv->depth_pitch;
@@ -1169,22 +1170,22 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
ybuf_size = count * sizeof(*y);
x = drm_alloc(xbuf_size, DRM_MEM_BUFS);
if (x == NULL) {
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
y = drm_alloc(ybuf_size, DRM_MEM_BUFS);
if (y == NULL) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (DRM_COPY_FROM_USER(x, depth->x, xbuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (DRM_COPY_FROM_USER(y, depth->y, ybuf_size)) {
drm_free(x, xbuf_size, DRM_MEM_BUFS);
drm_free(y, ybuf_size, DRM_MEM_BUFS);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
for (i = 0; i < count; i++) {
@@ -1220,7 +1221,7 @@ static int r128_cce_dispatch_read_pixels(drm_device_t * dev,
* Polygon stipple
*/
-static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
+static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)
{
drm_r128_private_t *dev_priv = dev->dev_private;
int i;
@@ -1241,25 +1242,21 @@ static void r128_cce_dispatch_stipple(drm_device_t * dev, u32 * stipple)
* IOCTL functions
*/
-static int r128_cce_clear(DRM_IOCTL_ARGS)
+static int r128_cce_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_r128_clear_t clear;
+ drm_r128_clear_t *clear = data;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(clear, (drm_r128_clear_t __user *) data,
- sizeof(clear));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
if (sarea_priv->nbox > R128_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = R128_NR_SAREA_CLIPRECTS;
- r128_cce_dispatch_clear(dev, &clear);
+ r128_cce_dispatch_clear(dev, clear);
COMMIT_RING();
/* Make sure we restore the 3D state next time.
@@ -1269,7 +1266,7 @@ static int r128_cce_clear(DRM_IOCTL_ARGS)
return 0;
}
-static int r128_do_init_pageflip(drm_device_t * dev)
+static int r128_do_init_pageflip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1288,7 +1285,7 @@ static int r128_do_init_pageflip(drm_device_t * dev)
return 0;
}
-static int r128_do_cleanup_pageflip(drm_device_t * dev)
+static int r128_do_cleanup_pageflip(struct drm_device * dev)
{
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1309,13 +1306,12 @@ static int r128_do_cleanup_pageflip(drm_device_t * dev)
* They can & should be intermixed to support multiple 3d windows.
*/
-static int r128_cce_flip(DRM_IOCTL_ARGS)
+static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -1328,14 +1324,13 @@ static int r128_cce_flip(DRM_IOCTL_ARGS)
return 0;
}
-static int r128_cce_swap(DRM_IOCTL_ARGS)
+static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -1350,58 +1345,54 @@ static int r128_cce_swap(DRM_IOCTL_ARGS)
return 0;
}
-static int r128_cce_vertex(DRM_IOCTL_ARGS)
+static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
- drm_r128_vertex_t vertex;
+ drm_r128_vertex_t *vertex = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(vertex, (drm_r128_vertex_t __user *) data,
- sizeof(vertex));
-
DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
- DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);
+ DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
- if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- vertex.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ vertex->idx, dma->buf_count - 1);
+ return -EINVAL;
}
- if (vertex.prim < 0 ||
- vertex.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
- DRM_ERROR("buffer prim %d\n", vertex.prim);
- return DRM_ERR(EINVAL);
+ if (vertex->prim < 0 ||
+ vertex->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
+ DRM_ERROR("buffer prim %d\n", vertex->prim);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- buf = dma->buflist[vertex.idx];
+ buf = dma->buflist[vertex->idx];
buf_priv = buf->dev_private;
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", vertex.idx);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+ return -EINVAL;
}
- buf->used = vertex.count;
- buf_priv->prim = vertex.prim;
- buf_priv->discard = vertex.discard;
+ buf->used = vertex->count;
+ buf_priv->prim = vertex->prim;
+ buf_priv->discard = vertex->discard;
r128_cce_dispatch_vertex(dev, buf);
@@ -1409,134 +1400,123 @@ static int r128_cce_vertex(DRM_IOCTL_ARGS)
return 0;
}
-static int r128_cce_indices(DRM_IOCTL_ARGS)
+static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
- drm_r128_indices_t elts;
+ drm_r128_indices_t *elts = data;
int count;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(elts, (drm_r128_indices_t __user *) data,
- sizeof(elts));
-
DRM_DEBUG("pid=%d buf=%d s=%d e=%d d=%d\n", DRM_CURRENTPID,
- elts.idx, elts.start, elts.end, elts.discard);
+ elts->idx, elts->start, elts->end, elts->discard);
- if (elts.idx < 0 || elts.idx >= dma->buf_count) {
+ if (elts->idx < 0 || elts->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- elts.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ elts->idx, dma->buf_count - 1);
+ return -EINVAL;
}
- if (elts.prim < 0 || elts.prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
- DRM_ERROR("buffer prim %d\n", elts.prim);
- return DRM_ERR(EINVAL);
+ if (elts->prim < 0 ||
+ elts->prim > R128_CCE_VC_CNTL_PRIM_TYPE_TRI_TYPE2) {
+ DRM_ERROR("buffer prim %d\n", elts->prim);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- buf = dma->buflist[elts.idx];
+ buf = dma->buflist[elts->idx];
buf_priv = buf->dev_private;
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", elts.idx);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("sending pending buffer %d\n", elts->idx);
+ return -EINVAL;
}
- count = (elts.end - elts.start) / sizeof(u16);
- elts.start -= R128_INDEX_PRIM_OFFSET;
+ count = (elts->end - elts->start) / sizeof(u16);
+ elts->start -= R128_INDEX_PRIM_OFFSET;
- if (elts.start & 0x7) {
- DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
- return DRM_ERR(EINVAL);
+ if (elts->start & 0x7) {
+ DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+ return -EINVAL;
}
- if (elts.start < buf->used) {
- DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
- return DRM_ERR(EINVAL);
+ if (elts->start < buf->used) {
+ DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+ return -EINVAL;
}
- buf->used = elts.end;
- buf_priv->prim = elts.prim;
- buf_priv->discard = elts.discard;
+ buf->used = elts->end;
+ buf_priv->prim = elts->prim;
+ buf_priv->discard = elts->discard;
- r128_cce_dispatch_indices(dev, buf, elts.start, elts.end, count);
+ r128_cce_dispatch_indices(dev, buf, elts->start, elts->end, count);
COMMIT_RING();
return 0;
}
-static int r128_cce_blit(DRM_IOCTL_ARGS)
+static int r128_cce_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_blit_t blit;
+ drm_r128_blit_t *blit = data;
int ret;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(blit, (drm_r128_blit_t __user *) data,
- sizeof(blit));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit.idx);
+ DRM_DEBUG("pid=%d index=%d\n", DRM_CURRENTPID, blit->idx);
- if (blit.idx < 0 || blit.idx >= dma->buf_count) {
+ if (blit->idx < 0 || blit->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- blit.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ blit->idx, dma->buf_count - 1);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- ret = r128_cce_dispatch_blit(filp, dev, &blit);
+ ret = r128_cce_dispatch_blit(dev, file_priv, blit);
COMMIT_RING();
return ret;
}
-static int r128_cce_depth(DRM_IOCTL_ARGS)
+static int r128_cce_depth(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_depth_t depth;
+ drm_r128_depth_t *depth = data;
int ret;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(depth, (drm_r128_depth_t __user *) data,
- sizeof(depth));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
- ret = DRM_ERR(EINVAL);
- switch (depth.func) {
+ ret = -EINVAL;
+ switch (depth->func) {
case R128_WRITE_SPAN:
- ret = r128_cce_dispatch_write_span(dev, &depth);
+ ret = r128_cce_dispatch_write_span(dev, depth);
break;
case R128_WRITE_PIXELS:
- ret = r128_cce_dispatch_write_pixels(dev, &depth);
+ ret = r128_cce_dispatch_write_pixels(dev, depth);
break;
case R128_READ_SPAN:
- ret = r128_cce_dispatch_read_span(dev, &depth);
+ ret = r128_cce_dispatch_read_span(dev, depth);
break;
case R128_READ_PIXELS:
- ret = r128_cce_dispatch_read_pixels(dev, &depth);
+ ret = r128_cce_dispatch_read_pixels(dev, depth);
break;
}
@@ -1544,20 +1524,16 @@ static int r128_cce_depth(DRM_IOCTL_ARGS)
return ret;
}
-static int r128_cce_stipple(DRM_IOCTL_ARGS)
+static int r128_cce_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_stipple_t stipple;
+ drm_r128_stipple_t *stipple = data;
u32 mask[32];
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(stipple, (drm_r128_stipple_t __user *) data,
- sizeof(stipple));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
- return DRM_ERR(EFAULT);
+ if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+ return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -1567,61 +1543,58 @@ static int r128_cce_stipple(DRM_IOCTL_ARGS)
return 0;
}
-static int r128_cce_indirect(DRM_IOCTL_ARGS)
+static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_r128_buf_priv_t *buf_priv;
- drm_r128_indirect_t indirect;
+ drm_r128_indirect_t *indirect = data;
#if 0
RING_LOCALS;
#endif
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(indirect, (drm_r128_indirect_t __user *) data,
- sizeof(indirect));
-
DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
- indirect.idx, indirect.start, indirect.end, indirect.discard);
+ indirect->idx, indirect->start, indirect->end,
+ indirect->discard);
- if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
+ if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- indirect.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ indirect->idx, dma->buf_count - 1);
+ return -EINVAL;
}
- buf = dma->buflist[indirect.idx];
+ buf = dma->buflist[indirect->idx];
buf_priv = buf->dev_private;
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", indirect.idx);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+ return -EINVAL;
}
- if (indirect.start < buf->used) {
+ if (indirect->start < buf->used) {
DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
- indirect.start, buf->used);
- return DRM_ERR(EINVAL);
+ indirect->start, buf->used);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- buf->used = indirect.end;
- buf_priv->discard = indirect.discard;
+ buf->used = indirect->end;
+ buf_priv->discard = indirect->discard;
#if 0
/* Wait for the 3D stream to idle before the indirect buffer
@@ -1636,46 +1609,42 @@ static int r128_cce_indirect(DRM_IOCTL_ARGS)
* X server. This is insecure and is thus only available to
* privileged clients.
*/
- r128_cce_dispatch_indirect(dev, buf, indirect.start, indirect.end);
+ r128_cce_dispatch_indirect(dev, buf, indirect->start, indirect->end);
COMMIT_RING();
return 0;
}
-static int r128_getparam(DRM_IOCTL_ARGS)
+static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_r128_private_t *dev_priv = dev->dev_private;
- drm_r128_getparam_t param;
+ drm_r128_getparam_t *param = data;
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(param, (drm_r128_getparam_t __user *) data,
- sizeof(param));
-
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
- switch (param.param) {
+ switch (param->param) {
case R128_PARAM_IRQ_NR:
value = dev->irq;
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
}
-void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void r128_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_r128_private_t *dev_priv = dev->dev_private;
@@ -1685,29 +1654,29 @@ void r128_driver_preclose(drm_device_t * dev, DRMFILE filp)
}
}
-void r128_driver_lastclose(drm_device_t * dev)
+void r128_driver_lastclose(struct drm_device * dev)
{
r128_do_cleanup_cce(dev);
}
-drm_ioctl_desc_t r128_ioctls[] = {
- [DRM_IOCTL_NR(DRM_R128_INIT)] = {r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_R128_CCE_START)] = {r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_R128_CCE_STOP)] = {r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_R128_CCE_RESET)] = {r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_R128_CCE_IDLE)] = {r128_cce_idle, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_RESET)] = {r128_engine_reset, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_FULLSCREEN)] = {r128_fullscreen, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_SWAP)] = {r128_cce_swap, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_FLIP)] = {r128_cce_flip, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_CLEAR)] = {r128_cce_clear, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_VERTEX)] = {r128_cce_vertex, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_INDICES)] = {r128_cce_indices, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_BLIT)] = {r128_cce_blit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_DEPTH)] = {r128_cce_depth, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_STIPPLE)] = {r128_cce_stipple, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_R128_INDIRECT)] = {r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_R128_GETPARAM)] = {r128_getparam, DRM_AUTH},
+struct drm_ioctl_desc r128_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_R128_INIT, r128_cce_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_START, r128_cce_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_STOP, r128_cce_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_RESET, r128_cce_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_CCE_IDLE, r128_cce_idle, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_RESET, r128_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_FULLSCREEN, r128_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_SWAP, r128_cce_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_FLIP, r128_cce_flip, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_CLEAR, r128_cce_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_VERTEX, r128_cce_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_INDICES, r128_cce_indices, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_BLIT, r128_cce_blit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_DEPTH, r128_cce_depth, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_STIPPLE, r128_cce_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_R128_INDIRECT, r128_cce_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_R128_GETPARAM, r128_getparam, DRM_AUTH),
};
int r128_max_ioctl = DRM_ARRAY_SIZE(r128_ioctls);
diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c
index 0cd5d7e2..fe46c2d2 100644
--- a/shared-core/r300_cmdbuf.c
+++ b/shared-core/r300_cmdbuf.c
@@ -55,7 +55,7 @@ static const int r300_cliprect_cntl[4] = {
static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf, int n)
{
- drm_clip_rect_t box;
+ struct drm_clip_rect box;
int nr;
int i;
RING_LOCALS;
@@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,
if (DRM_COPY_FROM_USER_UNCHECKED
(&box, &cmdbuf->boxes[n + i], sizeof(box))) {
DRM_ERROR("copy cliprect faulted\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
box.x1 =
@@ -263,7 +263,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
DRM_ERROR
("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
reg, sz);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
for (i = 0; i < sz; i++) {
values[i] = ((int *)cmdbuf->buf)[i];
@@ -275,13 +275,13 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
DRM_ERROR
("Offset failed range check (reg=%04x sz=%d)\n",
reg, sz);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n",
reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -317,12 +317,12 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
return 0;
if (sz * 4 > cmdbuf->bufsz)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
if (reg + sz * 4 >= 0x10000) {
DRM_ERROR("No such registers in hardware reg=%04x sz=%d\n", reg,
sz);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (r300_check_range(reg, sz)) {
@@ -362,7 +362,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
if (sz * 16 > cmdbuf->bufsz)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
BEGIN_RING(5 + sz * 4);
/* Wait for VAP to come to senses.. */
@@ -391,7 +391,7 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
RING_LOCALS;
if (8 * 4 > cmdbuf->bufsz)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
@@ -421,7 +421,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
memset(payload, 0, MAX_ARRAY_PACKET * 4);
memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
@@ -437,7 +437,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
k++;
i++;
@@ -448,7 +448,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
k++;
i++;
@@ -458,7 +458,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
DRM_ERROR
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
k, i, narrays, count + 1);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* all clear, output packet */
@@ -492,7 +492,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -502,7 +502,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -530,12 +530,12 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv,
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
ret = !radeon_check_offset(dev_priv, cmd[2]);
if (ret) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
BEGIN_RING(count+2);
@@ -557,7 +557,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
RING_LOCALS;
if (4 > cmdbuf->bufsz)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
@@ -568,7 +568,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
/* Is it packet 3 ? */
if ((header >> 30) != 0x3) {
DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
count = (header >> 16) & 0x3fff;
@@ -578,7 +578,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
(count + 2) * 4, cmdbuf->bufsz);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* Is it a packet type we know about ? */
@@ -600,7 +600,7 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
break;
default:
DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
BEGIN_RING(count + 2);
@@ -664,7 +664,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
DRM_ERROR("bad packet3 type %i at %p\n",
header.packet3.packet,
cmdbuf->buf - sizeof(header));
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
n += R300_SIMULTANEOUS_CLIPRECTS;
@@ -706,7 +706,7 @@ static __inline__ void r300_pacify(drm_radeon_private_t *dev_priv)
* The actual age emit is done by r300_do_cp_cmdbuf, which is why you must
* be careful about how this function is called.
*/
-static void r300_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
+static void r300_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
@@ -725,11 +725,11 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
RING_LOCALS;
if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (header.scratch.reg >= 5) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->scratch_ages[header.scratch.reg] ++;
@@ -744,21 +744,21 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
buf_idx *= 2; /* 8 bytes per buf */
if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (h_pending == 0) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
h_pending--;
if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
cmdbuf->buf += sizeof(buf_idx);
@@ -778,14 +778,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
* commands on the DMA ring buffer.
* Called by the ioctl handler function radeon_cp_cmdbuf.
*/
-int r300_do_cp_cmdbuf(drm_device_t *dev,
- DRMFILE filp,
- drm_file_t *filp_priv,
+int r300_do_cp_cmdbuf(struct drm_device *dev,
+ struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = NULL;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf = NULL;
int emit_dispatch_age = 0;
int ret = 0;
@@ -878,15 +877,16 @@ int r300_do_cp_cmdbuf(drm_device_t *dev,
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
- ret = DRM_ERR(EINVAL);
+ ret = -EINVAL;
goto cleanup;
}
buf = dma->buflist[idx];
- if (buf->filp != filp || buf->pending) {
+ if (buf->file_priv != file_priv || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
- buf->filp, filp, buf->pending);
- ret = DRM_ERR(EINVAL);
+ buf->file_priv, file_priv,
+ buf->pending);
+ ret = -EINVAL;
goto cleanup;
}
@@ -923,7 +923,7 @@ int r300_do_cp_cmdbuf(drm_device_t *dev,
DRM_ERROR("bad cmd_type %i at %p\n",
header.header.cmd_type,
cmdbuf->buf - sizeof(header));
- ret = DRM_ERR(EINVAL);
+ ret = -EINVAL;
goto cleanup;
}
}
diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c
index ec2e688b..06861381 100644
--- a/shared-core/radeon_cp.c
+++ b/shared-core/radeon_cp.c
@@ -36,7 +36,7 @@
#define RADEON_FIFO_DEBUG 0
-static int radeon_do_cleanup_cp(drm_device_t * dev);
+static int radeon_do_cleanup_cp(struct drm_device * dev);
/* CP microcode (from ATI) */
static const u32 R200_cp_microcode[][2] = {
@@ -816,7 +816,7 @@ static const u32 R300_cp_microcode[][2] = {
{ 0000000000, 0000000000 },
};
-static int RADEON_READ_PLL(drm_device_t * dev, int addr)
+static int RADEON_READ_PLL(struct drm_device * dev, int addr)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -889,7 +889,7 @@ static int radeon_do_pixcache_flush(drm_radeon_private_t * dev_priv)
DRM_ERROR("failed!\n");
radeon_status(dev_priv);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
@@ -910,7 +910,7 @@ static int radeon_do_wait_for_fifo(drm_radeon_private_t * dev_priv, int entries)
DRM_ERROR("failed!\n");
radeon_status(dev_priv);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
@@ -936,7 +936,7 @@ static int radeon_do_wait_for_idle(drm_radeon_private_t * dev_priv)
DRM_ERROR("failed!\n");
radeon_status(dev_priv);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/* ================================================================
@@ -1066,7 +1066,7 @@ static void radeon_do_cp_stop(drm_radeon_private_t * dev_priv)
/* Reset the engine. This will stop the CP if it is running.
*/
-static int radeon_do_engine_reset(drm_device_t * dev)
+static int radeon_do_engine_reset(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
u32 clock_cntl_index, mclk_cntl, rbbm_soft_reset;
@@ -1122,7 +1122,7 @@ static int radeon_do_engine_reset(drm_device_t * dev)
return 0;
}
-static void radeon_cp_init_ring_buffer(drm_device_t * dev,
+static void radeon_cp_init_ring_buffer(struct drm_device * dev,
drm_radeon_private_t * dev_priv)
{
u32 ring_start, cur_read_ptr;
@@ -1174,7 +1174,7 @@ static void radeon_cp_init_ring_buffer(drm_device_t * dev,
} else
#endif
{
- drm_sg_mem_t *entry = dev->sg;
+ struct drm_sg_mem *entry = dev->sg;
unsigned long tmp_ofs, page_ofs;
tmp_ofs = dev_priv->ring_rptr->offset -
@@ -1390,7 +1390,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
}
}
-static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
+static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1400,7 +1400,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if ((dev_priv->flags & RADEON_NEW_MEMMAP) && !dev_priv->new_memmap) {
DRM_ERROR("Cannot initialise DRM on this card\nThis card requires a new X.org DDX for 3D\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (init->is_pci && (dev_priv->flags & RADEON_IS_AGP))
@@ -1418,7 +1418,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if ((!(dev_priv->flags & RADEON_IS_AGP)) && !dev->sg) {
DRM_ERROR("PCI GART memory not allocated!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->usec_timeout = init->usec_timeout;
@@ -1426,7 +1426,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
dev_priv->usec_timeout > RADEON_MAX_USEC_TIMEOUT) {
DRM_DEBUG("TIMEOUT problem!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* Enable vblank on CRTC1 for older X servers
@@ -1455,7 +1455,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
(init->cp_mode != RADEON_CSQ_PRIBM_INDBM)) {
DRM_DEBUG("BAD cp_mode (%x)!\n", init->cp_mode);
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
switch (init->fb_bpp) {
@@ -1524,27 +1524,27 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->cp_ring = drm_core_findmap(dev, init->ring_offset);
if (!dev_priv->cp_ring) {
DRM_ERROR("could not find cp ring region!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->ring_rptr = drm_core_findmap(dev, init->ring_rptr_offset);
if (!dev_priv->ring_rptr) {
DRM_ERROR("could not find ring read pointer!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev->agp_buffer_token = init->buffers_offset;
dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find dma buffer region!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (init->gart_textures_offset) {
@@ -1553,7 +1553,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
if (!dev_priv->gart_textures) {
DRM_ERROR("could not find GART texture region!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -1571,7 +1571,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
!dev->agp_buffer_map->handle) {
DRM_ERROR("could not find ioremap agp regions!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
} else
#endif
@@ -1725,14 +1725,14 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
DRM_ERROR
("Cannot use PCI Express without GART in FB memory\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
if (!drm_ati_pcigart_init(dev, &dev_priv->gart_info)) {
DRM_ERROR("failed to init PCI GART!\n");
radeon_do_cleanup_cp(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
/* Turn on PCI GART */
@@ -1750,7 +1750,7 @@ static int radeon_do_init_cp(drm_device_t * dev, drm_radeon_init_t * init)
return 0;
}
-static int radeon_do_cleanup_cp(drm_device_t * dev)
+static int radeon_do_cleanup_cp(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
@@ -1806,13 +1806,13 @@ static int radeon_do_cleanup_cp(drm_device_t * dev)
*
* Charl P. Botha <http://cpbotha.net>
*/
-static int radeon_do_resume_cp(drm_device_t * dev)
+static int radeon_do_resume_cp(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
if (!dev_priv) {
DRM_ERROR("Called with no initialization\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
DRM_DEBUG("Starting radeon_do_resume_cp()\n");
@@ -1838,38 +1838,33 @@ static int radeon_do_resume_cp(drm_device_t * dev)
return 0;
}
-int radeon_cp_init(DRM_IOCTL_ARGS)
+int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_radeon_init_t init;
+ drm_radeon_init_t *init = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(init, (drm_radeon_init_t __user *) data,
- sizeof(init));
-
- if (init.func == RADEON_INIT_R300_CP)
+ if (init->func == RADEON_INIT_R300_CP)
r300_init_reg_flags();
- switch (init.func) {
+ switch (init->func) {
case RADEON_INIT_CP:
case RADEON_INIT_R200_CP:
case RADEON_INIT_R300_CP:
- return radeon_do_init_cp(dev, &init);
+ return radeon_do_init_cp(dev, init);
case RADEON_CLEANUP_CP:
return radeon_do_cleanup_cp(dev);
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
-int radeon_cp_start(DRM_IOCTL_ARGS)
+int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dev_priv->cp_running) {
DRM_DEBUG("%s while CP running\n", __FUNCTION__);
@@ -1889,18 +1884,14 @@ int radeon_cp_start(DRM_IOCTL_ARGS)
/* Stop the CP. The engine must have been idled before calling this
* routine.
*/
-int radeon_cp_stop(DRM_IOCTL_ARGS)
+int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_cp_stop_t stop;
+ drm_radeon_cp_stop_t *stop = data;
int ret;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(stop, (drm_radeon_cp_stop_t __user *) data,
- sizeof(stop));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv->cp_running)
return 0;
@@ -1908,14 +1899,14 @@ int radeon_cp_stop(DRM_IOCTL_ARGS)
/* Flush any pending CP commands. This ensures any outstanding
* commands are executed by the engine before we turn it off.
*/
- if (stop.flush) {
+ if (stop->flush) {
radeon_do_cp_flush(dev_priv);
}
/* If we fail to make the engine go idle, we return an error
* code so that the DRM ioctl wrapper can try again.
*/
- if (stop.idle) {
+ if (stop->idle) {
ret = radeon_do_cp_idle(dev_priv);
if (ret)
return ret;
@@ -1933,7 +1924,7 @@ int radeon_cp_stop(DRM_IOCTL_ARGS)
return 0;
}
-void radeon_do_release(drm_device_t * dev)
+void radeon_do_release(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i, ret;
@@ -1983,17 +1974,16 @@ void radeon_do_release(drm_device_t * dev)
/* Just reset the CP ring. Called as part of an X Server engine reset.
*/
-int radeon_cp_reset(DRM_IOCTL_ARGS)
+int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_DEBUG("%s called before init done\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
radeon_do_cp_reset(dev_priv);
@@ -2004,32 +1994,29 @@ int radeon_cp_reset(DRM_IOCTL_ARGS)
return 0;
}
-int radeon_cp_idle(DRM_IOCTL_ARGS)
+int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return radeon_do_cp_idle(dev_priv);
}
/* Added by Charl P. Botha to call radeon_do_resume_cp().
*/
-int radeon_cp_resume(DRM_IOCTL_ARGS)
+int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
return radeon_do_resume_cp(dev);
}
-int radeon_engine_reset(DRM_IOCTL_ARGS)
+int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return radeon_do_engine_reset(dev);
}
@@ -2040,7 +2027,7 @@ int radeon_engine_reset(DRM_IOCTL_ARGS)
/* KW: Deprecated to say the least:
*/
-int radeon_fullscreen(DRM_IOCTL_ARGS)
+int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return 0;
}
@@ -2066,12 +2053,12 @@ int radeon_fullscreen(DRM_IOCTL_ARGS)
* they can't get the lock.
*/
-drm_buf_t *radeon_freelist_get(drm_device_t * dev)
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i, t;
int start;
@@ -2086,8 +2073,9 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev)
for (i = start; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
- if (buf->filp == 0 || (buf->pending &&
- buf_priv->age <= done_age)) {
+ if (buf->file_priv == NULL || (buf->pending &&
+ buf_priv->age <=
+ done_age)) {
dev_priv->stats.requested_bufs++;
buf->pending = 0;
return buf;
@@ -2106,12 +2094,12 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev)
}
#if 0
-drm_buf_t *radeon_freelist_get(drm_device_t * dev)
+struct drm_buf *radeon_freelist_get(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i, t;
int start;
u32 done_age = DRM_READ32(dev_priv->ring_rptr, RADEON_SCRATCHOFF(1));
@@ -2126,8 +2114,9 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev)
for (i = start; i < dma->buf_count; i++) {
buf = dma->buflist[i];
buf_priv = buf->dev_private;
- if (buf->filp == 0 || (buf->pending &&
- buf_priv->age <= done_age)) {
+ if (buf->file_priv == 0 || (buf->pending &&
+ buf_priv->age <=
+ done_age)) {
dev_priv->stats.requested_bufs++;
buf->pending = 0;
return buf;
@@ -2140,15 +2129,15 @@ drm_buf_t *radeon_freelist_get(drm_device_t * dev)
}
#endif
-void radeon_freelist_reset(drm_device_t * dev)
+void radeon_freelist_reset(struct drm_device * dev)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_radeon_private_t *dev_priv = dev->dev_private;
int i;
dev_priv->last_buf = 0;
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
buf_priv->age = 0;
}
@@ -2187,70 +2176,65 @@ int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n)
radeon_status(dev_priv);
DRM_ERROR("failed!\n");
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
-static int radeon_cp_get_buffers(DRMFILE filp, drm_device_t * dev,
- drm_dma_t * d)
+static int radeon_cp_get_buffers(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_dma * d)
{
int i;
- drm_buf_t *buf;
+ struct drm_buf *buf;
for (i = d->granted_count; i < d->request_count; i++) {
buf = radeon_freelist_get(dev);
if (!buf)
- return DRM_ERR(EBUSY); /* NOTE: broken client */
+ return -EBUSY; /* NOTE: broken client */
- buf->filp = filp;
+ buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i], &buf->idx,
sizeof(buf->idx)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i], &buf->total,
sizeof(buf->total)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
d->granted_count++;
}
return 0;
}
-int radeon_cp_buffers(DRM_IOCTL_ARGS)
+int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
int ret = 0;
- drm_dma_t __user *argp = (void __user *)data;
- drm_dma_t d;
+ struct drm_dma *d = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(d, argp, sizeof(d));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
- if (d.send_count != 0) {
+ if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d.send_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->send_count);
+ return -EINVAL;
}
/* We'll send you buffers.
*/
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d.request_count, dma->buf_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->request_count, dma->buf_count);
+ return -EINVAL;
}
- d.granted_count = 0;
+ d->granted_count = 0;
- if (d.request_count) {
- ret = radeon_cp_get_buffers(filp, dev, &d);
+ if (d->request_count) {
+ ret = radeon_cp_get_buffers(dev, file_priv, d);
}
- DRM_COPY_TO_USER_IOCTL(argp, d, sizeof(d));
-
return ret;
}
@@ -2261,7 +2245,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv = drm_alloc(sizeof(drm_radeon_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_radeon_private_t));
dev->dev_private = (void *)dev_priv;
diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h
index 6a57b804..b0ef702b 100644
--- a/shared-core/radeon_drm.h
+++ b/shared-core/radeon_drm.h
@@ -417,7 +417,7 @@ typedef struct {
/* The current cliprects, or a subset thereof.
*/
- drm_clip_rect_t boxes[RADEON_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[RADEON_NR_SAREA_CLIPRECTS];
unsigned int nbox;
/* Counters for client-side throttling of rendering clients.
@@ -426,7 +426,7 @@ typedef struct {
unsigned int last_dispatch;
unsigned int last_clear;
- drm_tex_region_t tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
+ struct drm_tex_region tex_list[RADEON_NR_TEX_HEAPS][RADEON_NR_TEX_REGIONS +
1];
unsigned int tex_age[RADEON_NR_TEX_HEAPS];
int ctx_owner;
@@ -604,7 +604,7 @@ typedef struct drm_radeon_cmd_buffer {
int bufsz;
char __user *buf;
int nbox;
- drm_clip_rect_t __user *boxes;
+ struct drm_clip_rect __user *boxes;
} drm_radeon_cmd_buffer_t;
typedef struct drm_radeon_tex_image {
diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h
index 92a9b65e..006559df 100644
--- a/shared-core/radeon_drv.h
+++ b/shared-core/radeon_drv.h
@@ -156,7 +156,7 @@ enum radeon_chip_flags {
typedef struct drm_radeon_freelist {
unsigned int age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
struct drm_radeon_freelist *next;
struct drm_radeon_freelist *prev;
} drm_radeon_freelist_t;
@@ -195,7 +195,7 @@ struct mem_block {
struct mem_block *prev;
int start;
int size;
- DRMFILE filp; /* 0: free, -1: heap, other: real files */
+ struct drm_file *file_priv; /* NULL: free, -1: heap, other: real files */
};
struct radeon_surface {
@@ -210,7 +210,7 @@ struct radeon_virt_surface {
u32 lower;
u32 upper;
u32 flags;
- DRMFILE filp;
+ struct drm_file *file_priv;
};
typedef struct drm_radeon_private {
@@ -295,7 +295,7 @@ typedef struct drm_radeon_private {
unsigned long pcigart_offset;
unsigned int pcigart_offset_set;
- drm_ati_pcigart_info gart_info;
+ struct ati_pcigart_info gart_info;
u32 scratch_ages[5];
@@ -312,11 +312,11 @@ typedef struct drm_radeon_kcmd_buffer {
int bufsz;
char *buf;
int nbox;
- drm_clip_rect_t __user *boxes;
+ struct drm_clip_rect __user *boxes;
} drm_radeon_kcmd_buffer_t;
extern int radeon_no_wb;
-extern drm_ioctl_desc_t radeon_ioctls[];
+extern struct drm_ioctl_desc radeon_ioctls[];
extern int radeon_max_ioctl;
/* Check whether the given hardware address is inside the framebuffer or the
@@ -335,60 +335,64 @@ static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv,
}
/* radeon_cp.c */
-extern int radeon_cp_init(DRM_IOCTL_ARGS);
-extern int radeon_cp_start(DRM_IOCTL_ARGS);
-extern int radeon_cp_stop(DRM_IOCTL_ARGS);
-extern int radeon_cp_reset(DRM_IOCTL_ARGS);
-extern int radeon_cp_idle(DRM_IOCTL_ARGS);
-extern int radeon_cp_resume(DRM_IOCTL_ARGS);
-extern int radeon_engine_reset(DRM_IOCTL_ARGS);
-extern int radeon_fullscreen(DRM_IOCTL_ARGS);
-extern int radeon_cp_buffers(DRM_IOCTL_ARGS);
-
-extern void radeon_freelist_reset(drm_device_t * dev);
-extern drm_buf_t *radeon_freelist_get(drm_device_t * dev);
+extern int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_stop(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_idle(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
+
+extern void radeon_freelist_reset(struct drm_device * dev);
+extern struct drm_buf *radeon_freelist_get(struct drm_device * dev);
extern int radeon_wait_ring(drm_radeon_private_t * dev_priv, int n);
extern int radeon_do_cp_idle(drm_radeon_private_t * dev_priv);
-extern int radeon_mem_alloc(DRM_IOCTL_ARGS);
-extern int radeon_mem_free(DRM_IOCTL_ARGS);
-extern int radeon_mem_init_heap(DRM_IOCTL_ARGS);
+extern int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern void radeon_mem_takedown(struct mem_block **heap);
-extern void radeon_mem_release(DRMFILE filp, struct mem_block *heap);
+extern void radeon_mem_release(struct drm_file *file_priv,
+ struct mem_block *heap);
/* radeon_irq.c */
-extern int radeon_irq_emit(DRM_IOCTL_ARGS);
-extern int radeon_irq_wait(DRM_IOCTL_ARGS);
+extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
-extern void radeon_do_release(drm_device_t * dev);
-extern int radeon_driver_vblank_wait(drm_device_t * dev,
+extern void radeon_do_release(struct drm_device * dev);
+extern int radeon_driver_vblank_wait(struct drm_device * dev,
unsigned int *sequence);
-extern int radeon_driver_vblank_wait2(drm_device_t * dev,
+extern int radeon_driver_vblank_wait2(struct drm_device * dev,
unsigned int *sequence);
extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
-extern void radeon_driver_irq_preinstall(drm_device_t * dev);
-extern void radeon_driver_irq_postinstall(drm_device_t * dev);
-extern void radeon_driver_irq_uninstall(drm_device_t * dev);
-extern int radeon_vblank_crtc_get(drm_device_t *dev);
-extern int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value);
+extern void radeon_driver_irq_preinstall(struct drm_device * dev);
+extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern void radeon_driver_irq_uninstall(struct drm_device * dev);
+extern int radeon_vblank_crtc_get(struct drm_device *dev);
+extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
extern int radeon_driver_unload(struct drm_device *dev);
extern int radeon_driver_firstopen(struct drm_device *dev);
-extern void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp);
-extern void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp);
-extern void radeon_driver_lastclose(drm_device_t * dev);
-extern int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv);
+extern void radeon_driver_preclose(struct drm_device * dev,
+ struct drm_file *file_priv);
+extern void radeon_driver_postclose(struct drm_device * dev,
+ struct drm_file *file_priv);
+extern void radeon_driver_lastclose(struct drm_device * dev);
+extern int radeon_driver_open(struct drm_device * dev,
+ struct drm_file * file_priv);
extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
/* r300_cmdbuf.c */
extern void r300_init_reg_flags(void);
-extern int r300_do_cp_cmdbuf(drm_device_t *dev, DRMFILE filp,
- drm_file_t* filp_priv,
+extern int r300_do_cp_cmdbuf(struct drm_device *dev,
+ struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t* cmdbuf);
/* Flags for stats.boxes
diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c
index a4be86e3..1ece6399 100644
--- a/shared-core/radeon_irq.c
+++ b/shared-core/radeon_irq.c
@@ -64,7 +64,7 @@ static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
u32 stat;
@@ -109,7 +109,7 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
return IRQ_HANDLED;
}
-static int radeon_emit_irq(drm_device_t * dev)
+static int radeon_emit_irq(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
unsigned int ret;
@@ -127,7 +127,7 @@ static int radeon_emit_irq(drm_device_t * dev)
return ret;
}
-static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
+static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -144,8 +144,9 @@ static int radeon_wait_irq(drm_device_t * dev, int swi_nr)
return ret;
}
-int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence,
- int crtc)
+static int radeon_driver_vblank_do_wait(struct drm_device * dev,
+ unsigned int *sequence,
+ int crtc)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -155,7 +156,7 @@ int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence,
atomic_t *counter;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (crtc == DRM_RADEON_VBLANK_CRTC1) {
@@ -165,7 +166,7 @@ int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence,
counter = &dev->vbl_received2;
ack |= RADEON_CRTC2_VBLANK_STAT;
} else
- return DRM_ERR(EINVAL);
+ return -EINVAL;
radeon_acknowledge_irqs(dev_priv, ack);
@@ -184,40 +185,36 @@ int radeon_driver_vblank_do_wait(drm_device_t * dev, unsigned int *sequence,
return ret;
}
-int radeon_driver_vblank_wait(drm_device_t *dev, unsigned int *sequence)
+int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
}
-int radeon_driver_vblank_wait2(drm_device_t *dev, unsigned int *sequence)
+int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
}
/* Needs the lock as it touches the ring.
*/
-int radeon_irq_emit(DRM_IOCTL_ARGS)
+int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_irq_emit_t emit;
+ drm_radeon_irq_emit_t *emit = data;
int result;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(emit, (drm_radeon_irq_emit_t __user *) data,
- sizeof(emit));
-
result = radeon_emit_irq(dev);
- if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
+ if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
@@ -225,24 +222,20 @@ int radeon_irq_emit(DRM_IOCTL_ARGS)
/* Doesn't need the hardware lock.
*/
-int radeon_irq_wait(DRM_IOCTL_ARGS)
+int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_irq_wait_t irqwait;
+ drm_radeon_irq_wait_t *irqwait = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_radeon_irq_wait_t __user *) data,
- sizeof(irqwait));
-
- return radeon_wait_irq(dev, irqwait.irq_seq);
+ return radeon_wait_irq(dev, irqwait->irq_seq);
}
-static void radeon_enable_interrupt(drm_device_t *dev)
+static void radeon_enable_interrupt(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
@@ -259,7 +252,7 @@ static void radeon_enable_interrupt(drm_device_t *dev)
/* drm_dma.h hooks
*/
-void radeon_driver_irq_preinstall(drm_device_t * dev)
+void radeon_driver_irq_preinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -273,7 +266,7 @@ void radeon_driver_irq_preinstall(drm_device_t * dev)
RADEON_CRTC2_VBLANK_STAT));
}
-void radeon_driver_irq_postinstall(drm_device_t * dev)
+void radeon_driver_irq_postinstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -284,7 +277,7 @@ void radeon_driver_irq_postinstall(drm_device_t * dev)
radeon_enable_interrupt(dev);
}
-void radeon_driver_irq_uninstall(drm_device_t * dev)
+void radeon_driver_irq_uninstall(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv =
(drm_radeon_private_t *) dev->dev_private;
@@ -298,7 +291,7 @@ void radeon_driver_irq_uninstall(drm_device_t * dev)
}
-int radeon_vblank_crtc_get(drm_device_t *dev)
+int radeon_vblank_crtc_get(struct drm_device *dev)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
u32 flag;
@@ -315,12 +308,12 @@ int radeon_vblank_crtc_get(drm_device_t *dev)
return value;
}
-int radeon_vblank_crtc_set(drm_device_t *dev, int64_t value)
+int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
{
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->vblank_crtc = (unsigned int)value;
radeon_enable_interrupt(dev);
diff --git a/shared-core/radeon_mem.c b/shared-core/radeon_mem.c
index dbc91c9f..9947e940 100644
--- a/shared-core/radeon_mem.c
+++ b/shared-core/radeon_mem.c
@@ -39,7 +39,7 @@
*/
static struct mem_block *split_block(struct mem_block *p, int start, int size,
- DRMFILE filp)
+ struct drm_file *file_priv)
{
/* Maybe cut off the start of an existing block */
if (start > p->start) {
@@ -49,7 +49,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start;
newblock->size = p->size - (start - p->start);
- newblock->filp = NULL;
+ newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@@ -66,7 +66,7 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
goto out;
newblock->start = start + size;
newblock->size = p->size - size;
- newblock->filp = NULL;
+ newblock->file_priv = NULL;
newblock->next = p->next;
newblock->prev = p;
p->next->prev = newblock;
@@ -76,20 +76,20 @@ static struct mem_block *split_block(struct mem_block *p, int start, int size,
out:
/* Our block is in the middle */
- p->filp = filp;
+ p->file_priv = file_priv;
return p;
}
static struct mem_block *alloc_block(struct mem_block *heap, int size,
- int align2, DRMFILE filp)
+ int align2, struct drm_file *file_priv)
{
struct mem_block *p;
int mask = (1 << align2) - 1;
list_for_each(p, heap) {
int start = (p->start + mask) & ~mask;
- if (p->filp == 0 && start + size <= p->start + p->size)
- return split_block(p, start, size, filp);
+ if (p->file_priv == 0 && start + size <= p->start + p->size)
+ return split_block(p, start, size, file_priv);
}
return NULL;
@@ -108,12 +108,12 @@ static struct mem_block *find_block(struct mem_block *heap, int start)
static void free_block(struct mem_block *p)
{
- p->filp = NULL;
+ p->file_priv = NULL;
- /* Assumes a single contiguous range. Needs a special filp in
+ /* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
- if (p->next->filp == 0) {
+ if (p->next->file_priv == 0) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@@ -121,7 +121,7 @@ static void free_block(struct mem_block *p)
drm_free(q, sizeof(*q), DRM_MEM_BUFS);
}
- if (p->prev->filp == 0) {
+ if (p->prev->file_priv == 0) {
struct mem_block *q = p->prev;
q->size += p->size;
q->next = p->next;
@@ -137,28 +137,28 @@ static int init_heap(struct mem_block **heap, int start, int size)
struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS);
if (!blocks)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
*heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS);
if (!*heap) {
drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
blocks->start = start;
blocks->size = size;
- blocks->filp = NULL;
+ blocks->file_priv = NULL;
blocks->next = blocks->prev = *heap;
memset(*heap, 0, sizeof(**heap));
- (*heap)->filp = (DRMFILE) - 1;
+ (*heap)->file_priv = (struct drm_file *) - 1;
(*heap)->next = (*heap)->prev = blocks;
return 0;
}
/* Free all blocks associated with the releasing file.
*/
-void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
+void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
{
struct mem_block *p;
@@ -166,15 +166,15 @@ void radeon_mem_release(DRMFILE filp, struct mem_block *heap)
return;
list_for_each(p, heap) {
- if (p->filp == filp)
- p->filp = NULL;
+ if (p->file_priv == file_priv)
+ p->file_priv = NULL;
}
- /* Assumes a single contiguous range. Needs a special filp in
+ /* Assumes a single contiguous range. Needs a special file_priv in
* 'heap' to stop it being subsumed.
*/
list_for_each(p, heap) {
- while (p->filp == 0 && p->next->filp == 0) {
+ while (p->file_priv == 0 && p->next->file_priv == 0) {
struct mem_block *q = p->next;
p->size += q->size;
p->next = q->next;
@@ -217,98 +217,86 @@ static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
}
}
-int radeon_mem_alloc(DRM_IOCTL_ARGS)
+int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_mem_alloc_t alloc;
+ drm_radeon_mem_alloc_t *alloc = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(alloc, (drm_radeon_mem_alloc_t __user *) data,
- sizeof(alloc));
-
- heap = get_heap(dev_priv, alloc.region);
+ heap = get_heap(dev_priv, alloc->region);
if (!heap || !*heap)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
/* Make things easier on ourselves: all allocations at least
* 4k aligned.
*/
- if (alloc.alignment < 12)
- alloc.alignment = 12;
+ if (alloc->alignment < 12)
+ alloc->alignment = 12;
- block = alloc_block(*heap, alloc.size, alloc.alignment, filp);
+ block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
if (!block)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
- if (DRM_COPY_TO_USER(alloc.region_offset, &block->start, sizeof(int))) {
+ if (DRM_COPY_TO_USER(alloc->region_offset, &block->start,
+ sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
}
-int radeon_mem_free(DRM_IOCTL_ARGS)
+int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_mem_free_t memfree;
+ drm_radeon_mem_free_t *memfree = data;
struct mem_block *block, **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_mem_free_t __user *) data,
- sizeof(memfree));
-
- heap = get_heap(dev_priv, memfree.region);
+ heap = get_heap(dev_priv, memfree->region);
if (!heap || !*heap)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
- block = find_block(*heap, memfree.region_offset);
+ block = find_block(*heap, memfree->region_offset);
if (!block)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
- if (block->filp != filp)
- return DRM_ERR(EPERM);
+ if (block->file_priv != file_priv)
+ return -EPERM;
free_block(block);
return 0;
}
-int radeon_mem_init_heap(DRM_IOCTL_ARGS)
+int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_mem_init_heap_t initheap;
+ drm_radeon_mem_init_heap_t *initheap = data;
struct mem_block **heap;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(initheap,
- (drm_radeon_mem_init_heap_t __user *) data,
- sizeof(initheap));
-
- heap = get_heap(dev_priv, initheap.region);
+ heap = get_heap(dev_priv, initheap->region);
if (!heap)
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if (*heap) {
DRM_ERROR("heap already initialized?");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
- return init_heap(heap, initheap.start, initheap.size);
+ return init_heap(heap, initheap->start, initheap->size);
}
diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c
index 8ccd0981..ac7f6011 100644
--- a/shared-core/radeon_state.c
+++ b/shared-core/radeon_state.c
@@ -39,7 +39,7 @@
static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file *file_priv,
u32 * offset)
{
u64 off = *offset;
@@ -71,7 +71,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
* magic offset we get from SETPARAM or calculated from fb_location
*/
if (off < (dev_priv->fb_size + dev_priv->gart_size)) {
- radeon_priv = filp_priv->driver_priv;
+ radeon_priv = file_priv->driver_priv;
off += radeon_priv->radeon_fb_delta;
}
@@ -85,29 +85,29 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
*offset = off;
return 0;
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file *file_priv,
int id, u32 *data)
{
switch (id) {
case RADEON_EMIT_PP_MISC:
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
DRM_ERROR("Invalid depth buffer offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case RADEON_EMIT_PP_CNTL:
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
DRM_ERROR("Invalid colour buffer offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
@@ -117,20 +117,20 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_TXOFFSET_3:
case R200_EMIT_PP_TXOFFSET_4:
case R200_EMIT_PP_TXOFFSET_5:
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&data[0])) {
DRM_ERROR("Invalid R200 texture offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case RADEON_EMIT_PP_TXFILTER_0:
case RADEON_EMIT_PP_TXFILTER_1:
case RADEON_EMIT_PP_TXFILTER_2:
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
DRM_ERROR("Invalid R100 texture offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
@@ -143,11 +143,11 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
int i;
for (i = 0; i < 5; i++) {
if (radeon_check_and_fixup_offset(dev_priv,
- filp_priv,
+ file_priv,
&data[i])) {
DRM_ERROR
("Invalid R200 cubic texture offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
break;
@@ -159,11 +159,11 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
int i;
for (i = 0; i < 5; i++) {
if (radeon_check_and_fixup_offset(dev_priv,
- filp_priv,
+ file_priv,
&data[i])) {
DRM_ERROR
("Invalid R100 cubic texture offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
}
@@ -256,7 +256,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
default:
DRM_ERROR("Unknown state packet ID %d\n", id);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
@@ -264,7 +264,7 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
dev_priv,
- drm_file_t *filp_priv,
+ struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *
cmdbuf,
unsigned int *cmdsz)
@@ -277,12 +277,12 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
DRM_ERROR("Not a type 3 packet\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (4 * *cmdsz > cmdbuf->bufsz) {
DRM_ERROR("Packet size larger than size of data provided\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
switch(cmd[0] & 0xff00) {
@@ -307,7 +307,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
/* safe but r200 only */
if (dev_priv->microcode_version != UCODE_R200) {
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
@@ -317,7 +317,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if (count > 18) { /* 12 arrays max */
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* carefully check packet contents */
@@ -326,22 +326,25 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
i = 2;
while ((k < narrays) && (i < (count + 2))) {
i++; /* skip attribute field */
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
+ &cmd[i])) {
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
k, i);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
k++;
i++;
if (k == narrays)
break;
/* have one more to process, they come in pairs */
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[i])) {
+ if (radeon_check_and_fixup_offset(dev_priv,
+ file_priv, &cmd[i]))
+ {
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
k, i);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
k++;
i++;
@@ -351,33 +354,33 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
DRM_ERROR
("Malformed 3D_LOAD_VBPNTR packet (k=%d i=%d narrays=%d count+1=%d).\n",
k, i, narrays, count + 1);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case RADEON_3D_RNDR_GEN_INDX_PRIM:
if (dev_priv->microcode_version != UCODE_R100) {
DRM_ERROR("Invalid 3d packet for r200-class chip\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[1])) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
DRM_ERROR("Invalid rndr_gen_indx offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case RADEON_CP_INDX_BUFFER:
if (dev_priv->microcode_version != UCODE_R200) {
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if ((cmd[1] & 0x8000ffff) != 0x80000810) {
DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &cmd[2])) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
@@ -389,9 +392,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[2] << 10;
if (radeon_check_and_fixup_offset
- (dev_priv, filp_priv, &offset)) {
+ (dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid first packet offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
}
@@ -400,9 +403,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
(cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
offset = cmd[3] << 10;
if (radeon_check_and_fixup_offset
- (dev_priv, filp_priv, &offset)) {
+ (dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid second packet offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
}
@@ -410,7 +413,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
default:
DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
@@ -421,7 +424,7 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
*/
static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
- drm_clip_rect_t * box)
+ struct drm_clip_rect * box)
{
RING_LOCALS;
@@ -439,7 +442,7 @@ static __inline__ void radeon_emit_clip_rect(drm_radeon_private_t * dev_priv,
/* Emit 1.1 state
*/
static int radeon_emit_state(drm_radeon_private_t * dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file *file_priv,
drm_radeon_context_regs_t * ctx,
drm_radeon_texture_regs_t * tex,
unsigned int dirty)
@@ -448,16 +451,16 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
DRM_DEBUG("dirty=0x%08x\n", dirty);
if (dirty & RADEON_UPLOAD_CONTEXT) {
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&ctx->rb3d_depthoffset)) {
DRM_ERROR("Invalid depth buffer offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&ctx->rb3d_coloroffset)) {
DRM_ERROR("Invalid depth buffer offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
BEGIN_RING(14);
@@ -543,10 +546,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
}
if (dirty & RADEON_UPLOAD_TEX0) {
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&tex[0].pp_txoffset)) {
DRM_ERROR("Invalid texture offset for unit 0\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
BEGIN_RING(9);
@@ -563,10 +566,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
}
if (dirty & RADEON_UPLOAD_TEX1) {
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&tex[1].pp_txoffset)) {
DRM_ERROR("Invalid texture offset for unit 1\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
BEGIN_RING(9);
@@ -583,10 +586,10 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
}
if (dirty & RADEON_UPLOAD_TEX2) {
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv,
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv,
&tex[2].pp_txoffset)) {
DRM_ERROR("Invalid texture offset for unit 2\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
BEGIN_RING(9);
@@ -608,7 +611,7 @@ static int radeon_emit_state(drm_radeon_private_t * dev_priv,
/* Emit 1.2 state
*/
static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file *file_priv,
drm_radeon_state_t * state)
{
RING_LOCALS;
@@ -621,7 +624,7 @@ static int radeon_emit_state2(drm_radeon_private_t * dev_priv,
ADVANCE_RING();
}
- return radeon_emit_state(dev_priv, filp_priv, &state->context,
+ return radeon_emit_state(dev_priv, file_priv, &state->context,
state->tex, state->dirty);
}
@@ -844,7 +847,7 @@ static void radeon_cp_performance_boxes(drm_radeon_private_t * dev_priv)
* CP command dispatch functions
*/
-static void radeon_cp_dispatch_clear(drm_device_t * dev,
+static void radeon_cp_dispatch_clear(struct drm_device * dev,
drm_radeon_clear_t * clear,
drm_radeon_clear_rect_t * depth_boxes)
{
@@ -852,7 +855,7 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
drm_radeon_depth_clear_t *depth_clear = &dev_priv->depth_clear;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
unsigned int flags = clear->flags;
u32 rb3d_cntl = 0, rb3d_stencilrefmask = 0;
int i;
@@ -1335,12 +1338,12 @@ static void radeon_cp_dispatch_clear(drm_device_t * dev,
ADVANCE_RING();
}
-static void radeon_cp_dispatch_swap(drm_device_t * dev)
+static void radeon_cp_dispatch_swap(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
int nbox = sarea_priv->nbox;
- drm_clip_rect_t *pbox = sarea_priv->boxes;
+ struct drm_clip_rect *pbox = sarea_priv->boxes;
int i;
RING_LOCALS;
DRM_DEBUG("\n");
@@ -1412,10 +1415,10 @@ static void radeon_cp_dispatch_swap(drm_device_t * dev)
ADVANCE_RING();
}
-static void radeon_cp_dispatch_flip(drm_device_t * dev)
+static void radeon_cp_dispatch_flip(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_sarea_t *sarea = (drm_sarea_t *) dev_priv->sarea->handle;
+ struct drm_sarea *sarea = (struct drm_sarea *) dev_priv->sarea->handle;
int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)
? dev_priv->front_offset : dev_priv->back_offset;
RING_LOCALS;
@@ -1491,8 +1494,8 @@ typedef struct {
unsigned int vc_format;
} drm_radeon_tcl_prim_t;
-static void radeon_cp_dispatch_vertex(drm_device_t * dev,
- drm_buf_t * buf,
+static void radeon_cp_dispatch_vertex(struct drm_device * dev,
+ struct drm_buf * buf,
drm_radeon_tcl_prim_t * prim)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1537,7 +1540,7 @@ static void radeon_cp_dispatch_vertex(drm_device_t * dev,
} while (i < nbox);
}
-static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
+static void radeon_cp_discard_buffer(struct drm_device * dev, struct drm_buf * buf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_buf_priv_t *buf_priv = buf->dev_private;
@@ -1554,8 +1557,8 @@ static void radeon_cp_discard_buffer(drm_device_t * dev, drm_buf_t * buf)
buf->used = 0;
}
-static void radeon_cp_dispatch_indirect(drm_device_t * dev,
- drm_buf_t * buf, int start, int end)
+static void radeon_cp_dispatch_indirect(struct drm_device * dev,
+ struct drm_buf * buf, int start, int end)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -1588,8 +1591,8 @@ static void radeon_cp_dispatch_indirect(drm_device_t * dev,
}
}
-static void radeon_cp_dispatch_indices(drm_device_t * dev,
- drm_buf_t * elt_buf,
+static void radeon_cp_dispatch_indices(struct drm_device * dev,
+ struct drm_buf * elt_buf,
drm_radeon_tcl_prim_t * prim)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -1646,14 +1649,13 @@ static void radeon_cp_dispatch_indices(drm_device_t * dev,
#define RADEON_MAX_TEXTURE_SIZE RADEON_BUFFER_SIZE
-static int radeon_cp_dispatch_texture(DRMFILE filp,
- drm_device_t * dev,
+static int radeon_cp_dispatch_texture(struct drm_device * dev,
+ struct drm_file *file_priv,
drm_radeon_texture_t * tex,
drm_radeon_tex_image_t * image)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
- drm_buf_t *buf;
+ struct drm_buf *buf;
u32 format;
u32 *buffer;
const u8 __user *data;
@@ -1664,11 +1666,9 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
u32 offset;
RING_LOCALS;
- DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
-
- if (radeon_check_and_fixup_offset(dev_priv, filp_priv, &tex->offset)) {
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, &tex->offset)) {
DRM_ERROR("Invalid destination offset\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->stats.boxes |= RADEON_BOX_TEXTURE_LOAD;
@@ -1711,11 +1711,11 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
break;
default:
DRM_ERROR("invalid texture format %d\n", tex->format);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
spitch = blit_width >> 6;
if (spitch == 0 && image->height > 1)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
texpitch = tex->pitch;
if ((texpitch << 22) & RADEON_DST_TILE_MICRO) {
@@ -1760,8 +1760,8 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
if (!buf) {
DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n");
if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))
- return DRM_ERR(EFAULT);
- return DRM_ERR(EAGAIN);
+ return -EFAULT;
+ return -EAGAIN;
}
/* Dispatch the indirect buffer.
@@ -1774,7 +1774,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
do { \
if (DRM_COPY_FROM_USER(_buf, _data, (_width))) {\
DRM_ERROR("EFAULT on pad, %d bytes\n", (_width)); \
- return DRM_ERR(EFAULT); \
+ return -EFAULT; \
} \
} while(0)
@@ -1841,7 +1841,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
}
#undef RADEON_COPY_MT
- buf->filp = filp;
+ buf->file_priv = file_priv;
buf->used = size;
offset = dev_priv->gart_buffers_offset + buf->offset;
BEGIN_RING(9);
@@ -1881,7 +1881,7 @@ static int radeon_cp_dispatch_texture(DRMFILE filp,
return 0;
}
-static void radeon_cp_dispatch_stipple(drm_device_t * dev, u32 * stipple)
+static void radeon_cp_dispatch_stipple(struct drm_device * dev, u32 * stipple)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
int i;
@@ -1929,7 +1929,8 @@ static void radeon_apply_surface_regs(int surf_index,
* not always be available.
*/
static int alloc_surface(drm_radeon_surface_alloc_t *new,
- drm_radeon_private_t *dev_priv, DRMFILE filp)
+ drm_radeon_private_t *dev_priv,
+ struct drm_file *file_priv)
{
struct radeon_virt_surface *s;
int i;
@@ -1959,7 +1960,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
/* find a virtual surface */
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++)
- if (dev_priv->virt_surfaces[i].filp == 0)
+ if (dev_priv->virt_surfaces[i].file_priv == 0)
break;
if (i == 2 * RADEON_MAX_SURFACES) {
return -1;
@@ -1977,7 +1978,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
s->lower = new_lower;
s->upper = new_upper;
s->flags = new->flags;
- s->filp = filp;
+ s->file_priv = file_priv;
dev_priv->surfaces[i].refcount++;
dev_priv->surfaces[i].lower = s->lower;
radeon_apply_surface_regs(s->surface_index, dev_priv);
@@ -1993,7 +1994,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
s->lower = new_lower;
s->upper = new_upper;
s->flags = new->flags;
- s->filp = filp;
+ s->file_priv = file_priv;
dev_priv->surfaces[i].refcount++;
dev_priv->surfaces[i].upper = s->upper;
radeon_apply_surface_regs(s->surface_index, dev_priv);
@@ -2009,7 +2010,7 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
s->lower = new_lower;
s->upper = new_upper;
s->flags = new->flags;
- s->filp = filp;
+ s->file_priv = file_priv;
dev_priv->surfaces[i].refcount = 1;
dev_priv->surfaces[i].lower = s->lower;
dev_priv->surfaces[i].upper = s->upper;
@@ -2023,7 +2024,8 @@ static int alloc_surface(drm_radeon_surface_alloc_t *new,
return -1;
}
-static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
+static int free_surface(struct drm_file *file_priv,
+ drm_radeon_private_t * dev_priv,
int lower)
{
struct radeon_virt_surface *s;
@@ -2031,8 +2033,9 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
/* find the virtual surface */
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
s = &(dev_priv->virt_surfaces[i]);
- if (s->filp) {
- if ((lower == s->lower) && (filp == s->filp)) {
+ if (s->file_priv) {
+ if ((lower == s->lower) && (file_priv == s->file_priv))
+ {
if (dev_priv->surfaces[s->surface_index].
lower == s->lower)
dev_priv->surfaces[s->surface_index].
@@ -2048,7 +2051,7 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
refcount == 0)
dev_priv->surfaces[s->surface_index].
flags = 0;
- s->filp = NULL;
+ s->file_priv = NULL;
radeon_apply_surface_regs(s->surface_index,
dev_priv);
return 0;
@@ -2058,13 +2061,13 @@ static int free_surface(DRMFILE filp, drm_radeon_private_t * dev_priv,
return 1;
}
-static void radeon_surfaces_release(DRMFILE filp,
+static void radeon_surfaces_release(struct drm_file *file_priv,
drm_radeon_private_t * dev_priv)
{
int i;
for (i = 0; i < 2 * RADEON_MAX_SURFACES; i++) {
- if (dev_priv->virt_surfaces[i].filp == filp)
- free_surface(filp, dev_priv,
+ if (dev_priv->virt_surfaces[i].file_priv == file_priv)
+ free_surface(file_priv, dev_priv,
dev_priv->virt_surfaces[i].lower);
}
}
@@ -2072,71 +2075,58 @@ static void radeon_surfaces_release(DRMFILE filp,
/* ================================================================
* IOCTL functions
*/
-static int radeon_surface_alloc(DRM_IOCTL_ARGS)
+static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_surface_alloc_t alloc;
+ drm_radeon_surface_alloc_t *alloc = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(alloc,
- (drm_radeon_surface_alloc_t __user *) data,
- sizeof(alloc));
-
- if (alloc_surface(&alloc, dev_priv, filp) == -1)
- return DRM_ERR(EINVAL);
+ if (alloc_surface(alloc, dev_priv, file_priv) == -1)
+ return -EINVAL;
else
return 0;
}
-static int radeon_surface_free(DRM_IOCTL_ARGS)
+static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_surface_free_t memfree;
+ drm_radeon_surface_free_t *memfree = data;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(memfree, (drm_radeon_surface_free_t __user *) data,
- sizeof(memfree));
-
- if (free_surface(filp, dev_priv, memfree.address))
- return DRM_ERR(EINVAL);
+ if (free_surface(file_priv, dev_priv, memfree->address))
+ return -EINVAL;
else
return 0;
}
-static int radeon_cp_clear(DRM_IOCTL_ARGS)
+static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
- drm_radeon_clear_t clear;
+ drm_radeon_clear_t *clear = data;
drm_radeon_clear_rect_t depth_boxes[RADEON_NR_SAREA_CLIPRECTS];
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(clear, (drm_radeon_clear_t __user *) data,
- sizeof(clear));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
- if (DRM_COPY_FROM_USER(&depth_boxes, clear.depth_boxes,
+ if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
sarea_priv->nbox * sizeof(depth_boxes[0])))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
- radeon_cp_dispatch_clear(dev, &clear, depth_boxes);
+ radeon_cp_dispatch_clear(dev, clear, depth_boxes);
COMMIT_RING();
return 0;
@@ -2144,7 +2134,7 @@ static int radeon_cp_clear(DRM_IOCTL_ARGS)
/* Not sure why this isn't set all the time:
*/
-static int radeon_do_init_pageflip(drm_device_t * dev)
+static int radeon_do_init_pageflip(struct drm_device * dev)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -2172,13 +2162,12 @@ static int radeon_do_init_pageflip(drm_device_t * dev)
/* Swapping and flipping are different operations, need different ioctls.
* They can & should be intermixed to support multiple 3d windows.
*/
-static int radeon_cp_flip(DRM_IOCTL_ARGS)
+static int radeon_cp_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -2191,14 +2180,13 @@ static int radeon_cp_flip(DRM_IOCTL_ARGS)
return 0;
}
-static int radeon_cp_swap(DRM_IOCTL_ARGS)
+static int radeon_cp_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
drm_radeon_sarea_t *sarea_priv = dev_priv->sarea_priv;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -2212,71 +2200,64 @@ static int radeon_cp_swap(DRM_IOCTL_ARGS)
return 0;
}
-static int radeon_cp_vertex(DRM_IOCTL_ARGS)
+static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
drm_radeon_sarea_t *sarea_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
- drm_radeon_vertex_t vertex;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_vertex_t *vertex = data;
drm_radeon_tcl_prim_t prim;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
sarea_priv = dev_priv->sarea_priv;
- DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
-
- DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex_t __user *) data,
- sizeof(vertex));
-
DRM_DEBUG("pid=%d index=%d count=%d discard=%d\n",
- DRM_CURRENTPID, vertex.idx, vertex.count, vertex.discard);
+ DRM_CURRENTPID, vertex->idx, vertex->count, vertex->discard);
- if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- vertex.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ vertex->idx, dma->buf_count - 1);
+ return -EINVAL;
}
- if (vertex.prim < 0 || vertex.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
- DRM_ERROR("buffer prim %d\n", vertex.prim);
- return DRM_ERR(EINVAL);
+ if (vertex->prim < 0 || vertex->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+ DRM_ERROR("buffer prim %d\n", vertex->prim);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- buf = dma->buflist[vertex.idx];
+ buf = dma->buflist[vertex->idx];
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", vertex.idx);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+ return -EINVAL;
}
/* Build up a prim_t record:
*/
- if (vertex.count) {
- buf->used = vertex.count; /* not used? */
+ if (vertex->count) {
+ buf->used = vertex->count; /* not used? */
if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
- if (radeon_emit_state(dev_priv, filp_priv,
+ if (radeon_emit_state(dev_priv, file_priv,
&sarea_priv->context_state,
sarea_priv->tex_state,
sarea_priv->dirty)) {
DRM_ERROR("radeon_emit_state failed\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
@@ -2286,15 +2267,15 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
}
prim.start = 0;
- prim.finish = vertex.count; /* unused */
- prim.prim = vertex.prim;
- prim.numverts = vertex.count;
+ prim.finish = vertex->count; /* unused */
+ prim.prim = vertex->prim;
+ prim.numverts = vertex->count;
prim.vc_format = dev_priv->sarea_priv->vc_format;
radeon_cp_dispatch_vertex(dev, buf, &prim);
}
- if (vertex.discard) {
+ if (vertex->discard) {
radeon_cp_discard_buffer(dev, buf);
}
@@ -2302,80 +2283,74 @@ static int radeon_cp_vertex(DRM_IOCTL_ARGS)
return 0;
}
-static int radeon_cp_indices(DRM_IOCTL_ARGS)
+static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
drm_radeon_sarea_t *sarea_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
- drm_radeon_indices_t elts;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_indices_t *elts = data;
drm_radeon_tcl_prim_t prim;
int count;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
sarea_priv = dev_priv->sarea_priv;
- DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
-
- DRM_COPY_FROM_USER_IOCTL(elts, (drm_radeon_indices_t __user *) data,
- sizeof(elts));
-
DRM_DEBUG("pid=%d index=%d start=%d end=%d discard=%d\n",
- DRM_CURRENTPID, elts.idx, elts.start, elts.end, elts.discard);
+ DRM_CURRENTPID, elts->idx, elts->start, elts->end,
+ elts->discard);
- if (elts.idx < 0 || elts.idx >= dma->buf_count) {
+ if (elts->idx < 0 || elts->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- elts.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ elts->idx, dma->buf_count - 1);
+ return -EINVAL;
}
- if (elts.prim < 0 || elts.prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
- DRM_ERROR("buffer prim %d\n", elts.prim);
- return DRM_ERR(EINVAL);
+ if (elts->prim < 0 || elts->prim > RADEON_PRIM_TYPE_3VRT_LINE_LIST) {
+ DRM_ERROR("buffer prim %d\n", elts->prim);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- buf = dma->buflist[elts.idx];
+ buf = dma->buflist[elts->idx];
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", elts.idx);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("sending pending buffer %d\n", elts->idx);
+ return -EINVAL;
}
- count = (elts.end - elts.start) / sizeof(u16);
- elts.start -= RADEON_INDEX_PRIM_OFFSET;
+ count = (elts->end - elts->start) / sizeof(u16);
+ elts->start -= RADEON_INDEX_PRIM_OFFSET;
- if (elts.start & 0x7) {
- DRM_ERROR("misaligned buffer 0x%x\n", elts.start);
- return DRM_ERR(EINVAL);
+ if (elts->start & 0x7) {
+ DRM_ERROR("misaligned buffer 0x%x\n", elts->start);
+ return -EINVAL;
}
- if (elts.start < buf->used) {
- DRM_ERROR("no header 0x%x - 0x%x\n", elts.start, buf->used);
- return DRM_ERR(EINVAL);
+ if (elts->start < buf->used) {
+ DRM_ERROR("no header 0x%x - 0x%x\n", elts->start, buf->used);
+ return -EINVAL;
}
- buf->used = elts.end;
+ buf->used = elts->end;
if (sarea_priv->dirty & ~RADEON_UPLOAD_CLIPRECTS) {
- if (radeon_emit_state(dev_priv, filp_priv,
+ if (radeon_emit_state(dev_priv, file_priv,
&sarea_priv->context_state,
sarea_priv->tex_state,
sarea_priv->dirty)) {
DRM_ERROR("radeon_emit_state failed\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
sarea_priv->dirty &= ~(RADEON_UPLOAD_TEX0IMAGES |
@@ -2386,15 +2361,15 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
/* Build up a prim_t record:
*/
- prim.start = elts.start;
- prim.finish = elts.end;
- prim.prim = elts.prim;
+ prim.start = elts->start;
+ prim.finish = elts->end;
+ prim.prim = elts->prim;
prim.offset = 0; /* offset from start of dma buffers */
prim.numverts = RADEON_MAX_VB_VERTS; /* duh */
prim.vc_format = dev_priv->sarea_priv->vc_format;
radeon_cp_dispatch_indices(dev, buf, &prim);
- if (elts.discard) {
+ if (elts->discard) {
radeon_cp_discard_buffer(dev, buf);
}
@@ -2402,52 +2377,44 @@ static int radeon_cp_indices(DRM_IOCTL_ARGS)
return 0;
}
-static int radeon_cp_texture(DRM_IOCTL_ARGS)
+static int radeon_cp_texture(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_texture_t tex;
+ drm_radeon_texture_t *tex = data;
drm_radeon_tex_image_t image;
int ret;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(tex, (drm_radeon_texture_t __user *) data,
- sizeof(tex));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- if (tex.image == NULL) {
+ if (tex->image == NULL) {
DRM_ERROR("null texture image!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (DRM_COPY_FROM_USER(&image,
- (drm_radeon_tex_image_t __user *) tex.image,
+ (drm_radeon_tex_image_t __user *) tex->image,
sizeof(image)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- ret = radeon_cp_dispatch_texture(filp, dev, &tex, &image);
+ ret = radeon_cp_dispatch_texture(dev, file_priv, tex, &image);
COMMIT_RING();
return ret;
}
-static int radeon_cp_stipple(DRM_IOCTL_ARGS)
+static int radeon_cp_stipple(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_stipple_t stipple;
+ drm_radeon_stipple_t *stipple = data;
u32 mask[32];
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(stipple, (drm_radeon_stipple_t __user *) data,
- sizeof(stipple));
-
- if (DRM_COPY_FROM_USER(&mask, stipple.mask, 32 * sizeof(u32)))
- return DRM_ERR(EFAULT);
+ if (DRM_COPY_FROM_USER(&mask, stipple->mask, 32 * sizeof(u32)))
+ return -EFAULT;
RING_SPACE_TEST_WITH_RETURN(dev_priv);
@@ -2457,57 +2424,53 @@ static int radeon_cp_stipple(DRM_IOCTL_ARGS)
return 0;
}
-static int radeon_cp_indirect(DRM_IOCTL_ARGS)
+static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
- drm_radeon_indirect_t indirect;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_indirect_t *indirect = data;
RING_LOCALS;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(indirect,
- (drm_radeon_indirect_t __user *) data,
- sizeof(indirect));
-
DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n",
- indirect.idx, indirect.start, indirect.end, indirect.discard);
+ indirect->idx, indirect->start, indirect->end,
+ indirect->discard);
- if (indirect.idx < 0 || indirect.idx >= dma->buf_count) {
+ if (indirect->idx < 0 || indirect->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- indirect.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ indirect->idx, dma->buf_count - 1);
+ return -EINVAL;
}
- buf = dma->buflist[indirect.idx];
+ buf = dma->buflist[indirect->idx];
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", indirect.idx);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("sending pending buffer %d\n", indirect->idx);
+ return -EINVAL;
}
- if (indirect.start < buf->used) {
+ if (indirect->start < buf->used) {
DRM_ERROR("reusing indirect: start=0x%x actual=0x%x\n",
- indirect.start, buf->used);
- return DRM_ERR(EINVAL);
+ indirect->start, buf->used);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- buf->used = indirect.end;
+ buf->used = indirect->end;
/* Wait for the 3D stream to idle before the indirect buffer
* containing 2D acceleration commands is processed.
@@ -2522,8 +2485,8 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
* X server. This is insecure and is thus only available to
* privileged clients.
*/
- radeon_cp_dispatch_indirect(dev, buf, indirect.start, indirect.end);
- if (indirect.discard) {
+ radeon_cp_dispatch_indirect(dev, buf, indirect->start, indirect->end);
+ if (indirect->discard) {
radeon_cp_discard_buffer(dev, buf);
}
@@ -2531,78 +2494,71 @@ static int radeon_cp_indirect(DRM_IOCTL_ARGS)
return 0;
}
-static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
+static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
drm_radeon_sarea_t *sarea_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
- drm_radeon_vertex2_t vertex;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
+ drm_radeon_vertex2_t *vertex = data;
int i;
unsigned char laststate;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
sarea_priv = dev_priv->sarea_priv;
- DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
-
- DRM_COPY_FROM_USER_IOCTL(vertex, (drm_radeon_vertex2_t __user *) data,
- sizeof(vertex));
-
DRM_DEBUG("pid=%d index=%d discard=%d\n",
- DRM_CURRENTPID, vertex.idx, vertex.discard);
+ DRM_CURRENTPID, vertex->idx, vertex->discard);
- if (vertex.idx < 0 || vertex.idx >= dma->buf_count) {
+ if (vertex->idx < 0 || vertex->idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
- vertex.idx, dma->buf_count - 1);
- return DRM_ERR(EINVAL);
+ vertex->idx, dma->buf_count - 1);
+ return -EINVAL;
}
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- buf = dma->buflist[vertex.idx];
+ buf = dma->buflist[vertex->idx];
- if (buf->filp != filp) {
+ if (buf->file_priv != file_priv) {
DRM_ERROR("process %d using buffer owned by %p\n",
- DRM_CURRENTPID, buf->filp);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, buf->file_priv);
+ return -EINVAL;
}
if (buf->pending) {
- DRM_ERROR("sending pending buffer %d\n", vertex.idx);
- return DRM_ERR(EINVAL);
+ DRM_ERROR("sending pending buffer %d\n", vertex->idx);
+ return -EINVAL;
}
if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
- for (laststate = 0xff, i = 0; i < vertex.nr_prims; i++) {
+ for (laststate = 0xff, i = 0; i < vertex->nr_prims; i++) {
drm_radeon_prim_t prim;
drm_radeon_tcl_prim_t tclprim;
- if (DRM_COPY_FROM_USER(&prim, &vertex.prim[i], sizeof(prim)))
- return DRM_ERR(EFAULT);
+ if (DRM_COPY_FROM_USER(&prim, &vertex->prim[i], sizeof(prim)))
+ return -EFAULT;
if (prim.stateidx != laststate) {
drm_radeon_state_t state;
if (DRM_COPY_FROM_USER(&state,
- &vertex.state[prim.stateidx],
+ &vertex->state[prim.stateidx],
sizeof(state)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
- if (radeon_emit_state2(dev_priv, filp_priv, &state)) {
+ if (radeon_emit_state2(dev_priv, file_priv, &state)) {
DRM_ERROR("radeon_emit_state2 failed\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
laststate = prim.stateidx;
@@ -2629,7 +2585,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
sarea_priv->nbox = 0;
}
- if (vertex.discard) {
+ if (vertex->discard) {
radeon_cp_discard_buffer(dev, buf);
}
@@ -2638,7 +2594,7 @@ static int radeon_cp_vertex2(DRM_IOCTL_ARGS)
}
static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
- drm_file_t * filp_priv,
+ struct drm_file *file_priv,
drm_radeon_cmd_header_t header,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
@@ -2648,19 +2604,19 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
sz = packet[id].len;
reg = packet[id].start;
if (sz * sizeof(int) > cmdbuf->bufsz) {
DRM_ERROR("Packet size provided larger than data provided\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (radeon_check_and_fixup_packets(dev_priv, filp_priv, id, data)) {
+ if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
DRM_ERROR("Packet verification failed\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
BEGIN_RING(sz + 1);
@@ -2748,7 +2704,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
if (sz * 4 > cmdbuf->bufsz)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
BEGIN_RING(5 + sz);
OUT_RING_REG(RADEON_SE_TCL_STATE_FLUSH, 0);
@@ -2763,8 +2719,8 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
return 0;
}
-static int radeon_emit_packet3(drm_device_t * dev,
- drm_file_t * filp_priv,
+static int radeon_emit_packet3(struct drm_device * dev,
+ struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -2774,7 +2730,7 @@ static int radeon_emit_packet3(drm_device_t * dev,
DRM_DEBUG("\n");
- if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv,
+ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
cmdbuf, &cmdsz))) {
DRM_ERROR("Packet verification failed\n");
return ret;
@@ -2789,22 +2745,22 @@ static int radeon_emit_packet3(drm_device_t * dev,
return 0;
}
-static int radeon_emit_packet3_cliprect(drm_device_t *dev,
- drm_file_t *filp_priv,
+static int radeon_emit_packet3_cliprect(struct drm_device *dev,
+ struct drm_file *file_priv,
drm_radeon_kcmd_buffer_t *cmdbuf,
int orig_nbox)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_clip_rect_t box;
+ struct drm_clip_rect box;
unsigned int cmdsz;
int ret;
- drm_clip_rect_t __user *boxes = cmdbuf->boxes;
+ struct drm_clip_rect __user *boxes = cmdbuf->boxes;
int i = 0;
RING_LOCALS;
DRM_DEBUG("\n");
- if ((ret = radeon_check_and_fixup_packet3(dev_priv, filp_priv,
+ if ((ret = radeon_check_and_fixup_packet3(dev_priv, file_priv,
cmdbuf, &cmdsz))) {
DRM_ERROR("Packet verification failed\n");
return ret;
@@ -2816,7 +2772,7 @@ static int radeon_emit_packet3_cliprect(drm_device_t *dev,
do {
if (i < cmdbuf->nbox) {
if (DRM_COPY_FROM_USER(&box, &boxes[i], sizeof(box)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
/* FIXME The second and subsequent times round
* this loop, send a WAIT_UNTIL_3D_IDLE before
* calling emit_clip_rect(). This fixes a
@@ -2851,7 +2807,7 @@ static int radeon_emit_packet3_cliprect(drm_device_t *dev,
return 0;
}
-static int radeon_emit_wait(drm_device_t * dev, int flags)
+static int radeon_emit_wait(struct drm_device * dev, int flags)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
@@ -2874,67 +2830,59 @@ static int radeon_emit_wait(drm_device_t * dev, int flags)
ADVANCE_RING();
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
}
-static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf = NULL;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf = NULL;
int idx;
- drm_radeon_kcmd_buffer_t cmdbuf;
+ drm_radeon_kcmd_buffer_t *cmdbuf = data;
drm_radeon_cmd_header_t header;
int orig_nbox, orig_bufsz;
char *kbuf = NULL;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
-
- DRM_COPY_FROM_USER_IOCTL(cmdbuf,
- (drm_radeon_kcmd_buffer_t __user *) data,
- sizeof(cmdbuf));
-
RING_SPACE_TEST_WITH_RETURN(dev_priv);
VB_AGE_TEST_WITH_RETURN(dev_priv);
- if (cmdbuf.bufsz > 64 * 1024 || cmdbuf.bufsz < 0) {
- return DRM_ERR(EINVAL);
+ if (cmdbuf->bufsz > 64 * 1024 || cmdbuf->bufsz < 0) {
+ return -EINVAL;
}
/* Allocate an in-kernel area and copy in the cmdbuf. Do this to avoid
* races between checking values and using those values in other code,
* and simply to avoid a lot of function calls to copy in data.
*/
- orig_bufsz = cmdbuf.bufsz;
+ orig_bufsz = cmdbuf->bufsz;
if (orig_bufsz != 0) {
- kbuf = drm_alloc(cmdbuf.bufsz, DRM_MEM_DRIVER);
+ kbuf = drm_alloc(cmdbuf->bufsz, DRM_MEM_DRIVER);
if (kbuf == NULL)
- return DRM_ERR(ENOMEM);
- if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf.buf,
- cmdbuf.bufsz)) {
+ return -ENOMEM;
+ if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
+ cmdbuf->bufsz)) {
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
- cmdbuf.buf = kbuf;
+ cmdbuf->buf = kbuf;
}
- orig_nbox = cmdbuf.nbox;
+ orig_nbox = cmdbuf->nbox;
if (dev_priv->microcode_version == UCODE_R300) {
int temp;
- temp = r300_do_cp_cmdbuf(dev, filp, filp_priv, &cmdbuf);
+ temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
@@ -2943,17 +2891,17 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
}
/* microcode_version != r300 */
- while (cmdbuf.bufsz >= sizeof(header)) {
+ while (cmdbuf->bufsz >= sizeof(header)) {
- header.i = *(int *)cmdbuf.buf;
- cmdbuf.buf += sizeof(header);
- cmdbuf.bufsz -= sizeof(header);
+ header.i = *(int *)cmdbuf->buf;
+ cmdbuf->buf += sizeof(header);
+ cmdbuf->bufsz -= sizeof(header);
switch (header.header.cmd_type) {
case RADEON_CMD_PACKET:
DRM_DEBUG("RADEON_CMD_PACKET\n");
if (radeon_emit_packets
- (dev_priv, filp_priv, header, &cmdbuf)) {
+ (dev_priv, file_priv, header, cmdbuf)) {
DRM_ERROR("radeon_emit_packets failed\n");
goto err;
}
@@ -2961,7 +2909,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
case RADEON_CMD_SCALARS:
DRM_DEBUG("RADEON_CMD_SCALARS\n");
- if (radeon_emit_scalars(dev_priv, header, &cmdbuf)) {
+ if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars failed\n");
goto err;
}
@@ -2969,7 +2917,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
case RADEON_CMD_VECTORS:
DRM_DEBUG("RADEON_CMD_VECTORS\n");
- if (radeon_emit_vectors(dev_priv, header, &cmdbuf)) {
+ if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
DRM_ERROR("radeon_emit_vectors failed\n");
goto err;
}
@@ -2985,9 +2933,10 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
}
buf = dma->buflist[idx];
- if (buf->filp != filp || buf->pending) {
+ if (buf->file_priv != file_priv || buf->pending) {
DRM_ERROR("bad buffer %p %p %d\n",
- buf->filp, filp, buf->pending);
+ buf->file_priv, file_priv,
+ buf->pending);
goto err;
}
@@ -2996,7 +2945,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
case RADEON_CMD_PACKET3:
DRM_DEBUG("RADEON_CMD_PACKET3\n");
- if (radeon_emit_packet3(dev, filp_priv, &cmdbuf)) {
+ if (radeon_emit_packet3(dev, file_priv, cmdbuf)) {
DRM_ERROR("radeon_emit_packet3 failed\n");
goto err;
}
@@ -3005,7 +2954,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
case RADEON_CMD_PACKET3_CLIP:
DRM_DEBUG("RADEON_CMD_PACKET3_CLIP\n");
if (radeon_emit_packet3_cliprect
- (dev, filp_priv, &cmdbuf, orig_nbox)) {
+ (dev, file_priv, cmdbuf, orig_nbox)) {
DRM_ERROR("radeon_emit_packet3_clip failed\n");
goto err;
}
@@ -3013,7 +2962,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
case RADEON_CMD_SCALARS2:
DRM_DEBUG("RADEON_CMD_SCALARS2\n");
- if (radeon_emit_scalars2(dev_priv, header, &cmdbuf)) {
+ if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars2 failed\n");
goto err;
}
@@ -3028,7 +2977,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
break;
case RADEON_CMD_VECLINEAR:
DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
- if (radeon_emit_veclinear(dev_priv, header, &cmdbuf)) {
+ if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
DRM_ERROR("radeon_emit_veclinear failed\n");
goto err;
}
@@ -3037,7 +2986,7 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
default:
DRM_ERROR("bad cmd_type %d at %p\n",
header.header.cmd_type,
- cmdbuf.buf - sizeof(header));
+ cmdbuf->buf - sizeof(header));
goto err;
}
}
@@ -3052,27 +3001,23 @@ static int radeon_cp_cmdbuf(DRM_IOCTL_ARGS)
err:
if (orig_bufsz != 0)
drm_free(kbuf, orig_bufsz, DRM_MEM_DRIVER);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
-static int radeon_cp_getparam(DRM_IOCTL_ARGS)
+static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_radeon_getparam_t param;
+ drm_radeon_getparam_t *param = data;
int value;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_COPY_FROM_USER_IOCTL(param, (drm_radeon_getparam_t __user *) data,
- sizeof(param));
-
DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
- switch (param.param) {
+ switch (param->param) {
case RADEON_PARAM_GART_BUFFER_OFFSET:
value = dev_priv->gart_buffers_offset;
break;
@@ -3119,7 +3064,7 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
break;
case RADEON_PARAM_SCRATCH_OFFSET:
if (!dev_priv->writeback_works)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
value = RADEON_SCRATCH_REG_OFFSET;
break;
@@ -3135,48 +3080,42 @@ static int radeon_cp_getparam(DRM_IOCTL_ARGS)
value = radeon_vblank_crtc_get(dev);
break;
default:
- DRM_DEBUG( "Invalid parameter %d\n", param.param );
- return DRM_ERR(EINVAL);
+ DRM_DEBUG( "Invalid parameter %d\n", param->param );
+ return -EINVAL;
}
- if (DRM_COPY_TO_USER(param.value, &value, sizeof(int))) {
+ if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
DRM_ERROR("copy_to_user\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
return 0;
}
-static int radeon_cp_setparam(DRM_IOCTL_ARGS)
+static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_radeon_private_t *dev_priv = dev->dev_private;
- drm_file_t *filp_priv;
- drm_radeon_setparam_t sp;
+ drm_radeon_setparam_t *sp = data;
struct drm_radeon_driver_file_fields *radeon_priv;
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- DRM_GET_PRIV_WITH_RETURN(filp_priv, filp);
-
- DRM_COPY_FROM_USER_IOCTL(sp, (drm_radeon_setparam_t __user *) data,
- sizeof(sp));
-
- switch (sp.param) {
+ switch (sp->param) {
case RADEON_SETPARAM_FB_LOCATION:
- radeon_priv = filp_priv->driver_priv;
- radeon_priv->radeon_fb_delta = dev_priv->fb_location - sp.value;
+ radeon_priv = file_priv->driver_priv;
+ radeon_priv->radeon_fb_delta = dev_priv->fb_location -
+ sp->value;
break;
case RADEON_SETPARAM_SWITCH_TILING:
- if (sp.value == 0) {
+ if (sp->value == 0) {
DRM_DEBUG("color tiling disabled\n");
dev_priv->front_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset &= ~RADEON_DST_TILE_MACRO;
dev_priv->sarea_priv->tiling_enabled = 0;
- } else if (sp.value == 1) {
+ } else if (sp->value == 1) {
DRM_DEBUG("color tiling enabled\n");
dev_priv->front_pitch_offset |= RADEON_DST_TILE_MACRO;
dev_priv->back_pitch_offset |= RADEON_DST_TILE_MACRO;
@@ -3184,23 +3123,23 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
}
break;
case RADEON_SETPARAM_PCIGART_LOCATION:
- dev_priv->pcigart_offset = sp.value;
+ dev_priv->pcigart_offset = sp->value;
dev_priv->pcigart_offset_set = 1;
break;
case RADEON_SETPARAM_NEW_MEMMAP:
- dev_priv->new_memmap = sp.value;
+ dev_priv->new_memmap = sp->value;
break;
case RADEON_SETPARAM_PCIGART_TABLE_SIZE:
- dev_priv->gart_info.table_size = sp.value;
+ dev_priv->gart_info.table_size = sp->value;
if (dev_priv->gart_info.table_size < RADEON_PCIGART_TABLE_SIZE)
dev_priv->gart_info.table_size = RADEON_PCIGART_TABLE_SIZE;
break;
case RADEON_SETPARAM_VBLANK_CRTC:
- return radeon_vblank_crtc_set(dev, sp.value);
+ return radeon_vblank_crtc_set(dev, sp->value);
break;
default:
- DRM_DEBUG("Invalid parameter %d\n", sp.param);
- return DRM_ERR(EINVAL);
+ DRM_DEBUG("Invalid parameter %d\n", sp->param);
+ return -EINVAL;
}
return 0;
@@ -3213,18 +3152,19 @@ static int radeon_cp_setparam(DRM_IOCTL_ARGS)
*
* DRM infrastructure takes care of reclaiming dma buffers.
*/
-void radeon_driver_preclose(drm_device_t * dev, DRMFILE filp)
+void radeon_driver_preclose(struct drm_device * dev,
+ struct drm_file *file_priv)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
dev_priv->page_flipping = 0;
- radeon_mem_release(filp, dev_priv->gart_heap);
- radeon_mem_release(filp, dev_priv->fb_heap);
- radeon_surfaces_release(filp, dev_priv);
+ radeon_mem_release(file_priv, dev_priv->gart_heap);
+ radeon_mem_release(file_priv, dev_priv->fb_heap);
+ radeon_surfaces_release(file_priv, dev_priv);
}
}
-void radeon_driver_lastclose(drm_device_t * dev)
+void radeon_driver_lastclose(struct drm_device * dev)
{
if (dev->dev_private) {
drm_radeon_private_t *dev_priv = dev->dev_private;
@@ -3237,7 +3177,7 @@ void radeon_driver_lastclose(drm_device_t * dev)
radeon_do_release(dev);
}
-int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
+int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_radeon_driver_file_fields *radeon_priv;
@@ -3250,7 +3190,7 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
if (!radeon_priv)
return -ENOMEM;
- filp_priv->driver_priv = radeon_priv;
+ file_priv->driver_priv = radeon_priv;
if (dev_priv)
radeon_priv->radeon_fb_delta = dev_priv->fb_location;
@@ -3259,42 +3199,42 @@ int radeon_driver_open(drm_device_t * dev, drm_file_t * filp_priv)
return 0;
}
-void radeon_driver_postclose(drm_device_t * dev, drm_file_t * filp_priv)
+void radeon_driver_postclose(struct drm_device * dev, struct drm_file *file_priv)
{
struct drm_radeon_driver_file_fields *radeon_priv =
- filp_priv->driver_priv;
+ file_priv->driver_priv;
drm_free(radeon_priv, sizeof(*radeon_priv), DRM_MEM_FILES);
}
-drm_ioctl_desc_t radeon_ioctls[] = {
- [DRM_IOCTL_NR(DRM_RADEON_CP_INIT)] = {radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_RADEON_CP_START)] = {radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_RADEON_CP_STOP)] = {radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_RADEON_CP_RESET)] = {radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_RADEON_CP_IDLE)] = {radeon_cp_idle, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_CP_RESUME)] = {radeon_cp_resume, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_RESET)] = {radeon_engine_reset, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_FULLSCREEN)] = {radeon_fullscreen, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_SWAP)] = {radeon_cp_swap, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_CLEAR)] = {radeon_cp_clear, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_VERTEX)] = {radeon_cp_vertex, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_INDICES)] = {radeon_cp_indices, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_TEXTURE)] = {radeon_cp_texture, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_STIPPLE)] = {radeon_cp_stipple, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_INDIRECT)] = {radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_RADEON_VERTEX2)] = {radeon_cp_vertex2, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_CMDBUF)] = {radeon_cp_cmdbuf, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_GETPARAM)] = {radeon_cp_getparam, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_FLIP)] = {radeon_cp_flip, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_ALLOC)] = {radeon_mem_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_FREE)] = {radeon_mem_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_INIT_HEAP)] = {radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_RADEON_IRQ_EMIT)] = {radeon_irq_emit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_IRQ_WAIT)] = {radeon_irq_wait, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_SETPARAM)] = {radeon_cp_setparam, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_SURF_ALLOC)] = {radeon_surface_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_RADEON_SURF_FREE)] = {radeon_surface_free, DRM_AUTH}
+struct drm_ioctl_desc radeon_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_RADEON_CP_INIT, radeon_cp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_START, radeon_cp_start, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_STOP, radeon_cp_stop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESET, radeon_cp_reset, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_IDLE, radeon_cp_idle, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CP_RESUME, radeon_cp_resume, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_RESET, radeon_engine_reset, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FULLSCREEN, radeon_fullscreen, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SWAP, radeon_cp_swap, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CLEAR, radeon_cp_clear, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX, radeon_cp_vertex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDICES, radeon_cp_indices, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_TEXTURE, radeon_cp_texture, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_STIPPLE, radeon_cp_stipple, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INDIRECT, radeon_cp_indirect, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_VERTEX2, radeon_cp_vertex2, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_CMDBUF, radeon_cp_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GETPARAM, radeon_cp_getparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FLIP, radeon_cp_flip, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_ALLOC, radeon_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_FREE, radeon_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_INIT_HEAP, radeon_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_EMIT, radeon_irq_emit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_IRQ_WAIT, radeon_irq_wait, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SETPARAM, radeon_cp_setparam, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free, DRM_AUTH)
};
int radeon_max_ioctl = DRM_ARRAY_SIZE(radeon_ioctls);
diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c
index 9a3ae1f1..32ac5ac2 100644
--- a/shared-core/savage_bci.c
+++ b/shared-core/savage_bci.c
@@ -32,7 +32,7 @@
#define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */
#define SAVAGE_FREELIST_DEBUG 0
-static int savage_do_cleanup_bci(drm_device_t *dev);
+static int savage_do_cleanup_bci(struct drm_device *dev);
static int
savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
@@ -60,7 +60,7 @@ savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x, threshold=0x%08x\n", status, threshold);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int
@@ -81,7 +81,7 @@ savage_bci_wait_fifo_s3d(drm_savage_private_t *dev_priv, unsigned int n)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int
@@ -102,7 +102,7 @@ savage_bci_wait_fifo_s4(drm_savage_private_t *dev_priv, unsigned int n)
DRM_ERROR("failed!\n");
DRM_INFO(" status=0x%08x\n", status);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
/*
@@ -136,7 +136,7 @@ savage_bci_wait_event_shadow(drm_savage_private_t *dev_priv, uint16_t e)
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
static int
@@ -158,7 +158,7 @@ savage_bci_wait_event_reg(drm_savage_private_t *dev_priv, uint16_t e)
DRM_INFO(" status=0x%08x, e=0x%04x\n", status, e);
#endif
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
@@ -203,11 +203,11 @@ uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
/*
* Freelist management
*/
-static int savage_freelist_init(drm_device_t *dev)
+static int savage_freelist_init(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *buf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *buf;
drm_savage_buf_priv_t *entry;
int i;
DRM_DEBUG("count=%d\n", dma->buf_count);
@@ -236,7 +236,7 @@ static int savage_freelist_init(drm_device_t *dev)
return 0;
}
-static drm_buf_t *savage_freelist_get(drm_device_t *dev)
+static struct drm_buf *savage_freelist_get(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *tail = dev_priv->tail.prev;
@@ -269,7 +269,7 @@ static drm_buf_t *savage_freelist_get(drm_device_t *dev)
return NULL;
}
-void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf)
+void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf)
{
drm_savage_private_t *dev_priv = dev->dev_private;
drm_savage_buf_priv_t *entry = buf->dev_private, *prev, *next;
@@ -301,7 +301,7 @@ static int savage_dma_init(drm_savage_private_t *dev_priv)
dev_priv->dma_pages = drm_alloc(sizeof(drm_savage_dma_page_t) *
dev_priv->nr_dma_pages, DRM_MEM_DRIVER);
if (dev_priv->dma_pages == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
for (i = 0; i < dev_priv->nr_dma_pages; ++i) {
SET_AGE(&dev_priv->dma_pages[i].age, 0, 0);
@@ -535,13 +535,13 @@ static void savage_fake_dma_flush(drm_savage_private_t *dev_priv)
dev_priv->first_dma_page = dev_priv->current_dma_page = 0;
}
-int savage_driver_load(drm_device_t *dev, unsigned long chipset)
+int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_savage_private_t *dev_priv;
dev_priv = drm_alloc(sizeof(drm_savage_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
memset(dev_priv, 0, sizeof(drm_savage_private_t));
dev->dev_private = (void *)dev_priv;
@@ -557,7 +557,7 @@ int savage_driver_load(drm_device_t *dev, unsigned long chipset)
* in drm_addmap. Therefore we add them manually before the maps are
* initialized, and tear them down on last close.
*/
-int savage_driver_firstopen(drm_device_t *dev)
+int savage_driver_firstopen(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
unsigned long mmio_base, fb_base, fb_size, aperture_base;
@@ -654,7 +654,7 @@ int savage_driver_firstopen(drm_device_t *dev)
/*
* Delete MTRRs and free device-private data.
*/
-void savage_driver_lastclose(drm_device_t *dev)
+void savage_driver_lastclose(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
@@ -666,7 +666,7 @@ void savage_driver_lastclose(drm_device_t *dev)
dev_priv->mtrr[i].size, DRM_MTRR_WC);
}
-int savage_driver_unload(drm_device_t *dev)
+int savage_driver_unload(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
@@ -675,22 +675,22 @@ int savage_driver_unload(drm_device_t *dev)
return 0;
}
-static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
+static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)
{
drm_savage_private_t *dev_priv = dev->dev_private;
if (init->fb_bpp != 16 && init->fb_bpp != 32) {
DRM_ERROR("invalid frame buffer bpp %d!\n", init->fb_bpp);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (init->depth_bpp != 16 && init->depth_bpp != 32) {
DRM_ERROR("invalid depth buffer bpp %d!\n", init->fb_bpp);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (init->dma_type != SAVAGE_DMA_AGP &&
init->dma_type != SAVAGE_DMA_PCI) {
DRM_ERROR("invalid dma memory type %d!\n", init->dma_type);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->cob_size = init->cob_size;
@@ -714,14 +714,14 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (!dev_priv->sarea) {
DRM_ERROR("could not find sarea!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (init->status_offset != 0) {
dev_priv->status = drm_core_findmap(dev, init->status_offset);
if (!dev_priv->status) {
DRM_ERROR("could not find shadow status region!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
} else {
dev_priv->status = NULL;
@@ -733,13 +733,13 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (!dev->agp_buffer_map) {
DRM_ERROR("could not find DMA buffer region!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
drm_core_ioremap(dev->agp_buffer_map, dev);
if (!dev->agp_buffer_map) {
DRM_ERROR("failed to ioremap DMA buffer region!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
}
if (init->agp_textures_offset) {
@@ -748,7 +748,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (!dev_priv->agp_textures) {
DRM_ERROR("could not find agp texture region!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
} else {
dev_priv->agp_textures = NULL;
@@ -759,39 +759,39 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
DRM_ERROR("command DMA not supported on "
"Savage3D/MX/IX.\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (dev->dma && dev->dma->buflist) {
DRM_ERROR("command and vertex DMA not supported "
"at the same time.\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->cmd_dma = drm_core_findmap(dev, init->cmd_dma_offset);
if (!dev_priv->cmd_dma) {
DRM_ERROR("could not find command DMA region!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (dev_priv->dma_type == SAVAGE_DMA_AGP) {
if (dev_priv->cmd_dma->type != _DRM_AGP) {
DRM_ERROR("AGP command DMA region is not a "
"_DRM_AGP map!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
drm_core_ioremap(dev_priv->cmd_dma, dev);
if (!dev_priv->cmd_dma->handle) {
DRM_ERROR("failed to ioremap command "
"DMA region!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
} else if (dev_priv->cmd_dma->type != _DRM_CONSISTENT) {
DRM_ERROR("PCI command DMA region is not a "
"_DRM_CONSISTENT map!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
} else {
dev_priv->cmd_dma = NULL;
@@ -808,7 +808,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (!dev_priv->fake_dma.handle) {
DRM_ERROR("could not allocate faked DMA buffer!\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev_priv->cmd_dma = &dev_priv->fake_dma;
dev_priv->dma_flush = savage_fake_dma_flush;
@@ -885,19 +885,19 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init)
if (savage_freelist_init(dev) < 0) {
DRM_ERROR("could not initialize freelist\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (savage_dma_init(dev_priv) < 0) {
DRM_ERROR("could not initialize command DMA\n");
savage_do_cleanup_bci(dev);
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
return 0;
}
-static int savage_do_cleanup_bci(drm_device_t *dev)
+static int savage_do_cleanup_bci(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
@@ -927,59 +927,46 @@ static int savage_do_cleanup_bci(drm_device_t *dev)
return 0;
}
-static int savage_bci_init(DRM_IOCTL_ARGS)
+static int savage_bci_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_savage_init_t init;
+ drm_savage_init_t *init = data;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(init, (drm_savage_init_t __user *)data,
- sizeof(init));
-
- switch (init.func) {
+ switch (init->func) {
case SAVAGE_INIT_BCI:
- return savage_do_init_bci(dev, &init);
+ return savage_do_init_bci(dev, init);
case SAVAGE_CLEANUP_BCI:
return savage_do_cleanup_bci(dev);
}
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
-static int savage_bci_event_emit(DRM_IOCTL_ARGS)
+static int savage_bci_event_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
- drm_savage_event_emit_t event;
+ drm_savage_event_emit_t *event = data;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_emit_t __user *)data,
- sizeof(event));
+ event->count = savage_bci_emit_event(dev_priv, event->flags);
+ event->count |= dev_priv->event_wrap << 16;
- event.count = savage_bci_emit_event(dev_priv, event.flags);
- event.count |= dev_priv->event_wrap << 16;
- DRM_COPY_TO_USER_IOCTL((drm_savage_event_emit_t __user *)data,
- event, sizeof(event));
return 0;
}
-static int savage_bci_event_wait(DRM_IOCTL_ARGS)
+static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
- drm_savage_event_wait_t event;
+ drm_savage_event_wait_t *event = data;
unsigned int event_e, hw_e;
unsigned int event_w, hw_w;
DRM_DEBUG("\n");
- DRM_COPY_FROM_USER_IOCTL(event, (drm_savage_event_wait_t __user *)data,
- sizeof(event));
-
UPDATE_EVENT_COUNTER();
if (dev_priv->status_ptr)
hw_e = dev_priv->status_ptr[1] & 0xffff;
@@ -989,8 +976,8 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
if (hw_e > dev_priv->event_counter)
hw_w--; /* hardware hasn't passed the last wrap yet */
- event_e = event.count & 0xffff;
- event_w = event.count >> 16;
+ event_e = event->count & 0xffff;
+ event_w = event->count >> 16;
/* Don't need to wait if
* - event counter wrapped since the event was emitted or
@@ -1006,71 +993,68 @@ static int savage_bci_event_wait(DRM_IOCTL_ARGS)
* DMA buffer management
*/
-static int savage_bci_get_buffers(DRMFILE filp, drm_device_t *dev, drm_dma_t *d)
+static int savage_bci_get_buffers(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_dma *d)
{
- drm_buf_t *buf;
+ struct drm_buf *buf;
int i;
for (i = d->granted_count; i < d->request_count; i++) {
buf = savage_freelist_get(dev);
if (!buf)
- return DRM_ERR(EAGAIN);
+ return -EAGAIN;
- buf->filp = filp;
+ buf->file_priv = file_priv;
if (DRM_COPY_TO_USER(&d->request_indices[i],
&buf->idx, sizeof(buf->idx)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if (DRM_COPY_TO_USER(&d->request_sizes[i],
&buf->total, sizeof(buf->total)))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
d->granted_count++;
}
return 0;
}
-int savage_bci_buffers(DRM_IOCTL_ARGS)
+int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_device_dma_t *dma = dev->dma;
- drm_dma_t d;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_dma *d = data;
int ret = 0;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(d, (drm_dma_t __user *)data, sizeof(d));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
/* Please don't send us buffers.
*/
- if (d.send_count != 0) {
+ if (d->send_count != 0) {
DRM_ERROR("Process %d trying to send %d buffers via drmDMA\n",
- DRM_CURRENTPID, d.send_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->send_count);
+ return -EINVAL;
}
/* We'll send you buffers.
*/
- if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ if (d->request_count < 0 || d->request_count > dma->buf_count) {
DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
- DRM_CURRENTPID, d.request_count, dma->buf_count);
- return DRM_ERR(EINVAL);
+ DRM_CURRENTPID, d->request_count, dma->buf_count);
+ return -EINVAL;
}
- d.granted_count = 0;
+ d->granted_count = 0;
- if (d.request_count) {
- ret = savage_bci_get_buffers(filp, dev, &d);
+ if (d->request_count) {
+ ret = savage_bci_get_buffers(dev, file_priv, d);
}
- DRM_COPY_TO_USER_IOCTL((drm_dma_t __user *)data, d, sizeof(d));
-
return ret;
}
-void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
+void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)
{
- drm_device_dma_t *dma = dev->dma;
+ struct drm_device_dma *dma = dev->dma;
drm_savage_private_t *dev_priv = dev->dev_private;
int i;
@@ -1084,10 +1068,10 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
/*i830_flush_queue(dev);*/
for (i = 0; i < dma->buf_count; i++) {
- drm_buf_t *buf = dma->buflist[i];
+ struct drm_buf *buf = dma->buflist[i];
drm_savage_buf_priv_t *buf_priv = buf->dev_private;
- if (buf->filp == filp && buf_priv &&
+ if (buf->file_priv == file_priv && buf_priv &&
buf_priv->next == NULL && buf_priv->prev == NULL) {
uint16_t event;
DRM_DEBUG("reclaimed from client\n");
@@ -1097,14 +1081,14 @@ void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp)
}
}
- drm_core_reclaim_buffers(dev, filp);
+ drm_core_reclaim_buffers(dev, file_priv);
}
-drm_ioctl_desc_t savage_ioctls[] = {
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_INIT)] = {savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_CMDBUF)] = {savage_bci_cmdbuf, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_EMIT)] = {savage_bci_event_emit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SAVAGE_BCI_EVENT_WAIT)] = {savage_bci_event_wait, DRM_AUTH},
+struct drm_ioctl_desc savage_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_INIT, savage_bci_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_CMDBUF, savage_bci_cmdbuf, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_EMIT, savage_bci_event_emit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SAVAGE_BCI_EVENT_WAIT, savage_bci_event_wait, DRM_AUTH),
};
int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls);
diff --git a/shared-core/savage_drm.h b/shared-core/savage_drm.h
index 6526c9aa..b960d557 100644
--- a/shared-core/savage_drm.h
+++ b/shared-core/savage_drm.h
@@ -47,7 +47,7 @@
typedef struct _drm_savage_sarea {
/* LRU lists for texture memory in agp space and on the card.
*/
- drm_tex_region_t texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
+ struct drm_tex_region texList[SAVAGE_NR_TEX_HEAPS][SAVAGE_NR_TEX_REGIONS+1];
unsigned int texAge[SAVAGE_NR_TEX_HEAPS];
/* Mechanism to validate card state.
@@ -112,7 +112,7 @@ typedef struct drm_savage_cmdbuf {
unsigned int vb_size; /* size of client vertex buffer in bytes */
unsigned int vb_stride; /* stride of vertices in 32bit words */
/* boxes in client's address space */
- drm_clip_rect_t __user *box_addr;
+ struct drm_clip_rect __user *box_addr;
unsigned int nbox; /* number of clipping boxes */
} drm_savage_cmdbuf_t;
diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h
index 88c571e1..d86bac04 100644
--- a/shared-core/savage_drv.h
+++ b/shared-core/savage_drv.h
@@ -58,7 +58,7 @@ typedef struct drm_savage_buf_priv {
struct drm_savage_buf_priv *next;
struct drm_savage_buf_priv *prev;
drm_savage_age_t age;
- drm_buf_t *buf;
+ struct drm_buf *buf;
} drm_savage_buf_priv_t;
typedef struct drm_savage_dma_page {
@@ -104,7 +104,7 @@ enum savage_family {
S3_LAST
};
-extern drm_ioctl_desc_t savage_ioctls[];
+extern struct drm_ioctl_desc savage_ioctls[];
extern int savage_max_ioctl;
#define S3_SAVAGE3D_SERIES(chip) ((chip>=S3_SAVAGE3D) && (chip<=S3_SAVAGE_MX))
@@ -192,33 +192,34 @@ typedef struct drm_savage_private {
/* Err, there is a macro wait_event in include/linux/wait.h.
* Avoid unwanted macro expansion. */
void (*emit_clip_rect)(struct drm_savage_private *dev_priv,
- const drm_clip_rect_t *pbox);
+ const struct drm_clip_rect *pbox);
void (*dma_flush)(struct drm_savage_private *dev_priv);
} drm_savage_private_t;
/* ioctls */
-extern int savage_bci_cmdbuf(DRM_IOCTL_ARGS);
-extern int savage_bci_buffers(DRM_IOCTL_ARGS);
+extern int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int savage_bci_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv);
/* BCI functions */
extern uint16_t savage_bci_emit_event(drm_savage_private_t *dev_priv,
unsigned int flags);
-extern void savage_freelist_put(drm_device_t *dev, drm_buf_t *buf);
+extern void savage_freelist_put(struct drm_device *dev, struct drm_buf *buf);
extern void savage_dma_reset(drm_savage_private_t *dev_priv);
extern void savage_dma_wait(drm_savage_private_t *dev_priv, unsigned int page);
extern uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv,
unsigned int n);
-extern int savage_driver_load(drm_device_t *dev, unsigned long chipset);
-extern int savage_driver_firstopen(drm_device_t *dev);
-extern void savage_driver_lastclose(drm_device_t *dev);
-extern int savage_driver_unload(drm_device_t *dev);
-extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp);
+extern int savage_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int savage_driver_firstopen(struct drm_device *dev);
+extern void savage_driver_lastclose(struct drm_device *dev);
+extern int savage_driver_unload(struct drm_device *dev);
+extern void savage_reclaim_buffers(struct drm_device *dev,
+ struct drm_file *file_priv);
/* state functions */
extern void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
- const drm_clip_rect_t *pbox);
+ const struct drm_clip_rect *pbox);
extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
- const drm_clip_rect_t *pbox);
+ const struct drm_clip_rect *pbox);
#define SAVAGE_FB_SIZE_S3 0x01000000 /* 16MB */
#define SAVAGE_FB_SIZE_S4 0x02000000 /* 32MB */
diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c
index acc98f89..dd593340 100644
--- a/shared-core/savage_state.c
+++ b/shared-core/savage_state.c
@@ -27,7 +27,7 @@
#include "savage_drv.h"
void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
- const drm_clip_rect_t *pbox)
+ const struct drm_clip_rect *pbox)
{
uint32_t scstart = dev_priv->state.s3d.new_scstart;
uint32_t scend = dev_priv->state.s3d.new_scend;
@@ -53,7 +53,7 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,
}
void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,
- const drm_clip_rect_t *pbox)
+ const struct drm_clip_rect *pbox)
{
uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;
@@ -83,7 +83,7 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
{
if ((addr & 6) != 2) { /* reserved bits */
DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (!(addr & 1)) { /* local */
addr &= ~7;
@@ -92,13 +92,13 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
DRM_ERROR
("bad texAddr%d %08x (local addr out of range)\n",
unit, addr);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
} else { /* AGP */
if (!dev_priv->agp_textures) {
DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
unit, addr);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
addr &= ~7;
if (addr < dev_priv->agp_textures->offset ||
@@ -107,7 +107,7 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,
DRM_ERROR
("bad texAddr%d %08x (AGP addr out of range)\n",
unit, addr);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
return 0;
@@ -132,7 +132,7 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,
start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
@@ -164,7 +164,7 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv,
start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) {
DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
start, start+count-1);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
@@ -275,7 +275,7 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,
static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
- const drm_buf_t *dmabuf)
+ const struct drm_buf *dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->prim.prim;
@@ -287,7 +287,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (!n)
@@ -301,7 +301,7 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@@ -310,18 +310,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
DRM_ERROR
("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
} else {
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@@ -329,18 +329,18 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (reorder) {
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
if (start + n > dmabuf->total/32) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
start, start + n - 1, dmabuf->total/32);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* Vertex DMA doesn't work with command DMA at the same time,
@@ -438,7 +438,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
if (n % 3 != 0) {
DRM_ERROR("wrong number of vertices %u in TRILIST\n",
n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@@ -447,24 +447,24 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
DRM_ERROR
("wrong number of vertices %u in TRIFAN/STRIP\n",
n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip > SAVAGE_SKIP_ALL_S3D) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
vtx_size = 8; /* full vertex */
} else {
if (skip > SAVAGE_SKIP_ALL_S4) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
vtx_size = 10; /* full vertex */
}
@@ -476,13 +476,13 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
if (vtx_size > vb_stride) {
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
vtx_size, vb_stride);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (start + n > vb_size / (vb_stride*4)) {
DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
start, start + n - 1, vb_size / (vb_stride*4));
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
prim <<= 25;
@@ -534,7 +534,7 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const uint16_t *idx,
- const drm_buf_t *dmabuf)
+ const struct drm_buf *dmabuf)
{
unsigned char reorder = 0;
unsigned int prim = cmd_header->idx.prim;
@@ -545,7 +545,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
if (!dmabuf) {
DRM_ERROR("called without dma buffers!\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (!n)
@@ -558,7 +558,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@@ -566,18 +566,18 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
if (n < 3) {
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip != 0) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
} else {
unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
@@ -585,11 +585,11 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (reorder) {
DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -626,7 +626,7 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
if (idx[i] > dmabuf->total/32) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], dmabuf->total/32);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -696,7 +696,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
case SAVAGE_PRIM_TRILIST:
if (n % 3 != 0) {
DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
case SAVAGE_PRIM_TRISTRIP:
@@ -704,24 +704,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
if (n < 3) {
DRM_ERROR
("wrong number of indices %u in TRIFAN/STRIP\n", n);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
break;
default:
DRM_ERROR("invalid primitive type %u\n", prim);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
if (skip > SAVAGE_SKIP_ALL_S3D) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
vtx_size = 8; /* full vertex */
} else {
if (skip > SAVAGE_SKIP_ALL_S4) {
DRM_ERROR("invalid skip flags 0x%04x\n", skip);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
vtx_size = 10; /* full vertex */
}
@@ -733,7 +733,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
if (vtx_size > vb_stride) {
DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
vtx_size, vb_stride);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
prim <<= 25;
@@ -746,7 +746,7 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
if (idx[i] > vb_size / (vb_stride*4)) {
DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
i, idx[i], vb_size / (vb_stride*4));
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
@@ -790,7 +790,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *cmd_header,
const drm_savage_cmd_header_t *data,
unsigned int nbox,
- const drm_clip_rect_t *boxes)
+ const struct drm_clip_rect *boxes)
{
unsigned int flags = cmd_header->clear0.flags;
unsigned int clear_cmd;
@@ -860,7 +860,7 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
}
static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
- unsigned int nbox, const drm_clip_rect_t *boxes)
+ unsigned int nbox, const struct drm_clip_rect *boxes)
{
unsigned int swap_cmd;
unsigned int i;
@@ -891,11 +891,11 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
const drm_savage_cmd_header_t *start,
const drm_savage_cmd_header_t *end,
- const drm_buf_t *dmabuf,
+ const struct drm_buf *dmabuf,
const unsigned int *vtxbuf,
unsigned int vb_size, unsigned int vb_stride,
unsigned int nbox,
- const drm_clip_rect_t *boxes)
+ const struct drm_clip_rect *boxes)
{
unsigned int i, j;
int ret;
@@ -941,7 +941,7 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
DRM_ERROR("IMPLEMENTATION ERROR: "
"non-drawing-command %d\n",
cmd_header.cmd.cmd);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (ret != 0)
@@ -952,35 +952,31 @@ static int savage_dispatch_draw(drm_savage_private_t *dev_priv,
return 0;
}
-int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
+int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_savage_private_t *dev_priv = dev->dev_private;
- drm_device_dma_t *dma = dev->dma;
- drm_buf_t *dmabuf;
- drm_savage_cmdbuf_t cmdbuf;
+ struct drm_device_dma *dma = dev->dma;
+ struct drm_buf *dmabuf;
+ drm_savage_cmdbuf_t *cmdbuf = data;
drm_savage_cmd_header_t *kcmd_addr = NULL;
drm_savage_cmd_header_t *first_draw_cmd;
unsigned int *kvb_addr = NULL;
- drm_clip_rect_t *kbox_addr = NULL;
+ struct drm_clip_rect *kbox_addr = NULL;
unsigned int i, j;
int ret = 0;
DRM_DEBUG("\n");
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_savage_cmdbuf_t __user *)data,
- sizeof(cmdbuf));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
if (dma && dma->buflist) {
- if (cmdbuf.dma_idx > dma->buf_count) {
+ if (cmdbuf->dma_idx > dma->buf_count) {
DRM_ERROR
("vertex buffer index %u out of range (0-%u)\n",
- cmdbuf.dma_idx, dma->buf_count-1);
- return DRM_ERR(EINVAL);
+ cmdbuf->dma_idx, dma->buf_count-1);
+ return -EINVAL;
}
- dmabuf = dma->buflist[cmdbuf.dma_idx];
+ dmabuf = dma->buflist[cmdbuf->dma_idx];
} else {
dmabuf = NULL;
}
@@ -990,47 +986,49 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
* COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
* for locking on FreeBSD.
*/
- if (cmdbuf.size) {
- kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER);
+ if (cmdbuf->size) {
+ kcmd_addr = drm_alloc(cmdbuf->size * 8, DRM_MEM_DRIVER);
if (kcmd_addr == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
- if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr,
- cmdbuf.size * 8))
+ if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf->cmd_addr,
+ cmdbuf->size * 8))
{
- drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
- return DRM_ERR(EFAULT);
+ drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
+ return -EFAULT;
}
- cmdbuf.cmd_addr = kcmd_addr;
+ cmdbuf->cmd_addr = kcmd_addr;
}
- if (cmdbuf.vb_size) {
- kvb_addr = drm_alloc(cmdbuf.vb_size, DRM_MEM_DRIVER);
+ if (cmdbuf->vb_size) {
+ kvb_addr = drm_alloc(cmdbuf->vb_size, DRM_MEM_DRIVER);
if (kvb_addr == NULL) {
- ret = DRM_ERR(ENOMEM);
+ ret = -ENOMEM;
goto done;
}
- if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf.vb_addr,
- cmdbuf.vb_size)) {
- ret = DRM_ERR(EFAULT);
+ if (DRM_COPY_FROM_USER(kvb_addr, cmdbuf->vb_addr,
+ cmdbuf->vb_size)) {
+ ret = -EFAULT;
goto done;
}
- cmdbuf.vb_addr = kvb_addr;
+ cmdbuf->vb_addr = kvb_addr;
}
- if (cmdbuf.nbox) {
- kbox_addr = drm_alloc(cmdbuf.nbox * sizeof(drm_clip_rect_t),
- DRM_MEM_DRIVER);
+ if (cmdbuf->nbox) {
+ kbox_addr = drm_alloc(cmdbuf->nbox *
+ sizeof(struct drm_clip_rect),
+ DRM_MEM_DRIVER);
if (kbox_addr == NULL) {
- ret = DRM_ERR(ENOMEM);
+ ret = -ENOMEM;
goto done;
}
- if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf.box_addr,
- cmdbuf.nbox * sizeof(drm_clip_rect_t))) {
- ret = DRM_ERR(EFAULT);
+ if (DRM_COPY_FROM_USER(kbox_addr, cmdbuf->box_addr,
+ cmdbuf->nbox *
+ sizeof(struct drm_clip_rect))) {
+ ret = -EFAULT;
goto done;
}
- cmdbuf.box_addr = kbox_addr;
+ cmdbuf->box_addr = kbox_addr;
}
/* Make sure writes to DMA buffers are finished before sending
@@ -1043,10 +1041,10 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
i = 0;
first_draw_cmd = NULL;
- while (i < cmdbuf.size) {
+ while (i < cmdbuf->size) {
drm_savage_cmd_header_t cmd_header;
- cmd_header = *(drm_savage_cmd_header_t *)cmdbuf.cmd_addr;
- cmdbuf.cmd_addr++;
+ cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;
+ cmdbuf->cmd_addr++;
i++;
/* Group drawing commands with same state to minimize
@@ -1056,28 +1054,29 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
case SAVAGE_CMD_DMA_IDX:
case SAVAGE_CMD_VB_IDX:
j = (cmd_header.idx.count + 3) / 4;
- if (i + j > cmdbuf.size) {
+ if (i + j > cmdbuf->size) {
DRM_ERROR("indexed drawing command extends "
"beyond end of command buffer\n");
DMA_FLUSH();
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
/* fall through */
case SAVAGE_CMD_DMA_PRIM:
case SAVAGE_CMD_VB_PRIM:
if (!first_draw_cmd)
- first_draw_cmd = cmdbuf.cmd_addr-1;
- cmdbuf.cmd_addr += j;
+ first_draw_cmd = cmdbuf->cmd_addr-1;
+ cmdbuf->cmd_addr += j;
i += j;
break;
default:
if (first_draw_cmd) {
ret = savage_dispatch_draw (
dev_priv, first_draw_cmd,
- cmdbuf.cmd_addr-1,
- dmabuf, cmdbuf.vb_addr, cmdbuf.vb_size,
- cmdbuf.vb_stride,
- cmdbuf.nbox, cmdbuf.box_addr);
+ cmdbuf->cmd_addr-1,
+ dmabuf, cmdbuf->vb_addr,
+ cmdbuf->vb_size,
+ cmdbuf->vb_stride,
+ cmdbuf->nbox, cmdbuf->box_addr);
if (ret != 0)
return ret;
first_draw_cmd = NULL;
@@ -1089,40 +1088,42 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
switch (cmd_header.cmd.cmd) {
case SAVAGE_CMD_STATE:
j = (cmd_header.state.count + 1) / 2;
- if (i + j > cmdbuf.size) {
+ if (i + j > cmdbuf->size) {
DRM_ERROR("command SAVAGE_CMD_STATE extends "
"beyond end of command buffer\n");
DMA_FLUSH();
- ret = DRM_ERR(EINVAL);
+ ret = -EINVAL;
goto done;
}
ret = savage_dispatch_state(dev_priv, &cmd_header,
- (const uint32_t *)cmdbuf.cmd_addr);
- cmdbuf.cmd_addr += j;
+ (const uint32_t *)cmdbuf->cmd_addr);
+ cmdbuf->cmd_addr += j;
i += j;
break;
case SAVAGE_CMD_CLEAR:
- if (i + 1 > cmdbuf.size) {
+ if (i + 1 > cmdbuf->size) {
DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
"beyond end of command buffer\n");
DMA_FLUSH();
- ret = DRM_ERR(EINVAL);
+ ret = -EINVAL;
goto done;
}
ret = savage_dispatch_clear(dev_priv, &cmd_header,
- cmdbuf.cmd_addr,
- cmdbuf.nbox, cmdbuf.box_addr);
- cmdbuf.cmd_addr++;
+ cmdbuf->cmd_addr,
+ cmdbuf->nbox,
+ cmdbuf->box_addr);
+ cmdbuf->cmd_addr++;
i++;
break;
case SAVAGE_CMD_SWAP:
- ret = savage_dispatch_swap(dev_priv, cmdbuf.nbox,
- cmdbuf.box_addr);
+ ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
+ cmdbuf->box_addr);
break;
default:
- DRM_ERROR("invalid command 0x%x\n", cmd_header.cmd.cmd);
+ DRM_ERROR("invalid command 0x%x\n",
+ cmd_header.cmd.cmd);
DMA_FLUSH();
- ret = DRM_ERR(EINVAL);
+ ret = -EINVAL;
goto done;
}
@@ -1134,9 +1135,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
if (first_draw_cmd) {
ret = savage_dispatch_draw (
- dev_priv, first_draw_cmd, cmdbuf.cmd_addr, dmabuf,
- cmdbuf.vb_addr, cmdbuf.vb_size, cmdbuf.vb_stride,
- cmdbuf.nbox, cmdbuf.box_addr);
+ dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
+ cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
+ cmdbuf->nbox, cmdbuf->box_addr);
if (ret != 0) {
DMA_FLUSH();
goto done;
@@ -1145,7 +1146,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
DMA_FLUSH();
- if (dmabuf && cmdbuf.discard) {
+ if (dmabuf && cmdbuf->discard) {
drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
uint16_t event;
event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
@@ -1155,9 +1156,9 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS)
done:
/* If we didn't need to allocate them, these'll be NULL */
- drm_free(kcmd_addr, cmdbuf.size * 8, DRM_MEM_DRIVER);
- drm_free(kvb_addr, cmdbuf.vb_size, DRM_MEM_DRIVER);
- drm_free(kbox_addr, cmdbuf.nbox * sizeof(drm_clip_rect_t),
+ drm_free(kcmd_addr, cmdbuf->size * 8, DRM_MEM_DRIVER);
+ drm_free(kvb_addr, cmdbuf->vb_size, DRM_MEM_DRIVER);
+ drm_free(kbox_addr, cmdbuf->nbox * sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
return ret;
diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h
index ec572ad4..a4a88fe1 100644
--- a/shared-core/sis_drv.h
+++ b/shared-core/sis_drv.h
@@ -58,7 +58,7 @@ enum sis_family {
typedef struct drm_sis_private {
drm_local_map_t *mmio;
unsigned int idle_fault;
- drm_sman_t sman;
+ struct drm_sman sman;
unsigned int chipset;
int vram_initialized;
int agp_initialized;
@@ -66,9 +66,10 @@ typedef struct drm_sis_private {
unsigned long agp_offset;
} drm_sis_private_t;
-extern int sis_idle(drm_device_t *dev);
-extern void sis_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
-extern void sis_lastclose(drm_device_t *dev);
+extern int sis_idle(struct drm_device *dev);
+extern void sis_reclaim_buffers_locked(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern void sis_lastclose(struct drm_device *dev);
#else
#include "sis_ds.h"
@@ -78,14 +79,14 @@ typedef struct drm_sis_private {
memHeap_t *FBHeap;
} drm_sis_private_t;
-extern int sis_init_context(drm_device_t * dev, int context);
-extern int sis_final_context(drm_device_t * dev, int context);
+extern int sis_init_context(struct drm_device * dev, int context);
+extern int sis_final_context(struct drm_device * dev, int context);
#endif
-extern drm_ioctl_desc_t sis_ioctls[];
+extern struct drm_ioctl_desc sis_ioctls[];
extern int sis_max_ioctl;
#endif
diff --git a/shared-core/sis_mm.c b/shared-core/sis_mm.c
index 6d074d6f..e11939fa 100644
--- a/shared-core/sis_mm.c
+++ b/shared-core/sis_mm.c
@@ -81,59 +81,52 @@ static int del_alloc_set(int context, int type, unsigned int val)
/* fb management via fb device */
#if defined(__linux__) && defined(CONFIG_FB_SIS)
-static int sis_fb_init(DRM_IOCTL_ARGS)
+static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
return 0;
}
-static int sis_fb_alloc(DRM_IOCTL_ARGS)
+static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_sis_mem_t fb;
+ drm_sis_mem_t *fb = data;
struct sis_memreq req;
- drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
int retval = 0;
- DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
-
- req.size = fb.size;
+ req.size = fb->size;
sis_malloc(&req);
if (req.offset) {
/* TODO */
- fb.offset = req.offset;
- fb.free = req.offset;
- if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
+ fb->offset = req.offset;
+ fb->free = req.offset;
+ if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) {
DRM_DEBUG("adding to allocation set fails\n");
sis_free(req.offset);
- retval = DRM_ERR(EINVAL);
+ retval = -EINVAL;
}
} else {
- fb.offset = 0;
- fb.size = 0;
- fb.free = 0;
+ fb->offset = 0;
+ fb->size = 0;
+ fb->free = 0;
}
- DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
-
- DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb.size, req.offset);
+ DRM_DEBUG("alloc fb, size = %d, offset = %ld\n", fb->size, req.offset);
return retval;
}
-static int sis_fb_free(DRM_IOCTL_ARGS)
+static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_sis_mem_t fb;
+ drm_sis_mem_t *fb = data;
int retval = 0;
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));
-
- if (!fb.free)
- return DRM_ERR(EINVAL);
+ if (!fb->free)
+ return -EINVAL;
- if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
- retval = DRM_ERR(EINVAL);
- sis_free(fb.free);
+ if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free))
+ retval = -EINVAL;
+ sis_free(fb->free);
- DRM_DEBUG("free fb, offset = 0x%lx\n", fb.free);
+ DRM_DEBUG("free fb, offset = 0x%lx\n", fb->free);
return retval;
}
@@ -150,13 +143,10 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
* X driver/sisfb HW- Command-
* framebuffer memory DRI heap Cursor queue
*/
-static int sis_fb_init(DRM_IOCTL_ARGS)
+static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_fb_t fb;
-
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb));
+ drm_sis_fb_t *fb = data;
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
@@ -167,71 +157,62 @@ static int sis_fb_init(DRM_IOCTL_ARGS)
}
if (dev_priv->FBHeap != NULL)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
- dev_priv->FBHeap = mmInit(fb.offset, fb.size);
+ dev_priv->FBHeap = mmInit(fb->offset, fb->size);
- DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size);
+ DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
return 0;
}
-static int sis_fb_alloc(DRM_IOCTL_ARGS)
+static int sis_fb_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
- drm_sis_mem_t fb;
+ drm_sis_mem_t *fb = data;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
- return DRM_ERR(EINVAL);
-
- DRM_COPY_FROM_USER_IOCTL(fb, argp, sizeof(fb));
+ return -EINVAL;
- block = mmAllocMem(dev_priv->FBHeap, fb.size, 0, 0);
+ block = mmAllocMem(dev_priv->FBHeap, fb->size, 0, 0);
if (block) {
/* TODO */
- fb.offset = block->ofs;
- fb.free = (unsigned long)block;
- if (!add_alloc_set(fb.context, VIDEO_TYPE, fb.free)) {
+ fb->offset = block->ofs;
+ fb->free = (unsigned long)block;
+ if (!add_alloc_set(fb->context, VIDEO_TYPE, fb->free)) {
DRM_DEBUG("adding to allocation set fails\n");
- mmFreeMem((PMemBlock) fb.free);
- retval = DRM_ERR(EINVAL);
+ mmFreeMem((PMemBlock) fb->free);
+ retval = -EINVAL;
}
} else {
- fb.offset = 0;
- fb.size = 0;
- fb.free = 0;
+ fb->offset = 0;
+ fb->size = 0;
+ fb->free = 0;
}
- DRM_COPY_TO_USER_IOCTL(argp, fb, sizeof(fb));
-
- DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb.size, fb.offset);
+ DRM_DEBUG("alloc fb, size = %d, offset = %d\n", fb->size, fb->offset);
return retval;
}
-static int sis_fb_free(DRM_IOCTL_ARGS)
+static int sis_fb_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t fb;
+ drm_sis_mem_t *fb = data;
if (dev_priv == NULL || dev_priv->FBHeap == NULL)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_mem_t __user *) data, sizeof(fb));
+ if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb->free))
+ return -EINVAL;
- if (!mmBlockInHeap(dev_priv->FBHeap, (PMemBlock) fb.free))
- return DRM_ERR(EINVAL);
+ if (!del_alloc_set(fb->context, VIDEO_TYPE, fb->free))
+ return -EINVAL;
+ mmFreeMem((PMemBlock) fb->free);
- if (!del_alloc_set(fb.context, VIDEO_TYPE, fb.free))
- return DRM_ERR(EINVAL);
- mmFreeMem((PMemBlock) fb.free);
-
- DRM_DEBUG("free fb, free = 0x%lx\n", fb.free);
+ DRM_DEBUG("free fb, free = 0x%lx\n", fb->free);
return 0;
}
@@ -240,11 +221,10 @@ static int sis_fb_free(DRM_IOCTL_ARGS)
/* agp memory management */
-static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
+static int sis_ioctl_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_agp_t agp;
+ drm_sis_agp_t *agp = data;
if (dev_priv == NULL) {
dev->dev_private = drm_calloc(1, sizeof(drm_sis_private_t),
@@ -255,75 +235,63 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS)
}
if (dev_priv->AGPHeap != NULL)
- return DRM_ERR(EINVAL);
-
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data,
- sizeof(agp));
+ return -EINVAL;
- dev_priv->AGPHeap = mmInit(agp.offset, agp.size);
+ dev_priv->AGPHeap = mmInit(agp->offset, agp->size);
- DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size);
+ DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
return 0;
}
-static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS)
+static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *)data;
- drm_sis_mem_t agp;
+ drm_sis_mem_t *agp = data;
PMemBlock block;
int retval = 0;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
- DRM_COPY_FROM_USER_IOCTL(agp, argp, sizeof(agp));
-
- block = mmAllocMem(dev_priv->AGPHeap, agp.size, 0, 0);
+ block = mmAllocMem(dev_priv->AGPHeap, agp->size, 0, 0);
if (block) {
/* TODO */
- agp.offset = block->ofs;
- agp.free = (unsigned long)block;
- if (!add_alloc_set(agp.context, AGP_TYPE, agp.free)) {
+ agp->offset = block->ofs;
+ agp->free = (unsigned long)block;
+ if (!add_alloc_set(agp->context, AGP_TYPE, agp->free)) {
DRM_DEBUG("adding to allocation set fails\n");
- mmFreeMem((PMemBlock) agp.free);
+ mmFreeMem((PMemBlock) agp->free);
retval = -1;
}
} else {
- agp.offset = 0;
- agp.size = 0;
- agp.free = 0;
+ agp->offset = 0;
+ agp->size = 0;
+ agp->free = 0;
}
- DRM_COPY_TO_USER_IOCTL(argp, agp, sizeof(agp));
-
- DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp.size, agp.offset);
+ DRM_DEBUG("alloc agp, size = %d, offset = %d\n", agp->size,
+ agp->offset);
return retval;
}
-static int sis_ioctl_agp_free(DRM_IOCTL_ARGS)
+static int sis_ioctl_agp_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_sis_private_t *dev_priv = dev->dev_private;
- drm_sis_mem_t agp;
+ drm_sis_mem_t *agp = data;
if (dev_priv == NULL || dev_priv->AGPHeap == NULL)
- return DRM_ERR(EINVAL);
-
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_mem_t __user *) data,
- sizeof(agp));
+ return -EINVAL;
- if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp.free))
- return DRM_ERR(EINVAL);
+ if (!mmBlockInHeap(dev_priv->AGPHeap, (PMemBlock) agp->free))
+ return -EINVAL;
- mmFreeMem((PMemBlock) agp.free);
- if (!del_alloc_set(agp.context, AGP_TYPE, agp.free))
- return DRM_ERR(EINVAL);
+ mmFreeMem((PMemBlock) agp->free);
+ if (!del_alloc_set(agp->context, AGP_TYPE, agp->free))
+ return -EINVAL;
- DRM_DEBUG("free agp, free = 0x%lx\n", agp.free);
+ DRM_DEBUG("free agp, free = 0x%lx\n", agp->free);
return 0;
}
@@ -407,12 +375,12 @@ int sis_final_context(struct drm_device *dev, int context)
}
-drm_ioctl_desc_t sis_ioctls[] = {
+struct drm_ioctl_desc sis_ioctls[] = {
- [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_fb_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = {sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
- [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_ioctl_agp_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = {sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}
+ DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_fb_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_ioctl_agp_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY)
};
int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls);
diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c
index 333c4bcf..bd737a7e 100644
--- a/shared-core/via_dma.c
+++ b/shared-core/via_dma.c
@@ -139,7 +139,7 @@ static inline uint32_t *via_check_dma(drm_via_private_t * dev_priv,
return (uint32_t *) (dev_priv->dma_ptr + dev_priv->dma_low);
}
-int via_dma_cleanup(drm_device_t * dev)
+int via_dma_cleanup(struct drm_device * dev)
{
if (dev->dev_private) {
drm_via_private_t *dev_priv =
@@ -157,30 +157,30 @@ int via_dma_cleanup(drm_device_t * dev)
return 0;
}
-static int via_initialize(drm_device_t * dev,
+static int via_initialize(struct drm_device * dev,
drm_via_private_t * dev_priv,
drm_via_dma_init_t * init)
{
if (!dev_priv || !dev_priv->mmio) {
DRM_ERROR("via_dma_init called before via_map_init\n");
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (dev_priv->ring.virtual_start != NULL) {
DRM_ERROR("%s called again without calling cleanup\n",
__FUNCTION__);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (!dev->agp || !dev->agp->base) {
DRM_ERROR("%s called with no agp memory available\n",
__FUNCTION__);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (dev_priv->chipset == VIA_DX9_0) {
DRM_ERROR("AGP DMA is not supported on this chip\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
dev_priv->ring.map.offset = dev->agp->base + init->offset;
@@ -195,7 +195,7 @@ static int via_initialize(drm_device_t * dev,
via_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -215,35 +215,31 @@ static int via_initialize(drm_device_t * dev,
return 0;
}
-static int via_dma_init(DRM_IOCTL_ARGS)
+static int via_dma_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
- drm_via_dma_init_t init;
+ drm_via_dma_init_t *init = data;
int retcode = 0;
- DRM_COPY_FROM_USER_IOCTL(init, (drm_via_dma_init_t __user *) data,
- sizeof(init));
-
- switch (init.func) {
+ switch (init->func) {
case VIA_INIT_DMA:
if (!DRM_SUSER(DRM_CURPROC))
- retcode = DRM_ERR(EPERM);
+ retcode = -EPERM;
else
- retcode = via_initialize(dev, dev_priv, &init);
+ retcode = via_initialize(dev, dev_priv, init);
break;
case VIA_CLEANUP_DMA:
if (!DRM_SUSER(DRM_CURPROC))
- retcode = DRM_ERR(EPERM);
+ retcode = -EPERM;
else
retcode = via_dma_cleanup(dev);
break;
case VIA_DMA_INITIALIZED:
retcode = (dev_priv->ring.virtual_start != NULL) ?
- 0 : DRM_ERR(EFAULT);
+ 0 : -EFAULT;
break;
default:
- retcode = DRM_ERR(EINVAL);
+ retcode = -EINVAL;
break;
}
@@ -252,7 +248,7 @@ static int via_dma_init(DRM_IOCTL_ARGS)
-static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
+static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv;
uint32_t *vb;
@@ -263,15 +259,15 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
if (cmd->size > VIA_PCI_BUF_SIZE) {
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
/*
* Running this function on AGP memory is dead slow. Therefore
@@ -287,7 +283,7 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
vb = via_check_dma(dev_priv, (cmd->size < 0x100) ? 0x102 : cmd->size);
if (vb == NULL) {
- return DRM_ERR(EAGAIN);
+ return -EAGAIN;
}
memcpy(vb, dev_priv->pci_buf, cmd->size);
@@ -306,39 +302,35 @@ static int via_dispatch_cmdbuffer(drm_device_t * dev, drm_via_cmdbuffer_t * cmd)
return 0;
}
-int via_driver_dma_quiescent(drm_device_t * dev)
+int via_driver_dma_quiescent(struct drm_device * dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
if (!via_wait_idle(dev_priv)) {
- return DRM_ERR(EBUSY);
+ return -EBUSY;
}
return 0;
}
-static int via_flush_ioctl(DRM_IOCTL_ARGS)
+static int via_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- LOCK_TEST_WITH_RETURN(dev, filp);
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
return via_driver_dma_quiescent(dev);
}
-static int via_cmdbuffer(DRM_IOCTL_ARGS)
+static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_cmdbuffer_t cmdbuf;
+ drm_via_cmdbuffer_t *cmdbuf = data;
int ret;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
- sizeof(cmdbuf));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf.buf, cmdbuf.size);
+ DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
+ cmdbuf->size);
- ret = via_dispatch_cmdbuffer(dev, &cmdbuf);
+ ret = via_dispatch_cmdbuffer(dev, cmdbuf);
if (ret) {
return ret;
}
@@ -346,17 +338,17 @@ static int via_cmdbuffer(DRM_IOCTL_ARGS)
return 0;
}
-static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
+static int via_dispatch_pci_cmdbuffer(struct drm_device * dev,
drm_via_cmdbuffer_t * cmd)
{
drm_via_private_t *dev_priv = dev->dev_private;
int ret;
if (cmd->size > VIA_PCI_BUF_SIZE) {
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
}
if (DRM_COPY_FROM_USER(dev_priv->pci_buf, cmd->buf, cmd->size))
- return DRM_ERR(EFAULT);
+ return -EFAULT;
if ((ret =
via_verify_command_stream((uint32_t *) dev_priv->pci_buf,
@@ -370,21 +362,17 @@ static int via_dispatch_pci_cmdbuffer(drm_device_t * dev,
return ret;
}
-static int via_pci_cmdbuffer(DRM_IOCTL_ARGS)
+static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_cmdbuffer_t cmdbuf;
+ drm_via_cmdbuffer_t *cmdbuf = data;
int ret;
- LOCK_TEST_WITH_RETURN(dev, filp);
-
- DRM_COPY_FROM_USER_IOCTL(cmdbuf, (drm_via_cmdbuffer_t __user *) data,
- sizeof(cmdbuf));
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf.buf,
- cmdbuf.size);
+ DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
+ cmdbuf->size);
- ret = via_dispatch_pci_cmdbuffer(dev, &cmdbuf);
+ ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
if (ret) {
return ret;
}
@@ -645,94 +633,87 @@ static void via_cmdbuf_reset(drm_via_private_t * dev_priv)
* User interface to the space and lag functions.
*/
-static int via_cmdbuf_size(DRM_IOCTL_ARGS)
+static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_cmdbuf_size_t d_siz;
+ drm_via_cmdbuf_size_t *d_siz = data;
int ret = 0;
uint32_t tmp_size, count;
drm_via_private_t *dev_priv;
DRM_DEBUG("via cmdbuf_size\n");
- LOCK_TEST_WITH_RETURN( dev, filp );
+ LOCK_TEST_WITH_RETURN(dev, file_priv);
dev_priv = (drm_via_private_t *) dev->dev_private;
if (dev_priv->ring.virtual_start == NULL) {
DRM_ERROR("%s called without initializing AGP ring buffer.\n",
__FUNCTION__);
- return DRM_ERR(EFAULT);
+ return -EFAULT;
}
- DRM_COPY_FROM_USER_IOCTL(d_siz, (drm_via_cmdbuf_size_t __user *) data,
- sizeof(d_siz));
-
-
count = 1000000;
- tmp_size = d_siz.size;
- switch(d_siz.func) {
+ tmp_size = d_siz->size;
+ switch(d_siz->func) {
case VIA_CMDBUF_SPACE:
- while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz.size)
+ while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
&& count--) {
- if (!d_siz.wait) {
+ if (!d_siz->wait) {
break;
}
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_SPACE timed out.\n");
- ret = DRM_ERR(EAGAIN);
+ ret = -EAGAIN;
}
break;
case VIA_CMDBUF_LAG:
- while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz.size)
+ while (((tmp_size = via_cmdbuf_lag(dev_priv)) > d_siz->size)
&& count--) {
- if (!d_siz.wait) {
+ if (!d_siz->wait) {
break;
}
}
if (!count) {
DRM_ERROR("VIA_CMDBUF_LAG timed out.\n");
- ret = DRM_ERR(EAGAIN);
+ ret = -EAGAIN;
}
break;
default:
- ret = DRM_ERR(EFAULT);
+ ret = -EFAULT;
}
- d_siz.size = tmp_size;
+ d_siz->size = tmp_size;
- DRM_COPY_TO_USER_IOCTL((drm_via_cmdbuf_size_t __user *) data, d_siz,
- sizeof(d_siz));
return ret;
}
#ifndef VIA_HAVE_DMABLIT
int
-via_dma_blit_sync( DRM_IOCTL_ARGS ) {
+via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
int
-via_dma_blit( DRM_IOCTL_ARGS ) {
+via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
#endif
-drm_ioctl_desc_t via_ioctls[] = {
- [DRM_IOCTL_NR(DRM_VIA_ALLOCMEM)] = {via_mem_alloc, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_FREEMEM)] = {via_mem_free, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_AGP_INIT)] = {via_agp_init, DRM_AUTH|DRM_MASTER},
- [DRM_IOCTL_NR(DRM_VIA_FB_INIT)] = {via_fb_init, DRM_AUTH|DRM_MASTER},
- [DRM_IOCTL_NR(DRM_VIA_MAP_INIT)] = {via_map_init, DRM_AUTH|DRM_MASTER},
- [DRM_IOCTL_NR(DRM_VIA_DEC_FUTEX)] = {via_decoder_futex, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_DMA_INIT)] = {via_dma_init, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_CMDBUFFER)] = {via_cmdbuffer, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_FLUSH)] = {via_flush_ioctl, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_PCICMD)] = {via_pci_cmdbuffer, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_CMDBUF_SIZE)] = {via_cmdbuf_size, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_WAIT_IRQ)] = {via_wait_irq, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_DMA_BLIT)] = {via_dma_blit, DRM_AUTH},
- [DRM_IOCTL_NR(DRM_VIA_BLIT_SYNC)] = {via_dma_blit_sync, DRM_AUTH}
+struct drm_ioctl_desc via_ioctls[] = {
+ DRM_IOCTL_DEF(DRM_VIA_ALLOCMEM, via_mem_alloc, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_FREEMEM, via_mem_free, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_AGP_INIT, via_agp_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_VIA_FB_INIT, via_fb_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_VIA_MAP_INIT, via_map_init, DRM_AUTH|DRM_MASTER),
+ DRM_IOCTL_DEF(DRM_VIA_DEC_FUTEX, via_decoder_futex, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_DMA_INIT, via_dma_init, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_CMDBUFFER, via_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_FLUSH, via_flush_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_PCICMD, via_pci_cmdbuffer, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_CMDBUF_SIZE, via_cmdbuf_size, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_WAIT_IRQ, via_wait_irq, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_DMA_BLIT, via_dma_blit, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_VIA_BLIT_SYNC, via_dma_blit_sync, DRM_AUTH)
};
int via_max_ioctl = DRM_ARRAY_SIZE(via_ioctls);
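Note on the handler conversion visible in this hunk: the DRM_IOCTL_ARGS/DRM_DEVICE boilerplate and the explicit DRM_COPY_FROM_USER_IOCTL / DRM_COPY_TO_USER_IOCTL calls go away because the drm core now passes the already-copied argument buffer to the (dev, data, file_priv) handler and, for read/write ioctls, copies it back on return according to the DRM_IOCTL_DEF table entry. A minimal sketch of the resulting handler shape follows; drm_via_example_t and DRM_VIA_EXAMPLE are hypothetical names used only for illustration, not part of the driver.

static int via_example(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        drm_via_example_t *arg = data;  /* argument already copied in by the core */

        if (dev->dev_private == NULL)
                return -EINVAL;

        arg->value = 0;                 /* for _IOWR ioctls this reaches user space */
        return 0;
}

/* registered through the table form used above:
 *   DRM_IOCTL_DEF(DRM_VIA_EXAMPLE, via_example, DRM_AUTH)
 */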
diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h
index 635e4637..b15785b3 100644
--- a/shared-core/via_drm.h
+++ b/shared-core/via_drm.h
@@ -54,7 +54,7 @@
#define VIA_NR_XVMC_LOCKS 5
#define VIA_MAX_CACHELINE_SIZE 64
#define XVMCLOCKPTR(saPriv,lockNo) \
- ((volatile drm_hw_lock_t *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
+ ((volatile struct drm_hw_lock *)(((((unsigned long) (saPriv)->XvMCLockArea) + \
(VIA_MAX_CACHELINE_SIZE - 1)) & \
~(VIA_MAX_CACHELINE_SIZE - 1)) + \
VIA_MAX_CACHELINE_SIZE*(lockNo)))
@@ -187,7 +187,7 @@ typedef struct _drm_via_tex_region {
typedef struct _drm_via_sarea {
unsigned int dirty;
unsigned int nbox;
- drm_clip_rect_t boxes[VIA_NR_SAREA_CLIPRECTS];
+ struct drm_clip_rect boxes[VIA_NR_SAREA_CLIPRECTS];
drm_via_tex_region_t texList[VIA_NR_TEX_REGIONS + 1];
int texAge; /* last time texture was uploaded */
int ctxOwner; /* last context to upload state */
diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c
index 0a478fef..9f099555 100644
--- a/shared-core/via_drv.c
+++ b/shared-core/via_drv.c
@@ -40,7 +40,7 @@ static struct pci_device_id pciidlist[] = {
#ifdef VIA_HAVE_FENCE
-static drm_fence_driver_t via_fence_driver = {
+static struct drm_fence_driver via_fence_driver = {
.num_classes = 1,
.wrap_diff = (1 << 30),
.flush_diff = (1 << 20),
@@ -65,7 +65,7 @@ static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM
static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
-static drm_bo_driver_t via_bo_driver = {
+static struct drm_bo_driver via_bo_driver = {
.mem_type_prio = via_mem_prios,
.mem_busy_prio = via_busy_prios,
.num_mem_type_prio = ARRAY_SIZE(via_mem_prios),
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index baafbbff..15e65950 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -116,7 +116,7 @@ typedef struct drm_via_private {
/* Memory manager stuff */
#ifdef VIA_HAVE_CORE_MM
unsigned int idle_fault;
- drm_sman_t sman;
+ struct drm_sman sman;
int vram_initialized;
int agp_initialized;
unsigned long vram_offset;
@@ -148,54 +148,55 @@ enum via_family {
#define VIA_READ8(reg) DRM_READ8(VIA_BASE, reg)
#define VIA_WRITE8(reg,val) DRM_WRITE8(VIA_BASE, reg, val)
-extern drm_ioctl_desc_t via_ioctls[];
+extern struct drm_ioctl_desc via_ioctls[];
extern int via_max_ioctl;
-extern int via_fb_init(DRM_IOCTL_ARGS);
-extern int via_mem_alloc(DRM_IOCTL_ARGS);
-extern int via_mem_free(DRM_IOCTL_ARGS);
-extern int via_agp_init(DRM_IOCTL_ARGS);
-extern int via_map_init(DRM_IOCTL_ARGS);
-extern int via_decoder_futex(DRM_IOCTL_ARGS);
-extern int via_wait_irq(DRM_IOCTL_ARGS);
-extern int via_dma_blit_sync( DRM_IOCTL_ARGS );
-extern int via_dma_blit( DRM_IOCTL_ARGS );
+extern int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv );
+extern int via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv );
-extern int via_driver_load(drm_device_t *dev, unsigned long chipset);
-extern int via_driver_unload(drm_device_t *dev);
-extern int via_final_context(drm_device_t * dev, int context);
+extern int via_driver_load(struct drm_device *dev, unsigned long chipset);
+extern int via_driver_unload(struct drm_device *dev);
+extern int via_final_context(struct drm_device * dev, int context);
-extern int via_do_cleanup_map(drm_device_t * dev);
-extern int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence);
+extern int via_do_cleanup_map(struct drm_device * dev);
+extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
-extern void via_driver_irq_preinstall(drm_device_t * dev);
-extern void via_driver_irq_postinstall(drm_device_t * dev);
-extern void via_driver_irq_uninstall(drm_device_t * dev);
+extern void via_driver_irq_preinstall(struct drm_device * dev);
+extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern void via_driver_irq_uninstall(struct drm_device * dev);
-extern int via_dma_cleanup(drm_device_t * dev);
+extern int via_dma_cleanup(struct drm_device * dev);
extern void via_init_command_verifier(void);
-extern int via_driver_dma_quiescent(drm_device_t * dev);
+extern int via_driver_dma_quiescent(struct drm_device * dev);
extern void via_init_futex(drm_via_private_t *dev_priv);
extern void via_cleanup_futex(drm_via_private_t *dev_priv);
extern void via_release_futex(drm_via_private_t *dev_priv, int context);
#ifdef VIA_HAVE_CORE_MM
-extern void via_reclaim_buffers_locked(drm_device_t *dev, struct file *filp);
-extern void via_lastclose(drm_device_t *dev);
+extern void via_reclaim_buffers_locked(struct drm_device *dev,
+ struct drm_file *file_priv);
+extern void via_lastclose(struct drm_device *dev);
#else
-extern int via_init_context(drm_device_t * dev, int context);
+extern int via_init_context(struct drm_device * dev, int context);
#endif
#ifdef VIA_HAVE_DMABLIT
-extern void via_dmablit_handler(drm_device_t *dev, int engine, int from_irq);
-extern void via_init_dmablit(drm_device_t *dev);
+extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
+extern void via_init_dmablit(struct drm_device *dev);
#endif
#ifdef VIA_HAVE_FENCE
extern void via_fence_timer(unsigned long data);
-extern void via_poke_flush(drm_device_t * dev, uint32_t class);
-extern int via_fence_emit_sequence(drm_device_t * dev, uint32_t class,
+extern void via_poke_flush(struct drm_device * dev, uint32_t class);
+extern int via_fence_emit_sequence(struct drm_device * dev, uint32_t class,
uint32_t flags,
uint32_t * sequence,
uint32_t * native_type);
@@ -204,14 +205,14 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,
#endif
#ifdef VIA_HAVE_BUFFER
-extern drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t *dev);
-extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *class, uint32_t *type);
-extern int via_invalidate_caches(drm_device_t *dev, uint32_t buffer_flags);
-extern int via_init_mem_type(drm_device_t *dev, uint32_t type,
- drm_mem_type_manager_t *man);
-extern uint32_t via_evict_mask(drm_buffer_object_t *bo);
-extern int via_move(drm_buffer_object_t *bo, int evict,
- int no_wait, drm_bo_mem_reg_t *new_mem);
+extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
+extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *type);
+extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
+extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
+ struct drm_mem_type_manager *man);
+extern uint32_t via_evict_mask(struct drm_buffer_object *bo);
+extern int via_move(struct drm_buffer_object *bo, int evict,
+ int no_wait, struct drm_bo_mem_reg *new_mem);
#endif
#endif
diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c
index 2ac86970..475b6461 100644
--- a/shared-core/via_irq.c
+++ b/shared-core/via_irq.c
@@ -99,7 +99,7 @@ static unsigned time_diff(struct timeval *now,struct timeval *then)
irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
{
- drm_device_t *dev = (drm_device_t *) arg;
+ struct drm_device *dev = (struct drm_device *) arg;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
int handled = 0;
@@ -171,7 +171,7 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
}
}
-int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
+int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
unsigned int cur_vblank;
@@ -199,7 +199,7 @@ int via_driver_vblank_wait(drm_device_t * dev, unsigned int *sequence)
}
static int
-via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
+via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequence,
unsigned int *sequence)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -213,13 +213,13 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
if (!dev_priv) {
DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
if (irq >= drm_via_irq_num ) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
irq);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
real_irq = dev_priv->irq_map[irq];
@@ -227,7 +227,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
if (real_irq < 0) {
DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
__FUNCTION__, irq);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
masks = dev_priv->irq_masks;
@@ -253,7 +253,7 @@ via_driver_irq_wait(drm_device_t * dev, unsigned int irq, int force_sequence,
* drm_dma.h hooks
*/
-void via_driver_irq_preinstall(drm_device_t * dev)
+void via_driver_irq_preinstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
@@ -302,7 +302,7 @@ void via_driver_irq_preinstall(drm_device_t * dev)
}
}
-void via_driver_irq_postinstall(drm_device_t * dev)
+void via_driver_irq_postinstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
@@ -321,7 +321,7 @@ void via_driver_irq_postinstall(drm_device_t * dev)
}
}
-void via_driver_irq_uninstall(drm_device_t * dev)
+void via_driver_irq_uninstall(struct drm_device * dev)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
u32 status;
@@ -340,11 +340,9 @@ void via_driver_irq_uninstall(drm_device_t * dev)
}
}
-int via_wait_irq(DRM_IOCTL_ARGS)
+int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_irqwait_t __user *argp = (void __user *)data;
- drm_via_irqwait_t irqwait;
+ drm_via_irqwait_t *irqwait = data;
struct timeval now;
int ret = 0;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -352,46 +350,44 @@ int via_wait_irq(DRM_IOCTL_ARGS)
int force_sequence;
if (!dev->irq)
- return DRM_ERR(EINVAL);
+ return -EINVAL;
- DRM_COPY_FROM_USER_IOCTL(irqwait, argp, sizeof(irqwait));
- if (irqwait.request.irq >= dev_priv->num_irqs) {
+ if (irqwait->request.irq >= dev_priv->num_irqs) {
DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
- irqwait.request.irq);
- return DRM_ERR(EINVAL);
+ irqwait->request.irq);
+ return -EINVAL;
}
- cur_irq += irqwait.request.irq;
+ cur_irq += irqwait->request.irq;
- switch (irqwait.request.type & ~VIA_IRQ_FLAGS_MASK) {
+ switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
case VIA_IRQ_RELATIVE:
- irqwait.request.sequence += atomic_read(&cur_irq->irq_received);
- irqwait.request.type &= ~_DRM_VBLANK_RELATIVE;
+ irqwait->request.sequence +=
+ atomic_read(&cur_irq->irq_received);
+ irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
case VIA_IRQ_ABSOLUTE:
break;
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- if (irqwait.request.type & VIA_IRQ_SIGNAL) {
+ if (irqwait->request.type & VIA_IRQ_SIGNAL) {
DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n",
__FUNCTION__);
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
- force_sequence = (irqwait.request.type & VIA_IRQ_FORCE_SEQUENCE);
+ force_sequence = (irqwait->request.type & VIA_IRQ_FORCE_SEQUENCE);
- ret = via_driver_irq_wait(dev, irqwait.request.irq, force_sequence,
- &irqwait.request.sequence);
+ ret = via_driver_irq_wait(dev, irqwait->request.irq, force_sequence,
+ &irqwait->request.sequence);
#ifdef __linux__
do_gettimeofday(&now);
#else
microtime(&now);
#endif
- irqwait.reply.tval_sec = now.tv_sec;
- irqwait.reply.tval_usec = now.tv_usec;
-
- DRM_COPY_TO_USER_IOCTL(argp, irqwait, sizeof(irqwait));
+ irqwait->reply.tval_sec = now.tv_sec;
+ irqwait->reply.tval_usec = now.tv_usec;
return ret;
}
diff --git a/shared-core/via_map.c b/shared-core/via_map.c
index 037a1c2c..1623df68 100644
--- a/shared-core/via_map.c
+++ b/shared-core/via_map.c
@@ -25,7 +25,7 @@
#include "via_drm.h"
#include "via_drv.h"
-static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
+static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
{
drm_via_private_t *dev_priv = dev->dev_private;
int ret = 0;
@@ -83,7 +83,7 @@ static int via_do_init_map(drm_device_t * dev, drm_via_init_t * init)
}
-int via_do_cleanup_map(drm_device_t * dev)
+int via_do_cleanup_map(struct drm_device * dev)
{
via_dma_cleanup(dev);
@@ -91,19 +91,15 @@ int via_do_cleanup_map(drm_device_t * dev)
}
-int via_map_init(DRM_IOCTL_ARGS)
+int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_init_t init;
+ drm_via_init_t *init = data;
DRM_DEBUG("%s\n", __FUNCTION__);
- DRM_COPY_FROM_USER_IOCTL(init, (drm_via_init_t __user *) data,
- sizeof(init));
-
- switch (init.func) {
+ switch (init->func) {
case VIA_INIT_MAP:
- return via_do_init_map(dev, &init);
+ return via_do_init_map(dev, init);
case VIA_CLEANUP_MAP:
return via_do_cleanup_map(dev);
}
@@ -111,14 +107,14 @@ int via_map_init(DRM_IOCTL_ARGS)
return -EINVAL;
}
-int via_driver_load(drm_device_t *dev, unsigned long chipset)
+int via_driver_load(struct drm_device *dev, unsigned long chipset)
{
drm_via_private_t *dev_priv;
int ret = 0;
dev_priv = drm_calloc(1, sizeof(drm_via_private_t), DRM_MEM_DRIVER);
if (dev_priv == NULL)
- return DRM_ERR(ENOMEM);
+ return -ENOMEM;
dev->dev_private = (void *)dev_priv;
@@ -133,7 +129,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset)
return ret;
}
-int via_driver_unload(drm_device_t *dev)
+int via_driver_unload(struct drm_device *dev)
{
drm_via_private_t *dev_priv = dev->dev_private;
diff --git a/shared-core/via_mm.c b/shared-core/via_mm.c
index 8f175a7d..45790dc2 100644
--- a/shared-core/via_mm.c
+++ b/shared-core/via_mm.c
@@ -72,17 +72,14 @@ static int del_alloc_set(int context, int type, unsigned long val)
/* agp memory management */
static memHeap_t *AgpHeap = NULL;
-int via_agp_init(DRM_IOCTL_ARGS)
+int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_via_agp_t agp;
+ drm_via_agp_t *agp = data;
- DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data,
- sizeof(agp));
+ AgpHeap = via_mmInit(agp->offset, agp->size);
- AgpHeap = via_mmInit(agp.offset, agp.size);
-
- DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)agp.offset,
- (unsigned long)agp.size);
+ DRM_DEBUG("offset = %lu, size = %lu", (unsigned long)agp->offset,
+ (unsigned long)agp->size);
return 0;
}
@@ -90,11 +87,9 @@ int via_agp_init(DRM_IOCTL_ARGS)
/* fb memory management */
static memHeap_t *FBHeap = NULL;
-int via_fb_init(DRM_IOCTL_ARGS)
+int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_via_fb_t fb;
-
- DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb));
+ drm_via_fb_t *fb = data;
- FBHeap = via_mmInit(fb.offset, fb.size);
+ FBHeap = via_mmInit(fb->offset, fb->size);
@@ -191,25 +186,18 @@ int via_final_context(struct drm_device *dev, int context)
return 1;
}
-int via_mem_alloc(DRM_IOCTL_ARGS)
+int via_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_via_mem_t mem;
-
- DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
- sizeof(mem));
+ drm_via_mem_t *mem = data;
- switch (mem.type) {
+ switch (mem->type) {
case VIA_MEM_VIDEO:
- if (via_fb_alloc(&mem) < 0)
+ if (via_fb_alloc(mem) < 0)
return -EFAULT;
- DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
- sizeof(mem));
return 0;
case VIA_MEM_AGP:
- if (via_agp_alloc(&mem) < 0)
+ if (via_agp_alloc(mem) < 0)
return -EFAULT;
- DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem,
- sizeof(mem));
return 0;
}
@@ -288,21 +276,18 @@ static int via_agp_alloc(drm_via_mem_t * mem)
return retval;
}
-int via_mem_free(DRM_IOCTL_ARGS)
+int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- drm_via_mem_t mem;
+ drm_via_mem_t *mem = data;
- DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data,
- sizeof(mem));
-
- switch (mem.type) {
+ switch (mem->type) {
case VIA_MEM_VIDEO:
- if (via_fb_free(&mem) == 0)
+ if (via_fb_free(mem) == 0)
return 0;
break;
case VIA_MEM_AGP:
- if (via_agp_free(&mem) == 0)
+ if (via_agp_free(mem) == 0)
return 0;
break;
}
@@ -356,7 +341,7 @@ static int via_agp_free(drm_via_mem_t * mem)
retval = -1;
}
- DRM_DEBUG("free agp, free = %ld\n", agp.free);
+ DRM_DEBUG("free agp, free = %ld\n", agp.nfree);
return retval;
}
diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c
index 4b844af0..ded5c4e1 100644
--- a/shared-core/via_verifier.c
+++ b/shared-core/via_verifier.c
@@ -252,10 +252,10 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
unsigned long offset,
unsigned long size,
- drm_device_t * dev)
+ struct drm_device * dev)
{
#ifdef __linux__
- drm_map_list_t *r_list;
+ struct drm_map_list *r_list;
#endif
drm_local_map_t *map = seq->map_cache;
@@ -967,7 +967,7 @@ via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
- drm_device_t * dev, int agp)
+ struct drm_device * dev, int agp)
{
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
@@ -1031,18 +1031,18 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
case state_error:
default:
*hc_state = saved_state;
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
if (state == state_error) {
*hc_state = saved_state;
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
}
int
-via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
+via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
unsigned int size)
{
@@ -1087,11 +1087,11 @@ via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
break;
case state_error:
default:
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
}
if (state == state_error) {
- return DRM_ERR(EINVAL);
+ return -EINVAL;
}
return 0;
}
diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h
index 84497c44..dac1db91 100644
--- a/shared-core/via_verifier.h
+++ b/shared-core/via_verifier.h
@@ -49,7 +49,7 @@ typedef struct {
drm_via_sequence_t unfinished;
int agp_texture;
int multitex;
- drm_device_t *dev;
+ struct drm_device *dev;
drm_local_map_t *map_cache;
uint32_t vertex_count;
int agp;
@@ -57,8 +57,8 @@ typedef struct {
} drm_via_state_t;
extern int via_verify_command_stream(const uint32_t * buf, unsigned int size,
- drm_device_t *dev, int agp);
-extern int via_parse_command_stream(drm_device_t *dev, const uint32_t * buf,
+ struct drm_device *dev, int agp);
+extern int via_parse_command_stream(struct drm_device *dev, const uint32_t * buf,
unsigned int size);
#endif
diff --git a/shared-core/via_video.c b/shared-core/via_video.c
index 300ac61b..c15e75b5 100644
--- a/shared-core/via_video.c
+++ b/shared-core/via_video.c
@@ -65,10 +65,9 @@ void via_release_futex(drm_via_private_t * dev_priv, int context)
}
}
-int via_decoder_futex(DRM_IOCTL_ARGS)
+int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
- DRM_DEVICE;
- drm_via_futex_t fx;
+ drm_via_futex_t *fx = data;
volatile int *lock;
drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
@@ -76,21 +75,18 @@ int via_decoder_futex(DRM_IOCTL_ARGS)
DRM_DEBUG("%s\n", __FUNCTION__);
- DRM_COPY_FROM_USER_IOCTL(fx, (drm_via_futex_t __user *) data,
- sizeof(fx));
-
- if (fx.lock > VIA_NR_XVMC_LOCKS)
+ if (fx->lock > VIA_NR_XVMC_LOCKS)
return -EFAULT;
- lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx.lock);
+ lock = (volatile int *)XVMCLOCKPTR(sAPriv, fx->lock);
- switch (fx.func) {
+ switch (fx->func) {
case VIA_FUTEX_WAIT:
- DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx.lock],
- (fx.ms / 10) * (DRM_HZ / 100), *lock != fx.val);
+ DRM_WAIT_ON(ret, dev_priv->decoder_queue[fx->lock],
+ (fx->ms / 10) * (DRM_HZ / 100), *lock != fx->val);
return ret;
case VIA_FUTEX_WAKE:
- DRM_WAKEUP(&(dev_priv->decoder_queue[fx.lock]));
+ DRM_WAKEUP(&(dev_priv->decoder_queue[fx->lock]));
return 0;
}
return 0;
diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h
new file mode 100644
index 00000000..de0fb532
--- /dev/null
+++ b/shared-core/xgi_drm.h
@@ -0,0 +1,133 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_DRM_H_
+#define _XGI_DRM_H_
+
+#include <linux/types.h>
+#include <asm/ioctl.h>
+
+struct drm_xgi_sarea {
+ __u16 device_id;
+ __u16 vendor_id;
+
+ char device_name[32];
+
+ unsigned int scrn_start;
+ unsigned int scrn_xres;
+ unsigned int scrn_yres;
+ unsigned int scrn_bpp;
+ unsigned int scrn_pitch;
+};
+
+
+struct xgi_bootstrap {
+ /**
+ * Size of PCI-e GART range in megabytes.
+ */
+ struct drm_map gart;
+};
+
+
+enum xgi_mem_location {
+ XGI_MEMLOC_NON_LOCAL = 0,
+ XGI_MEMLOC_LOCAL = 1,
+ XGI_MEMLOC_INVALID = 0x7fffffff
+};
+
+struct xgi_mem_alloc {
+ /**
+ * Memory region to be used for allocation.
+ *
+ * Must be one of XGI_MEMLOC_NON_LOCAL or XGI_MEMLOC_LOCAL.
+ */
+ unsigned int location;
+
+ /**
+ * Number of bytes requested.
+ *
+ * On successful allocation, set to the actual number of bytes
+ * allocated.
+ */
+ unsigned int size;
+
+ /**
+ * Address of the memory from the graphics hardware's point of view.
+ */
+ __u32 hw_addr;
+
+ /**
+ * Offset of the allocation in the mapping.
+ */
+ __u32 offset;
+
+ /**
+ * Magic handle used to release memory.
+ *
+ * See also DRM_XGI_FREE ioctl.
+ */
+ __u32 index;
+};
+
+enum xgi_batch_type {
+ BTYPE_2D = 0,
+ BTYPE_3D = 1,
+ BTYPE_FLIP = 2,
+ BTYPE_CTRL = 3,
+ BTYPE_NONE = 0x7fffffff
+};
+
+struct xgi_cmd_info {
+ __u32 type;
+ __u32 hw_addr;
+ __u32 size;
+ __u32 id;
+};
+
+struct xgi_state_info {
+ unsigned int _fromState;
+ unsigned int _toState;
+};
+
+
+/*
+ * Ioctl definitions
+ */
+
+#define DRM_XGI_BOOTSTRAP 0
+#define DRM_XGI_ALLOC 1
+#define DRM_XGI_FREE 2
+#define DRM_XGI_SUBMIT_CMDLIST 3
+#define DRM_XGI_STATE_CHANGE 4
+
+#define XGI_IOCTL_BOOTSTRAP DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_BOOTSTRAP, struct xgi_bootstrap)
+#define XGI_IOCTL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_XGI_ALLOC, struct xgi_mem_alloc)
+#define XGI_IOCTL_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_FREE, __u32)
+#define XGI_IOCTL_SUBMIT_CMDLIST DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_SUBMIT_CMDLIST, struct xgi_cmd_info)
+#define XGI_IOCTL_STATE_CHANGE DRM_IOW(DRM_COMMAND_BASE + DRM_XGI_STATE_CHANGE, struct xgi_state_info)
+
+#endif /* _XGI_DRM_H_ */
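The new header above only declares the interface; a hedged userspace sketch of the alloc/free round trip is shown below. It is illustrative only: it assumes an already-open, bootstrapped DRM fd, and that drm.h and xgi_drm.h are on the include path (xgi_drm.h relies on DRM_IOWR/DRM_COMMAND_BASE from drm.h).

#include <string.h>
#include <sys/ioctl.h>
#include "drm.h"        /* DRM_IOWR / DRM_COMMAND_BASE used by xgi_drm.h */
#include "xgi_drm.h"

static int xgi_alloc_example(int fd, unsigned int bytes)
{
        struct xgi_mem_alloc alloc;

        memset(&alloc, 0, sizeof(alloc));
        alloc.location = XGI_MEMLOC_LOCAL;      /* on-card memory */
        alloc.size = bytes;

        if (ioctl(fd, XGI_IOCTL_ALLOC, &alloc) != 0)
                return -1;

        /* alloc.size now holds the actual size, alloc.hw_addr/offset locate
         * the memory, and alloc.index is the handle DRM_XGI_FREE expects. */
        if (ioctl(fd, XGI_IOCTL_FREE, &alloc.index) != 0)
                return -1;

        return 0;
}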
diff --git a/tests/Makefile b/tests/Makefile
deleted file mode 100644
index b406e0ad..00000000
--- a/tests/Makefile
+++ /dev/null
@@ -1,27 +0,0 @@
-
-# These definitions are for handling dependencies in the out of kernel build.
-
-PROGS = dristat drmstat
-
-CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d
-
-# Build test utilities
-
-PRGCFLAGS = $(CFLAGS) -g -ansi -pedantic -DPOSIX_C_SOURCE=199309L \
- -D_POSIX_SOURCE -D_XOPEN_SOURCE -D_BSD_SOURCE -D_SVID_SOURCE \
- -I. -I../libdrm -I../shared-core
-
-DRMSTATLIBS = -L../libdrm -ldrm
-
-
-programs: $(PROGS)
-
-dristat: dristat.c
- $(CC) $(PRGCFLAGS) $< -o $@
-
-drmstat: drmstat.c
- $(CC) $(PRGCFLAGS) $< -o $@ $(DRMSTATLIBS)
-
-clean:
- rm -f $(CLEANFILES)
-
diff --git a/tests/Makefile.am b/tests/Makefile.am
new file mode 100644
index 00000000..dce1754e
--- /dev/null
+++ b/tests/Makefile.am
@@ -0,0 +1,29 @@
+AM_CFLAGS = \
+ -I $(top_srcdir)/shared-core \
+ -I $(top_srcdir)/libdrm
+
+noinst_PROGRAMS = \
+ dristat \
+ drmstat
+
+EXTRA_LTLIBRARIES = libdrmtest.la
+libdrmtest_la_SOURCES = \
+ drmtest.c \
+ drmtest.h
+libdrmtest_la_LIBADD = \
+ $(top_builddir)/libdrm/libdrm.la
+
+LDADD = libdrmtest.la
+
+TESTS = auth \
+ openclose \
+ getversion \
+ getclient \
+ getstats \
+ lock \
+ setversion \
+ updatedraw
+
+EXTRA_PROGRAMS = $(TESTS)
+CLEANFILES = $(EXTRA_PROGRAMS) $(EXTRA_LTLIBRARIES)
+
diff --git a/tests/auth.c b/tests/auth.c
new file mode 100644
index 00000000..4160d1de
--- /dev/null
+++ b/tests/auth.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <limits.h>
+#include "drmtest.h"
+
+enum auth_event {
+ SERVER_READY,
+ CLIENT_MAGIC,
+ CLIENT_DONE,
+};
+
+int commfd[2];
+
+static void wait_event(int pipe, enum auth_event expected_event)
+{
+ int ret;
+ enum auth_event event;
+ unsigned char in;
+
+ ret = read(commfd[pipe], &in, 1);
+ if (ret == -1)
+ err(1, "read error");
+ event = in;
+
+ if (event != expected_event)
+ errx(1, "unexpected event: %d\n", event);
+}
+
+static void
+send_event(int pipe, enum auth_event send_event)
+{
+ int ret;
+ unsigned char event;
+
+ event = send_event;
+ ret = write(commfd[pipe], &event, 1);
+ if (ret == -1)
+ err(1, "failed to send event %d", event);
+}
+
+static void client()
+{
+ struct drm_auth auth;
+ int drmfd, ret;
+
+ /* XXX: Should make sure we open the same DRM as the master */
+ drmfd = drm_open_any();
+
+ wait_event(0, SERVER_READY);
+
+ /* Get a client magic number and pass it to the master for auth. */
+ auth.magic = 0; /* Quiet valgrind */
+ ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth);
+ if (ret == -1)
+ err(1, "Couldn't get client magic");
+ send_event(0, CLIENT_MAGIC);
+ ret = write(commfd[0], &auth.magic, sizeof(auth.magic));
+ if (ret == -1)
+ err(1, "Couldn't write auth data");
+
+ /* Signal that the client is completely done. */
+ send_event(0, CLIENT_DONE);
+}
+
+static void server()
+{
+ int drmfd, ret;
+ struct drm_auth auth;
+
+ drmfd = drm_open_any_master();
+
+ auth.magic = 0xd0d0d0d0;
+ ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
+ if (ret != -1 || errno != EINVAL)
+ errx(1, "Authenticating bad magic succeeded\n");
+
+ send_event(1, SERVER_READY);
+
+ wait_event(1, CLIENT_MAGIC);
+ ret = read(commfd[1], &auth.magic, sizeof(auth.magic));
+ if (ret == -1)
+ err(1, "Failure to read client magic");
+
+ ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
+ if (ret == -1)
+ err(1, "Failure to authenticate client magic\n");
+
+ wait_event(1, CLIENT_DONE);
+}
+
+/**
+ * Checks DRM authentication mechanisms.
+ */
+int main(int argc, char **argv)
+{
+ int ret;
+
+ ret = pipe(commfd);
+ if (ret == -1)
+ err(1, "Couldn't create pipe");
+
+ ret = fork();
+ if (ret == -1)
+ err(1, "failure to fork client");
+ if (ret == 0)
+ client();
+ else
+ server();
+
+ return 0;
+}
+
diff --git a/tests/drmtest.c b/tests/drmtest.c
new file mode 100644
index 00000000..cae99a0c
--- /dev/null
+++ b/tests/drmtest.c
@@ -0,0 +1,83 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <fcntl.h>
+#include "drmtest.h"
+
+/** Open the first DRM device we can find, searching up to 16 device nodes */
+int drm_open_any(void)
+{
+ char name[20];
+ int i, fd;
+
+ for (i = 0; i < 16; i++) {
+ sprintf(name, "/dev/dri/card%d", i);
+ fd = open(name, O_RDWR);
+ if (fd != -1)
+ return fd;
+ }
+ abort();
+}
+
+
+/**
+ * Open the first DRM device we can find where we end up being the master.
+ */
+int drm_open_any_master(void)
+{
+ char name[20];
+ int i, fd;
+
+ for (i = 0; i < 16; i++) {
+ drm_client_t client;
+ int ret;
+
+ sprintf(name, "/dev/dri/card%d", i);
+ fd = open(name, O_RDWR);
+ if (fd == -1)
+ continue;
+
+ /* Check that we're the only opener and authed. */
+ client.idx = 0;
+ ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
+ assert (ret == 0);
+ if (!client.auth) {
+ close(fd);
+ continue;
+ }
+ client.idx = 1;
+ ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
+ if (ret != -1 || errno != EINVAL) {
+ close(fd);
+ continue;
+ }
+ return fd;
+ }
+ fprintf(stderr, "Couldn't find an un-controlled DRM device\n");
+ abort();
+}
+
diff --git a/tests/drmtest.h b/tests/drmtest.h
new file mode 100644
index 00000000..afa0df4a
--- /dev/null
+++ b/tests/drmtest.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+
+#include "xf86drm.h"
+
+int drm_open_any(void);
+int drm_open_any_master(void);
diff --git a/tests/getclient.c b/tests/getclient.c
new file mode 100644
index 00000000..349c16ec
--- /dev/null
+++ b/tests/getclient.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <limits.h>
+#include "drmtest.h"
+
+/**
+ * Checks DRM_IOCTL_GET_CLIENT.
+ */
+int main(int argc, char **argv)
+{
+ int fd, ret;
+ drm_client_t client;
+
+ fd = drm_open_any();
+
+ /* Look for client index 0. This should exist whether we're operating
+ * on an otherwise unused drm device, or the X Server is running on
+ * the device.
+ */
+ client.idx = 0;
+ ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
+ assert(ret == 0);
+
+ /* Look for some absurd client index and make sure it's invalid.
+ * The DRM drivers currently always return data, so the user has
+ * no real way to detect when the list has terminated. That's bad,
+ * and this test is XFAIL as a result.
+ */
+ client.idx = 0x7fffffff;
+ ret = ioctl(fd, DRM_IOCTL_GET_CLIENT, &client);
+ assert(ret == -1 && errno == EINVAL);
+
+ close(fd);
+ return 0;
+}
diff --git a/tests/getstats.c b/tests/getstats.c
new file mode 100644
index 00000000..bd55b12e
--- /dev/null
+++ b/tests/getstats.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <limits.h>
+#include "drmtest.h"
+
+/**
+ * Checks DRM_IOCTL_GET_STATS.
+ *
+ * I don't care too much about the actual contents, just that the kernel
+ * doesn't crash.
+ */
+int main(int argc, char **argv)
+{
+ int fd, ret;
+ drm_stats_t stats;
+
+ fd = drm_open_any();
+
+ ret = ioctl(fd, DRM_IOCTL_GET_STATS, &stats);
+ assert(ret == 0);
+
+ assert(stats.count >= 0);
+
+ close(fd);
+ return 0;
+}
diff --git a/tests/getversion.c b/tests/getversion.c
new file mode 100644
index 00000000..3de90de6
--- /dev/null
+++ b/tests/getversion.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmtest.h"
+
+/**
+ * Checks DRM_IOCTL_GET_VERSION and libdrm's drmGetVersion() interface to it.
+ */
+int main(int argc, char **argv)
+{
+ int fd;
+ drmVersionPtr v;
+
+ fd = drm_open_any();
+ v = drmGetVersion(fd);
+ assert(strlen(v->name) != 0);
+ assert(strlen(v->date) != 0);
+ assert(strlen(v->desc) != 0);
+ assert(v->version_major >= 1);
+ drmFree(v);
+ close(fd);
+ return 0;
+}
diff --git a/tests/lock.c b/tests/lock.c
new file mode 100644
index 00000000..3f627558
--- /dev/null
+++ b/tests/lock.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+/** @file lock.c
+ * Tests various potential failures of the DRM locking mechanisms
+ */
+
+#include <limits.h>
+#include "drmtest.h"
+
+enum auth_event {
+ SERVER_READY,
+ CLIENT_MAGIC,
+ SERVER_LOCKED,
+ CLIENT_LOCKED,
+};
+
+int commfd[2];
+unsigned int lock1 = 0x00001111;
+unsigned int lock2 = 0x00002222;
+
+/* return time in milliseconds */
+static unsigned int
+get_millis()
+{
+ struct timeval tv;
+
+ gettimeofday(&tv, NULL);
+ return tv.tv_sec * 1000 + tv.tv_usec / 1000;
+}
+
+static void
+wait_event(int pipe, enum auth_event expected_event)
+{
+ int ret;
+ enum auth_event event;
+ unsigned char in;
+
+ ret = read(commfd[pipe], &in, 1);
+ if (ret == -1)
+ err(1, "read error");
+ event = in;
+
+ if (event != expected_event)
+ errx(1, "unexpected event: %d\n", event);
+}
+
+static void
+send_event(int pipe, enum auth_event send_event)
+{
+ int ret;
+ unsigned char event;
+
+ event = send_event;
+ ret = write(commfd[pipe], &event, 1);
+ if (ret == -1)
+ err(1, "failed to send event %d", event);
+}
+
+static void
+client_auth(int drmfd)
+{
+ struct drm_auth auth;
+ int ret;
+
+ wait_event(0, SERVER_READY);
+
+ /* Get a client magic number and pass it to the master for auth. */
+ ret = ioctl(drmfd, DRM_IOCTL_GET_MAGIC, &auth);
+ if (ret == -1)
+ err(1, "Couldn't get client magic");
+ send_event(0, CLIENT_MAGIC);
+ ret = write(commfd[0], &auth.magic, sizeof(auth.magic));
+ if (ret == -1)
+ err(1, "Couldn't write auth data");
+}
+
+static void
+server_auth(int drmfd)
+{
+ struct drm_auth auth;
+ int ret;
+
+ send_event(1, SERVER_READY);
+ wait_event(1, CLIENT_MAGIC);
+ ret = read(commfd[1], &auth.magic, sizeof(auth.magic));
+ if (ret == -1)
+ err(1, "Failure to read client magic");
+
+ ret = ioctl(drmfd, DRM_IOCTL_AUTH_MAGIC, &auth);
+ if (ret == -1)
+ err(1, "Failure to authenticate client magic\n");
+}
+
+/** Tests that locking is successful in normal conditions */
+static void
+test_lock_unlock(int drmfd)
+{
+ int ret;
+
+ ret = drmGetLock(drmfd, lock1, 0);
+ if (ret != 0)
+ err(1, "Locking failed");
+ ret = drmUnlock(drmfd, lock1);
+ if (ret != 0)
+ err(1, "Unlocking failed");
+}
+
+/** Tests that unlocking the lock while it's not held works correctly */
+static void
+test_unlock_unlocked(int drmfd)
+{
+ int ret;
+
+ ret = drmUnlock(drmfd, lock1);
+ if (ret == 0)
+ err(1, "Unlocking unlocked lock succeeded");
+}
+
+/** Tests that unlocking a lock held by another context fails appropriately */
+static void
+test_unlock_unowned(int drmfd)
+{
+ int ret;
+
+ ret = drmGetLock(drmfd, lock1, 0);
+ assert(ret == 0);
+ ret = drmUnlock(drmfd, lock2);
+ if (ret == 0)
+ errx(1, "Unlocking other context's lock succeeded");
+ ret = drmUnlock(drmfd, lock1);
+ assert(ret == 0);
+}
+
+/**
+ * Tests that an open/close by the same process doesn't result in the lock
+ * being dropped.
+ */
+static void test_open_close_locked(int drmfd)
+{
+ int ret, tempfd;
+
+ ret = drmGetLock(drmfd, lock1, 0);
+ assert(ret == 0);
+ /* XXX: Need to make sure that this is the same device as drmfd */
+ tempfd = drm_open_any();
+ close(tempfd);
+ ret = drmUnlock(drmfd, lock1);
+ if (ret != 0)
+ errx(1, "lock lost during open/close by same pid");
+
+ close(drmfd);
+}
+
+static void client()
+{
+ int drmfd, ret;
+ unsigned int time;
+
+ /* XXX: Should make sure we open the same DRM as the master */
+ drmfd = drm_open_any();
+
+ client_auth(drmfd);
+
+ /* Wait for the server to grab the lock, then grab it ourselves (to
+ * contest it). Hopefully we hit it within the window of when the
+ * server locks.
+ */
+ wait_event(0, SERVER_LOCKED);
+ ret = drmGetLock(drmfd, lock2, 0);
+ time = get_millis();
+ if (ret != 0)
+ err(1, "Failed to get lock on client\n");
+ drmUnlock(drmfd, lock2);
+
+ /* Tell the server that our locking completed, and when it did */
+ send_event(0, CLIENT_LOCKED);
+ ret = write(commfd[0], &time, sizeof(time));
+
+ exit(0);
+}
+
+static void server()
+{
+ int drmfd, tempfd, ret;
+ unsigned int client_time, unlock_time;
+
+ drmfd = drm_open_any_master();
+
+ test_lock_unlock(drmfd);
+ test_unlock_unlocked(drmfd);
+ test_unlock_unowned(drmfd);
+ test_open_close_locked(drmfd);
+
+ /* Perform the authentication sequence with the client. */
+ server_auth(drmfd);
+
+ /* Now, test that the client attempting to lock while the server
+ * holds the lock works correctly.
+ */
+ ret = drmGetLock(drmfd, lock1, 0);
+ assert(ret == 0);
+ send_event(1, SERVER_LOCKED);
+ /* Wait a while for the client to do its thing */
+ sleep(1);
+ ret = drmUnlock(drmfd, lock1);
+ assert(ret == 0);
+ unlock_time = get_millis();
+
+ wait_event(1, CLIENT_LOCKED);
+ ret = read(commfd[1], &client_time, sizeof(client_time));
+ if (ret == -1)
+ err(1, "Failure to read client magic");
+
+ if (client_time < unlock_time)
+ errx(1, "Client took lock before server released it");
+}
+
+int main(int argc, char **argv)
+{
+ int ret;
+
+
+ ret = pipe(commfd);
+ if (ret == -1)
+ err(1, "Couldn't create pipe");
+
+ ret = fork();
+ if (ret == -1)
+ err(1, "failure to fork client");
+ if (ret == 0)
+ client();
+ else
+ server();
+
+ return 0;
+}
+
diff --git a/tests/openclose.c b/tests/openclose.c
new file mode 100644
index 00000000..946a4459
--- /dev/null
+++ b/tests/openclose.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmtest.h"
+
+int main(int argc, char **argv)
+{
+ int fd;
+
+ fd = drm_open_any();
+ close(fd);
+ return 0;
+}
diff --git a/tests/setversion.c b/tests/setversion.c
new file mode 100644
index 00000000..f4bfbfba
--- /dev/null
+++ b/tests/setversion.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <limits.h>
+#include "drmtest.h"
+
+/**
+ * Checks DRM_IOCTL_SET_VERSION.
+ *
+ * This tests that we can get the actual version out, and that setting invalid
+ * major/minor numbers fails appropriately. It does not check the actual
+ * behavior differences resulting from an increased DI version.
+ */
+int main(int argc, char **argv)
+{
+ int fd, ret;
+ drm_set_version_t sv, version;
+
+ fd = drm_open_any_master();
+
+ /* First, check that we can get the DD/DI versions. */
+ memset(&version, 0, sizeof(version));
+ version.drm_di_major = -1;
+ version.drm_di_minor = -1;
+ version.drm_dd_major = -1;
+ version.drm_dd_minor = -1;
+ ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &version);
+ assert(ret == 0);
+ assert(version.drm_di_major != -1);
+ assert(version.drm_di_minor != -1);
+ assert(version.drm_dd_major != -1);
+ assert(version.drm_dd_minor != -1);
+
+ /* Check that an invalid DI major fails */
+ sv = version;
+ sv.drm_di_major++;
+ ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
+ assert(ret == -1 && errno == EINVAL);
+
+ /* Check that an invalid DI minor fails */
+ sv = version;
+ sv.drm_di_minor++;
+ ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
+ assert(ret == -1 && errno == EINVAL);
+
+ /* Check that an invalid DD major fails */
+ sv = version;
+ sv.drm_dd_major++;
+ ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
+ assert(ret == -1 && errno == EINVAL);
+
+ /* Check that an invalid DD minor fails */
+ sv = version;
+ sv.drm_dd_minor++;
+ ret = ioctl(fd, DRM_IOCTL_SET_VERSION, &sv);
+ assert(ret == -1 && errno == EINVAL);
+
+ close(fd);
+ return 0;
+}
diff --git a/tests/updatedraw.c b/tests/updatedraw.c
new file mode 100644
index 00000000..2f22fef2
--- /dev/null
+++ b/tests/updatedraw.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright © 2007 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmtest.h"
+
+static void
+set_draw_cliprects_empty(int fd, int drawable)
+{
+ int ret;
+ struct drm_update_draw update;
+
+ update.handle = drawable;
+ update.type = DRM_DRAWABLE_CLIPRECTS;
+ update.num = 0;
+ update.data = 0;
+
+ ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
+ assert(ret == 0);
+}
+
+static void
+set_draw_cliprects_empty_fail(int fd, int drawable)
+{
+ int ret;
+ struct drm_update_draw update;
+
+ update.handle = drawable;
+ update.type = DRM_DRAWABLE_CLIPRECTS;
+ update.num = 0;
+ update.data = 0;
+
+ ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
+ assert(ret == -1 && errno == EINVAL);
+}
+
+static void
+set_draw_cliprects_2(int fd, int drawable)
+{
+ int ret;
+ struct drm_update_draw update;
+ drm_clip_rect_t rects[2];
+
+ rects[0].x1 = 0;
+ rects[0].y1 = 0;
+ rects[0].x2 = 10;
+ rects[0].y2 = 10;
+
+ rects[1].x1 = 10;
+ rects[1].y1 = 10;
+ rects[1].x2 = 20;
+ rects[1].y2 = 20;
+
+ update.handle = drawable;
+ update.type = DRM_DRAWABLE_CLIPRECTS;
+ update.num = 2;
+ update.data = (unsigned long long)(uintptr_t)&rects;
+
+ ret = ioctl(fd, DRM_IOCTL_UPDATE_DRAW, &update);
+ assert(ret == 0);
+}
+
+static int add_drawable(int fd)
+{
+ drm_draw_t drawarg;
+ int ret;
+
+ /* Create a drawable.
+ * IOCTL_ADD_DRAW is RDWR, though it should really just be RD
+ */
+ drawarg.handle = 0;
+ ret = ioctl(fd, DRM_IOCTL_ADD_DRAW, &drawarg);
+ assert(ret == 0);
+ return drawarg.handle;
+}
+
+static int rm_drawable(int fd, int drawable, int fail)
+{
+ drm_draw_t drawarg;
+ int ret;
+
+ /* Remove the drawable.
+ * IOCTL_RM_DRAW is RDWR, though it should really just be RD
+ */
+ drawarg.handle = drawable;
+ ret = ioctl(fd, DRM_IOCTL_RM_DRAW, &drawarg);
+ if (!fail)
+ assert(ret == 0);
+ else
+ assert(ret == -1 && errno == EINVAL);
+
+ return drawarg.handle;
+}
+
+/**
+ * Tests drawable management: adding, removing, and updating the cliprects of
+ * drawables.
+ */
+int main(int argc, char **argv)
+{
+ int fd, ret, d1, d2;
+
+ fd = drm_open_any_master();
+
+ d1 = add_drawable(fd);
+ d2 = add_drawable(fd);
+ /* Do a series of cliprect updates */
+ set_draw_cliprects_empty(fd, d1);
+ set_draw_cliprects_empty(fd, d2);
+ set_draw_cliprects_2(fd, d1);
+ set_draw_cliprects_empty(fd, d1);
+
+ /* Remove our drawables */
+ rm_drawable(fd, d1, 0);
+ rm_drawable(fd, d2, 0);
+
+ /* Check that removing an unknown drawable returns error */
+ rm_drawable(fd, 0x7fffffff, 1);
+
+ /* Attempt to set cliprects on a nonexistent drawable */
+ set_draw_cliprects_empty_fail(fd, d1);
+
+ close(fd);
+ return 0;
+}