author | Keith Packard <keithp@neko.keithp.com> | 2007-01-07 22:37:40 -0800
---|---|---
committer | Keith Packard <keithp@neko.keithp.com> | 2007-01-07 22:37:40 -0800
commit | c5aaf7648df82665851c9e67f5509b427ca34c8e |
tree | 55b317738a99097cb02ba6a736b9ef20de6b34f3 |
parent | 63c0f3946056d044b7c5688fa5cb670782212c77 |
parent | d0080d71b9f3df0d4f743324b7e8f1ce580bdcaf |
Merge branch 'master' into crestline
Conflicts:
shared-core/i915_drm.h
Whitespace change only
128 files changed, 5047 insertions(+), 912 deletions(-)
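A pattern that recurs throughout the hunks below is the kernel-version compatibility guard: the 2.6.20 workqueue rework removed the `void *data` argument from work handlers, so the buffer-manager code (see the `drmP.h` and `drm_bo.c` hunks) keeps both variants behind `LINUX_VERSION_CODE` checks and recovers its context with `container_of()` on newer kernels. The following is a minimal, self-contained sketch of that pattern using illustrative `foo_*` names, not code from this commit:

```c
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>	/* container_of() */

struct foo_manager {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct work_struct wq;	/* old API: handler receives a void *data */
#else
	struct delayed_work wq;	/* 2.6.20+: handler receives the work_struct */
#endif
};

struct foo_device {
	struct foo_manager bm;
};

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void foo_delayed_work(void *data)
{
	struct foo_device *dev = data;
	(void)dev;	/* ... do the delayed work against dev ... */
}
#else
static void foo_delayed_work(struct work_struct *work)
{
	/* Recover the embedding structures from the work pointer. */
	struct foo_manager *bm = container_of(work, struct foo_manager, wq.work);
	struct foo_device *dev = container_of(bm, struct foo_device, bm);
	(void)dev;	/* ... do the delayed work against dev ... */
}
#endif

static void foo_init(struct foo_device *dev)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK(&dev->bm.wq, foo_delayed_work, dev);
#else
	INIT_DELAYED_WORK(&dev->bm.wq, foo_delayed_work);
#endif
}
```

The same guard style carries the 2.6.19 interrupt-handler signature change and the 2.6.20/2.6.21 nopage-to-fault transition handled further down in `drm_compat.h` and `drm_vm.c`.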
@@ -1,54 +1,55 @@ -*-core/linux -*-core/drm.h -*-core/drm_sarea.h -*-core/i915_dma.c -*-core/i915_drm.h -*-core/i915_drv.h -*-core/i915_irq.c -*-core/i915_mem.c -*-core/mach64_dma.c -*-core/mach64_drm.h -*-core/mach64_drv.h -*-core/mach64_irq.c -*-core/mach64_state.c -*-core/mga_dma.c -*-core/mga_drm.h -*-core/mga_drv.h -*-core/mga_irq.c -*-core/mga_state.c -*-core/mga_ucode.h -*-core/mga_warp.c -*-core/nv_drv.h -*-core/r128_cce.c -*-core/r128_drm.h -*-core/r128_drv.h -*-core/r128_irq.c -*-core/r128_state.c -*-core/r300_cmdbuf.c -*-core/r300_reg.h -*-core/radeon_cp.c -*-core/radeon_drm.h -*-core/radeon_drv.h -*-core/radeon_irq.c -*-core/radeon_mem.c -*-core/radeon_state.c -*-core/savage_bci.c -*-core/savage_drm.h -*-core/savage_drv.h -*-core/savage_state.c -*-core/sis_drm.h -*-core/sis_drv.h -*-core/tdfx_drv.h -*-core/via_3d_reg.h -*-core/via_dma.c -*-core/via_drm.h -*-core/via_drv.c -*-core/via_drv.h -*-core/via_irq.c -*-core/via_map.c -*-core/via_verifier.c -*-core/via_verifier.h -*-core/via_video.c +bsd-core/linux +bsd-core/drm.h +bsd-core/drm_sarea.h +bsd-core/i915_dma.c +bsd-core/i915_drm.h +bsd-core/i915_drv.h +bsd-core/i915_irq.c +bsd-core/i915_mem.c +bsd-core/mach64_dma.c +bsd-core/mach64_drm.h +bsd-core/mach64_drv.h +bsd-core/mach64_irq.c +bsd-core/mach64_state.c +bsd-core/mga_dma.c +bsd-core/mga_drm.h +bsd-core/mga_drv.h +bsd-core/mga_irq.c +bsd-core/mga_state.c +bsd-core/mga_ucode.h +bsd-core/mga_warp.c +bsd-core/nv_drv.h +bsd-core/r128_cce.c +bsd-core/r128_drm.h +bsd-core/r128_drv.h +bsd-core/r128_irq.c +bsd-core/r128_state.c +bsd-core/r300_cmdbuf.c +bsd-core/r300_reg.h +bsd-core/radeon_cp.c +bsd-core/radeon_drm.h +bsd-core/radeon_drv.h +bsd-core/radeon_irq.c +bsd-core/radeon_mem.c +bsd-core/radeon_state.c +bsd-core/savage_bci.c +bsd-core/savage_drm.h +bsd-core/savage_drv.h +bsd-core/savage_state.c +bsd-core/sis_drm.h +bsd-core/sis_drv.h +bsd-core/tdfx_drv.h +bsd-core/via_3d_reg.h +bsd-core/via_dma.c +bsd-core/via_drm.h +bsd-core/via_drv.c +bsd-core/via_drv.h +bsd-core/via_irq.c +bsd-core/via_map.c +bsd-core/via_verifier.c +bsd-core/via_verifier.h +bsd-core/via_video.c +*~ *.flags *.ko *.ko.cmd @@ -74,6 +75,7 @@ config.log config.status config.sub configure +cscope.* depcomp device_if.h drm.kld diff --git a/bsd-core/drm_bufs.c b/bsd-core/drm_bufs.c index 33da79e7..343ab1e8 100644 --- a/bsd-core/drm_bufs.c +++ b/bsd-core/drm_bufs.c @@ -316,6 +316,9 @@ void drm_rmmap(drm_device_t *dev, drm_local_map_t *map) case _DRM_CONSISTENT: drm_pci_free(dev, map->dmah); break; + default: + DRM_ERROR("Bad map type %d\n", map->type); + break; } if (map->bsr != NULL) { diff --git a/bsd-core/drm_drawable.c b/bsd-core/drm_drawable.c index d64bbe10..379e0aa7 120000..100644 --- a/bsd-core/drm_drawable.c +++ b/bsd-core/drm_drawable.c @@ -1 +1,51 @@ -../shared-core/drm_drawable.c
\ No newline at end of file +/* drm_drawable.h -- IOCTLs for drawables -*- linux-c -*- + * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com + */ +/*- + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Rickard E. (Rik) Faith <faith@valinux.com> + * Gareth Hughes <gareth@valinux.com> + * + */ + +#include "drmP.h" + +int drm_adddraw(DRM_IOCTL_ARGS) +{ + drm_draw_t draw; + + draw.handle = 0; /* NOOP */ + DRM_DEBUG("%d\n", draw.handle); + + DRM_COPY_TO_USER_IOCTL( (drm_draw_t *)data, draw, sizeof(draw) ); + + return 0; +} + +int drm_rmdraw(DRM_IOCTL_ARGS) +{ + return 0; /* NOOP */ +} diff --git a/linux-core/.gitignore b/linux-core/.gitignore new file mode 100644 index 00000000..1d045d63 --- /dev/null +++ b/linux-core/.gitignore @@ -0,0 +1 @@ +Module*.symvers diff --git a/linux-core/Makefile b/linux-core/Makefile index 3aecec43..590633b3 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -58,7 +58,7 @@ endif # Modules for all architectures MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \ - mach64.o nv.o + mach64.o nv.o nouveau.o # Modules only for ix86 architectures ifneq (,$(findstring 86,$(MACHINE))) @@ -75,45 +75,27 @@ DRM_MODULES ?= $(MODULE_LIST) # These definitions are for handling dependencies in the out of kernel build. 
-DRMSHARED = drm.h drm_sarea.h drm_drawable.c DRMHEADERS = drmP.h drm_compat.h drm_os_linux.h drm.h drm_sarea.h COREHEADERS = drm_core.h drm_sman.h drm_hashtab.h TDFXHEADERS = tdfx_drv.h $(DRMHEADERS) -TDFXSHARED = tdfx_drv.h R128HEADERS = r128_drv.h r128_drm.h $(DRMHEADERS) -R128SHARED = r128_drv.h r128_drm.h r128_cce.c r128_state.c r128_irq.c RADEONHEADERS = radeon_drv.h radeon_drm.h r300_reg.h $(DRMHEADERS) -RADEONSHARED = radeon_drv.h radeon_drm.h radeon_cp.c radeon_irq.c \ - radeon_mem.c radeon_state.c r300_cmdbuf.c r300_reg.h MGAHEADERS = mga_drv.h mga_drm.h mga_ucode.h $(DRMHEADERS) -MGASHARED = mga_dma.c mga_drm.h mga_drv.h mga_irq.c mga_state.c \ - mga_ucode.h mga_warp.c I810HEADERS = i810_drv.h i810_drm.h $(DRMHEADERS) I830HEADERS = i830_drv.h i830_drm.h $(DRMHEADERS) I915HEADERS = i915_drv.h i915_drm.h $(DRMHEADERS) -I915SHARED = i915_drv.h i915_drm.h i915_irq.c i915_mem.c i915_dma.c SISHEADERS= sis_drv.h sis_drm.h drm_hashtab.h drm_sman.h $(DRMHEADERS) -SISSHARED= sis_drv.h sis_drm.h SAVAGEHEADERS= savage_drv.h savage_drm.h $(DRMHEADERS) -SAVAGESHARED= savage_drv.h savage_drm.h savage_bci.c savage_state.c VIAHEADERS = via_drm.h via_drv.h via_3d_reg.h via_verifier.h $(DRMHEADERS) -VIASHARED = via_drm.h via_drv.h via_3d_reg.h via_drv.c via_irq.c via_map.c \ - via_dma.c via_verifier.c via_verifier.h via_video.c MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS) -MACH64SHARED = mach64_drv.h mach64_drm.h mach64_dma.c \ - mach64_irq.c mach64_state.c NVHEADERS = nv_drv.h $(DRMHEADERS) -NVSHARED = nv_drv.h FFBHEADERS = ffb_drv.h $(DRMHEADERS) - -SHAREDSRC = $(DRMSHARED) $(MGASHARED) $(R128SHARED) $(RADEONSHARED) \ - $(SISSHARED) $(TDFXSHARED) $(VIASHARED) $(MACH64SHARED) \ - $(I915SHARED) $(SAVAGESHARED) $(NVSHARED) +NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS) PROGS = dristat drmstat -CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c linux drm_pciids.h .tmp_versions +CLEANFILES = *.o *.ko $(PROGS) .depend .*.flags .*.d .*.cmd *.mod.c drm_pciids.h .tmp_versions # VERSION is not defined from the initial invocation. It is defined when # this Makefile is invoked from the kernel's root Makefile. @@ -226,27 +208,13 @@ endif SHAREDDIR := ../shared-core -HASSHARED := $(shell if [ -d $(SHAREDDIR) ]; then echo y; fi) - -ifeq ($(HASSHARED),y) -includes:: $(SHAREDSRC) drm_pciids.h +ifeq ($(shell if [ -d $(SHAREDDIR) ]; then echo y; fi),y) +includes:: drm_pciids.h drm_pciids.h: $(SHAREDDIR)/drm_pciids.txt sh ../scripts/create_linux_pci_lists.sh < $(SHAREDDIR)/drm_pciids.txt - -$(SHAREDSRC): - @if [ -r $(SHAREDDIR)/$@ ]; then \ - (rm -f $@; set -x; ln -s $(SHAREDDIR)/$@ $@); fi - -CLEANFILES += $(SHAREDSRC) endif -includes:: linux - -linux: - rm -f linux - ln -s . linux - clean cleandir: rm -rf $(CLEANFILES) @@ -274,11 +242,11 @@ else # Check for kernel versions that we don't support. -BELOW24 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 4 ]; then \ +BELOW26 := $(shell if [ $(VERSION) -lt 2 -o $(PATCHLEVEL) -lt 6 ]; then \ echo y; fi) -ifeq ($(BELOW24),y) -$(error Only 2.4.x and later kernels are supported \ +ifeq ($(BELOW26),y) +$(error Only 2.6.x and later kernels are supported \ ($(VERSION).$(PATCHLEVEL).$(SUBLEVEL))) endif @@ -291,30 +259,6 @@ endif # This needs to go before all other include paths. CC += -I$(DRMSRCDIR) -# Check for Red Hat's 4-argument do_munmap(). 
-DOMUNMAP := $(shell grep do_munmap $(LINUXDIR)/include/linux/mm.h | \ - grep -c acct) - -ifneq ($(DOMUNMAP),0) -EXTRA_CFLAGS += -DDO_MUNMAP_4_ARGS -endif - -# Check for 5-argument remap_page_range() in RH9 kernel, and 2.5.x kernels -RPR := $(shell grep remap_page_range $(LINUXDIR)/include/linux/mm.h | \ - grep -c vma) - -ifneq ($(RPR),0) -EXTRA_CFLAGS += -DREMAP_PAGE_RANGE_5_ARGS -endif - -# Check for 4-argument vmap() in some 2.5.x and 2.4.x kernels -VMAP := $(shell grep -A1 'vmap.*count,$$' $(LINUXDIR)/include/linux/vmalloc.h | \ - grep -c prot) - -ifneq ($(VMAP),0) -EXTRA_CFLAGS += -DVMAP_4_ARGS -endif - # Check for PAGE_AGP definition PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ grep -c PAGE_AGP) @@ -323,7 +267,6 @@ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif - # Start with all modules turned off. CONFIG_DRM_GAMMA := n CONFIG_DRM_TDFX := n @@ -372,6 +315,9 @@ endif ifneq (,$(findstring nv,$(DRM_MODULES))) CONFIG_DRM_NV := m endif +ifneq (,$(findstring nouveau,$(DRM_MODULES))) +CONFIG_DRM_NOUVEAU := m +endif # These require AGP support @@ -402,6 +348,7 @@ $(savage-objs): $(SAVAGEHEADERS) $(via-objs): $(VIAHEADERS) $(mach64-objs): $(MACH64HEADERS) $(nv-objs): $(NVHEADERS) +$(nouveau-objs): $(NOUVEAUHEADERS) endif diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index fba57ddf..b4ac2642 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -20,7 +20,9 @@ mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o i810-objs := i810_drv.o i810_dma.o i830-objs := i830_drv.o i830_dma.o i830_irq.o i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ - i915_buffer.o + i915_buffer.o +nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ + nouveau_object.o nouveau_irq.o nv40_graph.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o @@ -36,6 +38,7 @@ radeon-objs += radeon_ioc32.o mga-objs += mga_ioc32.o r128-objs += r128_ioc32.o i915-objs += i915_ioc32.o +nouveau-objs += nouveau_ioc32.o endif obj-m += drm.o @@ -52,3 +55,4 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage.o obj-$(CONFIG_DRM_VIA) += via.o obj-$(CONFIG_DRM_MACH64)+= mach64.o obj-$(CONFIG_DRM_NV) += nv.o +obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o diff --git a/linux-core/drm.h b/linux-core/drm.h new file mode 120000 index 00000000..29636692 --- /dev/null +++ b/linux-core/drm.h @@ -0,0 +1 @@ +../shared-core/drm.h
\ No newline at end of file diff --git a/linux-core/drmP.h b/linux-core/drmP.h index d02184c7..af8a544d 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -67,19 +67,11 @@ #include <asm/mtrr.h> #endif #if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE) +#include <asm/agp.h> #include <linux/types.h> #include <linux/agp_backend.h> #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,41) -#define HAS_WORKQUEUE 0 -#else -#define HAS_WORKQUEUE 1 -#endif -#if !HAS_WORKQUEUE -#include <linux/tqueue.h> -#else #include <linux/workqueue.h> -#endif #include <linux/poll.h> #include <asm/pgalloc.h> #include "drm.h" @@ -553,7 +545,8 @@ typedef struct drm_mm_node { } drm_mm_node_t; typedef struct drm_mm { - drm_mm_node_t root_node; + struct list_head fl_entry; + struct list_head ml_entry; } drm_mm_t; @@ -755,17 +748,6 @@ typedef struct drm_head { struct class_device *dev_class; } drm_head_t; -typedef struct drm_cache { - - /* - * Memory caches - */ - - kmem_cache_t *mm; - kmem_cache_t *fence_object; -} drm_cache_t; - - typedef struct drm_fence_driver{ int no_types; @@ -812,7 +794,11 @@ typedef struct drm_buffer_manager{ struct list_head pinned[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) struct work_struct wq; +#else + struct delayed_work wq; +#endif uint32_t fence_type; unsigned long cur_pages; atomic_t count; @@ -908,11 +894,8 @@ typedef struct drm_device { unsigned long last_switch; /**< jiffies at last context switch */ /*@} */ -#if !HAS_WORKQUEUE - struct tq_struct tq; -#else struct work_struct work; -#endif + /** \name VBLANK IRQ support */ /*@{ */ @@ -940,12 +923,8 @@ typedef struct drm_device { int pci_vendor; /**< PCI vendor id */ int pci_device; /**< PCI device id */ #ifdef __alpha__ -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) - struct pci_controler *hose; -#else struct pci_controller *hose; #endif -#endif drm_sg_mem_t *sg; /**< Scatter gather memory */ unsigned long *ctx_bitmap; /**< context bitmap */ void *dev_private; /**< device private data */ @@ -1094,6 +1073,7 @@ static inline int drm_mtrr_del(int handle, unsigned long offset, } #define drm_core_has_MTRR(dev) (0) +#define DRM_MTRR_WC 0 #endif @@ -1318,7 +1298,6 @@ extern int drm_put_head(drm_head_t * head); extern unsigned int drm_debug; /* 1 to enable debug output */ extern unsigned int drm_cards_limit; extern drm_head_t **drm_heads; -extern drm_cache_t drm_cache; extern struct drm_sysfs_class *drm_class; extern struct proc_dir_entry *drm_proc_root; @@ -1478,26 +1457,8 @@ extern int drm_fence_buffer_objects(drm_file_t * priv, drm_fence_object_t *fence, drm_fence_object_t **used_fence); - -/* Inline replacements for DRM_IOREMAP macros */ -static __inline__ void drm_core_ioremap(struct drm_map *map, - struct drm_device *dev) -{ - map->handle = drm_ioremap(map->offset, map->size, dev); -} - -static __inline__ void drm_core_ioremap_nocache(struct drm_map *map, - struct drm_device *dev) -{ - map->handle = drm_ioremap_nocache(map->offset, map->size, dev); -} - -static __inline__ void drm_core_ioremapfree(struct drm_map *map, - struct drm_device *dev) -{ - if (map->handle && map->size) - drm_ioremapfree(map->handle, map->size, dev); -} +extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); +extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token) @@ -1581,25 +1542,6 @@ static inline void drm_ctl_free(void *pt, size_t 
size, int area) drm_free_memctl(size); } -static inline void *drm_ctl_cache_alloc(kmem_cache_t *cache, size_t size, - int flags) -{ - void *ret; - if (drm_alloc_memctl(size)) - return NULL; - ret = kmem_cache_alloc(cache, flags); - if (!ret) - drm_free_memctl(size); - return ret; -} - -static inline void drm_ctl_cache_free(kmem_cache_t *cache, size_t size, - void *obj) -{ - kmem_cache_free(cache, obj); - drm_free_memctl(size); -} - /*@}*/ #endif /* __KERNEL__ */ diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index a5f1f9ee..9cdbdaf0 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -106,10 +106,6 @@ int drm_agp_acquire(drm_device_t * dev) return -ENODEV; if (dev->agp->acquired) return -EBUSY; -#ifndef VMAP_4_ARGS - if (dev->agp->cant_use_aperture) - return -EINVAL; -#endif #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) if ((retcode = agp_backend_acquire())) return retcode; @@ -563,6 +559,8 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) #define AGP_USER_MEMORY (AGP_USER_TYPES) #define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1) #endif +#define AGP_REQUIRED_MAJOR 0 +#define AGP_REQUIRED_MINOR 102 static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); @@ -673,6 +671,24 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, drm_ttm_backend_t *agp_be; drm_agp_ttm_priv *agp_priv; + struct agp_kern_info *info; + + if (!dev->agp) { + DRM_ERROR("AGP is not initialized.\n"); + return NULL; + } + info = &dev->agp->agp_info; + + if (info->version.major != AGP_REQUIRED_MAJOR || + info->version.minor < AGP_REQUIRED_MINOR) { + DRM_ERROR("Wrong agpgart version %d.%d\n" + "\tYou need at least version %d.%d.\n", + info->version.major, + info->version.minor, + AGP_REQUIRED_MAJOR, + AGP_REQUIRED_MINOR); + return NULL; + } agp_be = (backend != NULL) ? 
backend: drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_MAPPINGS); @@ -687,6 +703,7 @@ drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev, return NULL; } + agp_priv->mem = NULL; agp_priv->alloc_type = AGP_USER_MEMORY; agp_priv->cached_type = AGP_USER_CACHED_MEMORY; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 65e24fb6..c0e431b4 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -352,10 +352,20 @@ static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) } +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) static void drm_bo_delayed_workqueue(void *data) +#else +static void drm_bo_delayed_workqueue(struct work_struct *work) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) drm_device_t *dev = (drm_device_t *) data; drm_buffer_manager_t *bm = &dev->bm; +#else + drm_buffer_manager_t *bm = container_of(work, drm_buffer_manager_t, wq.work); + drm_device_t *dev = container_of(bm, drm_device_t, bm); +#endif + DRM_DEBUG("Delayed delete Worker\n"); @@ -1904,7 +1914,11 @@ int drm_bo_driver_init(drm_device_t * dev) if (ret) goto out_unlock; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev); +#else + INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue); +#endif bm->initialized = 1; bm->nice_mode = 1; atomic_set(&bm->count, 0); diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index d6ebc8d1..8793ba0e 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -179,7 +179,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, } } if (map->type == _DRM_REGISTERS) - map->handle = drm_ioremap(map->offset, map->size, dev); + map->handle = ioremap(map->offset, map->size); break; case _DRM_SHM: list = drm_find_matching_map(dev, map); @@ -195,7 +195,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, *maplist = list; return 0; } - map->handle = vmalloc_32(map->size); + map->handle = vmalloc_user(map->size); DRM_DEBUG("%lu %d %p\n", map->size, drm_order(map->size), map->handle); if (!map->handle) { @@ -279,6 +279,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, list = drm_alloc(sizeof(*list), DRM_MEM_MAPS); if (!list) { + if (map->type == _DRM_REGISTERS) + iounmap(map->handle); drm_free(map, sizeof(*map), DRM_MEM_MAPS); return -EINVAL; } @@ -295,6 +297,8 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, ret = drm_map_handle(dev, &list->hash, user_token, 0); if (ret) { + if (map->type == _DRM_REGISTERS) + iounmap(map->handle); drm_free(map, sizeof(*map), DRM_MEM_MAPS); drm_free(list, sizeof(*list), DRM_MEM_MAPS); mutex_unlock(&dev->struct_mutex); @@ -402,7 +406,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) switch (map->type) { case _DRM_REGISTERS: - drm_ioremapfree(map->handle, map->size, dev); + iounmap(map->handle); /* FALLTHROUGH */ case _DRM_FRAME_BUFFER: if (drm_core_has_MTRR(dev) && map->mtrr >= 0) { diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index b466f8bd..6bb58424 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -251,7 +251,8 @@ struct page *drm_vm_ttm_nopage(struct vm_area_struct *vma, page = NOPAGE_OOM; goto out; } - page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0); + page = ttm->pages[page_offset] = + alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); if (!page) { drm_free_memctl(PAGE_SIZE); page = NOPAGE_OOM; diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index a1a94399..3cb5d202 100644 --- a/linux-core/drm_compat.h +++ 
b/linux-core/drm_compat.h @@ -31,7 +31,6 @@ * OTHER DEALINGS IN THE SOFTWARE. */ -#include <asm/agp.h> #ifndef _DRM_COMPAT_H_ #define _DRM_COMPAT_H_ @@ -57,6 +56,12 @@ #define module_param(name, type, perm) #endif +/* older kernels had different irq args */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) +#undef DRM_IRQ_ARGS +#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs +#endif + #ifndef list_for_each_safe #define list_for_each_safe(pos, n, head) \ for (pos = (head)->next, n = pos->next; pos != (head); \ @@ -80,92 +85,6 @@ pos = n, n = list_entry(n->member.next, typeof(*n), member)) #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,19) -static inline struct page *vmalloc_to_page(void *vmalloc_addr) -{ - unsigned long addr = (unsigned long)vmalloc_addr; - struct page *page = NULL; - pgd_t *pgd = pgd_offset_k(addr); - pmd_t *pmd; - pte_t *ptep, pte; - - if (!pgd_none(*pgd)) { - pmd = pmd_offset(pgd, addr); - if (!pmd_none(*pmd)) { - preempt_disable(); - ptep = pte_offset_map(pmd, addr); - pte = *ptep; - if (pte_present(pte)) - page = pte_page(pte); - pte_unmap(ptep); - preempt_enable(); - } - } - return page; -} -#endif - -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,4,2) -#define down_write down -#define up_write up -#endif - -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) -#define DRM_PCI_DEV(pdev) &pdev->dev -#else -#define DRM_PCI_DEV(pdev) NULL -#endif - -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) -static inline unsigned iminor(struct inode *inode) -{ - return MINOR(inode->i_rdev); -} - -#define old_encode_dev(x) (x) - -struct drm_sysfs_class; -struct class_simple; -struct device; - -#define pci_dev_put(x) do {} while (0) -#define pci_get_subsys pci_find_subsys - -static inline struct class_device *DRM(sysfs_device_add) (struct drm_sysfs_class - * cs, dev_t dev, - struct device * - device, - const char *fmt, - ...) 
{ - return NULL; -} - -static inline void DRM(sysfs_device_remove) (dev_t dev) { -} - -static inline void DRM(sysfs_destroy) (struct drm_sysfs_class * cs) { -} - -static inline struct drm_sysfs_class *DRM(sysfs_create) (struct module * owner, - char *name) { - return NULL; -} - -#ifndef pci_pretty_name -#define pci_pretty_name(x) x->name -#endif - -struct drm_device; -static inline int radeon_create_i2c_busses(struct drm_device *dev) -{ - return 0; -}; -static inline void radeon_delete_i2c_busses(struct drm_device *dev) -{ -}; - -#endif - #ifndef __user #define __user #endif @@ -178,22 +97,27 @@ static inline void radeon_delete_i2c_busses(struct drm_device *dev) #define __GFP_COMP 0 #endif -#ifndef REMAP_PAGE_RANGE_5_ARGS -#define DRM_RPR_ARG(vma) -#else -#define DRM_RPR_ARG(vma) vma, -#endif - #define VM_OFFSET(vma) ((vma)->vm_pgoff << PAGE_SHIFT) #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot) { - return remap_page_range(DRM_RPR_ARG(vma) from, + return remap_page_range(vma, from, pfn << PAGE_SHIFT, size, pgprot); } + +static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags) +{ + void *addr; + + addr = kmalloc(size * nmemb, flags); + if (addr != NULL) + memset((void *)addr, 0, size * nmemb); + + return addr; +} #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) @@ -215,10 +139,6 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from #define __x86_64__ #endif -#ifndef pci_pretty_name -#define pci_pretty_name(dev) "" -#endif - /* sysfs __ATTR macro */ #ifndef __ATTR #define __ATTR(_name,_mode,_show,_store) { \ @@ -228,10 +148,17 @@ static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from } #endif +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) +#define vmalloc_user(_size) ({void * tmp = vmalloc(_size); \ + if (tmp) memset(tmp, 0, size); \ + (tmp);}) +#endif + + #include <linux/mm.h> #include <asm/page.h> -#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) && \ +#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && \ (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))) #define DRM_ODD_MM_COMPAT #endif @@ -253,16 +180,9 @@ extern void drm_clear_vma(struct vm_area_struct *vma, extern pgprot_t vm_get_page_prot(unsigned long vm_flags); -/* - * These are similar to the current kernel gatt pages allocator, only that we - * want a struct page pointer instead of a virtual address. This allows for pages - * that are not in the kernel linear map. - */ - -#define drm_alloc_gatt_pages(order) ({ \ - void *_virt = alloc_gatt_pages(order); \ - ((_virt) ? virt_to_page(_virt) : NULL);}) -#define drm_free_gatt_pages(pages, order) free_gatt_pages(page_address(pages), order) +#ifndef GFP_DMA32 +#define GFP_DMA32 0 +#endif #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) @@ -288,7 +208,7 @@ extern struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, #endif -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) /* * Hopefully, real NOPAGE_RETRY functionality will be in 2.6.19. 
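The `drm_compat.h` hunk above restores the three-argument interrupt-handler signature on kernels older than 2.6.19, while `drm_os_linux.h` (further down) now defines only the modern two-argument form, since 2.6.19 dropped the `struct pt_regs *` parameter from IRQ handlers. A short sketch of how a driver stays source-compatible either way, with a hypothetical handler and device type that are not part of this commit:

```c
#include <linux/version.h>
#include <linux/interrupt.h>

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
#define DRM_IRQ_ARGS	int irq, void *arg, struct pt_regs *regs
#else
#define DRM_IRQ_ARGS	int irq, void *arg
#endif

/* One handler body builds against either signature; 'arg' is the
 * device pointer passed to request_irq() in both cases. */
static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	struct example_device *dev = arg;	/* hypothetical device type */

	(void)dev;	/* a real handler would acknowledge the hardware here */
	return IRQ_HANDLED;
}
```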
diff --git a/shared-core/drm_drawable.c b/linux-core/drm_drawable.c index 0817e321..0817e321 100644 --- a/shared-core/drm_drawable.c +++ b/linux-core/drm_drawable.c diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 518e2aa3..45f563ff 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -446,52 +446,6 @@ static struct file_operations drm_stub_fops = { .open = drm_stub_open }; -static int drm_create_memory_caches(void) -{ - drm_cache.mm = kmem_cache_create("drm_mm_node_t", - sizeof(drm_mm_node_t), - 0, - SLAB_HWCACHE_ALIGN, - NULL,NULL); - if (!drm_cache.mm) - return -ENOMEM; - - drm_cache.fence_object= kmem_cache_create("drm_fence_object_t", - sizeof(drm_fence_object_t), - 0, - SLAB_HWCACHE_ALIGN, - NULL,NULL); - if (!drm_cache.fence_object) - return -ENOMEM; - - return 0; -} - -static void drm_free_mem_cache(kmem_cache_t *cache, - const char *name) -{ - if (!cache) - return; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) - if (kmem_cache_destroy(cache)) { - DRM_ERROR("Warning! DRM is leaking %s memory.\n", - name); - } -#else - kmem_cache_destroy(cache); -#endif -} - -static void drm_free_memory_caches(void ) -{ - - drm_free_mem_cache(drm_cache.fence_object, "fence object"); - drm_cache.fence_object = NULL; - drm_free_mem_cache(drm_cache.mm, "memory manager block"); - drm_cache.mm = NULL; -} - - static int __init drm_core_init(void) { int ret; @@ -499,9 +453,6 @@ static int __init drm_core_init(void) si_meminfo(&si); drm_init_memctl(si.totalram/2, si.totalram*3/4); - ret = drm_create_memory_caches(); - if (ret) - goto err_p1; ret = -ENOMEM; drm_cards_limit = @@ -539,13 +490,11 @@ err_p2: unregister_chrdev(DRM_MAJOR, "drm"); drm_free(drm_heads, sizeof(*drm_heads) * drm_cards_limit, DRM_MEM_STUB); err_p1: - drm_free_memory_caches(); return ret; } static void __exit drm_core_exit(void) { - drm_free_memory_caches(); remove_proc_entry("dri", NULL); drm_sysfs_destroy(drm_class); diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index f656340e..06d48255 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -117,8 +117,7 @@ void drm_fence_usage_deref_locked(drm_device_t * dev, DRM_DEBUG("Destroyed a fence object 0x%08lx\n", fence->base.hash.key); atomic_dec(&fm->count); - drm_ctl_cache_free(drm_cache.fence_object, sizeof(*fence), - fence); + drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); } } @@ -132,8 +131,7 @@ void drm_fence_usage_deref_unlocked(drm_device_t * dev, if (atomic_read(&fence->usage) == 0) { drm_fence_unring(dev, &fence->ring); atomic_dec(&fm->count); - drm_ctl_cache_free(drm_cache.fence_object, - sizeof(*fence), fence); + drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE); } mutex_unlock(&dev->struct_mutex); } @@ -439,8 +437,7 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t type, int ret; drm_fence_manager_t *fm = &dev->fm; - fence = drm_ctl_cache_alloc(drm_cache.fence_object, - sizeof(*fence), GFP_KERNEL); + fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE); if (!fence) return -ENOMEM; ret = drm_fence_object_init(dev, type, flags, fence); diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index b60ced34..84e06c87 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -46,7 +46,7 @@ static int drm_setup(drm_device_t * dev) drm_local_map_t *map; int i; int ret; - + int sareapage; if (dev->driver->firstopen) { ret = dev->driver->firstopen(dev); @@ -57,8 +57,8 @@ static int drm_setup(drm_device_t * dev) dev->magicfree.next = NULL; /* prebuild the SAREA */ - - i = drm_addmap(dev, 0, SAREA_MAX, 
_DRM_SHM, _DRM_CONTAINS_LOCK, &map); + sareapage = max(SAREA_MAX, PAGE_SIZE); + i = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK, &map); if (i != 0) return i; @@ -426,7 +426,7 @@ int drm_release(struct inode *inode, struct file *filp) current->pid, (long)old_encode_dev(priv->head->device), dev->open_count); - if (dev->driver->reclaim_buffers_locked) { + if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { unsigned long _end = jiffies + DRM_HZ*3; do { @@ -446,12 +446,12 @@ int drm_release(struct inode *inode, struct file *filp) * holds the lock. Then we can run reclaim buffers locked anyway. */ - DRM_ERROR("Reclaim buffers locked deadlock.\n"); - DRM_ERROR("This is probably a single thread having multiple\n"); - DRM_ERROR("DRM file descriptors open either dying or " - "closing file descriptors\n"); - DRM_ERROR("while having the lock. I will not reclaim buffers.\n"); - DRM_ERROR("Locking context is 0x%08x\n", + DRM_ERROR("Reclaim buffers locked deadlock.\n" + "\tThis is probably a single thread having multiple\n" + "\tDRM file descriptors open either dying or" + " closing file descriptors\n" + "\twhile having the lock. I will not reclaim buffers.\n" + "\tLocking context is 0x%08x\n", _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); } } else if (drm_i_have_hw_lock(filp)) { diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 776f462e..3dcc4bfb 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -337,7 +337,7 @@ int drm_setversion(DRM_IOCTL_ARGS) retv.drm_dd_major = dev->driver->major; retv.drm_dd_minor = dev->driver->minor; - if (copy_to_user(argp, &retv, sizeof(sv))) + if (copy_to_user(argp, &retv, sizeof(retv))) return -EFAULT; if (sv.drm_di_major != -1) { diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index 3370c279..62f54b67 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -134,13 +134,7 @@ int drm_mem_info(char *buf, char **start, off_t offset, /** Wrapper around kmalloc() */ void *drm_calloc(size_t nmemb, size_t size, int area) { - void *addr; - - addr = kmalloc(size * nmemb, GFP_KERNEL); - if (addr != NULL) - memset((void *)addr, 0, size * nmemb); - - return addr; + return kcalloc(nmemb, size, GFP_KERNEL); } EXPORT_SYMBOL(drm_calloc); @@ -250,3 +244,26 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) } #endif /* agp */ #endif /* debug_memory */ + +void drm_core_ioremap(struct drm_map *map, struct drm_device *dev) +{ + if (drm_core_has_AGP(dev) && + dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) + map->handle = agp_remap(map->offset, map->size, dev); + else + map->handle = ioremap(map->offset, map->size); +} +EXPORT_SYMBOL_GPL(drm_core_ioremap); + +void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) +{ + if (!map->handle || !map->size) + return; + + if (drm_core_has_AGP(dev) && + dev->agp && dev->agp->cant_use_aperture && map->type == _DRM_AGP) + vunmap(map->handle); + else + iounmap(map->handle); +} +EXPORT_SYMBOL_GPL(drm_core_ioremapfree); diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h index 4a2c3583..32b89e5f 100644 --- a/linux-core/drm_memory.h +++ b/linux-core/drm_memory.h @@ -43,7 +43,7 @@ */ /* Need the 4-argument version of vmap(). 
*/ -#if __OS_HAS_AGP && defined(VMAP_4_ARGS) +#if __OS_HAS_AGP #include <linux/vmalloc.h> @@ -57,18 +57,6 @@ # endif #endif -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) -#ifndef pte_offset_kernel -# define pte_offset_kernel(dir, address) pte_offset(dir, address) -#endif -#ifndef pte_pfn -# define pte_pfn(pte) (pte_page(pte) - mem_map) -#endif -#ifndef pfn_to_page -# define pfn_to_page(pfn) (mem_map + (pfn)) -#endif -#endif - /* * Find the drm_map that covers the range [offset, offset+size). */ @@ -134,19 +122,6 @@ static inline void *agp_remap(unsigned long offset, unsigned long size, return addr; } -static inline unsigned long drm_follow_page(void *vaddr) -{ - pgd_t *pgd = pgd_offset_k((unsigned long) vaddr); -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10) - pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr); -#else - pud_t *pud = pud_offset(pgd, (unsigned long) vaddr); - pmd_t *pmd = pmd_offset(pud, (unsigned long) vaddr); -#endif - pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr); - return pte_pfn(*ptep) << PAGE_SHIFT; -} - #else /* __OS_HAS_AGP */ static inline drm_map_t *drm_lookup_map(unsigned long offset, @@ -161,73 +136,4 @@ static inline void *agp_remap(unsigned long offset, unsigned long size, return NULL; } -static inline unsigned long drm_follow_page(void *vaddr) -{ - return 0; -} -#endif - -#ifndef DEBUG_MEMORY -static inline void *drm_ioremap(unsigned long offset, unsigned long size, - drm_device_t * dev) -{ -#if defined(VMAP_4_ARGS) - if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { - drm_map_t *map = drm_lookup_map(offset, size, dev); - - if (map && map->type == _DRM_AGP) - return agp_remap(offset, size, dev); - } -#endif - - return ioremap(offset, size); -} - -static inline void *drm_ioremap_nocache(unsigned long offset, - unsigned long size, drm_device_t * dev) -{ -#if defined(VMAP_4_ARGS) - if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) { - drm_map_t *map = drm_lookup_map(offset, size, dev); - - if (map && map->type == _DRM_AGP) - return agp_remap(offset, size, dev); - } -#endif - - return ioremap_nocache(offset, size); -} - -static inline void drm_ioremapfree(void *pt, unsigned long size, - drm_device_t * dev) -{ -#if defined(VMAP_4_ARGS) - /* - * This is a bit ugly. It would be much cleaner if the DRM API would use separate - * routines for handling mappings in the AGP space. Hopefully this can be done in - * a future revision of the interface... 
- */ - if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture - && ((unsigned long)pt >= VMALLOC_START - && (unsigned long)pt < VMALLOC_END)) { - unsigned long offset; - drm_map_t *map; - - offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK); - map = drm_lookup_map(offset, size, dev); - if (map && map->type == _DRM_AGP) { - vunmap(pt); - return; - } - } -#endif - iounmap(pt); -} -#else -extern void *drm_ioremap(unsigned long offset, unsigned long size, - drm_device_t * dev); -extern void *drm_ioremap_nocache(unsigned long offset, - unsigned long size, drm_device_t * dev); -extern void drm_ioremapfree(void *pt, unsigned long size, - drm_device_t * dev); #endif diff --git a/linux-core/drm_memory_debug.c b/linux-core/drm_memory_debug.c index aa1b2922..c124f8f8 100644 --- a/linux-core/drm_memory_debug.c +++ b/linux-core/drm_memory_debug.c @@ -289,79 +289,6 @@ void drm_free_pages(unsigned long address, int order, int area) } } -void *drm_ioremap(unsigned long offset, unsigned long size, drm_device_t * dev) -{ - void *pt; - - if (!size) { - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Mapping 0 bytes at 0x%08lx\n", offset); - return NULL; - } - - if (!(pt = drm_ioremap(offset, size, dev))) { - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; - spin_unlock(&drm_mem_lock); - return NULL; - } - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; - drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; - spin_unlock(&drm_mem_lock); - return pt; -} -EXPORT_SYMBOL(drm_ioremap); - -void *drm_ioremap_nocache(unsigned long offset, unsigned long size, - drm_device_t * dev) -{ - void *pt; - - if (!size) { - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Mapping 0 bytes at 0x%08lx\n", offset); - return NULL; - } - - if (!(pt = drm_ioremap_nocache(offset, size, dev))) { - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; - spin_unlock(&drm_mem_lock); - return NULL; - } - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; - drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; - spin_unlock(&drm_mem_lock); - return pt; -} -EXPORT_SYMBOL(drm_ioremap_nocache); - -void drm_ioremapfree(void *pt, unsigned long size, drm_device_t * dev) -{ - int alloc_count; - int free_count; - - if (!pt) - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Attempt to free NULL pointer\n"); - else - drm_ioremapfree(pt, size, dev); - - spin_lock(&drm_mem_lock); - drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size; - free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count; - alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; - spin_unlock(&drm_mem_lock); - if (free_count > alloc_count) { - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Excess frees: %d frees, %d allocs\n", - free_count, alloc_count); - } -} -EXPORT_SYMBOL(drm_ioremapfree); - #if __OS_HAS_AGP DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h index 1e0a63b7..9d0dedfb 100644 --- a/linux-core/drm_memory_debug.h +++ b/linux-core/drm_memory_debug.h @@ -275,74 +275,6 @@ void drm_free_pages (unsigned long address, int order, int area) { } } -void *drm_ioremap (unsigned long offset, unsigned long size, - drm_device_t * dev) { - void *pt; - - if (!size) { - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Mapping 0 bytes at 0x%08lx\n", offset); - return NULL; - } - - if (!(pt = drm_ioremap(offset, size, dev))) { - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; - 
spin_unlock(&drm_mem_lock); - return NULL; - } - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; - drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; - spin_unlock(&drm_mem_lock); - return pt; -} - -void *drm_ioremap_nocache (unsigned long offset, unsigned long size, - drm_device_t * dev) { - void *pt; - - if (!size) { - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Mapping 0 bytes at 0x%08lx\n", offset); - return NULL; - } - - if (!(pt = drm_ioremap_nocache(offset, size, dev))) { - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].fail_count; - spin_unlock(&drm_mem_lock); - return NULL; - } - spin_lock(&drm_mem_lock); - ++drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; - drm_mem_stats[DRM_MEM_MAPPINGS].bytes_allocated += size; - spin_unlock(&drm_mem_lock); - return pt; -} - -void drm_ioremapfree (void *pt, unsigned long size, drm_device_t * dev) { - int alloc_count; - int free_count; - - if (!pt) - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Attempt to free NULL pointer\n"); - else - drm_ioremapfree(pt, size, dev); - - spin_lock(&drm_mem_lock); - drm_mem_stats[DRM_MEM_MAPPINGS].bytes_freed += size; - free_count = ++drm_mem_stats[DRM_MEM_MAPPINGS].free_count; - alloc_count = drm_mem_stats[DRM_MEM_MAPPINGS].succeed_count; - spin_unlock(&drm_mem_lock); - if (free_count > alloc_count) { - DRM_MEM_ERROR(DRM_MEM_MAPPINGS, - "Excess frees: %d frees, %d allocs\n", - free_count, alloc_count); - } -} - #if __OS_HAS_AGP DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, int pages, u32 type) { diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index a5566b2f..5889ee4d 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -49,7 +49,7 @@ unsigned long drm_mm_tail_space(drm_mm_t *mm) struct list_head *tail_node; drm_mm_node_t *entry; - tail_node = mm->root_node.ml_entry.prev; + tail_node = mm->ml_entry.prev; entry = list_entry(tail_node, drm_mm_node_t, ml_entry); if (!entry->free) return 0; @@ -62,7 +62,7 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) struct list_head *tail_node; drm_mm_node_t *entry; - tail_node = mm->root_node.ml_entry.prev; + tail_node = mm->ml_entry.prev; entry = list_entry(tail_node, drm_mm_node_t, ml_entry); if (!entry->free) return -ENOMEM; @@ -82,8 +82,7 @@ static int drm_mm_create_tail_node(drm_mm_t *mm, drm_mm_node_t *child; child = (drm_mm_node_t *) - drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), - GFP_KERNEL); + drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -92,8 +91,8 @@ static int drm_mm_create_tail_node(drm_mm_t *mm, child->start = start; child->mm = mm; - list_add_tail(&child->ml_entry, &mm->root_node.ml_entry); - list_add_tail(&child->fl_entry, &mm->root_node.fl_entry); + list_add_tail(&child->ml_entry, &mm->ml_entry); + list_add_tail(&child->fl_entry, &mm->fl_entry); return 0; } @@ -104,7 +103,7 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) struct list_head *tail_node; drm_mm_node_t *entry; - tail_node = mm->root_node.ml_entry.prev; + tail_node = mm->ml_entry.prev; entry = list_entry(tail_node, drm_mm_node_t, ml_entry); if (!entry->free) { return drm_mm_create_tail_node(mm, entry->start + entry->size, size); @@ -119,8 +118,7 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, drm_mm_node_t *child; child = (drm_mm_node_t *) - drm_ctl_cache_alloc(drm_cache.mm, sizeof(*child), - GFP_KERNEL); + drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -150,7 +148,7 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, unsigned 
tmp = 0; if (alignment) - tmp = size % alignment; + tmp = parent->start % alignment; if (tmp) { align_splitoff = drm_mm_split_at_start(parent, alignment - tmp); @@ -164,12 +162,8 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, return parent; } else { child = drm_mm_split_at_start(parent, size); - if (!child) { - if (align_splitoff) - drm_mm_put_block(align_splitoff); - return NULL; - } } + if (align_splitoff) drm_mm_put_block(align_splitoff); @@ -185,9 +179,8 @@ void drm_mm_put_block(drm_mm_node_t * cur) { drm_mm_t *mm = cur->mm; - drm_mm_node_t *list_root = &mm->root_node; struct list_head *cur_head = &cur->ml_entry; - struct list_head *root_head = &list_root->ml_entry; + struct list_head *root_head = &mm->ml_entry; drm_mm_node_t *prev_node = NULL; drm_mm_node_t *next_node; @@ -207,9 +200,8 @@ void drm_mm_put_block(drm_mm_node_t * cur) prev_node->size += next_node->size; list_del(&next_node->ml_entry); list_del(&next_node->fl_entry); - drm_ctl_cache_free(drm_cache.mm, - sizeof(*next_node), - next_node); + drm_ctl_free(next_node, sizeof(*next_node), + DRM_MEM_MM); } else { next_node->size += cur->size; next_node->start = cur->start; @@ -219,10 +211,10 @@ void drm_mm_put_block(drm_mm_node_t * cur) } if (!merged) { cur->free = 1; - list_add(&cur->fl_entry, &list_root->fl_entry); + list_add(&cur->fl_entry, &mm->fl_entry); } else { list_del(&cur->ml_entry); - drm_ctl_cache_free(drm_cache.mm, sizeof(*cur), cur); + drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM); } } @@ -231,7 +223,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, unsigned alignment, int best_match) { struct list_head *list; - const struct list_head *free_stack = &mm->root_node.fl_entry; + const struct list_head *free_stack = &mm->fl_entry; drm_mm_node_t *entry; drm_mm_node_t *best; unsigned long best_size; @@ -244,8 +236,11 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, entry = list_entry(list, drm_mm_node_t, fl_entry); wasted = 0; + if (entry->size < size) + continue; + if (alignment) { - register unsigned tmp = size % alignment; + register unsigned tmp = entry->start % alignment; if (tmp) wasted += alignment - tmp; } @@ -266,15 +261,15 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, int drm_mm_clean(drm_mm_t * mm) { - struct list_head *head = &mm->root_node.ml_entry; + struct list_head *head = &mm->ml_entry; return (head->next->next == head); } int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) { - INIT_LIST_HEAD(&mm->root_node.ml_entry); - INIT_LIST_HEAD(&mm->root_node.fl_entry); + INIT_LIST_HEAD(&mm->ml_entry); + INIT_LIST_HEAD(&mm->fl_entry); return drm_mm_create_tail_node(mm, start, size); } @@ -283,20 +278,20 @@ EXPORT_SYMBOL(drm_mm_init); void drm_mm_takedown(drm_mm_t * mm) { - struct list_head *bnode = mm->root_node.fl_entry.next; + struct list_head *bnode = mm->fl_entry.next; drm_mm_node_t *entry; entry = list_entry(bnode, drm_mm_node_t, fl_entry); - if (entry->ml_entry.next != &mm->root_node.ml_entry || - entry->fl_entry.next != &mm->root_node.fl_entry) { + if (entry->ml_entry.next != &mm->ml_entry || + entry->fl_entry.next != &mm->fl_entry) { DRM_ERROR("Memory manager not clean. 
Delaying takedown\n"); return; } list_del(&entry->fl_entry); list_del(&entry->ml_entry); - drm_ctl_cache_free(drm_cache.mm, sizeof(*entry), entry); + drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM); } EXPORT_SYMBOL(drm_mm_takedown); diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 42700978..816959e8 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -56,7 +56,7 @@ drm_device_t *dev = priv->head->dev /** IRQ handler arguments and return type and values */ -#define DRM_IRQ_ARGS int irq, void *arg, struct pt_regs *regs +#define DRM_IRQ_ARGS int irq, void *arg /** backwards compatibility with old irq return values */ #ifndef IRQ_HANDLED typedef void irqreturn_t; @@ -66,13 +66,8 @@ typedef void irqreturn_t; /** AGP types */ #if __OS_HAS_AGP -#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,70) -#define DRM_AGP_MEM agp_memory -#define DRM_AGP_KERN agp_kern_info -#else #define DRM_AGP_MEM struct agp_memory #define DRM_AGP_KERN struct agp_kern_info -#endif #else /* define some dummy types for non AGP supporting kernels */ struct no_agp_kern { @@ -98,9 +93,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define MTRR_TYPE_WRCOMB 1 #endif -/** Task queue handler arguments */ -#define DRM_TASKQUEUE_ARGS void *arg - /** For data going into the kernel through the ioctl argument */ #define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \ if ( copy_from_user(&arg1, arg2, arg3) ) \ diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index 863cacfc..1c2c17fe 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -452,19 +452,23 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request, *start = &buf[offset]; *eof = 0; + DRM_PROC_PRINT("Object accounting:\n\n"); if (fm->initialized) { - DRM_PROC_PRINT("Number of active fence objects: %d.\n\n", + DRM_PROC_PRINT("Number of active fence objects: %d.\n", atomic_read(&fm->count)); } else { - DRM_PROC_PRINT("Fence objects are not supported by this driver\n\n"); + DRM_PROC_PRINT("Fence objects are not supported by this driver\n"); } if (bm->initialized) { DRM_PROC_PRINT("Number of active buffer objects: %d.\n\n", atomic_read(&bm->count)); + } + DRM_PROC_PRINT("Memory accounting:\n\n"); + if (bm->initialized) { DRM_PROC_PRINT("Number of locked GATT pages: %lu.\n", bm->cur_pages); } else { - DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n\n"); + DRM_PROC_PRINT("Buffer objects are not supported by this driver.\n"); } drm_query_memctl(&used_mem, &low_mem, &high_mem); diff --git a/linux-core/drm_sarea.h b/linux-core/drm_sarea.h new file mode 120000 index 00000000..fd428f42 --- /dev/null +++ b/linux-core/drm_sarea.h @@ -0,0 +1 @@ +../shared-core/drm_sarea.h
\ No newline at end of file diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index c03a56a1..60123cdc 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -47,18 +47,13 @@ MODULE_LICENSE("GPL and additional rights"); MODULE_PARM_DESC(cards_limit, "Maximum number of graphics cards"); MODULE_PARM_DESC(debug, "Enable debug output"); -module_param_named(cards_limit, drm_cards_limit, int, S_IRUGO); -module_param_named(debug, drm_debug, int, S_IRUGO|S_IWUGO); +module_param_named(cards_limit, drm_cards_limit, int, 0444); +module_param_named(debug, drm_debug, int, 0600); drm_head_t **drm_heads; struct drm_sysfs_class *drm_class; struct proc_dir_entry *drm_proc_root; -drm_cache_t drm_cache = -{ .mm = NULL, - .fence_object = NULL -}; - static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) @@ -249,9 +244,9 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, if ((ret = drm_get_head(dev, &dev->primary))) goto err_g1; - DRM_INFO("Initialized %s %d.%d.%d %s on minor %d: %s\n", + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, - driver->date, dev->primary.minor, pci_pretty_name(dev->pdev)); + driver->date, dev->primary.minor); return 0; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index e5dd0532..ace0778b 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -162,7 +162,7 @@ struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, memset(s_dev, 0x00, sizeof(*s_dev)); s_dev->dev = MKDEV(DRM_MAJOR, head->minor); - s_dev->class_dev.dev = DRM_PCI_DEV(head->dev->pdev); + s_dev->class_dev.dev = &head->dev->pdev->dev; s_dev->class_dev.class = &cs->class; snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor); diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 931972af..1c9b1cf7 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -193,7 +193,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) * End debugging. 
*/ - drm_free_gatt_pages(*cur_page, 0); + __free_page(*cur_page); drm_free_memctl(PAGE_SIZE); --bm->cur_pages; } @@ -225,7 +225,7 @@ static int drm_ttm_populate(drm_ttm_t * ttm) if (drm_alloc_memctl(PAGE_SIZE)) { return -ENOMEM; } - page = drm_alloc_gatt_pages(0); + page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); if (!page) { drm_free_memctl(PAGE_SIZE); return -ENOMEM; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 6eb996ad..827a7bdb 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -159,9 +159,9 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, } #endif /* __OS_HAS_AGP */ -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20) || \ +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21) || \ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) static #endif struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, @@ -208,7 +208,8 @@ struct page *drm_vm_ttm_fault(struct vm_area_struct *vma, data->type = VM_FAULT_OOM; goto out; } - page = ttm->pages[page_offset] = drm_alloc_gatt_pages(0); + page = ttm->pages[page_offset] = + alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32); if (!page) { drm_free_memctl(PAGE_SIZE); data->type = VM_FAULT_OOM; @@ -269,13 +270,13 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */ if (!map) - return NOPAGE_OOM; /* Nothing allocated */ + return NOPAGE_SIGBUS; /* Nothing allocated */ offset = address - vma->vm_start; i = (unsigned long)map->handle + offset; page = vmalloc_to_page((void *)i); if (!page) - return NOPAGE_OOM; + return NOPAGE_SIGBUS; get_page(page); DRM_DEBUG("shm_nopage 0x%lx\n", address); @@ -348,7 +349,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) map->size); DRM_DEBUG("mtrr_del = %d\n", retcode); } - drm_ioremapfree(map->handle, map->size, dev); + iounmap(map->handle); break; case _DRM_SHM: vfree(map->handle); @@ -396,7 +397,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */ if (!dma->pagelist) - return NOPAGE_OOM; /* Nothing allocated */ + return NOPAGE_SIGBUS; /* Nothing allocated */ offset = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */ page_nr = offset >> PAGE_SHIFT; @@ -435,7 +436,7 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */ if (!entry->pagelist) - return NOPAGE_OOM; /* Nothing allocated */ + return NOPAGE_SIGBUS; /* Nothing allocated */ offset = address - vma->vm_start; map_offset = map->offset - (unsigned long)dev->sg->virtual; @@ -446,8 +447,6 @@ static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, return page; } -#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0) - static struct page *drm_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { @@ -481,34 +480,6 @@ static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, } -#else /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */ - -static struct page *drm_vm_nopage(struct vm_area_struct *vma, - unsigned long address, int unused) -{ - return drm_do_vm_nopage(vma, address); -} - -static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma, - unsigned long address, int unused) -{ - return drm_do_vm_shm_nopage(vma, address); -} - -static struct page 
*drm_vm_dma_nopage(struct vm_area_struct *vma, - unsigned long address, int unused) -{ - return drm_do_vm_dma_nopage(vma, address); -} - -static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, - unsigned long address, int unused) -{ - return drm_do_vm_sg_nopage(vma, address); -} - -#endif - /** AGP virtual memory operations */ static struct vm_operations_struct drm_vm_ops = { .nopage = drm_vm_nopage, @@ -537,7 +508,7 @@ static struct vm_operations_struct drm_vm_sg_ops = { .close = drm_vm_close, }; -#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) static struct vm_operations_struct drm_vm_ttm_ops = { .nopage = drm_vm_ttm_nopage, .open = drm_vm_ttm_open_wrapper, @@ -712,12 +683,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) } vma->vm_ops = &drm_vm_dma_ops; - -#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ - vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ -#else vma->vm_flags |= VM_RESERVED; /* Don't swap */ -#endif vma->vm_file = filp; /* Needed for drm_vm_open() */ drm_vm_open(vma); @@ -829,6 +795,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_flags |= VM_IO; /* not in core dump */ vma->vm_page_prot = drm_io_prot(map->type, vma); #ifdef __sparc__ + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); if (io_remap_pfn_range(vma, vma->vm_start, (map->offset + offset) >>PAGE_SHIFT, vma->vm_end - vma->vm_start, @@ -859,20 +826,12 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) vma->vm_private_data = (void *)map; /* Don't let this area swap. Change when DRM_KERNEL advisory is supported. */ -#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ - vma->vm_flags |= VM_LOCKED; -#else vma->vm_flags |= VM_RESERVED; -#endif break; case _DRM_SCATTER_GATHER: vma->vm_ops = &drm_vm_sg_ops; vma->vm_private_data = (void *)map; -#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ - vma->vm_flags |= VM_LOCKED; -#else vma->vm_flags |= VM_RESERVED; -#endif break; case _DRM_TTM: { vma->vm_ops = &drm_vm_ttm_ops; @@ -891,11 +850,7 @@ int drm_mmap(struct file *filp, struct vm_area_struct *vma) default: return -EINVAL; /* This should never happen. 
*/ } -#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */ - vma->vm_flags |= VM_LOCKED | VM_SHM; /* Don't swap */ -#else vma->vm_flags |= VM_RESERVED; /* Don't swap */ -#endif vma->vm_file = filp; /* Needed for drm_vm_open() */ drm_vm_open(vma); diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index bdbb31fa..ad4d2fce 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -39,12 +39,6 @@ #include "i810_drm.h" #include "i810_drv.h" -#ifdef DO_MUNMAP_4_ARGS -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) -#else -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l) -#endif - #define I810_BUF_FREE 2 #define I810_BUF_CLIENT 1 #define I810_BUF_HARDWARE 0 @@ -186,7 +180,7 @@ static int i810_unmap_buffer(drm_buf_t * buf) return -EINVAL; down_write(¤t->mm->mmap_sem); - retcode = DO_MUNMAP(current->mm, + retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, (size_t) buf->total); up_write(¤t->mm->mmap_sem); @@ -244,8 +238,7 @@ static int i810_dma_cleanup(drm_device_t * dev) (drm_i810_private_t *) dev->dev_private; if (dev_priv->ring.virtual_start) { - drm_ioremapfree((void *)dev_priv->ring.virtual_start, - dev_priv->ring.Size, dev); + drm_core_ioremapfree(&dev_priv->ring.map, dev); } if (dev_priv->hw_status_page) { pci_free_consistent(dev->pdev, PAGE_SIZE, @@ -261,9 +254,9 @@ static int i810_dma_cleanup(drm_device_t * dev) for (i = 0; i < dma->buf_count; i++) { drm_buf_t *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; + if (buf_priv->kernel_virtual && buf->total) - drm_ioremapfree(buf_priv->kernel_virtual, - buf->total, dev); + drm_core_ioremapfree(&buf_priv->map, dev); } } return 0; @@ -336,8 +329,15 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) *buf_priv->in_use = I810_BUF_FREE; - buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, - buf->total, dev); + buf_priv->map.offset = buf->bus_address; + buf_priv->map.size = buf->total; + buf_priv->map.type = _DRM_AGP; + buf_priv->map.flags = 0; + buf_priv->map.mtrr = 0; + + drm_core_ioremap(&buf_priv->map, dev); + buf_priv->kernel_virtual = buf_priv->map.handle; + } return 0; } @@ -388,18 +388,24 @@ static int i810_dma_initialize(drm_device_t * dev, dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; - dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + - init->ring_start, - init->ring_size, dev); + dev_priv->ring.map.offset = dev->agp->base + init->ring_start; + dev_priv->ring.map.size = init->ring_size; + dev_priv->ring.map.type = _DRM_AGP; + dev_priv->ring.map.flags = 0; + dev_priv->ring.map.mtrr = 0; - if (dev_priv->ring.virtual_start == NULL) { + drm_core_ioremap(&dev_priv->ring.map, dev); + + if (dev_priv->ring.map.handle == NULL) { dev->dev_private = (void *)dev_priv; i810_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return -ENOMEM; + return DRM_ERR(ENOMEM); } + dev_priv->ring.virtual_start = dev_priv->ring.map.handle; + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; dev_priv->w = init->w; diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index bb7358d2..69d79499 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -61,6 +61,7 @@ typedef struct drm_i810_buf_priv { int currently_mapped; void *virtual; void *kernel_virtual; + drm_local_map_t map; } drm_i810_buf_priv_t; typedef struct _drm_i810_ring_buffer { @@ -72,6 +73,7 @@ typedef struct _drm_i810_ring_buffer { int head; int tail; int space; + drm_local_map_t map; } drm_i810_ring_buffer_t; 
typedef struct drm_i810_private { diff --git a/linux-core/i830_dma.c b/linux-core/i830_dma.c index 4526ccf1..e93307fb 100644 --- a/linux-core/i830_dma.c +++ b/linux-core/i830_dma.c @@ -41,12 +41,6 @@ #include "i830_drm.h" #include "i830_drv.h" -#ifdef DO_MUNMAP_4_ARGS -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l, 1) -#else -#define DO_MUNMAP(m, a, l) do_munmap(m, a, l) -#endif - #define I830_BUF_FREE 2 #define I830_BUF_CLIENT 1 #define I830_BUF_HARDWARE 0 @@ -174,7 +168,7 @@ static int i830_unmap_buffer(drm_buf_t * buf) return -EINVAL; down_write(&current->mm->mmap_sem); - retcode = DO_MUNMAP(current->mm, + retcode = do_munmap(current->mm, (unsigned long)buf_priv->virtual, (size_t) buf->total); up_write(&current->mm->mmap_sem); @@ -232,8 +226,7 @@ static int i830_dma_cleanup(drm_device_t * dev) (drm_i830_private_t *) dev->dev_private; if (dev_priv->ring.virtual_start) { - drm_ioremapfree((void *)dev_priv->ring.virtual_start, - dev_priv->ring.Size, dev); + drm_core_ioremapfree(&dev_priv->ring.map, dev); } if (dev_priv->hw_status_page) { pci_free_consistent(dev->pdev, PAGE_SIZE, @@ -251,8 +244,7 @@ static int i830_dma_cleanup(drm_device_t * dev) drm_buf_t *buf = dma->buflist[i]; drm_i830_buf_priv_t *buf_priv = buf->dev_private; if (buf_priv->kernel_virtual && buf->total) - drm_ioremapfree(buf_priv->kernel_virtual, - buf->total, dev); + drm_core_ioremapfree(&buf_priv->map, dev); } } return 0; @@ -329,8 +321,14 @@ static int i830_freelist_init(drm_device_t * dev, drm_i830_private_t * dev_priv) *buf_priv->in_use = I830_BUF_FREE; - buf_priv->kernel_virtual = drm_ioremap(buf->bus_address, - buf->total, dev); + buf_priv->map.offset = buf->bus_address; + buf_priv->map.size = buf->total; + buf_priv->map.type = _DRM_AGP; + buf_priv->map.flags = 0; + buf_priv->map.mtrr = 0; + + drm_core_ioremap(&buf_priv->map, dev); + buf_priv->kernel_virtual = buf_priv->map.handle; } return 0; } @@ -382,18 +380,24 @@ static int i830_dma_initialize(drm_device_t * dev, dev_priv->ring.End = init->ring_end; dev_priv->ring.Size = init->ring_size; - dev_priv->ring.virtual_start = drm_ioremap(dev->agp->base + - init->ring_start, - init->ring_size, dev); + dev_priv->ring.map.offset = dev->agp->base + init->ring_start; + dev_priv->ring.map.size = init->ring_size; + dev_priv->ring.map.type = _DRM_AGP; + dev_priv->ring.map.flags = 0; + dev_priv->ring.map.mtrr = 0; - if (dev_priv->ring.virtual_start == NULL) { + drm_core_ioremap(&dev_priv->ring.map, dev); + + if (dev_priv->ring.map.handle == NULL) { dev->dev_private = (void *)dev_priv; i830_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return -ENOMEM; + return DRM_ERR(ENOMEM); } + dev_priv->ring.virtual_start = dev_priv->ring.map.handle; + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; dev_priv->w = init->w; diff --git a/linux-core/i830_drv.h b/linux-core/i830_drv.h index 85bc5be6..e91f94af 100644 --- a/linux-core/i830_drv.h +++ b/linux-core/i830_drv.h @@ -68,6 +68,7 @@ typedef struct drm_i830_buf_priv { int currently_mapped; void __user *virtual; void *kernel_virtual; + drm_local_map_t map; } drm_i830_buf_priv_t; typedef struct _drm_i830_ring_buffer { @@ -79,6 +80,7 @@ typedef struct _drm_i830_ring_buffer { int head; int tail; int space; + drm_local_map_t map; } drm_i830_ring_buffer_t; typedef struct drm_i830_private { diff --git a/linux-core/i915_dma.c b/linux-core/i915_dma.c new file mode 120000 index 00000000..c61d967e --- /dev/null +++ b/linux-core/i915_dma.c @@ -0,0 +1 @@ +../shared-core/i915_dma.c
\ No newline at end of file diff --git a/linux-core/i915_drm.h b/linux-core/i915_drm.h new file mode 120000 index 00000000..ed53f01d --- /dev/null +++ b/linux-core/i915_drm.h @@ -0,0 +1 @@ +../shared-core/i915_drm.h
\ No newline at end of file diff --git a/linux-core/i915_drv.h b/linux-core/i915_drv.h new file mode 120000 index 00000000..085558ca --- /dev/null +++ b/linux-core/i915_drv.h @@ -0,0 +1 @@ +../shared-core/i915_drv.h
\ No newline at end of file diff --git a/linux-core/i915_irq.c b/linux-core/i915_irq.c new file mode 120000 index 00000000..2058a2e4 --- /dev/null +++ b/linux-core/i915_irq.c @@ -0,0 +1 @@ +../shared-core/i915_irq.c
\ No newline at end of file diff --git a/linux-core/i915_mem.c b/linux-core/i915_mem.c new file mode 120000 index 00000000..e8e56553 --- /dev/null +++ b/linux-core/i915_mem.c @@ -0,0 +1 @@ +../shared-core/i915_mem.c
\ No newline at end of file diff --git a/linux-core/linux b/linux-core/linux new file mode 120000 index 00000000..945c9b46 --- /dev/null +++ b/linux-core/linux @@ -0,0 +1 @@ +.
\ No newline at end of file diff --git a/linux-core/mach64_dma.c b/linux-core/mach64_dma.c new file mode 120000 index 00000000..e5c28975 --- /dev/null +++ b/linux-core/mach64_dma.c @@ -0,0 +1 @@ +../shared-core/mach64_dma.c
\ No newline at end of file diff --git a/linux-core/mach64_drm.h b/linux-core/mach64_drm.h new file mode 120000 index 00000000..136ea936 --- /dev/null +++ b/linux-core/mach64_drm.h @@ -0,0 +1 @@ +../shared-core/mach64_drm.h
\ No newline at end of file diff --git a/linux-core/mach64_drv.h b/linux-core/mach64_drv.h new file mode 120000 index 00000000..85222cc2 --- /dev/null +++ b/linux-core/mach64_drv.h @@ -0,0 +1 @@ +../shared-core/mach64_drv.h
\ No newline at end of file diff --git a/linux-core/mach64_irq.c b/linux-core/mach64_irq.c new file mode 120000 index 00000000..a1235d58 --- /dev/null +++ b/linux-core/mach64_irq.c @@ -0,0 +1 @@ +../shared-core/mach64_irq.c
\ No newline at end of file diff --git a/linux-core/mach64_state.c b/linux-core/mach64_state.c new file mode 120000 index 00000000..b11f202c --- /dev/null +++ b/linux-core/mach64_state.c @@ -0,0 +1 @@ +../shared-core/mach64_state.c
\ No newline at end of file diff --git a/linux-core/mga_dma.c b/linux-core/mga_dma.c new file mode 120000 index 00000000..f290be9b --- /dev/null +++ b/linux-core/mga_dma.c @@ -0,0 +1 @@ +../shared-core/mga_dma.c
\ No newline at end of file diff --git a/linux-core/mga_drm.h b/linux-core/mga_drm.h new file mode 120000 index 00000000..1c87036f --- /dev/null +++ b/linux-core/mga_drm.h @@ -0,0 +1 @@ +../shared-core/mga_drm.h
\ No newline at end of file diff --git a/linux-core/mga_drv.h b/linux-core/mga_drv.h new file mode 120000 index 00000000..cb0c9e1d --- /dev/null +++ b/linux-core/mga_drv.h @@ -0,0 +1 @@ +../shared-core/mga_drv.h
\ No newline at end of file diff --git a/linux-core/mga_irq.c b/linux-core/mga_irq.c new file mode 120000 index 00000000..cf521d29 --- /dev/null +++ b/linux-core/mga_irq.c @@ -0,0 +1 @@ +../shared-core/mga_irq.c
\ No newline at end of file diff --git a/linux-core/mga_state.c b/linux-core/mga_state.c new file mode 120000 index 00000000..8bda8ba9 --- /dev/null +++ b/linux-core/mga_state.c @@ -0,0 +1 @@ +../shared-core/mga_state.c
\ No newline at end of file diff --git a/linux-core/mga_ucode.h b/linux-core/mga_ucode.h new file mode 120000 index 00000000..728b9aca --- /dev/null +++ b/linux-core/mga_ucode.h @@ -0,0 +1 @@ +../shared-core/mga_ucode.h
\ No newline at end of file diff --git a/linux-core/mga_warp.c b/linux-core/mga_warp.c new file mode 120000 index 00000000..d35b3255 --- /dev/null +++ b/linux-core/mga_warp.c @@ -0,0 +1 @@ +../shared-core/mga_warp.c
\ No newline at end of file diff --git a/linux-core/nouveau_drm.h b/linux-core/nouveau_drm.h new file mode 120000 index 00000000..d300ae06 --- /dev/null +++ b/linux-core/nouveau_drm.h @@ -0,0 +1 @@ +../shared-core/nouveau_drm.h
\ No newline at end of file diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c new file mode 100644 index 00000000..91de2b31 --- /dev/null +++ b/linux-core/nouveau_drv.c @@ -0,0 +1,104 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + nouveau_PCI_IDS +}; + +extern drm_ioctl_desc_t nouveau_ioctls[]; +extern int nouveau_max_ioctl; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static struct drm_driver driver = { + .driver_features = + DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, + .load = nouveau_load, + .firstopen = nouveau_firstopen, + .lastclose = nouveau_lastclose, + .unload = nouveau_unload, + .preclose = nouveau_preclose, + .irq_preinstall = nouveau_irq_preinstall, + .irq_postinstall = nouveau_irq_postinstall, + .irq_uninstall = nouveau_irq_uninstall, + .irq_handler = nouveau_irq_handler, + .reclaim_buffers = drm_core_reclaim_buffers, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = nouveau_ioctls, + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = nouveau_compat_ioctl, +#endif + }, + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + +static int __init nouveau_init(void) +{ + driver.num_ioctls = nouveau_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit nouveau_exit(void) +{ + drm_exit(&driver); +} + +module_init(nouveau_init); +module_exit(nouveau_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/linux-core/nouveau_drv.h b/linux-core/nouveau_drv.h new file mode 120000 index 00000000..8852e264 --- 
/dev/null +++ b/linux-core/nouveau_drv.h @@ -0,0 +1 @@ +../shared-core/nouveau_drv.h
\ No newline at end of file diff --git a/linux-core/nouveau_fifo.c b/linux-core/nouveau_fifo.c new file mode 120000 index 00000000..60759a57 --- /dev/null +++ b/linux-core/nouveau_fifo.c @@ -0,0 +1 @@ +../shared-core/nouveau_fifo.c
\ No newline at end of file diff --git a/linux-core/nouveau_ioc32.c b/linux-core/nouveau_ioc32.c new file mode 100644 index 00000000..a752a581 --- /dev/null +++ b/linux-core/nouveau_ioc32.c @@ -0,0 +1,73 @@ +/** + * \file mga_ioc32.c + * + * 32-bit ioctl compatibility routines for the MGA DRM. + * + * \author Dave Airlie <airlied@linux.ie> with code from patches by Egbert Eich + * + * + * Copyright (C) Paul Mackerras 2005 + * Copyright (C) Egbert Eich 2003,2004 + * Copyright (C) Dave Airlie 2005 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + */ + +#include <linux/compat.h> +#include <linux/ioctl32.h> + +#include "drmP.h" +#include "drm.h" + +#include "nouveau_drm.h" + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card<n>. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + +#if 0 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) + fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE]; +#endif + lock_kernel(); /* XXX for now */ + if (fn != NULL) + ret = (*fn)(filp, cmd, arg); + else + ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/linux-core/nouveau_irq.c b/linux-core/nouveau_irq.c new file mode 120000 index 00000000..3137b813 --- /dev/null +++ b/linux-core/nouveau_irq.c @@ -0,0 +1 @@ +../shared-core/nouveau_irq.c
\ No newline at end of file diff --git a/linux-core/nouveau_mem.c b/linux-core/nouveau_mem.c new file mode 120000 index 00000000..a0085200 --- /dev/null +++ b/linux-core/nouveau_mem.c @@ -0,0 +1 @@ +../shared-core/nouveau_mem.c
\ No newline at end of file diff --git a/linux-core/nouveau_object.c b/linux-core/nouveau_object.c new file mode 120000 index 00000000..1c1426e3 --- /dev/null +++ b/linux-core/nouveau_object.c @@ -0,0 +1 @@ +../shared-core/nouveau_object.c
\ No newline at end of file diff --git a/linux-core/nouveau_reg.h b/linux-core/nouveau_reg.h new file mode 120000 index 00000000..2ad07397 --- /dev/null +++ b/linux-core/nouveau_reg.h @@ -0,0 +1 @@ +../shared-core/nouveau_reg.h
\ No newline at end of file diff --git a/linux-core/nouveau_state.c b/linux-core/nouveau_state.c new file mode 120000 index 00000000..b304f6bb --- /dev/null +++ b/linux-core/nouveau_state.c @@ -0,0 +1 @@ +../shared-core/nouveau_state.c
\ No newline at end of file diff --git a/linux-core/nv40_graph.c b/linux-core/nv40_graph.c new file mode 120000 index 00000000..2fe59919 --- /dev/null +++ b/linux-core/nv40_graph.c @@ -0,0 +1 @@ +../shared-core/nv40_graph.c
\ No newline at end of file diff --git a/linux-core/nv_drv.h b/linux-core/nv_drv.h new file mode 120000 index 00000000..c9617800 --- /dev/null +++ b/linux-core/nv_drv.h @@ -0,0 +1 @@ +../shared-core/nv_drv.h
\ No newline at end of file diff --git a/linux-core/r128_cce.c b/linux-core/r128_cce.c new file mode 120000 index 00000000..0c1d659e --- /dev/null +++ b/linux-core/r128_cce.c @@ -0,0 +1 @@ +../shared-core/r128_cce.c
\ No newline at end of file diff --git a/linux-core/r128_drm.h b/linux-core/r128_drm.h new file mode 120000 index 00000000..363852cb --- /dev/null +++ b/linux-core/r128_drm.h @@ -0,0 +1 @@ +../shared-core/r128_drm.h
\ No newline at end of file diff --git a/linux-core/r128_drv.h b/linux-core/r128_drv.h new file mode 120000 index 00000000..4f7e822d --- /dev/null +++ b/linux-core/r128_drv.h @@ -0,0 +1 @@ +../shared-core/r128_drv.h
\ No newline at end of file diff --git a/linux-core/r128_irq.c b/linux-core/r128_irq.c new file mode 120000 index 00000000..66d28b05 --- /dev/null +++ b/linux-core/r128_irq.c @@ -0,0 +1 @@ +../shared-core/r128_irq.c
\ No newline at end of file diff --git a/linux-core/r128_state.c b/linux-core/r128_state.c new file mode 120000 index 00000000..e83d84b5 --- /dev/null +++ b/linux-core/r128_state.c @@ -0,0 +1 @@ +../shared-core/r128_state.c
\ No newline at end of file diff --git a/linux-core/r300_cmdbuf.c b/linux-core/r300_cmdbuf.c new file mode 120000 index 00000000..6674d056 --- /dev/null +++ b/linux-core/r300_cmdbuf.c @@ -0,0 +1 @@ +../shared-core/r300_cmdbuf.c
\ No newline at end of file diff --git a/linux-core/r300_reg.h b/linux-core/r300_reg.h new file mode 120000 index 00000000..ef54eba2 --- /dev/null +++ b/linux-core/r300_reg.h @@ -0,0 +1 @@ +../shared-core/r300_reg.h
\ No newline at end of file diff --git a/linux-core/radeon_cp.c b/linux-core/radeon_cp.c new file mode 120000 index 00000000..ee860943 --- /dev/null +++ b/linux-core/radeon_cp.c @@ -0,0 +1 @@ +../shared-core/radeon_cp.c
\ No newline at end of file diff --git a/linux-core/radeon_drm.h b/linux-core/radeon_drm.h new file mode 120000 index 00000000..54f595a3 --- /dev/null +++ b/linux-core/radeon_drm.h @@ -0,0 +1 @@ +../shared-core/radeon_drm.h
\ No newline at end of file diff --git a/linux-core/radeon_drv.h b/linux-core/radeon_drv.h new file mode 120000 index 00000000..5b415ea8 --- /dev/null +++ b/linux-core/radeon_drv.h @@ -0,0 +1 @@ +../shared-core/radeon_drv.h
\ No newline at end of file diff --git a/linux-core/radeon_irq.c b/linux-core/radeon_irq.c new file mode 120000 index 00000000..2f394a5e --- /dev/null +++ b/linux-core/radeon_irq.c @@ -0,0 +1 @@ +../shared-core/radeon_irq.c
\ No newline at end of file diff --git a/linux-core/radeon_mem.c b/linux-core/radeon_mem.c new file mode 120000 index 00000000..8cc27989 --- /dev/null +++ b/linux-core/radeon_mem.c @@ -0,0 +1 @@ +../shared-core/radeon_mem.c
\ No newline at end of file diff --git a/linux-core/radeon_state.c b/linux-core/radeon_state.c new file mode 120000 index 00000000..ccee8761 --- /dev/null +++ b/linux-core/radeon_state.c @@ -0,0 +1 @@ +../shared-core/radeon_state.c
\ No newline at end of file diff --git a/linux-core/savage_bci.c b/linux-core/savage_bci.c new file mode 120000 index 00000000..b8436713 --- /dev/null +++ b/linux-core/savage_bci.c @@ -0,0 +1 @@ +../shared-core/savage_bci.c
\ No newline at end of file diff --git a/linux-core/savage_drm.h b/linux-core/savage_drm.h new file mode 120000 index 00000000..0dab2e3b --- /dev/null +++ b/linux-core/savage_drm.h @@ -0,0 +1 @@ +../shared-core/savage_drm.h
\ No newline at end of file diff --git a/linux-core/savage_drv.h b/linux-core/savage_drv.h new file mode 120000 index 00000000..8397009c --- /dev/null +++ b/linux-core/savage_drv.h @@ -0,0 +1 @@ +../shared-core/savage_drv.h
\ No newline at end of file diff --git a/linux-core/savage_state.c b/linux-core/savage_state.c new file mode 120000 index 00000000..e55dc5d4 --- /dev/null +++ b/linux-core/savage_state.c @@ -0,0 +1 @@ +../shared-core/savage_state.c
\ No newline at end of file diff --git a/linux-core/sis_drm.h b/linux-core/sis_drm.h new file mode 120000 index 00000000..36c77aac --- /dev/null +++ b/linux-core/sis_drm.h @@ -0,0 +1 @@ +../shared-core/sis_drm.h
\ No newline at end of file diff --git a/linux-core/sis_drv.h b/linux-core/sis_drv.h new file mode 120000 index 00000000..3fddfdae --- /dev/null +++ b/linux-core/sis_drv.h @@ -0,0 +1 @@ +../shared-core/sis_drv.h
\ No newline at end of file diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index eca535fb..5efbada4 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -36,11 +36,7 @@ #include "sis_drv.h" #if defined(__linux__) -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) #include <video/sisfb.h> -#else -#include <linux/sisfb.h> -#endif #endif #define VIDEO_TYPE 0 diff --git a/linux-core/tdfx_drv.h b/linux-core/tdfx_drv.h new file mode 120000 index 00000000..8df70329 --- /dev/null +++ b/linux-core/tdfx_drv.h @@ -0,0 +1 @@ +../shared-core/tdfx_drv.h
\ No newline at end of file diff --git a/linux-core/via_3d_reg.h b/linux-core/via_3d_reg.h new file mode 120000 index 00000000..90d238ec --- /dev/null +++ b/linux-core/via_3d_reg.h @@ -0,0 +1 @@ +../shared-core/via_3d_reg.h
\ No newline at end of file diff --git a/linux-core/via_dma.c b/linux-core/via_dma.c new file mode 120000 index 00000000..1f4d920f --- /dev/null +++ b/linux-core/via_dma.c @@ -0,0 +1 @@ +../shared-core/via_dma.c
\ No newline at end of file diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index fdc2bd67..2f508374 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -217,7 +217,9 @@ via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine) VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE); VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start); + DRM_WRITEMEMORYBARRIER(); VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS); + VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04); } /* @@ -496,10 +498,18 @@ via_dmablit_timer(unsigned long data) static void +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) via_dmablit_workqueue(void *data) +#else +via_dmablit_workqueue(struct work_struct *work) +#endif { +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; - drm_device_t *dev = blitq->dev; +#else + drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); +#endif + drm_device_t *dev = blitq->dev; unsigned long irqsave; drm_via_sg_info_t *cur_sg; int cur_released; @@ -562,12 +572,16 @@ via_init_dmablit(drm_device_t *dev) blitq->num_outstanding = 0; blitq->is_active = 0; blitq->aborting = 0; - blitq->blit_lock = SPIN_LOCK_UNLOCKED; + spin_lock_init(&blitq->blit_lock); for (j=0; j<VIA_NUM_BLIT_SLOTS; ++j) { DRM_INIT_WAITQUEUE(blitq->blit_queue + j); } DRM_INIT_WAITQUEUE(&blitq->busy_queue); +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq); +#else + INIT_WORK(&blitq->wq, via_dmablit_workqueue); +#endif init_timer(&blitq->poll_timer); blitq->poll_timer.function = &via_dmablit_timer; blitq->poll_timer.data = (unsigned long) blitq; diff --git a/linux-core/via_drm.h b/linux-core/via_drm.h new file mode 120000 index 00000000..7cd175d3 --- /dev/null +++ b/linux-core/via_drm.h @@ -0,0 +1 @@ +../shared-core/via_drm.h
\ No newline at end of file diff --git a/linux-core/via_drv.c b/linux-core/via_drv.c new file mode 120000 index 00000000..b6ff160e --- /dev/null +++ b/linux-core/via_drv.c @@ -0,0 +1 @@ +../shared-core/via_drv.c
\ No newline at end of file diff --git a/linux-core/via_drv.h b/linux-core/via_drv.h new file mode 120000 index 00000000..8954fe88 --- /dev/null +++ b/linux-core/via_drv.h @@ -0,0 +1 @@ +../shared-core/via_drv.h
\ No newline at end of file diff --git a/linux-core/via_irq.c b/linux-core/via_irq.c new file mode 120000 index 00000000..f615af87 --- /dev/null +++ b/linux-core/via_irq.c @@ -0,0 +1 @@ +../shared-core/via_irq.c
\ No newline at end of file diff --git a/linux-core/via_map.c b/linux-core/via_map.c new file mode 120000 index 00000000..b5056634 --- /dev/null +++ b/linux-core/via_map.c @@ -0,0 +1 @@ +../shared-core/via_map.c
\ No newline at end of file diff --git a/linux-core/via_verifier.c b/linux-core/via_verifier.c new file mode 120000 index 00000000..00b411bd --- /dev/null +++ b/linux-core/via_verifier.c @@ -0,0 +1 @@ +../shared-core/via_verifier.c
\ No newline at end of file diff --git a/linux-core/via_verifier.h b/linux-core/via_verifier.h new file mode 120000 index 00000000..62d3e287 --- /dev/null +++ b/linux-core/via_verifier.h @@ -0,0 +1 @@ +../shared-core/via_verifier.h
\ No newline at end of file diff --git a/linux-core/via_video.c b/linux-core/via_video.c new file mode 120000 index 00000000..a6d27947 --- /dev/null +++ b/linux-core/via_video.c @@ -0,0 +1 @@ +../shared-core/via_video.c
\ No newline at end of file diff --git a/scripts/create_lk_drm.sh b/scripts/create_lk_drm.sh index 4b57ce20..1028853a 100755 --- a/scripts/create_lk_drm.sh +++ b/scripts/create_lk_drm.sh @@ -27,3 +27,25 @@ cp linux-core/Makefile.kernel $OUTDIR/Makefile echo "Copying 2.6 Kernel files" cp linux-core/Kconfig $OUTDIR/ +cd $OUTDIR + +rm via_ds.[ch] +for i in via*.[ch] +do +unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DVIA_HAVE_CORE_MM $i > $i.tmp +mv $i.tmp $i +done + +rm sis_ds.[ch] +for i in sis*.[ch] +do +unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DSIS_HAVE_CORE_MM $i > $i.tmp +mv $i.tmp $i +done + +for i in i915*.[ch] +do +unifdef -D__linux__ -DI915_HAVE_FENCE -DI915_HAVE_BUFFER $i > $i.tmp +mv $i.tmp $i +done +cd - diff --git a/shared-core/Makefile.am b/shared-core/Makefile.am index cd278643..f0ebf2a3 100644 --- a/shared-core/Makefile.am +++ b/shared-core/Makefile.am @@ -29,6 +29,7 @@ klibdrminclude_HEADERS = \ i915_drm.h \ mach64_drm.h \ mga_drm.h \ + nouveau_drm.h \ r128_drm.h \ radeon_drm.h \ savage_drm.h \ diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 7c185a30..dc921d9d 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -219,7 +219,9 @@ 0x1106 0x3122 0 "VIA CLE266" 0x1106 0x7205 0 "VIA KM400" 0x1106 0x3108 0 "VIA K8M800" -0x1106 0x3344 0 "VIA P4VM800PRO" +0x1106 0x3344 0 "VIA CN700 / VM800 / P4M800Pro" +0x1106 0x3343 0 "VIA P4M890" +0x1106 0x3230 VIA_DX9_0 "VIA K8M890" [i810] 0x8086 0x7121 0 "Intel i810 GMCH" @@ -463,3 +465,233 @@ 0x10DE 0x009C NV40 "NVidia 0x009C" 0x10DE 0x009D NV40 "NVidia Quadro FX 4500" 0x10DE 0x009E NV40 "NVidia 0x009E" + +[nouveau] +0x10de 0x0008 NV_03 "EDGE 3D" +0x10de 0x0009 NV_03 "EDGE 3D" +0x10de 0x0010 NV_03 "Mutara V08" +0x10de 0x0020 NV_04 "RIVA TNT" +0x10de 0x0028 NV_04 "RIVA TNT2/TNT2 Pro" +0x10de 0x0029 NV_04 "RIVA TNT2 Ultra" +0x10de 0x002a NV_04 "Riva TnT2" +0x10de 0x002b NV_04 "Riva TnT2" +0x10de 0x002c NV_04 "Vanta/Vanta LT" +0x10de 0x002d NV_04 "RIVA TNT2 Model 64/Model 64 Pro" +0x10de 0x002e NV_04 "Vanta" +0x10de 0x002f NV_04 "Vanta" +0x10de 0x0040 NV_40 "GeForce 6800 Ultra" +0x10de 0x0041 NV_40 "GeForce 6800" +0x10de 0x0042 NV_40 "GeForce 6800 LE" +0x10de 0x0043 NV_40 "NV40.3" +0x10de 0x0044 NV_40 "GeForce 6800 XT" +0x10de 0x0045 NV_40 "GeForce 6800 GT" +0x10de 0x0046 NV_40 "GeForce 6800 GT" +0x10de 0x0047 NV_40 "GeForce 6800 GS" +0x10de 0x0048 NV_40 "GeForce 6800 XT" +0x10de 0x0049 NV_40 "NV40GL" +0x10de 0x004d NV_40 "Quadro FX 4000" +0x10de 0x004e NV_40 "Quadro FX 4000" +0x10de 0x0090 NV_40 "GeForce 7800 GTX" +0x10de 0x0091 NV_40 "GeForce 7800 GTX" +0x10de 0x0092 NV_40 "GeForce 7800 GT" +0x10de 0x0093 NV_40 "GeForce 7800 GS" +0x10de 0x0098 NV_40 "GeForce Go 7800" +0x10de 0x0099 NV_40 "GE Force Go 7800 GTX" +0x10de 0x009d NV_40 "Quadro FX4500" +0x10de 0x00a0 NV_04 "Aladdin TNT2" +0x10de 0x00c0 NV_40 "GeForce 6800 GS" +0x10de 0x00c1 NV_40 "GeForce 6800" +0x10de 0x00c2 NV_40 "GeForce 6800 LE" +0x10de 0x00c3 NV_40 "Geforce 6800 XT" +0x10de 0x00c8 NV_40 "GeForce Go 6800" +0x10de 0x00c9 NV_40 "GeForce Go 6800 Ultra" +0x10de 0x00cc NV_40 "Quadro FX Go1400" +0x10de 0x00cd NV_40 "Quadro FX 3450/4000 SDI" +0x10de 0x00ce NV_40 "Quadro FX 1400" +0x10de 0x00f0 NV_40 "GeForce 6800/GeForce 6800 Ultra" +0x10de 0x00f1 NV_40 "GeForce 6600/GeForce 6600 GT" +0x10de 0x00f2 NV_40 "GeForce 6600/GeForce 6600 GT" +0x10de 0x00f3 NV_40 "GeForce 6200" +0x10de 0x00f4 NV_40 "GeForce 6600 LE" +0x10de 0x00f5 NV_40 "GeForce 7800 GS" +0x10de 0x00f6 NV_40 "GeForce 6600 GS" +0x10de 0x00f8 NV_40 "Quadro FX 3400/4400" +0x10de 0x00f9 
NV_40 "GeForce 6800 Ultra/GeForce 6800 GT" +0x10de 0x00fa NV_30 "GeForce PCX 5750" +0x10de 0x00fb NV_30 "GeForce PCX 5900" +0x10de 0x00fc NV_30 "Quadro FX 330/GeForce PCX 5300" +0x10de 0x00fd NV_30 "Quadro FX 330/Quadro NVS280" +0x10de 0x00fe NV_30 "Quadro FX 1300" +0x10de 0x00ff NV_17 "GeForce PCX 4300" +0x10de 0x0100 NV_10 "GeForce 256 SDR" +0x10de 0x0101 NV_10 "GeForce 256 DDR" +0x10de 0x0103 NV_10 "Quadro" +0x10de 0x0110 NV_11 "GeForce2 MX/MX 400" +0x10de 0x0111 NV_11 "GeForce2 MX 100 DDR/200 DDR" +0x10de 0x0112 NV_11 "GeForce2 Go" +0x10de 0x0113 NV_11 "Quadro2 MXR/EX/Go" +0x10de 0x0140 NV_40 "GeForce 6600 GT" +0x10de 0x0141 NV_40 "GeForce 6600" +0x10de 0x0142 NV_40 "GeForce 6600 PCIe" +0x10de 0x0144 NV_40 "GeForce Go 6600" +0x10de 0x0145 NV_40 "GeForce 6610 XL" +0x10de 0x0146 NV_40 "Geforce Go 6600TE/6200TE" +0x10de 0x0148 NV_40 "GeForce Go 6600" +0x10de 0x0149 NV_40 "GeForce Go 6600 GT" +0x10de 0x014a NV_40 "Quadro NVS 440" +0x10de 0x014d NV_17 "Quadro FX 550" +0x10de 0x014e NV_40 "Quadro FX 540" +0x10de 0x014f NV_40 "GeForce 6200" +0x10de 0x0150 NV_15 "GeForce2 GTS/Pro" +0x10de 0x0151 NV_15 "GeForce2 Ti" +0x10de 0x0152 NV_15 "GeForce2 Ultra, Bladerunner" +0x10de 0x0153 NV_15 "Quadro2 Pro" +0x10de 0x0161 NV_44 "GeForce 6200 TurboCache(TM)" +0x10de 0x0162 NV_44 "GeForce 6200 SE TurboCache (TM)" +0x10de 0x0163 NV_44 "GeForce 6200 LE" +0x10de 0x0164 NV_44 "GeForce Go 6200" +0x10de 0x0165 NV_44 "Quadro NVS 285" +0x10de 0x0166 NV_44 "GeForce Go 6400" +0x10de 0x0167 NV_44 "GeForce Go 6200 TurboCache" +0x10de 0x0168 NV_44 "GeForce Go 6200 TurboCache" +0x10de 0x0170 NV_17 "GeForce4 MX 460" +0x10de 0x0171 NV_17 "GeForce4 MX 440" +0x10de 0x0172 NV_17 "GeForce4 MX 420" +0x10de 0x0173 NV_17 "GeForce4 MX 440-SE" +0x10de 0x0174 NV_17 "GeForce4 440 Go" +0x10de 0x0175 NV_17 "GeForce4 420 Go" +0x10de 0x0176 NV_17 "GeForce4 420 Go 32M" +0x10de 0x0177 NV_17 "GeForce4 460 Go" +0x10de 0x0178 NV_17 "Quadro4 550 XGL" +0x10de 0x0179 NV_17 "GeForce4 420 Go 32M" +0x10de 0x017a NV_17 "Quadro4 200/400 NVS" +0x10de 0x017b NV_17 "Quadro4 550 XGL" +0x10de 0x017c NV_17 "Quadro4 500 GoGL" +0x10de 0x017d NV_17 "GeForce4 410 Go 16M" +0x10de 0x0181 NV_17 "GeForce4 MX 440 AGP 8x" +0x10de 0x0182 NV_17 "GeForce4 MX 440SE AGP 8x" +0x10de 0x0183 NV_17 "GeForce4 MX 420 AGP 8x" +0x10de 0x0185 NV_17 "GeForce4 MX 4000 AGP 8x" +0x10de 0x0186 NV_17 "GeForce4 448 Go" +0x10de 0x0187 NV_17 "GeForce4 488 Go" +0x10de 0x0188 NV_17 "Quadro4 580 XGL" +0x10de 0x018a NV_17 "Quadro4 NVS AGP 8x" +0x10de 0x018b NV_17 "Quadro4 380 XGL" +0x10de 0x018c NV_17 "Quadro NVS 50 PCI" +0x10de 0x018d NV_17 "GeForce4 448 Go" +0x10de 0x0191 NV_50 "GeForce 8800 GTX" +0x10de 0x0193 NV_50 "GeForce 8800 GTS" +0x10de 0x01a0 NV_11|NV_NFORCE "GeForce2 MX Integrated Graphics" +0x10de 0x01d1 NV_44 "GeForce 7300 LE" +0x10de 0x01d6 NV_44 "GeForce Go 7200" +0x10de 0x01d7 NV_44 "Quadro NVS 110M / GeForce Go 7300" +0x10de 0x01d8 NV_44 "GeForce Go 7400" +0x10de 0x01da NV_44 "Quadro NVS 110M" +0x10de 0x01df NV_44 "GeForce 7300 GS" +0x10de 0x01f0 NV_17|NV_NFORCE2 "GeForce4 MX - nForce GPU" +0x10de 0x0200 NV_20 "GeForce3" +0x10de 0x0201 NV_20 "GeForce3 Ti 200" +0x10de 0x0202 NV_20 "GeForce3 Ti 500" +0x10de 0x0203 NV_20 "Quadro DCC" +0x10de 0x0211 NV_40 "GeForce 6800" +0x10de 0x0212 NV_40 "GeForce 6800 LE" +0x10de 0x0215 NV_40 "GeForce 6800 GT" +0x10de 0x0218 NV_40 "GeForce 6800 XT" +0x10de 0x0221 NV_44 "GeForce 6200" +0x10de 0x0240 NV_44 "GeForce 6150" +0x10de 0x0242 NV_44 "GeForce 6100" +0x10de 0x0250 NV_25 "GeForce4 Ti 4600" +0x10de 0x0251 NV_25 "GeForce4 Ti 4400" 
+0x10de 0x0252 NV_25 "GeForce4 Ti" +0x10de 0x0253 NV_25 "GeForce4 Ti 4200" +0x10de 0x0258 NV_25 "Quadro4 900 XGL" +0x10de 0x0259 NV_25 "Quadro4 750 XGL" +0x10de 0x025b NV_25 "Quadro4 700 XGL" +0x10de 0x0280 NV_25 "GeForce4 Ti 4800" +0x10de 0x0281 NV_25 "GeForce4 Ti 4200 AGP 8x" +0x10de 0x0282 NV_25 "GeForce4 Ti 4800 SE" +0x10de 0x0286 NV_25 "GeForce4 Ti 4200 Go AGP 8x" +0x10de 0x0288 NV_25 "Quadro4 980 XGL" +0x10de 0x0289 NV_25 "Quadro4 780 XGL" +0x10de 0x028c NV_25 "Quadro4 700 GoGL" +0x10de 0x0290 NV_40 "GeForce 7900 GTX" +0x10de 0x0291 NV_40 "GeForce 7900 GT" +0x10de 0x0292 NV_40 "GeForce 7900 GS" +0x10de 0x0298 NV_40 "GeForce Go 7900 GS" +0x10de 0x0299 NV_40 "GeForce Go 7900 GTX" +0x10de 0x029a NV_40 "Quadro FX 2500M" +0x10de 0x029b NV_40 "Quadro FX 1500M" +0x10de 0x029c NV_40 "Quadro FX 5500" +0x10de 0x029d NV_40 "Quadro FX 3500" +0x10de 0x029e NV_40 "Quadro FX 1500" +0x10de 0x029f NV_40 "Quadro FX 4500 X2" +0x10de 0x02a0 NV_20 "XGPU" +0x10de 0x02e1 NV_40 "GeForce 7600 GS" +0x10de 0x0300 NV_30 "GeForce FX" +0x10de 0x0301 NV_30 "GeForce FX 5800 Ultra" +0x10de 0x0302 NV_30 "GeForce FX 5800" +0x10de 0x0308 NV_30 "Quadro FX 2000" +0x10de 0x0309 NV_30 "Quadro FX 1000" +0x10de 0x0311 NV_30 "GeForce FX 5600 Ultra" +0x10de 0x0312 NV_30 "GeForce FX 5600" +0x10de 0x0313 NV_30 "NV31" +0x10de 0x0314 NV_30 "GeForce FX 5600XT" +0x10de 0x0316 NV_30 "NV31M" +0x10de 0x0317 NV_30 "NV31M Pro" +0x10de 0x031a NV_30 "GeForce FX Go5600" +0x10de 0x031b NV_30 "GeForce FX Go5650" +0x10de 0x031d NV_30 "NV31GLM" +0x10de 0x031e NV_30 "NV31GLM Pro" +0x10de 0x031f NV_30 "NV31GLM Pro" +0x10de 0x0320 NV_34 "GeForce FX 5200" +0x10de 0x0321 NV_34 "GeForce FX 5200 Ultra" +0x10de 0x0322 NV_34 "GeForce FX 5200" +0x10de 0x0323 NV_34 "GeForce FX 5200LE" +0x10de 0x0324 NV_34 "GeForce FX Go5200" +0x10de 0x0325 NV_34 "GeForce FX Go5250" +0x10de 0x0326 NV_34 "GeForce FX 5500" +0x10de 0x0327 NV_34 "GeForce FX 5100" +0x10de 0x0328 NV_34 "GeForce FX Go5200 32M/64M" +0x10de 0x0329 NV_34 "GeForce FX Go5200" +0x10de 0x032a NV_34 "Quadro NVS 280 PCI" +0x10de 0x032b NV_34 "Quadro FX 500/600 PCI" +0x10de 0x032c NV_34 "GeForce FX Go 5300" +0x10de 0x032d NV_34 "GeForce FX Go5100" +0x10de 0x032f NV_34 "NV34GL" +0x10de 0x0330 NV_30 "GeForce FX 5900 Ultra" +0x10de 0x0331 NV_30 "GeForce FX 5900" +0x10de 0x0332 NV_30 "GeForce FX 5900XT" +0x10de 0x0333 NV_30 "GeForce FX 5950 Ultra" +0x10de 0x0334 NV_30 "GeForce FX 5900ZT" +0x10de 0x0338 NV_30 "Quadro FX 3000" +0x10de 0x033f NV_30 "Quadro FX 700" +0x10de 0x0341 NV_30 "GeForce FX 5700 Ultra" +0x10de 0x0342 NV_30 "GeForce FX 5700" +0x10de 0x0343 NV_30 "GeForce FX 5700LE" +0x10de 0x0344 NV_30 "GeForce FX 5700VE" +0x10de 0x0345 NV_30 "NV36.5" +0x10de 0x0347 NV_30 "GeForce FX Go5700" +0x10de 0x0348 NV_30 "GeForce FX Go5700" +0x10de 0x0349 NV_30 "NV36M Pro" +0x10de 0x034b NV_30 "NV36MAP" +0x10de 0x034c NV_30 "Quadro FX Go1000" +0x10de 0x034e NV_30 "Quadro FX 1100" +0x10de 0x034f NV_30 "NV36GL" +0x10de 0x0391 NV_40 "GeForce 7600 GT" +0x10de 0x0392 NV_40 "GeForce 7600 GS" +0x10de 0x0393 NV_40 "GeForce 7300 GT" +0x10de 0x0398 NV_40 "GeForce Go 7600" +0x10de 0x03d0 NV_44 "GeForce 6100 nForce 430" +0x10de 0x03d1 NV_44 "GeForce 6100 nForce 405" +0x10de 0x03d2 NV_44 "GeForce 6100 nForce 400" +0x10de 0x03d5 NV_44 "GeForce 6100 nForce 420" +0x12d2 0x0008 NV_03 "NV1" +0x12d2 0x0009 NV_03 "DAC64" +0x12d2 0x0018 NV_03 "Riva128" +0x12d2 0x0019 NV_03 "Riva128ZX" +0x12d2 0x0020 NV_04 "TNT" +0x12d2 0x0028 NV_04 "TNT2" +0x12d2 0x0029 NV_04 "UTNT2" +0x12d2 0x002c NV_04 "VTNT2" +0x12d2 0x00a0 NV_04 "ITNT2" + diff --git 
a/shared-core/drm_sarea.h b/shared-core/drm_sarea.h index 0d5baf69..43d1114f 100644 --- a/shared-core/drm_sarea.h +++ b/shared-core/drm_sarea.h @@ -41,7 +41,7 @@ #define SAREA_MAX 0x10000 /* 64kB */ #else /* Intel 830M driver needs at least 8k SAREA */ -#define SAREA_MAX 0x2000 +#define SAREA_MAX 0x2000UL #endif /** Maximum number of drawables in the SAREA */ diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 449747a2..cd7d2b43 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -264,7 +264,7 @@ static int i915_dma_init(DRM_IOCTL_ARGS) retcode = i915_dma_resume(dev); break; default: - retcode = -EINVAL; + retcode = DRM_ERR(EINVAL); break; } @@ -361,10 +361,9 @@ static int i915_emit_cmds(drm_device_t * dev, int __user * buffer, int dwords) for (i = 0; i < dwords;) { int cmd, sz; - if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) { - + if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd))) return DRM_ERR(EINVAL); - } + if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords) return DRM_ERR(EINVAL); @@ -396,7 +395,7 @@ static int i915_emit_box(drm_device_t * dev, RING_LOCALS; if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) { - return EFAULT; + return DRM_ERR(EFAULT); } if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) { diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index 85804ce7..98f58940 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -35,7 +35,7 @@ #define DRIVER_AUTHOR "Tungsten Graphics, Inc." -#define DRIVER_NAME "i915-mm" +#define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" #define DRIVER_DATE "20060929" @@ -48,9 +48,10 @@ * 1.5: Add vblank pipe configuration * 1.6: - New ioctl for scheduling buffer swaps on vertical blank * - Support vertical blank on secondary display pipe + * 1.8: New ioctl for ARB_Occlusion_Query */ #define DRIVER_MAJOR 1 -#define DRIVER_MINOR 7 +#define DRIVER_MINOR 8 #define DRIVER_PATCHLEVEL 0 #if defined(__linux__) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index a48e1ff8..97723653 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -46,88 +46,167 @@ static void i915_vblank_tasklet(drm_device_t *dev) { drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; unsigned long irqflags; - struct list_head *list, *tmp; + struct list_head *list, *tmp, hits, *hit; + int nhits, nrects, slice[2], upper[2], lower[2], i; + unsigned counter[2] = { atomic_read(&dev->vbl_received), + atomic_read(&dev->vbl_received2) }; + drm_drawable_info_t *drw; + drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; + u32 cpp = dev_priv->cpp; + u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | + XY_SRC_COPY_BLT_WRITE_ALPHA | + XY_SRC_COPY_BLT_WRITE_RGB) + : XY_SRC_COPY_BLT_CMD; + u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) | + (cpp << 23) | (1 << 24); + RING_LOCALS; DRM_DEBUG("\n"); + INIT_LIST_HEAD(&hits); + + nhits = nrects = 0; + spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); + /* Find buffer swaps scheduled for this vertical blank */ list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { drm_i915_vbl_swap_t *vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); - atomic_t *counter = vbl_swap->pipe ? 
&dev->vbl_received2 : - &dev->vbl_received; - - if ((atomic_read(counter) - vbl_swap->sequence) <= (1<<23)) { - drm_drawable_info_t *drw; - - spin_unlock(&dev_priv->swaps_lock); - - spin_lock(&dev->drw_lock); - - drw = drm_get_drawable_info(dev, vbl_swap->drw_id); - - if (drw) { - int i, num_rects = drw->num_rects; - drm_clip_rect_t *rect = drw->rects; - drm_i915_sarea_t *sarea_priv = - dev_priv->sarea_priv; - u32 cpp = dev_priv->cpp; - u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD | - XY_SRC_COPY_BLT_WRITE_ALPHA | - XY_SRC_COPY_BLT_WRITE_RGB) - : XY_SRC_COPY_BLT_CMD; - u32 pitchropcpp = (sarea_priv->pitch * cpp) | - (0xcc << 16) | (cpp << 23) | - (1 << 24); - RING_LOCALS; - - i915_kernel_lost_context(dev); - - BEGIN_LP_RING(6); - - OUT_RING(GFX_OP_DRAWRECT_INFO); - OUT_RING(0); - OUT_RING(0); - OUT_RING(sarea_priv->width | - sarea_priv->height << 16); - OUT_RING(sarea_priv->width | - sarea_priv->height << 16); - OUT_RING(0); - ADVANCE_LP_RING(); + if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) + continue; - sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; + list_del(list); + dev_priv->swaps_pending--; - for (i = 0; i < num_rects; i++, rect++) { - BEGIN_LP_RING(8); + spin_unlock(&dev_priv->swaps_lock); + spin_lock(&dev->drw_lock); - OUT_RING(cmd); - OUT_RING(pitchropcpp); - OUT_RING((rect->y1 << 16) | rect->x1); - OUT_RING((rect->y2 << 16) | rect->x2); - OUT_RING(sarea_priv->front_offset); - OUT_RING((rect->y1 << 16) | rect->x1); - OUT_RING(pitchropcpp & 0xffff); - OUT_RING(sarea_priv->back_offset); + drw = drm_get_drawable_info(dev, vbl_swap->drw_id); - ADVANCE_LP_RING(); - } + if (!drw) { + spin_unlock(&dev->drw_lock); + drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); + spin_lock(&dev_priv->swaps_lock); + continue; + } + + list_for_each(hit, &hits) { + drm_i915_vbl_swap_t *swap_cmp = + list_entry(hit, drm_i915_vbl_swap_t, head); + drm_drawable_info_t *drw_cmp = + drm_get_drawable_info(dev, swap_cmp->drw_id); + + if (drw_cmp && + drw_cmp->rects[0].y1 > drw->rects[0].y1) { + list_add_tail(list, hit); + break; } + } - spin_unlock(&dev->drw_lock); + spin_unlock(&dev->drw_lock); - spin_lock(&dev_priv->swaps_lock); + /* List of hits was empty, or we reached the end of it */ + if (hit == &hits) + list_add_tail(list, hits.prev); - list_del(list); + nhits++; - drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER); + spin_lock(&dev_priv->swaps_lock); + } + + if (nhits == 0) { + spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + return; + } + + spin_unlock(&dev_priv->swaps_lock); + + i915_kernel_lost_context(dev); - dev_priv->swaps_pending--; + BEGIN_LP_RING(6); + + OUT_RING(GFX_OP_DRAWRECT_INFO); + OUT_RING(0); + OUT_RING(0); + OUT_RING(sarea_priv->width | sarea_priv->height << 16); + OUT_RING(sarea_priv->width | sarea_priv->height << 16); + OUT_RING(0); + + ADVANCE_LP_RING(); + + sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT; + + upper[0] = upper[1] = 0; + slice[0] = max(sarea_priv->pipeA_h / nhits, 1); + slice[1] = max(sarea_priv->pipeB_h / nhits, 1); + lower[0] = sarea_priv->pipeA_y + slice[0]; + lower[1] = sarea_priv->pipeB_y + slice[0]; + + spin_lock(&dev->drw_lock); + + /* Emit blits for buffer swaps, partitioning both outputs into as many + * slices as there are buffer swaps scheduled in order to avoid tearing + * (based on the assumption that a single buffer swap would always + * complete before scanout starts). 
+ */ + for (i = 0; i++ < nhits; + upper[0] = lower[0], lower[0] += slice[0], + upper[1] = lower[1], lower[1] += slice[1]) { + if (i == nhits) + lower[0] = lower[1] = sarea_priv->height; + + list_for_each(hit, &hits) { + drm_i915_vbl_swap_t *swap_hit = + list_entry(hit, drm_i915_vbl_swap_t, head); + drm_clip_rect_t *rect; + int num_rects, pipe; + unsigned short top, bottom; + + drw = drm_get_drawable_info(dev, swap_hit->drw_id); + + if (!drw) + continue; + + rect = drw->rects; + pipe = swap_hit->pipe; + top = upper[pipe]; + bottom = lower[pipe]; + + for (num_rects = drw->num_rects; num_rects--; rect++) { + int y1 = max(rect->y1, top); + int y2 = min(rect->y2, bottom); + + if (y1 >= y2) + continue; + + BEGIN_LP_RING(8); + + OUT_RING(cmd); + OUT_RING(pitchropcpp); + OUT_RING((y1 << 16) | rect->x1); + OUT_RING((y2 << 16) | rect->x2); + OUT_RING(sarea_priv->front_offset); + OUT_RING((y1 << 16) | rect->x1); + OUT_RING(pitchropcpp & 0xffff); + OUT_RING(sarea_priv->back_offset); + + ADVANCE_LP_RING(); + } } } - spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); + spin_unlock_irqrestore(&dev->drw_lock, irqflags); + + list_for_each_safe(hit, tmp, &hits) { + drm_i915_vbl_swap_t *swap_hit = + list_entry(hit, drm_i915_vbl_swap_t, head); + + list_del(hit); + + drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER); + } } irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) @@ -453,7 +532,7 @@ int i915_vblank_swap(DRM_IOCTL_ARGS) if (!drm_get_drawable_info(dev, swap.drawable)) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_ERROR("Invalid drawable ID %d\n", swap.drawable); + DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable); return DRM_ERR(EINVAL); } diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h new file mode 100644 index 00000000..3f363192 --- /dev/null +++ b/shared-core/nouveau_drm.h @@ -0,0 +1,152 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __NOUVEAU_DRM_H__ +#define __NOUVEAU_DRM_H__ + +typedef struct drm_nouveau_fifo_alloc { + int channel; + uint32_t put_base; + /* FIFO control regs */ + drm_handle_t ctrl; + int ctrl_size; + /* DMA command buffer */ + drm_handle_t cmdbuf; + int cmdbuf_size; +} +drm_nouveau_fifo_alloc_t; + +#define NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND 0x1 +#define NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY 0x2 +#define NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE 0x4 +#define NV_DMA_CONTEXT_FLAGS_MONO 0x8 + +typedef struct drm_nouveau_object_init { + uint32_t handle; + int class; + uint32_t flags; + /* these are object handles */ + uint32_t dma0; + uint32_t dma1; + uint32_t dma_notifier; +} +drm_nouveau_object_init_t; + +typedef struct drm_nouveau_dma_object_init { + uint32_t handle; + int access; + int target; + uint32_t offset; + int size; +} +drm_nouveau_dma_object_init_t; + +#define NOUVEAU_MEM_FB 0x00000001 +#define NOUVEAU_MEM_AGP 0x00000002 +#define NOUVEAU_MEM_FB_ACCEPTABLE 0x00000004 +#define NOUVEAU_MEM_AGP_ACCEPTABLE 0x00000008 +#define NOUVEAU_MEM_PINNED 0x00000010 +#define NOUVEAU_MEM_USER_BACKED 0x00000020 +#define NOUVEAU_MEM_MAPPED 0x00000040 +#define NOUVEAU_MEM_INSTANCE 0x00000080 /* internal */ + +typedef struct drm_nouveau_mem_alloc { + int flags; + int alignment; + uint64_t size; // in bytes + uint64_t region_offset; +} +drm_nouveau_mem_alloc_t; + +typedef struct drm_nouveau_mem_free { + int flags; + uint64_t region_offset; +} +drm_nouveau_mem_free_t; + +/* FIXME : maybe unify {GET,SET}PARAMs */ +#define NOUVEAU_GETPARAM_PCI_VENDOR 3 +#define NOUVEAU_GETPARAM_PCI_DEVICE 4 +#define NOUVEAU_GETPARAM_BUS_TYPE 5 +#define NOUVEAU_GETPARAM_FB_PHYSICAL 6 +#define NOUVEAU_GETPARAM_AGP_PHYSICAL 7 +typedef struct drm_nouveau_getparam { + unsigned int param; + uint64_t value; +} +drm_nouveau_getparam_t; + +#define NOUVEAU_SETPARAM_CMDBUF_LOCATION 1 +#define NOUVEAU_SETPARAM_CMDBUF_SIZE 2 +typedef struct drm_nouveau_setparam { + unsigned int param; + unsigned int value; +} +drm_nouveau_setparam_t; + +enum nouveau_card_type { + NV_UNKNOWN =0, + NV_01 =1, + NV_03 =3, + NV_04 =4, + NV_05 =5, + NV_10 =10, + NV_11 =10, + NV_15 =10, + NV_17 =10, + NV_20 =20, + NV_25 =20, + NV_30 =30, + NV_34 =30, + NV_40 =40, + NV_44 =44, + NV_50 =50, + NV_LAST =0xffff, +}; + +enum nouveau_bus_type { + NV_AGP =0, + NV_PCI =1, + NV_PCIE =2, +}; + +#define NOUVEAU_MAX_SAREA_CLIPRECTS 16 + +typedef struct drm_nouveau_sarea { + /* the cliprects */ + drm_clip_rect_t boxes[NOUVEAU_MAX_SAREA_CLIPRECTS]; + unsigned int nbox; +} +drm_nouveau_sarea_t; + +#define DRM_NOUVEAU_FIFO_ALLOC 0x00 +#define DRM_NOUVEAU_OBJECT_INIT 0x01 +#define DRM_NOUVEAU_DMA_OBJECT_INIT 0x02 // We don't want this eventually.. +#define DRM_NOUVEAU_MEM_ALLOC 0x03 +#define DRM_NOUVEAU_MEM_FREE 0x04 +#define DRM_NOUVEAU_GETPARAM 0x05 +#define DRM_NOUVEAU_SETPARAM 0x06 + +#endif /* __NOUVEAU_DRM_H__ */ + diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h new file mode 100644 index 00000000..6b09046c --- /dev/null +++ b/shared-core/nouveau_drv.h @@ -0,0 +1,222 @@ +/* + * Copyright 2005 Stephane Marchesin. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __NOUVEAU_DRV_H__ +#define __NOUVEAU_DRV_H__ + +#define DRIVER_AUTHOR "Stephane Marchesin" +#define DRIVER_EMAIL "dri-devel@lists.sourceforge.net" + +#define DRIVER_NAME "nouveau" +#define DRIVER_DESC "nVidia Riva/TNT/GeForce" +#define DRIVER_DATE "20060213" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 2 + +#define NOUVEAU_FAMILY 0x0000FFFF +#define NOUVEAU_FLAGS 0xFFFF0000 + +#include "nouveau_drm.h" +#include "nouveau_reg.h" + +struct mem_block { + struct mem_block *next; + struct mem_block *prev; + uint64_t start; + uint64_t size; + DRMFILE filp; /* 0: free, -1: heap, other: real files */ + int flags; + drm_local_map_t *map; +}; + +enum nouveau_flags { + NV_NFORCE =0x10000000, + NV_NFORCE2 =0x20000000 +}; + +struct nouveau_object +{ + struct nouveau_object *next; + struct nouveau_object *prev; + + struct mem_block *instance; + uint32_t ht_loc; + + uint32_t handle; + int class; + int engine; +}; + +#define NV_DMA_TARGET_VIDMEM 0 +#define NV_DMA_TARGET_PCI 2 +#define NV_DMA_TARGET_AGP 3 +struct nouveau_fifo +{ + int used; + /* owner of this fifo */ + DRMFILE filp; + /* mapping of the fifo itself */ + drm_local_map_t *map; + /* mapping of the regs controling the fifo */ + drm_local_map_t *regs; + /* dma object for the command buffer itself */ + struct mem_block *cmdbuf_mem; + struct nouveau_object *cmdbuf_obj; + /* PGRAPH context, for cards that keep it in RAMIN */ + struct mem_block *ramin_grctx; + /* objects belonging to this fifo */ + struct nouveau_object *objs; + + /* XXX move this in PGRAPH struct */ + uint32_t pgraph_ctx_user; +}; + +struct nouveau_config { + struct { + int location; + int size; + } cmdbuf; +}; + +typedef struct drm_nouveau_private { + /* the card type, takes NV_* as values */ + int card_type; + int flags; + + drm_local_map_t *mmio; + drm_local_map_t *fb; + drm_local_map_t *ramin; /* NV40 onwards */ + + //TODO: Remove me, I'm bogus :) + int cur_fifo; + + struct nouveau_object *fb_obj; + int cmdbuf_ch_size; + struct mem_block* cmdbuf_alloc; + + int fifo_alloc_count; + struct nouveau_fifo fifos[NV_MAX_FIFO_NUMBER]; + + /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */ + uint32_t ramin_size; + uint32_t ramht_offset; + uint32_t ramht_size; + uint32_t ramht_bits; + uint32_t ramfc_offset; + uint32_t ramfc_size; + uint32_t ramro_offset; + uint32_t ramro_size; + + /* base physical adresses */ + uint64_t 
fb_phys; + uint64_t agp_phys; + + /* the mtrr covering the FB */ + int fb_mtrr; + + struct mem_block *agp_heap; + struct mem_block *fb_heap; + struct mem_block *fb_nomap_heap; + struct mem_block *ramin_heap; + + struct nouveau_config config; +} +drm_nouveau_private_t; + +/* nouveau_state.c */ +extern void nouveau_preclose(drm_device_t * dev, DRMFILE filp); +extern int nouveau_load(struct drm_device *dev, unsigned long flags); +extern int nouveau_firstopen(struct drm_device *dev); +extern void nouveau_lastclose(struct drm_device *dev); +extern int nouveau_unload(struct drm_device *dev); +extern int nouveau_ioctl_getparam(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_setparam(DRM_IOCTL_ARGS); +extern void nouveau_wait_for_idle(struct drm_device *dev); + +/* nouveau_mem.c */ +extern uint64_t nouveau_mem_fb_amount(struct drm_device *dev); +extern void nouveau_mem_release(DRMFILE filp, struct mem_block *heap); +extern int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS); +extern struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp); +extern void nouveau_mem_free(struct drm_device* dev, struct mem_block*); +extern int nouveau_mem_init(struct drm_device *dev); +extern void nouveau_mem_close(struct drm_device *dev); +extern int nouveau_instmem_init(struct drm_device *dev, + uint32_t offset); +extern struct mem_block* nouveau_instmem_alloc(struct drm_device *dev, + uint32_t size, uint32_t align); +extern void nouveau_instmem_free(struct drm_device *dev, + struct mem_block *block); +extern uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index); +extern void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index, + uint32_t val); + +/* nouveau_fifo.c */ +extern int nouveau_fifo_init(drm_device_t *dev); +extern int nouveau_fifo_number(drm_device_t *dev); +extern void nouveau_fifo_cleanup(drm_device_t *dev, DRMFILE filp); +extern int nouveau_fifo_id_get(drm_device_t *dev, DRMFILE filp); +extern void nouveau_fifo_free(drm_device_t *dev, int channel); + +/* nouveau_object.c */ +extern void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp); +extern struct nouveau_object *nouveau_dma_object_create(drm_device_t *dev, + uint32_t offset, uint32_t size, int access, uint32_t target); +extern int nouveau_ioctl_object_init(DRM_IOCTL_ARGS); +extern int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS); +extern uint32_t nouveau_chip_instance_get(drm_device_t *dev, struct mem_block *mem); + +/* nouveau_irq.c */ +extern irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS); +extern void nouveau_irq_preinstall(drm_device_t*); +extern void nouveau_irq_postinstall(drm_device_t*); +extern void nouveau_irq_uninstall(drm_device_t*); + +/* nv40_graph.c */ +extern int nv40_graph_init(drm_device_t *dev); +extern int nv40_graph_context_create(drm_device_t *dev, int channel); +extern void nv40_graph_context_save_current(drm_device_t *dev); +extern void nv40_graph_context_restore(drm_device_t *dev, int channel); + +extern long nouveau_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +#if defined(__powerpc__) +#define NV_READ(reg) in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) ) +#define NV_WRITE(reg,val) out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) ) +#else +#define NV_READ(reg) DRM_READ32( dev_priv->mmio, (reg) ) +#define NV_WRITE(reg,val) DRM_WRITE32( dev_priv->mmio, (reg), (val) ) +#endif + +#define 
INSTANCE_WR(mem,ofs,val) nouveau_instmem_w32(dev_priv,(mem),(ofs),(val)) +#define INSTANCE_RD(mem,ofs) nouveau_instmem_r32(dev_priv,(mem),(ofs)) + +#endif /* __NOUVEAU_DRV_H__ */ + diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c new file mode 100644 index 00000000..e5f825e6 --- /dev/null +++ b/shared-core/nouveau_fifo.c @@ -0,0 +1,674 @@ +/* + * Copyright 2005-2006 Stephane Marchesin + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + + +/* returns the number of hw fifos */ +int nouveau_fifo_number(drm_device_t* dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + switch(dev_priv->card_type) + { + case NV_03: + return 8; + case NV_04: + case NV_05: + return 16; + default: + return 32; + } +} + +/* returns the size of fifo context */ +static int nouveau_fifo_ctx_size(drm_device_t* dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + + if (dev_priv->card_type >= NV_40) + return 128; + else if (dev_priv->card_type >= NV_10) + return 64; + else + return 32; +} + +/*********************************** + * functions doing the actual work + ***********************************/ + +/* voir nv_xaa.c : NVResetGraphics + * mémoire mappée par nv_driver.c : NVMapMem + * voir nv_driver.c : NVPreInit + */ + +static int nouveau_fifo_instmem_configure(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + /* Clear start of RAMIN, enough to cover RAMFC/HT/RO basically */ + for (i=0x00710000; i<0x00730000; i++) + NV_WRITE(i, 0x00000000); + + /* FIFO hash table (RAMHT) + * use 4k hash table at RAMIN+0x10000 + * TODO: extend the hash table + */ + dev_priv->ramht_offset = 0x10000; + dev_priv->ramht_bits = 9; + dev_priv->ramht_size = (1 << dev_priv->ramht_bits); + NV_WRITE(NV_PFIFO_RAMHT, + (0x03 << 24) /* search 128 */ | + ((dev_priv->ramht_bits - 9) << 16) | + (dev_priv->ramht_offset >> 8) + ); + DRM_DEBUG("RAMHT offset=0x%x, size=%d\n", + dev_priv->ramht_offset, + dev_priv->ramht_size); + + /* FIFO runout table (RAMRO) - 512k at 0x11200 */ + dev_priv->ramro_offset = 0x11200; + dev_priv->ramro_size = 512; + NV_WRITE(NV_PFIFO_RAMRO, dev_priv->ramro_offset>>8); + DRM_DEBUG("RAMRO offset=0x%x, size=%d\n", + dev_priv->ramro_offset, + dev_priv->ramro_size); + + /* FIFO context table (RAMFC) + * NV40 : Not sure exactly how to position RAMFC on some cards, + * 
0x30002 seems to position it at RAMIN+0x20000 on these + * cards. RAMFC is 4kb (32 fifos, 128byte entries). + * Others: Position RAMFC at RAMIN+0x11400 + */ + switch(dev_priv->card_type) + { + case NV_50: + case NV_40: + dev_priv->ramfc_offset = 0x20000; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV40_PFIFO_RAMFC, 0x30002); + break; + case NV_44: + dev_priv->ramfc_offset = 0x20000; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV40_PFIFO_RAMFC, ((nouveau_mem_fb_amount(dev)-512*1024+dev_priv->ramfc_offset)>>16) | + (2 << 16)); + break; + case NV_30: + case NV_20: + case NV_10: + dev_priv->ramfc_offset = 0x11400; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV_PFIFO_RAMFC, (dev_priv->ramfc_offset>>8) | + (1 << 16) /* 64 Bytes entry*/); + break; + case NV_04: + case NV_03: + dev_priv->ramfc_offset = 0x11400; + dev_priv->ramfc_size = nouveau_fifo_number(dev) * nouveau_fifo_ctx_size(dev); + NV_WRITE(NV_PFIFO_RAMFC, dev_priv->ramfc_offset>>8); + break; + } + DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", + dev_priv->ramfc_offset, + dev_priv->ramfc_size); + + if (nouveau_instmem_init(dev, dev_priv->ramfc_offset + + dev_priv->ramfc_size)) + return 1; + + return 0; +} + +int nouveau_fifo_init(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; + + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + + ret = nouveau_fifo_instmem_configure(dev); + if (ret) { + DRM_ERROR("Failed to configure instance memory\n"); + return ret; + } + + /* FIXME remove all the stuff that's done in nouveau_fifo_alloc */ + + DRM_DEBUG("Setting defaults for remaining PFIFO regs\n"); + + /* All channels into PIO mode */ + NV_WRITE(NV_PFIFO_MODE, 0x00000000); + + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); + /* Channel 0 active, PIO mode */ + NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00000000); + /* PUT and GET to 0 */ + NV_WRITE(NV_PFIFO_CACH1_DMAP, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAP, 0x00000000); + /* No cmdbuf object */ + NV_WRITE(NV_PFIFO_CACH1_DMAI, 0x00000000); + NV_WRITE(NV_PFIFO_CACH0_PSH0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH0_PUL0, 0x00000000); + NV_WRITE(NV_PFIFO_SIZE, 0x0000FFFF); + NV_WRITE(NV_PFIFO_CACH1_HASH, 0x0000FFFF); + NV_WRITE(NV_PFIFO_CACH0_PUL1, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_DMAC, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_ENG, 0x00000000); +#ifdef __BIG_ENDIAN + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4 | + NV_PFIFO_CACH1_BIG_ENDIAN); +#else + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif + NV_WRITE(NV_PFIFO_CACH1_DMAPSH, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); + + NV_WRITE(NV_PGRAPH_CTX_USER, 0x0); + NV_WRITE(NV_PFIFO_DELAY_0, 0xff /* retrycount*/ ); + if (dev_priv->card_type >= NV_40) + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x00002001); + else + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10110000); + + NV_WRITE(NV_PFIFO_DMA_TIMESLICE, 0x001fffff); + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + + return 0; +} + +static int +nouveau_fifo_cmdbuf_alloc(struct drm_device *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_config 
*config = &dev_priv->config; + struct mem_block *cb; + struct nouveau_object *cb_dma = NULL; + int cb_min_size = max(NV03_FIFO_SIZE,PAGE_SIZE); + + /* Defaults for unconfigured values */ + if (!config->cmdbuf.location) + config->cmdbuf.location = NOUVEAU_MEM_FB; + if (!config->cmdbuf.size || config->cmdbuf.size < cb_min_size) + config->cmdbuf.size = cb_min_size; + + cb = nouveau_mem_alloc(dev, 0, config->cmdbuf.size, + config->cmdbuf.location | NOUVEAU_MEM_MAPPED, + (DRMFILE)-2); + if (!cb) { + DRM_ERROR("Couldn't allocate DMA command buffer.\n"); + return DRM_ERR(ENOMEM); + } + + if (cb->flags & NOUVEAU_MEM_AGP) { + cb_dma = nouveau_dma_object_create(dev, + cb->start, cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_AGP); + } else if (dev_priv->card_type != NV_04) { + cb_dma = nouveau_dma_object_create(dev, + cb->start - drm_get_resource_start(dev, 1), + cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_VIDMEM); + } else { + /* NV04 cmdbuf hack, from original ddx.. not sure of it's + * exact reason for existing :) PCI access to cmdbuf in + * VRAM. + */ + cb_dma = nouveau_dma_object_create(dev, + cb->start, cb->size, + NV_DMA_ACCESS_RO, NV_DMA_TARGET_PCI); + } + + if (!cb_dma) { + nouveau_mem_free(dev, cb); + DRM_ERROR("Failed to alloc DMA object for command buffer\n"); + return DRM_ERR(ENOMEM); + } + + dev_priv->fifos[channel].cmdbuf_mem = cb; + dev_priv->fifos[channel].cmdbuf_obj = cb_dma; + return 0; +} + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV04_RAMFC_##offset, (val)) +static void nouveau_nv04_context_init(drm_device_t *dev, + drm_nouveau_fifo_alloc_t *init) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *cb_obj; + uint32_t fifoctx, ctx_size = 32; + int i; + + cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj; + + fifoctx=NV_RAMIN+dev_priv->ramfc_offset+init->channel*ctx_size; + // clear the fifo context + for(i=0;i<ctx_size/4;i++) + NV_WRITE(fifoctx+4*i,0x0); + + RAMFC_WR(DMA_PUT , init->put_base); + RAMFC_WR(DMA_GET , init->put_base); + RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, cb_obj->instance)); +#ifdef __BIG_ENDIAN + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4|NV_PFIFO_CACH1_BIG_ENDIAN); +#else + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif +} +#undef RAMFC_WR + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV10_RAMFC_##offset, (val)) +static void nouveau_nv10_context_init(drm_device_t *dev, + drm_nouveau_fifo_alloc_t *init) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *cb_obj; + uint32_t fifoctx; + int i; + + cb_obj = dev_priv->fifos[init->channel].cmdbuf_obj; + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*64; + for (i=0;i<64;i+=4) + NV_WRITE(fifoctx + i, 0); + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + RAMFC_WR(DMA_PUT , init->put_base); + RAMFC_WR(DMA_GET , init->put_base); + RAMFC_WR(DMA_INSTANCE , nouveau_chip_instance_get(dev, + cb_obj->instance)); +#ifdef __BIG_ENDIAN + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4 | + NV_PFIFO_CACH1_BIG_ENDIAN); +#else + RAMFC_WR(DMA_FETCH, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif + RAMFC_WR(DMA_SUBROUTINE, 0); +} + +static void 
nouveau_nv10_context_save(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + int channel; + + channel = NV_READ(NV_PFIFO_CACH1_PSH1) & (nouveau_fifo_number(dev)-1); + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*64; + + RAMFC_WR(DMA_PUT , NV_READ(NV_PFIFO_CACH1_DMAP)); + RAMFC_WR(DMA_GET , NV_READ(NV_PFIFO_CACH1_DMAG)); + RAMFC_WR(REF_CNT , NV_READ(NV_PFIFO_CACH1_REF_CNT)); + RAMFC_WR(DMA_INSTANCE , NV_READ(NV_PFIFO_CACH1_DMAI)); + RAMFC_WR(DMA_STATE , NV_READ(NV_PFIFO_CACH1_DMAS)); + RAMFC_WR(DMA_FETCH , NV_READ(NV_PFIFO_CACH1_DMAF)); + RAMFC_WR(ENGINE , NV_READ(NV_PFIFO_CACH1_ENG)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV_PFIFO_CACH1_PUL1)); + RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV_PFIFO_CACH1_ACQUIRE_VALUE)); + RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP)); + RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE , NV_READ(NV_PFIFO_CACH1_SEMAPHORE)); + RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV_PFIFO_CACH1_DMASR)); +} +#undef RAMFC_WR + +#define RAMFC_WR(offset, val) NV_WRITE(fifoctx + NV40_RAMFC_##offset, (val)) +static void nouveau_nv40_context_init(drm_device_t *dev, + drm_nouveau_fifo_alloc_t *init) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[init->channel]; + uint32_t fifoctx, cb_inst, grctx_inst; + int i; + + cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + grctx_inst = nouveau_chip_instance_get(dev, chan->ramin_grctx); + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + init->channel*128; + for (i=0;i<128;i+=4) + NV_WRITE(fifoctx + i, 0); + + /* Fill entries that are seen filled in dumps of nvidia driver just + * after channel's is put into DMA mode + */ + RAMFC_WR(DMA_PUT , init->put_base); + RAMFC_WR(DMA_GET , init->put_base); + RAMFC_WR(DMA_INSTANCE , cb_inst); + RAMFC_WR(DMA_FETCH , NV_PFIFO_CACH1_DMAF_TRIG_128_BYTES | + NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES | + NV_PFIFO_CACH1_DMAF_MAX_REQS_8 | +#ifdef __BIG_ENDIAN + NV_PFIFO_CACH1_BIG_ENDIAN | +#endif + 0x30000000 /* no idea.. 
*/); + RAMFC_WR(DMA_SUBROUTINE, init->put_base); + RAMFC_WR(GRCTX_INSTANCE, grctx_inst); + RAMFC_WR(DMA_TIMESLICE , 0x0001FFFF); +} + +static void nouveau_nv40_context_save(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fifoctx; + int channel; + + channel = NV_READ(NV_PFIFO_CACH1_PSH1) & (nouveau_fifo_number(dev)-1); + fifoctx = NV_RAMIN + dev_priv->ramfc_offset + channel*128; + + RAMFC_WR(DMA_PUT , NV_READ(NV_PFIFO_CACH1_DMAP)); + RAMFC_WR(DMA_GET , NV_READ(NV_PFIFO_CACH1_DMAG)); + RAMFC_WR(REF_CNT , NV_READ(NV_PFIFO_CACH1_REF_CNT)); + RAMFC_WR(DMA_INSTANCE , NV_READ(NV_PFIFO_CACH1_DMAI)); + RAMFC_WR(DMA_DCOUNT , NV_READ(NV_PFIFO_CACH1_DMA_DCOUNT)); + RAMFC_WR(DMA_STATE , NV_READ(NV_PFIFO_CACH1_DMAS)); + RAMFC_WR(DMA_FETCH , NV_READ(NV_PFIFO_CACH1_DMAF)); + RAMFC_WR(ENGINE , NV_READ(NV_PFIFO_CACH1_ENG)); + RAMFC_WR(PULL1_ENGINE , NV_READ(NV_PFIFO_CACH1_PUL1)); + RAMFC_WR(ACQUIRE_VALUE , NV_READ(NV_PFIFO_CACH1_ACQUIRE_VALUE)); + RAMFC_WR(ACQUIRE_TIMESTAMP, NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP)); + RAMFC_WR(ACQUIRE_TIMEOUT , NV_READ(NV_PFIFO_CACH1_ACQUIRE_TIMEOUT)); + RAMFC_WR(SEMAPHORE , NV_READ(NV_PFIFO_CACH1_SEMAPHORE)); + RAMFC_WR(DMA_SUBROUTINE , NV_READ(NV_PFIFO_CACH1_DMAG)); + RAMFC_WR(GRCTX_INSTANCE , NV_READ(NV40_PFIFO_GRCTX_INSTANCE)); + RAMFC_WR(DMA_TIMESLICE , NV_READ(NV_PFIFO_DMA_TIMESLICE) & 0x1FFFF); + RAMFC_WR(UNK_40 , NV_READ(NV40_PFIFO_UNK32E4)); +} +#undef RAMFC_WR + +/* This function should load values from RAMFC into PFIFO, but for now + * it just clobbers PFIFO with what nouveau_fifo_alloc used to setup + * unconditionally. + */ +static void +nouveau_fifo_context_restore(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t cb_inst; + + cb_inst = nouveau_chip_instance_get(dev, chan->cmdbuf_obj->instance); + + // FIXME check if we need to refill the time quota with something like NV_WRITE(0x204C, 0x0003FFFF); + + if (dev_priv->card_type >= NV_40) + NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00010000|channel); + else + NV_WRITE(NV_PFIFO_CACH1_PSH1, 0x00000100|channel); + + NV_WRITE(NV_PFIFO_CACH1_DMAP, 0 /*RAMFC_DMA_PUT*/); + NV_WRITE(NV_PFIFO_CACH1_DMAG, 0 /*RAMFC_DMA_GET*/); + NV_WRITE(NV_PFIFO_CACH1_DMAI, cb_inst); + NV_WRITE(NV_PFIFO_SIZE , 0x0000FFFF); + NV_WRITE(NV_PFIFO_CACH1_HASH, 0x0000FFFF); + + NV_WRITE(NV_PFIFO_CACH0_PUL1, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_DMAC, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_ENG, 0x00000000); +#ifdef __BIG_ENDIAN + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4|NV_PFIFO_CACH1_BIG_ENDIAN); +#else + NV_WRITE(NV_PFIFO_CACH1_DMAF, NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES|NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES|NV_PFIFO_CACH1_DMAF_MAX_REQS_4); +#endif +} + +/* allocates and initializes a fifo for user space consumption */ +static int nouveau_fifo_alloc(drm_device_t* dev,drm_nouveau_fifo_alloc_t* init, DRMFILE filp) +{ + int i; + int ret; + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct nouveau_object *cb_obj; + + /* + * Alright, here is the full story + * Nvidia cards have multiple hw fifo contexts (praise them for that, + * no complicated crash-prone context switches) + * We allocate a new context for each app and let it write to it directly + * (woo, full userspace command submission !) 
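/*
 * A small sketch, not part of the patch itself: the per-channel RAMFC context
 * base that the nouveau_nv*_context_init/save helpers above compute inline is
 * always NV_RAMIN + ramfc_offset + channel * ctx_size, where ctx_size matches
 * nouveau_fifo_ctx_size() (32/64/128 bytes).  The helper name is hypothetical.
 */
static inline uint32_t nouveau_ramfc_base(drm_device_t *dev, int channel)
{
	drm_nouveau_private_t *dev_priv = dev->dev_private;

	return NV_RAMIN + dev_priv->ramfc_offset +
	       channel * nouveau_fifo_ctx_size(dev);
}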
+ * When there are no more contexts, you lost + */ + for(i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used==0) + break; + + DRM_INFO("Allocating FIFO number %d\n", i); + /* no more fifos. you lost. */ + if (i==nouveau_fifo_number(dev)) + return DRM_ERR(EINVAL); + + /* allocate a command buffer, and create a dma object for the gpu */ + ret = nouveau_fifo_cmdbuf_alloc(dev, i); + if (ret) return ret; + cb_obj = dev_priv->fifos[i].cmdbuf_obj; + + /* that fifo is used */ + dev_priv->fifos[i].used=1; + dev_priv->fifos[i].filp=filp; + + init->channel = i; + init->put_base = 0; + dev_priv->cur_fifo = init->channel; + dev_priv->fifos[i].pgraph_ctx_user = i << 24; + + nouveau_wait_for_idle(dev); + + /* disable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_DMAPSH, NV_READ(NV_PFIFO_CACH1_DMAPSH)&(~0x1)); + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); + + /* Construct inital RAMFC for new channel */ + if (dev_priv->card_type < NV_10) { + nouveau_nv04_context_init(dev, init); + } else if (dev_priv->card_type < NV_40) { + nouveau_nv10_context_init(dev, init); + } else { + ret = nv40_graph_context_create(dev, init->channel); + if (ret) { + nouveau_fifo_free(dev, init->channel); + return ret; + } + nouveau_nv40_context_init(dev, init); + } + + /* enable the fifo dma operation */ + NV_WRITE(NV_PFIFO_MODE,NV_READ(NV_PFIFO_MODE)|(1<<init->channel)); + + /* setup channel's default get/put values */ + NV_WRITE(NV03_FIFO_REGS_DMAPUT(init->channel), init->put_base); + NV_WRITE(NV03_FIFO_REGS_DMAGET(init->channel), init->put_base); + + /* If this is the first channel, setup PFIFO ourselves. For any + * other case, the GPU will handle this when it switches contexts. + */ + if (dev_priv->fifo_alloc_count == 0) { + nouveau_fifo_context_restore(dev, init->channel); + if (dev_priv->card_type >= NV_40) { + struct nouveau_fifo *chan; + uint32_t inst; + + chan = &dev_priv->fifos[init->channel]; + inst = nouveau_chip_instance_get(dev, + chan->ramin_grctx); + + /* see comments in nv40_graph_context_restore() */ + NV_WRITE(0x400784, inst); + NV_WRITE(0x40032C, inst | 0x01000000); + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, inst); + } + } + + NV_WRITE(NV_PFIFO_CACH1_DMAPSH, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PSH0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); + + /* reenable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + + /* make the fifo available to user space */ + /* first, the fifo control regs */ + init->ctrl = dev_priv->mmio->offset + NV03_FIFO_REGS(init->channel); + init->ctrl_size = NV03_FIFO_REGS_SIZE; + ret = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS, + 0, &dev_priv->fifos[init->channel].regs); + if (ret != 0) + return ret; + + /* pass back FIFO map info to the caller */ + init->cmdbuf = dev_priv->fifos[init->channel].cmdbuf_mem->start; + init->cmdbuf_size = dev_priv->fifos[init->channel].cmdbuf_mem->size; + + /* FIFO has no objects yet */ + dev_priv->fifos[init->channel].objs = NULL; + dev_priv->fifo_alloc_count++; + + DRM_INFO("%s: initialised FIFO %d\n", __func__, init->channel); + return 0; +} + +/* stops a fifo */ +void nouveau_fifo_free(drm_device_t* dev,int n) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + int ctx_size = nouveau_fifo_ctx_size(dev); + + dev_priv->fifos[n].used=0; + DRM_INFO("%s: freeing fifo %d\n", __func__, n); + + /* disable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + 
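/*
 * A minimal user-space sketch, not part of the patch itself, of how the
 * FIFO_ALLOC ioctl implemented above might be driven through libdrm.  It
 * assumes nouveau_drm.h exposes drm_nouveau_fifo_alloc_t and
 * DRM_NOUVEAU_FIFO_ALLOC exactly as they are used in this file, and that the
 * returned ctrl/cmdbuf values can be handed to drmMap(); treat both as
 * assumptions.
 */
#include <xf86drm.h>
#include "nouveau_drm.h"

static int example_open_channel(int fd)
{
	drm_nouveau_fifo_alloc_t init = { 0 };
	void *ctrl, *cmdbuf;

	if (drmCommandWriteRead(fd, DRM_NOUVEAU_FIFO_ALLOC,
				&init, sizeof(init)))
		return -1;

	/* map the per-channel control registers and the DMA command buffer */
	if (drmMap(fd, init.ctrl, init.ctrl_size, &ctrl) ||
	    drmMap(fd, init.cmdbuf, init.cmdbuf_size, &cmdbuf))
		return -1;

	/* init.channel identifies the hw fifo, init.put_base its start offset */
	return init.channel;
}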
+ NV_WRITE(NV_PFIFO_MODE,NV_READ(NV_PFIFO_MODE)&~(1<<n)); + // FIXME XXX needs more code + + /* Clean RAMFC */ + for (i=0;i<ctx_size;i+=4) { + DRM_DEBUG("RAMFC +%02x: 0x%08x\n", i, NV_READ(NV_RAMIN + + dev_priv->ramfc_offset + n*ctx_size + i)); + NV_WRITE(NV_RAMIN + dev_priv->ramfc_offset + n*ctx_size + i, 0); + } + + if (dev_priv->card_type >= NV_40) + nouveau_instmem_free(dev, dev_priv->fifos[n].ramin_grctx); + + /* reenable the fifo caches */ + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + + /* Deallocate command buffer, and dma object */ + nouveau_mem_free(dev, dev_priv->fifos[n].cmdbuf_mem); + + dev_priv->fifo_alloc_count--; +} + +/* cleanups all the fifos from filp */ +void nouveau_fifo_cleanup(drm_device_t* dev, DRMFILE filp) +{ + int i; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("clearing FIFO enables from filp\n"); + for(i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp==filp) + nouveau_fifo_free(dev,i); + + /* check we still point at an active channel */ + if (dev_priv->fifos[dev_priv->cur_fifo].used == 0) { + DRM_DEBUG("%s: cur_fifo is no longer owned.\n", __func__); + for (i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used) break; + if (i==nouveau_fifo_number(dev)) + i=0; + DRM_DEBUG("%s: new cur_fifo is %d\n", __func__, i); + dev_priv->cur_fifo = i; + } + +/* if (dev_priv->cmdbuf_alloc) + nouveau_fifo_init(dev);*/ +} + +int nouveau_fifo_id_get(drm_device_t* dev, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int i; + + for(i=0;i<nouveau_fifo_number(dev);i++) + if (dev_priv->fifos[i].used && dev_priv->fifos[i].filp == filp) + return i; + return -1; +} + +/*********************************** + * ioctls wrapping the functions + ***********************************/ + +static int nouveau_ioctl_fifo_alloc(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_fifo_alloc_t init; + int res; + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_fifo_alloc_t __user *) data, sizeof(init)); + + res=nouveau_fifo_alloc(dev,&init,filp); + if (!res) + DRM_COPY_TO_USER_IOCTL((drm_nouveau_fifo_alloc_t __user *)data, init, sizeof(init)); + + return res; +} + +/*********************************** + * finally, the ioctl table + ***********************************/ + +drm_ioctl_desc_t nouveau_ioctls[] = { + [DRM_IOCTL_NR(DRM_NOUVEAU_FIFO_ALLOC)] = {nouveau_ioctl_fifo_alloc, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_OBJECT_INIT)] = {nouveau_ioctl_object_init, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_DMA_OBJECT_INIT)] = {nouveau_ioctl_dma_object_init, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_ALLOC)] = {nouveau_ioctl_mem_alloc, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_MEM_FREE)] = {nouveau_ioctl_mem_free, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_GETPARAM)] = {nouveau_ioctl_getparam, DRM_AUTH}, + [DRM_IOCTL_NR(DRM_NOUVEAU_SETPARAM)] = {nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, +}; + +int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls); + + diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c new file mode 100644 index 00000000..7a31fb0b --- /dev/null +++ b/shared-core/nouveau_irq.c @@ -0,0 +1,422 @@ +/* + * Copyright (C) 2006 Ben Skeggs. + * + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Ben Skeggs <darktama@iinet.net.au> + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_reg.h" + +void nouveau_irq_preinstall(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("IRQ: preinst\n"); + + /* Disable/Clear PFIFO interrupts */ + NV_WRITE(NV_PFIFO_INTEN, 0); + NV_WRITE(NV_PFIFO_INTSTAT, 0xFFFFFFFF); + /* Disable/Clear PGRAPH interrupts */ + if (dev_priv->card_type<NV_40) + NV_WRITE(NV04_PGRAPH_INTEN, 0); + else + NV_WRITE(NV40_PGRAPH_INTEN, 0); + NV_WRITE(NV_PGRAPH_INTSTAT, 0xFFFFFFFF); +#if 0 + /* Disable/Clear CRTC0/1 interrupts */ + NV_WRITE(NV_CRTC0_INTEN, 0); + NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + NV_WRITE(NV_CRTC1_INTEN, 0); + NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); +#endif + /* Master disable */ + NV_WRITE(NV_PMC_INTEN, 0); +} + +void nouveau_irq_postinstall(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("IRQ: postinst\n"); + + /* Enable PFIFO error reporting */ + NV_WRITE(NV_PFIFO_INTEN , + NV_PFIFO_INTR_CACHE_ERROR | + NV_PFIFO_INTR_RUNOUT | + NV_PFIFO_INTR_RUNOUT_OVERFLOW | + NV_PFIFO_INTR_DMA_PUSHER | + NV_PFIFO_INTR_DMA_PT | + NV_PFIFO_INTR_SEMAPHORE | + NV_PFIFO_INTR_ACQUIRE_TIMEOUT + ); + NV_WRITE(NV_PFIFO_INTSTAT, 0xFFFFFFFF); + + /* Enable PGRAPH interrupts */ + if (dev_priv->card_type<NV_40) + NV_WRITE(NV04_PGRAPH_INTEN, + NV_PGRAPH_INTR_NOTIFY | + NV_PGRAPH_INTR_MISSING_HW | + NV_PGRAPH_INTR_CONTEXT_SWITCH | + NV_PGRAPH_INTR_BUFFER_NOTIFY | + NV_PGRAPH_INTR_ERROR + ); + else + NV_WRITE(NV40_PGRAPH_INTEN, + NV_PGRAPH_INTR_NOTIFY | + NV_PGRAPH_INTR_MISSING_HW | + NV_PGRAPH_INTR_CONTEXT_SWITCH | + NV_PGRAPH_INTR_BUFFER_NOTIFY | + NV_PGRAPH_INTR_ERROR + ); + NV_WRITE(NV_PGRAPH_INTSTAT, 0xFFFFFFFF); + +#if 0 + /* Enable CRTC0/1 interrupts */ + NV_WRITE(NV_CRTC0_INTEN, NV_CRTC_INTR_VBLANK); + NV_WRITE(NV_CRTC1_INTEN, NV_CRTC_INTR_VBLANK); +#endif + + /* Master enable */ + NV_WRITE(NV_PMC_INTEN, NV_PMC_INTEN_MASTER_ENABLE); +} + +void nouveau_irq_uninstall(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("IRQ: uninst\n"); + + /* Disable PFIFO interrupts */ + NV_WRITE(NV_PFIFO_INTEN, 0); + /* Disable PGRAPH interrupts */ + if (dev_priv->card_type<NV_40) + NV_WRITE(NV04_PGRAPH_INTEN, 0); + else + 
NV_WRITE(NV40_PGRAPH_INTEN, 0); +#if 0 + /* Disable CRTC0/1 interrupts */ + NV_WRITE(NV_CRTC0_INTEN, 0); + NV_WRITE(NV_CRTC1_INTEN, 0); +#endif + /* Master disable */ + NV_WRITE(NV_PMC_INTEN, 0); +} + +static void nouveau_fifo_irq_handler(drm_device_t *dev) +{ + uint32_t status, chmode, chstat, channel; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + status = NV_READ(NV_PFIFO_INTSTAT); + if (!status) + return; + chmode = NV_READ(NV_PFIFO_MODE); + chstat = NV_READ(NV_PFIFO_DMA); + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + + DRM_DEBUG("NV: PFIFO interrupt! Channel=%d, INTSTAT=0x%08x/MODE=0x%08x/PEND=0x%08x\n", channel, status, chmode, chstat); + + if (status & NV_PFIFO_INTR_CACHE_ERROR) { + uint32_t c1get, c1method, c1data; + + DRM_ERROR("NV: PFIFO error interrupt\n"); + + c1get = NV_READ(NV_PFIFO_CACH1_GET) >> 2; + if (dev_priv->card_type < NV_40) { + /* Untested, so it may not work.. */ + c1method = NV_READ(NV_PFIFO_CACH1_METHOD(c1get)); + c1data = NV_READ(NV_PFIFO_CACH1_DATA(c1get)); + } else { + c1method = NV_READ(NV40_PFIFO_CACH1_METHOD(c1get)); + c1data = NV_READ(NV40_PFIFO_CACH1_DATA(c1get)); + } + + DRM_ERROR("NV: Channel %d/%d - Method 0x%04x, Data 0x%08x\n", + channel, (c1method >> 13) & 7, + c1method & 0x1ffc, c1data + ); + + status &= ~NV_PFIFO_INTR_CACHE_ERROR; + NV_WRITE(NV_PFIFO_INTSTAT, NV_PFIFO_INTR_CACHE_ERROR); + } + + if (status & NV_PFIFO_INTR_DMA_PUSHER) { + DRM_INFO("NV: PFIFO DMA pusher interrupt\n"); + + status &= ~NV_PFIFO_INTR_DMA_PUSHER; + NV_WRITE(NV_PFIFO_INTSTAT, NV_PFIFO_INTR_DMA_PUSHER); + + NV_WRITE(NV_PFIFO_CACH1_DMAS, 0x00000000); + if (NV_READ(NV_PFIFO_CACH1_DMAP)!=NV_READ(NV_PFIFO_CACH1_DMAG)) + { + uint32_t getval=NV_READ(NV_PFIFO_CACH1_DMAG)+4; + NV_WRITE(NV_PFIFO_CACH1_DMAG,getval); + } + } + + if (status) { + DRM_INFO("NV: unknown PFIFO interrupt. 
status=0x%08x\n", status); + + NV_WRITE(NV_PFIFO_INTSTAT, status); + } + + NV_WRITE(NV_PMC_INTSTAT, NV_PMC_INTSTAT_PFIFO_PENDING); +} + +static void nouveau_nv04_context_switch(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t channel,i; + uint32_t max=0; + NV_WRITE(NV_PGRAPH_FIFO,0x0); + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + //DRM_INFO("raw PFIFO_CACH1_PHS1 reg is %x\n",NV_READ(NV_PFIFO_CACH1_PSH1)); + //DRM_INFO("currently on channel %d\n",channel); + for (i=0;i<nouveau_fifo_number(dev);i++) + if ((dev_priv->fifos[i].used)&&(i!=channel)) { + uint32_t put,get,pending; + //put=NV_READ(dev_priv->ramfc_offset+i*32); + //get=NV_READ(dev_priv->ramfc_offset+4+i*32); + put=NV_READ(NV03_FIFO_REGS_DMAPUT(i)); + get=NV_READ(NV03_FIFO_REGS_DMAGET(i)); + pending=NV_READ(NV_PFIFO_DMA); + //DRM_INFO("Channel %d (put/get %x/%x)\n",i,put,get); + /* mark all pending channels as such */ + if ((put!=get)&!(pending&(1<<i))) + { + pending|=(1<<i); + NV_WRITE(NV_PFIFO_DMA,pending); + } + max++; + } + nouveau_wait_for_idle(dev); + +#if 1 + /* 2-channel commute */ + // NV_WRITE(NV_PFIFO_CACH1_PSH1,channel|0x100); + if (channel==0) + channel=1; + else + channel=0; + // dev_priv->cur_fifo=channel; + NV_WRITE(0x2050,channel|0x100); +#endif + //NV_WRITE(NV_PFIFO_CACH1_PSH1,max|0x100); + //NV_WRITE(0x2050,max|0x100); + + NV_WRITE(NV_PGRAPH_FIFO,0x1); + +} + +static void nouveau_nv10_context_switch(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int channel, channel_old; + + channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + channel_old = (NV_READ(NV_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); + + DRM_INFO("NV: PGRAPH context switch interrupt channel %x -> %x\n",channel_old, channel); + + NV_WRITE(NV_PGRAPH_FIFO,0x0); + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000000); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000000); + NV_WRITE(NV_PFIFO_CACHES, 0x00000000); + + dev_priv->fifos[channel_old].pgraph_ctx_user = NV_READ(NV_PGRAPH_CTX_USER); + //XXX save PGRAPH context + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10000000); + NV_WRITE(NV_PGRAPH_CTX_USER, dev_priv->fifos[channel].pgraph_ctx_user); + //XXX restore PGRAPH context + printk("ctx_user %x %x\n", dev_priv->fifos[channel_old].pgraph_ctx_user, dev_priv->fifos[channel].pgraph_ctx_user); + + NV_WRITE(NV_PGRAPH_FFINTFC_ST2, NV_READ(NV_PGRAPH_FFINTFC_ST2)&0xCFFFFFFF); + NV_WRITE(NV_PGRAPH_CTX_CONTROL, 0x10010100); + + NV_WRITE(NV_PFIFO_CACH1_PUL0, 0x00000001); + NV_WRITE(NV_PFIFO_CACH1_PUL1, 0x00000001); + NV_WRITE(NV_PFIFO_CACHES, 0x00000001); + NV_WRITE(NV_PGRAPH_FIFO,0x1); +} + +static void nouveau_pgraph_irq_handler(drm_device_t *dev) +{ + uint32_t status; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + status = NV_READ(NV_PGRAPH_INTSTAT); + if (!status) + return; + + if (status & NV_PGRAPH_INTR_NOTIFY) { + uint32_t nsource, nstatus, instance, notify; + DRM_DEBUG("NV: PGRAPH notify interrupt\n"); + + nstatus = NV_READ(0x00400104); + nsource = NV_READ(0x00400108); + DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); + + instance = NV_READ(0x00400158); + notify = NV_READ(0x00400150) >> 16; + DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", nsource, nstatus); + + status &= ~NV_PGRAPH_INTR_NOTIFY; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_NOTIFY); + } + + if (status & NV_PGRAPH_INTR_BUFFER_NOTIFY) { + uint32_t nsource, nstatus, instance, notify; + DRM_DEBUG("NV: PGRAPH buffer notify interrupt\n"); + + nstatus = NV_READ(0x00400104); + nsource 
= NV_READ(0x00400108); + DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); + + instance = NV_READ(0x00400158); + notify = NV_READ(0x00400150) >> 16; + DRM_DEBUG("instance:0x%08x\tnotify:0x%08x\n", instance, notify); + + status &= ~NV_PGRAPH_INTR_BUFFER_NOTIFY; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_BUFFER_NOTIFY); + } + + if (status & NV_PGRAPH_INTR_MISSING_HW) { + DRM_ERROR("NV: PGRAPH missing hw interrupt\n"); + + status &= ~NV_PGRAPH_INTR_MISSING_HW; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_MISSING_HW); + } + + if (status & NV_PGRAPH_INTR_ERROR) { + uint32_t nsource, nstatus, instance; + uint32_t address; + uint32_t channel; + uint32_t method, subc, data; + + DRM_ERROR("NV: PGRAPH error interrupt\n"); + + nstatus = NV_READ(0x00400104); + nsource = NV_READ(0x00400108); + DRM_DEBUG("nsource:0x%08x\tnstatus:0x%08x\n", nsource, nstatus); + + instance = NV_READ(0x00400158); + DRM_DEBUG("instance:0x%08x\n", instance); + + address = NV_READ(0x400704); + data = NV_READ(0x400708); + channel = (address >> 20) & 0x1F; + subc = (address >> 16) & 0x7; + method = address & 0x1FFC; + DRM_DEBUG("NV: 0x400704 = 0x%08x\n", address); + DRM_ERROR("NV: Channel %d/%d (class 0x%04x) -" + "Method 0x%04x, Data 0x%08x\n", + channel, subc, + NV_READ(0x400160+subc*4) & 0xFFFF, + method, data + ); + + status &= ~NV_PGRAPH_INTR_ERROR; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_ERROR); + } + + if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) { + uint32_t channel=NV_READ(NV_PFIFO_CACH1_PSH1)&(nouveau_fifo_number(dev)-1); + DRM_INFO("NV: PGRAPH context switch interrupt channel %x\n",channel); + switch(dev_priv->card_type) + { + case NV_04: + case NV_05: + nouveau_nv04_context_switch(dev); + break; + case NV_10: + nouveau_nv10_context_switch(dev); + break; + default: + DRM_INFO("NV: Context switch not implemented\n"); + break; + } + + status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH; + NV_WRITE(NV_PGRAPH_INTSTAT, NV_PGRAPH_INTR_CONTEXT_SWITCH); + } + + if (status) { + DRM_INFO("NV: Unknown PGRAPH interrupt! STAT=0x%08x\n", status); + NV_WRITE(NV_PGRAPH_INTSTAT, status); + } + + NV_WRITE(NV_PMC_INTSTAT, NV_PMC_INTSTAT_PGRAPH_PENDING); +} + +static void nouveau_crtc_irq_handler(drm_device_t *dev, int crtc) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + if (crtc&1) { + NV_WRITE(NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK); + } + + if (crtc&2) { + NV_WRITE(NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK); + } +} + +irqreturn_t nouveau_irq_handler(DRM_IRQ_ARGS) +{ + drm_device_t *dev = (drm_device_t*)arg; + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t status; + + status = NV_READ(NV_PMC_INTSTAT); + if (!status) + return IRQ_NONE; + + DRM_DEBUG("PMC INTSTAT: 0x%08x\n", status); + + if (status & NV_PMC_INTSTAT_PFIFO_PENDING) { + nouveau_fifo_irq_handler(dev); + status &= ~NV_PMC_INTSTAT_PFIFO_PENDING; + } + if (status & NV_PMC_INTSTAT_PGRAPH_PENDING) { + nouveau_pgraph_irq_handler(dev); + status &= ~NV_PMC_INTSTAT_PGRAPH_PENDING; + } + if (status & NV_PMC_INTSTAT_CRTCn_PENDING) { + nouveau_crtc_irq_handler(dev, (status>>24)&3); + status &= ~NV_PMC_INTSTAT_CRTCn_PENDING; + } + + if (status) + DRM_ERROR("Unhandled PMC INTR status bits 0x%08x\n", status); + + return IRQ_HANDLED; +} + diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c new file mode 100644 index 00000000..cd53d25d --- /dev/null +++ b/shared-core/nouveau_mem.c @@ -0,0 +1,612 @@ +/* + * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 
+ * Copyright 2005 Stephane Marchesin + * + * The Weather Channel (TM) funded Tungsten Graphics to develop the + * initial release of the Radeon 8500 driver under the XFree86 license. + * This notice must be preserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: + * Keith Whitwell <keith@tungstengraphics.com> + */ + + +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "nouveau_drv.h" + +static int meminit_ok=0; + +static struct mem_block *split_block(struct mem_block *p, uint64_t start, uint64_t size, + DRMFILE filp) +{ + /* Maybe cut off the start of an existing block */ + if (start > p->start) { + struct mem_block *newblock = + drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); + if (!newblock) + goto out; + newblock->start = start; + newblock->size = p->size - (start - p->start); + newblock->filp = NULL; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size -= newblock->size; + p = newblock; + } + + /* Maybe cut off the end of an existing block */ + if (size < p->size) { + struct mem_block *newblock = + drm_alloc(sizeof(*newblock), DRM_MEM_BUFS); + if (!newblock) + goto out; + newblock->start = start + size; + newblock->size = p->size - size; + newblock->filp = NULL; + newblock->next = p->next; + newblock->prev = p; + p->next->prev = newblock; + p->next = newblock; + p->size = size; + } + +out: + /* Our block is in the middle */ + p->filp = filp; + return p; +} + +static struct mem_block *alloc_block(struct mem_block *heap, uint64_t size, + int align2, DRMFILE filp) +{ + struct mem_block *p; + uint64_t mask = (1 << align2) - 1; + + if (!heap) + return NULL; + + list_for_each(p, heap) { + uint64_t start = (p->start + mask) & ~mask; + if (p->filp == 0 && start + size <= p->start + p->size) + return split_block(p, start, size, filp); + } + + return NULL; +} + +static struct mem_block *find_block(struct mem_block *heap, uint64_t start) +{ + struct mem_block *p; + + list_for_each(p, heap) + if (p->start == start) + return p; + + return NULL; +} + +static void free_block(struct mem_block *p) +{ + p->filp = NULL; + + /* Assumes a single contiguous range. Needs a special filp in + * 'heap' to stop it being subsumed. 
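/*
 * A small sketch, not part of the patch itself, of the power-of-two round-up
 * that alloc_block() above performs on the candidate start address; align2 is
 * the exponent, so align_up(0x12345, 12) == 0x13000 (4 KiB alignment).  The
 * helper name is hypothetical.
 */
static inline uint64_t align_up(uint64_t start, int align2)
{
	uint64_t mask = (1ULL << align2) - 1;
	return (start + mask) & ~mask;
}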
+ */ + if (p->next->filp == 0) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + drm_free(q, sizeof(*q), DRM_MEM_BUFS); + } + + if (p->prev->filp == 0) { + struct mem_block *q = p->prev; + q->size += p->size; + q->next = p->next; + q->next->prev = q; + drm_free(p, sizeof(*q), DRM_MEM_BUFS); + } +} + +/* Initialize. How to check for an uninitialized heap? + */ +static int init_heap(struct mem_block **heap, uint64_t start, uint64_t size) +{ + struct mem_block *blocks = drm_alloc(sizeof(*blocks), DRM_MEM_BUFS); + + if (!blocks) + return DRM_ERR(ENOMEM); + + *heap = drm_alloc(sizeof(**heap), DRM_MEM_BUFS); + if (!*heap) { + drm_free(blocks, sizeof(*blocks), DRM_MEM_BUFS); + return DRM_ERR(ENOMEM); + } + + blocks->start = start; + blocks->size = size; + blocks->filp = NULL; + blocks->next = blocks->prev = *heap; + + memset(*heap, 0, sizeof(**heap)); + (*heap)->filp = (DRMFILE) - 1; + (*heap)->next = (*heap)->prev = blocks; + return 0; +} + +/* + * Free all blocks associated with the releasing filp + */ +void nouveau_mem_release(DRMFILE filp, struct mem_block *heap) +{ + struct mem_block *p; + + if (!heap || !heap->next) + return; + + list_for_each(p, heap) { + if (p->filp == filp) + p->filp = NULL; + } + + /* Assumes a single contiguous range. Needs a special filp in + * 'heap' to stop it being subsumed. + */ + list_for_each(p, heap) { + while ((p->filp == 0) && (p->next->filp == 0) && (p->next!=heap)) { + struct mem_block *q = p->next; + p->size += q->size; + p->next = q->next; + p->next->prev = p; + drm_free(q, sizeof(*q), DRM_MEM_DRIVER); + } + } +} + +/* + * Cleanup everything + */ +static void nouveau_mem_takedown(struct mem_block **heap) +{ + struct mem_block *p; + + if (!*heap) + return; + + for (p = (*heap)->next; p != *heap;) { + struct mem_block *q = p; + p = p->next; + drm_free(q, sizeof(*q), DRM_MEM_DRIVER); + } + + drm_free(*heap, sizeof(**heap), DRM_MEM_DRIVER); + *heap = NULL; +} + +void nouveau_mem_close(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + nouveau_mem_takedown(&dev_priv->agp_heap); + nouveau_mem_takedown(&dev_priv->fb_heap); +} + +/* returns the amount of FB ram in bytes */ +uint64_t nouveau_mem_fb_amount(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + switch(dev_priv->card_type) + { + case NV_03: + switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) + { + case NV03_BOOT_0_RAM_AMOUNT_8MB: + case NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM: + return 8*1024*1024; + case NV03_BOOT_0_RAM_AMOUNT_4MB: + return 4*1024*1024; + case NV03_BOOT_0_RAM_AMOUNT_2MB: + return 2*1024*1024; + } + break; + case NV_04: + case NV_05: + if (NV_READ(NV03_BOOT_0) & 0x00000100) { + return (((NV_READ(NV03_BOOT_0) >> 12) & 0xf)*2+2)*1024*1024; + } else + switch(NV_READ(NV03_BOOT_0)&NV03_BOOT_0_RAM_AMOUNT) + { + case NV04_BOOT_0_RAM_AMOUNT_32MB: + return 32*1024*1024; + case NV04_BOOT_0_RAM_AMOUNT_16MB: + return 16*1024*1024; + case NV04_BOOT_0_RAM_AMOUNT_8MB: + return 8*1024*1024; + case NV04_BOOT_0_RAM_AMOUNT_4MB: + return 4*1024*1024; + } + break; + case NV_10: + case NV_20: + case NV_30: + case NV_40: + case NV_44: + case NV_50: + default: + // XXX won't work on BSD because of pci_read_config_dword + if (dev_priv->flags&NV_NFORCE) { + uint32_t mem; + pci_read_config_dword(dev->pdev, 0x7C, &mem); + return (uint64_t)(((mem >> 6) & 31) + 1)*1024*1024; + } else if(dev_priv->flags&NV_NFORCE2) { + uint32_t mem; + pci_read_config_dword(dev->pdev, 0x84, &mem); + return (uint64_t)(((mem >> 4) 
& 127) + 1)*1024*1024; + } else { + uint64_t mem; + mem=(NV_READ(NV04_FIFO_DATA)&NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >> NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT; + return mem*1024*1024; + } + break; + } + + DRM_ERROR("Unable to detect video ram size. Please report your setup to " DRIVER_EMAIL "\n"); + return 0; +} + + + +int nouveau_mem_init(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + uint32_t fb_size; + dev_priv->agp_phys=0; + dev_priv->fb_phys=0; + + /* init AGP */ + dev_priv->agp_heap=NULL; + if (drm_device_is_agp(dev)) + { + int err; + drm_agp_info_t info; + drm_agp_mode_t mode; + drm_agp_buffer_t agp_req; + drm_agp_binding_t bind_req; + + err = drm_agp_acquire(dev); + if (err) { + DRM_ERROR("Unable to acquire AGP: %d\n", err); + goto no_agp; + } + + err = drm_agp_info(dev, &info); + if (err) { + DRM_ERROR("Unable to get AGP info: %d\n", err); + goto no_agp; + } + + /* see agp.h for the AGPSTAT_* modes available */ + mode.mode = info.mode; + err = drm_agp_enable(dev, mode); + if (err) { + DRM_ERROR("Unable to enable AGP: %d\n", err); + goto no_agp; + } + + agp_req.size = info.aperture_size; + agp_req.type = 0; + err = drm_agp_alloc(dev, &agp_req); + if (err) { + DRM_ERROR("Unable to alloc AGP: %d\n", err); + goto no_agp; + } + + bind_req.handle = agp_req.handle; + bind_req.offset = 0; + err = drm_agp_bind(dev, &bind_req); + if (err) { + DRM_ERROR("Unable to bind AGP: %d\n", err); + goto no_agp; + } + + if (init_heap(&dev_priv->agp_heap, info.aperture_base, info.aperture_size)) + goto no_agp; + + dev_priv->agp_phys=info.aperture_base; + } +no_agp: + + /* Init FB */ + dev_priv->fb_phys=drm_get_resource_start(dev,1); + fb_size = nouveau_mem_fb_amount(dev); + /* On at least NV40, RAMIN is actually at the end of vram. + * We don't want to allocate this... */ + if (dev_priv->card_type >= NV_40) + fb_size -= dev_priv->ramin_size; + DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10); + + if (fb_size>256*1024*1024) { + /* On cards with > 256Mb, you can't map everything. + * So we create a second FB heap for that type of memory */ + if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), 256*1024*1024)) + return DRM_ERR(ENOMEM); + if (init_heap(&dev_priv->fb_nomap_heap, drm_get_resource_start(dev,1)+256*1024*1024, fb_size-256*1024*1024)) + return DRM_ERR(ENOMEM); + } else { + if (init_heap(&dev_priv->fb_heap, drm_get_resource_start(dev,1), fb_size)) + return DRM_ERR(ENOMEM); + dev_priv->fb_nomap_heap=NULL; + } + + return 0; +} + +struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment, uint64_t size, int flags, DRMFILE filp) +{ + struct mem_block *block; + int type; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + /* + * Init memory if needed + */ + if (meminit_ok==0) + { + nouveau_mem_init(dev); + meminit_ok=1; + } + + /* + * Make things easier on ourselves: all allocations are page-aligned. + * We need that to map allocated regions into the user space + */ + if (alignment < PAGE_SHIFT) + alignment = PAGE_SHIFT; + + /* + * Warn about 0 sized allocations, but let it go through. 
It'll return 1 page + */ + if (size == 0) + DRM_INFO("warning : 0 byte allocation\n"); + + /* + * Keep alloc size a multiple of the page size to keep drm_addmap() happy + */ + if (size & (~PAGE_MASK)) + size = ((size/PAGE_SIZE) + 1) * PAGE_SIZE; + + if (flags&NOUVEAU_MEM_AGP) { + type=NOUVEAU_MEM_AGP; + block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + if (flags&(NOUVEAU_MEM_FB|NOUVEAU_MEM_FB_ACCEPTABLE)) { + type=NOUVEAU_MEM_FB; + if (!(flags&NOUVEAU_MEM_MAPPED)) { + block = alloc_block(dev_priv->fb_nomap_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + block = alloc_block(dev_priv->fb_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + if (flags&NOUVEAU_MEM_AGP_ACCEPTABLE) { + type=NOUVEAU_MEM_AGP; + block = alloc_block(dev_priv->agp_heap, size, alignment, filp); + if (block) goto alloc_ok; + } + + return NULL; + +alloc_ok: + block->flags=type; + + if (flags&NOUVEAU_MEM_MAPPED) + { + int ret; + block->flags|=NOUVEAU_MEM_MAPPED; + + if (type == NOUVEAU_MEM_AGP) + ret = drm_addmap(dev, block->start - dev->agp->base, block->size, + _DRM_AGP, 0, &block->map); + else + ret = drm_addmap(dev, block->start, block->size, + _DRM_FRAME_BUFFER, 0, &block->map); + if (ret) { + free_block(block); + return NULL; + } + } + + DRM_INFO("allocated 0x%llx\n", block->start); + return block; +} + +void nouveau_mem_free(struct drm_device* dev, struct mem_block* block) +{ + DRM_INFO("freeing 0x%llx\n", block->start); + if (meminit_ok==0) + { + DRM_ERROR("%s called without init\n", __FUNCTION__); + return; + } + if (block->flags&NOUVEAU_MEM_MAPPED) + drm_rmmap(dev, block->map); + free_block(block); +} + +int nouveau_instmem_init(struct drm_device *dev, uint32_t offset) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int ret; + + if (dev_priv->card_type >= NV_40) + /* We'll want more instance memory than this on some NV4x cards. + * There's a 16MB aperture to play with that maps onto the end + * of vram. For now, only reserve a small piece until we know + * more about what each chipset requires. + */ + dev_priv->ramin_size = (1*1024* 1024); + else { + /*XXX: what *are* the limits on <NV40 cards?, and does RAMIN + * exist in vram on those cards as well? + */ + dev_priv->ramin_size = (512*1024); + } + DRM_DEBUG("RAMIN size: %dKiB\n", dev_priv->ramin_size>>10); + + /* Create a heap to manage RAMIN allocations, we don't allocate + * the space that was reserved for RAMHT/FC/RO. 
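/*
 * A minimal sketch, not part of the patch itself, of a kernel-side caller of
 * nouveau_mem_alloc() above: it asks for a mapped 64 KiB buffer preferably in
 * AGP, with VRAM as an acceptable fallback.  (DRMFILE)-2 marks the block as a
 * kernel-internal allocation, the same convention nouveau_fifo_cmdbuf_alloc()
 * uses.  The function name is hypothetical.
 */
static struct mem_block *example_scratch_alloc(struct drm_device *dev)
{
	return nouveau_mem_alloc(dev, 0, 64*1024,
				 NOUVEAU_MEM_AGP | NOUVEAU_MEM_FB_ACCEPTABLE |
				 NOUVEAU_MEM_MAPPED,
				 (DRMFILE)-2);
}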
+ */ + ret = init_heap(&dev_priv->ramin_heap, offset, + dev_priv->ramin_size - offset); + if (ret) { + dev_priv->ramin_heap = NULL; + DRM_ERROR("Failed to init RAMIN heap\n"); + } + + return ret; +} + +struct mem_block *nouveau_instmem_alloc(struct drm_device *dev, + uint32_t size, uint32_t align) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + struct mem_block *block; + + if (!dev_priv->ramin_heap) { + DRM_ERROR("instmem alloc called without init\n"); + return NULL; + } + + block = alloc_block(dev_priv->ramin_heap, size, align, (DRMFILE)-2); + if (block) { + block->flags = NOUVEAU_MEM_INSTANCE; + DRM_DEBUG("instance(size=%d, align=%d) alloc'd at 0x%08x\n", + size, (1<<align), (uint32_t)block->start); + } + + return block; +} + +void nouveau_instmem_free(struct drm_device *dev, struct mem_block *block) +{ + if (dev && block) { + free_block(block); + } +} + +uint32_t nouveau_instmem_r32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index) +{ + uint32_t ofs = (uint32_t)mem->start + (index<<2); + + if (dev_priv->ramin) { +#if defined(__powerpc__) + return in_be32((void __iomem *)(dev_priv->ramin)->handle + ofs); +#else + return DRM_READ32(dev_priv->ramin, ofs); +#endif + } else { + return NV_READ(NV_RAMIN+ofs); + } +} + +void nouveau_instmem_w32(drm_nouveau_private_t *dev_priv, + struct mem_block *mem, int index, uint32_t val) +{ + uint32_t ofs = (uint32_t)mem->start + (index<<2); + + if (dev_priv->ramin) { +#if defined(__powerpc__) + out_be32((void __iomem *)(dev_priv->ramin)->handle + ofs, val); +#else + DRM_WRITE32(dev_priv->ramin, ofs, val); +#endif + } else { + NV_WRITE(NV_RAMIN+ofs, val); + } +} + +/* + * Ioctls + */ + +int nouveau_ioctl_mem_alloc(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_mem_alloc_t alloc; + struct mem_block *block; + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + + DRM_COPY_FROM_USER_IOCTL(alloc, (drm_nouveau_mem_alloc_t __user *) data, + sizeof(alloc)); + + block=nouveau_mem_alloc(dev, alloc.alignment, alloc.size, alloc.flags, filp); + if (!block) + return DRM_ERR(ENOMEM); + alloc.region_offset=block->start; + alloc.flags=block->flags; + + DRM_COPY_TO_USER_IOCTL((drm_nouveau_mem_alloc_t __user *) data, alloc, sizeof(alloc)); + + return 0; +} + +int nouveau_ioctl_mem_free(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_mem_free_t memfree; + struct mem_block *block; + + if (!dev_priv) { + DRM_ERROR("%s called with no initialization\n", __FUNCTION__); + return DRM_ERR(EINVAL); + } + + DRM_COPY_FROM_USER_IOCTL(memfree, (drm_nouveau_mem_free_t __user *) data, + sizeof(memfree)); + + block=NULL; + if (memfree.flags&NOUVEAU_MEM_FB) + block = find_block(dev_priv->fb_heap, memfree.region_offset); + else if (memfree.flags&NOUVEAU_MEM_AGP) + block = find_block(dev_priv->agp_heap, memfree.region_offset); + if (!block) + return DRM_ERR(EFAULT); + if (block->filp != filp) + return DRM_ERR(EPERM); + + nouveau_mem_free(dev, block); + return 0; +} + + diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c new file mode 100644 index 00000000..c11b05eb --- /dev/null +++ b/shared-core/nouveau_object.c @@ -0,0 +1,578 @@ +/* + * Copyright (C) 2006 Ben Skeggs. + * + * All Rights Reserved. 
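/*
 * A minimal user-space sketch, not part of the patch itself, exercising the
 * MEM_ALLOC/MEM_FREE ioctls above through libdrm.  It assumes the
 * drm_nouveau_mem_alloc_t/drm_nouveau_mem_free_t structures, the NOUVEAU_MEM_*
 * flags and the DRM_NOUVEAU_MEM_* command indices are exported by
 * nouveau_drm.h exactly as they are used in this file.
 */
static int example_vram_roundtrip(int fd, uint64_t size)
{
	drm_nouveau_mem_alloc_t alloc = { 0 };
	drm_nouveau_mem_free_t req = { 0 };

	alloc.size      = size;
	alloc.alignment = 0;	/* nouveau_mem_alloc() raises this to PAGE_SHIFT */
	alloc.flags     = NOUVEAU_MEM_FB | NOUVEAU_MEM_MAPPED;

	if (drmCommandWriteRead(fd, DRM_NOUVEAU_MEM_ALLOC,
				&alloc, sizeof(alloc)))
		return -1;

	/* alloc.region_offset is the start of the block; alloc.flags reports
	 * where it actually ended up (FB vs AGP) */

	req.flags         = alloc.flags;
	req.region_offset = alloc.region_offset;
	return drmCommandWrite(fd, DRM_NOUVEAU_MEM_FREE, &req, sizeof(req));
}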
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Ben Skeggs <darktama@iinet.net.au> + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +/* TODO + * - Check object class, deny unsafe objects (add card-specific versioning?) + * - Get rid of DMA object creation, this should be wrapped by MM routines. + */ + +/* Translate a RAMIN offset into a value the card understands, will be useful + * in the future when we can access more instance ram which isn't mapped into + * the PRAMIN aperture + */ +uint32_t nouveau_chip_instance_get(drm_device_t *dev, + struct mem_block *mem) +{ + uint32_t inst = (uint32_t)mem->start >> 4; + DRM_DEBUG("****** on-chip instance for 0x%016llx = 0x%08x\n", + mem->start, inst); + return inst; +} + +static void nouveau_object_link(drm_device_t *dev, int fifo_num, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + + if (!fifo->objs) { + fifo->objs = obj; + return; + } + + obj->prev = NULL; + obj->next = fifo->objs; + + fifo->objs->prev = obj; + fifo->objs = obj; +} + +static void nouveau_object_unlink(drm_device_t *dev, int fifo_num, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + + if (obj->prev == NULL) { + if (obj->next) + obj->next->prev = NULL; + fifo->objs = obj->next; + } else if (obj->next == NULL) { + if (obj->prev) + obj->prev->next = NULL; + } else { + obj->prev->next = obj->next; + obj->next->prev = obj->prev; + } +} + +static struct nouveau_object * +nouveau_object_handle_find(drm_device_t *dev, int fifo_num, uint32_t handle) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_fifo *fifo = &dev_priv->fifos[fifo_num]; + struct nouveau_object *obj = fifo->objs; + + if (!handle) + return NULL; + + DRM_DEBUG("Looking for handle 0x%08x\n", handle); + while (obj) { + if (obj->handle == handle) + return obj; + obj = obj->next; + } + + DRM_DEBUG("...couldn't find handle\n"); + return NULL; +} + +/* NVidia uses context objects to drive drawing operations. + + Context objects can be selected into 8 subchannels in the FIFO, + and then used via DMA command buffers. + + A context object is referenced by a user defined handle (CARD32). The HW + looks up graphics objects in a hash table in the instance RAM. 
+ + An entry in the hash table consists of 2 CARD32. The first CARD32 contains + the handle, the second one a bitfield, that contains the address of the + object in instance RAM. + + The format of the second CARD32 seems to be: + + NV4 to NV30: + + 15: 0 instance_addr >> 4 + 17:16 engine (here uses 1 = graphics) + 28:24 channel id (here uses 0) + 31 valid (use 1) + + NV40: + + 15: 0 instance_addr >> 4 (maybe 19-0) + 21:20 engine (here uses 1 = graphics) + I'm unsure about the other bits, but using 0 seems to work. + + The key into the hash table depends on the object handle and channel id and + is given as: +*/ +static uint32_t nouveau_handle_hash(drm_device_t* dev, uint32_t handle, + int fifo) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + uint32_t hash = 0; + int i; + + for (i=32;i>0;i-=dev_priv->ramht_bits) { + hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1)); + handle >>= dev_priv->ramht_bits; + } + hash ^= fifo << (dev_priv->ramht_bits - 4); + return hash << 3; +} + +static int nouveau_hash_table_insert(drm_device_t* dev, int fifo, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int ht_base = NV_RAMIN + dev_priv->ramht_offset; + int ht_end = ht_base + dev_priv->ramht_size; + int o_ofs, ofs; + + o_ofs = ofs = nouveau_handle_hash(dev, obj->handle, fifo); + + while (NV_READ(ht_base + ofs)) { + ofs += 8; + if (ofs == ht_end) ofs = ht_base; + if (ofs == o_ofs) { + DRM_ERROR("no free hash table entries\n"); + return 1; + } + } + ofs += ht_base; + + DRM_DEBUG("Channel %d - Handle 0x%08x at 0x%08x\n", + fifo, obj->handle, ofs); + + NV_WRITE(NV_RAMHT_HANDLE_OFFSET + ofs, obj->handle); + if (dev_priv->card_type >= NV_40) + NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, + (fifo << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT) | + nouveau_chip_instance_get(dev, obj->instance) + ); + else + NV_WRITE(NV_RAMHT_CONTEXT_OFFSET + ofs, + NV_RAMHT_CONTEXT_VALID | + (fifo << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) | + (obj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT) | + nouveau_chip_instance_get(dev, obj->instance) + ); + + obj->ht_loc = ofs; + return 0; +} + +static void nouveau_hash_table_remove(drm_device_t* dev, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + DRM_DEBUG("Remove handle 0x%08x at 0x%08x from HT\n", + obj->handle, obj->ht_loc); + if (obj->ht_loc) { + DRM_DEBUG("... HT entry was: 0x%08x/0x%08x\n", + NV_READ(obj->ht_loc), NV_READ(obj->ht_loc+4)); + NV_WRITE(obj->ht_loc , 0x00000000); + NV_WRITE(obj->ht_loc+4, 0x00000000); + } +} + +static struct nouveau_object *nouveau_instance_alloc(drm_device_t* dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_object *obj; + + /* Create object struct */ + obj = drm_calloc(1, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + if (!obj) { + DRM_ERROR("couldn't alloc memory for object\n"); + return NULL; + } + obj->instance = nouveau_instmem_alloc(dev, + (dev_priv->card_type >= NV_40 ? 
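/*
 * A small sketch, not part of the patch itself: building the second RAMHT
 * word for NV4-NV30 straight from the bit layout documented above (15:0
 * instance address >> 4, 17:16 engine, 28:24 channel id, 31 valid).  It is
 * the open-coded equivalent of the NV_RAMHT_CONTEXT_* macros that
 * nouveau_hash_table_insert() uses; the helper name is hypothetical.
 */
static inline uint32_t ramht_entry_nv04(uint32_t inst_addr, int engine, int channel)
{
	return (1U << 31) |			/* valid */
	       ((uint32_t)channel << 24) |	/* channel id */
	       ((uint32_t)engine << 16) |	/* engine, 1 = graphics */
	       (inst_addr >> 4);		/* instance address */
}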
32 : 16), 4); + if (!obj->instance) { + DRM_ERROR("couldn't alloc RAMIN for object\n"); + drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + return NULL; + } + + return obj; +} + +static void nouveau_object_instance_free(drm_device_t *dev, + struct nouveau_object *obj) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int count, i; + + if (dev_priv->card_type >= NV_40) + count = 8; + else + count = 4; + + /* Clean RAMIN entry */ + DRM_DEBUG("Instance entry for 0x%08x" + "(engine %d, class 0x%x) before destroy:\n", + obj->handle, obj->engine, obj->class); + for (i=0;i<count;i++) { + DRM_DEBUG(" +0x%02x: 0x%08x\n", (i*4), + INSTANCE_RD(obj->instance, i)); + INSTANCE_WR(obj->instance, i, 0x00000000); + } + + /* Free RAMIN */ + nouveau_instmem_free(dev, obj->instance); +} + +/* + DMA objects are used to reference a piece of memory in the + framebuffer, PCI or AGP address space. Each object is 16 bytes big + and looks as follows: + + entry[0] + 11:0 class (seems like I can always use 0 here) + 12 page table present? + 13 page entry linear? + 15:14 access: 0 rw, 1 ro, 2 wo + 17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP + 31:20 dma adjust (bits 0-11 of the address) + entry[1] + dma limit + entry[2] + 1 0 readonly, 1 readwrite + 31:12 dma frame address (bits 12-31 of the address) + + Non linear page tables seem to need a list of frame addresses afterwards, + the rivatv project has some info on this. + + The method below creates a DMA object in instance RAM and returns a handle + to it that can be used to set up context objects. +*/ +struct nouveau_object *nouveau_dma_object_create(drm_device_t* dev, + uint32_t offset, uint32_t size, + int access, uint32_t target) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_object *obj; + uint32_t frame, adjust; + + DRM_DEBUG("offset:0x%08x, size:0x%08x, target:%d, access:%d\n", + offset, size, target, access); + + frame = offset & ~0x00000FFF; + adjust = offset & 0x00000FFF; + + obj = nouveau_instance_alloc(dev); + if (!obj) { + DRM_ERROR("couldn't allocate DMA object\n"); + return obj; + } + + obj->engine = 0; + obj->class = 0; + + INSTANCE_WR(obj->instance, 0, ((1<<12) | (1<<13) | + (adjust << 20) | + (access << 14) | + (target << 16) | + 0x3D /* DMA_IN_MEMORY */)); + INSTANCE_WR(obj->instance, 1, size-1); + INSTANCE_WR(obj->instance, 2, + frame | ((access != NV_DMA_ACCESS_RO) ? (1<<1) : 0)); + /* I don't actually know what this is, the DMA objects I see + * in renouveau dumps usually have this as the same as +8 + */ + INSTANCE_WR(obj->instance, 3, + frame | ((access != NV_DMA_ACCESS_RO) ? (1<<1) : 0)); + + return obj; +} + + +/* Context objects in the instance RAM have the following structure. + * On NV40 they are 32 byte long, on NV30 and smaller 16 bytes. + + NV4 - NV30: + + entry[0] + 11:0 class + 12 chroma key enable + 13 user clip enable + 14 swizzle enable + 17:15 patch config: + scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre + 18 synchronize enable + 19 endian: 1 big, 0 little + 21:20 dither mode + 23 single step enable + 24 patch status: 0 invalid, 1 valid + 25 context_surface 0: 1 valid + 26 context surface 1: 1 valid + 27 context pattern: 1 valid + 28 context rop: 1 valid + 29,30 context beta, beta4 + entry[1] + 7:0 mono format + 15:8 color format + 31:16 notify instance address + entry[2] + 15:0 dma 0 instance address + 31:16 dma 1 instance address + entry[3] + dma method traps + + NV40: + No idea what the exact format is. 
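/*
 * Worked example, not part of the patch itself: the instance words that
 * nouveau_dma_object_create() above would emit for a read-only AGP buffer at
 * offset 0x12345678 of size 64 KiB, assuming the encodings listed in the
 * bitfield comment (access: ro = 1, target: AGP = 3):
 *
 *   frame  = 0x12345000, adjust = 0x678
 *   word 0 = (1<<12) | (1<<13) | (0x678<<20) | (1<<14) | (3<<16) | 0x3D
 *          = 0x6783703D
 *   word 1 = 0x0000FFFF   (dma limit = size - 1)
 *   word 2 = 0x12345000   (frame; read-only, so bit 1 stays clear)
 *   word 3 = 0x12345000   (mirrors word 2, as noted in the code)
 */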
Here's what can be deducted: + + entry[0]: + 11:0 class (maybe uses more bits here?) + 17 user clip enable + 21:19 patch config + 25 patch status valid ? + entry[1]: + 15:0 DMA notifier (maybe 20:0) + entry[2]: + 15:0 DMA 0 instance (maybe 20:0) + 24 big endian + entry[3]: + 15:0 DMA 1 instance (maybe 20:0) + entry[4]: + entry[5]: + set to 0? +*/ +static struct nouveau_object *nouveau_context_object_create(drm_device_t* dev, + int class, uint32_t flags, + struct nouveau_object *dma0, + struct nouveau_object *dma1, + struct nouveau_object *dma_notifier) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + struct nouveau_object *obj; + uint32_t d0, d1, dn; + uint32_t flags0,flags1,flags2; + flags0=0;flags1=0;flags2=0; + + if (dev_priv->card_type >= NV_40) { + if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND) + flags0 |= 0x02080000; + else if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY) + flags0 |= 0x02080000; + if (flags & NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE) + flags0 |= 0x00020000; +#ifdef __BIG_ENDIAN + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x01000000; + flags2 |= 0x01000000; +#else + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x02000000; +#endif + } else { + if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_ROP_AND) + flags0 |= 0x01008000; + else if (flags & NV_DMA_CONTEXT_FLAGS_PATCH_SRCCOPY) + flags0 |= 0x01018000; + if (flags & NV_DMA_CONTEXT_FLAGS_CLIP_ENABLE) + flags0 |= 0x00002000; +#ifdef __BIG_ENDIAN + flags0 |= 0x00080000; + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x00000001; +#else + if (flags & NV_DMA_CONTEXT_FLAGS_MONO) + flags1 |= 0x00000002; +#endif + } + + DRM_DEBUG("class=%x, dma0=%08x, dma1=%08x, dman=%08x\n", + class, + dma0 ? dma0->handle : 0, + dma1 ? dma1->handle : 0, + dma_notifier ? dma_notifier->handle : 0); + + obj = nouveau_instance_alloc(dev); + if (!obj) { + DRM_ERROR("couldn't allocate context object\n"); + return obj; + } + + obj->engine = 1; + obj->class = class; + + d0 = dma0 ? nouveau_chip_instance_get(dev, dma0->instance) : 0; + d1 = dma1 ? nouveau_chip_instance_get(dev, dma1->instance) : 0; + dn = dma_notifier ? 
+ nouveau_chip_instance_get(dev, dma_notifier->instance) : 0; + + if (dev_priv->card_type >= NV_40) { + INSTANCE_WR(obj->instance, 0, class | flags0); + INSTANCE_WR(obj->instance, 1, dn | flags1); + INSTANCE_WR(obj->instance, 2, d0 | flags2); + INSTANCE_WR(obj->instance, 3, d1); + INSTANCE_WR(obj->instance, 4, 0x00000000); + INSTANCE_WR(obj->instance, 5, 0x00000000); + INSTANCE_WR(obj->instance, 6, 0x00000000); + INSTANCE_WR(obj->instance, 7, 0x00000000); + } else { + INSTANCE_WR(obj->instance, 0, class | flags0); + INSTANCE_WR(obj->instance, 1, (dn << 16) | flags1); + INSTANCE_WR(obj->instance, 2, d0 | (d1 << 16)); + INSTANCE_WR(obj->instance, 3, 0); + } + + return obj; +} + +static void +nouveau_object_free(drm_device_t *dev, int fifo_num, struct nouveau_object *obj) +{ + nouveau_object_unlink(dev, fifo_num, obj); + + nouveau_object_instance_free(dev, obj); + nouveau_hash_table_remove(dev, obj); + + drm_free(obj, sizeof(struct nouveau_object), DRM_MEM_DRIVER); + return; +} + +void nouveau_object_cleanup(drm_device_t *dev, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + int fifo; + + fifo = nouveau_fifo_id_get(dev, filp); + if (fifo == -1) + return; + + while (dev_priv->fifos[fifo].objs) + nouveau_object_free(dev, fifo, dev_priv->fifos[fifo].objs); +} + +int nouveau_ioctl_object_init(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_object_init_t init; + struct nouveau_object *obj, *dma0, *dma1, *dman; + int fifo; + + fifo = nouveau_fifo_id_get(dev, filp); + if (fifo == -1) + return DRM_ERR(EINVAL); + + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_object_init_t __user *) + data, sizeof(init)); + + //FIXME: check args, only allow trusted objects to be created + + if (nouveau_object_handle_find(dev, fifo, init.handle)) { + DRM_ERROR("Channel %d: handle 0x%08x already exists\n", + fifo, init.handle); + return DRM_ERR(EINVAL); + } + + dma0 = nouveau_object_handle_find(dev, fifo, init.dma0); + if (init.dma0 && !dma0) { + DRM_ERROR("context dma0 - invalid handle 0x%08x\n", init.dma0); + return DRM_ERR(EINVAL); + } + dma1 = nouveau_object_handle_find(dev, fifo, init.dma1); + if (init.dma1 && !dma1) { + DRM_ERROR("context dma1 - invalid handle 0x%08x\n", init.dma0); + return DRM_ERR(EINVAL); + } + dman = nouveau_object_handle_find(dev, fifo, init.dma_notifier); + if (init.dma_notifier && !dman) { + DRM_ERROR("context dman - invalid handle 0x%08x\n", + init.dma_notifier); + return DRM_ERR(EINVAL); + } + + obj = nouveau_context_object_create(dev, init.class, init.flags, + dma0, dma1, dman); + if (!obj) + return DRM_ERR(ENOMEM); + + obj->handle = init.handle; + + if (nouveau_hash_table_insert(dev, fifo, obj)) { + nouveau_object_free(dev, fifo, obj); + return DRM_ERR(ENOMEM); + } + + nouveau_object_link(dev, fifo, obj); + + return 0; +} + +int nouveau_ioctl_dma_object_init(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_dma_object_init_t init; + struct nouveau_object *obj; + int fifo; + + fifo = nouveau_fifo_id_get(dev, filp); + if (fifo == -1) + return DRM_ERR(EINVAL); + + DRM_COPY_FROM_USER_IOCTL(init, (drm_nouveau_dma_object_init_t __user *) + data, sizeof(init)); + + if (nouveau_object_handle_find(dev, fifo, init.handle)) { + DRM_ERROR("Channel %d: handle 0x%08x already exists\n", + fifo, init.handle); + return DRM_ERR(EINVAL); + } + + obj = nouveau_dma_object_create(dev, init.offset, init.size, + init.access, init.target); + if (!obj) + return DRM_ERR(ENOMEM); + + obj->handle = init.handle; + if (nouveau_hash_table_insert(dev, fifo, obj)) { + nouveau_object_free(dev, fifo, 
obj); + return DRM_ERR(ENOMEM); + } + + nouveau_object_link(dev, fifo, obj); + + return 0; +} + diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h new file mode 100644 index 00000000..23fce39a --- /dev/null +++ b/shared-core/nouveau_reg.h @@ -0,0 +1,238 @@ + + +#define NV03_BOOT_0 0x00100000 +# define NV03_BOOT_0_RAM_AMOUNT 0x00000003 +# define NV03_BOOT_0_RAM_AMOUNT_8MB 0x00000000 +# define NV03_BOOT_0_RAM_AMOUNT_2MB 0x00000001 +# define NV03_BOOT_0_RAM_AMOUNT_4MB 0x00000002 +# define NV03_BOOT_0_RAM_AMOUNT_8MB_SDRAM 0x00000003 +# define NV04_BOOT_0_RAM_AMOUNT_32MB 0x00000000 +# define NV04_BOOT_0_RAM_AMOUNT_4MB 0x00000001 +# define NV04_BOOT_0_RAM_AMOUNT_8MB 0x00000002 +# define NV04_BOOT_0_RAM_AMOUNT_16MB 0x00000003 + +#define NV04_FIFO_DATA 0x0010020c +# define NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK 0xfff00000 +# define NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT 20 + +#define NV03_PGRAPH_STATUS 0x004006b0 +#define NV04_PGRAPH_STATUS 0x00400700 + +#define NV_RAMIN 0x00700000 + +#define NV_RAMHT_HANDLE_OFFSET 0 +#define NV_RAMHT_CONTEXT_OFFSET 4 +# define NV_RAMHT_CONTEXT_VALID (1<<31) +# define NV_RAMHT_CONTEXT_CHANNEL_SHIFT 24 +# define NV_RAMHT_CONTEXT_ENGINE_SHIFT 16 +# define NV_RAMHT_CONTEXT_ENGINE_SOFTWARE 0 +# define NV_RAMHT_CONTEXT_ENGINE_GRAPHICS 1 +# define NV_RAMHT_CONTEXT_INSTANCE_SHIFT 0 +# define NV40_RAMHT_CONTEXT_CHANNEL_SHIFT 23 +# define NV40_RAMHT_CONTEXT_ENGINE_SHIFT 20 +# define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT 0 + +#define NV_DMA_ACCESS_RW 0 +#define NV_DMA_ACCESS_RO 1 +#define NV_DMA_ACCESS_WO 2 +#define NV_DMA_TARGET_VIDMEM 0 +#define NV_DMA_TARGET_AGP 3 + +#define NV03_FIFO_SIZE 0x8000UL +#define NV_MAX_FIFO_NUMBER 32 +#define NV03_FIFO_REGS_SIZE 0x10000 +#define NV03_FIFO_REGS(i) (0x00800000+i*NV03_FIFO_REGS_SIZE) +# define NV03_FIFO_REGS_DMAPUT(i) (NV03_FIFO_REGS(i)+0x40) +# define NV03_FIFO_REGS_DMAGET(i) (NV03_FIFO_REGS(i)+0x44) + +#define NV_PMC_BOOT_0 0x00000000 +#define NV_PMC_INTSTAT 0x00000100 +# define NV_PMC_INTSTAT_PFIFO_PENDING (1<< 8) +# define NV_PMC_INTSTAT_PGRAPH_PENDING (1<<12) +# define NV_PMC_INTSTAT_CRTC0_PENDING (1<<24) +# define NV_PMC_INTSTAT_CRTC1_PENDING (1<<25) +# define NV_PMC_INTSTAT_CRTCn_PENDING (3<<24) +#define NV_PMC_INTEN 0x00000140 +# define NV_PMC_INTEN_MASTER_ENABLE (1<< 0) + +#define NV_PGRAPH_INTSTAT 0x00400100 +#define NV04_PGRAPH_INTEN 0x00400140 +#define NV40_PGRAPH_INTEN 0x0040013C +# define NV_PGRAPH_INTR_NOTIFY (1<< 0) +# define NV_PGRAPH_INTR_MISSING_HW (1<< 4) +# define NV_PGRAPH_INTR_CONTEXT_SWITCH (1<<12) +# define NV_PGRAPH_INTR_BUFFER_NOTIFY (1<<16) +# define NV_PGRAPH_INTR_ERROR (1<<20) +#define NV_PGRAPH_CTX_CONTROL 0x00400144 +#define NV_PGRAPH_NV40_UNK220 0x00400220 +# define NV_PGRAPH_NV40_UNK220_FB_INSTANCE +#define NV_PGRAPH_CTX_USER 0x00400148 +#define NV_PGRAPH_CTX_SWITCH1 0x0040014C +#define NV_PGRAPH_FIFO 0x00400720 +#define NV_PGRAPH_FFINTFC_ST2 0x00400764 + +/* It's a guess that this works on NV03. 
Confirmed on NV04, though */ +#define NV_PFIFO_DELAY_0 0x00002040 +#define NV_PFIFO_DMA_TIMESLICE 0x00002044 +#define NV_PFIFO_INTSTAT 0x00002100 +#define NV_PFIFO_INTEN 0x00002140 +# define NV_PFIFO_INTR_CACHE_ERROR (1<< 0) +# define NV_PFIFO_INTR_RUNOUT (1<< 4) +# define NV_PFIFO_INTR_RUNOUT_OVERFLOW (1<< 8) +# define NV_PFIFO_INTR_DMA_PUSHER (1<<12) +# define NV_PFIFO_INTR_DMA_PT (1<<16) +# define NV_PFIFO_INTR_SEMAPHORE (1<<20) +# define NV_PFIFO_INTR_ACQUIRE_TIMEOUT (1<<24) +#define NV_PFIFO_RAMHT 0x00002210 +#define NV_PFIFO_RAMFC 0x00002214 +#define NV_PFIFO_RAMRO 0x00002218 +#define NV40_PFIFO_RAMFC 0x00002220 +#define NV_PFIFO_CACHES 0x00002500 +#define NV_PFIFO_MODE 0x00002504 +#define NV_PFIFO_DMA 0x00002508 +#define NV_PFIFO_SIZE 0x0000250c +#define NV_PFIFO_CACH0_PSH0 0x00003000 +#define NV_PFIFO_CACH0_PUL0 0x00003050 +#define NV_PFIFO_CACH0_PUL1 0x00003054 +#define NV_PFIFO_CACH1_PSH0 0x00003200 +#define NV_PFIFO_CACH1_PSH1 0x00003204 +#define NV_PFIFO_CACH1_DMAPSH 0x00003220 +#define NV_PFIFO_CACH1_DMAF 0x00003224 +# define NV_PFIFO_CACH1_DMAF_TRIG_8_BYTES 0x00000000 +# define NV_PFIFO_CACH1_DMAF_TRIG_16_BYTES 0x00000008 +# define NV_PFIFO_CACH1_DMAF_TRIG_24_BYTES 0x00000010 +# define NV_PFIFO_CACH1_DMAF_TRIG_32_BYTES 0x00000018 +# define NV_PFIFO_CACH1_DMAF_TRIG_40_BYTES 0x00000020 +# define NV_PFIFO_CACH1_DMAF_TRIG_48_BYTES 0x00000028 +# define NV_PFIFO_CACH1_DMAF_TRIG_56_BYTES 0x00000030 +# define NV_PFIFO_CACH1_DMAF_TRIG_64_BYTES 0x00000038 +# define NV_PFIFO_CACH1_DMAF_TRIG_72_BYTES 0x00000040 +# define NV_PFIFO_CACH1_DMAF_TRIG_80_BYTES 0x00000048 +# define NV_PFIFO_CACH1_DMAF_TRIG_88_BYTES 0x00000050 +# define NV_PFIFO_CACH1_DMAF_TRIG_96_BYTES 0x00000058 +# define NV_PFIFO_CACH1_DMAF_TRIG_104_BYTES 0x00000060 +# define NV_PFIFO_CACH1_DMAF_TRIG_112_BYTES 0x00000068 +# define NV_PFIFO_CACH1_DMAF_TRIG_120_BYTES 0x00000070 +# define NV_PFIFO_CACH1_DMAF_TRIG_128_BYTES 0x00000078 +# define NV_PFIFO_CACH1_DMAF_TRIG_136_BYTES 0x00000080 +# define NV_PFIFO_CACH1_DMAF_TRIG_144_BYTES 0x00000088 +# define NV_PFIFO_CACH1_DMAF_TRIG_152_BYTES 0x00000090 +# define NV_PFIFO_CACH1_DMAF_TRIG_160_BYTES 0x00000098 +# define NV_PFIFO_CACH1_DMAF_TRIG_168_BYTES 0x000000A0 +# define NV_PFIFO_CACH1_DMAF_TRIG_176_BYTES 0x000000A8 +# define NV_PFIFO_CACH1_DMAF_TRIG_184_BYTES 0x000000B0 +# define NV_PFIFO_CACH1_DMAF_TRIG_192_BYTES 0x000000B8 +# define NV_PFIFO_CACH1_DMAF_TRIG_200_BYTES 0x000000C0 +# define NV_PFIFO_CACH1_DMAF_TRIG_208_BYTES 0x000000C8 +# define NV_PFIFO_CACH1_DMAF_TRIG_216_BYTES 0x000000D0 +# define NV_PFIFO_CACH1_DMAF_TRIG_224_BYTES 0x000000D8 +# define NV_PFIFO_CACH1_DMAF_TRIG_232_BYTES 0x000000E0 +# define NV_PFIFO_CACH1_DMAF_TRIG_240_BYTES 0x000000E8 +# define NV_PFIFO_CACH1_DMAF_TRIG_248_BYTES 0x000000F0 +# define NV_PFIFO_CACH1_DMAF_TRIG_256_BYTES 0x000000F8 +# define NV_PFIFO_CACH1_DMAF_SIZE 0x0000E000 +# define NV_PFIFO_CACH1_DMAF_SIZE_32_BYTES 0x00000000 +# define NV_PFIFO_CACH1_DMAF_SIZE_64_BYTES 0x00002000 +# define NV_PFIFO_CACH1_DMAF_SIZE_96_BYTES 0x00004000 +# define NV_PFIFO_CACH1_DMAF_SIZE_128_BYTES 0x00006000 +# define NV_PFIFO_CACH1_DMAF_SIZE_160_BYTES 0x00008000 +# define NV_PFIFO_CACH1_DMAF_SIZE_192_BYTES 0x0000A000 +# define NV_PFIFO_CACH1_DMAF_SIZE_224_BYTES 0x0000C000 +# define NV_PFIFO_CACH1_DMAF_SIZE_256_BYTES 0x0000E000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS 0x001F0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_0 0x00000000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_1 0x00010000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_2 0x00020000 +# define 
NV_PFIFO_CACH1_DMAF_MAX_REQS_3 0x00030000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_4 0x00040000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_5 0x00050000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_6 0x00060000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_7 0x00070000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_8 0x00080000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_9 0x00090000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_10 0x000A0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_11 0x000B0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_12 0x000C0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_13 0x000D0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_14 0x000E0000 +# define NV_PFIFO_CACH1_DMAF_MAX_REQS_15 0x000F0000 +# define NV_PFIFO_CACH1_ENDIAN 0x80000000 +# define NV_PFIFO_CACH1_LITTLE_ENDIAN 0x7FFFFFFF +# define NV_PFIFO_CACH1_BIG_ENDIAN 0x80000000 +#define NV_PFIFO_CACH1_DMAS 0x00003228 +#define NV_PFIFO_CACH1_DMAI 0x0000322c +#define NV_PFIFO_CACH1_DMAC 0x00003230 +#define NV_PFIFO_CACH1_DMAP 0x00003240 +#define NV_PFIFO_CACH1_DMAG 0x00003244 +#define NV_PFIFO_CACH1_REF_CNT 0x00003248 +#define NV_PFIFO_CACH1_DMASR 0x0000324C +#define NV_PFIFO_CACH1_PUL0 0x00003250 +#define NV_PFIFO_CACH1_PUL1 0x00003254 +#define NV_PFIFO_CACH1_HASH 0x00003258 +#define NV_PFIFO_CACH1_ACQUIRE_TIMEOUT 0x00003260 +#define NV_PFIFO_CACH1_ACQUIRE_TIMESTAMP 0x00003264 +#define NV_PFIFO_CACH1_ACQUIRE_VALUE 0x00003268 +#define NV_PFIFO_CACH1_SEMAPHORE 0x0000326C +#define NV_PFIFO_CACH1_GET 0x00003270 +#define NV_PFIFO_CACH1_ENG 0x00003280 +#define NV_PFIFO_CACH1_DMA_DCOUNT 0x000032A0 +#define NV40_PFIFO_GRCTX_INSTANCE 0x000032E0 +#define NV40_PFIFO_UNK32E4 0x000032E4 +#define NV_PFIFO_CACH1_METHOD(i) (0x00003800+(i*8)) +#define NV_PFIFO_CACH1_DATA(i) (0x00003804+(i*8)) +#define NV40_PFIFO_CACH1_METHOD(i) (0x00090000+(i*8)) +#define NV40_PFIFO_CACH1_DATA(i) (0x00090004+(i*8)) + +#define NV_CRTC0_INTSTAT 0x00600100 +#define NV_CRTC0_INTEN 0x00600140 +#define NV_CRTC1_INTSTAT 0x00602100 +#define NV_CRTC1_INTEN 0x00602140 +# define NV_CRTC_INTR_VBLANK (1<<0) + +/* Fifo commands. These are not regs, neither masks */ +#define NV03_FIFO_CMD_JUMP 0x20000000 +#define NV03_FIFO_CMD_JUMP_OFFSET_MASK 0x1ffffffc +#define NV03_FIFO_CMD_REWIND (NV03_FIFO_CMD_JUMP | (0 & NV03_FIFO_CMD_JUMP_OFFSET_MASK)) + +/* RAMFC offsets */ +#define NV04_RAMFC_DMA_PUT 0x00 +#define NV04_RAMFC_DMA_GET 0x04 +#define NV04_RAMFC_DMA_INSTANCE 0x08 +#define NV04_RAMFC_DMA_FETCH 0x16 + +#define NV10_RAMFC_DMA_PUT 0x00 +#define NV10_RAMFC_DMA_GET 0x04 +#define NV10_RAMFC_REF_CNT 0x08 +#define NV10_RAMFC_DMA_INSTANCE 0x0C +#define NV10_RAMFC_DMA_STATE 0x10 +#define NV10_RAMFC_DMA_FETCH 0x14 +#define NV10_RAMFC_ENGINE 0x18 +#define NV10_RAMFC_PULL1_ENGINE 0x1C +#define NV10_RAMFC_ACQUIRE_VALUE 0x20 +#define NV10_RAMFC_ACQUIRE_TIMESTAMP 0x24 +#define NV10_RAMFC_ACQUIRE_TIMEOUT 0x28 +#define NV10_RAMFC_SEMAPHORE 0x2C +#define NV10_RAMFC_DMA_SUBROUTINE 0x30 + +#define NV40_RAMFC_DMA_PUT 0x00 +#define NV40_RAMFC_DMA_GET 0x04 +#define NV40_RAMFC_REF_CNT 0x08 +#define NV40_RAMFC_DMA_INSTANCE 0x0C +#define NV40_RAMFC_DMA_DCOUNT /* ? 
*/ 0x10 +#define NV40_RAMFC_DMA_STATE 0x14 +#define NV40_RAMFC_DMA_FETCH 0x18 +#define NV40_RAMFC_ENGINE 0x1C +#define NV40_RAMFC_PULL1_ENGINE 0x20 +#define NV40_RAMFC_ACQUIRE_VALUE 0x24 +#define NV40_RAMFC_ACQUIRE_TIMESTAMP 0x28 +#define NV40_RAMFC_ACQUIRE_TIMEOUT 0x2C +#define NV40_RAMFC_SEMAPHORE 0x30 +#define NV40_RAMFC_DMA_SUBROUTINE 0x34 +#define NV40_RAMFC_GRCTX_INSTANCE /* guess */ 0x38 +#define NV40_RAMFC_DMA_TIMESLICE 0x3C +#define NV40_RAMFC_UNK_40 0x40 +#define NV40_RAMFC_UNK_44 0x44 +#define NV40_RAMFC_UNK_48 0x48 +#define NV40_RAMFC_2088 0x4C +#define NV40_RAMFC_3300 0x50 + diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c new file mode 100644 index 00000000..44f8c1aa --- /dev/null +++ b/shared-core/nouveau_state.c @@ -0,0 +1,222 @@ +/* + * Copyright 2005 Stephane Marchesin + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + */ + +#include "drmP.h" +#include "drm.h" +#include "drm_sarea.h" +#include "nouveau_drv.h" + +/* here a client dies, release the stuff that was allocated for its filp */ +void nouveau_preclose(drm_device_t * dev, DRMFILE filp) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + + nouveau_mem_release(filp,dev_priv->fb_heap); + nouveau_mem_release(filp,dev_priv->agp_heap); + nouveau_object_cleanup(dev, filp); + nouveau_fifo_cleanup(dev, filp); +} + +/* first module load, setup the mmio/fb mapping */ +int nouveau_firstopen(struct drm_device *dev) +{ + int ret; + drm_nouveau_private_t *dev_priv = dev->dev_private; + + /* resource 0 is mmio regs */ + /* resource 1 is linear FB */ + /* resource 2 is RAMIN (mmio regs + 0x1000000) */ + /* resource 6 is bios */ + + /* map the mmio regs */ + ret = drm_addmap(dev, drm_get_resource_start(dev, 0), drm_get_resource_len(dev, 0), + _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio); + if (dev_priv->mmio) + { + DRM_INFO("regs mapped ok at 0x%lx\n",dev_priv->mmio->offset); + } + else + { + DRM_ERROR("Unable to initialize the mmio mapping. 
Please report your setup to " DRIVER_EMAIL "\n"); + return 1; + } + + DRM_INFO("%lld MB of video ram detected\n",nouveau_mem_fb_amount(dev)>>20); + + /* map larger RAMIN aperture on NV40 cards */ + if (dev_priv->card_type >= NV_40) { + ret = drm_addmap(dev, drm_get_resource_start(dev, 2), + drm_get_resource_len(dev, 2), + _DRM_REGISTERS, + _DRM_READ_ONLY, + &dev_priv->ramin); + if (ret) { + DRM_ERROR("Failed to init RAMIN mapping, " + "limited instance memory available\n"); + dev_priv->ramin = NULL; + } + } else + dev_priv->ramin = NULL; + + /* Clear RAMIN + * Determine locations for RAMHT/FC/RO + * Initialise PFIFO + */ + ret = nouveau_fifo_init(dev); + if (ret) return ret; + + /* setup a mtrr over the FB */ + dev_priv->fb_mtrr=drm_mtrr_add(drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); + + /* FIXME: doesn't belong here, and have no idea what it's for.. */ + if (dev_priv->card_type >= NV_40) + nv40_graph_init(dev); + + return 0; +} + +int nouveau_load(struct drm_device *dev, unsigned long flags) +{ + drm_nouveau_private_t *dev_priv; + + if (flags==NV_UNKNOWN) + return DRM_ERR(EINVAL); + + dev_priv = drm_alloc(sizeof(drm_nouveau_private_t), DRM_MEM_DRIVER); + if (!dev_priv) + return DRM_ERR(ENOMEM); + + memset(dev_priv, 0, sizeof(drm_nouveau_private_t)); + dev_priv->card_type=flags&NOUVEAU_FAMILY; + dev_priv->flags=flags&NOUVEAU_FLAGS; + + dev->dev_private = (void *)dev_priv; + + return 0; +} + +void nouveau_lastclose(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + if(dev_priv->fb_mtrr>0) + { + drm_mtrr_del(dev_priv->fb_mtrr, drm_get_resource_start(dev, 1),nouveau_mem_fb_amount(dev), DRM_MTRR_WC); + dev_priv->fb_mtrr=0; + } +} + +int nouveau_unload(struct drm_device *dev) +{ + drm_free(dev->dev_private, sizeof(*dev->dev_private), DRM_MEM_DRIVER); + dev->dev_private = NULL; + return 0; +} + +int nouveau_ioctl_getparam(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_getparam_t getparam; + + DRM_COPY_FROM_USER_IOCTL(getparam, (drm_nouveau_getparam_t __user *)data, + sizeof(getparam)); + + switch (getparam.param) { + case NOUVEAU_GETPARAM_PCI_VENDOR: + getparam.value=dev->pci_vendor; + break; + case NOUVEAU_GETPARAM_PCI_DEVICE: + getparam.value=dev->pci_device; + break; + case NOUVEAU_GETPARAM_BUS_TYPE: + if (drm_device_is_agp(dev)) + getparam.value=NV_AGP; + else if (drm_device_is_pcie(dev)) + getparam.value=NV_PCIE; + else + getparam.value=NV_PCI; + break; + case NOUVEAU_GETPARAM_FB_PHYSICAL: + getparam.value=dev_priv->fb_phys; + break; + case NOUVEAU_GETPARAM_AGP_PHYSICAL: + getparam.value=dev_priv->agp_phys; + break; + default: + DRM_ERROR("unknown parameter %d\n", getparam.param); + return DRM_ERR(EINVAL); + } + + DRM_COPY_TO_USER_IOCTL((drm_nouveau_getparam_t __user *)data, getparam, + sizeof(getparam)); + return 0; +} + +int nouveau_ioctl_setparam(DRM_IOCTL_ARGS) +{ + DRM_DEVICE; + drm_nouveau_private_t *dev_priv = dev->dev_private; + drm_nouveau_setparam_t setparam; + + DRM_COPY_FROM_USER_IOCTL(setparam, (drm_nouveau_setparam_t __user *)data, + sizeof(setparam)); + + switch (setparam.param) { + case NOUVEAU_SETPARAM_CMDBUF_LOCATION: + switch (setparam.value) { + case NOUVEAU_MEM_AGP: + case NOUVEAU_MEM_FB: + break; + default: + DRM_ERROR("invalid CMDBUF_LOCATION value=%d\n", setparam.value); + return DRM_ERR(EINVAL); + } + dev_priv->config.cmdbuf.location = setparam.value; + break; + case NOUVEAU_SETPARAM_CMDBUF_SIZE: + dev_priv->config.cmdbuf.size = setparam.value; + 
break; + default: + DRM_ERROR("unknown parameter %d\n", setparam.param); + return DRM_ERR(EINVAL); + } + + return 0; +} + +/* waits for idle */ +void nouveau_wait_for_idle(struct drm_device *dev) +{ + drm_nouveau_private_t *dev_priv=dev->dev_private; + switch(dev_priv->card_type) + { + case NV_03: + while(NV_READ(NV03_PGRAPH_STATUS)); + break; + default: + while(NV_READ(NV04_PGRAPH_STATUS)); + break; + } +} + diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c new file mode 100644 index 00000000..53f55bce --- /dev/null +++ b/shared-core/nv40_graph.c @@ -0,0 +1,827 @@ +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_drm.h" + +/* The sizes are taken from the difference between the start of two + * grctx addresses while running the nvidia driver. Probably slightly + * larger than they actually are, because of other objects being created + * between the contexts + */ +#define NV40_GRCTX_SIZE (175*1024) +#define NV43_GRCTX_SIZE (70*1024) +#define NV4A_GRCTX_SIZE (60*1024) +#define NV4E_GRCTX_SIZE (25*1024) + +/*TODO: deciper what each offset in the context represents. The below + * contexts are taken from dumps just after the 3D object is + * created. + */ +static void nv40_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + /* Always has the "instance address" of itself at offset 0 */ + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + /* unknown */ + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x0016c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00170/4, 0x00000040); + INSTANCE_WR(ctx, 0x00174/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x80000000); + INSTANCE_WR(ctx, 0x00180/4, 0x80000000); + INSTANCE_WR(ctx, 0x00184/4, 0x80000000); + INSTANCE_WR(ctx, 0x00188/4, 0x80000000); + INSTANCE_WR(ctx, 0x0018c/4, 0x80000000); + INSTANCE_WR(ctx, 0x0019c/4, 0x00000040); + INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001c0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00000010); + INSTANCE_WR(ctx, 0x00480/4, 0x00000100); + INSTANCE_WR(ctx, 0x00494/4, 0x00000111); + INSTANCE_WR(ctx, 0x00498/4, 0x00080060); + INSTANCE_WR(ctx, 0x004b4/4, 0x00000080); + INSTANCE_WR(ctx, 0x004b8/4, 0xffff0000); + INSTANCE_WR(ctx, 0x004bc/4, 0x00000001); + INSTANCE_WR(ctx, 0x004d0/4, 0x46400000); + INSTANCE_WR(ctx, 0x004ec/4, 0xffff0000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x004fc/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00504/4, 0x00011100); + for (i=0x00520; i<=0x0055c; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00568/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x00594/4, 0x30201000); + INSTANCE_WR(ctx, 0x00598/4, 0x70605040); + INSTANCE_WR(ctx, 0x0059c/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x005a0/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x005b4/4, 0x40100000); + INSTANCE_WR(ctx, 0x005cc/4, 0x00000004); + INSTANCE_WR(ctx, 0x005d8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0060c/4, 
0x435185d6); + INSTANCE_WR(ctx, 0x00610/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00614/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00618/4, 0x00000098); + INSTANCE_WR(ctx, 0x00628/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0062c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00630/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00640/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x0067c/4, 0x00ffff00); + /* 0x680-0x6BC - NV30_TCL_PRIMITIVE_3D_TX_ADDRESS_UNIT(0-15) */ + /* 0x6C0-0x6FC - NV30_TCL_PRIMITIVE_3D_TX_FORMAT_UNIT(0-15) */ + for (i=0x006C0; i<=0x006fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + /* 0x700-0x73C - NV30_TCL_PRIMITIVE_3D_TX_WRAP_UNIT(0-15) */ + for (i=0x00700; i<=0x0073c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + /* 0x740-0x77C - NV30_TCL_PRIMITIVE_3D_TX_ENABLE_UNIT(0-15) */ + /* 0x780-0x7BC - NV30_TCL_PRIMITIVE_3D_TX_SWIZZLE_UNIT(0-15) */ + for (i=0x00780; i<=0x007bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + /* 0x7C0-0x7FC - NV30_TCL_PRIMITIVE_3D_TX_FILTER_UNIT(0-15) */ + for (i=0x007c0; i<=0x007fc; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + /* 0x800-0x83C - NV30_TCL_PRIMITIVE_3D_TX_XY_DIM_UNIT(0-15) */ + for (i=0x00800; i<=0x0083c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + /* 0x840-0x87C - NV30_TCL_PRIMITIVE_3D_TX_UNK07_UNIT(0-15) */ + /* 0x880-0x8BC - NV30_TCL_PRIMITIVE_3D_TX_DEPTH_UNIT(0-15) */ + for (i=0x00880; i<=0x008bc; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + /* unknown */ + for (i=0x00910; i<=0x0091c; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x00920; i<=0x0092c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x00940; i<=0x0094c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x00960; i<=0x0096c; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00980/4, 0x00000002); + INSTANCE_WR(ctx, 0x009b4/4, 0x00000001); + INSTANCE_WR(ctx, 0x009c0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x009c4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x009c8/4, 0x60103f00); + INSTANCE_WR(ctx, 0x009d4/4, 0x00020000); + INSTANCE_WR(ctx, 0x00a08/4, 0x00008100); + INSTANCE_WR(ctx, 0x00aac/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af0/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af8/4, 0x80800001); + INSTANCE_WR(ctx, 0x00bcc/4, 0x00000005); + INSTANCE_WR(ctx, 0x00bf8/4, 0x00005555); + INSTANCE_WR(ctx, 0x00bfc/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c00/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c04/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c08/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c0c/4, 0x00005555); + INSTANCE_WR(ctx, 0x00c44/4, 0x00000001); + for (i=0x03008; i<=0x03080; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x05288; i<=0x08570; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x08628; i<=0x08e18; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x0bd28; i<=0x0f010; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0f0c8; i<=0x0f8b8; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x127c8; i<=0x15ab0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x15b68; i<=0x16358; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x19268; i<=0x1c550; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x1c608; i<=0x1cdf8; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x1fd08; i<=0x22ff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x230a8; i<=0x23898; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x267a8; i<=0x29a90; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x29b48; i<=0x2a338; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +static void +nv43_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = 
dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x00178/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00180/4, 0x00000040); + INSTANCE_WR(ctx, 0x00188/4, 0x00000040); + INSTANCE_WR(ctx, 0x00194/4, 0x80000000); + INSTANCE_WR(ctx, 0x00198/4, 0x80000000); + INSTANCE_WR(ctx, 0x0019c/4, 0x80000000); + INSTANCE_WR(ctx, 0x001a0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001a4/4, 0x80000000); + INSTANCE_WR(ctx, 0x001a8/4, 0x80000000); + INSTANCE_WR(ctx, 0x001ac/4, 0x80000000); + INSTANCE_WR(ctx, 0x001b0/4, 0x80000000); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); + INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); + INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); + INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); + INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); + INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00408/4, 0x46400000); + INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00430/4, 0x00011100); + for (i=0x0044c; i<=0x00488; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); + INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); + INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00538/4, 0x00000098); + INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00560/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x00598/4, 0x00ffff00); + for (i=0x005dc; i<=0x00618; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x0061c; i<=0x00658; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x0069c; i<=0x006d8; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x006dc; i<=0x00718; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x0071c; i<=0x00758; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x0079c; i<=0x007d8; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x0082c; i<=0x00838; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x0083c; i<=0x00848; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x0085c; i<=0x00868; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x0087c; i<=0x00888; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x0089c/4, 0x00000002); + INSTANCE_WR(ctx, 0x008d0/4, 0x00000021); + INSTANCE_WR(ctx, 0x008d4/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); + INSTANCE_WR(ctx, 0x008f4/4, 0x00020000); + INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); + INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); + INSTANCE_WR(ctx, 0x009fc/4, 
0x00001001); + INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); + INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); + INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); + INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); + INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); + for (i=0x02ec0; i<=0x02f38; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x04c80; i<=0x06e70; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x06e80; i<=0x07270; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x096c0; i<=0x0b8b0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0b8c0; i<=0x0bcb0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x0e100; i<=0x102f0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x10300; i<=0x106f0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +}; + +static void nv4a_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x00158/4, 0x00000001); + INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00160/4, 0x00000001); + INSTANCE_WR(ctx, 0x00164/4, 0x00000001); + INSTANCE_WR(ctx, 0x00168/4, 0x00000001); + INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00170/4, 0x00000001); + INSTANCE_WR(ctx, 0x00174/4, 0x00000001); + INSTANCE_WR(ctx, 0x00178/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00180/4, 0x00000040); + INSTANCE_WR(ctx, 0x00188/4, 0x00000040); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00003010); + INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); + INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); + INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); + INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); + INSTANCE_WR(ctx, 0x003f4/4, 0x00000001); + INSTANCE_WR(ctx, 0x00408/4, 0x46400000); + INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00430/4, 0x00011100); + for (i=0x0044c; i<=0x00488; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); + INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); + INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00538/4, 0x00000098); + INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); + for (i=0x005d8; i<=0x00614; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00618; i<=0x00654; 
i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00698; i<=0x006d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x006d8; i<=0x00714; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00718; i<=0x00754; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00798; i<=0x007d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x00828; i<=0x00834; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x00838; i<=0x00844; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x00858; i<=0x00864; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x00878; i<=0x00884; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00898/4, 0x00000002); + INSTANCE_WR(ctx, 0x008cc/4, 0x00000021); + INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); + INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); + INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); + INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); + INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); + INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); + INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); + INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); + INSTANCE_WR(ctx, 0x00a8c/4, 0x00000005); + INSTANCE_WR(ctx, 0x00a98/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00ab4/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ab8/4, 0x00005555); + INSTANCE_WR(ctx, 0x00abc/4, 0x00005555); + INSTANCE_WR(ctx, 0x00ac0/4, 0x00000001); + INSTANCE_WR(ctx, 0x00af8/4, 0x00000001); + for (i=0x016c0; i<=0x01738; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x03840; i<=0x05670; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x05680; i<=0x05a70; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x07e00; i<=0x09ff0; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0a000; i<=0x0a3f0; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x0c780; i<=0x0e970; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x0e980; i<=0x0ed70; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +static void nv4e_graph_context_init(drm_device_t *dev, struct mem_block *ctx) +{ + drm_nouveau_private_t *dev_priv = dev->dev_private; + int i; + + INSTANCE_WR(ctx, 0x00000/4, nouveau_chip_instance_get(dev, ctx)); + INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00030/4, 0x00000001); + INSTANCE_WR(ctx, 0x0011c/4, 0x20010001); + INSTANCE_WR(ctx, 0x00120/4, 0x0f73ef00); + INSTANCE_WR(ctx, 0x00128/4, 0x02008821); + INSTANCE_WR(ctx, 0x00158/4, 0x00000001); + INSTANCE_WR(ctx, 0x0015c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00160/4, 0x00000001); + INSTANCE_WR(ctx, 0x00164/4, 0x00000001); + INSTANCE_WR(ctx, 0x00168/4, 0x00000001); + INSTANCE_WR(ctx, 0x0016c/4, 0x00000001); + INSTANCE_WR(ctx, 0x00170/4, 0x00000001); + INSTANCE_WR(ctx, 0x00174/4, 0x00000001); + INSTANCE_WR(ctx, 0x00178/4, 0x00000040); + INSTANCE_WR(ctx, 0x0017c/4, 0x00000040); + INSTANCE_WR(ctx, 0x00180/4, 0x00000040); + INSTANCE_WR(ctx, 0x00188/4, 0x00000040); + INSTANCE_WR(ctx, 0x001d0/4, 0x0b0b0b0c); + INSTANCE_WR(ctx, 0x00340/4, 0x00040000); + INSTANCE_WR(ctx, 0x00350/4, 0x55555555); + INSTANCE_WR(ctx, 0x00354/4, 0x55555555); + INSTANCE_WR(ctx, 0x00358/4, 0x55555555); + INSTANCE_WR(ctx, 0x0035c/4, 0x55555555); + INSTANCE_WR(ctx, 0x00388/4, 0x00000008); + INSTANCE_WR(ctx, 0x0039c/4, 0x00001010); + INSTANCE_WR(ctx, 0x003cc/4, 0x00000111); + INSTANCE_WR(ctx, 0x003d0/4, 0x00080060); + INSTANCE_WR(ctx, 0x003ec/4, 0x00000080); + INSTANCE_WR(ctx, 0x003f0/4, 0xffff0000); + INSTANCE_WR(ctx, 0x003f4/4, 
0x00000001); + INSTANCE_WR(ctx, 0x00408/4, 0x46400000); + INSTANCE_WR(ctx, 0x00418/4, 0xffff0000); + INSTANCE_WR(ctx, 0x00424/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00428/4, 0x0fff0000); + INSTANCE_WR(ctx, 0x00430/4, 0x00011100); + for (i=0x0044c; i<=0x00488; i+=4) + INSTANCE_WR(ctx, i/4, 0x07ff0000); + INSTANCE_WR(ctx, 0x00494/4, 0x4b7fffff); + INSTANCE_WR(ctx, 0x004bc/4, 0x30201000); + INSTANCE_WR(ctx, 0x004c0/4, 0x70605040); + INSTANCE_WR(ctx, 0x004c4/4, 0xb8a89888); + INSTANCE_WR(ctx, 0x004c8/4, 0xf8e8d8c8); + INSTANCE_WR(ctx, 0x004dc/4, 0x40100000); + INSTANCE_WR(ctx, 0x004f8/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0052c/4, 0x435185d6); + INSTANCE_WR(ctx, 0x00530/4, 0x2155b699); + INSTANCE_WR(ctx, 0x00534/4, 0xfedcba98); + INSTANCE_WR(ctx, 0x00538/4, 0x00000098); + INSTANCE_WR(ctx, 0x00548/4, 0xffffffff); + INSTANCE_WR(ctx, 0x0054c/4, 0x00ff7000); + INSTANCE_WR(ctx, 0x00550/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x0055c/4, 0x00ff0000); + INSTANCE_WR(ctx, 0x00594/4, 0x00ffff00); + for (i=0x005d8; i<=0x00614; i+=4) + INSTANCE_WR(ctx, i/4, 0x00018488); + for (i=0x00618; i<=0x00654; i+=4) + INSTANCE_WR(ctx, i/4, 0x00028202); + for (i=0x00698; i<=0x006d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x0000aae4); + for (i=0x006d8; i<=0x00714; i+=4) + INSTANCE_WR(ctx, i/4, 0x01012000); + for (i=0x00718; i<=0x00754; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + for (i=0x00798; i<=0x007d4; i+=4) + INSTANCE_WR(ctx, i/4, 0x00100008); + for (i=0x00828; i<=0x00834; i+=4) + INSTANCE_WR(ctx, i/4, 0x0001bc80); + for (i=0x00838; i<=0x00844; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000202); + for (i=0x00858; i<=0x00864; i+=4) + INSTANCE_WR(ctx, i/4, 0x00000008); + for (i=0x00878; i<=0x00884; i+=4) + INSTANCE_WR(ctx, i/4, 0x00080008); + INSTANCE_WR(ctx, 0x00898/4, 0x00000002); + INSTANCE_WR(ctx, 0x008cc/4, 0x00000020); + INSTANCE_WR(ctx, 0x008d0/4, 0x030c30c3); + INSTANCE_WR(ctx, 0x008d4/4, 0x00011001); + INSTANCE_WR(ctx, 0x008e0/4, 0x3e020200); + INSTANCE_WR(ctx, 0x008e4/4, 0x00ffffff); + INSTANCE_WR(ctx, 0x008e8/4, 0x0c103f00); + INSTANCE_WR(ctx, 0x008f4/4, 0x00040000); + INSTANCE_WR(ctx, 0x0092c/4, 0x00008100); + INSTANCE_WR(ctx, 0x009b8/4, 0x00000001); + INSTANCE_WR(ctx, 0x009fc/4, 0x00001001); + INSTANCE_WR(ctx, 0x00a04/4, 0x00000003); + INSTANCE_WR(ctx, 0x00a08/4, 0x00888001); + INSTANCE_WR(ctx, 0x00a6c/4, 0x00000005); + INSTANCE_WR(ctx, 0x00a78/4, 0x0000ffff); + INSTANCE_WR(ctx, 0x00a94/4, 0x00005555); + INSTANCE_WR(ctx, 0x00a98/4, 0x00000001); + INSTANCE_WR(ctx, 0x00aa4/4, 0x00000001); + for (i=0x01668; i<=0x016e0; i+=8) + INSTANCE_WR(ctx, i/4, 0x3f800000); + for (i=0x03428; i<=0x05618; i+=24) + INSTANCE_WR(ctx, i/4, 0x00000001); + for (i=0x05628; i<=0x05a18; i+=16) + INSTANCE_WR(ctx, i/4, 0x3f800000); +} + +int +nv40_graph_context_create(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + void (*ctx_init)(drm_device_t *, struct mem_block *); + unsigned int ctx_size; + int i, chipset; + + chipset = (NV_READ(NV_PMC_BOOT_0) & 0x0ff00000) >> 20; + switch (chipset) { + case 0x40: + ctx_size = NV40_GRCTX_SIZE; + ctx_init = nv40_graph_context_init; + break; + case 0x43: + ctx_size = NV43_GRCTX_SIZE; + ctx_init = nv43_graph_context_init; + break; + case 0x4a: + ctx_size = NV4A_GRCTX_SIZE; + ctx_init = nv4a_graph_context_init; + break; + case 0x4e: + ctx_size = NV4E_GRCTX_SIZE; + ctx_init = nv4e_graph_context_init; + break; + default: + ctx_size = NV40_GRCTX_SIZE; + ctx_init = nv40_graph_context_init; + break; + } 
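+	/* ctx_size and ctx_init now describe this chipset's PGRAPH context
+	 * layout; e.g. on NV43 the block allocated below is 70*1024 bytes,
+	 * i.e. 17920 32-bit words cleared by the loop before ctx_init runs.
+	 */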
+ + /* Alloc and clear RAMIN to store the context */ + chan->ramin_grctx = nouveau_instmem_alloc(dev, ctx_size, 4); + if (!chan->ramin_grctx) + return DRM_ERR(ENOMEM); + for (i=0; i<ctx_size; i+=4) + INSTANCE_WR(chan->ramin_grctx, i/4, 0x00000000); + + /* Initialise default context values */ + ctx_init(dev, chan->ramin_grctx); + + return 0; +} + +/* Save current context (from PGRAPH) into the channel's context + *XXX: fails sometimes, not sure why.. + */ +void +nv40_graph_context_save_current(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + uint32_t instance; + int i; + + NV_WRITE(NV_PGRAPH_FIFO, 0); + + instance = NV_READ(0x40032C) & 0xFFFFF; + if (!instance) { + NV_WRITE(NV_PGRAPH_FIFO, 1); + return; + } + + NV_WRITE(0x400784, instance); + NV_WRITE(0x400310, NV_READ(0x400310) | 0x20); + NV_WRITE(0x400304, 1); + /* just in case, we don't want to spin in-kernel forever */ + for (i=0; i<1000; i++) { + if (NV_READ(0x40030C) == 0) + break; + } + if (i==1000) { + DRM_ERROR("failed to save current grctx to ramin\n"); + DRM_ERROR("instance = 0x%08x\n", NV_READ(0x40032C)); + DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); + NV_WRITE(NV_PGRAPH_FIFO, 1); + return; + } + + NV_WRITE(NV_PGRAPH_FIFO, 1); +} + +/* Restore the context for a specific channel into PGRAPH + * XXX: fails sometimes.. not sure why + */ +void +nv40_graph_context_restore(drm_device_t *dev, int channel) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + struct nouveau_fifo *chan = &dev_priv->fifos[channel]; + uint32_t instance; + int i; + + instance = nouveau_chip_instance_get(dev, chan->ramin_grctx); + + NV_WRITE(NV_PGRAPH_FIFO, 0); + NV_WRITE(0x400784, instance); + NV_WRITE(0x400310, NV_READ(0x400310) | 0x40); + NV_WRITE(0x400304, 1); + /* just in case, we don't want to spin in-kernel forever */ + for (i=0; i<1000; i++) { + if (NV_READ(0x40030C) == 0) + break; + } + if (i==1000) { + DRM_ERROR("failed to restore grctx for ch%d to PGRAPH\n", + channel); + DRM_ERROR("instance = 0x%08x\n", instance); + DRM_ERROR("0x40030C = 0x%08x\n", NV_READ(0x40030C)); + NV_WRITE(NV_PGRAPH_FIFO, 1); + return; + } + + + /* 0x40032C, no idea of it's exact function. Could simply be a + * record of the currently active PGRAPH context. It's currently + * unknown as to what bit 24 does. The nv ddx has it set, so we will + * set it here too. + */ + NV_WRITE(0x40032C, instance | 0x01000000); + /* 0x32E0 records the instance address of the active FIFO's PGRAPH + * context. If at any time this doesn't match 0x40032C, you will + * recieve PGRAPH_INTR_CONTEXT_SWITCH + */ + NV_WRITE(NV40_PFIFO_GRCTX_INSTANCE, instance); + NV_WRITE(NV_PGRAPH_FIFO, 1); +} + +/* Some voodoo that makes context switching work without the binary driver + * initialising the card first. + * + * It is possible to effect how the context is saved from PGRAPH into a block + * of instance memory by altering the values in these tables. This may mean + * that the context layout of each chipset is slightly different (at least + * NV40 and C51 are different). It would also be possible for chipsets to + * have an identical context layout, but pull the data from different PGRAPH + * registers. + * + * TODO: decode the meaning of the magic values, may provide clues about the + * differences between the various NV40 chipsets. + * TODO: one we have a better idea of how each chipset differs, perhaps think + * about unifying these instead of providing a separate table for each + * chip. 
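+ *
+ * The tables below cover NV40, NV43, NV4A and NV4E only; nv40_graph_init()
+ * skips the voodoo upload for any other NV4x chipset, and
+ * nv40_graph_context_create() falls back to the NV40 context layout there.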
+ * + * mmio-trace dumps from other nv4x/g7x/c5x cards very welcome :) + */ +static uint32_t nv40_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00408f65, 0x00409406, + 0x0040a268, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00110205, 0x0011420a, 0x00114210, 0x00110216, + 0x0012421b, 0x00120270, 0x001242c0, 0x00200040, 0x00100280, 0x00128100, + 0x00128120, 0x00128143, 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, + 0x00110400, 0x00104d10, 0x00500060, 0x00403b87, 0x0060000d, 0x004076e6, + 0x002000f0, 0x0060000a, 0x00200045, 0x00100620, 0x00108668, 0x0011466b, + 0x00120682, 0x0011068b, 0x00168691, 0x0010c6ae, 0x001206b4, 0x0020002a, + 0x001006c4, 0x001246f0, 0x002000c0, 0x00100700, 0x0010c3d7, 0x001043e1, + 0x00500060, 0x00405600, 0x00405684, 0x00600003, 0x00500067, 0x00600008, + 0x00500060, 0x00700082, 0x0020026c, 0x0060000a, 0x00104800, 0x00104901, + 0x00120920, 0x00200035, 0x00100940, 0x00148a00, 0x00104a14, 0x00200038, + 0x00100b00, 0x00138d00, 0x00104e00, 0x0012d600, 0x00105c00, 0x00104f06, + 0x0020031a, 0x0060000a, 0x00300000, 0x00200680, 0x00406c00, 0x00200684, + 0x00800001, 0x00200b62, 0x0060000a, 0x0020a0b0, 0x0040728a, 0x00201b68, + 0x00800041, 0x00407684, 0x00203e60, 0x00800002, 0x00408700, 0x00600006, + 0x00700003, 0x004080e6, 0x00700080, 0x0020031a, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a284, + 0x00700002, 0x00600004, 0x0040a268, 0x00700000, 0x00200000, 0x0060000a, + 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, + 0x00600007, 0x00409388, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, + 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, + 0x0040a406, 0x0040a505, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, + ~0 +}; + +static uint32_t nv43_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, + 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, + 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x00200020, 0x001006cc, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, + 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200233, 0x0060000a, + 0x00104800, 0x00108901, 0x00124920, 0x0020001f, 0x00100940, 0x00140965, + 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, 0x0010cd04, + 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 
0x00105c00, 0x00104f06, + 0x002002c8, 0x0060000a, 0x00300000, 0x00200680, 0x00407200, 0x00200684, + 0x00800001, 0x00200b10, 0x0060000a, 0x00203870, 0x0040788a, 0x00201350, + 0x00800041, 0x00407c84, 0x00201560, 0x00800002, 0x00408d00, 0x00600006, + 0x00700003, 0x004086e6, 0x00700080, 0x002002c8, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, + 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, + 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, + 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, + 0x00940400, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, + 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, + ~0 +}; + +static uint32_t nv4a_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, + 0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1, + 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, + 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, + 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, + 0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, + 0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, + 0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, + 0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a, + 0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100, + 0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a, + 0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, + 0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004, + 0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, + 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88, + 0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000, + 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, + 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05, + 0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0 +}; + +static uint32_t nv4e_ctx_voodoo[] = { + 0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, + 0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409565, 0x00409a06, + 0x0040a868, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, + 0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, + 0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, + 0x00110158, 0x00105401, 0x0020003a, 0x00100051, 
0x001040c5, 0x0010c1c4, + 0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, + 0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, + 0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, + 0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407ce6, 0x002000f1, + 0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, + 0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, + 0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, + 0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003, + 0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, + 0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, + 0x00140965, 0x00148a00, 0x00108a14, 0x00140b00, 0x00134b2c, 0x0010cd00, + 0x0010cd04, 0x00104d08, 0x00104d80, 0x00104e00, 0x00105c00, 0x00104f06, + 0x002002b2, 0x0060000a, 0x00300000, 0x00200080, 0x00407200, 0x00200084, + 0x00800001, 0x002004fa, 0x0060000a, 0x00201320, 0x0040788a, 0xfffffb06, + 0x00800029, 0x00407c84, 0x00200b20, 0x00800002, 0x00408d00, 0x00600006, + 0x00700003, 0x004086e6, 0x00700080, 0x002002b2, 0x0060000a, 0x00200004, + 0x00800001, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x0040a884, + 0x00700002, 0x00600004, 0x0040a868, 0x00700000, 0x00200000, 0x0060000a, + 0x00106002, 0x00700080, 0x00400a84, 0x00700002, 0x00400a68, 0x00500060, + 0x00600007, 0x00409988, 0x0060000f, 0x00000000, 0x00500060, 0x00200000, + 0x0060000a, 0x00700000, 0x00106001, 0x00700083, 0x00910880, 0x00901ffe, + 0x01940000, 0x00200020, 0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, + 0x0040aa06, 0x0040ab05, 0x00600009, 0x00700005, 0x00700006, 0x0060000e, + ~0 +}; + + +int +nv40_graph_init(drm_device_t *dev) +{ + drm_nouveau_private_t *dev_priv = + (drm_nouveau_private_t *)dev->dev_private; + uint32_t *ctx_voodoo; + uint32_t pg0220_inst; + int i, chipset; + + chipset = (NV_READ(NV_PMC_BOOT_0) & 0x0ff00000) >> 20; + DRM_DEBUG("chipset (from PMC_BOOT_0): NV%02X\n", chipset); + switch (chipset) { + case 0x40: ctx_voodoo = nv40_ctx_voodoo; break; + case 0x43: ctx_voodoo = nv43_ctx_voodoo; break; + case 0x4a: ctx_voodoo = nv4a_ctx_voodoo; break; + case 0x4e: ctx_voodoo = nv4e_ctx_voodoo; break; + default: + DRM_ERROR("Unknown ctx_voodoo for chipset 0x%02x\n", chipset); + ctx_voodoo = NULL; + break; + } + + /* Load the context voodoo onto the card */ + if (ctx_voodoo) { + DRM_DEBUG("Loading context-switch voodoo\n"); + i = 0; + + NV_WRITE(0x400324, 0); + while (ctx_voodoo[i] != ~0) { + NV_WRITE(0x400328, ctx_voodoo[i]); + i++; + } + } + + /* No context present currently */ + NV_WRITE(0x40032C, 0x00000000); + + /* No idea what this is for.. */ + dev_priv->fb_obj = nouveau_dma_object_create(dev, + 0, nouveau_mem_fb_amount(dev), + NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM); + pg0220_inst = nouveau_chip_instance_get(dev, + dev_priv->fb_obj->instance); + NV_WRITE(NV_PGRAPH_NV40_UNK220, pg0220_inst); + + return 0; +} + diff --git a/shared-core/r128_irq.c b/shared-core/r128_irq.c index de1597ec..87f8ca2b 100644 --- a/shared-core/r128_irq.c +++ b/shared-core/r128_irq.c @@ -1,5 +1,4 @@ -/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- - */ +/* r128_irq.c -- IRQ handling for radeon -*- linux-c -*- */ /* * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 
* diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index deeb67ed..17b11e7d 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -221,7 +221,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv) ADVANCE_RING(); } -static __inline__ void r128_emit_state(drm_r128_private_t * dev_priv) +static void r128_emit_state(drm_r128_private_t * dev_priv) { drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; unsigned int dirty = sarea_priv->dirty; diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index c65ffd59..0c04b5f8 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -242,26 +242,6 @@ static __inline__ int r300_check_range(unsigned reg, int count) return 0; } -/* - * we expect offsets passed to the framebuffer to be either within video - * memory or within AGP space - */ -static __inline__ int r300_check_offset(drm_radeon_private_t *dev_priv, - u32 offset) -{ - /* we realy want to check against end of video aperture - but this value is not being kept. - This code is correct for now (does the same thing as the - code that sets MC_FB_LOCATION) in radeon_cp.c */ - if (offset >= dev_priv->fb_location && - offset < (dev_priv->fb_location + dev_priv->fb_size)) - return 0; - if (offset >= dev_priv->gart_vm_start && - offset < (dev_priv->gart_vm_start + dev_priv->gart_size)) - return 0; - return 1; -} - static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * dev_priv, drm_radeon_kcmd_buffer_t @@ -290,7 +270,7 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t * case MARK_SAFE: break; case MARK_CHECK_OFFSET: - if (r300_check_offset(dev_priv, (u32) values[i])) { + if (!radeon_check_offset(dev_priv, (u32) values[i])) { DRM_ERROR ("Offset failed range check (reg=%04x sz=%d)\n", reg, sz); @@ -452,7 +432,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, i = 1; while ((k < narrays) && (i < (count + 1))) { i++; /* skip attribute field */ - if (r300_check_offset(dev_priv, payload[i])) { + if (!radeon_check_offset(dev_priv, payload[i])) { DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); @@ -463,7 +443,7 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv, if (k == narrays) break; /* have one more to process, they come in pairs */ - if (r300_check_offset(dev_priv, payload[i])) { + if (!radeon_check_offset(dev_priv, payload[i])) { DRM_ERROR ("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n", k, i); @@ -508,7 +488,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[2] << 10; - ret = r300_check_offset(dev_priv, offset); + ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt first offset is %08X\n", offset); return DRM_ERR(EINVAL); @@ -518,7 +498,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv, if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) && (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) { offset = cmd[3] << 10; - ret = r300_check_offset(dev_priv, offset); + ret = !radeon_check_offset(dev_priv, offset); if (ret) { DRM_ERROR("Invalid bitblt second offset is %08X\n", offset); return DRM_ERR(EINVAL); @@ -551,7 +531,7 @@ static __inline__ int r300_emit_indx_buffer(drm_radeon_private_t *dev_priv, DRM_ERROR("Invalid indx_buffer reg address 
%08X\n", cmd[1]); return DRM_ERR(EINVAL); } - ret = r300_check_offset(dev_priv, cmd[2]); + ret = !radeon_check_offset(dev_priv, cmd[2]); if (ret) { DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]); return DRM_ERR(EINVAL); diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index 6ea2a175..5c426fe0 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -306,6 +306,21 @@ extern int radeon_no_wb; extern drm_ioctl_desc_t radeon_ioctls[]; extern int radeon_max_ioctl; +/* Check whether the given hardware address is inside the framebuffer or the + * GART area. + */ +static __inline__ int radeon_check_offset(drm_radeon_private_t *dev_priv, + u64 off) +{ + u32 fb_start = dev_priv->fb_location; + u32 fb_end = fb_start + dev_priv->fb_size - 1; + u32 gart_start = dev_priv->gart_vm_start; + u32 gart_end = gart_start + dev_priv->gart_size - 1; + + return ((off >= fb_start && off <= fb_end) || + (off >= gart_start && off <= gart_end)); +} + /* radeon_cp.c */ extern int radeon_cp_init(DRM_IOCTL_ARGS); extern int radeon_cp_start(DRM_IOCTL_ARGS); diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index bf5e3d29..40b7d6ce 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -43,10 +43,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * u32 * offset) { u64 off = *offset; - u32 fb_start = dev_priv->fb_location; - u32 fb_end = fb_start + dev_priv->fb_size - 1; - u32 gart_start = dev_priv->gart_vm_start; - u32 gart_end = gart_start + dev_priv->gart_size - 1; + u32 fb_end = dev_priv->fb_location + dev_priv->fb_size - 1; struct drm_radeon_driver_file_fields *radeon_priv; /* Hrm ... the story of the offset ... So this function converts @@ -66,8 +63,7 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * /* First, the best case, the offset already lands in either the * framebuffer or the GART mapped space */ - if ((off >= fb_start && off <= fb_end) || - (off >= gart_start && off <= gart_end)) + if (radeon_check_offset(dev_priv, off)) return 0; /* Ok, that didn't happen... 
now check if we have a zero based @@ -81,11 +77,10 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t * /* Finally, assume we aimed at a GART offset if beyond the fb */ if (off > fb_end) - off = off - fb_end - 1 + gart_start; + off = off - fb_end - 1 + dev_priv->gart_vm_start; /* Now recheck and fail if out of bounds */ - if ((off >= fb_start && off <= fb_end) || - (off >= gart_start && off <= gart_end)) { + if (radeon_check_offset(dev_priv, off)) { DRM_DEBUG("offset fixed up to 0x%x\n", (unsigned int)off); *offset = off; return 0; diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 01121b92..5632b5c8 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -32,6 +32,8 @@ #define SAVAGE_EVENT_USEC_TIMEOUT 5000000 /* 5s */ #define SAVAGE_FREELIST_DEBUG 0 +static int savage_do_cleanup_bci(drm_device_t *dev); + static int savage_bci_wait_fifo_shadow(drm_savage_private_t *dev_priv, unsigned int n) { @@ -895,7 +897,7 @@ static int savage_do_init_bci(drm_device_t *dev, drm_savage_init_t *init) return 0; } -int savage_do_cleanup_bci(drm_device_t *dev) +static int savage_do_cleanup_bci(drm_device_t *dev) { drm_savage_private_t *dev_priv = dev->dev_private; diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index 560f934e..88c571e1 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -212,7 +212,6 @@ extern int savage_driver_load(drm_device_t *dev, unsigned long chipset); extern int savage_driver_firstopen(drm_device_t *dev); extern void savage_driver_lastclose(drm_device_t *dev); extern int savage_driver_unload(drm_device_t *dev); -extern int savage_do_cleanup_bci(drm_device_t *dev); extern void savage_reclaim_buffers(drm_device_t *dev, DRMFILE filp); /* state functions */ diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index 5c8a43eb..acc98f89 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -993,7 +993,7 @@ int savage_bci_cmdbuf(DRM_IOCTL_ARGS) if (cmdbuf.size) { kcmd_addr = drm_alloc(cmdbuf.size * 8, DRM_MEM_DRIVER); if (kcmd_addr == NULL) - return ENOMEM; + return DRM_ERR(ENOMEM); if (DRM_COPY_FROM_USER(kcmd_addr, cmdbuf.cmd_addr, cmdbuf.size * 8)) diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h index d1cdc19c..006d148c 100644 --- a/shared-core/sis_drv.h +++ b/shared-core/sis_drv.h @@ -57,9 +57,9 @@ enum sis_family { typedef struct drm_sis_private { drm_local_map_t *mmio; - unsigned idle_fault; + unsigned int idle_fault; drm_sman_t sman; - unsigned long chipset; + unsigned int chipset; int vram_initialized; int agp_initialized; unsigned long vram_offset; diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c index c6e7aa77..90dbb6a2 100644 --- a/shared-core/via_dma.c +++ b/shared-core/via_dma.c @@ -489,6 +489,7 @@ static int via_hook_segment(drm_via_private_t *dev_priv, VIA_WRITE(VIA_REG_TRANSET, (HC_ParaType_PreCR << 16)); VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); + VIA_READ(VIA_REG_TRANSPACE); } } return paused; @@ -572,8 +573,9 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv) VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_hi); VIA_WRITE(VIA_REG_TRANSPACE, pause_addr_lo); - + DRM_WRITEMEMORYBARRIER(); VIA_WRITE(VIA_REG_TRANSPACE, command | HC_HAGPCMNT_MASK); + VIA_READ(VIA_REG_TRANSPACE); } static void via_pad_cache(drm_via_private_t *dev_priv, int qwords) diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h index ee92ff69..16421d74 100644 --- a/shared-core/via_drm.h +++ 
b/shared-core/via_drm.h @@ -42,11 +42,11 @@ * backwards incompatibilities, (which should be avoided whenever possible). */ -#define VIA_DRM_DRIVER_DATE "20060616" +#define VIA_DRM_DRIVER_DATE "20061227" #define VIA_DRM_DRIVER_MAJOR 2 -#define VIA_DRM_DRIVER_MINOR 10 -#define VIA_DRM_DRIVER_PATCHLEVEL 2 +#define VIA_DRM_DRIVER_MINOR 11 +#define VIA_DRM_DRIVER_PATCHLEVEL 0 #define VIA_DRM_DRIVER_VERSION (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR)) #define VIA_NR_SAREA_CLIPRECTS 8 diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h index 18d2a331..7a8f2c34 100644 --- a/shared-core/via_drv.h +++ b/shared-core/via_drv.h @@ -83,7 +83,7 @@ typedef struct drm_via_private { char pci_buf[VIA_PCI_BUF_SIZE]; const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; uint32_t num_fire_offsets; - int pro_group_a; + int chipset; drm_via_irq_t via_irqs[VIA_NUM_IRQS]; unsigned num_irqs; maskarray_t *irq_masks; @@ -105,8 +105,9 @@ typedef struct drm_via_private { } drm_via_private_t; enum via_family { - VIA_OTHER = 0, - VIA_PRO_GROUP_A, + VIA_OTHER = 0, /* Baseline */ + VIA_PRO_GROUP_A, /* Another video engine and DMA commands */ + VIA_DX9_0 /* Same video as pro_group_a, but 3D is unsupported */ }; /* VIA MMIO register access */ diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c index db60b28e..2ac86970 100644 --- a/shared-core/via_irq.c +++ b/shared-core/via_irq.c @@ -267,13 +267,17 @@ void via_driver_irq_preinstall(drm_device_t * dev) dev_priv->irq_enable_mask = VIA_IRQ_VBLANK_ENABLE; dev_priv->irq_pending_mask = VIA_IRQ_VBLANK_PENDING; - dev_priv->irq_masks = (dev_priv->pro_group_a) ? - via_pro_group_a_irqs : via_unichrome_irqs; - dev_priv->num_irqs = (dev_priv->pro_group_a) ? - via_num_pro_group_a : via_num_unichrome; - dev_priv->irq_map = (dev_priv->pro_group_a) ? 
- via_irqmap_pro_group_a : via_irqmap_unichrome; - + if (dev_priv->chipset == VIA_PRO_GROUP_A || + dev_priv->chipset == VIA_DX9_0) { + dev_priv->irq_masks = via_pro_group_a_irqs; + dev_priv->num_irqs = via_num_pro_group_a; + dev_priv->irq_map = via_irqmap_pro_group_a; + } else { + dev_priv->irq_masks = via_unichrome_irqs; + dev_priv->num_irqs = via_num_unichrome; + dev_priv->irq_map = via_irqmap_unichrome; + } + for(i=0; i < dev_priv->num_irqs; ++i) { atomic_set(&cur_irq->irq_received, 0); cur_irq->enable_mask = dev_priv->irq_masks[i][0]; diff --git a/shared-core/via_map.c b/shared-core/via_map.c index 71967d6c..a37f5fd2 100644 --- a/shared-core/via_map.c +++ b/shared-core/via_map.c @@ -107,8 +107,7 @@ int via_driver_load(drm_device_t *dev, unsigned long chipset) dev->dev_private = (void *)dev_priv; - if (chipset == VIA_PRO_GROUP_A) - dev_priv->pro_group_a = 1; + dev_priv->chipset = chipset; #ifdef VIA_HAVE_CORE_MM ret = drm_sman_init(&dev_priv->sman, 2, 12, 8); diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c index 1a5edd4f..b5a1d33a 100644 --- a/shared-core/via_verifier.c +++ b/shared-core/via_verifier.c @@ -311,6 +311,7 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) unsigned long lo = ~0, hi = 0, tmp; uint32_t *addr, *pitch, *height, tex; unsigned i; + int npot; if (end > 9) end = 9; @@ -321,12 +322,15 @@ static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq) &(cur_seq->t_addr[tex = cur_seq->texture][start]); pitch = &(cur_seq->pitch[tex][start]); height = &(cur_seq->height[tex][start]); - + npot = cur_seq->tex_npot[tex]; for (i = start; i <= end; ++i) { tmp = *addr++; if (tmp < lo) lo = tmp; - tmp += (*height++ << *pitch++); + if (i == 0 && npot) + tmp += (*height++ * *pitch++); + else + tmp += (*height++ << *pitch++); if (tmp > hi) hi = tmp; } @@ -448,13 +452,21 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq) return 0; case check_texture_addr3: cur_seq->unfinished = tex_address; - tmp = ((cmd >> 24) - 0x2B); - cur_seq->pitch[cur_seq->texture][tmp] = - (cmd & 0x00F00000) >> 20; - if (!tmp && (cmd & 0x000FFFFF)) { - DRM_ERROR - ("Unimplemented texture level 0 pitch mode.\n"); - return 2; + tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit); + if (tmp == 0 && + (cmd & HC_HTXnEnPit_MASK)) { + cur_seq->pitch[cur_seq->texture][tmp] = + (cmd & HC_HTXnLnPit_MASK); + cur_seq->tex_npot[cur_seq->texture] = 1; + } else { + cur_seq->pitch[cur_seq->texture][tmp] = + (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT; + cur_seq->tex_npot[cur_seq->texture] = 0; + if (cmd & 0x000FFFFF) { + DRM_ERROR + ("Unimplemented texture level 0 pitch mode.\n"); + return 2; + } } return 0; case check_texture_addr4: @@ -966,7 +978,13 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, uint32_t cmd; const uint32_t *buf_end = buf + (size >> 2); verifier_state_t state = state_command; - int pro_group_a = dev_priv->pro_group_a; + int cme_video; + int supported_3d; + + cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A || + dev_priv->chipset == VIA_DX9_0); + + supported_3d = dev_priv->chipset != VIA_DX9_0; hc_state->dev = dev; hc_state->unfinished = no_sequence; @@ -991,17 +1009,21 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size, state = via_check_vheader6(&buf, buf_end); break; case state_command: - if (HALCYON_HEADER2 == (cmd = *buf)) + if ((HALCYON_HEADER2 == (cmd = *buf)) && + supported_3d) state = state_header2; else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1) state = state_header1; - 
else if (pro_group_a + else if (cme_video && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5) state = state_vheader5; - else if (pro_group_a + else if (cme_video && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6) state = state_vheader6; - else { + else if ((cmd == HALCYON_HEADER2) && !supported_3d) { + DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n"); + state = state_error; + } else { DRM_ERROR ("Invalid / Unimplemented DMA HEADER command. 0x%x\n", cmd); diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h index 96708a39..84497c44 100644 --- a/shared-core/via_verifier.h +++ b/shared-core/via_verifier.h @@ -45,6 +45,7 @@ typedef struct { uint32_t tex_level_lo[2]; uint32_t tex_level_hi[2]; uint32_t tex_palette_size[2]; + uint32_t tex_npot[2]; drm_via_sequence_t unfinished; int agp_texture; int multitex; |
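
The r300_cmdbuf.c, radeon_drv.h and radeon_state.c hunks above replace the driver-local r300_check_offset() with a shared radeon_check_offset() helper; note the inverted polarity (the old helper returned non-zero when an offset was out of range, the new one when it lies inside the framebuffer or GART aperture), which is why every call site gains a leading '!'. Below is a minimal, self-contained user-space sketch of the same range check; the aperture values and the fake_radeon_private type are hypothetical stand-ins for drm_radeon_private_t, not part of the commit.

/*
 * Illustrative sketch of the check consolidated into radeon_check_offset().
 * The fb_location/fb_size/gart_vm_start/gart_size values below are made up;
 * in the driver they come from drm_radeon_private_t.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_radeon_private {               /* stand-in for drm_radeon_private_t */
	uint32_t fb_location, fb_size;     /* framebuffer aperture */
	uint32_t gart_vm_start, gart_size; /* GART aperture */
};

/* Same logic as radeon_check_offset(): non-zero if 'off' lies inside either
 * the framebuffer or the GART range (both ends inclusive). */
static int check_offset(const struct fake_radeon_private *p, uint64_t off)
{
	uint32_t fb_start = p->fb_location;
	uint32_t fb_end = fb_start + p->fb_size - 1;
	uint32_t gart_start = p->gart_vm_start;
	uint32_t gart_end = gart_start + p->gart_size - 1;

	return (off >= fb_start && off <= fb_end) ||
	       (off >= gart_start && off <= gart_end);
}

int main(void)
{
	/* Hypothetical apertures: 64 MiB of VRAM at 0, 32 MiB of GART at 256 MiB. */
	struct fake_radeon_private p = {
		.fb_location = 0x00000000, .fb_size = 64 << 20,
		.gart_vm_start = 0x10000000, .gart_size = 32 << 20,
	};

	/* Callers such as r300_emit_carefully_checked_packet0() treat a zero
	 * return as an error, hence the '!radeon_check_offset(...)' tests. */
	printf("0x00100000 in range: %d\n", check_offset(&p, 0x00100000));
	printf("0x0fffffff in range: %d\n", check_offset(&p, 0x0fffffff));
	return 0;
}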
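
The via_dma.c hunks add a DRM_WRITEMEMORYBARRIER() and a read-back of VIA_REG_TRANSPACE after the register writes; reading a register back after writing it is the usual way to flush posted MMIO writes before the CPU continues. The sketch below illustrates that idiom only, using a fake in-memory register file so it compiles and runs; the helper names and register offset are hypothetical, not the VIA driver's API.

/* Illustrative sketch of the posting-read idiom added in via_hook_segment()
 * and via_cmdbuf_start(); FAKE_REG_TRANSPACE and fake_regs are made up. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_REG_TRANSPACE 0x08    /* hypothetical register offset */

static uint32_t fake_regs[64];     /* stands in for the mapped MMIO BAR */

static void mmio_write32(uint32_t reg, uint32_t val)
{
	fake_regs[reg / 4] = val;  /* real code would write to the device here */
}

static uint32_t mmio_read32(uint32_t reg)
{
	return fake_regs[reg / 4];
}

static void emit_pause_address(uint32_t hi, uint32_t lo)
{
	mmio_write32(FAKE_REG_TRANSPACE, hi);
	mmio_write32(FAKE_REG_TRANSPACE, lo);
	/* Posting read: on real hardware this forces the writes above out to
	 * the device before the CPU proceeds, which is what the added
	 * VIA_READ(VIA_REG_TRANSPACE) accomplishes. */
	(void)mmio_read32(FAKE_REG_TRANSPACE);
}

int main(void)
{
	emit_pause_address(0xdeadbeef, 0x12345678);
	printf("last value written: 0x%08x\n", mmio_read32(FAKE_REG_TRANSPACE));
	return 0;
}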