157 files changed, 5343 insertions, 2953 deletions
diff --git a/bsd-core/ati_pcigart.c b/bsd-core/ati_pcigart.c index db19a75d..bb0c46e2 100644 --- a/bsd-core/ati_pcigart.c +++ b/bsd-core/ati_pcigart.c @@ -35,7 +35,7 @@  #define ATI_PCIGART_PAGE_SIZE		4096	/* PCI GART page size */ -int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_init(drm_device_t *dev, struct drm_ati_pcigart_info *gart_info)  {  	unsigned long pages;  	u32 *pci_gart = NULL, page_base; @@ -94,7 +94,7 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)  	return 1;  } -int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_cleanup(drm_device_t *dev, struct drm_ati_pcigart_info *gart_info)  {  	if (dev->sg == NULL) {  		DRM_ERROR( "no scatter/gather memory!\n" ); diff --git a/bsd-core/drmP.h b/bsd-core/drmP.h index 8a768f0c..99457bf8 100644 --- a/bsd-core/drmP.h +++ b/bsd-core/drmP.h @@ -76,7 +76,11 @@ typedef struct drm_file drm_file_t;  #if defined(__FreeBSD__)  #include <sys/rman.h>  #include <sys/memrange.h> +#if __FreeBSD_version >= 800004 +#include <dev/agp/agpvar.h> +#else /* __FreeBSD_version >= 800004 */  #include <pci/agpvar.h> +#endif /* __FreeBSD_version >= 800004 */  #include <sys/agpio.h>  #if __FreeBSD_version >= 500000  #include <sys/mutex.h> @@ -108,6 +112,7 @@ typedef struct drm_file drm_file_t;  #include "drm.h"  #include "drm_linux_list.h"  #include "drm_atomic.h" +#include "drm_internal.h"  #ifdef __FreeBSD__  #include <opt_drm.h> @@ -383,7 +388,7 @@ for ( ret = 0 ; !ret && !(condition) ; ) {			\  	DRM_UNLOCK();						\  	mtx_lock(&dev->irq_lock);				\  	if (!(condition))					\ -	   ret = -msleep(&(queue), &dev->irq_lock, 		\ +	   ret = -mtx_sleep(&(queue), &dev->irq_lock, 		\  			 PZERO | PCATCH, "drmwtq", (timeout));	\  	mtx_unlock(&dev->irq_lock);				\  	DRM_LOCK();						\ @@ -609,14 +614,14 @@ typedef struct drm_vbl_sig {  #define DRM_ATI_GART_PCIE 2  #define DRM_ATI_GART_IGP  3 -typedef struct ati_pcigart_info { +struct drm_ati_pcigart_info {  	int gart_table_location;  	int gart_reg_if;  	void *addr;  	dma_addr_t bus_addr;  	drm_local_map_t mapping;  	int table_size; -} drm_ati_pcigart_info; +};  struct drm_driver_info {  	int	(*load)(struct drm_device *, unsigned long flags); @@ -645,6 +650,7 @@ struct drm_driver_info {  	void	(*irq_uninstall)(drm_device_t *dev);  	void	(*irq_handler)(DRM_IRQ_ARGS);  	int	(*vblank_wait)(drm_device_t *dev, unsigned int *sequence); +	int	(*vblank_wait2)(drm_device_t *dev, unsigned int *sequence);  	drm_pci_id_list_t *id_entry;	/* PCI ID, name, and chipset private */ @@ -681,6 +687,7 @@ struct drm_driver_info {  	unsigned use_dma_queue :1;  	unsigned use_irq :1;  	unsigned use_vbl_irq :1; +	unsigned use_vbl_irq2 :1;  	unsigned use_mtrr :1;  }; @@ -920,9 +927,9 @@ extern int		drm_sysctl_cleanup(drm_device_t *dev);  /* ATI PCIGART support (ati_pcigart.c) */  int	drm_ati_pcigart_init(drm_device_t *dev, -			     drm_ati_pcigart_info *gart_info); +				struct drm_ati_pcigart_info *gart_info);  int	drm_ati_pcigart_cleanup(drm_device_t *dev, -				drm_ati_pcigart_info *gart_info); +				struct drm_ati_pcigart_info *gart_info);  /* Locking IOCTL support (drm_drv.c) */  int	drm_lock(drm_device_t *dev, void *data, struct drm_file *file_priv); diff --git a/bsd-core/drm_agpsupport.c b/bsd-core/drm_agpsupport.c index 6f963b9c..4b921322 100644 --- a/bsd-core/drm_agpsupport.c +++ b/bsd-core/drm_agpsupport.c @@ -36,7 +36,11 @@  #include "drmP.h"  #ifdef __FreeBSD__ +#if __FreeBSD_version >= 800004 +#include 
<dev/agp/agpreg.h> +#else /* __FreeBSD_version >= 800004 */  #include <pci/agpreg.h> +#endif /* __FreeBSD_version >= 800004 */  #include <dev/pci/pcireg.h>  #endif diff --git a/bsd-core/drm_drv.c b/bsd-core/drm_drv.c index d6868b9c..8466ce33 100644 --- a/bsd-core/drm_drv.c +++ b/bsd-core/drm_drv.c @@ -403,17 +403,6 @@ static int drm_firstopen(drm_device_t *dev)  			return i;  	} -	dev->counters  = 6; -	dev->types[0]  = _DRM_STAT_LOCK; -	dev->types[1]  = _DRM_STAT_OPENS; -	dev->types[2]  = _DRM_STAT_CLOSES; -	dev->types[3]  = _DRM_STAT_IOCTLS; -	dev->types[4]  = _DRM_STAT_LOCKS; -	dev->types[5]  = _DRM_STAT_UNLOCKS; - -	for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ ) -		atomic_set( &dev->counts[i], 0 ); -  	for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {  		dev->magiclist[i].head = NULL;  		dev->magiclist[i].tail = NULL; @@ -495,10 +484,10 @@ static int drm_lastclose(drm_device_t *dev)  	}  	TAILQ_FOREACH_SAFE(map, &dev->maplist, link, mapsave) { -		drm_rmmap(dev, map); +		if (!(map->flags & _DRM_DRIVER)) +			drm_rmmap(dev, map);  	} -  	drm_dma_takedown(dev);  	if ( dev->lock.hw_lock ) {  		dev->lock.hw_lock = NULL; /* SHM removed */ @@ -511,7 +500,7 @@ static int drm_lastclose(drm_device_t *dev)  static int drm_load(drm_device_t *dev)  { -	int retcode; +	int i, retcode;  	DRM_DEBUG( "\n" ); @@ -536,6 +525,17 @@ static int drm_load(drm_device_t *dev)  #endif  	TAILQ_INIT(&dev->files); +	dev->counters  = 6; +	dev->types[0]  = _DRM_STAT_LOCK; +	dev->types[1]  = _DRM_STAT_OPENS; +	dev->types[2]  = _DRM_STAT_CLOSES; +	dev->types[3]  = _DRM_STAT_IOCTLS; +	dev->types[4]  = _DRM_STAT_LOCKS; +	dev->types[5]  = _DRM_STAT_UNLOCKS; + +	for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ ) +		atomic_set( &dev->counts[i], 0 ); +  	if (dev->driver.load != NULL) {  		DRM_LOCK();  		/* Shared code returns -errno. */ @@ -772,7 +772,7 @@ int drm_close(struct cdev *kdev, int flags, int fmt, DRM_STRUCTPROC *p)  			}  				/* Contention */  #if defined(__FreeBSD__) && __FreeBSD_version > 500000 -			retcode = msleep((void *)&dev->lock.lock_queue, +			retcode = mtx_sleep((void *)&dev->lock.lock_queue,  			    &dev->dev_lock, PZERO | PCATCH, "drmlk2", 0);  #else  			retcode = tsleep((void *)&dev->lock.lock_queue, diff --git a/bsd-core/drm_internal.h b/bsd-core/drm_internal.h new file mode 120000 index 00000000..b30ef94a --- /dev/null +++ b/bsd-core/drm_internal.h @@ -0,0 +1 @@ +../shared-core/drm_internal.h
\ No newline at end of file diff --git a/bsd-core/drm_irq.c b/bsd-core/drm_irq.c index 0772445a..40d0b71f 100644 --- a/bsd-core/drm_irq.c +++ b/bsd-core/drm_irq.c @@ -211,17 +211,43 @@ int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv)  {  	drm_wait_vblank_t *vblwait = data;  	struct timeval now; -	int ret, flags; +	int ret = 0; +	int flags, seq;  	if (!dev->irq_enabled)  		return EINVAL; -	if (vblwait->request.type & _DRM_VBLANK_RELATIVE) { -		vblwait->request.sequence += atomic_read(&dev->vbl_received); -		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; +	if (vblwait->request.type & +	    ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { +		DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", +		    vblwait->request.type, +		    (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); +		return EINVAL;  	}  	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; + +	if ((flags & _DRM_VBLANK_SECONDARY) && !dev->driver.use_vbl_irq2) +		return EINVAL; +	 +	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? +	    &dev->vbl_received2 : &dev->vbl_received); + +	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { +	case _DRM_VBLANK_RELATIVE: +		vblwait->request.sequence += seq; +		vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; +	case _DRM_VBLANK_ABSOLUTE: +		break; +	default: +		return EINVAL; +	} + +	if ((flags & _DRM_VBLANK_NEXTONMISS) && +	    (seq - vblwait->request.sequence) <= (1<<23)) { +		vblwait->request.sequence = seq + 1; +	} +  	if (flags & _DRM_VBLANK_SIGNAL) {  #if 0 /* disabled */  		drm_vbl_sig_t *vbl_sig = malloc(sizeof(drm_vbl_sig_t), M_DRM, @@ -244,8 +270,14 @@ int drm_wait_vblank(drm_device_t *dev, void *data, struct drm_file *file_priv)  	} else {  		DRM_LOCK();  		/* shared code returns -errno */ -		ret = -dev->driver.vblank_wait(dev, -		    &vblwait->request.sequence); +		if (flags & _DRM_VBLANK_SECONDARY) { +			if (dev->driver.vblank_wait2) +				ret = -dev->driver.vblank_wait2(dev, +				    &vblwait->request.sequence); +		} else if (dev->driver.vblank_wait) +			ret = -dev->driver.vblank_wait(dev, +			    &vblwait->request.sequence); +  		DRM_UNLOCK();  		microtime(&now); @@ -303,7 +335,7 @@ static void drm_locked_task(void *context, int pending __unused)  		/* Contention */  #if defined(__FreeBSD__) && __FreeBSD_version > 500000 -		ret = msleep((void *)&dev->lock.lock_queue, &dev->dev_lock, +		ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,  		    PZERO | PCATCH, "drmlk2", 0);  #else  		ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH, diff --git a/bsd-core/drm_lock.c b/bsd-core/drm_lock.c index fb86fc68..9731ff92 100644 --- a/bsd-core/drm_lock.c +++ b/bsd-core/drm_lock.c @@ -140,7 +140,7 @@ int drm_lock(drm_device_t *dev, void *data, struct drm_file *file_priv)  		/* Contention */  #if defined(__FreeBSD__) && __FreeBSD_version > 500000 -		ret = msleep((void *)&dev->lock.lock_queue, &dev->dev_lock, +		ret = mtx_sleep((void *)&dev->lock.lock_queue, &dev->dev_lock,  		    PZERO | PCATCH, "drmlk2", 0);  #else  		ret = tsleep((void *)&dev->lock.lock_queue, PZERO | PCATCH, diff --git a/bsd-core/i915_drv.c b/bsd-core/i915_drv.c index d42b2076..e8897fbe 100644 --- a/bsd-core/i915_drv.c +++ b/bsd-core/i915_drv.c @@ -46,8 +46,9 @@ static void i915_configure(drm_device_t *dev)  	dev->driver.load		= i915_driver_load;  	dev->driver.preclose		= i915_driver_preclose;  	dev->driver.lastclose		= i915_driver_lastclose; -	dev->driver.device_is_agp	= i915_driver_device_is_agp, +	dev->driver.device_is_agp	= 
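The drm_wait_vblank() rework in the bsd-core/drm_irq.c hunk above treats the vblank counter as a wrapping 32-bit value: unsigned subtraction yields a small result only when the requested sequence has already passed, which is exactly what the _DRM_VBLANK_NEXTONMISS test relies on. A minimal sketch of that idiom (helper name hypothetical, not part of this patch):

#include <stdint.h>

/* Nonzero when counter `cur` has reached `requested`, tolerating
 * wraparound; mirrors the (seq - sequence) <= (1<<23) test above. */
static int vblank_reached(uint32_t cur, uint32_t requested)
{
	return (uint32_t)(cur - requested) <= (1u << 23);
}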
i915_driver_device_is_agp;  	dev->driver.vblank_wait		= i915_driver_vblank_wait; +	dev->driver.vblank_wait2	= i915_driver_vblank_wait2;  	dev->driver.irq_preinstall	= i915_driver_irq_preinstall;  	dev->driver.irq_postinstall	= i915_driver_irq_postinstall;  	dev->driver.irq_uninstall	= i915_driver_irq_uninstall; @@ -68,6 +69,7 @@ static void i915_configure(drm_device_t *dev)  	dev->driver.use_mtrr		= 1;  	dev->driver.use_irq		= 1;  	dev->driver.use_vbl_irq		= 1; +	dev->driver.use_vbl_irq2	= 1;  }  #ifdef __FreeBSD__ diff --git a/bsd-core/radeon_drv.c b/bsd-core/radeon_drv.c index f66bc795..93f875c5 100644 --- a/bsd-core/radeon_drv.c +++ b/bsd-core/radeon_drv.c @@ -53,6 +53,7 @@ static void radeon_configure(drm_device_t *dev)  	dev->driver.postclose		= radeon_driver_postclose;  	dev->driver.lastclose		= radeon_driver_lastclose;  	dev->driver.vblank_wait		= radeon_driver_vblank_wait; +	dev->driver.vblank_wait2	= radeon_driver_vblank_wait2;  	dev->driver.irq_preinstall	= radeon_driver_irq_preinstall;  	dev->driver.irq_postinstall	= radeon_driver_irq_postinstall;  	dev->driver.irq_uninstall	= radeon_driver_irq_uninstall; @@ -76,6 +77,7 @@ static void radeon_configure(drm_device_t *dev)  	dev->driver.use_dma		= 1;  	dev->driver.use_irq		= 1;  	dev->driver.use_vbl_irq		= 1; +	dev->driver.use_vbl_irq2	= 1;  }  #ifdef __FreeBSD__ diff --git a/libdrm/xf86drm.c b/libdrm/xf86drm.c index 7001a0ef..e3550de7 100644 --- a/libdrm/xf86drm.c +++ b/libdrm/xf86drm.c @@ -2432,7 +2432,7 @@ int drmFenceFlush(int fd, drmFence *fence, unsigned flush_type)      fence->fence_class = arg.fence_class;      fence->type = arg.type;      fence->signaled = arg.signaled; -    return 0; +    return arg.error;  }  int drmFenceUpdate(int fd, drmFence *fence) @@ -2487,6 +2487,7 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)      fence->fence_class = arg.fence_class;      fence->type = arg.type;      fence->signaled = arg.signaled; +    fence->sequence = arg.sequence;      return 0;  } @@ -2495,7 +2496,50 @@ int drmFenceEmit(int fd, unsigned flags, drmFence *fence, unsigned emit_type)   * DRM_FENCE_FLAG_WAIT_LAZY   * DRM_FENCE_FLAG_WAIT_IGNORE_SIGNALS   */ + +#define DRM_IOCTL_TIMEOUT_USEC 3000000UL + +static unsigned long +drmTimeDiff(struct timeval *now, struct timeval *then) +{ +    uint64_t val; + +    val = now->tv_sec - then->tv_sec; +    val *= 1000000LL; +    val += now->tv_usec; +    val -= then->tv_usec; + +    return (unsigned long) val; +} + +static int +drmIoctlTimeout(int fd, unsigned long request, void *argp) +{ +    int haveThen = 0; +    struct timeval then, now; +    int ret; + +    do { +	ret = ioctl(fd, request, argp); +	if (ret != 0 && errno == EAGAIN) { +	    if (!haveThen) { +		gettimeofday(&then, NULL); +		haveThen = 1; +	    } +	    gettimeofday(&now, NULL); +	} +    } while (ret != 0 && errno == EAGAIN &&  +	     drmTimeDiff(&now, &then) < DRM_IOCTL_TIMEOUT_USEC); +     +    if (ret != 0) +	return ((errno == EAGAIN) ? 
-EBUSY : -errno); + +    return 0; +} +	 + +  int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)  {      drm_fence_arg_t arg; @@ -2516,17 +2560,15 @@ int drmFenceWait(int fd, unsigned flags, drmFence *fence, unsigned flush_type)      arg.type = flush_type;      arg.flags = flags; -    do { -	ret = ioctl(fd, DRM_IOCTL_FENCE_WAIT, &arg); -    } while (ret != 0 && errno == EAGAIN); +    ret = drmIoctlTimeout(fd, DRM_IOCTL_FENCE_WAIT, &arg);      if (ret) -	return -errno; +	return ret;      fence->fence_class = arg.fence_class;      fence->type = arg.type;      fence->signaled = arg.signaled; -    return 0; +    return arg.error;  }      static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf) @@ -2536,7 +2578,7 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)      buf->size = rep->size;      buf->offset = rep->offset;      buf->mapHandle = rep->arg_handle; -    buf->mask = rep->mask; +    buf->proposedFlags = rep->proposed_flags;      buf->start = rep->buffer_start;      buf->fenceFlags = rep->fence_flags;      buf->replyFlags = rep->rep_flags; @@ -2550,7 +2592,7 @@ static void drmBOCopyReply(const struct drm_bo_info_rep *rep, drmBO *buf)  int drmBOCreate(int fd, unsigned long size,  		unsigned pageAlignment, void *user_buffer, -		uint64_t mask, +		uint64_t flags,  		unsigned hint, drmBO *buf)  {      struct drm_bo_create_arg arg; @@ -2560,7 +2602,7 @@ int drmBOCreate(int fd, unsigned long size,      memset(buf, 0, sizeof(*buf));      memset(&arg, 0, sizeof(arg)); -    req->mask = mask; +    req->flags = flags;      req->hint = hint;      req->size = size;      req->page_alignment = pageAlignment; @@ -2568,15 +2610,12 @@ int drmBOCreate(int fd, unsigned long size,      buf->virtual = NULL; -    do { -	ret = ioctl(fd, DRM_IOCTL_BO_CREATE, &arg); -    } while (ret != 0 && errno == EAGAIN); - +    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_CREATE, &arg);      if (ret) -	return -errno; -     +	return ret; +      drmBOCopyReply(rep, buf); -    buf->mapVirtual = NULL; +    buf->virtual = user_buffer;      buf->mapCount = 0;      return 0; @@ -2606,7 +2645,7 @@ int drmBOUnreference(int fd, drmBO *buf)  {      struct drm_bo_handle_arg arg; -    if (buf->mapVirtual) { +    if (buf->mapVirtual && buf->mapHandle) {  	(void) munmap(buf->mapVirtual, buf->start + buf->size);  	buf->mapVirtual = NULL;  	buf->virtual = NULL; @@ -2665,12 +2704,9 @@ int drmBOMap(int fd, drmBO *buf, unsigned mapFlags, unsigned mapHint,       * This IOCTL synchronizes the buffer.       
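With drmIoctlTimeout() in place, the buffer-object wrappers above and below no longer retry EAGAIN indefinitely; they give up after DRM_IOCTL_TIMEOUT_USEC (about three seconds) and return -EBUSY, so callers now need to handle that case. A hedged caller-side sketch of the updated drmBOCreate()/drmBOWaitIdle() entry points (flag and size choices illustrative only):

#include <errno.h>
#include <stdio.h>
#include "xf86drm.h"
#include "xf86mm.h"

static int create_and_idle(int fd)
{
	drmBO buf;
	int ret;

	/* The fifth argument is now the initial proposedFlags (was `mask`). */
	ret = drmBOCreate(fd, 4096, 0, NULL,
			  DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT,
			  0, &buf);
	if (ret)
		return ret;

	ret = drmBOWaitIdle(fd, &buf, 0);
	if (ret == -EBUSY)	/* the bounded wait timed out */
		fprintf(stderr, "buffer still busy after ~3s\n");
	return ret;
}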
*/ -    do { -	ret = ioctl(fd, DRM_IOCTL_BO_MAP, &arg); -    } while (ret != 0 && errno == EAGAIN); - -    if (ret)  -	return -errno; +    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_MAP, &arg); +    if (ret) +	return ret;      drmBOCopyReply(rep, buf);	      buf->mapFlags = mapFlags; @@ -2715,14 +2751,12 @@ int drmBOSetStatus(int fd, drmBO *buf,      req->desired_tile_stride = desired_tile_stride;      req->tile_info = tile_info; -    do { -	    ret = ioctl(fd, DRM_IOCTL_BO_SETSTATUS, &arg); -    } while (ret && errno == EAGAIN); - +    ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_SETSTATUS, &arg);      if (ret)  -	    return -errno; +	    return ret;      drmBOCopyReply(rep, buf); +    return 0;  } @@ -2757,12 +2791,9 @@ int drmBOWaitIdle(int fd, drmBO *buf, unsigned hint)  	req->handle = buf->handle;  	req->hint = hint; -	do { -	    ret = ioctl(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg); -	} while (ret && errno == EAGAIN); - +	ret = drmIoctlTimeout(fd, DRM_IOCTL_BO_WAIT_IDLE, &arg);  	if (ret)  -	    return -errno; +	    return ret;  	drmBOCopyReply(rep, buf);      } @@ -2824,35 +2855,25 @@ int drmMMTakedown(int fd, unsigned memType)  int drmMMLock(int fd, unsigned memType, int lockBM, int ignoreNoEvict)  {      struct drm_mm_type_arg arg; -    int ret;      memset(&arg, 0, sizeof(arg));      arg.mem_type = memType;      arg.lock_flags |= (lockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0;      arg.lock_flags |= (ignoreNoEvict) ? DRM_BO_LOCK_IGNORE_NO_EVICT : 0; -    do{ -        ret = ioctl(fd, DRM_IOCTL_MM_LOCK, &arg); -    } while (ret && errno == EAGAIN); - -    return (ret) ? -errno : 0; +    return drmIoctlTimeout(fd, DRM_IOCTL_MM_LOCK, &arg);  }  int drmMMUnlock(int fd, unsigned memType, int unlockBM)  {      struct drm_mm_type_arg arg; -    int ret;      memset(&arg, 0, sizeof(arg));      arg.mem_type = memType;      arg.lock_flags |= (unlockBM) ? DRM_BO_LOCK_UNLOCK_BM : 0; -    do{ -	ret = ioctl(fd, DRM_IOCTL_MM_UNLOCK, &arg); -    } while (ret && errno == EAGAIN); - -    return (ret) ? 
-errno : 0; +    return drmIoctlTimeout(fd, DRM_IOCTL_MM_UNLOCK, &arg);  }  int drmBOVersion(int fd, unsigned int *major, diff --git a/libdrm/xf86mm.h b/libdrm/xf86mm.h index d3df8497..c80288a7 100644 --- a/libdrm/xf86mm.h +++ b/libdrm/xf86mm.h @@ -110,7 +110,7 @@ typedef struct _drmBO      unsigned handle;      uint64_t mapHandle;      uint64_t flags; -    uint64_t mask; +    uint64_t proposedFlags;      unsigned mapFlags;      unsigned long size;      unsigned long offset; diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 79136431..e7c280d0 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -13,16 +13,17 @@ drm-objs    := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \  		drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \  		drm_memory_debug.o ati_pcigart.o drm_sman.o \  		drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \ -	        drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o +	        drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \ +		drm_regman.o  tdfx-objs   := tdfx_drv.o  r128-objs   := r128_drv.o r128_cce.o r128_state.o r128_irq.o  mga-objs    := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o  i810-objs   := i810_drv.o i810_dma.o  i915-objs   := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ -		i915_buffer.o +		i915_buffer.o i915_compat.o  nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \  		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ -		nouveau_sgdma.o nouveau_dma.o \ +		nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \  		nv04_timer.o \  		nv04_mc.o nv40_mc.o nv50_mc.o \  		nv04_fb.o nv10_fb.o nv40_fb.o \ diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c index 7241c2a8..68029635 100644 --- a/linux-core/ati_pcigart.c +++ b/linux-core/ati_pcigart.c @@ -41,7 +41,7 @@ static void *drm_ati_alloc_pcigart_table(int order)  	struct page *page;  	int i; -	DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order); +	DRM_DEBUG("%d order\n", order);  	address = __get_free_pages(GFP_KERNEL | __GFP_COMP,  				   order); @@ -58,7 +58,7 @@ static void *drm_ati_alloc_pcigart_table(int order)  		SetPageReserved(page);  	} -	DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address); +	DRM_DEBUG("returning 0x%08lx\n", address);  	return (void *)address;  } @@ -67,7 +67,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)  	struct page *page;  	int i;  	int num_pages = 1 << order; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	page = virt_to_page((unsigned long)address); @@ -81,7 +81,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)  	free_pages((unsigned long)address, order);  } -int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info) +int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)  {  	struct drm_sg_mem *entry = dev->sg;  	unsigned long pages; @@ -132,7 +132,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gar  }  EXPORT_SYMBOL(drm_ati_pcigart_cleanup); -int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info) +int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)  {  	struct drm_sg_mem *entry = dev->sg;  	void *address = NULL; diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 332ee1cd..4e8b087b 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -66,8 +66,8 @@  #ifdef 
CONFIG_MTRR  #include <asm/mtrr.h>  #endif -#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)  #include <asm/agp.h> +#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)  #include <linux/types.h>  #include <linux/agp_backend.h>  #endif @@ -83,7 +83,9 @@  #include "drm_os_linux.h"  #include "drm_hashtab.h" +#include "drm_internal.h" +struct drm_device;  struct drm_file;  /* If you want the memory alloc debug functionality, change define below */ @@ -159,6 +161,12 @@ struct drm_file;  #define DRM_OBJECT_HASH_ORDER 12  #define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)  #define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16) +/* + * This should be small enough to allow the use of kmalloc for hash tables + * instead of vmalloc. + */ + +#define DRM_FILE_HASH_ORDER 8  #define DRM_MM_INIT_MAX_PAGES 256  /*@}*/ @@ -199,7 +207,7 @@ struct drm_file;  #if DRM_DEBUG_CODE  #define DRM_DEBUG(fmt, arg...)						\  	do {								\ -		if ( drm_debug )						\ +		if ( drm_debug )					\  			printk(KERN_DEBUG				\  			       "[" DRM_NAME ":%s] " fmt ,		\  			       __FUNCTION__ , ##arg);			\ @@ -273,9 +281,6 @@ do {									\  			return -EFAULT;					\  	} -struct drm_device; -struct drm_file; -  /**   * Ioctl function type.   * @@ -391,14 +396,9 @@ struct drm_buf_entry {  	struct drm_freelist freelist;  }; -/* - * This should be small enough to allow the use of kmalloc for hash tables - * instead of vmalloc. - */ -#define DRM_FILE_HASH_ORDER 8  enum drm_ref_type { -	_DRM_REF_USE=0, +	_DRM_REF_USE = 0,  	_DRM_REF_TYPE1,  	_DRM_NO_REF_TYPES  }; @@ -501,14 +501,14 @@ struct drm_agp_mem {  /**   * AGP data.   * - * \sa drm_agp_init)() and drm_device::agp. + * \sa drm_agp_init() and drm_device::agp.   */  struct drm_agp_head {  	DRM_AGP_KERN agp_info;		/**< AGP device information */  	struct list_head memory;  	unsigned long mode;		/**< AGP mode */  #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11) -	struct agp_bridge_data  *bridge; +	struct agp_bridge_data *bridge;  #endif  	int enabled;			/**< whether the AGP bus as been enabled */  	int acquired;			/**< whether the AGP device has been acquired */ @@ -584,15 +584,6 @@ struct drm_vbl_sig {  	struct task_struct *task;  }; -/** - * Drawable information. - */ -struct drm_drawable_info { -	unsigned int num_rects; -	struct drm_clip_rect *rects; -}; - -  /* location of GART table */  #define DRM_ATI_GART_MAIN 1  #define DRM_ATI_GART_FB   2 @@ -601,7 +592,7 @@ struct drm_drawable_info {  #define DRM_ATI_GART_PCIE 2  #define DRM_ATI_GART_IGP 3 -struct ati_pcigart_info { +struct drm_ati_pcigart_info {  	int gart_table_location;  	int gart_reg_if;  	void *addr; @@ -631,9 +622,9 @@ struct drm_driver {  	int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);  	void (*dma_ready) (struct drm_device *);  	int (*dma_quiescent) (struct drm_device *); -	int (*context_ctor) (struct drm_device * dev, int context); -	int (*context_dtor) (struct drm_device * dev, int context); -	int (*kernel_context_switch) (struct drm_device * dev, int old, +	int (*context_ctor) (struct drm_device *dev, int context); +	int (*context_dtor) (struct drm_device *dev, int context); +	int (*kernel_context_switch) (struct drm_device *dev, int old,  				      int new);  	void (*kernel_context_switch_unlock) (struct drm_device * dev);  	/** @@ -680,7 +671,7 @@ struct drm_driver {  	 * interrupts will have to stay on to keep the count accurate.  	 
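Drivers fill the renamed struct drm_ati_pcigart_info before handing it to drm_ati_pcigart_init(), which (as the bsd-core version above shows) returns 1 on success and 0 on failure. A sketch of the caller side, inside a driver file that already pulls in drmP.h (values illustrative; DRM_ATI_GART_PCI and the table_size field are assumed from the full headers, which this excerpt truncates):

static int example_pcigart_setup(struct drm_device *dev)
{
	struct drm_ati_pcigart_info gart_info;

	memset(&gart_info, 0, sizeof(gart_info));
	gart_info.gart_table_location = DRM_ATI_GART_MAIN;	/* table in system RAM */
	gart_info.gart_reg_if = DRM_ATI_GART_PCI;
	gart_info.table_size = 32768;	/* illustrative */

	if (!drm_ati_pcigart_init(dev, &gart_info))
		return -ENOMEM;
	return 0;
}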
*/  	void (*disable_vblank) (struct drm_device *dev, int crtc); -	int (*dri_library_name) (struct drm_device * dev, char * buf); +	int (*dri_library_name) (struct drm_device *dev, char * buf);  	/**  	 * Called by \c drm_device_is_agp.  Typically used to determine if a @@ -693,22 +684,23 @@ struct drm_driver {  	 * card is absolutely \b not AGP (return of 0), absolutely \b is AGP  	 * (return of 1), or may or may not be AGP (return of 2).  	 */ -	int (*device_is_agp) (struct drm_device * dev); +	int (*device_is_agp) (struct drm_device *dev);  /* these have to be filled in */  	 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); -	void (*irq_preinstall) (struct drm_device * dev); -	int (*irq_postinstall) (struct drm_device * dev); -	void (*irq_uninstall) (struct drm_device * dev); +	void (*irq_preinstall) (struct drm_device *dev); +	int (*irq_postinstall) (struct drm_device *dev); +	void (*irq_uninstall) (struct drm_device *dev);  	void (*reclaim_buffers) (struct drm_device *dev,  				 struct drm_file *file_priv);  	void (*reclaim_buffers_locked) (struct drm_device *dev,  					struct drm_file *file_priv);  	void (*reclaim_buffers_idlelocked) (struct drm_device *dev,  					    struct drm_file *file_priv); -	unsigned long (*get_map_ofs) (struct drm_map * map); -	unsigned long (*get_reg_ofs) (struct drm_device * dev); -	void (*set_version) (struct drm_device * dev, struct drm_set_version * sv); +	unsigned long (*get_map_ofs) (struct drm_map *map); +	unsigned long (*get_reg_ofs) (struct drm_device *dev); +	void (*set_version) (struct drm_device *dev, +			     struct drm_set_version *sv);  	struct drm_fence_driver *fence_driver;  	struct drm_bo_driver *bo_driver; @@ -1188,6 +1180,7 @@ extern int drm_agp_free_memory(DRM_AGP_MEM * handle);  extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);  extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);  extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); +extern void drm_agp_chipset_flush(struct drm_device *dev);  				/* Stub support (drm_stub.h) */  extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,  		     struct drm_driver *driver); @@ -1219,8 +1212,8 @@ extern int drm_sg_free(struct drm_device *dev, void *data,  		       struct drm_file *file_priv);  			       /* ATI PCIGART support (ati_pcigart.h) */ -extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info); -extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);  extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,  			   size_t align, dma_addr_t maxaddr); @@ -1231,7 +1224,7 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);  struct drm_sysfs_class;  extern struct class *drm_sysfs_create(struct module *owner, char *name);  extern void drm_sysfs_destroy(void); -extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head * head); +extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);  extern void drm_sysfs_device_remove(struct drm_device *dev);  /* @@ -1271,7 +1264,7 @@ static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,  static __inline__ int drm_device_is_agp(struct drm_device *dev)  {  	if ( dev->driver->device_is_agp != NULL ) { -		int err = 
(*dev->driver->device_is_agp)( dev ); +		int err = (*dev->driver->device_is_agp)(dev);  		if (err != 2) {  			return err; @@ -1342,19 +1335,5 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)  /*@}*/ -/** Type for the OS's non-sleepable mutex lock */ -#define DRM_SPINTYPE		spinlock_t -/** - * Initialize the lock for use.  name is an optional string describing the - * lock - */ -#define DRM_SPININIT(l,name)	spin_lock_init(l) -#define DRM_SPINUNINIT(l) -#define DRM_SPINLOCK(l)		spin_lock(l) -#define DRM_SPINUNLOCK(l)	spin_unlock(l) -#define DRM_SPINLOCK_IRQSAVE(l, _flags)	spin_lock_irqsave(l, _flags); -#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags); -#define DRM_SPINLOCK_ASSERT(l)		do {} while (0)  -  #endif				/* __KERNEL__ */  #endif diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index b68efc64..02187017 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -48,7 +48,7 @@   * Verifies the AGP device has been initialized and acquired and fills in the   * drm_agp_info structure with the information in drm_agp_head::agp_info.   */ -int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) +int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)  {  	DRM_AGP_KERN *kern; @@ -130,7 +130,7 @@ EXPORT_SYMBOL(drm_agp_acquire);  int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,  			  struct drm_file *file_priv)  { -	return drm_agp_acquire( (struct drm_device *) file_priv->head->dev ); +	return drm_agp_acquire((struct drm_device *) file_priv->head->dev);  }  /** @@ -426,7 +426,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)  		if (!(head->bridge = agp_backend_acquire(dev->pdev))) {  			drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);  			return NULL; -	} +		}  		agp_copy_info(head->bridge, &head->agp_info);  		agp_backend_release(head->bridge);  	} else { @@ -498,18 +498,21 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)  #define AGP_REQUIRED_MAJOR 0  #define AGP_REQUIRED_MINOR 102 -static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) { +static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) +{  	return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1);  } -static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages, -			    struct page **pages) { - -	struct drm_agp_ttm_backend *agp_be =  +static int drm_agp_populate(struct drm_ttm_backend *backend, +			    unsigned long num_pages, struct page **pages, +			    struct page *dummy_read_page) +{ +	struct drm_agp_ttm_backend *agp_be =  		container_of(backend, struct drm_agp_ttm_backend, backend);  	struct page **cur_page, **last_page = pages + num_pages;  	DRM_AGP_MEM *mem; +	int dummy_page_count = 0;  	if (drm_alloc_memctl(num_pages * sizeof(void *)))  		return -1; @@ -521,15 +524,22 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p  	mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);  #endif  	if (!mem) { -		drm_free_memctl(num_pages *sizeof(void *)); +		drm_free_memctl(num_pages * sizeof(void *));  		return -1;  	}  	DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);  	mem->page_count = 0;  	for (cur_page = pages; cur_page < last_page; ++cur_page) { -		mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page)); +		struct page *page = *cur_page; +		if (!page) { +			page = dummy_read_page; +			++dummy_page_count; +		} +		mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));  	} +	if (dummy_page_count) +		DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);  	agp_be->mem = mem;  	return 0;  } @@ -541,24 +551,28 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,  		container_of(backend, struct drm_agp_ttm_backend, backend);  	DRM_AGP_MEM *mem = agp_be->mem;  	int ret; +	int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);  	DRM_DEBUG("drm_agp_bind_ttm\n");  	mem->is_flushed = TRUE; -	mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY : -		AGP_USER_MEMORY; +	mem->type = AGP_USER_MEMORY; +	/* CACHED MAPPED implies not snooped memory */ +	if (snooped) +		mem->type = AGP_USER_CACHED_MEMORY; +  	ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start); -	if (ret) { +	if (ret)  		DRM_ERROR("AGP Bind memory failed\n"); -	} +  	DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?  			
DRM_BE_FLAG_BOUND_CACHED : 0,  			DRM_BE_FLAG_BOUND_CACHED);  	return ret;  } -static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) { - -	struct drm_agp_ttm_backend *agp_be =  +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) +{ +	struct drm_agp_ttm_backend *agp_be =  		container_of(backend, struct drm_agp_ttm_backend, backend);  	DRM_DEBUG("drm_agp_unbind_ttm\n"); @@ -568,9 +582,9 @@ static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {  		return 0;  } -static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) { - -	struct drm_agp_ttm_backend *agp_be =  +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) +{ +	struct drm_agp_ttm_backend *agp_be =  		container_of(backend, struct drm_agp_ttm_backend, backend);  	DRM_AGP_MEM *mem = agp_be->mem; @@ -579,29 +593,27 @@ static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {  		unsigned long num_pages = mem->page_count;  		backend->func->unbind(backend);  		agp_free_memory(mem); -		drm_free_memctl(num_pages *sizeof(void *)); +		drm_free_memctl(num_pages * sizeof(void *));  	}  	agp_be->mem = NULL;  } -static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) { - +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) +{  	struct drm_agp_ttm_backend *agp_be;  	if (backend) {  		DRM_DEBUG("drm_agp_destroy_ttm\n");  		agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);  		if (agp_be) { -			if (agp_be->mem) { +			if (agp_be->mem)  				backend->func->clear(backend); -			}  			drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);  		}  	}  } -static struct drm_ttm_backend_func agp_ttm_backend =  -{ +static struct drm_ttm_backend_func agp_ttm_backend = {  	.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,  	.populate = drm_agp_populate,  	.clear = drm_agp_clear_ttm, @@ -633,7 +645,7 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)  		return NULL;  	} -	 +  	agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);  	if (!agp_be)  		return NULL; @@ -643,11 +655,18 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)  	agp_be->bridge = dev->agp->bridge;  	agp_be->populated = FALSE;  	agp_be->backend.func = &agp_ttm_backend; -	//	agp_be->backend.mem_type = DRM_BO_MEM_TT;  	agp_be->backend.dev = dev;  	return &agp_be->backend;  }  EXPORT_SYMBOL(drm_agp_init_ttm); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25) +void drm_agp_chipset_flush(struct drm_device *dev) +{ +	agp_flush_chipset(dev->agp->bridge); +} +EXPORT_SYMBOL(drm_agp_chipset_flush); +#endif +  #endif				/* __OS_HAS_AGP */ diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index e35e8b6d..c904a91d 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -83,7 +83,7 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,  		return -ENOMEM;  	memset(entry, 0, sizeof(*entry));  	entry->priv = priv; -	entry->hash_item.key = (unsigned long) magic; +	entry->hash_item.key = (unsigned long)magic;  	mutex_lock(&dev->struct_mutex);  	drm_ht_insert_item(&dev->magiclist, &entry->hash_item);  	list_add_tail(&entry->head, &dev->magicfree); @@ -109,7 +109,7 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)  	DRM_DEBUG("%d\n", magic);  	mutex_lock(&dev->struct_mutex); -	if (drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) { +	if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {  		mutex_unlock(&dev->struct_mutex);  		return -EINVAL;  	} diff --git a/linux-core/drm_bo.c 
b/linux-core/drm_bo.c index 16203c77..df10e12b 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -36,23 +36,23 @@   * The buffer usage atomic_t needs to be protected by dev->struct_mutex   * when there is a chance that it can be zero before or after the operation.   * - * dev->struct_mutex also protects all lists and list heads. Hash tables and hash - * heads. + * dev->struct_mutex also protects all lists and list heads, + * Hash tables and hash heads.   *   * bo->mutex protects the buffer object itself excluding the usage field. - * bo->mutex does also protect the buffer list heads, so to manipulate those, we need - * both the bo->mutex and the dev->struct_mutex. + * bo->mutex does also protect the buffer list heads, so to manipulate those, + * we need both the bo->mutex and the dev->struct_mutex.   * - * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit - * complicated. When dev->struct_mutex is released to grab bo->mutex, the list - * traversal will, in general, need to be restarted. + * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal + * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex, + * the list traversal will, in general, need to be restarted.   *   */ -static void drm_bo_destroy_locked(struct drm_buffer_object * bo); -static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); -static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); -static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); +static void drm_bo_destroy_locked(struct drm_buffer_object *bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);  static inline uint64_t drm_bo_type_flags(unsigned type)  { @@ -63,7 +63,7 @@ static inline uint64_t drm_bo_type_flags(unsigned type)   * bo locked. dev->struct_mutex locked.   */ -void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) +void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)  {  	struct drm_mem_type_manager *man; @@ -74,13 +74,13 @@ void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)  	list_add_tail(&bo->pinned_lru, &man->pinned);  } -void drm_bo_add_to_lru(struct drm_buffer_object * bo) +void drm_bo_add_to_lru(struct drm_buffer_object *bo)  {  	struct drm_mem_type_manager *man;  	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); -	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) +	if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))  	    || bo->mem.mem_type != bo->pinned_mem_type) {  		man = &bo->dev->bm.man[bo->mem.mem_type];  		list_add_tail(&bo->lru, &man->lru); @@ -89,7 +89,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo)  	}  } -static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) +static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)  {  #ifdef DRM_ODD_MM_COMPAT  	int ret; @@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)  	return 0;  } -static void drm_bo_vm_post_move(struct drm_buffer_object * bo) +static void drm_bo_vm_post_move(struct drm_buffer_object *bo)  {  #ifdef DRM_ODD_MM_COMPAT  	int ret; @@ -133,22 +133,39 @@ static void drm_bo_vm_post_move(struct drm_buffer_object * bo)   * Call bo->mutex locked.   
*/ -static int drm_bo_add_ttm(struct drm_buffer_object * bo) +static int drm_bo_add_ttm(struct drm_buffer_object *bo)  {  	struct drm_device *dev = bo->dev;  	int ret = 0; -	bo->ttm = NULL; +	uint32_t page_flags = 0;  	DRM_ASSERT_LOCKED(&bo->mutex); +	bo->ttm = NULL; + +	if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE) +		page_flags |= DRM_TTM_PAGE_WRITE;  	switch (bo->type) { -	case drm_bo_type_dc: +	case drm_bo_type_device:  	case drm_bo_type_kernel: -		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT); +		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,  +					 page_flags, dev->bm.dummy_read_page);  		if (!bo->ttm)  			ret = -ENOMEM;  		break;  	case drm_bo_type_user: +		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, +					 page_flags | DRM_TTM_PAGE_USER, +					 dev->bm.dummy_read_page); +		if (!bo->ttm) +			ret = -ENOMEM; + +		ret = drm_ttm_set_user(bo->ttm, current, +				       bo->buffer_start, +				       bo->num_pages); +		if (ret) +			return ret; +  		break;  	default:  		DRM_ERROR("Illegal buffer object type\n"); @@ -159,8 +176,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)  	return ret;  } -static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, -				  struct drm_bo_mem_reg * mem, +static int drm_bo_handle_move_mem(struct drm_buffer_object *bo, +				  struct drm_bo_mem_reg *mem,  				  int evict, int no_wait)  {  	struct drm_device *dev = bo->dev; @@ -187,7 +204,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,  			goto out_err;  		if (mem->mem_type != DRM_BO_MEM_LOCAL) { -			ret = drm_bind_ttm(bo->ttm, mem); +			ret = drm_ttm_bind(bo->ttm, mem);  			if (ret)  				goto out_err;  		} @@ -197,11 +214,11 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,  		struct drm_bo_mem_reg *old_mem = &bo->mem;  		uint64_t save_flags = old_mem->flags; -		uint64_t save_mask = old_mem->mask; +		uint64_t save_proposed_flags = old_mem->proposed_flags;  		*old_mem = *mem;  		mem->mm_node = NULL; -		old_mem->mask = save_mask; +		old_mem->proposed_flags = save_proposed_flags;  		DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);  	} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && @@ -243,14 +260,14 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,  	return 0; -      out_err: +out_err:  	if (old_is_pci || new_is_pci)  		drm_bo_vm_post_move(bo);  	new_man = &bm->man[bo->mem.mem_type];  	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {  		drm_ttm_unbind(bo->ttm); -		drm_destroy_ttm(bo->ttm); +		drm_ttm_destroy(bo->ttm);  		bo->ttm = NULL;  	} @@ -262,7 +279,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,   * Wait until the buffer is idle.   
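One thing to watch in the drm_bo_type_user arm added above: as committed, a failed drm_ttm_create() sets ret to -ENOMEM but still falls through and calls drm_ttm_set_user() with a NULL bo->ttm. A defensive sketch of the same arm, using only identifiers from the hunk above, would bail out first:

	case drm_bo_type_user:
		bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
					 page_flags | DRM_TTM_PAGE_USER,
					 dev->bm.dummy_read_page);
		if (!bo->ttm)
			return -ENOMEM;	/* don't reach drm_ttm_set_user() with a NULL ttm */

		ret = drm_ttm_set_user(bo->ttm, current,
				       bo->buffer_start,
				       bo->num_pages);
		if (ret)
			return ret;
		break;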
*/ -int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, +int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,  		int no_wait)  {  	int ret; @@ -274,11 +291,10 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,  			drm_fence_usage_deref_unlocked(&bo->fence);  			return 0;  		} -		if (no_wait) { +		if (no_wait)  			return -EBUSY; -		} -		ret = -		    drm_fence_object_wait(bo->fence, lazy, ignore_signals, + +		ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,  					  bo->fence_type);  		if (ret)  			return ret; @@ -289,7 +305,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,  }  EXPORT_SYMBOL(drm_bo_wait); -static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) +static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)  {  	struct drm_device *dev = bo->dev;  	struct drm_buffer_manager *bm = &dev->bm; @@ -324,7 +340,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)   * fence object and removing from lru lists and memory managers.   */ -static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) +static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)  {  	struct drm_device *dev = bo->dev;  	struct drm_buffer_manager *bm = &dev->bm; @@ -346,9 +362,8 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)  	mutex_lock(&dev->struct_mutex); -	if (!atomic_dec_and_test(&bo->usage)) { +	if (!atomic_dec_and_test(&bo->usage))  		goto out; -	}  	if (!bo->fence) {  		list_del_init(&bo->lru); @@ -376,7 +391,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)  				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);  	} -      out: +out:  	mutex_unlock(&bo->mutex);  	return;  } @@ -386,7 +401,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)   * to the buffer object. Then destroy it.   */ -static void drm_bo_destroy_locked(struct drm_buffer_object * bo) +static void drm_bo_destroy_locked(struct drm_buffer_object *bo)  {  	struct drm_device *dev = bo->dev;  	struct drm_buffer_manager *bm = &dev->bm; @@ -409,13 +424,12 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)  		if (bo->ttm) {  			drm_ttm_unbind(bo->ttm); -			drm_destroy_ttm(bo->ttm); +			drm_ttm_destroy(bo->ttm);  			bo->ttm = NULL;  		}  		atomic_dec(&bm->count); -		//		BUG_ON(!list_empty(&bo->base.list));  		drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);  		return; @@ -435,7 +449,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)   * Call dev->struct_mutex locked.   
*/ -static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) +static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)  {  	struct drm_buffer_manager *bm = &dev->bm; @@ -454,9 +468,8 @@ static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)  		drm_bo_cleanup_refs(entry, remove_all); -		if (nentry) { +		if (nentry)  			atomic_dec(&nentry->usage); -		}  	}  } @@ -490,21 +503,20 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)  	mutex_unlock(&dev->struct_mutex);  } -void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) +void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)  { -        struct drm_buffer_object *tmp_bo = *bo; +	struct drm_buffer_object *tmp_bo = *bo;  	bo = NULL;  	DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex); -	if (atomic_dec_and_test(&tmp_bo->usage)) { +	if (atomic_dec_and_test(&tmp_bo->usage))  		drm_bo_destroy_locked(tmp_bo); -	}  }  EXPORT_SYMBOL(drm_bo_usage_deref_locked); -static void drm_bo_base_deref_locked(struct drm_file * file_priv, -				     struct drm_user_object * uo) +static void drm_bo_base_deref_locked(struct drm_file *file_priv, +				     struct drm_user_object *uo)  {  	struct drm_buffer_object *bo =  	    drm_user_object_entry(uo, struct drm_buffer_object, base); @@ -515,7 +527,7 @@ static void drm_bo_base_deref_locked(struct drm_file * file_priv,  	drm_bo_usage_deref_locked(&bo);  } -void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) +void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)  {  	struct drm_buffer_object *tmp_bo = *bo;  	struct drm_device *dev = tmp_bo->dev; @@ -571,8 +583,8 @@ EXPORT_SYMBOL(drm_putback_buffer_objects);  int drm_fence_buffer_objects(struct drm_device *dev,  			     struct list_head *list,  			     uint32_t fence_flags, -			     struct drm_fence_object * fence, -			     struct drm_fence_object ** used_fence) +			     struct drm_fence_object *fence, +			     struct drm_fence_object **used_fence)  {  	struct drm_buffer_manager *bm = &dev->bm;  	struct drm_buffer_object *entry; @@ -656,7 +668,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,  		l = list->next;  	}  	DRM_DEBUG("Fenced %d buffers\n", count); -      out: +out:  	mutex_unlock(&dev->struct_mutex);  	*used_fence = fence;  	return ret; @@ -667,7 +679,7 @@ EXPORT_SYMBOL(drm_fence_buffer_objects);   * bo->mutex locked   */ -static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, +static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,  			int no_wait)  {  	int ret = 0; @@ -675,7 +687,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,  	struct drm_bo_mem_reg evict_mem;  	/* -	 * Someone might have modified the buffer before we took the buffer mutex. +	 * Someone might have modified the buffer before we took the +	 * buffer mutex.  	 
*/  	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) @@ -695,7 +708,7 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,  	evict_mem.mm_node = NULL;  	evict_mem = bo->mem; -	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo); +	evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);  	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);  	if (ret) { @@ -726,7 +739,7 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,  	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,  			_DRM_BO_FLAG_EVICTED); -      out: +out:  	return ret;  } @@ -734,8 +747,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,   * Repeatedly evict memory from the LRU for @mem_type until we create enough   * space, or we've evicted everything and there isn't enough space.   */ -static int drm_bo_mem_force_space(struct drm_device * dev, -				  struct drm_bo_mem_reg * mem, +static int drm_bo_mem_force_space(struct drm_device *dev, +				  struct drm_bo_mem_reg *mem,  				  uint32_t mem_type, int no_wait)  {  	struct drm_mm_node *node; @@ -783,13 +796,16 @@ static int drm_bo_mem_force_space(struct drm_device * dev,  	return 0;  } -static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, +static int drm_bo_mt_compatible(struct drm_mem_type_manager *man, +				int disallow_fixed,  				uint32_t mem_type, -				uint64_t mask, uint32_t * res_mask) +				uint64_t mask, uint32_t *res_mask)  {  	uint64_t cur_flags = drm_bo_type_flags(mem_type);  	uint64_t flag_diff; +	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed) +		return 0;  	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)  		cur_flags |= DRM_BO_FLAG_CACHED;  	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE) @@ -806,6 +822,9 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,  	}  	flag_diff = (mask ^ cur_flags); +	if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED) +		cur_flags |= DRM_BO_FLAG_CACHED_MAPPED; +  	if ((flag_diff & DRM_BO_FLAG_CACHED) &&  	    (!(mask & DRM_BO_FLAG_CACHED) ||  	     (mask & DRM_BO_FLAG_FORCE_CACHING))) @@ -813,7 +832,7 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,  	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&  	    ((mask & DRM_BO_FLAG_MAPPABLE) || -	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) ) +	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))  		return 0;  	*res_mask = cur_flags; @@ -828,8 +847,8 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,   * drm_bo_mem_force_space is attempted in priority order to evict and find   * space.   
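The eviction path above now asks the driver for a full proposed placement via bo_driver->evict_flags() instead of the old evict_mask. A driver-side sketch of such a hook (placements chosen for illustration; the real i915/radeon implementations live outside this excerpt):

static uint64_t example_evict_flags(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_TT:
		/* aperture-backed buffers fall back to cached system memory */
		return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
	default:
		return DRM_BO_FLAG_MEM_LOCAL;
	}
}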
*/ -int drm_bo_mem_space(struct drm_buffer_object * bo, -		     struct drm_bo_mem_reg * mem, int no_wait) +int drm_bo_mem_space(struct drm_buffer_object *bo, +		     struct drm_bo_mem_reg *mem, int no_wait)  {  	struct drm_device *dev = bo->dev;  	struct drm_buffer_manager *bm = &dev->bm; @@ -851,7 +870,9 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,  		mem_type = prios[i];  		man = &bm->man[mem_type]; -		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask, +		type_ok = drm_bo_mt_compatible(man, +					       bo->type == drm_bo_type_user, +					       mem_type, mem->proposed_flags, 					       &cur_flags);  		if (!type_ok) @@ -900,12 +921,16 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,  		if (!man->has_type)  			continue; -		if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags)) +		if (!drm_bo_mt_compatible(man, +					  bo->type == drm_bo_type_user, +					  mem_type, +					  mem->proposed_flags, +					  &cur_flags))  			continue;  		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait); -		if (ret == 0) { +		if (ret == 0 && mem->mm_node) {  			mem->flags = cur_flags;  			return 0;  		} @@ -917,41 +942,53 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,  	ret = (has_eagain) ? -EAGAIN : -ENOMEM;  	return ret;  } -  EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(struct drm_buffer_object * bo, -			   uint64_t new_flags, uint64_t used_mask) -{ -	uint32_t new_props; - -	if (bo->type == drm_bo_type_user) { -		DRM_ERROR("User buffers are not supported yet.\n"); +/* + * drm_bo_modify_proposed_flags: + * + * @bo: the buffer object getting new flags + * + * @new_flags: the new set of proposed flag bits + * + * @new_mask: the mask of bits changed in new_flags + * + * Modify the proposed_flag bits in @bo + */ +static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo, +					 uint64_t new_flags, uint64_t new_mask) +{ +	uint32_t new_access; + +	/* Copy unchanging bits from existing proposed_flags */ +	DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask); +	  +	if (bo->type == drm_bo_type_user && +	    ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) != +	     (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) { +		DRM_ERROR("User buffers require cache-coherent memory.\n");  		return -EINVAL;  	} -	if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { -		DRM_ERROR -		    ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " -		     "processes.\n"); +	if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { +		DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");  		return -EPERM;  	}  	if ((new_flags & DRM_BO_FLAG_NO_MOVE)) { -		DRM_ERROR -			("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n"); +		DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");  		return -EPERM;  	} -	new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | -				 DRM_BO_FLAG_READ); +	new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | +				  DRM_BO_FLAG_READ); -	if (!new_props) { +	if (new_access == 0) {  		DRM_ERROR("Invalid buffer object rwx properties\n");  		return -EINVAL;  	} -	bo->mem.mask = new_flags; +	bo->mem.proposed_flags = new_flags;  	return 0;  } @@ -989,7 +1026,7 @@ EXPORT_SYMBOL(drm_lookup_buffer_object);   * Doesn't do any fence flushing as opposed to the drm_bo_busy function.   
*/ -static int drm_bo_quick_busy(struct drm_buffer_object * bo) +static int drm_bo_quick_busy(struct drm_buffer_object *bo)  {  	struct drm_fence_object *fence = bo->fence; @@ -1009,7 +1046,7 @@ static int drm_bo_quick_busy(struct drm_buffer_object * bo)   * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.   */ -static int drm_bo_busy(struct drm_buffer_object * bo) +static int drm_bo_busy(struct drm_buffer_object *bo)  {  	struct drm_fence_object *fence = bo->fence; @@ -1029,7 +1066,7 @@ static int drm_bo_busy(struct drm_buffer_object * bo)  	return 0;  } -static int drm_bo_read_cached(struct drm_buffer_object * bo) +static int drm_bo_evict_cached(struct drm_buffer_object *bo)  {  	int ret = 0; @@ -1043,7 +1080,7 @@ static int drm_bo_read_cached(struct drm_buffer_object * bo)   * Wait until a buffer is unmapped.   */ -static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) +static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)  {  	int ret = 0; @@ -1059,7 +1096,7 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)  	return ret;  } -static int drm_bo_check_unfenced(struct drm_buffer_object * bo) +static int drm_bo_check_unfenced(struct drm_buffer_object *bo)  {  	int ret; @@ -1074,7 +1111,7 @@ static int drm_bo_check_unfenced(struct drm_buffer_object * bo)   * Until then, we cannot really do anything with it except delete it.   */ -static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, +static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,  				int eagain_if_wait)  {  	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); @@ -1086,8 +1123,8 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,  	ret = 0;  	mutex_unlock(&bo->mutex); -	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ, -		    !drm_bo_check_unfenced(bo)); +	DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ, +		     !drm_bo_check_unfenced(bo));  	mutex_lock(&bo->mutex);  	if (ret == -EINTR)  		return -EAGAIN; @@ -1107,7 +1144,7 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,   * Bo locked.   */ -static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, +static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,  				struct drm_bo_info_rep *rep)  {  	if (!rep) @@ -1117,8 +1154,18 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,  	rep->flags = bo->mem.flags;  	rep->size = bo->num_pages * PAGE_SIZE;  	rep->offset = bo->offset; -	rep->arg_handle = bo->map_list.user_token; -	rep->mask = bo->mem.mask; + +	/* +	 * drm_bo_type_device buffers have user-visible +	 * handles which can be used to share across +	 * processes. 
Hand that back to the application +	 */ +	if (bo->type == drm_bo_type_device) +		rep->arg_handle = bo->map_list.user_token; +	else +		rep->arg_handle = 0; + +	rep->proposed_flags = bo->mem.proposed_flags;  	rep->buffer_start = bo->buffer_start;  	rep->fence_flags = bo->fence_type;  	rep->rep_flags = 0; @@ -1177,15 +1224,11 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,  				goto out;  			} -			if ((map_flags & DRM_BO_FLAG_READ) && -			    (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) && -			    (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { -				drm_bo_read_cached(bo); -			} +			if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) +				drm_bo_evict_cached(bo); +  			break; -		} else if ((map_flags & DRM_BO_FLAG_READ) && -			   (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) && -			   (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) { +		} else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {  			/*  			 * We are already mapped with different flags. @@ -1210,7 +1253,7 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,  	} else  		drm_bo_fill_rep_arg(bo, rep); -      out: +out:  	mutex_unlock(&bo->mutex);  	drm_bo_usage_deref_unlocked(&bo);  	return ret; @@ -1239,7 +1282,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)  	drm_remove_ref_object(file_priv, ro);  	drm_bo_usage_deref_locked(&bo); -      out: +out:  	mutex_unlock(&dev->struct_mutex);  	return ret;  } @@ -1249,7 +1292,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)   */  static void drm_buffer_user_object_unmap(struct drm_file *file_priv, -					 struct drm_user_object * uo, +					 struct drm_user_object *uo,  					 enum drm_ref_type action)  {  	struct drm_buffer_object *bo = @@ -1268,10 +1311,10 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,  /*   * bo->mutex locked. - * Note that new_mem_flags are NOT transferred to the bo->mem.mask. + * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.   
*/ -int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,  		       int no_wait, int move_unfenced)  {  	struct drm_device *dev = bo->dev; @@ -1294,7 +1337,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,  	mem.num_pages = bo->num_pages;  	mem.size = mem.num_pages << PAGE_SHIFT; -	mem.mask = new_mem_flags; +	mem.proposed_flags = new_mem_flags;  	mem.page_alignment = bo->mem.page_alignment;  	mutex_lock(&bm->evict_mutex); @@ -1311,44 +1354,70 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,  	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait); - out_unlock: +out_unlock: +	mutex_lock(&dev->struct_mutex);  	if (ret || !move_unfenced) { -		mutex_lock(&dev->struct_mutex);  		if (mem.mm_node) {  			if (mem.mm_node != bo->pinned_node)  				drm_mm_put_block(mem.mm_node);  			mem.mm_node = NULL;  		} -		mutex_unlock(&dev->struct_mutex); +		drm_bo_add_to_lru(bo); +		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) { +			DRM_WAKEUP(&bo->event_queue); +			DRM_FLAG_MASKED(bo->priv_flags, 0, +					_DRM_BO_FLAG_UNFENCED); +		} +	} else { +		list_add_tail(&bo->lru, &bm->unfenced); +		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED, +				_DRM_BO_FLAG_UNFENCED);  	} - +	mutex_unlock(&dev->struct_mutex);  	mutex_unlock(&bm->evict_mutex);  	return ret;  } -static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) +static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)  { -	uint32_t flag_diff = (mem->mask ^ mem->flags); +	uint32_t flag_diff = (mem->proposed_flags ^ mem->flags); -	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0) +	if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)  		return 0;  	if ((flag_diff & DRM_BO_FLAG_CACHED) && -	    (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/ -	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) { -	  return 0; -	} +	    (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/ +	     (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING))) +		return 0; +  	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) && -	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) || -	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE))) +	    ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) || +	     (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))  		return 0;  	return 1;  } -/* - * bo locked. +/** + * drm_buffer_object_validate: + * + * @bo: the buffer object to modify + * + * @fence_class: the new fence class covering this buffer + * + * @move_unfenced: a boolean indicating whether switching the + * memory space of this buffer should cause the buffer to + * be placed on the unfenced list. + * + * @no_wait: whether this function should return -EBUSY instead + * of waiting. + * + * Change buffer access parameters. This can involve moving + * the buffer to the correct memory type, pinning the buffer + * or changing the class/type of fence covering this buffer + * + * Must be called with bo locked.   
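 *
 * A worked example of the drm_bo_mem_compat() test above (flag values
 * illustrative): with
 *
 *	mem->flags          = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED
 *	mem->proposed_flags = DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_FORCE_CACHING
 *
 * the memory domains overlap, but DRM_BO_FLAG_CACHED differs between the
 * two words while DRM_BO_FLAG_FORCE_CACHING is proposed, so the function
 * returns 0 and validation has to move the buffer.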
 */
-static int drm_buffer_object_validate(struct drm_buffer_object * bo,
+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
 				      uint32_t fence_class,
 				      int move_unfenced, int no_wait)
 {
@@ -1358,8 +1427,8 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	uint32_t ftype;
 	int ret;
-	DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
-		  (unsigned long long) bo->mem.mask,
+	DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+		  (unsigned long long) bo->mem.proposed_flags,
 		  (unsigned long long) bo->mem.flags);
 	ret = driver->fence_type(bo, &fence_class, &ftype);
@@ -1391,7 +1460,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	ret = drm_bo_wait_unmapped(bo, no_wait);
 	if (ret) {
-	        DRM_ERROR("Timed out waiting for buffer unmap.\n");
+		DRM_ERROR("Timed out waiting for buffer unmap.\n");
 		return ret;
 	}
@@ -1400,7 +1469,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	 */
 	if (!drm_bo_mem_compat(&bo->mem)) {
-		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+		ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
 					 move_unfenced);
 		if (ret) {
 			if (ret != -EAGAIN)
@@ -1413,7 +1482,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	 * Pinned buffers.
 	 */
-	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+	if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
 		bo->pinned_mem_type = bo->mem.mem_type;
 		mutex_lock(&dev->struct_mutex);
 		list_del_init(&bo->pinned_lru);
@@ -1449,7 +1518,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 		if (ret)
 			return ret;
 	}
-	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
+	/*
+	 * Validation has succeeded; move the access and other
+	 * non-mapping-related flag bits from the proposed flags to
+	 * the active flags.
+	 */
+
+	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
 	/*
 	 * Finally, adjust lru to be sure.
@@ -1474,13 +1549,38 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	return 0;
 }
+/**
+ * drm_bo_do_validate:
+ *
+ * @bo:	the buffer object
+ *
+ * @flags: access rights, mapping parameters and cacheability. See
+ * the DRM_BO_FLAG_* values in drm.h
+ *
+ * @mask: Which flag values to change; this allows callers to modify
+ * things without knowing the current state of other flags.
+ *
+ * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+ * values in drm.h.
+ *
+ * @fence_class: a driver-specific way of doing fences. Presumably,
+ * this would be used if the driver had more than one submission and
+ * fencing mechanism. At this point, there isn't any use of this
+ * from the user mode code.
+ *
+ * @rep: To be stuffed with the reply from validation
+ *
+ * 'validate' a buffer object. This changes where the buffer is
+ * located, along with changing access modes.
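 *
 * The masked update on success above follows the usual convention; a
 * sketch of the macro's effect (not its literal definition):
 *
 *	flags = (flags & ~mask) | (proposed_flags & mask);
 *
 * so with mask = ~DRM_BO_MASK_MEMTYPE every bit except the memory-type
 * bits is taken from the proposed flags.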
+ */
+
 int drm_bo_do_validate(struct drm_buffer_object *bo,
 		       uint64_t flags, uint64_t mask, uint32_t hint,
 		       uint32_t fence_class,
-		       int no_wait,
 		       struct drm_bo_info_rep *rep)
 {
 	int ret;
+	int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
 	mutex_lock(&bo->mutex);
 	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1488,9 +1588,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
 	if (ret)
 		goto out;
-
-	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-	ret = drm_bo_new_mask(bo, flags, mask);
+	ret = drm_bo_modify_proposed_flags (bo, flags, mask);
 	if (ret)
 		goto out;
@@ -1507,25 +1605,55 @@ out:
 }
 EXPORT_SYMBOL(drm_bo_do_validate);
+/**
+ * drm_bo_handle_validate
+ *
+ * @file_priv: the drm file private, used to get a handle to the user context
+ *
+ * @handle: the buffer object handle
+ *
+ * @flags: access rights, mapping parameters and cacheability. See
+ * the DRM_BO_FLAG_* values in drm.h
+ *
+ * @mask: Which flag values to change; this allows callers to modify
+ * things without knowing the current state of other flags.
+ *
+ * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+ * values in drm.h.
+ *
+ * @fence_class: a driver-specific way of doing fences. Presumably,
+ * this would be used if the driver had more than one submission and
+ * fencing mechanism. At this point, there isn't any use of this
+ * from the user mode code.
+ *
+ * @use_old_fence_class: don't change fence class, pull it from the buffer object
+ *
+ * @rep: To be stuffed with the reply from validation
+ *
+ * @bo_rep: To be stuffed with the buffer object pointer
+ *
+ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
+ * Some permissions checking is done on the parameters, otherwise this
+ * is a thin wrapper.
+ */
-int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
-			   uint32_t fence_class,
-			   uint64_t flags, uint64_t mask,
-			   uint32_t hint,
+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
+			   uint64_t flags, uint64_t mask,
 			   uint32_t hint,
+			   uint32_t fence_class,
 			   int use_old_fence_class,
-			   struct drm_bo_info_rep * rep,
+			   struct drm_bo_info_rep *rep,
 			   struct drm_buffer_object **bo_rep)
 {
 	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_object *bo;
 	int ret;
-	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
 	mutex_lock(&dev->struct_mutex);
 	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 	mutex_unlock(&dev->struct_mutex);
-	if (!bo)
+	if (!bo)
 		return -EINVAL;
 	if (use_old_fence_class)
@@ -1535,12 +1663,11 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
 	 * Only allow creator to change shared buffer mask.
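 *
 * With the reordered arguments above, a kernel-side call now reads
 * (flag and hint values illustrative):
 *
 *	ret = drm_bo_handle_validate(file_priv, handle,
 *				     DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM,
 *				     DRM_BO_HINT_DONT_FENCE,
 *				     0, 1, &rep, NULL);
 *
 * where 0 is the (here unused) fence_class and 1 requests the buffer's
 * existing fence class via use_old_fence_class.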
*/ -	if (bo->base.owner != file_priv)  +	if (bo->base.owner != file_priv)  		mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE); -		 -	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, -				 no_wait, rep); + +	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);  	if (!ret && bo_rep)  		*bo_rep = bo; @@ -1561,9 +1688,9 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,  	bo = drm_lookup_buffer_object(file_priv, handle, 1);  	mutex_unlock(&dev->struct_mutex); -	if (!bo) { +	if (!bo)  		return -EINVAL; -	} +  	mutex_lock(&bo->mutex);  	if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))  		(void)drm_bo_busy(bo); @@ -1586,9 +1713,8 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,  	bo = drm_lookup_buffer_object(file_priv, handle, 1);  	mutex_unlock(&dev->struct_mutex); -	if (!bo) { +	if (!bo)  		return -EINVAL; -	}  	mutex_lock(&bo->mutex);  	ret = drm_bo_wait_unfenced(bo, no_wait, 0); @@ -1600,7 +1726,7 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,  	drm_bo_fill_rep_arg(bo, rep); -      out: +out:  	mutex_unlock(&bo->mutex);  	drm_bo_usage_deref_unlocked(&bo);  	return ret; @@ -1609,21 +1735,18 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,  int drm_buffer_object_create(struct drm_device *dev,  			     unsigned long size,  			     enum drm_bo_type type, -			     uint64_t mask, +			     uint64_t flags,  			     uint32_t hint,  			     uint32_t page_alignment,  			     unsigned long buffer_start, -			     struct drm_buffer_object ** buf_obj) +			     struct drm_buffer_object **buf_obj)  {  	struct drm_buffer_manager *bm = &dev->bm;  	struct drm_buffer_object *bo;  	int ret = 0;  	unsigned long num_pages; -	if (buffer_start & ~PAGE_MASK) { -		DRM_ERROR("Invalid buffer object start.\n"); -		return -EINVAL; -	} +	size += buffer_start & ~PAGE_MASK;  	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;  	if (num_pages == 0) {  		DRM_ERROR("Illegal buffer object size.\n"); @@ -1649,28 +1772,31 @@ int drm_buffer_object_create(struct drm_device *dev,  	INIT_LIST_HEAD(&bo->vma_list);  #endif  	bo->dev = dev; -	if (buffer_start != 0) -		bo->type = drm_bo_type_user; -	else -		bo->type = type; +	bo->type = type;  	bo->num_pages = num_pages;  	bo->mem.mem_type = DRM_BO_MEM_LOCAL;  	bo->mem.num_pages = bo->num_pages;  	bo->mem.mm_node = NULL;  	bo->mem.page_alignment = page_alignment; -	bo->buffer_start = buffer_start; +	bo->buffer_start = buffer_start & PAGE_MASK;  	bo->priv_flags = 0; -	bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |  -		DRM_BO_FLAG_MAPPABLE; -	bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | -		DRM_BO_FLAG_MAPPABLE; +	bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED | +			 DRM_BO_FLAG_MAPPABLE); +	bo->mem.proposed_flags = 0;  	atomic_inc(&bm->count); -	ret = drm_bo_new_mask(bo, mask, hint); - +	/* +	 * Use drm_bo_modify_proposed_flags to error-check the proposed flags +	 */ +	ret = drm_bo_modify_proposed_flags (bo, flags, flags);  	if (ret)  		goto out_err; -	if (bo->type == drm_bo_type_dc) { +	/* +	 * For drm_bo_type_device buffers, allocate +	 * address space from the device so that applications +	 * can mmap the buffer from there +	 */ +	if (bo->type == drm_bo_type_device) {  		mutex_lock(&dev->struct_mutex);  		ret = drm_bo_setup_vm_locked(bo);  		mutex_unlock(&dev->struct_mutex); @@ -1686,7 +1812,7 @@ int drm_buffer_object_create(struct drm_device *dev,  	*buf_obj = bo;  	return 0; -      out_err: +out_err:  	
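	/*
	 * Worked example of the user-buffer rounding introduced above
	 * (hypothetical values): buffer_start = 0x12345, size = 0x1000.
	 * The in-page offset 0x345 is added to the size (0x1345, i.e. two
	 * pages) and buffer_start is truncated to 0x12000, so the object
	 * now covers every page the user range touches instead of the
	 * request failing with -EINVAL as it did before this change.
	 */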
mutex_unlock(&bo->mutex);  	drm_bo_usage_deref_unlocked(&bo); @@ -1711,7 +1837,7 @@ static int drm_bo_add_user_object(struct drm_file *file_priv,  	bo->base.ref_struct_locked = NULL;  	bo->base.unref = drm_buffer_user_object_unmap; -      out: +out:  	mutex_unlock(&dev->struct_mutex);  	return ret;  } @@ -1722,6 +1848,7 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil  	struct drm_bo_create_req *req = &arg->d.req;  	struct drm_bo_info_rep *rep = &arg->d.rep;  	struct drm_buffer_object *entry; +	enum drm_bo_type bo_type;  	int ret = 0;  	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n", @@ -1732,20 +1859,33 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil  		return -EINVAL;  	} +	/* +	 * If the buffer creation request comes in with a starting address, +	 * that points at the desired user pages to map. Otherwise, create +	 * a drm_bo_type_device buffer, which uses pages allocated from the kernel +	 */ +	bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device; + +	/* +	 * User buffers cannot be shared +	 */ +	if (bo_type == drm_bo_type_user) +		req->flags &= ~DRM_BO_FLAG_SHAREABLE; +  	ret = drm_buffer_object_create(file_priv->head->dev, -				       req->size, drm_bo_type_dc, req->mask, +				       req->size, bo_type, req->flags,  				       req->hint, req->page_alignment,  				       req->buffer_start, &entry);  	if (ret)  		goto out; -	 +  	ret = drm_bo_add_user_object(file_priv, entry, -				     req->mask & DRM_BO_FLAG_SHAREABLE); +				     req->flags & DRM_BO_FLAG_SHAREABLE);  	if (ret) {  		drm_bo_usage_deref_unlocked(&entry);  		goto out;  	} -	 +  	mutex_lock(&entry->mutex);  	drm_bo_fill_rep_arg(entry, rep);  	mutex_unlock(&entry->mutex); @@ -1754,7 +1894,7 @@ out:  	return ret;  } -int drm_bo_setstatus_ioctl(struct drm_device *dev,  +int drm_bo_setstatus_ioctl(struct drm_device *dev,  			   void *data, struct drm_file *file_priv)  {  	struct drm_bo_map_wait_idle_arg *arg = data; @@ -1771,11 +1911,17 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,  	if (ret)  		return ret; -	ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class, +	/* +	 * validate the buffer. note that 'fence_class' will be unused +	 * as we pass use_old_fence_class=1 here. Note also that +	 * the libdrm API doesn't pass fence_class to the kernel, +	 * so it's a good thing it isn't used here. +	 */ +	ret = drm_bo_handle_validate(file_priv, req->handle,  				     req->flags,  				     req->mask,  				     req->hint | DRM_BO_HINT_DONT_FENCE, -				     1, +				     req->fence_class, 1,  				     rep, NULL);  	(void) drm_bo_read_unlock(&dev->bm.bm_lock); @@ -1835,7 +1981,7 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *  				  drm_buffer_type, &uo);  	if (ret)  		return ret; -	 +  	ret = drm_bo_handle_info(file_priv, req->handle, rep);  	if (ret)  		return ret; @@ -1895,7 +2041,7 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *  	return 0;  } -static int drm_bo_leave_list(struct drm_buffer_object * bo, +static int drm_bo_leave_list(struct drm_buffer_object *bo,  			     uint32_t mem_type,  			     int free_pinned,  			     int allow_errors) @@ -1926,7 +2072,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,  		DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "  			  "cleanup. 
Removing flag and evicting.\n");  		bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; -		bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; +		bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;  	}  	if (bo->mem.mem_type == mem_type) @@ -1941,7 +2087,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,  		}  	} -      out: +out:  	mutex_unlock(&bo->mutex);  	return ret;  } @@ -1960,7 +2106,7 @@ static struct drm_buffer_object *drm_bo_entry(struct list_head *list,   * dev->struct_mutex locked.   */ -static int drm_bo_force_list_clean(struct drm_device * dev, +static int drm_bo_force_list_clean(struct drm_device *dev,  				   struct list_head *head,  				   unsigned mem_type,  				   int free_pinned, @@ -2025,7 +2171,7 @@ restart:  	return 0;  } -int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) +int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)  {  	struct drm_buffer_manager *bm = &dev->bm;  	struct drm_mem_type_manager *man = &bm->man[mem_type]; @@ -2067,7 +2213,7 @@ EXPORT_SYMBOL(drm_bo_clean_mm);   *point since we have the hardware lock.   */ -static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) +static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)  {  	int ret;  	struct drm_buffer_manager *bm = &dev->bm; @@ -2092,7 +2238,7 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)  	return ret;  } -int drm_bo_init_mm(struct drm_device * dev, +int drm_bo_init_mm(struct drm_device *dev,  		   unsigned type,  		   unsigned long p_offset, unsigned long p_size)  { @@ -2139,11 +2285,11 @@ EXPORT_SYMBOL(drm_bo_init_mm);  /*   * This function is intended to be called on drm driver unload.   * If you decide to call it from lastclose, you must protect the call - * from a potentially racing drm_bo_driver_init in firstopen.  + * from a potentially racing drm_bo_driver_init in firstopen.   * (This may happen on X server restart).   */ -int drm_bo_driver_finish(struct drm_device * dev) +int drm_bo_driver_finish(struct drm_device *dev)  {  	struct drm_buffer_manager *bm = &dev->bm;  	int ret = 0; @@ -2170,24 +2316,29 @@ int drm_bo_driver_finish(struct drm_device * dev)  	}  	mutex_unlock(&dev->struct_mutex); -	if (!cancel_delayed_work(&bm->wq)) { +	if (!cancel_delayed_work(&bm->wq))  		flush_scheduled_work(); -	} +  	mutex_lock(&dev->struct_mutex);  	drm_bo_delayed_delete(dev, 1); -	if (list_empty(&bm->ddestroy)) { +	if (list_empty(&bm->ddestroy))  		DRM_DEBUG("Delayed destroy list was clean\n"); -	} -	if (list_empty(&bm->man[0].lru)) { + +	if (list_empty(&bm->man[0].lru))  		DRM_DEBUG("Swap list was clean\n"); -	} -	if (list_empty(&bm->man[0].pinned)) { + +	if (list_empty(&bm->man[0].pinned))  		DRM_DEBUG("NO_MOVE list was clean\n"); -	} -	if (list_empty(&bm->unfenced)) { + +	if (list_empty(&bm->unfenced))  		DRM_DEBUG("Unfenced list was clean\n"); -	} -      out: + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +	ClearPageReserved(bm->dummy_read_page); +#endif +	__free_page(bm->dummy_read_page); + +out:  	mutex_unlock(&dev->struct_mutex);  	return ret;  } @@ -2195,21 +2346,32 @@ int drm_bo_driver_finish(struct drm_device * dev)  /*   * This function is intended to be called on drm driver load.   * If you decide to call it from firstopen, you must protect the call - * from a potentially racing drm_bo_driver_finish in lastclose.  + * from a potentially racing drm_bo_driver_finish in lastclose.   * (This may happen on X server restart).   
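 *
 * drm_bo_driver_init() only sets up DRM_BO_MEM_LOCAL; device apertures
 * are added afterwards by the driver or via DRM_IOCTL_MM_INIT, e.g.
 * (memory types and page counts illustrative):
 *
 *	ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_pages);
 *	if (!ret)
 *		ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);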
*/ -int drm_bo_driver_init(struct drm_device * dev) +int drm_bo_driver_init(struct drm_device *dev)  {  	struct drm_bo_driver *driver = dev->driver->bo_driver;  	struct drm_buffer_manager *bm = &dev->bm;  	int ret = -EINVAL; +	bm->dummy_read_page = NULL;  	drm_bo_init_lock(&bm->bm_lock);  	mutex_lock(&dev->struct_mutex);  	if (!driver)  		goto out_unlock; +	bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); +	if (!bm->dummy_read_page) { +		ret = -ENOMEM; +		goto out_unlock; +	} + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +	SetPageReserved(bm->dummy_read_page); +#endif +  	/*  	 * Initialize the system memory buffer type.  	 * Other types need to be driver / IOCTL initialized. @@ -2229,11 +2391,10 @@ int drm_bo_driver_init(struct drm_device * dev)  	bm->cur_pages = 0;  	INIT_LIST_HEAD(&bm->unfenced);  	INIT_LIST_HEAD(&bm->ddestroy); -      out_unlock: +out_unlock:  	mutex_unlock(&dev->struct_mutex);  	return ret;  } -  EXPORT_SYMBOL(drm_bo_driver_init);  int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -2343,13 +2504,13 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_  		DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");  		return -EINVAL;  	} -		 +  	if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {  		ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);  		if (ret)  			return ret;  	} -		 +  	mutex_lock(&dev->struct_mutex);  	ret = drm_bo_lock_mm(dev, arg->mem_type);  	mutex_unlock(&dev->struct_mutex); @@ -2361,8 +2522,8 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_  	return 0;  } -int drm_mm_unlock_ioctl(struct drm_device *dev,  -			void *data,  +int drm_mm_unlock_ioctl(struct drm_device *dev, +			void *data,  			struct drm_file *file_priv)  {  	struct drm_mm_type_arg *arg = data; @@ -2379,7 +2540,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev,  		if (ret)  			return ret;  	} -		 +  	return 0;  } @@ -2387,7 +2548,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev,   * buffer object vm functions.   */ -int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) +int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)  {  	struct drm_buffer_manager *bm = &dev->bm;  	struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; @@ -2404,7 +2565,6 @@ int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)  	}  	return 1;  } -  EXPORT_SYMBOL(drm_mem_reg_is_pci);  /** @@ -2450,7 +2610,7 @@ int drm_bo_pci_offset(struct drm_device *dev,   * Call bo->mutex locked.   
 */
-void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
 {
 	struct drm_device *dev = bo->dev;
 	loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
@@ -2462,13 +2622,25 @@ void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
 	unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
 }
-static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
+/**
+ * drm_bo_takedown_vm_locked:
+ *
+ * @bo: the buffer object whose drm device mapping should be removed
+ *
+ * Remove any associated vm mapping on the drm device node that
+ * would have been created for a drm_bo_type_device buffer.
+ */
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
 {
-	struct drm_map_list *list = &bo->map_list;
+	struct drm_map_list *list;
 	drm_local_map_t *map;
 	struct drm_device *dev = bo->dev;
 	DRM_ASSERT_LOCKED(&dev->struct_mutex);
+	if (bo->type != drm_bo_type_device)
+		return;
+
+	list = &bo->map_list;
 	if (list->user_token) {
 		drm_ht_remove_item(&dev->map_hash, &list->hash);
 		list->user_token = 0;
@@ -2488,7 +2660,17 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
 	drm_bo_usage_deref_locked(&bo);
 }
-static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
+/**
+ * drm_bo_setup_vm_locked:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to drm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
 {
 	struct drm_map_list *list = &bo->map_list;
 	drm_local_map_t *map;
@@ -2529,11 +2711,11 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
 	return 0;
 }
-int drm_bo_version_ioctl(struct drm_device *dev, void *data, 
+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
 	struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-	
+
 	arg->major = DRM_BO_INIT_MAJOR;
 	arg->minor = DRM_BO_INIT_MINOR;
 	arg->patchlevel = DRM_BO_INIT_PATCH;
diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c
index e5a86826..f967fb7c 100644
--- a/linux-core/drm_bo_lock.c
+++ b/linux-core/drm_bo_lock.c
@@ -31,19 +31,19 @@
 /*
  * This file implements a simple replacement for the buffer manager use
  * of the heavyweight hardware lock.
- * The lock is a read-write lock. Taking it in read mode is fast, and 
+ * The lock is a read-write lock. Taking it in read mode is fast, and
  * intended for in-kernel use only.
  * Taking it in write mode is slow.
  *
- * The write mode is used only when there is a need to block all 
- * user-space processes from allocating a 
+ * The write mode is used only when there is a need to block all
+ * user-space processes from allocating a
  * new memory area.
  * Typical use in write mode is X server VT switching, and it's allowed
  * to leave kernel space with the write lock held. If a user-space process
  * dies while having the write-lock, it will be released during the file
  * descriptor release.
  *
- * The read lock is typically placed at the start of an IOCTL- or 
+ * The read lock is typically placed at the start of an IOCTL- or
  * user-space callable function that may end up allocating a memory area.
  * This includes setstatus, super-ioctls and no_pfn; the latter may move
  * unmappable regions to mappable.
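 * A typical bracket in such a path looks like (condensed sketch; the
 * full version appears in drm_bo_setstatus_ioctl()):
 *
 *	ret = drm_bo_read_lock(&dev->bm.bm_lock);
 *	if (ret)
 *		return ret;
 *	... allocate or validate buffers ...
 *	(void) drm_bo_read_unlock(&dev->bm.bm_lock);
 *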
It's a bug to leave kernel space with the @@ -53,7 +53,7 @@   * latency. The locking functions will return -EAGAIN if interrupted by a   * signal.   * - * Locking order: The lock should be taken BEFORE any kernel mutexes  + * Locking order: The lock should be taken BEFORE any kernel mutexes   * or spinlocks.   */ @@ -73,7 +73,6 @@ void drm_bo_read_unlock(struct drm_bo_lock *lock)  	if (atomic_read(&lock->readers) == 0)  		wake_up_interruptible(&lock->queue);  } -  EXPORT_SYMBOL(drm_bo_read_unlock);  int drm_bo_read_lock(struct drm_bo_lock *lock) @@ -95,7 +94,6 @@ int drm_bo_read_lock(struct drm_bo_lock *lock)  	}  	return 0;  } -  EXPORT_SYMBOL(drm_bo_read_lock);  static int __drm_bo_write_unlock(struct drm_bo_lock *lock) @@ -123,9 +121,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)  	int ret = 0;  	struct drm_device *dev; -	if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) { +	if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))  		return -EINVAL; -	}  	while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {  		ret = wait_event_interruptible @@ -140,7 +137,7 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)  	/*  	 * Add a dummy user-object, the destructor of which will -	 * make sure the lock is released if the client dies  +	 * make sure the lock is released if the client dies  	 * while holding it.  	 */ @@ -149,9 +146,9 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)  	ret = drm_add_user_object(file_priv, &lock->base, 0);  	lock->base.remove = &drm_bo_write_lock_remove;  	lock->base.type = drm_lock_type; -	if (ret) { +	if (ret)  		(void)__drm_bo_write_unlock(lock); -	} +  	mutex_unlock(&dev->struct_mutex);  	return ret; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 7c86c4aa..b06a09f0 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -1,8 +1,8 @@  /************************************************************************** - *  + *   * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA   * All Rights Reserved. - *  + *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including @@ -10,7 +10,7 @@   * distribute, sub license, and/or sell copies of the Software, and to   * permit persons to whom the Software is furnished to do so, subject to   * the following conditions: - *  + *   * The above copyright notice and this permission notice (including the   * next paragraph) shall be included in all copies or substantial portions   * of the Software. @@ -19,8 +19,8 @@   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,   * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL   * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE   * USE OR OTHER DEALINGS IN THE SOFTWARE.   *   **************************************************************************/ @@ -35,7 +35,7 @@   * have not been requested to free also pinned regions.   
 */
-static void drm_bo_free_old_node(struct drm_buffer_object * bo)
+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
 {
 	struct drm_bo_mem_reg *old_mem = &bo->mem;
@@ -48,13 +48,13 @@ static void drm_bo_free_old_node(struct drm_buffer_object * bo)
 	old_mem->mm_node = NULL;
 }
-int drm_bo_move_ttm(struct drm_buffer_object * bo,
-		    int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+int drm_bo_move_ttm(struct drm_buffer_object *bo,
+		    int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
 {
 	struct drm_ttm *ttm = bo->ttm;
 	struct drm_bo_mem_reg *old_mem = &bo->mem;
 	uint64_t save_flags = old_mem->flags;
-	uint64_t save_mask = old_mem->mask;
+	uint64_t save_proposed_flags = old_mem->proposed_flags;
 	int ret;
 	if (old_mem->mem_type == DRM_BO_MEM_TT) {
@@ -71,18 +71,17 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
 		save_flags = old_mem->flags;
 	}
 	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-		ret = drm_bind_ttm(ttm, new_mem);
+		ret = drm_ttm_bind(ttm, new_mem);
 		if (ret)
 			return ret;
 	}
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	old_mem->mask = save_mask;
+	old_mem->proposed_flags = save_proposed_flags;
 	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_bo_move_ttm);
 /**
@@ -90,17 +89,17 @@ EXPORT_SYMBOL(drm_bo_move_ttm);
  *
  * \param bo The buffer object.
  * \return Failure indication.
- * 
+ *
  * Returns -EINVAL if the buffer object is currently not mappable.
  * Returns -ENOMEM if the ioremap operation failed.
  * Otherwise returns zero.
- * 
+ *
  * After a successful call, bo->iomap contains the virtual address, or NULL
- * if the buffer object content is not accessible through PCI space. 
+ * if the buffer object content is not accessible through PCI space.
  * Call bo->mutex locked.
  */
-int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
 			void **virtual)
 {
 	struct drm_buffer_manager *bm = &dev->bm;
@@ -136,7 +135,7 @@ EXPORT_SYMBOL(drm_mem_reg_ioremap);
  * Call bo->mutex locked.
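 *
 * A minimal sketch of using the drm_mem_reg_ioremap()/iounmap() pair;
 * the NULL check mirrors the comment above:
 *
 *	void *virtual;
 *	int ret = drm_mem_reg_ioremap(dev, &bo->mem, &virtual);
 *	if (ret)
 *		return ret;
 *	if (virtual != NULL) {
 *		... access the buffer contents through virtual ...
 *		drm_mem_reg_iounmap(dev, &bo->mem, virtual);
 *	}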
*/ -void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, +void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,  			 void *virtual)  {  	struct drm_buffer_manager *bm; @@ -145,9 +144,8 @@ void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,  	bm = &dev->bm;  	man = &bm->man[mem->mem_type]; -	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) { +	if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))  		iounmap(virtual); -	}  }  static int drm_copy_io_page(void *dst, void *src, unsigned long page) @@ -163,7 +161,8 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page)  	return 0;  } -static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src, +				unsigned long page)  {  	struct page *d = drm_ttm_get_page(ttm, page);  	void *dst; @@ -181,7 +180,7 @@ static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long p  	return 0;  } -static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)  {  	struct page *s = drm_ttm_get_page(ttm, page);  	void *src; @@ -199,8 +198,8 @@ static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long p  	return 0;  } -int drm_bo_move_memcpy(struct drm_buffer_object * bo, -		       int evict, int no_wait, struct drm_bo_mem_reg * new_mem) +int drm_bo_move_memcpy(struct drm_buffer_object *bo, +		       int evict, int no_wait, struct drm_bo_mem_reg *new_mem)  {  	struct drm_device *dev = bo->dev;  	struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; @@ -211,7 +210,7 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,  	void *new_iomap;  	int ret;  	uint64_t save_flags = old_mem->flags; -	uint64_t save_mask = old_mem->mask; +	uint64_t save_proposed_flags = old_mem->proposed_flags;  	unsigned long i;  	unsigned long page;  	unsigned long add = 0; @@ -251,27 +250,26 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,  			goto out1;  	}  	mb(); -      out2: +out2:  	drm_bo_free_old_node(bo);  	*old_mem = *new_mem;  	new_mem->mm_node = NULL; -	old_mem->mask = save_mask; +	old_mem->proposed_flags = save_proposed_flags;  	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);  	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {  		drm_ttm_unbind(ttm); -		drm_destroy_ttm(ttm); +		drm_ttm_destroy(ttm);  		bo->ttm = NULL;  	} -      out1: +out1:  	drm_mem_reg_iounmap(dev, new_mem, new_iomap); -      out: +out:  	drm_mem_reg_iounmap(dev, &old_copy, old_iomap);  	return ret;  } -  EXPORT_SYMBOL(drm_bo_move_memcpy);  /* @@ -280,8 +278,8 @@ EXPORT_SYMBOL(drm_bo_move_memcpy);   * object. Call bo->mutex locked.   
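 *
 * Note that drm_bo_move_ttm() and drm_bo_move_memcpy() above share the
 * same flag-preserving pattern around the placement copy:
 *
 *	uint64_t save_proposed_flags = old_mem->proposed_flags;
 *	*old_mem = *new_mem;
 *	new_mem->mm_node = NULL;
 *	old_mem->proposed_flags = save_proposed_flags;
 *
 * so the struct assignment cannot clobber the flags the caller proposed.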
*/ -int drm_buffer_object_transfer(struct drm_buffer_object * bo, -			       struct drm_buffer_object ** new_obj) +int drm_buffer_object_transfer(struct drm_buffer_object *bo, +			       struct drm_buffer_object **new_obj)  {  	struct drm_buffer_object *fbo;  	struct drm_device *dev = bo->dev; @@ -305,7 +303,7 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo,  	INIT_LIST_HEAD(&fbo->p_mm_list);  #endif -	drm_fence_reference_unlocked(&fbo->fence, bo->fence); +	fbo->fence = drm_fence_reference_locked(bo->fence);  	fbo->pinned_node = NULL;  	fbo->mem.mm_node->private = (void *)fbo;  	atomic_set(&fbo->usage, 1); @@ -322,19 +320,17 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo,   * We cannot restart until it has finished.   */ -int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, -			      int evict, -			      int no_wait, -			      uint32_t fence_class, -			      uint32_t fence_type, -			      uint32_t fence_flags, struct drm_bo_mem_reg * new_mem) +int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, +			      int evict, int no_wait, uint32_t fence_class, +			      uint32_t fence_type, uint32_t fence_flags, +			      struct drm_bo_mem_reg *new_mem)  {  	struct drm_device *dev = bo->dev;  	struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];  	struct drm_bo_mem_reg *old_mem = &bo->mem;  	int ret;  	uint64_t save_flags = old_mem->flags; -	uint64_t save_mask = old_mem->mask; +	uint64_t save_proposed_flags = old_mem->proposed_flags;  	struct drm_buffer_object *old_obj;  	if (bo->fence) @@ -349,11 +345,11 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,  #ifdef DRM_ODD_MM_COMPAT  	/*  	 * In this mode, we don't allow pipelining a copy blit, -	 * since the buffer will be accessible from user space  +	 * since the buffer will be accessible from user space  	 * the moment we return and rebuild the page tables.  	 *  	 * With normal vm operation, page tables are rebuilt -	 * on demand using fault(), which waits for buffer idle.  +	 * on demand using fault(), which waits for buffer idle.  	 */  	if (1)  #else @@ -369,7 +365,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,  		if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {  			drm_ttm_unbind(bo->ttm); -			drm_destroy_ttm(bo->ttm); +			drm_ttm_destroy(bo->ttm);  			bo->ttm = NULL;  		}  	} else { @@ -403,11 +399,10 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,  	*old_mem = *new_mem;  	new_mem->mm_node = NULL; -	old_mem->mask = save_mask; +	old_mem->proposed_flags = save_proposed_flags;  	DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);  	return 0;  } -  EXPORT_SYMBOL(drm_bo_move_accel_cleanup);  int drm_bo_same_page(unsigned long offset, @@ -420,13 +415,11 @@ EXPORT_SYMBOL(drm_bo_same_page);  unsigned long drm_bo_offset_end(unsigned long offset,  				unsigned long end)  { -  	offset = (offset + PAGE_SIZE) & PAGE_MASK;  	return (end < offset) ? end : offset;  }  EXPORT_SYMBOL(drm_bo_offset_end); -  static pgprot_t drm_kernel_io_prot(uint32_t map_type)  {  	pgprot_t tmp = PAGE_KERNEL; @@ -475,8 +468,9 @@ static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,  	return (!map->virtual) ? 
-ENOMEM : 0;
 }
-static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
-			   unsigned long num_pages, struct drm_bo_kmap_obj *map)
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
+			   unsigned long start_page, unsigned long num_pages,
+			   struct drm_bo_kmap_obj *map)
 {
 	struct drm_device *dev = bo->dev;
 	struct drm_bo_mem_reg *mem = &bo->mem;
@@ -503,7 +497,7 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
 		 * Populate the part we're mapping;
 		 */
-		for (i = start_page; i< start_page + num_pages; ++i) {
+		for (i = start_page; i < start_page + num_pages; ++i) {
 			d = drm_ttm_get_page(ttm, i);
 			if (!d)
 				return -ENOMEM;
@@ -530,7 +524,8 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
  * and caching policy the buffer currently has.
  * Mapping multiple pages or buffers that live in io memory is a bit slow and
  * consumes vmalloc space. Be restrictive with such mappings.
- * Mapping single pages usually returns the logical kernel address, (which is fast)
+ * Mapping single pages usually returns the logical kernel address,
+ * (which is fast)
  * but may use slower temporary mappings for high memory pages or
  * uncached / write-combined pages.
 *
@@ -581,7 +576,7 @@ void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
 	if (!map->virtual)
 		return;
-	switch(map->bo_kmap_type) {
+	switch (map->bo_kmap_type) {
 	case bo_map_iomap:
 		iounmap(map->virtual);
 		break;
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 60eca60c..75c75c2f 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -53,7 +53,7 @@ struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map
 	struct drm_map_list *entry;
 	list_for_each_entry(entry, &dev->maplist, head) {
 		if (entry->map && map->type == entry->map->type &&
-		    ((entry->map->offset == map->offset) || 
+		    ((entry->map->offset == map->offset) ||
 		     (map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
 			return entry;
 		}
@@ -80,10 +80,10 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
 		int ret;
 		hash->key = user_token >> PAGE_SHIFT;
 		ret = drm_ht_insert_item(&dev->map_hash, hash);
-		if (ret != -EINVAL) 
+		if (ret != -EINVAL)
 			return ret;
 	}
-	return drm_ht_just_insert_please(&dev->map_hash, hash, 
+	return drm_ht_just_insert_please(&dev->map_hash, hash,
 					 user_token, 32 - PAGE_SHIFT - 3,
 					 0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
 }
@@ -173,12 +173,17 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
 		if (drm_core_has_MTRR(dev)) {
 			if (map->type == _DRM_FRAME_BUFFER ||
 			    (map->flags & _DRM_WRITE_COMBINING)) {
-				map->mtrr =  mtrr_add(map->offset, map->size,
-						      MTRR_TYPE_WRCOMB, 1);
+				map->mtrr = mtrr_add(map->offset, map->size,
+						     MTRR_TYPE_WRCOMB, 1);
 			}
 		}
-		if (map->type == _DRM_REGISTERS)
+		if (map->type == _DRM_REGISTERS) {
 			map->handle = ioremap(map->offset, map->size);
+			if (!map->handle) {
+				drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+				return -ENOMEM;
+			}
+		}
 		break;
 	case _DRM_SHM:
 		list = drm_find_matching_map(dev, map);
@@ -297,7 +302,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
 	/* Assign a 32-bit handle */
-	user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle : 
+	user_token = (map->type == _DRM_SHM) ?
(unsigned long) map->handle :  		map->offset;  	ret = drm_map_handle(dev, &list->hash, user_token, 0); @@ -379,7 +384,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)  	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {  		if (r_list->map == map) {  			list_del(&r_list->head); -			drm_ht_remove_key(&dev->map_hash,  +			drm_ht_remove_key(&dev->map_hash,  					  r_list->user_token >> PAGE_SHIFT);  			drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);  			found = 1; @@ -387,9 +392,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)  		}  	} -	if (!found) { +	if (!found)  		return -EINVAL; -	} +  	/* List has wrapped around to the head pointer, or it's empty and we  	 * didn't find anything.  	 */ @@ -494,7 +499,8 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,   *   * Frees any pages and buffers associated with the given entry.   */ -static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry) +static void drm_cleanup_buf_error(struct drm_device *dev, +				  struct drm_buf_entry *entry)  {  	int i; @@ -529,7 +535,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry *  #if __OS_HAS_AGP  /** - * Add AGP buffers for DMA transfers + * Add AGP buffers for DMA transfers.   *   * \param dev struct drm_device to which the buffers are to be added.   * \param request pointer to a struct drm_buf_desc describing the request. @@ -539,7 +545,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry *   * reallocates the buffer list of the same size order to accommodate the new   * buffers.   */ -int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) +int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)  {  	struct drm_device_dma *dma = dev->dma;  	struct drm_buf_entry *entry; @@ -709,7 +715,7 @@ int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request)  EXPORT_SYMBOL(drm_addbufs_agp);  #endif				/* __OS_HAS_AGP */ -int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) +int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)  {  	struct drm_device_dma *dma = dev->dma;  	int count; @@ -821,9 +827,9 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)  	page_count = 0;  	while (entry->buf_count < count) { -		 +  		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful); -		 +  		if (!dmah) {  			/* Set count correctly so we free the proper amount. 
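 *
 * (For reference, the allocation above has the shape
 *
 *	dmah = drm_pci_alloc(dev, size, align, maxaddr);
 *
 * with size = PAGE_SIZE << page_order, align = 0x1000 and
 * maxaddr = 0xfffffffful, i.e. page-aligned buffers kept below 4 GiB
 * for 32-bit DMA; this reading of the two trailing arguments is an
 * assumption, not stated in this patch.)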
 */
 			entry->buf_count = count;
@@ -935,7 +941,7 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
 }
 EXPORT_SYMBOL(drm_addbufs_pci);
-static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request)
+static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
 {
 	struct drm_device_dma *dma = dev->dma;
 	struct drm_buf_entry *entry;
@@ -1600,5 +1606,3 @@ int drm_order(unsigned long size)
 	return order;
 }
 EXPORT_SYMBOL(drm_order);
-
-
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index ae44e500..a745a7d9 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -1,5 +1,5 @@
 /**************************************************************************
- * 
+ *
  * This kernel module is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License as
  * published by the Free Software Foundation; either version 2 of the
@@ -13,7 +13,7 @@
  * You should have received a copy of the GNU General Public License
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * 
+ *
 **************************************************************************/
 /*
  * This code provides access to unexported mm kernel features. It is necessary
@@ -21,7 +21,7 @@
  * directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- *          Linux kernel mm subsystem authors. 
+ *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */
@@ -50,7 +50,7 @@ int drm_unmap_page_from_agp(struct page *page)
          * performance reasons */
         return i;
 }
-#endif 
+#endif
 #if  (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
@@ -80,22 +80,22 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
 /*
 * vm code for kernels below 2.6.15 in which version a major vm write
- * occured. This implement a simple straightforward 
+ * occurred. This implements a simple, straightforward
 * version similar to what's going to be
 * in kernel 2.6.19+
 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
 * nopfn.
- */ 
+ */
 static struct {
 	spinlock_t lock;
 	struct page *dummy_page;
 	atomic_t present;
-} drm_np_retry = 
+} drm_np_retry =
 {SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
-static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, 
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
 				    struct fault_data *data);
@@ -126,7 +126,7 @@ void free_nopage_retry(void)
 }
 struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
-			       unsigned long address, 
+			       unsigned long address,
 			       int *type)
 {
 	struct fault_data data;
@@ -204,14 +204,14 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
 	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
 	unsigned long page_offset;
 	struct page *page = NULL;
-	struct drm_ttm *ttm; 
+	struct drm_ttm *ttm;
 	struct drm_device *dev;
 	unsigned long pfn;
 	int err;
 	unsigned long bus_base;
 	unsigned long bus_offset;
 	unsigned long bus_size;
-	
+
 	dev = bo->dev;
 	while(drm_bo_read_lock(&dev->bm.bm_lock));
@@ -219,12 +219,12 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
 	err = drm_bo_wait(bo, 0, 1, 0);
 	if (err) {
-		data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;  		goto out_unlock;  	} -	 -	 + +  	/*  	 * If buffer happens to be in a non-mappable location,  	 * move it to a mappable. @@ -232,7 +232,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,  	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {  		unsigned long _end = jiffies + 3*DRM_HZ; -		uint32_t new_mask = bo->mem.mask | +		uint32_t new_mask = bo->mem.proposed_flags |  			DRM_BO_FLAG_MAPPABLE |  			DRM_BO_FLAG_FORCE_MAPPABLE; @@ -253,7 +253,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,  	}  	dev = bo->dev; -	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,  +	err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,  				&bus_size);  	if (err) { @@ -286,7 +286,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,  	err = vm_insert_pfn(vma, address, pfn);  	if (!err || err == -EBUSY) -		data->type = VM_FAULT_MINOR;  +		data->type = VM_FAULT_MINOR;  	else  		data->type = VM_FAULT_OOM;  out_unlock: @@ -330,7 +330,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,   * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated   * workaround for a single BUG statement in do_no_page in these versions. The   * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_ - * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to  + * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to   * check first take the dev->struct_mutex, and then trylock all mmap_sems. If this   * fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,   * release the cpu and retry. We also need to keep track of all vmas mapping the ttm. @@ -351,13 +351,13 @@ typedef struct vma_entry {  struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, -			       unsigned long address,  +			       unsigned long address,  			       int *type)  {  	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;  	unsigned long page_offset;  	struct page *page; -	struct drm_ttm *ttm;  +	struct drm_ttm *ttm;  	struct drm_device *dev;  	mutex_lock(&bo->mutex); @@ -369,7 +369,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,  		page = NOPAGE_SIGBUS;  		goto out_unlock;  	} -	 +  	dev = bo->dev;  	if (drm_mem_reg_is_pci(dev, &bo->mem)) { @@ -403,8 +403,8 @@ int drm_bo_map_bound(struct vm_area_struct *vma)  	unsigned long bus_base;  	unsigned long bus_offset;  	unsigned long bus_size; -	 -	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,  + +	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,  				&bus_offset, &bus_size);  	BUG_ON(ret); @@ -419,7 +419,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma)  	return ret;  } -	 +  int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)  { @@ -493,7 +493,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)  {  	p_mm_entry_t *entry;  	int lock_ok = 1; -	 +  	list_for_each_entry(entry, &bo->p_mm_list, head) {  		BUG_ON(entry->locked);  		if (!down_write_trylock(&entry->mm->mmap_sem)) { @@ -507,7 +507,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)  		return 0;  	list_for_each_entry(entry, &bo->p_mm_list, head) { -		if (!entry->locked)  +		if (!entry->locked)  			break;  		up_write(&entry->mm->mmap_sem);  		entry->locked = 0; @@ -524,7 +524,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)  void drm_bo_unlock_kmm(struct drm_buffer_object * bo)  {  	p_mm_entry_t *entry; -	 +  	
list_for_each_entry(entry, &bo->p_mm_list, head) {  		BUG_ON(!entry->locked);  		up_write(&entry->mm->mmap_sem); @@ -532,7 +532,7 @@ void drm_bo_unlock_kmm(struct drm_buffer_object * bo)  	}  } -int drm_bo_remap_bound(struct drm_buffer_object *bo)  +int drm_bo_remap_bound(struct drm_buffer_object *bo)  {  	vma_entry_t *v_entry;  	int ret = 0; @@ -553,9 +553,9 @@ void drm_bo_finish_unmap(struct drm_buffer_object *bo)  	vma_entry_t *v_entry;  	list_for_each_entry(v_entry, &bo->vma_list, head) { -		v_entry->vma->vm_flags &= ~VM_PFNMAP;  +		v_entry->vma->vm_flags &= ~VM_PFNMAP;  	} -}	 +}  #endif diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index f74f4bc2..f8933e0c 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -89,7 +89,7 @@  #define __user  #endif -#if !defined(__put_page)  +#if !defined(__put_page)  #define __put_page(p)           atomic_dec(&(p)->count)  #endif @@ -104,7 +104,7 @@  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)  static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)  { -  return remap_page_range(vma, from,  +  return remap_page_range(vma, from,  			  pfn << PAGE_SHIFT,  			  size,  			  pgprot); @@ -178,7 +178,7 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)  /* - * Flush relevant caches and clear a VMA structure so that page references  + * Flush relevant caches and clear a VMA structure so that page references   * will cause a page fault. Don't flush tlbs.   */ @@ -186,7 +186,7 @@ extern void drm_clear_vma(struct vm_area_struct *vma,  			  unsigned long addr, unsigned long end);  /* - * Return the PTE protection map entries for the VMA flags given by  + * Return the PTE protection map entries for the VMA flags given by   * flags. This is a functional interface to the kernel's protection map.   */ @@ -223,7 +223,7 @@ extern void free_nopage_retry(void);  #ifndef DRM_FULL_MM_COMPAT  /* - * For now, just return a dummy page that we've allocated out of  + * For now, just return a dummy page that we've allocated out of   * static space. The page will be put by do_nopage() since we've already   * filled out the pte.   */ @@ -233,13 +233,13 @@ struct fault_data {  	unsigned long address;  	pgoff_t pgoff;  	unsigned int flags; -	 +  	int type;  };  #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))  extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, -				     unsigned long address,  +				     unsigned long address,  				     int *type);  #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \    !defined(DRM_FULL_MM_COMPAT) @@ -254,22 +254,22 @@ struct drm_buffer_object;  /* - * Add a vma to the ttm vma list, and the  + * Add a vma to the ttm vma list, and the   * process mm pointer to the ttm mm list. Needs the ttm mutex.   */ -extern int drm_bo_add_vma(struct drm_buffer_object * bo,  +extern int drm_bo_add_vma(struct drm_buffer_object * bo,  			   struct vm_area_struct *vma);  /*   * Delete a vma and the corresponding mm pointer from the   * ttm lists. Needs the ttm mutex.   */ -extern void drm_bo_delete_vma(struct drm_buffer_object * bo,  +extern void drm_bo_delete_vma(struct drm_buffer_object * bo,  			      struct vm_area_struct *vma);  /*   * Attempts to lock all relevant mmap_sems for a ttm, while - * not releasing the ttm mutex. May return -EAGAIN to avoid  + * not releasing the ttm mutex. May return -EAGAIN to avoid   * deadlocks. 
In that case the caller shall release the ttm mutex,   * schedule() and try again.   */ @@ -292,7 +292,7 @@ extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);  extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);  /* - * Remap all vmas of this ttm using io_remap_pfn_range. We cannot  + * Remap all vmas of this ttm using io_remap_pfn_range. We cannot   * fault these pfns in, because the first one will set the vma VM_PFNMAP   * flag, which will make the next fault bug in do_nopage(). The function   * releases the mmap_sems for this ttm. diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 7854e89c..83ad291e 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -89,7 +89,7 @@ again:  		mutex_unlock(&dev->struct_mutex);  		goto again;  	} -	 +  	mutex_unlock(&dev->struct_mutex);  	return new_id;  } @@ -160,7 +160,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,  	request->handle = NULL;  	list_for_each_entry(_entry, &dev->maplist, head) {  		if (_entry->map == map) { -			request->handle =  +			request->handle =  			    (void *)(unsigned long)_entry->user_token;  			break;  		} diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index 7cc44193..f7bff0ac 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -43,7 +43,7 @@   *   * Allocate and initialize a drm_device_dma structure.   */ -int drm_dma_setup(struct drm_device * dev) +int drm_dma_setup(struct drm_device *dev)  {  	int i; @@ -65,9 +65,9 @@ int drm_dma_setup(struct drm_device * dev)   * \param dev DRM device.   *   * Free all pages associated with DMA buffers, the buffers and pages lists, and - * finally the the drm_device::dma structure itself. + * finally the drm_device::dma structure itself.   */ -void drm_dma_takedown(struct drm_device * dev) +void drm_dma_takedown(struct drm_device *dev)  {  	struct drm_device_dma *dma = dev->dma;  	int i, j; @@ -129,7 +129,7 @@ void drm_dma_takedown(struct drm_device * dev)   *   * Resets the fields of \p buf.   
*/ -void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) +void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)  {  	if (!buf)  		return; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 296a3268..3c2794d0 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -121,13 +121,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {  	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -	DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,  +	DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,  		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -	DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,  +	DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,  		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -	DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,  +	DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,  		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), -	DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,  +	DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,  		      DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),  	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), @@ -183,8 +183,8 @@ int drm_lastclose(struct drm_device * dev)  	if (dev->unique) {  		drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER); -		dev->unique=NULL; -		dev->unique_len=0; +		dev->unique = NULL; +		dev->unique_len = 0;  	}  	if (dev->irq_enabled) @@ -242,10 +242,12 @@ int drm_lastclose(struct drm_device * dev)  		list_del(&vma->head);  		drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);  	} -	 +  	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) { -		drm_rmmap_locked(dev, r_list->map); -		r_list = NULL; +		if (!(r_list->map->flags & _DRM_DRIVER)) { +			drm_rmmap_locked(dev, r_list->map); +			r_list = NULL; +		}  	}  	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) { @@ -322,7 +324,7 @@ int drm_init(struct drm_driver *driver,  			pci_get_subsys(pid->vendor, pid->device, pid->subvendor,  				       pid->subdevice, pdev))) {  			/* Are there device class requirements? */ -			if ((pid->class != 0)  +			if ((pid->class != 0)  				&& ((pdev->class & pid->class_mask) != pid->class)) {  				continue;  			} @@ -353,7 +355,7 @@ int drm_init(struct drm_driver *driver,  					       pid->subvendor, pid->subdevice,  					       pdev))) {  				/* Are there device class requirements? 
*/ -				if ((pid->class != 0)  +				if ((pid->class != 0)  					&& ((pdev->class & pid->class_mask) != pid->class)) {  					continue;  				} @@ -390,15 +392,6 @@ static void drm_cleanup(struct drm_device * dev)  	drm_lastclose(dev);  	drm_fence_manager_takedown(dev); -	drm_ht_remove(&dev->map_hash); -	drm_mm_takedown(&dev->offset_manager); -	drm_ht_remove(&dev->object_hash); - -	if (!drm_fb_loaded) -		pci_disable_device(dev->pdev); - -	drm_ctxbitmap_cleanup(dev); -  	if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp  	    && dev->agp->agp_mtrr >= 0) {  		int retval; @@ -415,6 +408,14 @@ static void drm_cleanup(struct drm_device * dev)  	if (dev->driver->unload)  		dev->driver->unload(dev); +	if (!drm_fb_loaded) +		pci_disable_device(dev->pdev); + +	drm_ctxbitmap_cleanup(dev); +	drm_ht_remove(&dev->map_hash); +	drm_mm_takedown(&dev->offset_manager); +	drm_ht_remove(&dev->object_hash); +  	drm_put_head(&dev->primary);  	if (drm_put_dev(dev))  		DRM_ERROR("Cannot unload module\n"); @@ -467,19 +468,19 @@ static int __init drm_core_init(void)  	unsigned long max_memctl_mem;  	si_meminfo(&si); -	 +  	/*  	 * AGP only allows low / DMA32 memory ATM.  	 */  	avail_memctl_mem = si.totalram - si.totalhigh; -	/*  -	 * Avoid overflows  +	/* +	 * Avoid overflows  	 */  	max_memctl_mem = 1UL << (32 - PAGE_SHIFT); -	max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;  +	max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;  	if (avail_memctl_mem >= max_memctl_mem)  		avail_memctl_mem = max_memctl_mem; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index e696b42d..288b4db6 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -34,7 +34,7 @@   * Typically called by the IRQ handler.   */ -void drm_fence_handler(struct drm_device * dev, uint32_t fence_class, +void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,  		       uint32_t sequence, uint32_t type, uint32_t error)  {  	int wake = 0; @@ -58,9 +58,8 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,  	diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;  	ge_last_exe = diff < driver->wrap_diff; -	if (is_exe && ge_last_exe) { +	if (is_exe && ge_last_exe)  		fc->last_exe_flush = sequence; -	}  	if (list_empty(&fc->ring))  		return; @@ -123,11 +122,11 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,  	 */  	if ((fc->pending_flush & type) != type) { -	        head = head->prev; +		head = head->prev;  		list_for_each_entry(fence, head, ring) {  			if (&fence->ring == &fc->ring)  				break; -	    		diff = (fc->last_exe_flush - fence->sequence) & +			diff = (fc->last_exe_flush - fence->sequence) &  				driver->sequence_mask;  			if (diff > driver->wrap_diff)  				break; @@ -141,10 +140,9 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,  		DRM_WAKEUP(&fc->fence_queue);  	}  } -  EXPORT_SYMBOL(drm_fence_handler); -static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) +static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)  {  	struct drm_fence_manager *fm = &dev->fm;  	unsigned long flags; @@ -154,7 +152,7 @@ static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)  	write_unlock_irqrestore(&fm->lock, flags);  } -void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) +void drm_fence_usage_deref_locked(struct drm_fence_object **fence)  {  	struct drm_fence_object *tmp_fence = *fence;  	struct drm_device *dev = tmp_fence->dev; @@ 
-173,7 +171,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)  }  EXPORT_SYMBOL(drm_fence_usage_deref_locked); -void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) +void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)  {  	struct drm_fence_object *tmp_fence = *fence;  	struct drm_device *dev = tmp_fence->dev; @@ -212,7 +210,8 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,  }  EXPORT_SYMBOL(drm_fence_reference_unlocked); -static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) +static void drm_fence_object_destroy(struct drm_file *priv, +				     struct drm_user_object *base)  {  	struct drm_fence_object *fence =  	    drm_user_object_entry(base, struct drm_fence_object, base); @@ -220,7 +219,7 @@ static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_obje  	drm_fence_usage_deref_locked(&fence);  } -int drm_fence_object_signaled(struct drm_fence_object * fence, +int drm_fence_object_signaled(struct drm_fence_object *fence,  			      uint32_t mask, int poke_flush)  {  	unsigned long flags; @@ -240,8 +239,9 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,  }  EXPORT_SYMBOL(drm_fence_object_signaled); -static void drm_fence_flush_exe(struct drm_fence_class_manager * fc, -				struct drm_fence_driver * driver, uint32_t sequence) +static void drm_fence_flush_exe(struct drm_fence_class_manager *fc, +				struct drm_fence_driver *driver, +				uint32_t sequence)  {  	uint32_t diff; @@ -249,15 +249,13 @@ static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,  		fc->exe_flush_sequence = sequence;  		fc->pending_exe_flush = 1;  	} else { -		diff = -		    (sequence - fc->exe_flush_sequence) & driver->sequence_mask; -		if (diff < driver->wrap_diff) { +		diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask; +		if (diff < driver->wrap_diff)  			fc->exe_flush_sequence = sequence; -		}  	}  } -int drm_fence_object_flush(struct drm_fence_object * fence, +int drm_fence_object_flush(struct drm_fence_object *fence,  			   uint32_t type)  {  	struct drm_device *dev = fence->dev; @@ -296,7 +294,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,   * wrapped around and reused.   */ -void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence) +void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, +			 uint32_t sequence)  {  	struct drm_fence_manager *fm = &dev->fm;  	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class]; @@ -328,12 +327,10 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t  	mutex_unlock(&dev->struct_mutex);  	diff = (old_sequence - fence->sequence) & driver->sequence_mask;  	read_unlock_irqrestore(&fm->lock, flags); -	if (diff < driver->wrap_diff) { +	if (diff < driver->wrap_diff)  		drm_fence_object_flush(fence, fence->type); -	}  	drm_fence_usage_deref_unlocked(&fence);  } -  EXPORT_SYMBOL(drm_fence_flush_old);  static int drm_fence_lazy_wait(struct drm_fence_object *fence, @@ -378,7 +375,7 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,  	return 0;  } -int drm_fence_object_wait(struct drm_fence_object * fence, +int drm_fence_object_wait(struct drm_fence_object *fence,  			  int lazy, int ignore_signals, uint32_t mask)  {  	struct drm_device *dev = fence->dev; @@ -431,10 +428,9 @@ int drm_fence_object_wait(struct drm_fence_object * fence,  	/*  	 * Avoid kernel-space busy-waits.  	 
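Worth spelling out, since drm_fence_handler() and drm_fence_flush_exe() above both rely on it: fence sequence numbers live in a wrapping space, so "a is at or after b" is decided by masking the difference and comparing against wrap_diff. A self-contained sketch of the comparison (field names mirror the driver struct; not part of the patch):

/* Returns nonzero when a is at or after b in the wrapping sequence
 * space. E.g. with sequence_mask = 0xffffffff, wrap_diff = 0x80000000:
 * a = 0x00000002, b = 0xfffffffe gives diff = 4, so a is "newer"
 * even though it is numerically smaller. */
static int seq_after_eq(uint32_t a, uint32_t b,
			uint32_t sequence_mask, uint32_t wrap_diff)
{
	return ((a - b) & sequence_mask) < wrap_diff;
}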
*/ -#if 1  	if (!ignore_signals)  		return -EAGAIN; -#endif +  	do {  		schedule();  		signaled = drm_fence_object_signaled(fence, mask, 1); @@ -447,9 +443,8 @@ int drm_fence_object_wait(struct drm_fence_object * fence,  }  EXPORT_SYMBOL(drm_fence_object_wait); - -int drm_fence_object_emit(struct drm_fence_object * fence, -			  uint32_t fence_flags, uint32_t fence_class, uint32_t type) +int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags, +			  uint32_t fence_class, uint32_t type)  {  	struct drm_device *dev = fence->dev;  	struct drm_fence_manager *fm = &dev->fm; @@ -461,7 +456,8 @@ int drm_fence_object_emit(struct drm_fence_object * fence,  	int ret;  	drm_fence_unring(dev, &fence->ring); -	ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type); +	ret = driver->emit(dev, fence_class, fence_flags, &sequence, +			   &native_type);  	if (ret)  		return ret; @@ -481,10 +477,10 @@ int drm_fence_object_emit(struct drm_fence_object * fence,  }  EXPORT_SYMBOL(drm_fence_object_emit); -static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class, +static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,  				 uint32_t type,  				 uint32_t fence_flags, -				 struct drm_fence_object * fence) +				 struct drm_fence_object *fence)  {  	int ret = 0;  	unsigned long flags; @@ -497,7 +493,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,  	write_lock_irqsave(&fm->lock, flags);  	INIT_LIST_HEAD(&fence->ring); -	/*  +	/*  	 *  Avoid hitting BUG() for kernel-only fence objects.  	 */ @@ -517,8 +513,8 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,  	return ret;  } -int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, -			      int shareable) +int drm_fence_add_user_object(struct drm_file *priv, +			      struct drm_fence_object *fence, int shareable)  {  	struct drm_device *dev = priv->head->dev;  	int ret; @@ -537,8 +533,9 @@ out:  }  EXPORT_SYMBOL(drm_fence_add_user_object); -int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type, -			    unsigned flags, struct drm_fence_object ** c_fence) +int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class, +			    uint32_t type, unsigned flags, +			    struct drm_fence_object **c_fence)  {  	struct drm_fence_object *fence;  	int ret; @@ -557,10 +554,9 @@ int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint3  	return 0;  } -  EXPORT_SYMBOL(drm_fence_object_create); -void drm_fence_manager_init(struct drm_device * dev) +void drm_fence_manager_init(struct drm_device *dev)  {  	struct drm_fence_manager *fm = &dev->fm;  	struct drm_fence_class_manager *fence_class; @@ -578,7 +574,7 @@ void drm_fence_manager_init(struct drm_device * dev)  	fm->num_classes = fed->num_classes;  	BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES); -	for (i=0; i<fm->num_classes; ++i) { +	for (i = 0; i < fm->num_classes; ++i) {  	    fence_class = &fm->fence_class[i];  	    INIT_LIST_HEAD(&fence_class->ring); @@ -591,7 +587,8 @@ void drm_fence_manager_init(struct drm_device * dev)  	write_unlock_irqrestore(&fm->lock, flags);  } -void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg) +void drm_fence_fill_arg(struct drm_fence_object *fence, +			struct drm_fence_arg *arg)  {  	struct drm_device *dev = fence->dev;  	struct drm_fence_manager *fm = &dev->fm; @@ -608,12 +605,12 @@ void drm_fence_fill_arg(struct 
drm_fence_object *fence, struct drm_fence_arg *ar  }  EXPORT_SYMBOL(drm_fence_fill_arg); - -void drm_fence_manager_takedown(struct drm_device * dev) +void drm_fence_manager_takedown(struct drm_device *dev)  {  } -struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) +struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv, +						 uint32_t handle)  {  	struct drm_device *dev = priv->head->dev;  	struct drm_user_object *uo; @@ -656,14 +653,13 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *  		drm_fence_usage_deref_unlocked(&fence);  		return ret;  	} -	 +  	/*  	 * usage > 0. No need to lock dev->struct_mutex;  	 */  	arg->handle = fence->base.hash.key; -  	drm_fence_fill_arg(fence, arg);  	drm_fence_usage_deref_unlocked(&fence); diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 0ccaed5b..0e1c486c 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -85,7 +85,6 @@ static int drm_setup(struct drm_device * dev)  	dev->queue_reserved = 0;  	dev->queue_slots = 0;  	dev->queuelist = NULL; -	dev->irq_enabled = 0;  	dev->context_flag = 0;  	dev->interrupt_flag = 0;  	dev->dma_flag = 0; @@ -153,7 +152,7 @@ int drm_open(struct inode *inode, struct file *filp)  		spin_unlock(&dev->count_lock);  	} - out: +out:  	mutex_lock(&dev->struct_mutex);  	BUG_ON((dev->dev_mapping != NULL) &&  	       (dev->dev_mapping != inode->i_mapping)); @@ -237,7 +236,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,  	int minor = iminor(inode);  	struct drm_file *priv;  	int ret; -	int i,j; +	int i, j;  	if (filp->f_flags & O_EXCL)  		return -EBUSY;	/* No exclusive opens */ @@ -265,16 +264,16 @@ static int drm_open_helper(struct inode *inode, struct file *filp,  	INIT_LIST_HEAD(&priv->lhead);  	INIT_LIST_HEAD(&priv->refd_objects); -	for (i=0; i<_DRM_NO_REF_TYPES; ++i) { -		ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER); +	for (i = 0; i < _DRM_NO_REF_TYPES; ++i) { +		ret = drm_ht_create(&priv->refd_object_hash[i], +				    DRM_FILE_HASH_ORDER);  		if (ret)  			break;  	}  	if (ret) { -		for(j=0; j<i; ++j) { +		for (j = 0; j < i; ++j)  			drm_ht_remove(&priv->refd_object_hash[j]); -		}  		goto out_free;  	} @@ -333,8 +332,8 @@ int drm_fasync(int fd, struct file *filp, int on)  }  EXPORT_SYMBOL(drm_fasync); -static void drm_object_release(struct file *filp) { - +static void drm_object_release(struct file *filp) +{  	struct drm_file *priv = filp->private_data;  	struct list_head *head;  	struct drm_ref_object *ref_object; @@ -342,8 +341,9 @@ static void drm_object_release(struct file *filp) {  	/*  	 * Free leftover ref objects created by me. Note that we cannot use -	 * list_for_each() here, as the struct_mutex may be temporarily released -	 * by the remove_() functions, and thus the lists may be altered. +	 * list_for_each() here, as the struct_mutex may be temporarily +	 * released by the remove_() functions, and thus the lists may be +	 * altered.  	 * Also, a drm_remove_ref_object() will not remove it  	 * from the list unless its refcount is 1.  	 
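The hash-table setup in drm_open_helper() above uses the usual partial-initialization unwind: if creating table i fails, only tables 0..i-1 are torn down before bailing out. The idiom in isolation (hypothetical array and size names, not from the patch):

int i, j, ret = 0;

for (i = 0; i < N_TABLES; ++i) {
	ret = drm_ht_create(&hash_tables[i], ORDER);
	if (ret)
		break;
}
if (ret) {
	/* Unwind only the tables that were actually created. */
	for (j = 0; j < i; ++j)
		drm_ht_remove(&hash_tables[j]);
	return ret;
}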
*/ @@ -355,9 +355,8 @@ static void drm_object_release(struct file *filp) {  		head = &priv->refd_objects;  	} -	for(i=0; i<_DRM_NO_REF_TYPES; ++i) { +	for (i = 0; i < _DRM_NO_REF_TYPES; ++i)  		drm_ht_remove(&priv->refd_object_hash[i]); -	}  }  /** @@ -528,4 +527,3 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)  	return 0;  }  EXPORT_SYMBOL(drm_poll); - diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c index a8ec8468..f5a4f849 100644 --- a/linux-core/drm_hashtab.c +++ b/linux-core/drm_hashtab.c @@ -36,7 +36,7 @@  #include "drm_hashtab.h"  #include <linux/hash.h> -int drm_ht_create(struct drm_open_hash * ht, unsigned int order) +int drm_ht_create(struct drm_open_hash *ht, unsigned int order)  {  	unsigned int i; @@ -63,7 +63,7 @@ int drm_ht_create(struct drm_open_hash * ht, unsigned int order)  	return 0;  } -void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key) +void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)  {  	struct drm_hash_item *entry;  	struct hlist_head *h_list; @@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key)  	}  } -static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht, +static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,  					  unsigned long key)  {  	struct drm_hash_item *entry; @@ -100,7 +100,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht,  	return NULL;  } -int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item) +int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)  {  	struct drm_hash_item *entry;  	struct hlist_head *h_list; @@ -128,10 +128,11 @@ int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)  }  /* - * Just insert an item and return any "bits" bit key that hasn't been  + * Just insert an item and return any "bits" bit key that hasn't been   * used before.   
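The helper documented just above is how the core mints fresh handles: it hashes a caller-supplied seed into a "bits"-bit key and probes until it finds one that is unused. A hypothetical caller, mirroring the drm_add_user_object() use later in this patch:

struct drm_hash_item *hash = &item->hash;	/* item: hypothetical object */
int ret;

ret = drm_ht_just_insert_please(&dev->object_hash, hash,
				(unsigned long)item,	/* seed */
				31,			/* key fits in 31 bits */
				0, 0);			/* shift, add */
if (!ret)
	handle = (uint32_t)hash->key;	/* fresh, previously unused key */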
*/ -int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item, +int drm_ht_just_insert_please(struct drm_open_hash *ht, +			      struct drm_hash_item *item,  			      unsigned long seed, int bits, int shift,  			      unsigned long add)  { @@ -155,8 +156,8 @@ int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item *  	return 0;  } -int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key, -		     struct drm_hash_item ** item) +int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, +		     struct drm_hash_item **item)  {  	struct hlist_node *list; @@ -168,7 +169,7 @@ int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key,  	return 0;  } -int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key) +int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)  {  	struct hlist_node *list; @@ -181,14 +182,14 @@ int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key)  	return -EINVAL;  } -int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item) +int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)  {  	hlist_del_init(&item->head);  	ht->fill--;  	return 0;  } -void drm_ht_remove(struct drm_open_hash * ht) +void drm_ht_remove(struct drm_open_hash *ht)  {  	if (ht->table) {  		if (ht->use_vmalloc) diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h index 0f137677..c090677b 100644 --- a/linux-core/drm_hashtab.h +++ b/linux-core/drm_hashtab.h @@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);  #endif - diff --git a/linux-core/drm_internal.h b/linux-core/drm_internal.h new file mode 120000 index 00000000..b30ef94a --- /dev/null +++ b/linux-core/drm_internal.h @@ -0,0 +1 @@ +../shared-core/drm_internal.h
\ No newline at end of file diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 9d52fd8a..3df163db 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -98,12 +98,14 @@ int drm_setunique(struct drm_device *dev, void *data,  	dev->unique[dev->unique_len] = '\0'; -	dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + strlen(dev->unique) + 2, -				 DRM_MEM_DRIVER); +	dev->devname = +	    drm_alloc(strlen(dev->driver->pci_driver.name) + +		      strlen(dev->unique) + 2, DRM_MEM_DRIVER);  	if (!dev->devname)  		return -ENOMEM; -	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique); +	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, +		dev->unique);  	/* Return error if the busid submitted doesn't match the device's actual  	 * busid. @@ -142,12 +144,14 @@ static int drm_set_busid(struct drm_device * dev)  	if (len > dev->unique_len)  		DRM_ERROR("buffer overflow"); -	dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 2, -				 DRM_MEM_DRIVER); +	dev->devname = +	    drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + +		      2, DRM_MEM_DRIVER);  	if (dev->devname == NULL)  		return -ENOMEM; -	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique); +	sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, +		dev->unique);  	return 0;  } @@ -264,7 +268,7 @@ int drm_getstats(struct drm_device *dev, void *data,  	struct drm_stats *stats = data;  	int i; -	memset(stats, 0, sizeof(stats)); +	memset(stats, 0, sizeof(*stats));  	mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 4aa58d77..2a5a4539 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -188,7 +188,7 @@ int drm_irq_install(struct drm_device * dev)  	dev->irq_enabled = 1;  	mutex_unlock(&dev->struct_mutex); -	DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq); +	DRM_DEBUG("irq=%d\n", dev->irq);  	/* Before installing handler */  	dev->driver->irq_preinstall(dev); @@ -240,7 +240,7 @@ int drm_irq_uninstall(struct drm_device * dev)  	if (!irq_enabled)  		return -EINVAL; -	DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq); +	DRM_DEBUG("irq=%d\n", dev->irq);  	dev->driver->irq_uninstall(dev); @@ -636,7 +636,7 @@ EXPORT_SYMBOL(drm_handle_vblank);   */  static void drm_locked_tasklet_func(unsigned long data)  { -	struct drm_device *dev = (struct drm_device*)data; +	struct drm_device *dev = (struct drm_device *)data;  	unsigned long irqflags;  	spin_lock_irqsave(&dev->tasklet_lock, irqflags); @@ -673,7 +673,7 @@ static void drm_locked_tasklet_func(unsigned long data)   * context, it must not make any assumptions about this. Also, the HW lock will   * be held with the kernel context or any client context.   
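The drm_getstats() change in drm_ioctl.c above deserves a second look: sizeof(stats) measured the pointer, not the structure, so only the first 4 or 8 bytes were ever cleared. The bug class in miniature:

struct drm_stats *stats = data;

memset(stats, 0, sizeof(stats));	/* wrong: sizeof a pointer */
memset(stats, 0, sizeof(*stats));	/* right: sizeof the struct */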
*/ -void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*)) +void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))  {  	unsigned long irqflags;  	static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index f68a3a3e..402a680f 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -45,13 +45,13 @@ static struct {  	.lock = SPIN_LOCK_UNLOCKED  }; -static inline size_t drm_size_align(size_t size) { - +static inline size_t drm_size_align(size_t size) +{  	size_t tmpSize = 4;  	if (size > PAGE_SIZE)  		return PAGE_ALIGN(size); -	while(tmpSize < size) +	while (tmpSize < size)  		tmpSize <<= 1;  	return (size_t) tmpSize; diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h index 5590c491..63e425b5 100644 --- a/linux-core/drm_memory.h +++ b/linux-core/drm_memory.h @@ -42,7 +42,6 @@   * drm_memory.h.   */ -/* Need the 4-argument version of vmap().  */  #if __OS_HAS_AGP  #include <linux/vmalloc.h> diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index cf0d92fa..59110293 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -235,12 +235,12 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,  		entry = list_entry(list, struct drm_mm_node, fl_entry);  		wasted = 0; -		if (entry->size < size)  +		if (entry->size < size)  			continue;  		if (alignment) {  			register unsigned tmp = entry->start % alignment; -			if (tmp)  +			if (tmp)  				wasted += alignment - tmp;  		} diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index a6d6c0d7..7d2e3a2b 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,7 +30,7 @@  #include "drmP.h" -int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, +int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,  			int shareable)  {  	struct drm_device *dev = priv->head->dev; @@ -44,7 +44,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,  	item->owner = priv;  	ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash, -					(unsigned long)item, 32, 0, 0); +					(unsigned long)item, 31, 0, 0);  	if (ret)  		return ret; @@ -56,7 +56,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,  }  EXPORT_SYMBOL(drm_add_user_object); -struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) +struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)  {  	struct drm_device *dev = priv->head->dev;  	struct drm_hash_item *hash; @@ -66,9 +66,9 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t  	DRM_ASSERT_LOCKED(&dev->struct_mutex);  	ret = drm_ht_find_item(&dev->object_hash, key, &hash); -	if (ret) { +	if (ret)  		return NULL; -	} +  	item = drm_hash_entry(hash, struct drm_user_object, hash);  	if (priv != item->owner) { @@ -83,7 +83,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t  }  EXPORT_SYMBOL(drm_lookup_user_object); -static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) +static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)  {  	struct drm_device *dev = priv->head->dev;  	int ret; @@ -95,7 +95,7 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object  	}  } -static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, 
+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,  				 enum drm_ref_type action)  {  	int ret = 0; @@ -114,7 +114,7 @@ static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object  	return ret;  } -int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, +int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,  		       enum drm_ref_type ref_action)  {  	int ret = 0; @@ -167,12 +167,12 @@ int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenc  	list_add(&item->list, &priv->refd_objects);  	ret = drm_object_ref_action(priv, referenced_object, ref_action); -      out: +out:  	return ret;  } -struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, -					struct drm_user_object * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, +					struct drm_user_object *referenced_object,  					enum drm_ref_type ref_action)  {  	struct drm_hash_item *hash; @@ -188,8 +188,8 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,  }  EXPORT_SYMBOL(drm_lookup_ref_object); -static void drm_remove_other_references(struct drm_file * priv, -					struct drm_user_object * ro) +static void drm_remove_other_references(struct drm_file *priv, +					struct drm_user_object *ro)  {  	int i;  	struct drm_open_hash *ht; @@ -205,7 +205,7 @@ static void drm_remove_other_references(struct drm_file * priv,  	}  } -void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) +void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)  {  	int ret;  	struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; @@ -234,9 +234,10 @@ void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item)  	}  } +EXPORT_SYMBOL(drm_remove_ref_object); -int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, -			enum drm_object_type type, struct drm_user_object ** object) +int drm_user_object_ref(struct drm_file *priv, uint32_t user_token, +			enum drm_object_type type, struct drm_user_object **object)  {  	struct drm_device *dev = priv->head->dev;  	struct drm_user_object *uo; @@ -260,12 +261,12 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,  	mutex_unlock(&dev->struct_mutex);  	*object = uo;  	return 0; -      out_err: +out_err:  	mutex_unlock(&dev->struct_mutex);  	return ret;  } -int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, +int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,  			  enum drm_object_type type)  {  	struct drm_device *dev = priv->head->dev; @@ -287,7 +288,7 @@ int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,  	drm_remove_ref_object(priv, ro);  	mutex_unlock(&dev->struct_mutex);  	return 0; -      out_err: +out_err:  	mutex_unlock(&dev->struct_mutex);  	return ret;  } diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index cea811eb..a2d10b5d 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -68,12 +68,12 @@ struct drm_user_object {  	atomic_t refcount;  	int shareable;  	struct drm_file *owner; -	void (*ref_struct_locked) (struct drm_file * priv, -				   struct drm_user_object * obj, +	void (*ref_struct_locked) (struct drm_file *priv, +				   struct drm_user_object *obj,  				   enum drm_ref_type ref_action); -	void (*unref) (struct drm_file * priv, struct drm_user_object * obj, +	void (*unref) (struct 
drm_file *priv, struct drm_user_object *obj,  		       enum drm_ref_type unref_action); -	void (*remove) (struct drm_file * priv, struct drm_user_object * obj); +	void (*remove) (struct drm_file *priv, struct drm_user_object *obj);  };  /* @@ -94,29 +94,29 @@ struct drm_ref_object {   * Must be called with the struct_mutex held.   */ -extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, +extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,  			       int shareable);  /**   * Must be called with the struct_mutex held.   */ -extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, +extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,  						 uint32_t key);  /*   * Must be called with the struct_mutex held. May temporarily release it.   */ -extern int drm_add_ref_object(struct drm_file * priv, -			      struct drm_user_object * referenced_object, +extern int drm_add_ref_object(struct drm_file *priv, +			      struct drm_user_object *referenced_object,  			      enum drm_ref_type ref_action);  /*   * Must be called with the struct_mutex held.   */ -struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, -					struct drm_user_object * referenced_object, +struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv, +					struct drm_user_object *referenced_object,  					enum drm_ref_type ref_action);  /*   * Must be called with the struct_mutex held. @@ -125,11 +125,11 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,   * This function may temporarily release the struct_mutex.   */ -extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item); -extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, +extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item); +extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,  			       enum drm_object_type type, -			       struct drm_user_object ** object); -extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, +			       struct drm_user_object **object); +extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,  				 enum drm_object_type type);  /*************************************************** @@ -138,7 +138,7 @@ extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,  struct drm_fence_object {  	struct drm_user_object base; -        struct drm_device *dev; +	struct drm_device *dev;  	atomic_t usage;  	/* @@ -153,7 +153,7 @@ struct drm_fence_object {  	uint32_t sequence;  	uint32_t flush_mask;  	uint32_t submitted_flush; -        uint32_t error; +	uint32_t error;  };  #define _DRM_FENCE_CLASSES 8 @@ -182,40 +182,44 @@ struct drm_fence_driver {  	uint32_t flush_diff;  	uint32_t sequence_mask;  	int lazy_capable; -	int (*has_irq) (struct drm_device * dev, uint32_t fence_class, +	int (*has_irq) (struct drm_device *dev, uint32_t fence_class,  			uint32_t flags); -	int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags, -		     uint32_t * breadcrumb, uint32_t * native_type); -	void (*poke_flush) (struct drm_device * dev, uint32_t fence_class); +	int (*emit) (struct drm_device *dev, uint32_t fence_class, +		     uint32_t flags, uint32_t *breadcrumb, +		     uint32_t *native_type); +	void (*poke_flush) (struct drm_device *dev, uint32_t fence_class);  };  extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, -			  
    uint32_t sequence, uint32_t type, uint32_t error); +			      uint32_t sequence, uint32_t type, +			      uint32_t error);  extern void drm_fence_manager_init(struct drm_device *dev);  extern void drm_fence_manager_takedown(struct drm_device *dev);  extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,  				uint32_t sequence); -extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); -extern int drm_fence_object_signaled(struct drm_fence_object * fence, +extern int drm_fence_object_flush(struct drm_fence_object *fence, +				  uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object *fence,  				     uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence); -extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence); +extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);  extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);  extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,  					 struct drm_fence_object *src); -extern int drm_fence_object_wait(struct drm_fence_object * fence, +extern int drm_fence_object_wait(struct drm_fence_object *fence,  				 int lazy, int ignore_signals, uint32_t mask);  extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,  				   uint32_t fence_flags, uint32_t fence_class, -				   struct drm_fence_object ** c_fence); -extern int drm_fence_object_emit(struct drm_fence_object * fence, +				   struct drm_fence_object **c_fence); +extern int drm_fence_object_emit(struct drm_fence_object *fence,  				 uint32_t fence_flags, uint32_t class,  				 uint32_t type);  extern void drm_fence_fill_arg(struct drm_fence_object *fence,  			       struct drm_fence_arg *arg); -extern int drm_fence_add_user_object(struct drm_file * priv, -				     struct drm_fence_object * fence, int shareable); +extern int drm_fence_add_user_object(struct drm_file *priv, +				     struct drm_fence_object *fence, +				     int shareable);  extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,  				  struct drm_file *file_priv); @@ -242,7 +246,7 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,  /*   * The ttm backend GTT interface. (In our case AGP).   * Any similar type of device (PCIE?) - * needs only to implement these functions to be usable with the "TTM" interface. + * needs only to implement these functions to be usable with the TTM interface.   * The AGP backend implementation lives in drm_agpsupport.c   * basically maps these calls to available functions in agpgart.   
* Each drm device driver gets an @@ -257,24 +261,26 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,  struct drm_ttm_backend;  struct drm_ttm_backend_func { -	int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); -	int (*populate) (struct drm_ttm_backend * backend, -			 unsigned long num_pages, struct page ** pages); -	void (*clear) (struct drm_ttm_backend * backend); -	int (*bind) (struct drm_ttm_backend * backend, -		     struct drm_bo_mem_reg * bo_mem); -	int (*unbind) (struct drm_ttm_backend * backend); -	void (*destroy) (struct drm_ttm_backend * backend); +	int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend); +	int (*populate) (struct drm_ttm_backend *backend, +			 unsigned long num_pages, struct page **pages, +			 struct page *dummy_read_page); +	void (*clear) (struct drm_ttm_backend *backend); +	int (*bind) (struct drm_ttm_backend *backend, +		     struct drm_bo_mem_reg *bo_mem); +	int (*unbind) (struct drm_ttm_backend *backend); +	void (*destroy) (struct drm_ttm_backend *backend);  }; -typedef struct drm_ttm_backend { -        struct drm_device *dev; -        uint32_t flags; -        struct drm_ttm_backend_func *func; -} drm_ttm_backend_t; +struct drm_ttm_backend { +	struct drm_device *dev; +	uint32_t flags; +	struct drm_ttm_backend_func *func; +};  struct drm_ttm { +	struct page *dummy_read_page;  	struct page **pages;  	uint32_t page_flags;  	unsigned long num_pages; @@ -292,22 +298,28 @@ struct drm_ttm {  }; -extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem); -extern void drm_ttm_unbind(struct drm_ttm * ttm); -extern void drm_ttm_evict(struct drm_ttm * ttm); -extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); -extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); +extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, +				      uint32_t page_flags, +				      struct page *dummy_read_page); +extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem); +extern void drm_ttm_unbind(struct drm_ttm *ttm); +extern void drm_ttm_evict(struct drm_ttm *ttm); +extern void drm_ttm_fixup_caching(struct drm_ttm *ttm); +extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);  extern void drm_ttm_cache_flush(void); -extern int drm_ttm_populate(struct drm_ttm * ttm); +extern int drm_ttm_populate(struct drm_ttm *ttm); +extern int drm_ttm_set_user(struct drm_ttm *ttm, +			    struct task_struct *tsk, +			    unsigned long start, +			    unsigned long num_pages);  /* - * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, - * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called - * when the last vma exits. + * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do + * this which calls this function iff there are no vmas referencing it anymore. + * Otherwise it is called when the last vma exits.   */ -extern int drm_destroy_ttm(struct drm_ttm * ttm); +extern int drm_ttm_destroy(struct drm_ttm *ttm);  #define DRM_FLAG_MASKED(_old, _new, _mask) {\  (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -320,11 +332,48 @@ extern int drm_destroy_ttm(struct drm_ttm * ttm);   * Page flags.   
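For orientation, a GTT backend supplies the drm_ttm_backend_func table above, with populate() now also receiving the new dummy_read_page argument. A hypothetical skeleton (all example_* names are placeholders; the AGP backend in drm_agpsupport.c is the real reference):

static struct drm_ttm_backend_func example_backend_func = {
	.needs_ub_cache_adjust	= example_needs_ub_cache_adjust,
	.populate		= example_populate,	/* takes dummy_read_page now */
	.clear			= example_clear,
	.bind			= example_bind,
	.unbind			= example_unbind,
	.destroy		= example_destroy,
};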
*/ -#define DRM_TTM_PAGE_UNCACHED 0x01 -#define DRM_TTM_PAGE_USED     0x02 -#define DRM_TTM_PAGE_BOUND    0x04 -#define DRM_TTM_PAGE_PRESENT  0x08 -#define DRM_TTM_PAGE_VMALLOC  0x10 +/* + * This ttm should not be cached by the CPU + */ +#define DRM_TTM_PAGE_UNCACHED   (1 << 0) +/* + * This flag is not used at this time; I don't know what the + * intent was. + */ +#define DRM_TTM_PAGE_USED       (1 << 1) +/* + * This flag is not used at this time; I don't know what the + * intent was. + */ +#define DRM_TTM_PAGE_BOUND      (1 << 2) +/* + * This flag is not used at this time; I don't know what the + * intent was. + */ +#define DRM_TTM_PAGE_PRESENT    (1 << 3) +/* + * The array of page pointers was allocated with vmalloc + * instead of drm_calloc. + */ +#define DRM_TTM_PAGE_VMALLOC    (1 << 4) +/* + * This ttm is mapped from user space + */ +#define DRM_TTM_PAGE_USER       (1 << 5) +/* + * This ttm will be written to by the GPU + */ +#define DRM_TTM_PAGE_WRITE	(1 << 6) +/* + * This ttm was mapped to the GPU, and so the contents may have + * been modified + */ +#define DRM_TTM_PAGE_USER_DIRTY (1 << 7) +/* + * This flag is not used at this time; I don't know what the + * intent was. + */ +#define DRM_TTM_PAGE_USER_DMA   (1 << 8)  /*************************************************** * Buffer objects. (drm_bo.c, drm_bo_move.c) @@ -336,16 +385,50 @@ struct drm_bo_mem_reg {  	unsigned long num_pages;  	uint32_t page_alignment;  	uint32_t mem_type; +	/* +	 * Current buffer status flags, indicating +	 * where the buffer is located and which +	 * access modes are in effect +	 */  	uint64_t flags; -	uint64_t mask; -        uint32_t desired_tile_stride; -        uint32_t hw_tile_stride; +	/** +	 * These are the flags proposed for +	 * a validate operation. If the +	 * validate succeeds, they'll get moved +	 * into the flags field +	 */ +	uint64_t proposed_flags; +	 +	uint32_t desired_tile_stride; +	uint32_t hw_tile_stride;  };  enum drm_bo_type { -	drm_bo_type_dc, +	/* +	 * drm_bo_type_device are 'normal' drm allocations, +	 * pages are allocated from within the kernel automatically +	 * and the objects can be mmap'd from the drm device. Each +	 * drm_bo_type_device object has a unique name which can be +	 * used by other processes to share access to the underlying +	 * buffer. +	 */ +	drm_bo_type_device, +	/* +	 * drm_bo_type_user are buffers of pages that already exist +	 * in the process address space. They are more limited than +	 * drm_bo_type_device buffers in that they must always +	 * remain cached (as we assume the user pages are mapped cached), +	 * and they are not sharable to other processes through DRM +	 * (although, regular shared memory should still work fine). +	 */  	drm_bo_type_user, -	drm_bo_type_kernel, /* for initial kernel allocations */ +	/* +	 * drm_bo_type_kernel are buffers that exist solely for use +	 * within the kernel. The pages cannot be mapped into the +	 * process. One obvious use would be for the ring +	 * buffer where user access would not (ideally) be required. 
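To make the new flags concrete (illustrative combination, not taken from the patch): a user-space buffer the GPU may write would carry

	page_flags = DRM_TTM_PAGE_USER | DRM_TTM_PAGE_WRITE;	/* GPU-writable user pages */

and once the ttm has been mapped to the GPU the core adds DRM_TTM_PAGE_USER_DIRTY, which is what drm_ttm_free_user_pages() (later in this patch) checks to decide whether to set_page_dirty_lock() the pages before releasing them.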
+	 */ +	drm_bo_type_kernel,  };  struct drm_buffer_object { @@ -369,8 +452,8 @@ struct drm_buffer_object {  	uint32_t fence_type;  	uint32_t fence_class; -        uint32_t new_fence_type; -        uint32_t new_fence_class; +	uint32_t new_fence_type; +	uint32_t new_fence_class;  	struct drm_fence_object *fence;  	uint32_t priv_flags;  	wait_queue_head_t event_queue; @@ -409,7 +492,7 @@ struct drm_mem_type_manager {  	struct list_head pinned;  	uint32_t flags;  	uint32_t drm_bus_maptype; -        unsigned long gpu_offset; +	unsigned long gpu_offset;  	unsigned long io_offset;  	unsigned long io_size;  	void *io_addr; @@ -431,8 +514,8 @@ struct drm_bo_lock {  #define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020	/* Select caching */  struct drm_buffer_manager { -        struct drm_bo_lock bm_lock; -        struct mutex evict_mutex; +	struct drm_bo_lock bm_lock; +	struct mutex evict_mutex;  	int nice_mode;  	int initialized;  	struct drm_file *last_to_validate; @@ -447,6 +530,7 @@ struct drm_buffer_manager {  	uint32_t fence_type;  	unsigned long cur_pages;  	atomic_t count; +	struct page *dummy_read_page;  };  struct drm_bo_driver { @@ -455,15 +539,42 @@ struct drm_bo_driver {  	uint32_t num_mem_type_prio;  	uint32_t num_mem_busy_prio;  	struct drm_ttm_backend *(*create_ttm_backend_entry) -	 (struct drm_device * dev); +	 (struct drm_device *dev);  	int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass, -		     uint32_t * type); -	int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); -	int (*init_mem_type) (struct drm_device * dev, uint32_t type, -			      struct drm_mem_type_manager * man); -	 uint32_t(*evict_mask) (struct drm_buffer_object *bo); -	int (*move) (struct drm_buffer_object * bo, -		     int evict, int no_wait, struct drm_bo_mem_reg * new_mem); +			   uint32_t *type); +	int (*invalidate_caches) (struct drm_device *dev, uint64_t flags); +	int (*init_mem_type) (struct drm_device *dev, uint32_t type, +			      struct drm_mem_type_manager *man); +	/* +	 * evict_flags: +	 * +	 * @bo: the buffer object to be evicted +	 * +	 * Return the bo flags for a buffer which is not mapped to the hardware. +	 * These will be placed in proposed_flags so that when the move is +	 * finished, they'll end up in bo->mem.flags +	 */ +	uint64_t(*evict_flags) (struct drm_buffer_object *bo); +	/* +	 * move: +	 * +	 * @bo: the buffer to move +	 * +	 * @evict: whether this motion is evicting the buffer from +	 * the graphics address space +	 * +	 * @no_wait: whether this should give up and return -EBUSY +	 * if this move would require sleeping +	 * +	 * @new_mem: the new memory region receiving the buffer +	 * +	 * Move a buffer between two memory regions. 
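The evict_flags() contract documented above is small enough to show end-to-end. A hypothetical driver implementation, assuming the generic DRM_BO_FLAG_* placement flags (a sketch, not this patch's code):

static uint64_t example_evict_flags(struct drm_buffer_object *bo)
{
	/* When evicted from the GPU, land the buffer in cached system
	 * memory; the result flows into proposed_flags and, once the
	 * move finishes, into bo->mem.flags. */
	return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
}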
+	 */ +	int (*move) (struct drm_buffer_object *bo, +		     int evict, int no_wait, struct drm_bo_mem_reg *new_mem); +	/* +	 * ttm_cache_flush +	 */  	void (*ttm_cache_flush)(struct drm_ttm *ttm);  }; @@ -488,49 +599,47 @@ extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_f  extern int drm_bo_driver_finish(struct drm_device *dev);  extern int drm_bo_driver_init(struct drm_device *dev);  extern int drm_bo_pci_offset(struct drm_device *dev, -			     struct drm_bo_mem_reg * mem, +			     struct drm_bo_mem_reg *mem,  			     unsigned long *bus_base,  			     unsigned long *bus_offset,  			     unsigned long *bus_size); -extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); +extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem); -extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); -extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo); +extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo); +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);  extern void drm_putback_buffer_objects(struct drm_device *dev); -extern int drm_fence_buffer_objects(struct drm_device * dev, +extern int drm_fence_buffer_objects(struct drm_device *dev,  				    struct list_head *list,  				    uint32_t fence_flags, -				    struct drm_fence_object * fence, -				    struct drm_fence_object ** used_fence); -extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); +				    struct drm_fence_object *fence, +				    struct drm_fence_object **used_fence); +extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);  extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, -				    enum drm_bo_type type, uint64_t mask, +				    enum drm_bo_type type, uint64_t flags,  				    uint32_t hint, uint32_t page_alignment,  				    unsigned long buffer_start,  				    struct drm_buffer_object **bo); -extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, +extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,  		       int no_wait); -extern int drm_bo_mem_space(struct drm_buffer_object * bo, -			    struct drm_bo_mem_reg * mem, int no_wait); -extern int drm_bo_move_buffer(struct drm_buffer_object * bo, +extern int drm_bo_mem_space(struct drm_buffer_object *bo, +			    struct drm_bo_mem_reg *mem, int no_wait); +extern int drm_bo_move_buffer(struct drm_buffer_object *bo,  			      uint64_t new_mem_flags,  			      int no_wait, int move_unfenced); -extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type); -extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, +extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); +extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,  			  unsigned long p_offset, unsigned long p_size); -extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, -				  uint32_t fence_class, uint64_t flags, -				  uint64_t mask, uint32_t hint, -				  int use_old_fence_class, -				  struct drm_bo_info_rep * rep, +extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle, +				  uint64_t flags, uint64_t mask, uint32_t hint, +				  uint32_t fence_class, int use_old_fence_class, +				  struct drm_bo_info_rep *rep,  				  struct drm_buffer_object **bo_rep); -extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv, +extern struct drm_buffer_object 
*drm_lookup_buffer_object(struct drm_file *file_priv,  							  uint32_t handle,  							  int check_owner);  extern int drm_bo_do_validate(struct drm_buffer_object *bo,  			      uint64_t flags, uint64_t mask, uint32_t hint,  			      uint32_t fence_class, -			      int no_wait,  			      struct drm_bo_info_rep *rep);  /* @@ -538,18 +647,17 @@ extern int drm_bo_do_validate(struct drm_buffer_object *bo,   * drm_bo_move.c   */ -extern int drm_bo_move_ttm(struct drm_buffer_object * bo, -			   int evict, int no_wait, struct drm_bo_mem_reg * new_mem); -extern int drm_bo_move_memcpy(struct drm_buffer_object * bo, +extern int drm_bo_move_ttm(struct drm_buffer_object *bo, +			   int evict, int no_wait, +			   struct drm_bo_mem_reg *new_mem); +extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,  			      int evict, -			      int no_wait, struct drm_bo_mem_reg * new_mem); -extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, -				     int evict, -				     int no_wait, -				     uint32_t fence_class, -				     uint32_t fence_type, +			      int no_wait, struct drm_bo_mem_reg *new_mem); +extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo, +				     int evict, int no_wait, +				     uint32_t fence_class, uint32_t fence_type,  				     uint32_t fence_flags, -				     struct drm_bo_mem_reg * new_mem); +				     struct drm_bo_mem_reg *new_mem);  extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);  extern unsigned long drm_bo_offset_end(unsigned long offset,  				       unsigned long end); @@ -615,7 +723,7 @@ extern void drm_regs_init(struct drm_reg_manager *manager,  			  void (*reg_destroy)(struct drm_reg *));  /* - * drm_bo_lock.c  + * drm_bo_lock.c   * Simple replacement for the hardware lock on buffer manager init and clean.   */ @@ -623,10 +731,10 @@ extern void drm_regs_init(struct drm_reg_manager *manager,  extern void drm_bo_init_lock(struct drm_bo_lock *lock);  extern void drm_bo_read_unlock(struct drm_bo_lock *lock);  extern int drm_bo_read_lock(struct drm_bo_lock *lock); -extern int drm_bo_write_lock(struct drm_bo_lock *lock,  +extern int drm_bo_write_lock(struct drm_bo_lock *lock,  			     struct drm_file *file_priv); -extern int drm_bo_write_unlock(struct drm_bo_lock *lock,  +extern int drm_bo_write_unlock(struct drm_bo_lock *lock,  			       struct drm_file *file_priv);  #ifdef CONFIG_DEBUG_MUTEXES diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 2688479a..8921944e 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -92,9 +92,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)  #define DRM_COPY_TO_USER(arg1, arg2, arg3)		\  	copy_to_user(arg1, arg2, arg3)  /* Macros for copyfrom user, but checking readability only once */ -#define DRM_VERIFYAREA_READ( uaddr, size ) 		\ +#define DRM_VERIFYAREA_READ( uaddr, size )		\  	(access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT) -#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) 	\ +#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)	\  	__copy_from_user(arg1, arg2, arg3)  #define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\  	__copy_to_user(arg1, arg2, arg3) @@ -129,3 +129,17 @@ do {								\  #define DRM_WAKEUP( queue ) wake_up_interruptible( queue )  #define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue ) + +/** Type for the OS's non-sleepable mutex lock */ +#define DRM_SPINTYPE		spinlock_t +/** + * Initialize the lock for use.  
name is an optional string describing the + * lock + */ +#define DRM_SPININIT(l,name)	spin_lock_init(l) +#define DRM_SPINUNINIT(l) +#define DRM_SPINLOCK(l)		spin_lock(l) +#define DRM_SPINUNLOCK(l)	spin_unlock(l) +#define DRM_SPINLOCK_IRQSAVE(l, _flags)	spin_lock_irqsave(l, _flags); +#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags); +#define DRM_SPINLOCK_ASSERT(l)		do {} while (0) diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c index a608eed3..7569286c 100644 --- a/linux-core/drm_pci.c +++ b/linux-core/drm_pci.c @@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc);   *   * This function is for internal use in the Linux-specific DRM core code.   */ -void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) +void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)  {  	unsigned long addr;  	size_t sz; @@ -167,7 +167,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)  /**   * \brief Free a PCI consistent memory block   */ -void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) +void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)  {  	__drm_pci_free(dev, dmah);  	kfree(dmah); diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index 08bf99d6..3012c5b0 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -239,10 +239,10 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,  		else  			type = types[map->type];  		DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s  0x%02x 0x%08lx ", -		       i, -		       map->offset, -		       map->size, type, map->flags, -		       (unsigned long) r_list->user_token); +			       i, +			       map->offset, +			       map->size, type, map->flags, +			       (unsigned long) r_list->user_token);  		if (map->mtrr < 0) {  			DRM_PROC_PRINT("none\n"); diff --git a/linux-core/drm_regman.c b/linux-core/drm_regman.c new file mode 100644 index 00000000..aa117323 --- /dev/null +++ b/linux-core/drm_regman.c @@ -0,0 +1,200 @@ +/************************************************************************** + * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + **************************************************************************/ +/* + * An allocate-fence manager implementation intended for sets of base-registers + * or tiling-registers. 
+ */ + +#include "drmP.h" + +/* + * Allocate a compatible register and put it on the unfenced list. + */ + +int drm_regs_alloc(struct drm_reg_manager *manager, +		   const void *data, +		   uint32_t fence_class, +		   uint32_t fence_type, +		   int interruptible, int no_wait, struct drm_reg **reg) +{ +	struct drm_reg *entry, *next_entry; +	int ret; + +	*reg = NULL; + +	/* +	 * Search the unfenced list. +	 */ + +	list_for_each_entry(entry, &manager->unfenced, head) { +		if (manager->reg_reusable(entry, data)) { +			entry->new_fence_type |= fence_type; +			goto out; +		} +	} + +	/* +	 * Search the lru list. +	 */ + +	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { +		struct drm_fence_object *fence = entry->fence; +		if (fence->fence_class == fence_class && +		    (entry->fence_type & fence_type) == entry->fence_type && +		    manager->reg_reusable(entry, data)) { +			list_del(&entry->head); +			entry->new_fence_type = fence_type; +			list_add_tail(&entry->head, &manager->unfenced); +			goto out; +		} +	} + +	/* +	 * Search the free list. +	 */ + +	list_for_each_entry(entry, &manager->free, head) { +		list_del(&entry->head); +		entry->new_fence_type = fence_type; +		list_add_tail(&entry->head, &manager->unfenced); +		goto out; +	} + +	if (no_wait) +		return -EBUSY; + +	/* +	 * Go back to the lru list and try to expire fences. +	 */ + +	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { +		BUG_ON(!entry->fence); +		ret = drm_fence_object_wait(entry->fence, 0, !interruptible, +					    entry->fence_type); +		if (ret) +			return ret; + +		drm_fence_usage_deref_unlocked(&entry->fence); +		list_del(&entry->head); +		entry->new_fence_type = fence_type; +		list_add_tail(&entry->head, &manager->unfenced); +		goto out; +	} + +	/* +	 * Oops. All registers are used up :(. +	 */ + +	return -EBUSY; +out: +	*reg = entry; +	return 0; +} +EXPORT_SYMBOL(drm_regs_alloc); + +void drm_regs_fence(struct drm_reg_manager *manager, +		    struct drm_fence_object *fence) +{ +	struct drm_reg *entry; +	struct drm_reg *next_entry; + +	if (!fence) { + +		/* +		 * Old fence (if any) is still valid. +		 * Put back on free and lru lists. +		 */ + +		list_for_each_entry_safe_reverse(entry, next_entry, +						 &manager->unfenced, head) { +			list_del(&entry->head); +			list_add(&entry->head, (entry->fence) ? +				 &manager->lru : &manager->free); +		} +	} else { + +		/* +		 * Fence with a new fence and put on lru list. 
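A sketch of how a driver might drive the allocator above across one command submission (manager, tile_desc, fence_class and fence are hypothetical driver state; DRM_FENCE_TYPE_EXE stands in for the driver's fence type):

struct drm_reg *reg;
int ret;

ret = drm_regs_alloc(&manager, tile_desc /* hypothetical key */,
		     fence_class, DRM_FENCE_TYPE_EXE,
		     1 /* interruptible */, 0 /* no_wait */, &reg);
if (ret)
	return ret;
/* ... program the register and emit the commands that use it ... */
drm_regs_fence(&manager, fence);	/* moves unfenced regs to the lru */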
+		 */ + +		list_for_each_entry_safe(entry, next_entry, &manager->unfenced, +					 head) { +			list_del(&entry->head); +			if (entry->fence) +				drm_fence_usage_deref_unlocked(&entry->fence); +			drm_fence_reference_unlocked(&entry->fence, fence); + +			entry->fence_type = entry->new_fence_type; +			BUG_ON((entry->fence_type & fence->type) != +			       entry->fence_type); + +			list_add_tail(&entry->head, &manager->lru); +		} +	} +} +EXPORT_SYMBOL(drm_regs_fence); + +void drm_regs_free(struct drm_reg_manager *manager) +{ +	struct drm_reg *entry; +	struct drm_reg *next_entry; + +	drm_regs_fence(manager, NULL); + +	list_for_each_entry_safe(entry, next_entry, &manager->free, head) { +		list_del(&entry->head); +		manager->reg_destroy(entry); +	} + +	list_for_each_entry_safe(entry, next_entry, &manager->lru, head) { + +		(void)drm_fence_object_wait(entry->fence, 1, 1, +					    entry->fence_type); +		list_del(&entry->head); +		drm_fence_usage_deref_unlocked(&entry->fence); +		manager->reg_destroy(entry); +	} +} +EXPORT_SYMBOL(drm_regs_free); + +void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg) +{ +	reg->fence = NULL; +	list_add_tail(®->head, &manager->free); +} +EXPORT_SYMBOL(drm_regs_add); + +void drm_regs_init(struct drm_reg_manager *manager, +		   int (*reg_reusable) (const struct drm_reg *, const void *), +		   void (*reg_destroy) (struct drm_reg *)) +{ +	INIT_LIST_HEAD(&manager->free); +	INIT_LIST_HEAD(&manager->lru); +	INIT_LIST_HEAD(&manager->unfenced); +	manager->reg_reusable = reg_reusable; +	manager->reg_destroy = reg_destroy; +} +EXPORT_SYMBOL(drm_regs_init); diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index 3c0f672e..77b9f95d 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -68,7 +68,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)  	struct drm_sg_mem *entry;  	unsigned long pages, i, j; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	if (!drm_core_check_feature(dev, DRIVER_SG))  		return -EINVAL; @@ -82,7 +82,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)  	memset(entry, 0, sizeof(*entry));  	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; -	DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages); +	DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);  	entry->pages = pages;  	entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist), @@ -123,10 +123,10 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)  	entry->handle = ScatterHandle((unsigned long)entry->virtual); -	DRM_DEBUG("sg alloc handle  = %08lx\n", entry->handle); -	DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual); +	DRM_DEBUG("handle  = %08lx\n", entry->handle); +	DRM_DEBUG("virtual = %p\n", entry->virtual); -	for (i = (unsigned long)entry->virtual, j = 0; j < pages;  +	for (i = (unsigned long)entry->virtual, j = 0; j < pages;  	     i += PAGE_SIZE, j++) {  		entry->pagelist[j] = vmalloc_to_page((void *)i);  		if (!entry->pagelist[j]) @@ -211,7 +211,7 @@ int drm_sg_free(struct drm_device *dev, void *data,  	if (!entry || entry->handle != request->handle)  		return -EINVAL; -	DRM_DEBUG("sg free virtual  = %p\n", entry->virtual); +	DRM_DEBUG("virtual  = %p\n", entry->virtual);  	drm_sg_cleanup(entry); diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index 118e82ae..8421a939 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -264,7 +264,8 @@ int drm_sman_free_key(struct drm_sman *sman, unsigned int key)  	if 
(drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))  		return -EINVAL; -	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, user_hash); +	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, +				       user_hash);  	drm_sman_free(memblock_item);  	return 0;  } diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 1d88d375..00a24521 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -55,8 +55,8 @@ struct class *drm_class;  struct proc_dir_entry *drm_proc_root;  static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, -		       const struct pci_device_id *ent, -		       struct drm_driver *driver) +			   const struct pci_device_id *ent, +			   struct drm_driver *driver)  {  	int retcode; @@ -75,7 +75,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,  	mutex_init(&dev->bm.evict_mutex);  	idr_init(&dev->drw_idr); -	 +  	dev->pdev = pdev;  	dev->pci_device = pdev->device;  	dev->pci_vendor = pdev->vendor; @@ -84,6 +84,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,  	dev->hose = pdev->sysdata;  #endif  	dev->irq = pdev->irq; +	dev->irq_enabled = 0;  	if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {  		return -ENOMEM; @@ -111,10 +112,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,  	dev->driver = driver; -	if (dev->driver->load) -		if ((retcode = dev->driver->load(dev, ent->driver_data))) -			goto error_out_unreg; -  	if (drm_core_has_AGP(dev)) {  		if (drm_device_is_agp(dev))  			dev->agp = drm_agp_init(dev); @@ -134,6 +131,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,  		}  	} +	if (dev->driver->load) +		if ((retcode = dev->driver->load(dev, ent->driver_data))) +			goto error_out_unreg; + +  	retcode = drm_ctxbitmap_init(dev);  	if (retcode) {  		DRM_ERROR("Cannot allocate memory for context bitmap.\n"); @@ -217,7 +219,7 @@ err_g1:   * Try and register, if we fail to register, backout previous work.   */  int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, -	      struct drm_driver *driver) +		struct drm_driver *driver)  {  	struct drm_device *dev;  	int ret; @@ -317,7 +319,7 @@ int drm_put_head(struct drm_head * head)  	drm_proc_cleanup(minor, drm_proc_root, head->dev_root);  	drm_sysfs_device_remove(head->dev); -	*head = (struct drm_head){.dev = NULL}; +	*head = (struct drm_head) {.dev = NULL};  	drm_heads[minor] = NULL;  	return 0; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 6f8623ce..3aaac11b 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -89,8 +89,10 @@ struct class *drm_sysfs_create(struct module *owner, char *name)  		goto err_out;  	} +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))  	class->suspend = drm_sysfs_suspend;  	class->resume = drm_sysfs_resume; +#endif  	err = class_create_file(class, &class_attr_version);  	if (err) @@ -160,12 +162,7 @@ int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)  	dev->dev.parent = &dev->pdev->dev;  	dev->dev.class = drm_class;  	dev->dev.release = drm_sysfs_device_release; -	/* -	 * This will actually add the major:minor file so that udev -	 * will create the device node.  We don't want to do that just -	 * yet... 
-	 */ -	/* dev->dev.devt = head->device; */ +	dev->dev.devt = head->device;  	snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);  	err = device_register(&dev->dev); diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index df9e7e44..a9d87338 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -46,7 +46,7 @@ EXPORT_SYMBOL(drm_ttm_cache_flush);   * Use kmalloc if possible. Otherwise fall back to vmalloc.   */ -static void ttm_alloc_pages(struct drm_ttm * ttm) +static void drm_ttm_alloc_pages(struct drm_ttm *ttm)  {  	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);  	ttm->pages = NULL; @@ -54,20 +54,19 @@ static void ttm_alloc_pages(struct drm_ttm * ttm)  	if (drm_alloc_memctl(size))  		return; -	if (size <= PAGE_SIZE) { +	if (size <= PAGE_SIZE)  		ttm->pages = drm_calloc(1, size, DRM_MEM_TTM); -	} +  	if (!ttm->pages) {  		ttm->pages = vmalloc_user(size);  		if (ttm->pages)  			ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;  	} -	if (!ttm->pages) { +	if (!ttm->pages)  		drm_free_memctl(size); -	}  } -static void ttm_free_pages(struct drm_ttm * ttm) +static void drm_ttm_free_pages(struct drm_ttm *ttm)  {  	unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -85,17 +84,15 @@ static struct page *drm_ttm_alloc_page(void)  {  	struct page *page; -	if (drm_alloc_memctl(PAGE_SIZE)) { +	if (drm_alloc_memctl(PAGE_SIZE))  		return NULL; -	} +  	page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);  	if (!page) {  		drm_free_memctl(PAGE_SIZE);  		return NULL;  	} -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)) -	SetPageLocked(page); -#else +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))  	SetPageReserved(page);  #endif  	return page; @@ -106,7 +103,7 @@ static struct page *drm_ttm_alloc_page(void)   * for range of pages in a ttm.   */ -static int drm_set_caching(struct drm_ttm * ttm, int noncached) +static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)  {  	int i;  	struct page **cur_page; @@ -139,15 +136,65 @@ static int drm_set_caching(struct drm_ttm * ttm, int noncached)  	return 0;  } -/* - * Free all resources associated with a ttm. - */ -int drm_destroy_ttm(struct drm_ttm * ttm) +static void drm_ttm_free_user_pages(struct drm_ttm *ttm)  { +	int write; +	int dirty; +	struct page *page; +	int i; +	BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER)); +	write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0); +	dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0); + +	for (i = 0; i < ttm->num_pages; ++i) { +		page = ttm->pages[i]; +		if (page == NULL) +			continue; + +		if (page == ttm->dummy_read_page) { +			BUG_ON(write); +			continue; +		} + +		if (write && dirty && !PageReserved(page)) +			set_page_dirty_lock(page); + +		ttm->pages[i] = NULL; +		put_page(page); +	} +} + +static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm) +{  	int i; +	struct drm_buffer_manager *bm = &ttm->dev->bm;  	struct page **cur_page; + +	for (i = 0; i < ttm->num_pages; ++i) { +		cur_page = ttm->pages + i; +		if (*cur_page) { +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) +			ClearPageReserved(*cur_page); +#endif +			if (page_count(*cur_page) != 1) +				DRM_ERROR("Erroneous page count. Leaking pages.\n"); +			if (page_mapped(*cur_page)) +				DRM_ERROR("Erroneous map count. Leaking page mappings.\n"); +			__free_page(*cur_page); +			drm_free_memctl(PAGE_SIZE); +			--bm->cur_pages; +		} +	} +} + +/* + * Free all resources associated with a ttm. 
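+ *
+ * A minimal lifecycle sketch around this call, assuming a driver whose
+ * bo_driver supplies create_ttm_backend_entry(); dev, size, bo_mem and
+ * dummy_page are placeholder names, not symbols from this patch:
+ *
+ *	ttm = drm_ttm_create(dev, size, DRM_TTM_PAGE_WRITE, dummy_page);
+ *	if (ttm && !drm_ttm_populate(ttm))
+ *		drm_ttm_bind(ttm, &bo_mem);
+ *	...
+ *	drm_ttm_unbind(ttm);
+ *	drm_ttm_destroy(ttm);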
+ */
+
+int drm_ttm_destroy(struct drm_ttm *ttm)
+{
 	struct drm_ttm_backend *be;
 
 	if (!ttm)
@@ -160,39 +207,22 @@ int drm_destroy_ttm(struct drm_ttm * ttm)
 	}
 
 	if (ttm->pages) {
-		struct drm_buffer_manager *bm = &ttm->dev->bm;
 		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
-			drm_set_caching(ttm, 0);
+			drm_ttm_set_caching(ttm, 0);
 
-		for (i = 0; i < ttm->num_pages; ++i) {
-			cur_page = ttm->pages + i;
-			if (*cur_page) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
-				unlock_page(*cur_page);
-#else
-				ClearPageReserved(*cur_page);
-#endif
-				if (page_count(*cur_page) != 1) {
-					DRM_ERROR("Erroneous page count. "
-						  "Leaking pages.\n");
-				}
-				if (page_mapped(*cur_page)) {
-					DRM_ERROR("Erroneous map count. "
						  "Leaking page mappings.\n");
-				}
-				__free_page(*cur_page);
-				drm_free_memctl(PAGE_SIZE);
-				--bm->cur_pages;
-			}
-		}
-		ttm_free_pages(ttm);
+		if (ttm->page_flags & DRM_TTM_PAGE_USER)
+			drm_ttm_free_user_pages(ttm);
+		else
+			drm_ttm_free_alloced_pages(ttm);
+
+		drm_ttm_free_pages(ttm);
 	}
 
 	drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
 	return 0;
 }
 
-struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
 {
 	struct page *p;
 	struct drm_buffer_manager *bm = &ttm->dev->bm;
@@ -209,7 +239,56 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
 }
 EXPORT_SYMBOL(drm_ttm_get_page);
 
-int drm_ttm_populate(struct drm_ttm * ttm)
+/**
+ * drm_ttm_set_user:
+ *
+ * @ttm: the ttm to map pages to. This must always be
+ * a freshly created ttm.
+ *
+ * @tsk: the task owning the address space from which to map pages.
+ *
+ * @start: the first user address to map.
+ *
+ * @num_pages: the number of pages to map; must equal ttm->num_pages.
+ *
+ * Map a range of user addresses to a new ttm object. This provides
+ * access to user memory from the graphics device. Write access is
+ * derived from the DRM_TTM_PAGE_WRITE bit in ttm->page_flags.
+ */
+int drm_ttm_set_user(struct drm_ttm *ttm,
+		     struct task_struct *tsk,
+		     unsigned long start,
+		     unsigned long num_pages)
+{
+	struct mm_struct *mm = tsk->mm;
+	int ret;
+	int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;
+
+	BUG_ON(num_pages != ttm->num_pages);
+	BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);
+
+	down_read(&mm->mmap_sem);
+	ret = get_user_pages(tsk, mm, start, num_pages,
+			     write, 0, ttm->pages, NULL);
+	up_read(&mm->mmap_sem);
+
+	if (ret != num_pages && write) {
+		drm_ttm_free_user_pages(ttm);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * drm_ttm_populate:
+ *
+ * @ttm: the object to allocate pages for
+ *
+ * For writable ttms, allocate pages for all unset page entries, then
+ * call the backend to create the hardware mappings.
+ */
+int drm_ttm_populate(struct drm_ttm *ttm)
 {
 	struct page *page;
 	unsigned long i;
@@ -219,21 +298,32 @@ int drm_ttm_populate(struct drm_ttm * ttm)
 		return 0;
 	be = ttm->be;
 
-	for (i = 0; i < ttm->num_pages; ++i) {
-		page = drm_ttm_get_page(ttm, i);
-		if (!page)
-			return -ENOMEM;
+	if (ttm->page_flags & DRM_TTM_PAGE_WRITE) {
+		for (i = 0; i < ttm->num_pages; ++i) {
+			page = drm_ttm_get_page(ttm, i);
+			if (!page)
+				return -ENOMEM;
+		}
 	}
 
-	be->func->populate(be, ttm->num_pages, ttm->pages);
+	be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
 
 	ttm->state = ttm_unbound;
 	return 0;
 }
 
-/*
- * Initialize a ttm.
+/**
+ * drm_ttm_create:
+ *
+ * @dev: the drm_device
+ *
+ * @size: The size (in bytes) of the desired object
+ *
+ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h.
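+ *
+ * @dummy_read_page: a shared page passed through to the backend's
+ * populate hook in place of pages that are never written (see
+ * drm_ttm_populate() and drm_ttm_free_user_pages())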
+ * + * Allocate and initialize a ttm, leaving it unpopulated at this time   */ -struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size) +struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size, +			       uint32_t page_flags, struct page *dummy_read_page)  {  	struct drm_bo_driver *bo_driver = dev->driver->bo_driver;  	struct drm_ttm *ttm; @@ -251,21 +341,23 @@ struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)  	ttm->destroy = 0;  	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; -	ttm->page_flags = 0; +	ttm->page_flags = page_flags; + +	ttm->dummy_read_page = dummy_read_page;  	/*  	 * Account also for AGP module memory usage.  	 */ -	ttm_alloc_pages(ttm); +	drm_ttm_alloc_pages(ttm);  	if (!ttm->pages) { -		drm_destroy_ttm(ttm); +		drm_ttm_destroy(ttm);  		DRM_ERROR("Failed allocating page table\n");  		return NULL;  	}  	ttm->be = bo_driver->create_ttm_backend_entry(dev);  	if (!ttm->be) { -		drm_destroy_ttm(ttm); +		drm_ttm_destroy(ttm);  		DRM_ERROR("Failed creating ttm backend entry\n");  		return NULL;  	} @@ -273,11 +365,16 @@ struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)  	return ttm;  } -/* - * Unbind a ttm region from the aperture. +/** + * drm_ttm_evict: + * + * @ttm: the object to be unbound from the aperture. + * + * Transition a ttm from bound to evicted, where it + * isn't present in the aperture, but various caches may + * not be consistent.   */ - -void drm_ttm_evict(struct drm_ttm * ttm) +void drm_ttm_evict(struct drm_ttm *ttm)  {  	struct drm_ttm_backend *be = ttm->be;  	int ret; @@ -290,19 +387,34 @@ void drm_ttm_evict(struct drm_ttm * ttm)  	ttm->state = ttm_evicted;  } -void drm_ttm_fixup_caching(struct drm_ttm * ttm) +/** + * drm_ttm_fixup_caching: + * + * @ttm: the object to set unbound + * + * XXX this function is misnamed. Transition a ttm from evicted to + * unbound, flushing caches as appropriate. + */ +void drm_ttm_fixup_caching(struct drm_ttm *ttm)  {  	if (ttm->state == ttm_evicted) {  		struct drm_ttm_backend *be = ttm->be; -		if (be->func->needs_ub_cache_adjust(be)) { -			drm_set_caching(ttm, 0); -		} +		if (be->func->needs_ub_cache_adjust(be)) +			drm_ttm_set_caching(ttm, 0);  		ttm->state = ttm_unbound;  	}  } -void drm_ttm_unbind(struct drm_ttm * ttm) +/** + * drm_ttm_unbind: + * + * @ttm: the object to unbind from the graphics device + * + * Unbind an object from the aperture. This removes the mappings + * from the graphics device and flushes caches if necessary. + */ +void drm_ttm_unbind(struct drm_ttm *ttm)  {  	if (ttm->state == ttm_bound)  		drm_ttm_evict(ttm); @@ -310,7 +422,19 @@ void drm_ttm_unbind(struct drm_ttm * ttm)  	drm_ttm_fixup_caching(ttm);  } -int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem) +/** + * drm_ttm_bind: + * + * @ttm: the ttm object to bind to the graphics device + * + * @bo_mem: the aperture memory region which will hold the object + * + * Bind a ttm object to the aperture. 
This ensures that the necessary + * pages are allocated, flushes CPU caches as needed and marks the + * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been + * modified by the GPU + */ +int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)  {  	struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;  	int ret = 0; @@ -327,21 +451,22 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)  	if (ret)  		return ret; -	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) { -		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); -	} else if ((bo_mem->flags & DRM_BO_FLAG_CACHED) && +	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) +		drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED); +	else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&  		   bo_driver->ttm_cache_flush)  		bo_driver->ttm_cache_flush(ttm); -	if ((ret = be->func->bind(be, bo_mem))) { +	ret = be->func->bind(be, bo_mem); +	if (ret) {  		ttm->state = ttm_evicted;  		DRM_ERROR("Couldn't bind backend.\n");  		return ret;  	}  	ttm->state = ttm_bound; - +	if (ttm->page_flags & DRM_TTM_PAGE_USER) +		ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;  	return 0;  } - -EXPORT_SYMBOL(drm_bind_ttm); +EXPORT_SYMBOL(drm_ttm_bind); diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index d2554f31..c481a530 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -166,7 +166,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,   * \param address access address.   * \return pointer to the page structure.   * - * Get the the mapping, find the real physical page to map, get the page, and + * Get the mapping, find the real physical page to map, get the page, and   * return it.   */  static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, @@ -189,7 +189,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,  		return NOPAGE_SIGBUS;  	get_page(page); -	DRM_DEBUG("shm_nopage 0x%lx\n", address); +	DRM_DEBUG("0x%lx\n", address);  	return page;  } @@ -263,7 +263,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)  				dmah.size = map->size;  				__drm_pci_free(dev, &dmah);  				break; -		        case _DRM_TTM: +			case _DRM_TTM:  				BUG_ON(1);  				break;  			} @@ -305,7 +305,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,  	get_page(page); -	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr); +	DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);  	return page;  } @@ -632,9 +632,9 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)  		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);  #endif  		if (io_remap_pfn_range(vma, vma->vm_start, -					(map->offset + offset) >> PAGE_SHIFT, -					vma->vm_end - vma->vm_start, -					vma->vm_page_prot)) +				       (map->offset + offset) >> PAGE_SHIFT, +				       vma->vm_end - vma->vm_start, +				       vma->vm_page_prot))  			return -EAGAIN;  		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"  			  " offset = 0x%lx\n", @@ -751,10 +751,10 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,  	 */  	if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) { -		uint32_t new_mask = bo->mem.mask | +		uint32_t new_flags = bo->mem.proposed_flags |  			DRM_BO_FLAG_MAPPABLE |  			DRM_BO_FLAG_FORCE_MAPPABLE; -		err = drm_bo_move_buffer(bo, new_mask, 0, 0); +		err = drm_bo_move_buffer(bo, new_flags, 0, 0);  		if (err) {  			ret = (err != -EAGAIN) ? 
NOPFN_SIGBUS : NOPFN_REFAULT;  			goto out_unlock; diff --git a/linux-core/ffb_drv.h b/linux-core/ffb_drv.h index bad3c94d..f961ba47 100644 --- a/linux-core/ffb_drv.h +++ b/linux-core/ffb_drv.h @@ -124,7 +124,7 @@ typedef struct _ffb_fbc {  /*294*/	volatile unsigned int	xpmask;		/* X PlaneMask				*/  /*298*/	volatile unsigned int	ypmask;		/* Y PlaneMask				*/  /*29c*/	volatile unsigned int	zpmask;		/* Z PlaneMask				*/ -/*2a0*/	ffb_auxclip		auxclip[4]; 	/* Auxilliary Viewport Clip		*/ +/*2a0*/	ffb_auxclip		auxclip[4];	/* Auxilliary Viewport Clip		*/  	/* New 3dRAM III support regs */  /*2c0*/	volatile unsigned int	rawblend2; @@ -266,7 +266,7 @@ typedef struct ffb_dev_priv {  	int			prom_node;  	enum ffb_chip_type	ffb_type;  	u64			card_phys_base; -	struct miscdevice 	miscdev; +	struct miscdevice	miscdev;  	/* Controller registers. */  	ffb_fbcPtr		regs; diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 7c37b4bb..3c9ca3b2 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -41,7 +41,7 @@  #define I810_BUF_FREE		2  #define I810_BUF_CLIENT		1 -#define I810_BUF_HARDWARE      	0 +#define I810_BUF_HARDWARE	0  #define I810_BUF_UNMAPPED 0  #define I810_BUF_MAPPED   1 @@ -589,7 +589,7 @@ static void i810EmitState(struct drm_device * dev)  	drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;  	unsigned int dirty = sarea_priv->dirty; -	DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); +	DRM_DEBUG("%x\n", dirty);  	if (dirty & I810_UPLOAD_BUFFERS) {  		i810EmitDestVerified(dev, sarea_priv->BufferState); @@ -821,8 +821,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)  	int pitch = dev_priv->pitch;  	RING_LOCALS; -	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", -		  __FUNCTION__, +	DRM_DEBUG("page=%d pfCurrentPage=%d\n",  		  dev_priv->current_page,  		  dev_priv->sarea_priv->pf_current_page); @@ -867,8 +866,6 @@ static void i810_dma_quiescent(struct drm_device * dev)  	drm_i810_private_t *dev_priv = dev->dev_private;  	RING_LOCALS; -/*  	printk("%s\n", __FUNCTION__); */ -  	i810_kernel_lost_context(dev);  	BEGIN_LP_RING(4); @@ -888,8 +885,6 @@ static int i810_flush_queue(struct drm_device * dev)  	int i, ret = 0;  	RING_LOCALS; -/*  	printk("%s\n", __FUNCTION__); */ -  	i810_kernel_lost_context(dev);  	BEGIN_LP_RING(2); @@ -968,7 +963,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,  	LOCK_TEST_WITH_RETURN(dev, file_priv); -	DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", +	DRM_DEBUG("idx %d used %d discard %d\n",  		  vertex->idx, vertex->used, vertex->discard);  	if (vertex->idx < 0 || vertex->idx > dma->buf_count) @@ -1006,7 +1001,7 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,  static int i810_swap_bufs(struct drm_device *dev, void *data,  			  struct drm_file *file_priv)  { -	DRM_DEBUG("i810_swap_bufs\n"); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1087,11 +1082,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,  	sarea_priv->dirty = 0x7f; -	DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used); +	DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);  	dev_priv->counter++;  	DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter); -	DRM_DEBUG("i810_dma_dispatch_mc\n");  	DRM_DEBUG("start : %lx\n", start);  	DRM_DEBUG("used : %d\n", used);  	DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4); @@ -1197,7 +1191,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)  {  	drm_i810_private_t *dev_priv = dev->dev_private; -	DRM_DEBUG("%s\n", 
__FUNCTION__); +	DRM_DEBUG("\n");  	dev_priv->page_flipping = 1;  	dev_priv->current_page = 0;  	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; @@ -1207,7 +1201,7 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)  {  	drm_i810_private_t *dev_priv = dev->dev_private; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	if (dev_priv->current_page != 0)  		i810_dma_dispatch_flip(dev); @@ -1220,7 +1214,7 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,  {  	drm_i810_private_t *dev_priv = dev->dev_private; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1271,7 +1265,7 @@ int i810_driver_dma_quiescent(struct drm_device * dev)  }  struct drm_ioctl_desc i810_ioctls[] = { -  DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), +	DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),  	DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),  	DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),  	DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH), diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index c525e165..f5c175fe 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -25,7 +25,7 @@   * DEALINGS IN THE SOFTWARE.   *   * Authors: Rickard E. (Rik) Faith <faith@valinux.com> - * 	    Jeff Hartmann <jhartmann@valinux.com> + *	    Jeff Hartmann <jhartmann@valinux.com>   *   */ @@ -134,7 +134,7 @@ extern int i810_max_ioctl;  #define I810_ADDR(reg)		(I810_BASE(reg) + reg)  #define I810_DEREF(reg)		*(__volatile__ int *)I810_ADDR(reg)  #define I810_READ(reg)		I810_DEREF(reg) -#define I810_WRITE(reg,val) 	do { I810_DEREF(reg) = val; } while (0) +#define I810_WRITE(reg,val)	do { I810_DEREF(reg) = val; } while (0)  #define I810_DEREF16(reg)	*(__volatile__ u16 *)I810_ADDR(reg)  #define I810_READ16(reg)	I810_DEREF16(reg)  #define I810_WRITE16(reg,val)	do { I810_DEREF16(reg) = val; } while (0) @@ -145,7 +145,7 @@ extern int i810_max_ioctl;  #define BEGIN_LP_RING(n) do {						\  	if (I810_VERBOSE)						\ -		DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);\ +		DRM_DEBUG("BEGIN_LP_RING(%d)\n", n);			\  	if (dev_priv->ring.space < n*4)					\  		i810_wait_ring(dev, n*4);				\  	dev_priv->ring.space -= n*4;					\ @@ -155,19 +155,19 @@ extern int i810_max_ioctl;  } while (0)  #define ADVANCE_LP_RING() do {					\ -	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");    	\ +	if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n");	\  	dev_priv->ring.tail = outring;				\  	I810_WRITE(LP_RING + RING_TAIL, outring);		\  } while(0) -#define OUT_RING(n) do {  						\ +#define OUT_RING(n) do {						\  	if (I810_VERBOSE) DRM_DEBUG("   OUT_RING %x\n", (int)(n));	\  	*(volatile unsigned int *)(virt + outring) = n;			\  	outring += 4;							\  	outring &= ringmask;						\  } while (0) -#define GFX_OP_USER_INTERRUPT 		((0<<29)|(2<<23)) +#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))  #define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))  #define CMD_REPORT_HEAD			(7<<23)  #define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1) @@ -184,28 +184,28 @@ extern int i810_max_ioctl;  #define I810REG_HWSTAM		0x02098  #define I810REG_INT_IDENTITY_R	0x020a4 -#define I810REG_INT_MASK_R 	0x020a8 +#define I810REG_INT_MASK_R	0x020a8  #define I810REG_INT_ENABLE_R	0x020a0 -#define LP_RING     		0x2030 -#define HP_RING     		0x2040 -#define RING_TAIL      		0x00 +#define LP_RING			0x2030 +#define HP_RING			0x2040 +#define RING_TAIL		0x00  #define TAIL_ADDR		0x000FFFF8 
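+
+/*
+ * LP_RING and HP_RING are per-ring register blocks; RING_TAIL, RING_HEAD,
+ * RING_START and RING_LEN below are dword offsets within each block.
+ */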
-#define RING_HEAD      		0x04 -#define HEAD_WRAP_COUNT     	0xFFE00000 -#define HEAD_WRAP_ONE       	0x00200000 -#define HEAD_ADDR           	0x001FFFFC -#define RING_START     		0x08 -#define START_ADDR        	0x00FFFFF8 -#define RING_LEN       		0x0C -#define RING_NR_PAGES       	0x000FF000 -#define RING_REPORT_MASK    	0x00000006 -#define RING_REPORT_64K     	0x00000002 -#define RING_REPORT_128K    	0x00000004 -#define RING_NO_REPORT      	0x00000000 -#define RING_VALID_MASK     	0x00000001 -#define RING_VALID          	0x00000001 -#define RING_INVALID        	0x00000000 +#define RING_HEAD		0x04 +#define HEAD_WRAP_COUNT		0xFFE00000 +#define HEAD_WRAP_ONE		0x00200000 +#define HEAD_ADDR		0x001FFFFC +#define RING_START		0x08 +#define START_ADDR		0x00FFFFF8 +#define RING_LEN		0x0C +#define RING_NR_PAGES		0x000FF000 +#define RING_REPORT_MASK	0x00000006 +#define RING_REPORT_64K		0x00000002 +#define RING_REPORT_128K	0x00000004 +#define RING_NO_REPORT		0x00000000 +#define RING_VALID_MASK		0x00000001 +#define RING_VALID		0x00000001 +#define RING_INVALID		0x00000000  #define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))  #define SC_UPDATE_SCISSOR       (0x1<<1) diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index bbc7e1db..08067476 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -1,8 +1,8 @@  /************************************************************************** - *  + *   * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA   * All Rights Reserved. - *  + *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including @@ -10,20 +10,20 @@   * distribute, sub license, and/or sell copies of the Software, and to   * permit persons to whom the Software is furnished to do so, subject to   * the following conditions: - *  + *   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,   * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL   * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE   * USE OR OTHER DEALINGS IN THE SOFTWARE.   *   * The above copyright notice and this permission notice (including the   * next paragraph) shall be included in all copies or substantial portions   * of the Software. 
- *  - *  + * + *   **************************************************************************/  /*   * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> @@ -33,23 +33,23 @@  #include "i915_drm.h"  #include "i915_drv.h" -struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) +struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)  {  	return drm_agp_init_ttm(dev);  } -int i915_fence_types(struct drm_buffer_object *bo, -		     uint32_t * fclass, -		     uint32_t * type) +int i915_fence_type(struct drm_buffer_object *bo, +		     uint32_t *fclass, +		     uint32_t *type)  { -	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) +	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))  		*type = 3;  	else  		*type = 1;  	return 0;  } -int i915_invalidate_caches(struct drm_device * dev, uint64_t flags) +int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)  {  	/*  	 * FIXME: Only emit once per batchbuffer submission. @@ -65,8 +65,8 @@ int i915_invalidate_caches(struct drm_device * dev, uint64_t flags)  	return i915_emit_mi_flush(dev, flush_cmd);  } -int i915_init_mem_type(struct drm_device * dev, uint32_t type, -		       struct drm_mem_type_manager * man) +int i915_init_mem_type(struct drm_device *dev, uint32_t type, +		       struct drm_mem_type_manager *man)  {  	switch (type) {  	case DRM_BO_MEM_LOCAL: @@ -110,7 +110,16 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,  	return 0;  } -uint32_t i915_evict_mask(struct drm_buffer_object *bo) +/* + * i915_evict_flags: + * + * @bo: the buffer object to be evicted + * + * Return the bo flags for a buffer which is not mapped to the hardware. + * These will be placed in proposed_flags so that when the move is + * finished, they'll end up in bo->mem.flags + */ +uint64_t i915_evict_flags(struct drm_buffer_object *bo)  {  	switch (bo->mem.mem_type) {  	case DRM_BO_MEM_LOCAL: @@ -183,7 +192,7 @@ static int i915_move_blit(struct drm_buffer_object * bo,  }  /* - * Flip destination ttm into cached-coherent AGP,  + * Flip destination ttm into cached-coherent AGP,   * then blit and subsequently move out again.   */ @@ -226,25 +235,24 @@ out_cleanup:  #endif  /* - * Disable i915_move_flip for now, since we can't guarantee that the hardware lock - * is held here. To re-enable we need to make sure either + * Disable i915_move_flip for now, since we can't guarantee that the hardware + * lock is held here. To re-enable we need to make sure either   * a) The X server is using DRM to submit commands to the ring, or - * b) DRM can use the HP ring for these blits. This means i915 needs to implement - *    a new ring submission mechanism and fence class. + * b) DRM can use the HP ring for these blits. This means i915 needs to + *    implement a new ring submission mechanism and fence class.   
*/ - -int i915_move(struct drm_buffer_object * bo, -	      int evict, int no_wait, struct drm_bo_mem_reg * new_mem) +int i915_move(struct drm_buffer_object *bo, +	      int evict, int no_wait, struct drm_bo_mem_reg *new_mem)  {  	struct drm_bo_mem_reg *old_mem = &bo->mem;  	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {  		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);  	} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { -		if (0 /*i915_move_flip(bo, evict, no_wait, new_mem)*/) +		if (0) /*i915_move_flip(bo, evict, no_wait, new_mem)*/  			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);  	} else { -		if (0 /*i915_move_blit(bo, evict, no_wait, new_mem)*/) +		if (0) /*i915_move_blit(bo, evict, no_wait, new_mem)*/  			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);  	}  	return 0; @@ -258,8 +266,8 @@ static inline void clflush(volatile void *__p)  #endif  static inline void drm_cache_flush_addr(void *virt) -{  -        int i; +{ +	int i;  	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)  		clflush(virt+i); @@ -278,7 +286,18 @@ void i915_flush_ttm(struct drm_ttm *ttm)  		return;  	DRM_MEMORYBARRIER(); -	for (i = ttm->num_pages-1; i >= 0; i--) + +#ifdef CONFIG_X86_32 +	/* Hopefully nobody has built an x86-64 processor without clflush */ +	if (!cpu_has_clflush) { +		wbinvd(); +		DRM_MEMORYBARRIER(); +		return; +	} +#endif + +	for (i = ttm->num_pages - 1; i >= 0; i--)  		drm_cache_flush_page(drm_ttm_get_page(ttm, i)); +  	DRM_MEMORYBARRIER();  } diff --git a/linux-core/i915_compat.c b/linux-core/i915_compat.c new file mode 100644 index 00000000..cc024085 --- /dev/null +++ b/linux-core/i915_compat.c @@ -0,0 +1,215 @@ +#include "drmP.h" + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) + +#include "i915_drm.h" +#include "i915_drv.h" + +#define PCI_DEVICE_ID_INTEL_82946GZ_HB      0x2970 +#define PCI_DEVICE_ID_INTEL_82965G_1_HB     0x2980 +#define PCI_DEVICE_ID_INTEL_82965Q_HB       0x2990 +#define PCI_DEVICE_ID_INTEL_82965G_HB       0x29A0 +#define PCI_DEVICE_ID_INTEL_82965GM_HB      0x2A00 +#define PCI_DEVICE_ID_INTEL_82965GME_HB     0x2A10 +#define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC +#define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0 +#define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0 +#define PCI_DEVICE_ID_INTEL_Q33_HB          0x29D0 + +#define I915_IFPADDR    0x60 +#define I965_IFPADDR    0x70 + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) +#define upper_32_bits(_val) (((u64)(_val)) >> 32) +#endif + +static struct _i9xx_private_compat { +	void __iomem *flush_page; +	int resource_valid; +	struct resource ifp_resource; +} i9xx_private; + +static struct _i8xx_private_compat { +	void *flush_page; +	struct page *page; +} i8xx_private; + +static void +intel_compat_align_resource(void *data, struct resource *res, +                        resource_size_t size, resource_size_t align) +{ +	return; +} + + +static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev) +{ +	int ret; +	ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE, +				     PAGE_SIZE, PCIBIOS_MIN_MEM, 0, +				     intel_compat_align_resource, pdev); +	if (ret != 0) +		return ret; + +	return 0; +} + +static void intel_i915_setup_chipset_flush(struct pci_dev *pdev) +{ +	int ret; +	u32 temp; + +	pci_read_config_dword(pdev, I915_IFPADDR, &temp); +	if (!(temp & 0x1)) { +		intel_alloc_chipset_flush_resource(pdev); +		i9xx_private.resource_valid = 1; +		pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1); +	} else 
{
+		temp &= ~1;
+
+		i9xx_private.resource_valid = 1;
+		i9xx_private.ifp_resource.start = temp;
+		i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
+		ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
+		if (ret) {
+			i9xx_private.resource_valid = 0;
+			printk(KERN_ERR "Failed inserting resource into tree\n");
+		}
+	}
+}
+
+static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
+{
+	u32 temp_hi, temp_lo;
+	int ret;
+
+	pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi);
+	pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo);
+
+	if (!(temp_lo & 0x1)) {
+
+		intel_alloc_chipset_flush_resource(pdev);
+
+		i9xx_private.resource_valid = 1;
+		pci_write_config_dword(pdev, I965_IFPADDR + 4,
+			upper_32_bits(i9xx_private.ifp_resource.start));
+		pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
+	} else {
+		u64 l64;
+
+		temp_lo &= ~0x1;
+		l64 = ((u64)temp_hi << 32) | temp_lo;
+
+		i9xx_private.resource_valid = 1;
+		i9xx_private.ifp_resource.start = l64;
+		i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
+		ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
+		if (ret) {
+			i9xx_private.resource_valid = 0;
+			printk(KERN_ERR "Failed inserting resource into tree\n");
+		}
+	}
+}
+
+static void intel_i8xx_fini_flush(struct drm_device *dev)
+{
+	kunmap(i8xx_private.page);
+	i8xx_private.flush_page = NULL;
+	unmap_page_from_agp(i8xx_private.page);
+	flush_agp_mappings();
+
+	__free_page(i8xx_private.page);
+}
+
+static void intel_i8xx_setup_flush(struct drm_device *dev)
+{
+	i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+	if (!i8xx_private.page)
+		return;
+
+	/* make page uncached */
+	map_page_into_agp(i8xx_private.page);
+	flush_agp_mappings();
+
+	i8xx_private.flush_page = kmap(i8xx_private.page);
+	if (!i8xx_private.flush_page)
+		intel_i8xx_fini_flush(dev);
+}
+
+static void intel_i8xx_flush_page(struct drm_device *dev)
+{
+	unsigned int *pg = i8xx_private.flush_page;
+	int i;
+
+	/* i8xx has no IFP register; write every dword of the uncached
+	 * flush page to force the chipset write buffers to drain. */
+	for (i = 0; i < 256; i++)
+		*(pg + i) = i;
+
+	DRM_MEMORYBARRIER();
+}
+
+static void intel_i9xx_setup_flush(struct drm_device *dev)
+{
+	struct pci_dev *agp_dev = dev->agp->agp_info.device;
+
+	i9xx_private.ifp_resource.name = "GMCH IFPBAR";
+	i9xx_private.ifp_resource.flags = IORESOURCE_MEM;
+
+	/* Setup chipset flush for 915 */
+	if (IS_I965G(dev) || IS_G33(dev)) {
+		intel_i965_g33_setup_chipset_flush(agp_dev);
+	} else {
+		intel_i915_setup_chipset_flush(agp_dev);
+	}
+
+	if (i9xx_private.ifp_resource.start) {
+		i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE);
+		if (!i9xx_private.flush_page)
+			printk(KERN_ERR "unable to ioremap flush page - no chipset flushing\n");
+	}
+}
+
+static void intel_i9xx_fini_flush(struct drm_device *dev)
+{
+	iounmap(i9xx_private.flush_page);
+	if (i9xx_private.resource_valid)
+		release_resource(&i9xx_private.ifp_resource);
+	i9xx_private.resource_valid = 0;
+}
+
+static void intel_i9xx_flush_page(struct drm_device *dev)
+{
+	if (i9xx_private.flush_page)
+		writel(1, i9xx_private.flush_page);
+}
+
+void intel_init_chipset_flush_compat(struct drm_device *dev)
+{
+	/* i9xx flushes via the IFP page; i8xx hammers an uncached page */
+	if (IS_I9XX(dev))
+		intel_i9xx_setup_flush(dev);
+	else
+		intel_i8xx_setup_flush(dev);
+}
+
+void intel_fini_chipset_flush_compat(struct drm_device *dev)
+{
+	/* i9xx flushes via the IFP page; i8xx hammers an uncached page */
+	if (IS_I9XX(dev))
+		intel_i9xx_fini_flush(dev);
+	else
+		intel_i8xx_fini_flush(dev);
+}
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+	if (IS_I9XX(dev))
+		intel_i9xx_flush_page(dev);
+	else
+		intel_i8xx_flush_page(dev);
+}
+#endif
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 84df64a7..a5f60ee1 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -1,10 +1,10 @@
 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
  */
 /*
- * 
+ *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
- * 
+ *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
@@ -12,11 +12,11 @@
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
- * 
+ *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
- * 
+ *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@@ -24,7 +24,7 @@
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *  + *   */  #include "drmP.h" @@ -61,10 +61,10 @@ static struct drm_bo_driver i915_bo_driver = {  	.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),  	.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),  	.create_ttm_backend_entry = i915_create_ttm_backend_entry, -	.fence_type = i915_fence_types, +	.fence_type = i915_fence_type,  	.invalidate_caches = i915_invalidate_caches,  	.init_mem_type = i915_init_mem_type, -	.evict_mask = i915_evict_mask, +	.evict_flags = i915_evict_flags,  	.move = i915_move,  	.ttm_cache_flush = i915_flush_ttm,  }; @@ -330,7 +330,7 @@ static int i915_suspend(struct drm_device *dev)  	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);  	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);  	dev_priv->saveDSPBBASE = I915_READ(DSPBBASE); -	if (IS_I965GM(dev)) { +	if (IS_I965GM(dev) || IS_IGD_GM(dev)) {  		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);  		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);  	} @@ -420,7 +420,7 @@ static int i915_resume(struct drm_device *dev)  	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);  	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);  	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); -    +  	/* Restore plane info */  	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);  	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); @@ -431,7 +431,11 @@ static int i915_resume(struct drm_device *dev)  		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);  		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);  	} -	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); + +	if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) && +	    (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS)) +		I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); +  	i915_restore_palette(dev, PIPE_A);  	/* Enable the plane */  	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); @@ -451,7 +455,7 @@ static int i915_resume(struct drm_device *dev)  	if (IS_I965G(dev))  		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);  	udelay(150); -    +  	/* Restore mode */  	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);  	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); @@ -471,7 +475,10 @@ static int i915_resume(struct drm_device *dev)  		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);  		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);  	} -	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); + +	if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) && +	    (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS)) +		I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);  	i915_restore_palette(dev, PIPE_A);  	/* Enable the plane */  	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index a0f22785..e3c76df6 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ -1,8 +1,8 @@  /************************************************************************** - *  + *   * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA   * All Rights Reserved. - *  + *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including @@ -10,20 +10,20 @@   * distribute, sub license, and/or sell copies of the Software, and to   * permit persons to whom the Software is furnished to do so, subject to   * the following conditions: - *  + *   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,   * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL   * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE   * USE OR OTHER DEALINGS IN THE SOFTWARE.   *   * The above copyright notice and this permission notice (including the   * next paragraph) shall be included in all copies or substantial portions   * of the Software. - *  - *  + * + *   **************************************************************************/  /*   * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> @@ -38,7 +38,7 @@   * Implements an intel sync flush operation.   */ -static void i915_perform_flush(struct drm_device * dev) +static void i915_perform_flush(struct drm_device *dev)  {  	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;  	struct drm_fence_manager *fm = &dev->fm; @@ -63,14 +63,14 @@ static void i915_perform_flush(struct drm_device * dev)  		diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;  		if (diff < driver->wrap_diff && diff != 0) { -		        drm_fence_handler(dev, 0, sequence, +			drm_fence_handler(dev, 0, sequence,  					  DRM_FENCE_TYPE_EXE, 0);  		}  		if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {  			i915_user_irq_off(dev_priv);  			dev_priv->fence_irq_on = 0; -		} else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {  +		} else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {  			i915_user_irq_on(dev_priv);  			dev_priv->fence_irq_on = 1;  		} @@ -110,7 +110,7 @@ static void i915_perform_flush(struct drm_device * dev)  } -void i915_poke_flush(struct drm_device * dev, uint32_t class) +void i915_poke_flush(struct drm_device *dev, uint32_t class)  {  	struct drm_fence_manager *fm = &dev->fm;  	unsigned long flags; @@ -120,8 +120,9 @@ void i915_poke_flush(struct drm_device * dev, uint32_t class)  	write_unlock_irqrestore(&fm->lock, flags);  } -int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, -			     uint32_t * sequence, uint32_t * native_type) +int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class, +			     uint32_t flags, uint32_t *sequence, +			     uint32_t *native_type)  {  	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;  	if (!dev_priv) @@ -136,7 +137,7 @@ int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t f  	return 0;  } -void i915_fence_handler(struct drm_device * dev) +void i915_fence_handler(struct drm_device *dev)  {  	struct drm_fence_manager *fm = &dev->fm; diff --git a/linux-core/i915_ioc32.c b/linux-core/i915_ioc32.c index c1e776b7..0b8fff19 100644 --- a/linux-core/i915_ioc32.c +++ b/linux-core/i915_ioc32.c @@ -3,7 +3,7 @@   *   * 32-bit ioctl compatibility routines for the i915 DRM.   
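+ *
+ * Each wrapper below copies the 32-bit layout in from userspace, repacks
+ * it into the native structure via compat_alloc_user_space(), and forwards
+ * the result to drm_ioctl().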
* - * \author Alan Hourihane <alanh@fairlite.demon.co.uk>  + * \author Alan Hourihane <alanh@fairlite.demon.co.uk>   *   *   * Copyright (C) Paul Mackerras 2005 @@ -34,6 +34,7 @@  #include "drmP.h"  #include "drm.h"  #include "i915_drm.h" +#include "i915_drv.h"  typedef struct _drm_i915_batchbuffer32 {  	int start;		/* agp offset */ @@ -45,15 +46,15 @@ typedef struct _drm_i915_batchbuffer32 {  } drm_i915_batchbuffer32_t;  static int compat_i915_batchbuffer(struct file *file, unsigned int cmd, -			   unsigned long arg) +				   unsigned long arg)  {  	drm_i915_batchbuffer32_t batchbuffer32;  	drm_i915_batchbuffer_t __user *batchbuffer; -	 +  	if (copy_from_user  	    (&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))  		return -EFAULT; -	 +  	batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));  	if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))  	    || __put_user(batchbuffer32.start, &batchbuffer->start) @@ -65,7 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,  	    || __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,  			  &batchbuffer->cliprects))  		return -EFAULT; -	 +  	return drm_ioctl(file->f_dentry->d_inode, file,  			 DRM_IOCTL_I915_BATCHBUFFER,  			 (unsigned long) batchbuffer); @@ -81,15 +82,15 @@ typedef struct _drm_i915_cmdbuffer32 {  } drm_i915_cmdbuffer32_t;  static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd, -			   unsigned long arg) +				 unsigned long arg)  {  	drm_i915_cmdbuffer32_t cmdbuffer32;  	drm_i915_cmdbuffer_t __user *cmdbuffer; -	 +  	if (copy_from_user  	    (&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))  		return -EFAULT; -	 +  	cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));  	if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))  	    || __put_user((int __user *)(unsigned long)cmdbuffer32.buf, @@ -101,7 +102,7 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,  	    || __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,  			  &cmdbuffer->cliprects))  		return -EFAULT; -	 +  	return drm_ioctl(file->f_dentry->d_inode, file,  			 DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);  } @@ -111,7 +112,7 @@ typedef struct drm_i915_irq_emit32 {  } drm_i915_irq_emit32_t;  static int compat_i915_irq_emit(struct file *file, unsigned int cmd, -				  unsigned long arg) +				unsigned long arg)  {  	drm_i915_irq_emit32_t req32;  	drm_i915_irq_emit_t __user *request; @@ -134,7 +135,7 @@ typedef struct drm_i915_getparam32 {  } drm_i915_getparam32_t;  static int compat_i915_getparam(struct file *file, unsigned int cmd, -				     unsigned long arg) +				unsigned long arg)  {  	drm_i915_getparam32_t req32;  	drm_i915_getparam_t __user *request; @@ -161,7 +162,7 @@ typedef struct drm_i915_mem_alloc32 {  } drm_i915_mem_alloc32_t;  static int compat_i915_alloc(struct file *file, unsigned int cmd, -				     unsigned long arg) +			     unsigned long arg)  {  	drm_i915_mem_alloc32_t req32;  	drm_i915_mem_alloc_t __user *request; @@ -182,13 +183,73 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,  			 DRM_IOCTL_I915_ALLOC, (unsigned long) request);  } +typedef struct drm_i915_execbuffer32 { +	uint64_t ops_list; +	uint32_t num_buffers; +	struct _drm_i915_batchbuffer32 batch; +	drm_context_t context;  +	struct drm_fence_arg fence_arg; +} drm_i915_execbuffer32_t; + +static int compat_i915_execbuffer(struct file *file, unsigned int cmd, +			     unsigned long arg) +{ +	drm_i915_execbuffer32_t req32; +	struct 
drm_i915_execbuffer __user *request;
+	int err;
+
+	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+		return -EFAULT;
+
+	request = compat_alloc_user_space(sizeof(*request));
+
+	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+	    || __put_user(req32.ops_list, &request->ops_list)
+	    || __put_user(req32.num_buffers, &request->num_buffers)
+	    || __put_user(req32.context, &request->context)
+	    || __copy_to_user(&request->fence_arg, &req32.fence_arg,
+			      sizeof(req32.fence_arg))
+	    || __put_user(req32.batch.start, &request->batch.start)
+	    || __put_user(req32.batch.used, &request->batch.used)
+	    || __put_user(req32.batch.DR1, &request->batch.DR1)
+	    || __put_user(req32.batch.DR4, &request->batch.DR4)
+	    || __put_user(req32.batch.num_cliprects,
+			  &request->batch.num_cliprects)
+	    || __put_user((int __user *)(unsigned long)req32.batch.cliprects,
+			  &request->batch.cliprects))
+		return -EFAULT;
+
+	err = drm_ioctl(file->f_dentry->d_inode, file,
+			 DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request);
+
+	if (err)
+		return err;
+
+	if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle)
+	    || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class)
+	    || __get_user(req32.fence_arg.type, &request->fence_arg.type)
+	    || __get_user(req32.fence_arg.flags, &request->fence_arg.flags)
+	    || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled)
+	    || __get_user(req32.fence_arg.error, &request->fence_arg.error)
+	    || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence))
+		return -EFAULT;
+
+	if (copy_to_user((void __user *)arg, &req32, sizeof(req32)))
+		return -EFAULT;
+
+	return 0;
+}
+
 drm_ioctl_compat_t *i915_compat_ioctls[] = {
 	[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
 	[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
 	[DRM_I915_GETPARAM] = compat_i915_getparam,
 	[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
-	[DRM_I915_ALLOC] = compat_i915_alloc
+	[DRM_I915_ALLOC] = compat_i915_alloc,
+#ifdef I915_HAVE_BUFFER
+	[DRM_I915_EXECBUFFER] = compat_i915_execbuffer,
+#endif
 };
 
 /**
@@ -208,7 +269,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	if (nr < DRM_COMMAND_BASE)
 		return drm_compat_ioctl(filp, cmd, arg);
-	
+
 	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
 		fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
diff --git a/linux-core/mach64_drv.c b/linux-core/mach64_drv.c
index 9709934d..16bc9ff3 100644
--- a/linux-core/mach64_drv.c
+++ b/linux-core/mach64_drv.c
@@ -42,9 +42,11 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA
-	    | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+	    | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.lastclose = mach64_driver_lastclose,
-	.vblank_wait = mach64_driver_vblank_wait,
+	.get_vblank_counter = mach64_get_vblank_counter,
+	.enable_vblank = mach64_enable_vblank,
+	.disable_vblank = mach64_disable_vblank,
 	.irq_preinstall = mach64_driver_irq_preinstall,
 	.irq_postinstall = mach64_driver_irq_postinstall,
 	.irq_uninstall = mach64_driver_irq_uninstall,
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index 11796b01..14a0be45 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -141,10 +141,10 @@ static int 
mga_driver_device_is_agp(struct drm_device * dev)  	 * device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the  	 * device.  	 */ -	 +  	if ((pdev->device == 0x0525) && pdev->bus->self -	     && (pdev->bus->self->vendor == 0x3388) -	     && (pdev->bus->self->device == 0x0021) ) { +	    && (pdev->bus->self->vendor == 0x3388) +	    && (pdev->bus->self->device == 0x0021)) {  		return 0;  	} diff --git a/linux-core/mga_ioc32.c b/linux-core/mga_ioc32.c index 75f2a231..e3df567e 100644 --- a/linux-core/mga_ioc32.c +++ b/linux-core/mga_ioc32.c @@ -39,17 +39,17 @@  typedef struct drm32_mga_init {  	int func; -   	u32 sarea_priv_offset; +	u32 sarea_priv_offset;  	int chipset; -   	int sgram; +	int sgram;  	unsigned int maccess; -   	unsigned int fb_cpp; +	unsigned int fb_cpp;  	unsigned int front_offset, front_pitch; -   	unsigned int back_offset, back_pitch; -   	unsigned int depth_cpp; -   	unsigned int depth_offset, depth_pitch; -   	unsigned int texture_offset[MGA_NR_TEX_HEAPS]; -   	unsigned int texture_size[MGA_NR_TEX_HEAPS]; +	unsigned int back_offset, back_pitch; +	unsigned int depth_cpp; +	unsigned int depth_offset, depth_pitch; +	unsigned int texture_offset[MGA_NR_TEX_HEAPS]; +	unsigned int texture_size[MGA_NR_TEX_HEAPS];  	u32 fb_offset;  	u32 mmio_offset;  	u32 status_offset; @@ -64,10 +64,10 @@ static int compat_mga_init(struct file *file, unsigned int cmd,  	drm_mga_init32_t init32;  	drm_mga_init_t __user *init;  	int err = 0, i; -	 +  	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))  		return -EFAULT; -	 +  	init = compat_alloc_user_space(sizeof(*init));  	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))  	    || __put_user(init32.func, &init->func) @@ -90,7 +90,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,  	    || __put_user(init32.primary_offset, &init->primary_offset)  	    || __put_user(init32.buffers_offset, &init->buffers_offset))  		return -EFAULT; -	 +  	for (i=0; i<MGA_NR_TEX_HEAPS; i++)  	{  		err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]); @@ -98,7 +98,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,  	}  	if (err)  		return -EFAULT; -	 +  	return drm_ioctl(file->f_dentry->d_inode, file,  			 DRM_IOCTL_MGA_INIT, (unsigned long) init);  } @@ -115,7 +115,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,  {  	drm_mga_getparam32_t getparam32;  	drm_mga_getparam_t __user *getparam; -	 +  	if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))  		return -EFAULT; @@ -125,7 +125,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,  	    || __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value))  		return -EFAULT; -	return drm_ioctl(file->f_dentry->d_inode, file,  +	return drm_ioctl(file->f_dentry->d_inode, file,  			 DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);  } @@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,  		return -EFAULT;  	if (copy_to_user((void __user *)arg, &dma_bootstrap32, -	    		 sizeof(dma_bootstrap32))) +			 sizeof(dma_bootstrap32)))  		return -EFAULT;  	return 0; @@ -219,7 +219,7 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd,  	if (nr < DRM_COMMAND_BASE)  		return drm_compat_ioctl(filp, cmd, arg); -	 +  	if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))  		fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE]; diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c new file mode 100644 index 
00000000..a652bb1d --- /dev/null +++ b/linux-core/nouveau_buffer.c @@ -0,0 +1,298 @@ +/* + * Copyright 2007 Dave Airlied + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +/* + * Authors: Dave Airlied <airlied@linux.ie> + *	    Ben Skeggs   <darktama@iinet.net.au> + *	    Jeremy Kolb  <jkolb@brandeis.edu> + */ + +#include "drmP.h" +#include "nouveau_drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +static struct drm_ttm_backend * +nouveau_bo_create_ttm_backend_entry(struct drm_device * dev) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; + +	switch (dev_priv->gart_info.type) { +	case NOUVEAU_GART_AGP: +		return drm_agp_init_ttm(dev); +	case NOUVEAU_GART_SGDMA: +		return nouveau_sgdma_init_ttm(dev); +	default: +		DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type); +		break; +	} + +	return NULL; +} + +static int +nouveau_bo_fence_type(struct drm_buffer_object *bo, +		      uint32_t *fclass, uint32_t *type) +{ +	/* When we get called, *fclass is set to the requested fence class */ + +	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) +		*type = 3; +	else +		*type = 1; +	return 0; + +} + +static int +nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags) +{ +	/* We'll do this from user space. 
*/ +	return 0; +} + +static int +nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type, +			 struct drm_mem_type_manager *man) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; + +	switch (type) { +	case DRM_BO_MEM_LOCAL: +		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | +			     _DRM_FLAG_MEMTYPE_CACHED; +		man->drm_bus_maptype = 0; +		break; +	case DRM_BO_MEM_VRAM: +		man->flags = _DRM_FLAG_MEMTYPE_FIXED | +			     _DRM_FLAG_MEMTYPE_MAPPABLE | +			     _DRM_FLAG_NEEDS_IOREMAP; +		man->io_addr = NULL; +		man->drm_bus_maptype = _DRM_FRAME_BUFFER; +		man->io_offset = drm_get_resource_start(dev, 1); +		man->io_size = drm_get_resource_len(dev, 1); +		if (man->io_size > nouveau_mem_fb_amount(dev)) +			man->io_size = nouveau_mem_fb_amount(dev); +		break; +	case DRM_BO_MEM_PRIV0: +		/* Unmappable VRAM */ +		man->flags = _DRM_FLAG_MEMTYPE_CMA; +		man->drm_bus_maptype = 0; +		break; +	case DRM_BO_MEM_TT: +		switch (dev_priv->gart_info.type) { +		case NOUVEAU_GART_AGP: +			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | +				     _DRM_FLAG_MEMTYPE_CSELECT | +				     _DRM_FLAG_NEEDS_IOREMAP; +			man->drm_bus_maptype = _DRM_AGP; +			break; +		case NOUVEAU_GART_SGDMA: +			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | +				     _DRM_FLAG_MEMTYPE_CSELECT | +				     _DRM_FLAG_MEMTYPE_CMA; +			man->drm_bus_maptype = _DRM_SCATTER_GATHER; +			break; +		default: +			DRM_ERROR("Unknown GART type: %d\n", +				  dev_priv->gart_info.type); +			return -EINVAL; +		} + +		man->io_offset  = dev_priv->gart_info.aper_base; +		man->io_size    = dev_priv->gart_info.aper_size; +		man->io_addr   = NULL; +		break; +	default: +		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type); +		return -EINVAL; +	} +	return 0; +} + +static uint64_t +nouveau_bo_evict_flags(struct drm_buffer_object *bo) +{ +	switch (bo->mem.mem_type) { +	case DRM_BO_MEM_LOCAL: +	case DRM_BO_MEM_TT: +		return DRM_BO_FLAG_MEM_LOCAL; +	default: +		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED; +	} +	return 0; +} + + +/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access + * DRM_BO_MEM_{VRAM,PRIV0,TT} directly. + */ +static int +nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait, +		     struct drm_bo_mem_reg *new_mem) +{ +	struct drm_device *dev = bo->dev; +	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_drm_channel *dchan = &dev_priv->channel; +	struct drm_bo_mem_reg *old_mem = &bo->mem; +	uint32_t srch, dsth, page_count; + +	/* Can happen during init/takedown */ +	if (!dchan->chan) +		return -EINVAL; + +	srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; +	dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB; +	if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) { +		dchan->m2mf_dma_source = srch; +		dchan->m2mf_dma_destin = dsth; + +		BEGIN_RING(NvSubM2MF, +			   NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2); +		OUT_RING  (dchan->m2mf_dma_source); +		OUT_RING  (dchan->m2mf_dma_destin); +	} + +	page_count = new_mem->num_pages; +	while (page_count) { +		int line_count = (page_count > 2047) ? 
2047 : page_count; + +		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); +		OUT_RING  (old_mem->mm_node->start << PAGE_SHIFT); +		OUT_RING  (new_mem->mm_node->start << PAGE_SHIFT); +		OUT_RING  (PAGE_SIZE); /* src_pitch */ +		OUT_RING  (PAGE_SIZE); /* dst_pitch */ +		OUT_RING  (PAGE_SIZE); /* line_length */ +		OUT_RING  (line_count); +		OUT_RING  ((1<<8)|(1<<0)); +		OUT_RING  (0); +		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); +		OUT_RING  (0); + +		page_count -= line_count; +	} + +	return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id, +					 DRM_FENCE_TYPE_EXE, 0, new_mem); +} + +/* Flip pages into the GART and move if we can. */ +static int +nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait, +                     struct drm_bo_mem_reg *new_mem) +{ +        struct drm_device *dev = bo->dev; +        struct drm_bo_mem_reg tmp_mem; +        int ret; + +        tmp_mem = *new_mem; +        tmp_mem.mm_node = NULL; +        tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT | +				  DRM_BO_FLAG_CACHED | +				  DRM_BO_FLAG_FORCE_CACHING); + +        ret = drm_bo_mem_space(bo, &tmp_mem, no_wait); + +        if (ret) +                return ret; + +        ret = drm_ttm_bind (bo->ttm, &tmp_mem); +        if (ret) +                goto out_cleanup; + +        ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem); +        if (ret) +                goto out_cleanup; + +        ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem); + +out_cleanup: +        if (tmp_mem.mm_node) { +                mutex_lock(&dev->struct_mutex); +                if (tmp_mem.mm_node != bo->pinned_node) +                        drm_mm_put_block(tmp_mem.mm_node); +                tmp_mem.mm_node = NULL; +                mutex_unlock(&dev->struct_mutex); +        } +        return ret; +} + +static int +nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait, +		struct drm_bo_mem_reg *new_mem) +{ +	struct drm_bo_mem_reg *old_mem = &bo->mem; + +	if (new_mem->mem_type == DRM_BO_MEM_LOCAL) { +		if (old_mem->mem_type == DRM_BO_MEM_LOCAL) +			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); +#if 0 +		if (!nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem)) +#endif +			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); +	} +	else +	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { +#if 0 +		if (nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem)) +#endif +			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); +	} +	else { +//		if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem)) +			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); +	} +	return 0; +} + +static void +nouveau_bo_flush_ttm(struct drm_ttm *ttm) +{ +} + +static uint32_t nouveau_mem_prios[]  = { +	DRM_BO_MEM_PRIV0, +	DRM_BO_MEM_VRAM, +	DRM_BO_MEM_TT, +	DRM_BO_MEM_LOCAL +}; +static uint32_t nouveau_busy_prios[] = { +	DRM_BO_MEM_TT, +	DRM_BO_MEM_PRIV0, +	DRM_BO_MEM_VRAM, +	DRM_BO_MEM_LOCAL +}; + +struct drm_bo_driver nouveau_bo_driver = { +	.mem_type_prio = nouveau_mem_prios, +	.mem_busy_prio = nouveau_busy_prios, +	.num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t), +	.num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t), +	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry, +	.fence_type = nouveau_bo_fence_type, +	.invalidate_caches = nouveau_bo_invalidate_caches, +	.init_mem_type = nouveau_bo_init_mem_type, +	.evict_flags = nouveau_bo_evict_flags, +	.move = nouveau_bo_move, +	.ttm_cache_flush= nouveau_bo_flush_ttm +}; diff --git 
a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c index 01de67de..e9623eb1 100644 --- a/linux-core/nouveau_drv.c +++ b/linux-core/nouveau_drv.c @@ -81,6 +81,9 @@ static struct drm_driver driver = {  		.remove = __devexit_p(drm_cleanup_pci),  	}, +        .bo_driver = &nouveau_bo_driver, +        .fence_driver = &nouveau_fence_driver, +  	.name = DRIVER_NAME,  	.desc = DRIVER_DESC,  	.date = DRIVER_DATE, diff --git a/linux-core/nouveau_fence.c b/linux-core/nouveau_fence.c new file mode 100644 index 00000000..4e624a7a --- /dev/null +++ b/linux-core/nouveau_fence.c @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2007 Ben Skeggs. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "drmP.h" +#include "drm.h" +#include "nouveau_drv.h" +#include "nouveau_dma.h" + +static int +nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; + +	DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); + +	/* DRM's channel always uses IRQs to signal fences */ +	if (class == dev_priv->channel.chan->id) +		return 1; + +	/* Other channels don't use IRQs at all yet */ +	return 0; +} + +static int +nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags, +		   uint32_t *breadcrumb, uint32_t *native_type) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_channel *chan = dev_priv->fifos[class]; +	struct nouveau_drm_channel *dchan = &dev_priv->channel; + +	DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags); + +	/* We can't emit fences on client channels, update sequence number +	 * and userspace will emit the fence +	 */ +	*breadcrumb  = ++chan->next_sequence; +	*native_type = DRM_FENCE_TYPE_EXE; +	if (chan != dchan->chan) { +		DRM_DEBUG("user fence 0x%08x\n", *breadcrumb); +		return 0; +	} + +	DRM_DEBUG("emit 0x%08x\n", *breadcrumb); +	BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1); +	OUT_RING  (*breadcrumb); +	BEGIN_RING(NvSubM2MF, 0x0150, 1); +	OUT_RING  (0); +	FIRE_RING (); + +	return 0; +} + +static void +nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct drm_fence_class_manager *fc = &dev->fm.fence_class[class]; +	struct nouveau_channel *chan = dev_priv->fifos[class]; +	uint32_t pending_types = 0; + +	DRM_DEBUG("class=%d\n", class); + +	pending_types = fc->pending_flush | +			((fc->pending_exe_flush) ? 
DRM_FENCE_TYPE_EXE : 0); +	DRM_DEBUG("pending: 0x%08x 0x%08x\n", pending_types, +					      fc->pending_flush); + +	if (pending_types) { +		uint32_t sequence = NV_READ(chan->ref_cnt); + +		DRM_DEBUG("got 0x%08x\n", sequence); +		drm_fence_handler(dev, class, sequence, pending_types, 0); +	} +} + +static void +nouveau_fence_poke_flush(struct drm_device *dev, uint32_t class) +{ +	struct drm_fence_manager *fm = &dev->fm; +	unsigned long flags; + +	DRM_DEBUG("class=%d\n", class); + +	write_lock_irqsave(&fm->lock, flags); +	nouveau_fence_perform_flush(dev, class); +	write_unlock_irqrestore(&fm->lock, flags); +} + +void +nouveau_fence_handler(struct drm_device *dev, int channel) +{ +	struct drm_fence_manager *fm = &dev->fm; + +	DRM_DEBUG("class=%d\n", channel); + +	write_lock(&fm->lock); +	nouveau_fence_perform_flush(dev, channel); +	write_unlock(&fm->lock); +} + +struct drm_fence_driver nouveau_fence_driver = { +	.num_classes	= 8, +	.wrap_diff	= (1 << 30), +	.flush_diff	= (1 << 29), +	.sequence_mask	= 0xffffffffU, +	.lazy_capable	= 1, +	.has_irq	= nouveau_fence_has_irq, +	.emit		= nouveau_fence_emit, +	.poke_flush	= nouveau_fence_poke_flush +}; diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index b86c5d7c..cc4d5a92 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -25,7 +25,7 @@ nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)  static int  nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, -		       struct page **pages) +		       struct page **pages, struct page *dummy_read_page)  {  	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;  	int p, d, o; @@ -41,8 +41,11 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,  	nvbe->pages_populated = d = 0;  	for (p = 0; p < num_pages; p++) {  		for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { +			struct page *page = pages[p]; +			if (!page) +				page = dummy_read_page;  			nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, -							 pages[p], o, +							 page, o,  							 NV_CTXDMA_PAGE_SIZE,  							 PCI_DMA_BIDIRECTIONAL);  			if (pci_dma_mapping_error(nvbe->pagelist[d])) { @@ -128,7 +131,7 @@ nouveau_sgdma_unbind(struct drm_ttm_backend *be)  	if (nvbe->is_bound) {  		struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;  		unsigned int pte; -		 +  		pte = nvbe->pte_start;  		while (pte < (nvbe->pte_start + nvbe->pages)) {  			uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; @@ -136,8 +139,8 @@ nouveau_sgdma_unbind(struct drm_ttm_backend *be)  			if (dev_priv->card_type < NV_50) {  				INSTANCE_WR(gpuobj, pte, pteval | 3);  			} else { -				INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010); -				INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004); +				INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21); +				INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);  			}  			pte++; @@ -218,15 +221,14 @@ nouveau_sgdma_init(struct drm_device *dev)  		return ret;  	} -	if (dev_priv->card_type < NV_50) { -		dev_priv->gart_info.sg_dummy_page = -			alloc_page(GFP_KERNEL|__GFP_DMA32); -		SetPageLocked(dev_priv->gart_info.sg_dummy_page); -		dev_priv->gart_info.sg_dummy_bus = -			pci_map_page(dev->pdev, -				     dev_priv->gart_info.sg_dummy_page, 0, -				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); +	dev_priv->gart_info.sg_dummy_page = +		alloc_page(GFP_KERNEL|__GFP_DMA32); +	SetPageLocked(dev_priv->gart_info.sg_dummy_page); +	dev_priv->gart_info.sg_dummy_bus = +		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0, +			     PAGE_SIZE, 
PCI_DMA_BIDIRECTIONAL); +	if (dev_priv->card_type < NV_50) {  		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and  		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE  		 * on those cards? */ @@ -242,8 +244,9 @@ nouveau_sgdma_init(struct drm_device *dev)  		}  	} else {  		for (i=0; i<obj_size; i+=8) { -			INSTANCE_WR(gpuobj, (i+0)/4, 0); //x00000010); -			INSTANCE_WR(gpuobj, (i+4)/4, 0); //0x00000004); +			INSTANCE_WR(gpuobj, (i+0)/4, +				    dev_priv->gart_info.sg_dummy_bus | 0x21); +			INSTANCE_WR(gpuobj, (i+4)/4, 0);  		}  	} @@ -299,7 +302,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)  	}  	dev_priv->gart_info.sg_handle = sgreq.handle; -	if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) { +	if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {  		DRM_ERROR("failed populate: %d\n", ret);  		return ret;  	} @@ -336,4 +339,3 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)  	DRM_ERROR("Unimplemented on NV50\n");  	return -EINVAL;  } - diff --git a/linux-core/r128_ioc32.c b/linux-core/r128_ioc32.c index 6b757576..64b16798 100644 --- a/linux-core/r128_ioc32.c +++ b/linux-core/r128_ioc32.c @@ -64,10 +64,10 @@ static int compat_r128_init(struct file *file, unsigned int cmd,  {  	drm_r128_init32_t init32;  	drm_r128_init_t __user *init; -	 +  	if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))  		return -EFAULT; -	 +  	init = compat_alloc_user_space(sizeof(*init));  	if (!access_ok(VERIFY_WRITE, init, sizeof(*init))  	    || __put_user(init32.func, &init->func) @@ -94,7 +94,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,  	    || __put_user(init32.agp_textures_offset,  			  &init->agp_textures_offset))  		return -EFAULT; -	 +  	return drm_ioctl(file->f_dentry->d_inode, file,  			 DRM_IOCTL_R128_INIT, (unsigned long)init);  } diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c index 39c35134..f0f3320e 100644 --- a/linux-core/radeon_drv.c +++ b/linux-core/radeon_drv.c @@ -49,7 +49,7 @@ static int dri_library_name(struct drm_device * dev, char * buf)  	return snprintf(buf, PAGE_SIZE, "%s\n",  		(family < CHIP_R200) ? "radeon" :  		((family < CHIP_R300) ? 
"r200" : - 		"r300")); +		"r300"));  }  static struct pci_device_id pciidlist[] = { diff --git a/linux-core/radeon_ioc32.c b/linux-core/radeon_ioc32.c index bc8aa35a..a842c743 100644 --- a/linux-core/radeon_ioc32.c +++ b/linux-core/radeon_ioc32.c @@ -136,7 +136,7 @@ typedef struct drm_radeon_stipple32 {  static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,  				    unsigned long arg)  { -	drm_radeon_stipple32_t __user *argp = (void __user *) arg; +	drm_radeon_stipple32_t __user *argp = (void __user *)arg;  	drm_radeon_stipple_t __user *request;  	u32 mask; @@ -176,7 +176,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,  	drm_radeon_tex_image32_t img32;  	drm_radeon_tex_image_t __user *image; -	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))  		return -EFAULT;  	if (req32.image == 0)  		return -EINVAL; @@ -223,7 +223,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,  	drm_radeon_vertex2_32_t req32;  	drm_radeon_vertex2_t __user *request; -	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))  		return -EFAULT;  	request = compat_alloc_user_space(sizeof(*request)); @@ -255,7 +255,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,  	drm_radeon_cmd_buffer32_t req32;  	drm_radeon_cmd_buffer_t __user *request; -	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))  		return -EFAULT;  	request = compat_alloc_user_space(sizeof(*request)); @@ -283,7 +283,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,  	drm_radeon_getparam32_t req32;  	drm_radeon_getparam_t __user *request; -	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))  		return -EFAULT;  	request = compat_alloc_user_space(sizeof(*request)); @@ -310,7 +310,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,  	drm_radeon_mem_alloc32_t req32;  	drm_radeon_mem_alloc_t __user *request; -	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))  		return -EFAULT;  	request = compat_alloc_user_space(sizeof(*request)); @@ -336,7 +336,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,  	drm_radeon_irq_emit32_t req32;  	drm_radeon_irq_emit_t __user *request; -	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))  		return -EFAULT;  	request = compat_alloc_user_space(sizeof(*request)); @@ -362,7 +362,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,  	drm_radeon_setparam32_t req32;  	drm_radeon_setparam_t __user *request; -	if (copy_from_user(&req32, (void __user *) arg, sizeof(req32))) +	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))  		return -EFAULT;  	request = compat_alloc_user_space(sizeof(*request)); diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 9222b08d..6782731d 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -74,7 +74,7 @@ static void sis_sman_mm_destroy(void *private)  	;  } -unsigned long sis_sman_mm_offset(void *private, void *ref) +static unsigned long sis_sman_mm_offset(void *private, void *ref)  {  	return ~((unsigned long)ref);  } 
@@ -114,12 +114,12 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file  	dev_priv->vram_offset = fb->offset;  	mutex_unlock(&dev->struct_mutex); -	DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); +	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);  	return 0;  } -static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, +static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,  			 void *data, int pool)  {  	drm_sis_private_t *dev_priv = dev->dev_private; @@ -204,7 +204,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,  	dev_priv->agp_offset = agp->offset;  	mutex_unlock(&dev->struct_mutex); -	DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); +	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);  	return 0;  } @@ -231,8 +231,7 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev)  	return NULL;  } -int -sis_idle(struct drm_device *dev) +int sis_idle(struct drm_device *dev)  {  	drm_sis_private_t *dev_priv = dev->dev_private;  	uint32_t idle_reg; @@ -249,7 +248,7 @@ sis_idle(struct drm_device *dev)  			return 0;  		}  	} -	 +  	/*  	 * Implement a device switch here if needed  	 */ diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index a6c59832..532fae6a 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -94,9 +94,9 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,  		man->drm_bus_maptype = 0;  		break; -	case DRM_BO_MEM_TT:  +	case DRM_BO_MEM_TT:  		/* Dynamic agpgart memory */ -		 +  		if (!(drm_core_has_AGP(dev) && dev->agp)) {  			DRM_ERROR("AGP is not enabled for memory type %u\n",  				  (unsigned)type); @@ -109,21 +109,21 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,  		/* Only to get pte protection right. */ -		man->drm_bus_maptype = _DRM_AGP;  +		man->drm_bus_maptype = _DRM_AGP;  		break; -	case DRM_BO_MEM_VRAM:  +	case DRM_BO_MEM_VRAM:  		/* "On-card" video ram */ -		 +  		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;  		man->drm_bus_maptype = _DRM_FRAME_BUFFER;  		man->io_addr = NULL;  		return via_vram_info(dev, &man->io_offset, &man->io_size);  		break; -	case DRM_BO_MEM_PRIV0:  +	case DRM_BO_MEM_PRIV0:  		/* Pre-bound agpgart memory */ -		 +  		if (!(drm_core_has_AGP(dev) && dev->agp)) {  			DRM_ERROR("AGP is not enabled for memory type %u\n",  				  (unsigned)type); @@ -144,7 +144,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,  	return 0;  } -uint32_t via_evict_mask(struct drm_buffer_object *bo) +uint64_t via_evict_flags(struct drm_buffer_object *bo)  {  	switch (bo->mem.mem_type) {  	case DRM_BO_MEM_LOCAL: diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index d44c26f4..a6a21782 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -1,5 +1,5 @@  /* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro - *  + *   * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.   *   * Permission is hereby granted, free of charge, to any person obtaining a @@ -16,22 +16,22 @@   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,   * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,  - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE   * USE OR OTHER DEALINGS IN THE SOFTWARE.   * - * Authors:  + * Authors:   *    Thomas Hellstrom.   *    Partially based on code obtained from Digeo Inc.   */  /* - * Unmaps the DMA mappings.  - * FIXME: Is this a NoOp on x86? Also  - * FIXME: What happens if this one is called and a pending blit has previously done  - * the same DMA mappings?  + * Unmaps the DMA mappings. + * FIXME: Is this a NoOp on x86? Also + * FIXME: What happens if this one is called and a pending blit has previously done + * the same DMA mappings?   */  #include "drmP.h" @@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)  	int num_desc = vsg->num_desc;  	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;  	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page; -	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +  +	drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +  		descriptor_this_page;  	dma_addr_t next = vsg->chain_start; @@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)  		if (descriptor_this_page-- == 0) {  			cur_descriptor_page--;  			descriptor_this_page = vsg->descriptors_per_page - 1; -			desc_ptr = vsg->desc_pages[cur_descriptor_page] +  +			desc_ptr = vsg->desc_pages[cur_descriptor_page] +  				descriptor_this_page;  		}  		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE); @@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)  static void  via_map_blit_for_device(struct pci_dev *pdev,  		   const drm_via_dmablit_t *xfer, -		   drm_via_sg_info_t *vsg,  +		   drm_via_sg_info_t *vsg,  		   int mode)  {  	unsigned cur_descriptor_page = 0; @@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,  	dma_addr_t next = 0 | VIA_DMA_DPR_EC;  	drm_via_descriptor_t *desc_ptr = NULL; -	if (mode == 1)  +	if (mode == 1)  		desc_ptr = vsg->desc_pages[cur_descriptor_page];  	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) { @@ -118,7 +118,7 @@ via_map_blit_for_device(struct pci_dev *pdev,  		line_len = xfer->line_length;  		cur_fb = fb_addr;  		cur_mem = mem_addr; -		 +  		while (line_len > 0) {  			remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len); @@ -131,10 +131,10 @@ via_map_blit_for_device(struct pci_dev *pdev,  					VIA_PGOFF(cur_mem), remaining_len,  					vsg->direction);  				desc_ptr->dev_addr = cur_fb; -				 +  				desc_ptr->size = remaining_len;  				desc_ptr->next = (uint32_t) next; -				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),  +				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),  						      DMA_TO_DEVICE);  				desc_ptr++;  				if (++num_descriptors_this_page >= vsg->descriptors_per_page) { @@ -142,12 +142,12 @@ via_map_blit_for_device(struct pci_dev *pdev,  					desc_ptr = vsg->desc_pages[++cur_descriptor_page];  				}  			} -			 +  			num_desc++;  			cur_mem += remaining_len;  			cur_fb += remaining_len;  		} -		 +  		mem_addr += xfer->mem_stride;  		
fb_addr += xfer->fb_stride;  	} @@ -160,14 +160,14 @@ via_map_blit_for_device(struct pci_dev *pdev,  }  /* - * Function that frees up all resources for a blit. It is usable even if the  + * Function that frees up all resources for a blit. It is usable even if the   * blit info has only been partially built as long as the status enum is consistent   * with the actual status of the used resources.   */  static void -via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)  +via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)  {  	struct page *page;  	int i; @@ -184,7 +184,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)  	case dr_via_pages_locked:  		for (i=0; i<vsg->num_pages; ++i) {  			if ( NULL != (page = vsg->pages[i])) { -				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))  +				if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))  					SetPageDirty(page);  				page_cache_release(page);  			} @@ -199,7 +199,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)  		vsg->bounce_buffer = NULL;  	}  	vsg->free_on_sequence = 0; -}		 +}  /*   * Fire a blit engine. @@ -212,7 +212,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)  	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);  	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0); -	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |  +	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |  		  VIA_DMA_CSR_DE);  	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);  	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0); @@ -232,20 +232,22 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)  {  	int ret;  	unsigned long first_pfn = VIA_PFN(xfer->mem_addr); -	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -  +	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -  		first_pfn + 1; -	 +  	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))  		return -ENOMEM;  	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);  	down_read(¤t->mm->mmap_sem); -	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr, -			     vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),  +	ret = get_user_pages(current, current->mm, +			     (unsigned long)xfer->mem_addr, +			     vsg->num_pages, +			     (vsg->direction == DMA_FROM_DEVICE),  			     0, vsg->pages, NULL);  	up_read(¤t->mm->mmap_sem);  	if (ret != vsg->num_pages) { -		if (ret < 0)  +		if (ret < 0)  			return ret;  		vsg->state = dr_via_pages_locked;  		return -EINVAL; @@ -261,22 +263,22 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)   * quite large for some blits, and pages don't need to be contingous.   
*/ -static int  +static int  via_alloc_desc_pages(drm_via_sg_info_t *vsg)  {  	int i; -	 +  	vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t); -	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /  +	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /  		vsg->descriptors_per_page; -	if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))  +	if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))  		return -ENOMEM; -	 +  	memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);  	vsg->state = dr_via_desc_pages_alloc;  	for (i=0; i<vsg->num_desc_pages; ++i) { -		if (NULL == (vsg->desc_pages[i] =  +		if (NULL == (vsg->desc_pages[i] =  			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))  			return -ENOMEM;  	} @@ -284,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)  		  vsg->num_desc);  	return 0;  } -			 +  static void  via_abort_dmablit(struct drm_device *dev, int engine)  { @@ -298,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)  {  	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; -	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);  +	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);  } @@ -309,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)   * task. Basically the task of the interrupt handler is to submit a new blit to the engine, while   * the workqueue task takes care of processing associated with the old blit.   */ -		 +  void  via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)  { @@ -329,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)  		spin_lock_irqsave(&blitq->blit_lock, irqsave);  	} -	done_transfer = blitq->is_active &&  +	done_transfer = blitq->is_active &&  	  (( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD); -	done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));  +	done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));  	cur = blitq->cur;  	if (done_transfer) {  		blitq->blits[cur]->aborted = blitq->aborting;  		blitq->done_blit_handle++; -		DRM_WAKEUP(blitq->blit_queue + cur);		 +		DRM_WAKEUP(blitq->blit_queue + cur);  		cur++; -		if (cur >= VIA_NUM_BLIT_SLOTS)  +		if (cur >= VIA_NUM_BLIT_SLOTS)  			cur = 0;  		blitq->cur = cur; @@ -353,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)  		blitq->is_active = 0;  		blitq->aborting = 0; -		schedule_work(&blitq->wq);	 +		schedule_work(&blitq->wq);  	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) { @@ -365,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)  		blitq->aborting = 1;  		blitq->end = jiffies + DRM_HZ;  	} -	  		 +  	if (!blitq->is_active) {  		if (blitq->num_outstanding) {  			via_fire_dmablit(dev, blitq->blits[cur], engine); @@ -383,14 +385,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)  			}  			via_dmablit_engine_off(dev, engine);  		} -	}		 +	}  	if (from_irq) {  		spin_unlock(&blitq->blit_lock);  	} else {  		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);  	} -}  +} @@ -426,13 +428,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que  	return active;  } -	 +  /*   * Sync. Wait for at least three seconds for the blit to be performed.   
*/  static int -via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)  +via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)  {  	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -441,12 +443,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)  	int ret = 0;  	if (via_dmablit_active(blitq, engine, handle, &queue)) { -		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,  +		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,  			    !via_dmablit_active(blitq, engine, handle, NULL));  	}  	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",  		  handle, engine, ret); -	 +  	return ret;  } @@ -468,12 +470,12 @@ via_dmablit_timer(unsigned long data)  	struct drm_device *dev = blitq->dev;  	int engine = (int)  		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); -		 -	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,  + +	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,  		  (unsigned long) jiffies);  	via_dmablit_handler(dev, engine, 0); -	 +  	if (!timer_pending(&blitq->poll_timer)) {  		blitq->poll_timer.expires = jiffies+1;  		add_timer(&blitq->poll_timer); @@ -497,7 +499,7 @@ via_dmablit_timer(unsigned long data)   */ -static void  +static void  #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)  via_dmablit_workqueue(void *data)  #else @@ -509,42 +511,42 @@ via_dmablit_workqueue(struct work_struct *work)  #else  	drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);  #endif -        struct drm_device *dev = blitq->dev; +	struct drm_device *dev = blitq->dev;  	unsigned long irqsave;  	drm_via_sg_info_t *cur_sg;  	int cur_released; -	 -	 -	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)  + + +	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)  		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));  	spin_lock_irqsave(&blitq->blit_lock, irqsave); -	 +  	while(blitq->serviced != blitq->cur) {  		cur_released = blitq->serviced++;  		DRM_DEBUG("Releasing blit slot %d\n", cur_released); -		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)  +		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)  			blitq->serviced = 0; -		 +  		cur_sg = blitq->blits[cur_released];  		blitq->num_free++; -				 +  		spin_unlock_irqrestore(&blitq->blit_lock, irqsave); -		 +  		DRM_WAKEUP(&blitq->busy_queue); -		 +  		via_free_sg_info(dev->pdev, cur_sg);  		kfree(cur_sg); -		 +  		spin_lock_irqsave(&blitq->blit_lock, irqsave);  	}  	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);  } -	 +  /*   * Init all blit engines. Currently we use two, but some hardware have 4. @@ -558,8 +560,8 @@ via_init_dmablit(struct drm_device *dev)  	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;  	drm_via_blitq_t *blitq; -	pci_set_master(dev->pdev);	 -	 +	pci_set_master(dev->pdev); +  	for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {  		blitq = dev_priv->blit_queues + i;  		blitq->dev = dev; @@ -585,20 +587,20 @@ via_init_dmablit(struct drm_device *dev)  		init_timer(&blitq->poll_timer);  		blitq->poll_timer.function = &via_dmablit_timer;  		blitq->poll_timer.data = (unsigned long) blitq; -	}	 +	}  }  /*   * Build all info and do all mappings required for a blit.   */ -		 +  static int  via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)  {  	int draw = xfer->to_fb;  	int ret = 0; -	 +  	vsg->direction = (draw) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE;  	vsg->bounce_buffer = NULL; @@ -612,7 +614,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli  	/*  	 * Below check is a driver limitation, not a hardware one. We  	 * don't want to lock unused pages, and don't want to incoporate the -	 * extra logic of avoiding them. Make sure there are no.  +	 * extra logic of avoiding them. Make sure there are no.  	 * (Not a big limitation anyway.)  	 */ @@ -638,11 +640,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli  	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {  		DRM_ERROR("Too large PCI DMA bitblt.\n");  		return -EINVAL; -	}		 +	} -	/*  +	/*  	 * we allow a negative fb stride to allow flipping of images in -	 * transfer.  +	 * transfer.  	 */  	if (xfer->mem_stride < xfer->line_length || @@ -668,7 +670,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli  	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {  		DRM_ERROR("Invalid DRM bitblt alignment.\n");  		return -EINVAL; -	}	 +	}  #endif  	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) { @@ -684,17 +686,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli  		return ret;  	}  	via_map_blit_for_device(dev->pdev, xfer, vsg, 1); -	 +  	return 0;  } -	 +  /*   * Reserve one free slot in the blit queue. Will wait for one second for one   * to become available. Otherwise -EBUSY is returned.   */ -static int  +static int  via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)  {  	int ret=0; @@ -709,10 +711,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)  		if (ret) {  			return (-EINTR == ret) ? -EAGAIN : ret;  		} -		 +  		spin_lock_irqsave(&blitq->blit_lock, irqsave);  	} -	 +  	blitq->num_free--;  	spin_unlock_irqrestore(&blitq->blit_lock, irqsave); @@ -723,7 +725,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)   * Hand back a free slot if we changed our mind.   */ -static void  +static void  via_dmablit_release_slot(drm_via_blitq_t *blitq)  {  	unsigned long irqsave; @@ -739,8 +741,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)   */ -static int  -via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)	  +static int +via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)  {  	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;  	drm_via_sg_info_t *vsg; @@ -771,15 +773,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)  	spin_lock_irqsave(&blitq->blit_lock, irqsave);  	blitq->blits[blitq->head++] = vsg; -	if (blitq->head >= VIA_NUM_BLIT_SLOTS)  +	if (blitq->head >= VIA_NUM_BLIT_SLOTS)  		blitq->head = 0;  	blitq->num_outstanding++; -	xfer->sync.sync_handle = ++blitq->cur_blit_handle;  +	xfer->sync.sync_handle = ++blitq->cur_blit_handle;  	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);  	xfer->sync.engine = engine; -       	via_dmablit_handler(dev, engine, 0); +	via_dmablit_handler(dev, engine, 0);  	return 0;  } @@ -787,7 +789,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)  /*   * Sync on a previously submitted blit. Note that the X server use signals extensively, and   * that there is a very big probability that this IOCTL will be interrupted by a signal. In that - * case it returns with -EAGAIN for the signal to be delivered.  + * case it returns with -EAGAIN for the signal to be delivered.   * The caller should then reissue the IOCTL. 
This is similar to what is being done for drmGetLock().   */ @@ -797,7 +799,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri  	drm_via_blitsync_t *sync = data;  	int err; -	if (sync->engine >= VIA_NUM_BLIT_ENGINES)  +	if (sync->engine >= VIA_NUM_BLIT_ENGINES)  		return -EINVAL;  	err = via_dmablit_sync(dev, sync->sync_handle, sync->engine); @@ -807,15 +809,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri  	return err;  } -	 +  /*   * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal - * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should  + * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should   * be reissued. See the above IOCTL code.   */ -int  +int  via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )  {  	drm_via_dmablit_t *xfer = data; diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h index 726ad25d..9b662a32 100644 --- a/linux-core/via_dmablit.h +++ b/linux-core/via_dmablit.h @@ -1,5 +1,5 @@  /* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro - *  + *   * Copyright 2005 Thomas Hellstrom.   * All Rights Reserved.   * @@ -17,12 +17,12 @@   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,   * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,  - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR  - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE  + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE   * USE OR OTHER DEALINGS IN THE SOFTWARE.   * - * Authors:  + * Authors:   *    Thomas Hellstrom.   *    Register info from Digeo Inc.   */ @@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {  	unsigned cur;  	unsigned num_free;  	unsigned num_outstanding; -	unsigned long end;   +	unsigned long end;  	int aborting;  	int is_active;  	drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS]; @@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {  	struct work_struct wq;  	struct timer_list poll_timer;  } drm_via_blitq_t; -	 -/*  + +/*   *  PCI DMA Registers   *  Channels 2 & 3 don't seem to be implemented in hardware.   
*/ -  -#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */  -#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */  -#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */  -#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */  - -#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */  -#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */  -#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */  -#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */  - -#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */  -#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */  -#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */  -#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */  - -#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */  -#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */  -#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */  -#define VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */  - -#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */  -#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */  -#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */  -#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */  - -#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */  -#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */  -#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */  -#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */  - -#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */  - -/* Define for DMA engine */  + +#define VIA_PCI_DMA_MAR0            0xE40   /* Memory Address Register of Channel 0 */ +#define VIA_PCI_DMA_DAR0            0xE44   /* Device Address Register of Channel 0 */ +#define VIA_PCI_DMA_BCR0            0xE48   /* Byte Count Register of Channel 0 */ +#define VIA_PCI_DMA_DPR0            0xE4C   /* Descriptor Pointer Register of Channel 0 */ + +#define VIA_PCI_DMA_MAR1            0xE50   /* Memory Address Register of Channel 1 */ +#define VIA_PCI_DMA_DAR1            0xE54   /* Device Address Register of Channel 1 */ +#define VIA_PCI_DMA_BCR1            0xE58   /* Byte Count Register of Channel 1 */ +#define VIA_PCI_DMA_DPR1            0xE5C   /* Descriptor Pointer Register of Channel 1 */ + +#define VIA_PCI_DMA_MAR2            0xE60   /* Memory Address Register of Channel 2 */ +#define VIA_PCI_DMA_DAR2            0xE64   /* Device Address Register of Channel 2 */ +#define VIA_PCI_DMA_BCR2            0xE68   /* Byte Count Register of Channel 2 */ +#define VIA_PCI_DMA_DPR2            0xE6C   /* Descriptor Pointer Register of Channel 2 */ + +#define VIA_PCI_DMA_MAR3            0xE70   /* Memory Address Register of Channel 3 */ +#define VIA_PCI_DMA_DAR3            0xE74   /* Device Address Register of Channel 3 */ +#define VIA_PCI_DMA_BCR3            0xE78   /* Byte Count Register of Channel 3 */ +#define 
VIA_PCI_DMA_DPR3            0xE7C   /* Descriptor Pointer Register of Channel 3 */ + +#define VIA_PCI_DMA_MR0             0xE80   /* Mode Register of Channel 0 */ +#define VIA_PCI_DMA_MR1             0xE84   /* Mode Register of Channel 1 */ +#define VIA_PCI_DMA_MR2             0xE88   /* Mode Register of Channel 2 */ +#define VIA_PCI_DMA_MR3             0xE8C   /* Mode Register of Channel 3 */ + +#define VIA_PCI_DMA_CSR0            0xE90   /* Command/Status Register of Channel 0 */ +#define VIA_PCI_DMA_CSR1            0xE94   /* Command/Status Register of Channel 1 */ +#define VIA_PCI_DMA_CSR2            0xE98   /* Command/Status Register of Channel 2 */ +#define VIA_PCI_DMA_CSR3            0xE9C   /* Command/Status Register of Channel 3 */ + +#define VIA_PCI_DMA_PTR             0xEA0   /* Priority Type Register */ + +/* Define for DMA engine */  /* DPR */  #define VIA_DMA_DPR_EC		(1<<1)	/* end of chain */  #define VIA_DMA_DPR_DDIE	(1<<2)	/* descriptor done interrupt enable */ diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index 35ca6bfc..3f75af38 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -53,7 +53,7 @@ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)  	dev_priv->agp_offset = agp->offset;  	mutex_unlock(&dev->struct_mutex); -	DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); +	DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);  	return 0;  } @@ -77,7 +77,7 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)  	dev_priv->vram_offset = fb->offset;  	mutex_unlock(&dev->struct_mutex); -	DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); +	DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);  	return 0; @@ -115,7 +115,7 @@ void via_lastclose(struct drm_device *dev)  	dev_priv->vram_initialized = 0;  	dev_priv->agp_initialized = 0;  	mutex_unlock(&dev->struct_mutex); -}	 +}  int via_mem_alloc(struct drm_device *dev, void *data,  		  struct drm_file *file_priv) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index d7b23c89..64401ae5 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -78,7 +78,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,   * @type: Type of the current batch   *   * See section 3.2.2 "Begin" (page 15) of the 3D SPG. - *  + *   * This function assumes that @type is on the range [0,3].   
*/  unsigned int get_batch_command(enum xgi_batch_type type) @@ -86,7 +86,7 @@ unsigned int get_batch_command(enum xgi_batch_type type)  	static const unsigned int ports[4] = {  		0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2  	}; -	 +  	return ports[type];  } @@ -159,7 +159,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,                  2 - fb                  3 - logout  */ -int xgi_state_change(struct xgi_info * info, unsigned int to,  +int xgi_state_change(struct xgi_info * info, unsigned int to,  		     unsigned int from)  {  #define STATE_CONSOLE   0 @@ -219,7 +219,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info)  		}  		xgi_waitfor_pci_idle(info); -		 +  		(void) memset(&info->cmdring, 0, sizeof(info->cmdring));  	}  } @@ -243,7 +243,7 @@ static void triggerHWCommandList(struct xgi_info * info)  void xgi_emit_flush(struct xgi_info * info, bool stop)  {  	const u32 flush_command[8] = { -		((0x10 << 24)  +		((0x10 << 24)  		 | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),  		BEGIN_LINK_ENABLE_MASK | (0x00004),  		0x00000000, 0x00000000, @@ -266,9 +266,9 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)  		info->cmdring.ring_offset = 0;  	} -	hw_addr = info->cmdring.ring_hw_base  +	hw_addr = info->cmdring.ring_hw_base  		+ info->cmdring.ring_offset; -	batch_addr = info->cmdring.ptr  +	batch_addr = info->cmdring.ptr  		+ (info->cmdring.ring_offset / 4);  	for (i = 0; i < (flush_size / 4); i++) { diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 4e66197e..4f0b4ed0 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -352,7 +352,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)  	struct drm_device *dev = (struct drm_device *) arg;  	struct xgi_info *info = dev->dev_private;  	const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map, -					(0x2800  +					(0x2800  					 + M2REG_AUTO_LINK_STATUS_ADDRESS)))  		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK  		   | M2REG_ACTIVE_INTERRUPT_0_MASK @@ -361,7 +361,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)  	if (irq_bits != 0) { -		DRM_WRITE32(info->mmio_map,  +		DRM_WRITE32(info->mmio_map,  			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,  			    cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));  		xgi_fence_handler(dev); @@ -413,7 +413,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags)  	return 0; -	 +  fail:  	drm_free(info, sizeof(*info), DRM_MEM_DRIVER);  	return err; diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index d9a94f5f..9408073e 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -64,7 +64,7 @@ struct xgi_info {  	struct drm_map *fb_map;  	/* look up table parameters */ -	struct ati_pcigart_info gart_info; +	struct drm_ati_pcigart_info gart_info;  	unsigned int lutPageSize;  	struct drm_sman sman; diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 2e2d0094..3f50fe8f 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -32,7 +32,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,  	      struct drm_file * filp)  {  	struct drm_memblock_item *block; -	const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)  +	const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)  		? "on-card" : "GART"; @@ -43,7 +43,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,  		return -EINVAL;  	} -	if ((alloc->location == XGI_MEMLOC_LOCAL)  +	if ((alloc->location == XGI_MEMLOC_LOCAL)  	    ? 
!info->fb_heap_initialized : !info->pcie_heap_initialized) {  		DRM_ERROR("Attempt to allocate from uninitialized memory "  			  "pool (0x%08x).\n", alloc->location); @@ -118,7 +118,7 @@ int xgi_free_ioctl(struct drm_device * dev, void * data,  int xgi_fb_heap_init(struct xgi_info * info)  {  	int err; -	 +  	mutex_lock(&info->dev->struct_mutex);  	err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,  				 XGI_FB_HEAP_START, diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index 526bc5db..9a75581a 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -72,7 +72,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)  int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, -			    uint32_t flags, uint32_t * sequence,  +			    uint32_t flags, uint32_t * sequence,  			    uint32_t * native_type)  {  	struct xgi_info * info = dev->dev_private; diff --git a/linux-core/xgi_ioc32.c b/linux-core/xgi_ioc32.c index c54044fa..e4338417 100644 --- a/linux-core/xgi_ioc32.c +++ b/linux-core/xgi_ioc32.c @@ -43,7 +43,7 @@ struct drm_map32 {  	u32 handle;		/**< User-space: "Handle" to pass to mmap() */  	int mtrr;		/**< MTRR slot used */  }; -	 +  struct drm32_xgi_bootstrap {  	struct drm_map32 gart;  }; diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index 4a4a9844..2b3a1788 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -90,7 +90,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)  	DRM_WRITE8(map, 0xb057, 8);  	while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) { -		while (0 != ((--time_out) & 0xfff))  +		while (0 != ((--time_out) & 0xfff))  			/* empty */ ;  		if (0 == time_out) { @@ -117,8 +117,8 @@ static void xgi_ge_hang_reset(struct drm_map * map)  			DRM_WRITE8(map, 0x3d4, 0x36);  			old_36 = DRM_READ8(map, 0x3d5);  			DRM_WRITE8(map, 0x3d5, old_36 | 0x10); -			 -			while (0 != ((--time_out) & 0xfff))  + +			while (0 != ((--time_out) & 0xfff))  				/* empty */ ;  			DRM_WRITE8(map, 0x3d5, old_36); @@ -134,7 +134,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)  	DRM_WRITE8(map, 0xb057, 0);  } -	 +  bool xgi_ge_irq_handler(struct xgi_info * info)  {  	const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810)); @@ -143,7 +143,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)  	/* Check GE on/off */  	if (0 == (0xffffc0f0 & int_status)) {  		if (0 != (0x1000 & int_status)) { -			/* We got GE stall interrupt.  +			/* We got GE stall interrupt.  			 */  			DRM_WRITE32(info->mmio_map, 0x2810,  				    cpu_to_le32(int_status | 0x04000000)); @@ -289,7 +289,7 @@ static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)  		printk("%1x ", i);  		for (j = 0; j < 0x10; j++) { -			u8 temp = DRM_READ8(info->mmio_map,  +			u8 temp = DRM_READ8(info->mmio_map,  					    regbase + (i * 0x10) + j);  			printk("%3x", temp);  		} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index af19a11a..5f9e4f09 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -1,5 +1,5 @@  /**************************************************************************** - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.   *   * All Rights Reserved.   * diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 5c0100a0..a897fd8a 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -4,7 +4,7 @@   * All Rights Reserved.   
*   * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files (the	 + * a copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including   * without limitation on the rights to use, copy, modify, merge,   * publish, distribute, sublicense, and/or sell copies of the Software, diff --git a/scripts/create_lk_drm.sh b/scripts/create_lk_drm.sh index ddfbf499..b18a9514 100755 --- a/scripts/create_lk_drm.sh +++ b/scripts/create_lk_drm.sh @@ -2,10 +2,10 @@  # script to create a Linux Kernel tree from the DRM tree for diffing etc..  #  # Original author - Dave Airlie (C) 2004 - airlied@linux.ie -# +# kernel_version to remove below (e.g. 2.6.24) -if [ $# -lt 1 ] ;then -	echo usage: $0 output_dir +if [ $# -lt 2 ] ;then +	echo usage: $0 output_dir kernel_version  	exit 1  fi @@ -15,43 +15,23 @@ if [ ! -d shared-core -o ! -d linux-core ]  ;then  fi  OUTDIR=$1/drivers/char/drm/ - +KERNEL_VERS=$2  echo "Copying kernel independent files" -mkdir -p $OUTDIR +mkdir -p $OUTDIR/.tmp  ( cd linux-core/ ; make drm_pciids.h ) -cp shared-core/*.[ch] $OUTDIR -cp linux-core/*.[ch] $OUTDIR -cp linux-core/Makefile.kernel $OUTDIR/Makefile +cp shared-core/*.[ch] $OUTDIR/.tmp +cp linux-core/*.[ch] $OUTDIR/.tmp +cp linux-core/Makefile.kernel $OUTDIR/.tmp/Makefile  echo "Copying 2.6 Kernel files" -cp linux-core/Kconfig $OUTDIR/ +cp linux-core/Kconfig $OUTDIR/.tmp +./scripts/drm-scripts-gentree.pl $KERNEL_VERS $OUTDIR/.tmp $OUTDIR  cd $OUTDIR +rm -rf .tmp  rm via_ds.[ch] -for i in via*.[ch] -do -unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DVIA_HAVE_CORE_MM $i > $i.tmp -mv $i.tmp $i -done -  rm sis_ds.[ch] -for i in sis*.[ch] -do -unifdef -D__linux__ -DVIA_HAVE_DMABLIT -DSIS_HAVE_CORE_MM $i > $i.tmp -mv $i.tmp $i -done - -for i in i915*.[ch] -do -unifdef -D__linux__ -DI915_HAVE_FENCE -DI915_HAVE_BUFFER $i > $i.tmp -mv $i.tmp $i -done - -for i in drm*.[ch] -do -unifdef -UDRM_ODD_MM_COMPAT -D__linux__ $i > $i.tmp -mv $i.tmp $i -done +  cd - diff --git a/scripts/drm-scripts-gentree.pl b/scripts/drm-scripts-gentree.pl new file mode 100755 index 00000000..cbc10175 --- /dev/null +++ b/scripts/drm-scripts-gentree.pl @@ -0,0 +1,254 @@ +#!/usr/bin/perl +# +# Original version were part of Gerd Knorr's v4l scripts. +# +# Several improvements by (c) 2005-2007 Mauro Carvalho Chehab +# +# Largely re-written (C) 2007 Trent Piepho <xyzzy@speakeasy.org> +# Stolen for DRM usage by airlied +# +# Theory of Operation +# +# This acts as a sort of mini version of cpp, which will process +# #if/#elif/#ifdef/etc directives to strip out code used to support +# multiple kernel versions or otherwise not wanted to be sent upstream to +# git. +# +# Conditional compilation directives fall into two catagories, +# "processed" and "other".  The "other" directives are ignored and simply +# output as they come in without changes (see 'keep' exception).  The +# "processed" variaty are evaluated and only the lines in the 'true' part +# are kept, like cpp would do. +# +# If gentree knows the result of an expression, that directive will be +# "processed", otherwise it will be an "other".  gentree knows the value +# of LINUX_VERSION_CODE, BTTV_VERSION_CODE, the KERNEL_VERSION(x,y,z) +# macro, numeric constants like 0 and 1, and a few defines like MM_KERNEL +# and STV0297_CS2. 
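+#
+# As a worked example (illustrative numbers, not from the script): for
+# kernel 2.6.24, kernel_version() packs LINUX_VERSION_CODE as
+# 2*65536 + 6*256 + 24 = 132632 (0x020618).  A directive such as
+# "#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)" therefore folds to
+# 132632 < 132628, i.e. false, so the #if body is dropped and only the
+# #else branch is copied into the output tree.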
+# +# An exception is if the comment "/*KEEP*/" appears after the expression, +# in which case that directive will be considered an "other" and not +# processed, other than to remove the keep comment. +# +# Known bugs: +# don't specify the root directory e.g. '/' or even '////' +# directives continued with a back-slash will always be ignored +# you can't modify a source tree in-place, i.e. source dir == dest dir + +use strict; +use File::Find; +use Fcntl ':mode'; + +my $VERSION = shift; +my $SRC = shift; +my $DESTDIR = shift; + +if (!defined($DESTDIR)) { +	print "Usage:\ngentree.pl\t<version> <source dir> <dest dir>\n\n"; +	exit; +} + +my $BTTVCODE = KERNEL_VERSION(0,9,17); +my ($LINUXCODE, $extra) = kernel_version($VERSION); +my $DEBUG = 0; + +my %defs = ( +	'LINUX_VERSION_CODE' => $LINUXCODE, +	'MM_KERNEL' => ($extra =~ /-mm/)?1:0, +	'DRM_ODD_MM_COMPAT' => 0, +	'I915_HAVE_FENCE' => 1, +	'I915_HAVE_BUFFER' => 1, +	'VIA_HAVE_DMABLIT' => 1, +	'VIA_HAVE_CORE_MM' => 1, +	'VIA_HAVE_FENCE' => 1, +        'VIA_HAVE_BUFFER' => 1, +	'SIS_HAVE_CORE_MM' => 1, +        'DRM_FULL_MM_COMPAT' => 1,    +	'__linux__' => 1, +); + +################################################################# +# helpers + +sub kernel_version($) { +	$_[0] =~ m/(\d+)\.(\d+)\.(\d+)(.*)/; +	return ($1*65536 + $2*256 + $3, $4); +} + +# used in eval() +sub KERNEL_VERSION($$$) { return $_[0]*65536 + $_[1]*256 + $_[2]; } + +sub evalexp($) { +	local $_ = shift; +	s|/\*.*?\*/||go;	# delete /* */ comments +	s|//.*$||o;		# delete // comments +	s/\bdefined\s*\(/(/go;	# defined(foo) to (foo) +	while (/\b([_A-Za-z]\w*)\b/go) { +		if (exists $defs{$1}) { +			my $id = $1; my $pos = $-[0]; +			s/$id/$defs{$id}/; +			pos = $-[0]; +		} elsif ($1 ne 'KERNEL_VERSION') { +			return(undef); +		} +	} +	return(eval($_) ? 1 : 0); +} + +################################################################# +# filter out version-specific code + +sub filter_source ($$) { +	my ($in,$out) = @_; +	my $line; +	my $level=0; +	my %if = (); +	my %state = (); + +	my @dbgargs = \($level, %state, %if, $line); +	sub dbgline($\@) { +		my $level = ${$_[1][0]}; +		printf STDERR ("/* BP %4d $_[0] state=$_[1][1]->{$level} if=$_[1][2]->{$level} level=$level (${$_[1][3]}) */\n", $.) if $DEBUG; +	} + +	open IN, '<', $in or die "Error opening $in: $!\n"; +	open OUT, '>', $out or die "Error opening $out: $!\n"; + +	print STDERR "File: $in, for kernel $VERSION($LINUXCODE)/\n" if $DEBUG; + +	while ($line = <IN>) { +		chomp $line; +		next if ($line =~ m/^#include \"compat.h\"/o); +#		next if ($line =~ m/[\$]Id:/); + +		# For "#if 0 /*KEEP*/;" the ; should be dropped too +		if ($line =~ m@^\s*#\s*if(n?def)?\s.*?(\s*/\*\s*(?i)keep\s*\*/;?)@) { +			$state{$level} = "ifother"; +			$if{$level} = 1; +			dbgline "#if$1 (keep)", @dbgargs; +			$line =~ s/\Q$2\E//; +			$level++; +		} +		# handle all ifdef/ifndef lines +		elsif ($line =~ /^\s*#\s*if(n?)def\s*(\w+)/o) { +			if (exists $defs{$2}) { +				$state{$level} = 'if'; +				$if{$level} = ($1 eq 'n') ? 
!$defs{$2} : $defs{$2}; +				dbgline "#if$1def $2", @dbgargs; +				$level++; +				next; +			} +			$state{$level} = "ifother"; +			$if{$level} = 1; +			dbgline "#if$1def (other)", @dbgargs; +			$level++; +		} +		# handle all ifs +		elsif ($line =~ /^\s*#\s*if\s+(.*)$/o) { +			my $res = evalexp($1); +			if (defined $res) { +				$state{$level} = 'if'; +				$if{$level} = $res; +				dbgline '#if '.($res?'(yes)':'(no)'), @dbgargs; +				$level++; +				next; +			} else { +				$state{$level} = 'ifother'; +				$if{$level} = 1; +				dbgline '#if (other)', @dbgargs; +				$level++; +			} +		} +		# handle all elifs +		elsif ($line =~ /^\s*#\s*elif\s+(.*)$/o) { +			my $exp = $1; +			$level--; +			$level < 0 and die "more elifs than ifs"; +			$state{$level} =~ /if/ or die "unmatched elif"; + +			if ($state{$level} eq 'if' && !$if{$level}) { +				my $res = evalexp($exp); +				defined $res or die 'moving from if to ifother'; +				$state{$level} = 'if'; +				$if{$level} = $res; +				dbgline '#elif1 '.($res?'(yes)':'(no)'), @dbgargs; +				$level++; +				next; +			} elsif ($state{$level} ne 'ifother') { +				$if{$level} = 0; +				$state{$level} = 'elif'; +				dbgline '#elif0', @dbgargs; +				$level++; +				next; +			} +			$level++; +		} +		elsif ($line =~ /^\s*#\s*else/o) { +			$level--; +			$level < 0 and die "more elses than ifs"; +			$state{$level} =~ /if/ or die "unmatched else"; +			$if{$level} = !$if{$level} if ($state{$level} eq 'if'); +			$state{$level} =~ s/^if/else/o; # if -> else, ifother -> elseother, elif -> elif +			dbgline '#else', @dbgargs; +			$level++; +			next if $state{$level-1} !~ /other$/o; +		} +		elsif ($line =~ /^\s*#\s*endif/o) { +			$level--; +			$level < 0 and die "more endifs than ifs"; +			dbgline '#endif', @dbgargs; +			next if $state{$level} !~ /other$/o; +		} + +		my $print = 1; +		for (my $i=0;$i<$level;$i++) { +			next if $state{$i} =~ /other$/o;	# keep code in ifother/elseother blocks +			if (!$if{$i}) { +				$print = 0; +				dbgline 'DEL', @{[\$i, \%state, \%if, \$line]}; +				last; +			} +		} +		print OUT "$line\n" if $print; +	} +	close IN; +	close OUT; +} + +################################################################# + +sub parse_dir { +	my $file = $File::Find::name; + +	return if ($file =~ /CVS/); +	return if ($file =~ /~$/); + +	my $f2 = $file; +	$f2 =~ s/^\Q$SRC\E/$DESTDIR/; + +	my $mode = (stat($file))[2]; +	if ($mode & S_IFDIR) { +		print("mkdir -p '$f2'\n"); +		system("mkdir -p '$f2'");  # should check for error +		return; +	} +	print "from $file to $f2\n"; + +	if ($file =~ m/.*\.[ch]$/) { +		filter_source($file, $f2); +	} else { +		system("cp $file $f2"); +	} +} + + +# main + +printf "kernel is %s (0x%x)\n",$VERSION,$LINUXCODE; + +# remove any trailing slashes from dir names.  
don't pass in just '/'
+$SRC =~ s|/*$||; $DESTDIR =~ s|/*$||;
+
+print "finding files at $SRC\n";
+
+find({wanted => \&parse_dir, no_chdir => 1}, $SRC);
diff --git a/shared-core/drm.h b/shared-core/drm.h
index cbd6a941..52de596b 100644
--- a/shared-core/drm.h
+++ b/shared-core/drm.h
@@ -249,7 +249,8 @@ enum drm_map_flags {
 	_DRM_KERNEL = 0x08,	     /**< kernel requires access */
 	_DRM_WRITE_COMBINING = 0x10, /**< use write-combining if available */
 	_DRM_CONTAINS_LOCK = 0x20,   /**< SHM page that contains lock */
-	_DRM_REMOVABLE = 0x40	     /**< Removable mapping */
+	_DRM_REMOVABLE = 0x40,	     /**< Removable mapping */
+	_DRM_DRIVER = 0x80	     /**< Managed by driver */
 };
 
 struct drm_ctx_priv_map {
@@ -661,7 +662,7 @@ struct drm_fence_arg {
 	unsigned int signaled;
 	unsigned int error;
 	unsigned int sequence;
-        unsigned int pad64;
+	unsigned int pad64;
 	uint64_t expand_pad[2]; /*Future expansion */
 };
 
@@ -676,6 +677,10 @@ struct drm_fence_arg {
 #define DRM_BO_FLAG_EXE         (1ULL << 2)
 
 /*
+ * All of the bits related to access mode
+ */
+#define DRM_BO_MASK_ACCESS	(DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_EXE)
+/*
  * Status flags. Can be read to determine the actual state of a buffer.
  * Can also be set in the buffer mask before validation.
  */
@@ -715,10 +720,21 @@ struct drm_fence_arg {
  */
 #define DRM_BO_FLAG_NO_MOVE     (1ULL << 8)
 
-/* Mask: Make sure the buffer is in cached memory when mapped for reading.
+/* Mask: Make sure the buffer is in cached memory when mapped.  In conjunction
+ * with DRM_BO_FLAG_CACHED it also allows the buffer to be bound into the GART
+ * with unsnooped PTEs instead of snooped, by using chipset-specific cache
+ * flushing at bind time.  A better name might be DRM_BO_FLAG_TT_UNSNOOPED,
+ * as the eviction to local memory (TTM unbind) on map is just a side effect
+ * to prevent aggressive cache prefetch from the GPU disturbing the cache
+ * management that the DRM is doing.
+ *
  * Flags: Acknowledge.
+ * Buffers allocated with this flag should not be used for suballocators.
+ * This type may have issues on CPUs with over-aggressive caching:
+ * http://marc.info/?l=linux-kernel&m=102376926732464&w=2
  */
-#define DRM_BO_FLAG_READ_CACHED    (1ULL << 19)
+#define DRM_BO_FLAG_CACHED_MAPPED    (1ULL << 19)
+
 
 /* Mask: Force DRM_BO_FLAG_CACHED flag strictly also if it is set.
  * Flags: Acknowledge.
@@ -751,18 +767,50 @@ struct drm_fence_arg {
 #define DRM_BO_FLAG_MEM_PRIV4  (1ULL << 31)
 /* We can add more of these now with a 64-bit flag type */
 
-/* Memory flag mask */
+/*
+ * This is a mask covering all of the memory type flags; easier to just
+ * use a single constant than a bunch of | values. It covers
+ * DRM_BO_FLAG_MEM_LOCAL through DRM_BO_FLAG_MEM_PRIV4
+ */
 #define DRM_BO_MASK_MEM         0x00000000FF000000ULL
-#define DRM_BO_MASK_MEMTYPE     0x00000000FF0000A0ULL
-
+/*
+ * This adds all of the CPU-mapping options in with the memory
+ * type to label all bits which change how the page gets mapped
+ */
+#define DRM_BO_MASK_MEMTYPE     (DRM_BO_MASK_MEM | \
+				 DRM_BO_FLAG_CACHED_MAPPED | \
+				 DRM_BO_FLAG_CACHED | \
+				 DRM_BO_FLAG_MAPPABLE)
+
 /* Driver-private flags */
 #define DRM_BO_MASK_DRIVER      0xFFFF000000000000ULL
 
-/* Don't block on validate and map */
+/*
+ * Don't block on validate and map. Instead, return EBUSY.
+ */
 #define DRM_BO_HINT_DONT_BLOCK  0x00000002
-/* Don't place this buffer on the unfenced list.*/
+/*
+ * Don't place this buffer on the unfenced list. This means
+ * that the buffer will not end up having a fence associated
+ * with it as a result of this operation.
+ */
 #define DRM_BO_HINT_DONT_FENCE  0x00000004
+/*
+ * Sleep while waiting for the operation to complete.
+ * Without this flag, the kernel will, instead, spin
+ * until this operation has completed. I'm not sure
+ * why you would ever want this, so please always
+ * provide DRM_BO_HINT_WAIT_LAZY to any operation
+ * which may block.
+ */
 #define DRM_BO_HINT_WAIT_LAZY   0x00000008
+/*
+ * The client has computed relocations referring to this buffer using the
+ * offset in the presumed_offset field. If that offset ends up matching
+ * where this buffer lands, the kernel is free to skip executing those
+ * relocations.
+ */
+#define DRM_BO_HINT_PRESUMED_OFFSET 0x00000010
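
A minimal sketch of the client side of this hint, using the drm_bo_info_req
fields added later in this patch (the surrounding op-list/ioctl packing is
driver-specific and elided; req here is simply the validate request for one
buffer):

	/* Illustrative only: advertise where we believe the buffer is bound. */
	req->handle = bo_handle;               /* buffer to validate */
	req->presumed_offset = cached_offset;  /* offset our relocations assumed */
	req->hint |= DRM_BO_HINT_PRESUMED_OFFSET;

If validation leaves the buffer at cached_offset, the kernel marks the guess
correct and relocations against this buffer become no-ops; if the buffer
moved, it falls back to patching them, so a stale guess costs nothing beyond
the lost fast path.
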
 
 #define DRM_BO_INIT_MAGIC 0xfe769812
 #define DRM_BO_INIT_MAJOR 1
@@ -779,10 +827,11 @@ struct drm_bo_info_req {
 	unsigned int desired_tile_stride;
 	unsigned int tile_info;
 	unsigned int pad64;
+	uint64_t presumed_offset;
 };
 
 struct drm_bo_create_req {
-	uint64_t mask;
+	uint64_t flags;
 	uint64_t size;
 	uint64_t buffer_start;
 	unsigned int hint;
@@ -798,7 +847,7 @@ struct drm_bo_create_req {
 
 struct drm_bo_info_rep {
 	uint64_t flags;
-	uint64_t mask;
+	uint64_t proposed_flags;
 	uint64_t size;
 	uint64_t offset;
 	uint64_t arg_handle;
@@ -889,7 +938,7 @@ struct drm_bo_version_arg {
 
 struct drm_mm_type_arg {
 	unsigned int mem_type;
-        unsigned int lock_flags;
+	unsigned int lock_flags;
 };
 
 struct drm_mm_init_arg {
@@ -936,7 +985,7 @@ struct drm_mm_init_arg {
 #define DRM_IOCTL_RM_MAP		DRM_IOW( 0x1b, struct drm_map)
 
 #define DRM_IOCTL_SET_SAREA_CTX		DRM_IOW( 0x1c, struct drm_ctx_priv_map)
-#define DRM_IOCTL_GET_SAREA_CTX 	DRM_IOWR(0x1d, struct drm_ctx_priv_map)
+#define DRM_IOCTL_GET_SAREA_CTX		DRM_IOWR(0x1d, struct drm_ctx_priv_map)
 
 #define DRM_IOCTL_ADD_CTX		DRM_IOWR(0x20, struct drm_ctx)
 #define DRM_IOCTL_RM_CTX		DRM_IOWR(0x21, struct drm_ctx)
diff --git a/shared-core/drm_internal.h b/shared-core/drm_internal.h
new file mode 100644
index 00000000..b82a189d
--- /dev/null
+++ b/shared-core/drm_internal.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright 2007 Red Hat, Inc
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */ + +/* This header file holds function prototypes and data types that are + * internal to the drm (not exported to user space) but shared across + * drivers and platforms */ + +#ifndef __DRM_INTERNAL_H__ +#define __DRM_INTERNAL_H__ + +/** + * Drawable information. + */ +struct drm_drawable_info { +	unsigned int num_rects; +	struct drm_clip_rect *rects; +}; + +#endif diff --git a/shared-core/drm_pciids.txt b/shared-core/drm_pciids.txt index 05d32f2e..83fbc90d 100644 --- a/shared-core/drm_pciids.txt +++ b/shared-core/drm_pciids.txt @@ -135,6 +135,101 @@  0x1002 0x5e4c CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE"  0x1002 0x5e4d CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700"  0x1002 0x5e4f CHIP_RV410|RADEON_NEW_MEMMAP "ATI Radeon RV410 X700 SE" +0x1002 0x7100 CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800" +0x1002 0x7101 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1800 XT" +0x1002 0x7102 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1800" +0x1002 0x7103 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V7200" +0x1002 0x7104 CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V7200" +0x1002 0x7105 CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V5300" +0x1002 0x7106 CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V7100" +0x1002 0x7108 CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800" +0x1002 0x7109 CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800" +0x1002 0x710A CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800" +0x1002 0x710B CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800" +0x1002 0x710C CHIP_R520|RADEON_NEW_MEMMAP "ATI Radeon X1800" +0x1002 0x710E CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V7300" +0x1002 0x710F CHIP_R520|RADEON_NEW_MEMMAP "ATI FireGL V7350" +0x1002 0x7140 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1600" +0x1002 0x7141 CHIP_RV515|RADEON_NEW_MEMMAP "ATI RV505" +0x1002 0x7142 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550" +0x1002 0x7143 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550" +0x1002 0x7144 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI M54-GL" +0x1002 0x7145 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1400" +0x1002 0x7146 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550" +0x1002 0x7147 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550 64-bit" +0x1002 0x7149 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300" +0x1002 0x714A CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300" +0x1002 0x714B CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300" +0x1002 0x714C CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1300" +0x1002 0x714D CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300" +0x1002 0x714E CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300" +0x1002 0x714F CHIP_RV515|RADEON_NEW_MEMMAP "ATI RV505" +0x1002 0x7151 CHIP_RV515|RADEON_NEW_MEMMAP "ATI RV505" +0x1002 0x7152 CHIP_RV515|RADEON_NEW_MEMMAP "ATI FireGL V3300" +0x1002 0x7153 CHIP_RV515|RADEON_NEW_MEMMAP "ATI FireGL V3350" +0x1002 0x715E CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300" +0x1002 0x715F CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550 64-bit" +0x1002 0x7180 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550" +0x1002 0x7181 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1600" +0x1002 0x7183 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550" +0x1002 0x7186 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1450" +0x1002 0x7187 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300/X1550" +0x1002 0x7188 
CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X2300" +0x1002 0x718A CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X2300" +0x1002 0x718B CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1350" +0x1002 0x718C CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1350" +0x1002 0x718D CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1450" +0x1002 0x718F CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1300" +0x1002 0x7193 CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550" +0x1002 0x7196 CHIP_RV515|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1350" +0x1002 0x719B CHIP_RV515|RADEON_NEW_MEMMAP "ATI FireMV 2250" +0x1002 0x719F CHIP_RV515|RADEON_NEW_MEMMAP "ATI Radeon X1550 64-bit" +0x1002 0x71C0 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600" +0x1002 0x71C1 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1650" +0x1002 0x71C2 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600" +0x1002 0x71C3 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600" +0x1002 0x71C4 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5200" +0x1002 0x71C5 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1600" +0x1002 0x71C6 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1650" +0x1002 0x71C7 CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1650" +0x1002 0x71CD CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1600" +0x1002 0x71CE CHIP_RV530|RADEON_NEW_MEMMAP "ATI Radeon X1300 XT/X1600 Pro" +0x1002 0x71D2 CHIP_RV530|RADEON_NEW_MEMMAP "ATI FireGL V3400" +0x1002 0x71D4 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility FireGL V5250" +0x1002 0x71D5 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700" +0x1002 0x71D6 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700 XT" +0x1002 0x71DA CHIP_RV530|RADEON_NEW_MEMMAP "ATI FireGL V5200" +0x1002 0x71DE CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1700" +0x1002 0x7200 CHIP_RV530|RADEON_NEW_MEMMAP "ATI  Radeon X2300HD" +0x1002 0x7210 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300" +0x1002 0x7211 CHIP_RV530|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon HD 2300" +0x1002 0x7240 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1950" +0x1002 0x7243 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x7244 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1950" +0x1002 0x7245 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x7246 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x7247 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x7248 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x7249 CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x724A CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x724B CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x724C CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x724D CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x724E CHIP_R580|RADEON_NEW_MEMMAP "ATI AMD Stream Processor" +0x1002 0x724F CHIP_R580|RADEON_NEW_MEMMAP "ATI Radeon X1900" +0x1002 0x7280 CHIP_RV570|RADEON_NEW_MEMMAP "ATI Radeon X1950" +0x1002 0x7281 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560" +0x1002 0x7283 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560" +0x1002 0x7284 CHIP_R580|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Mobility Radeon X1900" +0x1002 0x7287 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560" +0x1002 0x7288 CHIP_RV570|RADEON_NEW_MEMMAP "ATI Radeon X1950 GT" +0x1002 0x7289 CHIP_RV570|RADEON_NEW_MEMMAP "ATI RV570" 
+0x1002 0x728B CHIP_RV570|RADEON_NEW_MEMMAP "ATI RV570" +0x1002 0x728C CHIP_RV570|RADEON_NEW_MEMMAP "ATI ATI FireGL V7400" +0x1002 0x7290 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560" +0x1002 0x7291 CHIP_RV560|RADEON_NEW_MEMMAP "ATI Radeon X1650" +0x1002 0x7293 CHIP_RV560|RADEON_NEW_MEMMAP "ATI Radeon X1650" +0x1002 0x7297 CHIP_RV560|RADEON_NEW_MEMMAP "ATI RV560"  0x1002 0x7834 CHIP_RS300|RADEON_IS_IGP|RADEON_NEW_MEMMAP "ATI Radeon RS350 9000/9100 IGP"  0x1002 0x7835 CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP "ATI Radeon RS350 Mobility IGP" @@ -232,6 +327,7 @@  0x1106 0x3343 0 "VIA P4M890"  0x1106 0x3230 VIA_DX9_0 "VIA K8M890"  0x1106 0x3157 VIA_PRO_GROUP_A "VIA CX700" +0x1106 0x3371 VIA_DX9_0 "VIA P4M900 / VN896"  [i810]  0x8086 0x7121 0 "Intel i810 GMCH" @@ -294,6 +390,7 @@  0x8086 0x29C2 CHIP_I9XX|CHIP_I915 "Intel G33"  0x8086 0x29B2 CHIP_I9XX|CHIP_I915 "Intel Q35"  0x8086 0x29D2 CHIP_I9XX|CHIP_I915 "Intel Q33" +0x8086 0x2A42 CHIP_I9XX|CHIP_I965 "Intel Integrated Graphics Device"  [imagine]  0x105d 0x2309 IMAGINE_128 "Imagine 128" diff --git a/shared-core/drm_sarea.h b/shared-core/drm_sarea.h index 34050a6d..8b677522 100644 --- a/shared-core/drm_sarea.h +++ b/shared-core/drm_sarea.h @@ -45,7 +45,7 @@  #endif  /** Maximum number of drawables in the SAREA */ -#define SAREA_MAX_DRAWABLES 		256 +#define SAREA_MAX_DRAWABLES		256  #define SAREA_DRAWABLE_CLAIMED_ENTRY    0x80000000 diff --git a/shared-core/i915_dma.c b/shared-core/i915_dma.c index 24a4ec4a..a36ca37e 100644 --- a/shared-core/i915_dma.c +++ b/shared-core/i915_dma.c @@ -3,7 +3,7 @@  /*   * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.   * All Rights Reserved. - *  + *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including @@ -11,11 +11,11 @@   * distribute, sub license, and/or sell copies of the Software, and to   * permit persons to whom the Software is furnished to do so, subject to   * the following conditions: - *  + *   * The above copyright notice and this permission notice (including the   * next paragraph) shall be included in all copies or substantial portions   * of the Software. - *  + *   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS   * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF   * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. @@ -23,7 +23,7 @@   * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE   * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - *  + *   */  #include "drmP.h" @@ -51,8 +51,6 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)  		if (ring->space >= n)  			return 0; -		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; -  		if (ring->head != last_head)  			i = 0; @@ -73,9 +71,6 @@ void i915_kernel_lost_context(struct drm_device * dev)  	ring->space = ring->head - (ring->tail + 8);  	if (ring->space < 0)  		ring->space += ring->Size; - -	if (ring->head == ring->tail) -		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;  }  static int i915_dma_cleanup(struct drm_device * dev) @@ -165,6 +160,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)  	 * private backbuffer/depthbuffer usage.  	 
*/  	dev_priv->use_mi_batchbuffer_start = 0; +	if (IS_I965G(dev)) /* 965 doesn't support older method */ +		dev_priv->use_mi_batchbuffer_start = 1;  	/* Allow hardware batchbuffers unless told otherwise.  	 */ @@ -172,11 +169,11 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)  	/* Enable vblank on pipe A for older X servers  	 */ -    	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; +	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;  	/* Program Hardware Status Page */  	if (!IS_G33(dev)) { -		dev_priv->status_page_dmah =  +		dev_priv->status_page_dmah =  			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);  		if (!dev_priv->status_page_dmah) { @@ -192,7 +189,9 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)  		I915_WRITE(0x02080, dev_priv->dma_status_page);  	}  	DRM_DEBUG("Enabled hardware status page\n"); +#ifdef I915_HAVE_BUFFER  	mutex_init(&dev_priv->cmdbuf_mutex); +#endif  	return 0;  } @@ -200,7 +199,7 @@ static int i915_dma_resume(struct drm_device * dev)  {  	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	if (!dev_priv->sarea) {  		DRM_ERROR("can not find sarea!\n"); @@ -329,12 +328,12 @@ static int validate_cmd(int cmd)  {  	int ret = do_validate_cmd(cmd); -/* 	printk("validate_cmd( %x ): %d\n", cmd, ret); */ +/*	printk("validate_cmd( %x ): %d\n", cmd, ret); */  	return ret;  } -static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, +static int i915_emit_cmds(struct drm_device *dev, int __user *buffer,  			  int dwords)  {  	drm_i915_private_t *dev_priv = dev->dev_private; @@ -365,12 +364,12 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer,  			OUT_RING(cmd);  		}  	} -		 +  	if (dwords & 1)  		OUT_RING(0);  	ADVANCE_LP_RING(); -		 +  	return 0;  } @@ -414,7 +413,7 @@ static int i915_emit_box(struct drm_device * dev,  }  /* XXX: Emitting the counter should really be moved to part of the IRQ - * emit.  For now, do it in both places: + * emit. 
For now, do it in both places:
  */
 
 void i915_emit_breadcrumb(struct drm_device *dev)
@@ -490,7 +489,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
 			return ret;
 	}
 
-	i915_emit_breadcrumb( dev );
+	i915_emit_breadcrumb(dev);
 #ifdef I915_HAVE_FENCE
 	drm_fence_flush_old(dev, 0, dev_priv->counter);
 #endif
@@ -544,7 +543,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 		}
 	}
 
-	i915_emit_breadcrumb( dev );
+	i915_emit_breadcrumb(dev);
 #ifdef I915_HAVE_FENCE
 	drm_fence_flush_old(dev, 0, dev_priv->counter);
 #endif
@@ -608,8 +607,7 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i;
 
-	DRM_DEBUG("%s: planes=0x%x pfCurrentPage=%d\n",
-		  __FUNCTION__,
+	DRM_DEBUG("planes=0x%x pfCurrentPage=%d\n",
 		  planes, dev_priv->sarea_priv->pf_current_page);
 
 	i915_emit_mi_flush(dev, MI_READ_FLUSH | MI_EXE_FLUSH);
@@ -625,7 +623,7 @@ void i915_dispatch_flip(struct drm_device * dev, int planes, int sync)
 #endif
 }
 
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
@@ -704,7 +702,14 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	return 0;
 }
 
+#if DRM_DEBUG_CODE
+#define DRM_DEBUG_RELOCATION	(drm_debug != 0)
+#else
+#define DRM_DEBUG_RELOCATION	0
+#endif
+
 #ifdef I915_HAVE_BUFFER
+
 struct i915_relocatee_info {
 	struct drm_buffer_object *buf;
 	unsigned long offset;
@@ -714,15 +719,20 @@ struct i915_relocatee_info {
 	int is_iomem;
 };
 
-static void i915_dereference_buffers_locked(struct drm_buffer_object **buffers,
+struct drm_i915_validate_buffer {
+	struct drm_buffer_object *buffer;
+	int presumed_offset_correct;
+};
+
+static void i915_dereference_buffers_locked(struct drm_i915_validate_buffer *buffers,
 					    unsigned num_buffers)
 {
 	while (num_buffers--)
-		drm_bo_usage_deref_locked(&buffers[num_buffers]);
+		drm_bo_usage_deref_locked(&buffers[num_buffers].buffer);
 }
 
 int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
-		     struct drm_buffer_object **buffers,
+		     struct drm_i915_validate_buffer *buffers,
 		     struct i915_relocatee_info *relocatee,
 		     uint32_t *reloc)
 {
@@ -736,11 +746,25 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
 		return -EINVAL;
 	}
 
+	/*
+	 * Short-circuit relocations that were correctly
+	 * guessed by the client
+	 */
+	if (buffers[reloc[2]].presumed_offset_correct && !DRM_DEBUG_RELOCATION)
+		return 0;
+
 	new_cmd_offset = reloc[0];
 	if (!relocatee->data_page ||
 	    !drm_bo_same_page(relocatee->offset, new_cmd_offset)) {
 		drm_bo_kunmap(&relocatee->kmap);
 		relocatee->offset = new_cmd_offset;
+		mutex_lock (&relocatee->buf->mutex);
+		ret = drm_bo_wait (relocatee->buf, 0, 0, FALSE);
+		mutex_unlock (&relocatee->buf->mutex);
+		if (ret) {
+			DRM_ERROR("Could not wait for buffer to apply relocs: 0x%08lx\n", new_cmd_offset);
+			return ret;
+		}
 		ret = drm_bo_kmap(relocatee->buf, new_cmd_offset >> PAGE_SHIFT,
 				  1, &relocatee->kmap);
 		if (ret) {
@@ -753,12 +777,19 @@ int i915_apply_reloc(struct drm_file *file_priv, int num_buffers,
 		relocatee->page_offset = (relocatee->offset & PAGE_MASK);
 	}
 
-	val = buffers[reloc[2]]->offset;
+	val = buffers[reloc[2]].buffer->offset;
 	index = (reloc[0] - relocatee->page_offset) >> 2;
 
 	/* add in validate */
 	val = val + reloc[1];
 
+	if (DRM_DEBUG_RELOCATION) {
+		if (buffers[reloc[2]].presumed_offset_correct &&
+		    relocatee->data_page[index] != val) {
+			DRM_DEBUG ("Relocation mismatch source %d target %d buffer %d user %08x kernel %08x\n",
+				   reloc[0], reloc[1], reloc[2], relocatee->data_page[index], val);
+		}
+	}
 	relocatee->data_page[index] = val;
 	return 0;
 }
@@ -767,7 +798,7 @@ int i915_process_relocs(struct drm_file *file_priv,
 			uint32_t buf_handle,
 			uint32_t *reloc_buf_handle,
 			struct i915_relocatee_info *relocatee,
-			struct drm_buffer_object **buffers,
+			struct drm_i915_validate_buffer *buffers,
 			uint32_t num_buffers)
 {
 	struct drm_device *dev = file_priv->head->dev;
@@ -853,15 +884,30 @@ out:
 
 static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
 			   drm_handle_t buf_reloc_handle,
-			   struct drm_buffer_object **buffers,
+			   struct drm_i915_validate_buffer *buffers,
 			   uint32_t buf_count)
 {
 	struct drm_device *dev = file_priv->head->dev;
 	struct i915_relocatee_info relocatee;
 	int ret = 0;
+	int b;
 
-	memset(&relocatee, 0, sizeof(relocatee));
+	/*
+	 * Short-circuit relocations when all previous
+	 * buffer offsets were correctly guessed by
+	 * the client
+	 */
+	if (!DRM_DEBUG_RELOCATION) {
+		for (b = 0; b < buf_count; b++)
+			if (!buffers[b].presumed_offset_correct)
+				break;
+		if (b == buf_count)
+			return 0;
+	}
+
+	memset(&relocatee, 0, sizeof(relocatee));
+
 	mutex_lock(&dev->struct_mutex);
 	relocatee.buf = drm_lookup_buffer_object(file_priv, buf_handle, 1);
 	mutex_unlock(&dev->struct_mutex);
@@ -870,7 +916,7 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
 		ret = -EINVAL;
 		goto out_err;
 	}
-	
+
 	while (buf_reloc_handle) {
 		ret = i915_process_relocs(file_priv, buf_handle, &buf_reloc_handle, &relocatee, buffers, buf_count);
 		if (ret) {
@@ -878,11 +924,11 @@ static int i915_exec_reloc(struct drm_file *file_priv, drm_handle_t buf_handle,
 			break;
 		}
 	}
-	
+
 	mutex_lock(&dev->struct_mutex);
 	drm_bo_usage_deref_locked(&relocatee.buf);
 	mutex_unlock(&dev->struct_mutex);
-	
+
 out_err:
 	return ret;
 }
@@ -892,7 +938,7 @@ out_err:
  */
 int i915_validate_buffer_list(struct drm_file *file_priv,
 			      unsigned int fence_class, uint64_t data,
-			      struct drm_buffer_object **buffers,
+			      struct drm_i915_validate_buffer *buffers,
 			      uint32_t *num_buffers)
 {
 	struct drm_i915_op_arg arg;
@@ -912,9 +958,10 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
 			goto out_err;
 		}
 
-		buffers[buf_count] = NULL;
+		buffers[buf_count].buffer = NULL;
+		buffers[buf_count].presumed_offset_correct = 0;
 
-		if (copy_from_user(&arg, (void __user *)(unsigned)data, sizeof(arg))) {
+		if (copy_from_user(&arg, (void __user *)(unsigned long)data, sizeof(arg))) {
 			ret = -EFAULT;
 			goto out_err;
 		}
@@ -922,7 +969,7 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
 		if (arg.handled) {
 			data = arg.next;
 			mutex_lock(&dev->struct_mutex);
-			buffers[buf_count] = drm_lookup_buffer_object(file_priv, req->arg_handle, 1);
+			buffers[buf_count].buffer = drm_lookup_buffer_object(file_priv, req->arg_handle, 1);
 			mutex_unlock(&dev->struct_mutex);
 			buf_count++;
 			continue;
@@ -947,25 +994,31 @@ int i915_validate_buffer_list(struct drm_file *file_priv,
 		}
 
 		rep.ret = drm_bo_handle_validate(file_priv, req->bo_req.handle,
-						 req->bo_req.fence_class,
-						 req->bo_req.flags,
-						 req->bo_req.mask,
+						 req->bo_req.flags, req->bo_req.mask,
 						 req->bo_req.hint,
-						 0,
+						 req->bo_req.fence_class, 0,
+						 &rep.bo_info,
-						 &buffers[buf_count]);
+						 &buffers[buf_count].buffer);
 
 		if (rep.ret) {
 			DRM_ERROR("error on handle validate %d\n", rep.ret);
 			goto out_err;
 		}
-
+		/*
+		 * If the user provided a presumed offset hint, check whether
+		 * the buffer is in the same place; if so, relocations relative to
+		 * this buffer need not be performed.
+		 */
+		if ((req->bo_req.hint & DRM_BO_HINT_PRESUMED_OFFSET) &&
+		    buffers[buf_count].buffer->offset == req->bo_req.presumed_offset) {
+			buffers[buf_count].presumed_offset_correct = 1;
+		}
 
 		next = arg.next;
 		arg.handled = 1;
 		arg.d.rep = rep;
 
-		if (copy_to_user((void __user *)(unsigned)data, &arg, sizeof(arg)))
+		if (copy_to_user((void __user *)(unsigned long)data, &arg, sizeof(arg)))
 			return -EFAULT;
 
 		data = next;
@@ -993,7 +1046,7 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 	struct drm_fence_arg *fence_arg = &exec_buf->fence_arg;
 	int num_buffers;
 	int ret;
-	struct drm_buffer_object **buffers;
+	struct drm_i915_validate_buffer *buffers;
 	struct drm_fence_object *fence;
 
 	if (!dev_priv->allow_batchbuffer) {
@@ -1012,12 +1065,12 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 
 	ret = drm_bo_read_lock(&dev->bm.bm_lock);
-	if (ret) 
+	if (ret)
 		return ret;
 
 	/*
 	 * The cmdbuf_mutex makes sure the validate-submit-fence
-	 * operation is atomic. 
+	 * operation is atomic.
 	 */
 
 	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
@@ -1028,12 +1081,12 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 
 	num_buffers = exec_buf->num_buffers;
 
-	buffers = drm_calloc(num_buffers, sizeof(struct drm_buffer_object *), DRM_MEM_DRIVER);
+	buffers = drm_calloc(num_buffers, sizeof(struct drm_i915_validate_buffer), DRM_MEM_DRIVER);
 	if (!buffers) {
-	        drm_bo_read_unlock(&dev->bm.bm_lock);
+		drm_bo_read_unlock(&dev->bm.bm_lock);
 		mutex_unlock(&dev_priv->cmdbuf_mutex);
 		return -ENOMEM;
-        }
+	}
 
 	/* validate buffer list + fixup relocations */
 	ret = i915_validate_buffer_list(file_priv, 0, exec_buf->ops_list,
@@ -1043,9 +1096,10 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 
 	/* make sure all previous memory operations have passed */
 	DRM_MEMORYBARRIER();
+	drm_agp_chipset_flush(dev);
 
 	/* submit buffer */
-	batch->start = buffers[num_buffers-1]->offset;
+	batch->start = buffers[num_buffers-1].buffer->offset;
 
 	DRM_DEBUG("i915 exec batchbuffer, start %x used %d cliprects %d\n",
 		  batch->start, batch->used, batch->num_cliprects);
@@ -1057,7 +1111,8 @@ static int i915_execbuffer(struct drm_device *dev, void *data,
 	sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
 
 	/* fence */
-	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
+	ret = drm_fence_buffer_objects(dev, NULL, fence_arg->flags,
+				       NULL, &fence);
 	if (ret)
 		goto out_err0;
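
Taken together, the fast path added above works in two stages: the validate
loop just shown marks each buffer whose final offset matched the client's
presumed_offset, and i915_exec_reloc skips the relocation walk entirely once
every buffer in the list is marked. A condensed sketch of that decision,
assuming the drm_i915_validate_buffer type from this patch (the helper name
is hypothetical):

	/* Sketch: nonzero when the whole relocation pass can be skipped. */
	static int i915_all_offsets_presumed(struct drm_i915_validate_buffer *buffers,
					     uint32_t buf_count)
	{
		uint32_t b;

		if (DRM_DEBUG_RELOCATION)  /* debug builds always patch and verify */
			return 0;
		for (b = 0; b < buf_count; b++)
			if (!buffers[b].presumed_offset_correct)
				return 0;  /* one bad guess forces the kmap pass */
		return 1;
	}

A single mis-guessed buffer still triggers the full pass; i915_apply_reloc
then skips only the individual entries whose target buffer was guessed right.
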
@@ -1092,13 +1147,13 @@ static int i915_do_cleanup_pageflip(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int i, planes, num_pages = dev_priv->sarea_priv->third_handle ? 3 : 2;
 
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	DRM_DEBUG("\n");
 
 	for (i = 0, planes = 0; i < 2; i++)
 		if (dev_priv->sarea_priv->pf_current_page & (0x3 << (2 * i))) {
 			dev_priv->sarea_priv->pf_current_page =
 				(dev_priv->sarea_priv->pf_current_page &
-				 ~(0x3 << (2 * i))) | (num_pages - 1) << (2 * i);
+				 ~(0x3 << (2 * i))) | ((num_pages - 1) << (2 * i));
 			planes |= 1 << i;
 		}
 
@@ -1113,7 +1168,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data, struct drm_file *f
 {
 	drm_i915_flip_t *param = data;
 
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	DRM_DEBUG("\n");
 
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
 
@@ -1138,7 +1193,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	int value;
 
 	if (!dev_priv) {
-		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
@@ -1172,13 +1227,14 @@ static int i915_setparam(struct drm_device *dev, void *data,
 	drm_i915_setparam_t *param = data;
 
 	if (!dev_priv) {
-		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
 	switch (param->param) {
 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-		dev_priv->use_mi_batchbuffer_start = param->value;
+		if (!IS_I965G(dev))
+			dev_priv->use_mi_batchbuffer_start = param->value;
 		break;
 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
 		dev_priv->tex_lru_log_granularity = param->value;
@@ -1199,7 +1255,7 @@ drm_i915_mmio_entry_t mmio_table[] = {
 		I915_MMIO_MAY_READ|I915_MMIO_MAY_WRITE,
 		0x2350,
 		8
-	}	
+	}
 };
 
 static int mmio_table_size = sizeof(mmio_table)/sizeof(drm_i915_mmio_entry_t);
@@ -1209,13 +1265,13 @@ static int i915_mmio(struct drm_device *dev, void *data,
 {
 	uint32_t buf[8];
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_mmio_entry_t *e;	 
+	drm_i915_mmio_entry_t *e;
 	drm_i915_mmio_t *mmio = data;
 	void __iomem *base;
 	int i;
 
 	if (!dev_priv) {
-		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
@@ -1226,27 +1282,27 @@ static int i915_mmio(struct drm_device *dev, void *data,
 	base = (u8 *) dev_priv->mmio_map->handle + e->offset;
 
 	switch (mmio->read_write) {
-		case I915_MMIO_READ:
-			if (!(e->flag & I915_MMIO_MAY_READ))
-				return -EINVAL;
-			for (i = 0; i < e->size / 4; i++)
-				buf[i] = I915_READ(e->offset + i * 4);
-			if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
-				DRM_ERROR("DRM_COPY_TO_USER failed\n");
-				return -EFAULT;
-			}
-			break;
-
-		case I915_MMIO_WRITE:
-			if (!(e->flag & I915_MMIO_MAY_WRITE))
-				return -EINVAL;
-			if(DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
-				DRM_ERROR("DRM_COPY_TO_USER failed\n");
-				return -EFAULT;
-			}
-			for (i = 0; i < e->size / 4; i++)
-				I915_WRITE(e->offset + i * 4, buf[i]);
-			break;
+	case I915_MMIO_READ:
+		if (!(e->flag & I915_MMIO_MAY_READ))
+			return -EINVAL;
+		for (i = 0; i < e->size / 4; i++)
+			buf[i] = I915_READ(e->offset + i * 4);
+		if (DRM_COPY_TO_USER(mmio->data, buf, e->size)) {
+			DRM_ERROR("DRM_COPY_TO_USER failed\n");
+			return -EFAULT;
+		}
+		break;
+
+	case I915_MMIO_WRITE:
+		if (!(e->flag & I915_MMIO_MAY_WRITE))
+			return -EINVAL;
+		if (DRM_COPY_FROM_USER(buf, mmio->data, e->size)) {
+			DRM_ERROR("DRM_COPY_FROM_USER failed\n");
+			return -EFAULT;
+		}
+		for (i = 0; i < e->size / 4; i++)
+			I915_WRITE(e->offset + i * 4, buf[i]);
+		break;
 	}
 	return 0;
 }
@@ -1258,7 +1314,7 @@ static int 
i915_set_status_page(struct drm_device *dev, void *data,  	drm_i915_hws_addr_t *hws = data;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	}  	DRM_DEBUG("set status page addr 0x%08x\n", (u32)hws->addr); @@ -1314,8 +1370,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)  	base = drm_get_resource_start(dev, mmio_bar);  	size = drm_get_resource_len(dev, mmio_bar); -	ret = drm_addmap(dev, base, size, _DRM_REGISTERS, _DRM_KERNEL, -			 &dev_priv->mmio_map); +	ret = drm_addmap(dev, base, size, _DRM_REGISTERS, +		_DRM_KERNEL | _DRM_DRIVER, &dev_priv->mmio_map); + +#ifdef __linux__ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) +	intel_init_chipset_flush_compat(dev); +#endif +#endif  	return ret;  } @@ -1329,6 +1391,11 @@ int i915_driver_unload(struct drm_device *dev)  	drm_free(dev->dev_private, sizeof(drm_i915_private_t),  		 DRM_MEM_DRIVER); +#ifdef __linux__ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) +	intel_fini_chipset_flush_compat(dev); +#endif +#endif  	return 0;  } diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h index a6c3cf30..cfa3f93a 100644 --- a/shared-core/i915_drm.h +++ b/shared-core/i915_drm.h @@ -1,7 +1,7 @@  /*   * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.   * All Rights Reserved. - *  + *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including @@ -9,11 +9,11 @@   * distribute, sub license, and/or sell copies of the Software, and to   * permit persons to whom the Software is furnished to do so, subject to   * the following conditions: - *  + *   * The above copyright notice and this permission notice (including the   * next paragraph) shall be included in all copies or substantial portions   * of the Software. - *  + *   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS   * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF   * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. @@ -21,7 +21,7 @@   * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE   * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *  + *   */  #ifndef _I915_DRM_H_ @@ -178,6 +178,7 @@ typedef struct _drm_i915_sarea {  #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)  #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)  #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) +#define DRM_IOCTL_I915_MMIO             DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_MMIO, drm_i915_mmio)  #define DRM_IOCTL_I915_EXECBUFFER	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_EXECBUFFER, struct drm_i915_execbuffer)  /* Asynchronous page flipping: @@ -274,7 +275,7 @@ typedef struct drm_i915_mem_init_heap {   * rotate):   */  typedef struct drm_i915_mem_destroy_heap { -	        int region; +	int region;  } drm_i915_mem_destroy_heap_t;  /* Allow X server to configure which pipes to monitor for vblank signals @@ -294,11 +295,11 @@ typedef struct drm_i915_vblank_swap {  	unsigned int sequence;  } drm_i915_vblank_swap_t; -#define I915_MMIO_READ 	0 +#define I915_MMIO_READ	0  #define I915_MMIO_WRITE 1 -#define I915_MMIO_MAY_READ  	0x1 -#define I915_MMIO_MAY_WRITE  	0x2 +#define I915_MMIO_MAY_READ	0x1 +#define I915_MMIO_MAY_WRITE	0x2  #define MMIO_REGS_IA_PRIMATIVES_COUNT		0  #define MMIO_REGS_IA_VERTICES_COUNT		1 @@ -314,12 +315,12 @@ typedef struct drm_i915_mmio_entry {  	unsigned int flag;  	unsigned int offset;  	unsigned int size; -}drm_i915_mmio_entry_t; +} drm_i915_mmio_entry_t;  typedef struct drm_i915_mmio {  	unsigned int read_write:1;  	unsigned int reg:31; -	void __user *data;	 +	void __user *data;  } drm_i915_mmio_t;  typedef struct drm_i915_hws_addr { @@ -359,6 +360,7 @@ struct drm_i915_execbuffer {  	uint64_t ops_list;  	uint32_t num_buffers;  	struct _drm_i915_batchbuffer batch; +	drm_context_t context; /* for lockless use in the future */  	struct drm_fence_arg fence_arg;  }; diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h index d34621e5..b8d027d7 100644 --- a/shared-core/i915_drv.h +++ b/shared-core/i915_drv.h @@ -1,10 +1,10 @@  /* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-   */  /* - *  + *   * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.   * All Rights Reserved. - *  + *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including @@ -12,11 +12,11 @@   * distribute, sub license, and/or sell copies of the Software, and to   * permit persons to whom the Software is furnished to do so, subject to   * the following conditions: - *  + *   * The above copyright notice and this permission notice (including the   * next paragraph) shall be included in all copies or substantial portions   * of the Software. - *  + *   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS   * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF   * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. @@ -24,7 +24,7 @@   * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE   * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *  + *   */  #ifndef _I915_DRV_H_ @@ -57,10 +57,11 @@   * 1.9: Usable page flipping and triple buffering   * 1.10: Plane/pipe disentangling   * 1.11: TTM superioctl + * 1.12: TTM relocation optimization   */  #define DRIVER_MAJOR		1  #if defined(I915_HAVE_FENCE) && defined(I915_HAVE_BUFFER) -#define DRIVER_MINOR		11 +#define DRIVER_MINOR		12  #else  #define DRIVER_MINOR		6  #endif @@ -146,76 +147,76 @@ typedef struct drm_i915_private {  	drm_i915_vbl_swap_t vbl_swaps;  	unsigned int swaps_pending; - 	/* Register state */ +	/* Register state */  	u8 saveLBB; - 	u32 saveDSPACNTR; - 	u32 saveDSPBCNTR; - 	u32 savePIPEACONF; - 	u32 savePIPEBCONF; - 	u32 savePIPEASRC; - 	u32 savePIPEBSRC; - 	u32 saveFPA0; - 	u32 saveFPA1; - 	u32 saveDPLL_A; - 	u32 saveDPLL_A_MD; - 	u32 saveHTOTAL_A; - 	u32 saveHBLANK_A; - 	u32 saveHSYNC_A; - 	u32 saveVTOTAL_A; - 	u32 saveVBLANK_A; - 	u32 saveVSYNC_A; +	u32 saveDSPACNTR; +	u32 saveDSPBCNTR; +	u32 savePIPEACONF; +	u32 savePIPEBCONF; +	u32 savePIPEASRC; +	u32 savePIPEBSRC; +	u32 saveFPA0; +	u32 saveFPA1; +	u32 saveDPLL_A; +	u32 saveDPLL_A_MD; +	u32 saveHTOTAL_A; +	u32 saveHBLANK_A; +	u32 saveHSYNC_A; +	u32 saveVTOTAL_A; +	u32 saveVBLANK_A; +	u32 saveVSYNC_A;  	u32 saveBCLRPAT_A; - 	u32 saveDSPASTRIDE; - 	u32 saveDSPASIZE; - 	u32 saveDSPAPOS; - 	u32 saveDSPABASE; - 	u32 saveDSPASURF; +	u32 saveDSPASTRIDE; +	u32 saveDSPASIZE; +	u32 saveDSPAPOS; +	u32 saveDSPABASE; +	u32 saveDSPASURF;  	u32 saveDSPATILEOFF;  	u32 savePFIT_PGM_RATIOS;  	u32 saveBLC_PWM_CTL;  	u32 saveBLC_PWM_CTL2; - 	u32 saveFPB0; - 	u32 saveFPB1; - 	u32 saveDPLL_B; - 	u32 saveDPLL_B_MD; - 	u32 saveHTOTAL_B; - 	u32 saveHBLANK_B; - 	u32 saveHSYNC_B; - 	u32 saveVTOTAL_B; - 	u32 saveVBLANK_B; - 	u32 saveVSYNC_B; +	u32 saveFPB0; +	u32 saveFPB1; +	u32 saveDPLL_B; +	u32 saveDPLL_B_MD; +	u32 saveHTOTAL_B; +	u32 saveHBLANK_B; +	u32 saveHSYNC_B; +	u32 saveVTOTAL_B; +	u32 saveVBLANK_B; +	u32 saveVSYNC_B;  	u32 saveBCLRPAT_B; - 	u32 saveDSPBSTRIDE; - 	u32 saveDSPBSIZE; - 	u32 saveDSPBPOS; - 	u32 saveDSPBBASE; - 	u32 saveDSPBSURF; +	u32 saveDSPBSTRIDE; +	u32 saveDSPBSIZE; +	u32 saveDSPBPOS; +	u32 saveDSPBBASE; +	u32 saveDSPBSURF;  	u32 saveDSPBTILEOFF; - 	u32 saveVCLK_DIVISOR_VGA0; - 	u32 saveVCLK_DIVISOR_VGA1; - 	u32 saveVCLK_POST_DIV; - 	u32 saveVGACNTRL; - 	u32 saveADPA; - 	u32 saveLVDS; +	u32 saveVCLK_DIVISOR_VGA0; +	u32 saveVCLK_DIVISOR_VGA1; +	u32 saveVCLK_POST_DIV; +	u32 saveVGACNTRL; +	u32 saveADPA; +	u32 saveLVDS;  	u32 saveLVDSPP_ON;  	u32 saveLVDSPP_OFF; - 	u32 saveDVOA; - 	u32 saveDVOB; - 	u32 saveDVOC; - 	u32 savePP_ON; - 	u32 savePP_OFF; - 	u32 savePP_CONTROL; - 	u32 savePP_CYCLE; - 	u32 savePFIT_CONTROL; - 	u32 save_palette_a[256]; - 	u32 save_palette_b[256]; +	u32 saveDVOA; +	u32 saveDVOB; +	u32 saveDVOC; +	u32 savePP_ON; +	u32 savePP_OFF; +	u32 savePP_CONTROL; +	u32 savePP_CYCLE; +	u32 savePFIT_CONTROL; +	u32 save_palette_a[256]; +	u32 save_palette_b[256];  	u32 saveFBC_CFB_BASE;  	u32 saveFBC_LL_BASE;  	u32 saveFBC_CONTROL;  	u32 saveFBC_CONTROL2; - 	u32 saveSWF0[16]; - 	u32 saveSWF1[16]; - 	u32 saveSWF2[3]; +	u32 saveSWF0[16]; +	u32 saveSWF1[16]; +	u32 saveSWF2[3];  	u8 saveMSR;  	u8 saveSR[8];  	u8 saveGR[24]; @@ -294,7 +295,7 @@ extern void i915_mem_release(struct drm_device * dev,  extern void i915_fence_handler(struct drm_device *dev);  extern int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,  				    uint32_t flags, -				    uint32_t *sequence,  +				    uint32_t *sequence,  				    uint32_t *native_type);  extern void i915_poke_flush(struct drm_device *dev, uint32_t 
class);  extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags); @@ -303,20 +304,27 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f  #ifdef I915_HAVE_BUFFER  /* i915_buffer.c */  extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev); -extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *fclass, -	                    uint32_t *type); +extern int i915_fence_type(struct drm_buffer_object *bo, uint32_t *fclass, +			   uint32_t *type);  extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);  extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,  			       struct drm_mem_type_manager *man); -extern uint32_t i915_evict_mask(struct drm_buffer_object *bo); +extern uint64_t i915_evict_flags(struct drm_buffer_object *bo);  extern int i915_move(struct drm_buffer_object *bo, int evict, -	      	int no_wait, struct drm_bo_mem_reg *new_mem); +		int no_wait, struct drm_bo_mem_reg *new_mem);  void i915_flush_ttm(struct drm_ttm *ttm);  #endif +#ifdef __linux__ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) +extern void intel_init_chipset_flush_compat(struct drm_device *dev); +extern void intel_fini_chipset_flush_compat(struct drm_device *dev); +#endif +#endif +  #define I915_READ(reg)          DRM_READ32(dev_priv->mmio_map, (reg))  #define I915_WRITE(reg,val)     DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) -#define I915_READ16(reg) 	DRM_READ16(dev_priv->mmio_map, (reg)) +#define I915_READ16(reg)	DRM_READ16(dev_priv->mmio_map, (reg))  #define I915_WRITE16(reg,val)	DRM_WRITE16(dev_priv->mmio_map, (reg), (val))  #define I915_VERBOSE 0 @@ -326,8 +334,8 @@ void i915_flush_ttm(struct drm_ttm *ttm);  #define BEGIN_LP_RING(n) do {				\  	if (I915_VERBOSE)				\ -		DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n",	\ -	                         (n), __FUNCTION__);           \ +		DRM_DEBUG("BEGIN_LP_RING(%d)\n",	\ +	                         (n));		        \  	if (dev_priv->ring.space < (n)*4)                      \  		i915_wait_ring(dev, (n)*4, __FUNCTION__);      \  	outcount = 0;					\ @@ -397,7 +405,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define VGA_CR_INDEX_CGA 0x3d4  #define VGA_CR_DATA_CGA 0x3d5 -#define GFX_OP_USER_INTERRUPT 		((0<<29)|(2<<23)) +#define GFX_OP_USER_INTERRUPT		((0<<29)|(2<<23))  #define GFX_OP_BREAKPOINT_INTERRUPT	((0<<29)|(1<<23))  #define CMD_REPORT_HEAD			(7<<23)  #define CMD_STORE_DWORD_IDX		((0x21<<23) | 0x1) @@ -459,7 +467,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define I915REG_HWSTAM		0x02098  #define I915REG_INT_IDENTITY_R	0x020a4 -#define I915REG_INT_MASK_R 	0x020a8 +#define I915REG_INT_MASK_R	0x020a8  #define I915REG_INT_ENABLE_R	0x020a0  #define I915REG_INSTPM	        0x020c0 @@ -502,7 +510,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define SRX_INDEX		0x3c4  #define SRX_DATA		0x3c5  #define SR01			1 -#define SR01_SCREEN_OFF 	(1<<5) +#define SR01_SCREEN_OFF		(1<<5)  #define PPCR			0x61204  #define PPCR_ON			(1<<0) @@ -522,29 +530,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define ADPA_DPMS_OFF		(3<<10)  #define NOPID                   0x2094 -#define LP_RING     		0x2030 -#define HP_RING     		0x2040 +#define LP_RING			0x2030 +#define HP_RING			0x2040  /* The binner has its own ring buffer:   */  #define HWB_RING		0x2400 -#define RING_TAIL      		0x00 +#define RING_TAIL	
	0x00  #define TAIL_ADDR		0x001FFFF8 -#define RING_HEAD      		0x04 -#define HEAD_WRAP_COUNT     	0xFFE00000 -#define HEAD_WRAP_ONE       	0x00200000 -#define HEAD_ADDR           	0x001FFFFC -#define RING_START     		0x08 -#define START_ADDR          	0x0xFFFFF000 -#define RING_LEN       		0x0C -#define RING_NR_PAGES       	0x001FF000 -#define RING_REPORT_MASK    	0x00000006 -#define RING_REPORT_64K     	0x00000002 -#define RING_REPORT_128K    	0x00000004 -#define RING_NO_REPORT      	0x00000000 -#define RING_VALID_MASK     	0x00000001 -#define RING_VALID          	0x00000001 -#define RING_INVALID        	0x00000000 +#define RING_HEAD		0x04 +#define HEAD_WRAP_COUNT		0xFFE00000 +#define HEAD_WRAP_ONE		0x00200000 +#define HEAD_ADDR		0x001FFFFC +#define RING_START		0x08 +#define START_ADDR		0x0xFFFFF000 +#define RING_LEN		0x0C +#define RING_NR_PAGES		0x001FF000 +#define RING_REPORT_MASK	0x00000006 +#define RING_REPORT_64K		0x00000002 +#define RING_REPORT_128K	0x00000004 +#define RING_NO_REPORT		0x00000000 +#define RING_VALID_MASK		0x00000001 +#define RING_VALID		0x00000001 +#define RING_INVALID		0x00000000  /* Instruction parser error reg:   */ @@ -562,7 +570,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);   */  #define DMA_FADD_S		0x20d4 -/* Cache mode 0 reg.   +/* Cache mode 0 reg.   *  - Manipulating render cache behaviour is central   *    to the concept of zone rendering, tuning this reg can help avoid   *    unnecessary render cache reads and even writes (for z/stencil) @@ -591,7 +599,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define BINCTL			0x2420  #define BC_MASK			(1 << 9) -/* Binned scene info.   +/* Binned scene info.   */  #define BINSCENE		0x2428  #define BS_OP_LOAD		(1 << 8) @@ -609,7 +617,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);   */  #define BDCD			0x2488 -/* Binner pointer cache debug reg:  +/* Binner pointer cache debug reg:   */  #define BPCD			0x248c @@ -666,9 +674,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)  #define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20) -#define MI_BATCH_BUFFER 	((0x30<<23)|1) -#define MI_BATCH_BUFFER_START 	(0x31<<23) -#define MI_BATCH_BUFFER_END 	(0xA<<23) +#define MI_BATCH_BUFFER		((0x30<<23)|1) +#define MI_BATCH_BUFFER_START	(0x31<<23) +#define MI_BATCH_BUFFER_END	(0xA<<23)  #define MI_BATCH_NON_SECURE	(1)  #define MI_BATCH_NON_SECURE_I965 (1<<8) @@ -764,20 +772,20 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  /* I830 CRTC registers */  #define HTOTAL_A	0x60000  #define HBLANK_A	0x60004 -#define HSYNC_A 	0x60008 +#define HSYNC_A		0x60008  #define VTOTAL_A	0x6000c  #define VBLANK_A	0x60010 -#define VSYNC_A 	0x60014 +#define VSYNC_A		0x60014  #define PIPEASRC	0x6001c  #define BCLRPAT_A	0x60020  #define VSYNCSHIFT_A	0x60028  #define HTOTAL_B	0x61000  #define HBLANK_B	0x61004 -#define HSYNC_B 	0x61008 +#define HSYNC_B		0x61008  #define VTOTAL_B	0x6100c  #define VBLANK_B	0x61010 -#define VSYNC_B 	0x61014 +#define VSYNC_B		0x61014  #define PIPEBSRC	0x6101c  #define BCLRPAT_B	0x61020  #define VSYNCSHIFT_B	0x61028 @@ -912,7 +920,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);   */  # define DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00  # define DPLL_MD_UDI_MULTIPLIER_SHIFT		8 -/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.  
+/** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.   * This best be set to the default value (3) or the CRT won't work. No,   * I don't entirely understand what this does...   */ @@ -933,7 +941,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  # define DPLLA_INPUT_BUFFER_ENABLE		(1 << 0)  #define ADPA			0x61100 -#define ADPA_DAC_ENABLE 	(1<<31) +#define ADPA_DAC_ENABLE		(1<<31)  #define ADPA_DAC_DISABLE	0  #define ADPA_PIPE_SELECT_MASK	(1<<30)  #define ADPA_PIPE_A_SELECT	0 @@ -1063,7 +1071,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define PIPEACONF_PIPE_UNLOCKED 0  #define PIPEACONF_PIPE_LOCKED	(1<<25)  #define PIPEACONF_PALETTE	0 -#define PIPEACONF_GAMMA 	(1<<24) +#define PIPEACONF_GAMMA		(1<<24)  #define PIPECONF_FORCE_BORDER	(1<<25)  #define PIPECONF_PROGRESSIVE	(0 << 21)  #define PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21) @@ -1074,7 +1082,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define PIPEBCONF_DISABLE	0  #define PIPEBCONF_DOUBLE_WIDE	(1<<30)  #define PIPEBCONF_DISABLE	0 -#define PIPEBCONF_GAMMA 	(1<<24) +#define PIPEBCONF_GAMMA		(1<<24)  #define PIPEBCONF_PALETTE	0  #define PIPEBGCMAXRED		0x71010 @@ -1086,7 +1094,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define DSPACNTR		0x70180  #define DSPBCNTR		0x71180 -#define DISPLAY_PLANE_ENABLE 			(1<<31) +#define DISPLAY_PLANE_ENABLE			(1<<31)  #define DISPLAY_PLANE_DISABLE			0  #define DISPPLANE_GAMMA_ENABLE			(1<<30)  #define DISPPLANE_GAMMA_DISABLE			0 @@ -1094,7 +1102,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define DISPPLANE_8BPP				(0x2<<26)  #define DISPPLANE_15_16BPP			(0x4<<26)  #define DISPPLANE_16BPP				(0x5<<26) -#define DISPPLANE_32BPP_NO_ALPHA 		(0x6<<26) +#define DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)  #define DISPPLANE_32BPP				(0x7<<26)  #define DISPPLANE_STEREO_ENABLE			(1<<25)  #define DISPPLANE_STEREO_DISABLE		0 @@ -1174,35 +1182,38 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);  #define PALETTE_A		0x0a000  #define PALETTE_B		0x0a800 -#define IS_I830(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82830_CGC) -#define IS_845G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82845G_IG) -#define IS_I85X(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) -#define IS_I855(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82855GM_IG) -#define IS_I865G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82865_IG) +#define IS_I830(dev) ((dev)->pci_device == 0x3577) +#define IS_845G(dev) ((dev)->pci_device == 0x2562) +#define IS_I85X(dev) ((dev)->pci_device == 0x3582) +#define IS_I855(dev) ((dev)->pci_device == 0x3582) +#define IS_I865G(dev) ((dev)->pci_device == 0x2572) -#define IS_I915G(dev) (dev->pci_device == PCI_DEVICE_ID_INTEL_82915G_IG)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/ -#define IS_I915GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82915GM_IG) -#define IS_I945G(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945G_IG) -#define IS_I945GM(dev) ((dev)->pci_device == PCI_DEVICE_ID_INTEL_82945GM_IG) +#define IS_I915G(dev) (dev->pci_device == 0x2582)/* || dev->pci_device == PCI_DEVICE_ID_INTELPCI_CHIP_E7221_G)*/ +#define IS_I915GM(dev) ((dev)->pci_device == 0x2592) +#define IS_I945G(dev) ((dev)->pci_device == 0x2772) +#define IS_I945GM(dev) ((dev)->pci_device == 0x27A2)  #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \  		     
  (dev)->pci_device == 0x2982 || \  		       (dev)->pci_device == 0x2992 || \  		       (dev)->pci_device == 0x29A2 || \  		       (dev)->pci_device == 0x2A02 || \ -		       (dev)->pci_device == 0x2A12) +		       (dev)->pci_device == 0x2A12 || \ +		       (dev)->pci_device == 0x2A42)  #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) +#define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42) +  #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\ -		   	(dev)->pci_device == 0x29B2 ||	\ +			(dev)->pci_device == 0x29B2 ||	\  			(dev)->pci_device == 0x29D2)  #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \ -		      IS_I945GM(dev) || IS_I965G(dev)) +		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))  #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ -			IS_I945GM(dev) || IS_I965GM(dev)) +			IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev))  #define PRIMARY_RINGBUFFER_SIZE         (128*1024) diff --git a/shared-core/i915_irq.c b/shared-core/i915_irq.c index 46d09663..9b46b127 100644 --- a/shared-core/i915_irq.c +++ b/shared-core/i915_irq.c @@ -3,7 +3,7 @@  /*   * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.   * All Rights Reserved. - *  + *   * Permission is hereby granted, free of charge, to any person obtaining a   * copy of this software and associated documentation files (the   * "Software"), to deal in the Software without restriction, including @@ -11,11 +11,11 @@   * distribute, sub license, and/or sell copies of the Software, and to   * permit persons to whom the Software is furnished to do so, subject to   * the following conditions: - *  + *   * The above copyright notice and this permission notice (including the   * next paragraph) shall be included in all copies or substantial portions   * of the Software. - *  + *   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS   * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF   * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. @@ -23,7 +23,7 @@   * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,   * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE   * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- *  + *   */  #include "drmP.h" @@ -339,11 +339,11 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)  	pipea_stats = I915_READ(I915REG_PIPEASTAT);  	pipeb_stats = I915_READ(I915REG_PIPEBSTAT); -		 +  	temp = I915_READ16(I915REG_INT_IDENTITY_R);  #if 0 -	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); +	DRM_DEBUG("flag=%08x\n", temp);  #endif  	if (temp == 0)  		return IRQ_NONE; @@ -375,7 +375,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)  	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {  		if (dev_priv->swaps_pending > 0)  			drm_locked_tasklet(dev, i915_vblank_tasklet); -		I915_WRITE(I915REG_PIPEASTAT,  +		I915_WRITE(I915REG_PIPEASTAT,  			pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|  			I915_VBLANK_CLEAR);  		I915_WRITE(I915REG_PIPEBSTAT, @@ -386,15 +386,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)  	return IRQ_HANDLED;  } -int i915_emit_irq(struct drm_device * dev) +int i915_emit_irq(struct drm_device *dev)  { -	  	drm_i915_private_t *dev_priv = dev->dev_private;  	RING_LOCALS;  	i915_kernel_lost_context(dev); -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	i915_emit_breadcrumb(dev); @@ -404,8 +403,6 @@ int i915_emit_irq(struct drm_device * dev)  	ADVANCE_LP_RING();  	return dev_priv->counter; - -  }  void i915_user_irq_on(drm_i915_private_t *dev_priv) @@ -418,7 +415,7 @@ void i915_user_irq_on(drm_i915_private_t *dev_priv)  	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);  } -		 +  void i915_user_irq_off(drm_i915_private_t *dev_priv)  {  	DRM_SPINLOCK(&dev_priv->user_irq_lock); @@ -428,29 +425,26 @@ void i915_user_irq_off(drm_i915_private_t *dev_priv)  	}  	DRM_SPINUNLOCK(&dev_priv->user_irq_lock);  } -		 +  static int i915_wait_irq(struct drm_device * dev, int irq_nr)  {  	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;  	int ret = 0; -	DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr, +	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,  		  READ_BREADCRUMB(dev_priv));  	if (READ_BREADCRUMB(dev_priv) >= irq_nr)  		return 0; -	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; -	  	i915_user_irq_on(dev_priv);  	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,  		    READ_BREADCRUMB(dev_priv) >= irq_nr);  	i915_user_irq_off(dev_priv);  	if (ret == -EBUSY) { -		DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n", -			  __FUNCTION__, +		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",  			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);  	} @@ -460,7 +454,8 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)  /* Needs the lock as it touches the ring.   
*/ -int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv) +int i915_irq_emit(struct drm_device *dev, void *data, +			 struct drm_file *file_priv)  {  	drm_i915_private_t *dev_priv = dev->dev_private;  	drm_i915_irq_emit_t *emit = data; @@ -469,7 +464,7 @@ int i915_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -492,7 +487,7 @@ int i915_irq_wait(struct drm_device *dev, void *data,  	drm_i915_irq_wait_t *irqwait = data;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -560,13 +555,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,  	drm_i915_vblank_pipe_t *pipe = data;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	}  	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { -		DRM_ERROR("%s called with invalid pipe 0x%x\n",  -			  __FUNCTION__, pipe->pipe); +		DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);  		return -EINVAL;  	} @@ -583,7 +577,7 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,  	u16 flag;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -743,7 +737,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,  	DRM_SPINLOCK_IRQSAVE(&dev_priv->swaps_lock, irqflags); -	list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head); +	list_add_tail(&vbl_swap->head, &dev_priv->vbl_swaps.head);  	dev_priv->swaps_pending++;  	DRM_SPINUNLOCK_IRQRESTORE(&dev_priv->swaps_lock, irqflags); @@ -788,7 +782,7 @@ int i915_driver_irq_postinstall(struct drm_device * dev)  	 * Initialize the hardware status page IRQ location.  	 
*/ -	I915_WRITE(I915REG_INSTPM, ( 1 << 5) | ( 1 << 21)); +	I915_WRITE(I915REG_INSTPM, (1 << 5) | (1 << 21));  	return 0;  } @@ -796,6 +790,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)  {  	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;  	u16 temp; +  	if (!dev_priv)  		return; diff --git a/shared-core/i915_mem.c b/shared-core/i915_mem.c index 5bf29a1e..6126a60d 100644 --- a/shared-core/i915_mem.c +++ b/shared-core/i915_mem.c @@ -276,7 +276,7 @@ int i915_mem_alloc(struct drm_device *dev, void *data,  	struct mem_block *block, **heap;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -314,7 +314,7 @@ int i915_mem_free(struct drm_device *dev, void *data,  	struct mem_block *block, **heap;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -342,7 +342,7 @@ int i915_mem_init_heap(struct drm_device *dev, void *data,  	struct mem_block **heap;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -366,7 +366,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,  	struct mem_block **heap;  	if ( !dev_priv ) { -		DRM_ERROR( "%s called with no initialization\n", __FUNCTION__ ); +		DRM_ERROR( "called with no initialization\n" );  		return -EINVAL;  	} @@ -375,7 +375,7 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,  		DRM_ERROR("get_heap failed");  		return -EFAULT;  	} -	 +  	if (!*heap) {  		DRM_ERROR("heap not initialized?");  		return -EFAULT; diff --git a/shared-core/mach64_dma.c b/shared-core/mach64_dma.c index e0a67458..339234fa 100644 --- a/shared-core/mach64_dma.c +++ b/shared-core/mach64_dma.c @@ -6,7 +6,7 @@   * \author Gareth Hughes <gareth@valinux.com>   * \author Frank C. Earl <fearl@airmail.net>   * \author Leif Delgass <ldelgass@retinalburn.net> - * \author Jose Fonseca <j_r_fonseca@yahoo.co.uk> + * \author José Fonseca <j_r_fonseca@yahoo.co.uk>   */  /* @@ -53,11 +53,11 @@   *   * \param dev_priv pointer to device private data structure.   * \param entries number of free entries in the FIFO to wait for. - *  + *   * \returns zero on success, or -EBUSY if the timeout (specified by   * drm_mach64_private::usec_timeout) occurs.   */ -int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries) +int mach64_do_wait_for_fifo(drm_mach64_private_t *dev_priv, int entries)  {  	int slots = 0, i; @@ -68,15 +68,14 @@ int mach64_do_wait_for_fifo(drm_mach64_private_t * dev_priv, int entries)  		DRM_UDELAY(1);  	} -	DRM_INFO("%s failed! slots=%d entries=%d\n", __FUNCTION__, slots, -		 entries); +	DRM_INFO("failed! slots=%d entries=%d\n", slots, entries);  	return -EBUSY;  }  /**   * Wait for the draw engine to be idle.   */ -int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv) +int mach64_do_wait_for_idle(drm_mach64_private_t *dev_priv)  {  	int i, ret; @@ -85,14 +84,12 @@ int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv)  		return ret;  	for (i = 0; i < dev_priv->usec_timeout; i++) { -		if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) { +		if (!(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE))  			return 0; -		}  		DRM_UDELAY(1);  	} -	DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__, -		 MACH64_READ(MACH64_GUI_STAT)); +	DRM_INFO("failed! 
GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));  	mach64_dump_ring_info(dev_priv);  	return -EBUSY;  } @@ -107,16 +104,16 @@ int mach64_do_wait_for_idle(drm_mach64_private_t * dev_priv)   *   * This function should be called before writing new entries to the ring   * buffer. - *  + *   * \param dev_priv pointer to device private data structure.   * \param n number of free entries in the ring buffer to wait for. - *  + *   * \returns zero on success, or -EBUSY if the timeout (specified by   * drm_mach64_private_t::usec_timeout) occurs.   *   * \sa mach64_dump_ring_info()   */ -int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n) +int mach64_wait_ring(drm_mach64_private_t *dev_priv, int n)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;  	int i; @@ -124,9 +121,8 @@ int mach64_wait_ring(drm_mach64_private_t * dev_priv, int n)  	for (i = 0; i < dev_priv->usec_timeout; i++) {  		mach64_update_ring_snapshot(dev_priv);  		if (ring->space >= n) { -			if (i > 0) { -				DRM_DEBUG("%s: %d usecs\n", __FUNCTION__, i); -			} +			if (i > 0) +				DRM_DEBUG("%d usecs\n", i);  			return 0;  		}  		DRM_UDELAY(1); @@ -139,11 +135,11 @@  }  /** - * Wait until all DMA requests have been processed...  + * Wait until all DMA requests have been processed...   *   * \sa mach64_wait_ring()   */ -static int mach64_ring_idle(drm_mach64_private_t * dev_priv) +static int mach64_ring_idle(drm_mach64_private_t *dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;  	u32 head; @@ -155,9 +151,8 @@  		mach64_update_ring_snapshot(dev_priv);  		if (ring->head == ring->tail &&  		    !(MACH64_READ(MACH64_GUI_STAT) & MACH64_GUI_ACTIVE)) { -			if (i > 0) { -				DRM_DEBUG("%s: %d usecs\n", __FUNCTION__, i); -			} +			if (i > 0) +				DRM_DEBUG("%d usecs\n", i);  			return 0;  		}  		if (ring->head == head) { @@ -169,8 +164,7 @@  		DRM_UDELAY(1);  	} -	DRM_INFO("%s failed! GUI_STAT=0x%08x\n", __FUNCTION__, -		 MACH64_READ(MACH64_GUI_STAT)); +	DRM_INFO("failed! GUI_STAT=0x%08x\n", MACH64_READ(MACH64_GUI_STAT));  	mach64_dump_ring_info(dev_priv);  	return -EBUSY;  } @@ -180,7 +174,7 @@   *   * \sa mach64_do_engine_reset()   */ -static void mach64_ring_reset(drm_mach64_private_t * dev_priv) +static void mach64_ring_reset(drm_mach64_private_t *dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; @@ -198,7 +192,7 @@  /**   * Ensure that all the queued commands will be processed.   */ -int mach64_do_dma_flush(drm_mach64_private_t * dev_priv) +int mach64_do_dma_flush(drm_mach64_private_t *dev_priv)  {  	/* FIXME: It's not necessary to wait for idle when flushing  	 * we just need to ensure the ring will be completely processed @@ -210,14 +204,14 @@  /**   * Stop all DMA activity.   
*/ -int mach64_do_dma_idle(drm_mach64_private_t * dev_priv) +int mach64_do_dma_idle(drm_mach64_private_t *dev_priv)  {  	int ret;  	/* wait for completion */  	if ((ret = mach64_ring_idle(dev_priv)) < 0) { -		DRM_ERROR("%s failed BM_GUI_TABLE=0x%08x tail: %u\n", -			  __FUNCTION__, MACH64_READ(MACH64_BM_GUI_TABLE), +		DRM_ERROR("failed BM_GUI_TABLE=0x%08x tail: %u\n", +			  MACH64_READ(MACH64_BM_GUI_TABLE),  			  dev_priv->ring.tail);  		return ret;  	} @@ -232,11 +226,11 @@ int mach64_do_dma_idle(drm_mach64_private_t * dev_priv)  /**   * Reset the engine.  This will stop the DMA if it is running.   */ -int mach64_do_engine_reset(drm_mach64_private_t * dev_priv) +int mach64_do_engine_reset(drm_mach64_private_t *dev_priv)  {  	u32 tmp; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	/* Kill off any outstanding DMA transfers.  	 */ @@ -276,7 +270,7 @@ int mach64_do_engine_reset(drm_mach64_private_t * dev_priv)  /**   * Dump engine registers values.   */ -void mach64_dump_engine_info(drm_mach64_private_t * dev_priv) +void mach64_dump_engine_info(drm_mach64_private_t *dev_priv)  {  	DRM_INFO("\n");  	if (!dev_priv->is_pci) { @@ -417,8 +411,8 @@ void mach64_dump_engine_info(drm_mach64_private_t * dev_priv)   * Used by mach64_dump_ring_info() to dump the contents of the current buffer   * pointed by the ring head.   */ -static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv, -				 struct drm_buf * buf) +static void mach64_dump_buf_info(drm_mach64_private_t *dev_priv, +				 struct drm_buf *buf)  {  	u32 addr = GETBUFADDR(buf);  	u32 used = buf->used >> 2; @@ -477,7 +471,7 @@ static void mach64_dump_buf_info(drm_mach64_private_t * dev_priv,   * Dump the ring state and contents, including the contents of the buffer being   * processed by the graphics engine.   */ -void mach64_dump_ring_info(drm_mach64_private_t * dev_priv) +void mach64_dump_ring_info(drm_mach64_private_t *dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;  	int i, skipped; @@ -526,9 +520,8 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv)  			u32 buf_addr = GETBUFADDR(buf); -			if (buf_addr <= addr && addr < buf_addr + buf->used) { +			if (buf_addr <= addr && addr < buf_addr + buf->used)  				mach64_dump_buf_info(dev_priv, buf); -			}  		}  	} @@ -559,6 +552,259 @@ void mach64_dump_ring_info(drm_mach64_private_t * dev_priv)  /*******************************************************************/ +/** \name DMA descriptor ring macros */ +/*@{*/ + +/** + * Add the end mark to the ring's new tail position. + * + * The bus master engine will keep processing the DMA buffers listed in the ring + * until it finds this mark, making it stop. + * + * \sa mach64_clear_dma_eol + */  +static __inline__ void mach64_set_dma_eol(volatile u32 *addr) +{ +#if defined(__i386__) +	int nr = 31; + +	/* Taken from include/asm-i386/bitops.h linux header */ +	__asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr) +			     :"Ir"(nr)); +#elif defined(__powerpc__) +	u32 old; +	u32 mask = cpu_to_le32(MACH64_DMA_EOL); + +	/* Taken from the include/asm-ppc/bitops.h linux header */ +	__asm__ __volatile__("\n\ +1:	lwarx	%0,0,%3 \n\ +	or	%0,%0,%2 \n\ +	stwcx.	
%0,0,%3 \n\ +	bne-	1b":"=&r"(old), "=m"(*addr) +			     :"r"(mask), "r"(addr), "m"(*addr) +			     :"cc"); +#elif defined(__alpha__) +	u32 temp; +	u32 mask = MACH64_DMA_EOL; + +	/* Taken from the include/asm-alpha/bitops.h linux header */ +	__asm__ __volatile__("1:	ldl_l %0,%3\n" +			     "	bis %0,%2,%0\n" +			     "	stl_c %0,%1\n" +			     "	beq %0,2f\n" +			     ".subsection 2\n" +			     "2:	br 1b\n" +			     ".previous":"=&r"(temp), "=m"(*addr) +			     :"Ir"(mask), "m"(*addr)); +#else +	u32 mask = cpu_to_le32(MACH64_DMA_EOL); + +	*addr |= mask; +#endif +} + +/** + * Remove the end mark from the ring's old tail position. + * + * It should be called after calling mach64_set_dma_eol to mark the ring's new + * tail position. + * + * We update the end marks while the bus master engine is in operation. Since + * the bus master engine may potentially be reading from the same position + * that we write, we must change atomically to avoid having intermediary bad + * data. + */ +static __inline__ void mach64_clear_dma_eol(volatile u32 *addr) +{ +#if defined(__i386__) +	int nr = 31; + +	/* Taken from include/asm-i386/bitops.h linux header */ +	__asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr) +			     :"Ir"(nr)); +#elif defined(__powerpc__) +	u32 old; +	u32 mask = cpu_to_le32(MACH64_DMA_EOL); + +	/* Taken from the include/asm-ppc/bitops.h linux header */ +	__asm__ __volatile__("\n\ +1:	lwarx	%0,0,%3 \n\ +	andc	%0,%0,%2 \n\ +	stwcx.	%0,0,%3 \n\ +	bne-	1b":"=&r"(old), "=m"(*addr) +			     :"r"(mask), "r"(addr), "m"(*addr) +			     :"cc"); +#elif defined(__alpha__) +	u32 temp; +	u32 mask = ~MACH64_DMA_EOL; + +	/* Taken from the include/asm-alpha/bitops.h linux header */ +	__asm__ __volatile__("1:	ldl_l %0,%3\n" +			     "	and %0,%2,%0\n" +			     "	stl_c %0,%1\n" +			     "	beq %0,2f\n" +			     ".subsection 2\n" +			     "2:	br 1b\n" +			     ".previous":"=&r"(temp), "=m"(*addr) +			     :"Ir"(mask), "m"(*addr)); +#else +	u32 mask = cpu_to_le32(~MACH64_DMA_EOL); + +	*addr &= mask; +#endif +} + +#define RING_LOCALS							\ +	int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring + +#define RING_WRITE_OFS  _ring_write + +#define BEGIN_RING(n)							\ +	do {								\ +		if (MACH64_VERBOSE) {					\ +			DRM_INFO( "BEGIN_RING( %d ) \n",		\ +				  (n) );				\ +		}							\ +		if (dev_priv->ring.space <= (n) * sizeof(u32)) {	\ +			int ret;					\ +			if ((ret = mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) { \ +				DRM_ERROR( "wait_ring failed, resetting engine\n"); \ +				mach64_dump_engine_info( dev_priv );	\ +				mach64_do_engine_reset( dev_priv );	\ +				return ret;				\ +			}						\ +		}							\ +		dev_priv->ring.space -= (n) * sizeof(u32);		\ +		_ring = (u32 *) dev_priv->ring.start;			\ +		_ring_tail = _ring_write = dev_priv->ring.tail;		\ +		_ring_mask = dev_priv->ring.tail_mask;			\ +	} while (0) + +#define OUT_RING( x )						\ +do {								\ +	if (MACH64_VERBOSE) {					\ +		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",	\ +			   (unsigned int)(x), _ring_write );	\ +	}							\ +	_ring[_ring_write++] = cpu_to_le32( x );		\ +	_ring_write &= _ring_mask;				\ +} while (0) + +#define ADVANCE_RING()							\ +do {									\ +	if (MACH64_VERBOSE) {						\ +		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\ +			  _ring_write, _ring_tail );			\ +	}								\ +	DRM_MEMORYBARRIER();						\ +	mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );	\ +	DRM_MEMORYBARRIER();						\ +	dev_priv->ring.tail = _ring_write;				\ +	mach64_ring_tick( dev_priv, &(dev_priv)->ring );		\ 
+} while (0) + +/** + * Queue a DMA buffer of register writes into the ring buffer. + */  +int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv, +                           drm_mach64_freelist_t *entry) +{ +	int bytes, pages, remainder; +	u32 address, page; +	int i; +	struct drm_buf *buf = entry->buf; +	RING_LOCALS; + +	bytes = buf->used; +	address = GETBUFADDR( buf ); +	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; + +	BEGIN_RING( pages * 4 ); + +	for ( i = 0 ; i < pages-1 ; i++ ) { +		page = address + i * MACH64_DMA_CHUNKSIZE; +		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); +		OUT_RING( page ); +		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); +		OUT_RING( 0 ); +	} + +	/* generate the final descriptor for any remaining commands in this buffer */ +	page = address + i * MACH64_DMA_CHUNKSIZE; +	remainder = bytes - i * MACH64_DMA_CHUNKSIZE; + +	/* Save dword offset of last descriptor for this buffer. +	 * This is needed to check for completion of the buffer in freelist_get +	 */ +	entry->ring_ofs = RING_WRITE_OFS; + +	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); +	OUT_RING( page ); +	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); +	OUT_RING( 0 ); + +	ADVANCE_RING(); +	 +	return 0; +} + +/** + * Queue a DMA buffer controlling host data transfers (e.g., blit). + *  + * Almost identical to mach64_add_buf_to_ring. + */ +int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv, +                                    drm_mach64_freelist_t *entry) +{ +	int bytes, pages, remainder; +	u32 address, page; +	int i; +	struct drm_buf *buf = entry->buf; +	RING_LOCALS; +	 +	bytes = buf->used - MACH64_HOSTDATA_BLIT_OFFSET; +	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE; +	address = GETBUFADDR( buf ); +	 +	BEGIN_RING( 4 + pages * 4 ); +	 +	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR ); +	OUT_RING( address ); +	OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET ); +	OUT_RING( 0 ); +	address += MACH64_HOSTDATA_BLIT_OFFSET; +	 +	for ( i = 0 ; i < pages-1 ; i++ ) { +		page = address + i * MACH64_DMA_CHUNKSIZE; +		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); +		OUT_RING( page ); +		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET ); +		OUT_RING( 0 ); +	} +	 +	/* generate the final descriptor for any remaining commands in this buffer */ +	page = address + i * MACH64_DMA_CHUNKSIZE; +	remainder = bytes - i * MACH64_DMA_CHUNKSIZE; +	 +	/* Save dword offset of last descriptor for this buffer. 
+	 * This is needed to check for completion of the buffer in freelist_get +	 */ +	entry->ring_ofs = RING_WRITE_OFS; +	 +	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA ); +	OUT_RING( page ); +	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL ); +	OUT_RING( 0 ); +	 +	ADVANCE_RING(); +	 +	return 0; +} + +/*@}*/ + + +/*******************************************************************/  /** \name DMA test and initialization */  /*@{*/ @@ -582,7 +828,7 @@ static int mach64_bm_dma_test(struct drm_device * dev)  	u32 src_cntl, pat_reg0, pat_reg1;  	int i, count, failed; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	table = (u32 *) dev_priv->ring.start; @@ -758,7 +1004,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)  	u32 tmp;  	int i, ret; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	dev_priv = drm_alloc(sizeof(drm_mach64_private_t), DRM_MEM_DRIVER);  	if (dev_priv == NULL) @@ -968,7 +1214,7 @@ static int mach64_do_dma_init(struct drm_device * dev, drm_mach64_init_t * init)  /** MMIO Pseudo-DMA (intended primarily for debugging, not performance)   */ -int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv) +int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t *dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;  	volatile u32 *ring_read; @@ -983,9 +1229,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)  	target = MACH64_BM_ADDR;  	if ((ret = mach64_do_wait_for_idle(dev_priv)) < 0) { -		DRM_INFO -		    ("%s: idle failed before pseudo-dma dispatch, resetting engine\n", -		     __FUNCTION__); +		DRM_INFO("idle failed before pseudo-dma dispatch, resetting engine\n");  		mach64_dump_engine_info(dev_priv);  		mach64_do_engine_reset(dev_priv);  		return ret; @@ -1106,7 +1350,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)  	MACH64_WRITE(MACH64_BM_GUI_TABLE_CMD,  		     ring->head_addr | MACH64_CIRCULAR_BUF_SIZE_16KB); -	DRM_DEBUG("%s completed\n", __FUNCTION__); +	DRM_DEBUG("completed\n");  	return 0;  } @@ -1119,7 +1363,7 @@ int mach64_do_dispatch_pseudo_dma(drm_mach64_private_t * dev_priv)  int mach64_do_cleanup_dma(struct drm_device * dev)  { -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	/* Make sure interrupts are disabled here because the uninstall ioctl  	 * may not have been called from userspace and after dev_private @@ -1163,7 +1407,7 @@ int mach64_dma_init(struct drm_device *dev, void *data,  {  	drm_mach64_init_t *init = data; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1182,7 +1426,7 @@ int mach64_dma_idle(struct drm_device *dev, void *data,  {  	drm_mach64_private_t *dev_priv = dev->dev_private; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1194,7 +1438,7 @@ int mach64_dma_flush(struct drm_device *dev, void *data,  {  	drm_mach64_private_t *dev_priv = dev->dev_private; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1206,7 +1450,7 @@ int mach64_engine_reset(struct drm_device *dev, void *data,  {  	drm_mach64_private_t *dev_priv = dev->dev_private; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1228,8 +1472,7 @@ int mach64_init_freelist(struct drm_device * dev)  	struct list_head *ptr;  	int i; -	DRM_DEBUG("%s: adding %d buffers to freelist\n", __FUNCTION__, -		  dma->buf_count); +	DRM_DEBUG("adding %d buffers to 
freelist\n", dma->buf_count);  	for (i = 0; i < dma->buf_count; i++) {  		if ((entry = @@ -1253,7 +1496,7 @@ void mach64_destroy_freelist(struct drm_device * dev)  	struct list_head *ptr;  	struct list_head *tmp; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	list_for_each_safe(ptr, tmp, &dev_priv->pending) {  		list_del(ptr); @@ -1276,7 +1519,7 @@ void mach64_destroy_freelist(struct drm_device * dev)  /* IMPORTANT: This function should only be called when the engine is idle or locked up,   * as it assumes all buffers in the pending list have been completed by the hardware.   */ -int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv) +int mach64_do_release_used_buffers(drm_mach64_private_t *dev_priv)  {  	struct list_head *ptr;  	struct list_head *tmp; @@ -1298,13 +1541,12 @@ int mach64_do_release_used_buffers(drm_mach64_private_t * dev_priv)  		}  	} -	DRM_DEBUG("%s: released %d buffers from pending list\n", __FUNCTION__, -		  i); +	DRM_DEBUG("released %d buffers from pending list\n", i);  	return 0;  } -static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv) +static int mach64_do_reclaim_completed(drm_mach64_private_t *dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;  	struct list_head *ptr; @@ -1326,8 +1568,7 @@ static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv)  #endif  		/* last pass is complete, so release everything */  		mach64_do_release_used_buffers(dev_priv); -		DRM_DEBUG("%s: idle engine, freed all buffers.\n", -		     __FUNCTION__); +		DRM_DEBUG("idle engine, freed all buffers.\n");  		if (list_empty(&dev_priv->free_list)) {  			DRM_ERROR("Freelist empty with idle engine\n");  			return -1; @@ -1368,9 +1609,9 @@ static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv)  			list_del(ptr);  			list_add_tail(ptr, &dev_priv->free_list);  			DRM_DEBUG -			    ("%s: freed processed buffer (head=%d tail=%d " +			    ("freed processed buffer (head=%d tail=%d "  			     "buf ring ofs=%d).\n", -			     __FUNCTION__, head, tail, ofs); +			     head, tail, ofs);  			return 0;  		}  	} @@ -1378,7 +1619,7 @@ static int mach64_do_reclaim_completed(drm_mach64_private_t * dev_priv)  	return 1;  } -struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv) +struct drm_buf *mach64_freelist_get(drm_mach64_private_t *dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring;  	drm_mach64_freelist_t *entry; @@ -1424,7 +1665,7 @@ struct drm_buf *mach64_freelist_get(drm_mach64_private_t * dev_priv)  	return entry->buf;  } -int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_buf) +int mach64_freelist_put(drm_mach64_private_t *dev_priv, struct drm_buf *copy_buf)  {  	struct list_head *ptr;  	drm_mach64_freelist_t *entry; @@ -1433,8 +1674,7 @@ int mach64_freelist_put(drm_mach64_private_t * dev_priv, struct drm_buf * copy_b  	list_for_each(ptr, &dev_priv->pending) {  		entry = list_entry(ptr, drm_mach64_freelist_t, list);  		if (copy_buf == entry->buf) { -			DRM_ERROR("%s: Trying to release a pending buf\n", -			     __FUNCTION__); +			DRM_ERROR("Trying to release a pending buf\n");  			return -EFAULT;  		}  	} diff --git a/shared-core/mach64_drv.h b/shared-core/mach64_drv.h index cebd4c6e..fb8a7724 100644 --- a/shared-core/mach64_drv.h +++ b/shared-core/mach64_drv.h @@ -29,7 +29,7 @@   *    Gareth Hughes <gareth@valinux.com>   *    Frank C. 
Earl <fearl@airmail.net>   *    Leif Delgass <ldelgass@retinalburn.net> - *    Jos�Fonseca <j_r_fonseca@yahoo.co.uk> + *    José Fonseca <j_r_fonseca@yahoo.co.uk>   */  #ifndef __MACH64_DRV_H__ @@ -96,6 +96,8 @@ typedef struct drm_mach64_private {  	unsigned int depth_bpp;  	unsigned int depth_offset, depth_pitch; +	atomic_t vbl_received;          /**< Number of vblanks received. */ +  	u32 front_offset_pitch;  	u32 back_offset_pitch;  	u32 depth_offset_pitch; @@ -140,6 +142,11 @@ extern void mach64_dump_engine_info(drm_mach64_private_t * dev_priv);  extern void mach64_dump_ring_info(drm_mach64_private_t * dev_priv);  extern int mach64_do_engine_reset(drm_mach64_private_t * dev_priv); +extern int mach64_add_buf_to_ring(drm_mach64_private_t *dev_priv, +                                  drm_mach64_freelist_t *_entry); +extern int mach64_add_hostdata_buf_to_ring(drm_mach64_private_t *dev_priv, +                                           drm_mach64_freelist_t *_entry); +  extern int mach64_do_dma_idle(drm_mach64_private_t * dev_priv);  extern int mach64_do_dma_flush(drm_mach64_private_t * dev_priv);  extern int mach64_do_cleanup_dma(struct drm_device * dev); @@ -155,13 +162,14 @@ extern int mach64_dma_blit(struct drm_device *dev, void *data,  			   struct drm_file *file_priv);  extern int mach64_get_param(struct drm_device *dev, void *data,  			    struct drm_file *file_priv); -extern int mach64_driver_vblank_wait(struct drm_device * dev, -				     unsigned int *sequence); +extern u32 mach64_get_vblank_counter(struct drm_device *dev, int crtc); +extern int mach64_enable_vblank(struct drm_device *dev, int crtc); +extern void mach64_disable_vblank(struct drm_device *dev, int crtc);  extern irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS); -extern void mach64_driver_irq_preinstall(struct drm_device * dev); -extern void mach64_driver_irq_postinstall(struct drm_device * dev); -extern void mach64_driver_irq_uninstall(struct drm_device * dev); +extern void mach64_driver_irq_preinstall(struct drm_device *dev); +extern int mach64_driver_irq_postinstall(struct drm_device *dev); +extern void mach64_driver_irq_uninstall(struct drm_device *dev);  /* ================================================================   * Registers @@ -171,14 +179,14 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);  #define MACH64_AGP_CNTL				0x014c  #define MACH64_ALPHA_TST_CNTL			0x0550 -#define MACH64_DSP_CONFIG 			0x0420 -#define MACH64_DSP_ON_OFF 			0x0424 -#define MACH64_EXT_MEM_CNTL 			0x04ac -#define MACH64_GEN_TEST_CNTL 			0x04d0 -#define MACH64_HW_DEBUG 			0x047c -#define MACH64_MEM_ADDR_CONFIG 			0x0434 -#define MACH64_MEM_BUF_CNTL 			0x042c -#define MACH64_MEM_CNTL 			0x04b0 +#define MACH64_DSP_CONFIG			0x0420 +#define MACH64_DSP_ON_OFF			0x0424 +#define MACH64_EXT_MEM_CNTL			0x04ac +#define MACH64_GEN_TEST_CNTL			0x04d0 +#define MACH64_HW_DEBUG				0x047c +#define MACH64_MEM_ADDR_CONFIG			0x0434 +#define MACH64_MEM_BUF_CNTL			0x042c +#define MACH64_MEM_CNTL				0x04b0  #define MACH64_BM_ADDR				0x0648  #define MACH64_BM_COMMAND			0x0188 @@ -205,16 +213,16 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);  #define MACH64_CLR_CMP_CLR			0x0700  #define MACH64_CLR_CMP_CNTL			0x0708  #define MACH64_CLR_CMP_MASK			0x0704 -#define MACH64_CONFIG_CHIP_ID 			0x04e0 -#define MACH64_CONFIG_CNTL 			0x04dc -#define MACH64_CONFIG_STAT0 			0x04e4 -#define MACH64_CONFIG_STAT1 			0x0494 -#define MACH64_CONFIG_STAT2 			0x0498 +#define MACH64_CONFIG_CHIP_ID			0x04e0 +#define MACH64_CONFIG_CNTL			
0x04dc +#define MACH64_CONFIG_STAT0			0x04e4 +#define MACH64_CONFIG_STAT1			0x0494 +#define MACH64_CONFIG_STAT2			0x0498  #define MACH64_CONTEXT_LOAD_CNTL		0x072c  #define MACH64_CONTEXT_MASK			0x0720  #define MACH64_COMPOSITE_SHADOW_ID		0x0798 -#define MACH64_CRC_SIG 				0x04e8 -#define MACH64_CUSTOM_MACRO_CNTL 		0x04d4 +#define MACH64_CRC_SIG				0x04e8 +#define MACH64_CUSTOM_MACRO_CNTL		0x04d4  #define MACH64_DP_BKGD_CLR			0x06c0  #define MACH64_DP_FOG_CLR			0x06c4 @@ -358,7 +366,7 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);  #define MACH64_TEX_0_OFF			0x05c0  #define MACH64_TEX_CNTL				0x0774  #define MACH64_TEX_SIZE_PITCH			0x0770 -#define MACH64_TIMER_CONFIG 			0x0428 +#define MACH64_TIMER_CONFIG			0x0428  #define MACH64_VERTEX_1_ARGB			0x0254  #define MACH64_VERTEX_1_S			0x0240 @@ -521,95 +529,17 @@ extern void mach64_driver_irq_uninstall(struct drm_device * dev);  #define MACH64_APERTURE_OFFSET	        0x7ff800	/* frame-buffer offset for gui-masters */  /* ================================================================ - * Misc helper macros + * Ring operations + * + * Since the Mach64 bus master engine requires polling, these functions end + * up being called frequently, hence being inline.   */ -static __inline__ void mach64_set_dma_eol(volatile u32 * addr) -{ -#if defined(__i386__) -	int nr = 31; - -	/* Taken from include/asm-i386/bitops.h linux header */ -	__asm__ __volatile__("lock;" "btsl %1,%0":"=m"(*addr) -			     :"Ir"(nr)); -#elif defined(__powerpc__) -	u32 old; -	u32 mask = cpu_to_le32(MACH64_DMA_EOL); - -	/* Taken from the include/asm-ppc/bitops.h linux header */ -	__asm__ __volatile__("\n\ -1:	lwarx	%0,0,%3 \n\ -	or	%0,%0,%2 \n\ -	stwcx.	%0,0,%3 \n\ -	bne-	1b":"=&r"(old), "=m"(*addr) -			     :"r"(mask), "r"(addr), "m"(*addr) -			     :"cc"); -#elif defined(__alpha__) -	u32 temp; -	u32 mask = MACH64_DMA_EOL; - -	/* Taken from the include/asm-alpha/bitops.h linux header */ -	__asm__ __volatile__("1:	ldl_l %0,%3\n" -			     "	bis %0,%2,%0\n" -			     "	stl_c %0,%1\n" -			     "	beq %0,2f\n" -			     ".subsection 2\n" -			     "2:	br 1b\n" -			     ".previous":"=&r"(temp), "=m"(*addr) -			     :"Ir"(mask), "m"(*addr)); -#else -	u32 mask = cpu_to_le32(MACH64_DMA_EOL); - -	*addr |= mask; -#endif -} - -static __inline__ void mach64_clear_dma_eol(volatile u32 * addr) -{ -#if defined(__i386__) -	int nr = 31; - -	/* Taken from include/asm-i386/bitops.h linux header */ -	__asm__ __volatile__("lock;" "btrl %1,%0":"=m"(*addr) -			     :"Ir"(nr)); -#elif defined(__powerpc__) -	u32 old; -	u32 mask = cpu_to_le32(MACH64_DMA_EOL); - -	/* Taken from the include/asm-ppc/bitops.h linux header */ -	__asm__ __volatile__("\n\ -1:	lwarx	%0,0,%3 \n\ -	andc	%0,%0,%2 \n\ -	stwcx.	
%0,0,%3 \n\ -	bne-	1b":"=&r"(old), "=m"(*addr) -			     :"r"(mask), "r"(addr), "m"(*addr) -			     :"cc"); -#elif defined(__alpha__) -	u32 temp; -	u32 mask = ~MACH64_DMA_EOL; - -	/* Taken from the include/asm-alpha/bitops.h linux header */ -	__asm__ __volatile__("1:	ldl_l %0,%3\n" -			     "	and %0,%2,%0\n" -			     "	stl_c %0,%1\n" -			     "	beq %0,2f\n" -			     ".subsection 2\n" -			     "2:	br 1b\n" -			     ".previous":"=&r"(temp), "=m"(*addr) -			     :"Ir"(mask), "m"(*addr)); -#else -	u32 mask = cpu_to_le32(~MACH64_DMA_EOL); - -	*addr &= mask; -#endif -} -  static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; -	DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n", -		  __FUNCTION__, +	DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",  		  ring->head_addr, ring->head, ring->tail, ring->space);  	if (mach64_do_wait_for_idle(dev_priv) < 0) { @@ -635,8 +565,7 @@ static __inline__ void mach64_ring_start(drm_mach64_private_t * dev_priv)  static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,  					  drm_mach64_descriptor_ring_t * ring)  { -	DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n", -		  __FUNCTION__, +	DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",  		  ring->head_addr, ring->head, ring->tail, ring->space);  	/* reset descriptor table ring head */ @@ -655,8 +584,7 @@ static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,  		MACH64_WRITE(MACH64_DST_HEIGHT_WIDTH, 0);  		if (dev_priv->driver_mode == MACH64_MODE_DMA_SYNC) {  			if ((mach64_do_wait_for_idle(dev_priv)) < 0) { -				DRM_ERROR("%s: idle failed, resetting engine\n", -					  __FUNCTION__); +				DRM_ERROR("idle failed, resetting engine\n");  				mach64_dump_engine_info(dev_priv);  				mach64_do_engine_reset(dev_priv);  				return; @@ -666,11 +594,22 @@ static __inline__ void mach64_ring_resume(drm_mach64_private_t * dev_priv,  	}  } +/** + * Poll the ring head and make sure the bus master is alive. + *  + * Mach64's bus master engine will stop if there are no more entries to process. + * This function polls the engine for the last processed entry and calls  + * mach64_ring_resume if there is an unprocessed entry. + *  + * Note also that, since we update the ring tail while the bus master engine is  + * in operation, it is possible that the last tail update was too late to be  + * processed, and the bus master engine stops at the previous tail position.  + * Therefore it is important to call this function frequently.  
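+ *
+ * Illustrative polling loop (a sketch for this comment only, not a call
+ * site taken from this patch; mach64_ring_idle() follows essentially this
+ * pattern via mach64_update_ring_snapshot()):
+ * \code
+ * while (ring->head != ring->tail) {
+ *	mach64_ring_tick(dev_priv, ring);
+ *	DRM_UDELAY(1);
+ * }
+ * \endcode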
+ */  static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,  					drm_mach64_descriptor_ring_t * ring)  { -	DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n", -		  __FUNCTION__, +	DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",  		  ring->head_addr, ring->head, ring->tail, ring->space);  	if (!dev_priv->ring_running) { @@ -717,8 +656,7 @@ static __inline__ void mach64_ring_tick(drm_mach64_private_t * dev_priv,  static __inline__ void mach64_ring_stop(drm_mach64_private_t * dev_priv)  { -	DRM_DEBUG("%s: head_addr: 0x%08x head: %d tail: %d space: %d\n", -		  __FUNCTION__, +	DRM_DEBUG("head_addr: 0x%08x head: %d tail: %d space: %d\n",  		  dev_priv->ring.head_addr, dev_priv->ring.head,  		  dev_priv->ring.tail, dev_priv->ring.space); @@ -739,7 +677,7 @@ mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)  {  	drm_mach64_descriptor_ring_t *ring = &dev_priv->ring; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	mach64_ring_tick(dev_priv, ring); @@ -750,70 +688,22 @@ mach64_update_ring_snapshot(drm_mach64_private_t * dev_priv)  }  /* ================================================================ - * DMA descriptor ring macros - */ - -#define RING_LOCALS									\ -	int _ring_tail, _ring_write; unsigned int _ring_mask; volatile u32 *_ring - -#define RING_WRITE_OFS  _ring_write - -#define BEGIN_RING( n ) 								\ -do {											\ -	if ( MACH64_VERBOSE ) {								\ -		DRM_INFO( "BEGIN_RING( %d ) in %s\n",					\ -			   (n), __FUNCTION__ );						\ -	}										\ -	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {				\ -		int ret;								\ -		if ((ret=mach64_wait_ring( dev_priv, (n) * sizeof(u32))) < 0 ) {	\ -			DRM_ERROR( "wait_ring failed, resetting engine\n");		\ -			mach64_dump_engine_info( dev_priv );				\ -			mach64_do_engine_reset( dev_priv );				\ -			return ret;							\ -		}									\ -	}										\ -	dev_priv->ring.space -= (n) * sizeof(u32);					\ -	_ring = (u32 *) dev_priv->ring.start;						\ -	_ring_tail = _ring_write = dev_priv->ring.tail;					\ -	_ring_mask = dev_priv->ring.tail_mask;						\ -} while (0) - -#define OUT_RING( x )						\ -do {								\ -	if ( MACH64_VERBOSE ) {					\ -		DRM_INFO( "   OUT_RING( 0x%08x ) at 0x%x\n",	\ -			   (unsigned int)(x), _ring_write );	\ -	}							\ -	_ring[_ring_write++] = cpu_to_le32( x );		\ -	_ring_write &= _ring_mask;				\ -} while (0) - -#define ADVANCE_RING() 							\ -do {									\ -	if ( MACH64_VERBOSE ) {						\ -		DRM_INFO( "ADVANCE_RING() wr=0x%06x tail=0x%06x\n",	\ -			  _ring_write, _ring_tail );			\ -	}								\ -	DRM_MEMORYBARRIER();						\ -	mach64_clear_dma_eol( &_ring[(_ring_tail - 2) & _ring_mask] );	\ -	DRM_MEMORYBARRIER();						\ -	dev_priv->ring.tail = _ring_write;				\ -	mach64_ring_tick( dev_priv, &(dev_priv)->ring );		\ -} while (0) - -/* ================================================================   * DMA macros + *  + * Mach64's ring buffer doesn't take register writes directly. These  + * have to be written indirectly in DMA buffers. These macros simplify  + * the task of setting up a buffer, writing commands to it, and  + * queuing the buffer in the ring.    */  #define DMALOCALS				\  	drm_mach64_freelist_t *_entry = NULL;	\ -	struct drm_buf *_buf = NULL; 		\ +	struct drm_buf *_buf = NULL;		\  	u32 *_buf_wptr; int _outcount  #define GETBUFPTR( __buf )						\ -((dev_priv->is_pci) ? 							\ -	((u32 *)(__buf)->address) : 					\ +((dev_priv->is_pci) ?							
\ +	((u32 *)(__buf)->address) :					\  	((u32 *)((char *)dev_priv->dev_buffers->handle + (__buf)->offset)))  #define GETBUFADDR( __buf ) ((u32)(__buf)->bus_address) @@ -828,7 +718,7 @@ static __inline__ int mach64_find_pending_buf_entry(drm_mach64_private_t *  	struct list_head *ptr;  #if MACH64_EXTRA_CHECKING  	if (list_empty(&dev_priv->pending)) { -		DRM_ERROR("Empty pending list in %s\n", __FUNCTION__); +		DRM_ERROR("Empty pending list\n");  		return -EINVAL;  	}  #endif @@ -844,7 +734,7 @@  	return 0;  } -#define DMASETPTR( _p ) 			\ +#define DMASETPTR( _p )				\  do {						\  	_buf = (_p);				\  	_outcount = 0;				\ @@ -855,18 +745,15 @@  #define DMAGETPTR( file_priv, dev_priv, n )				\  do {									\  	if ( MACH64_VERBOSE ) {						\ -		DRM_INFO( "DMAGETPTR( %d ) in %s\n",			\ -			  n, __FUNCTION__ );				\ +		DRM_INFO( "DMAGETPTR( %d )\n", (n) );			\  	}								\  	_buf = mach64_freelist_get( dev_priv );				\  	if (_buf == NULL) {						\ -		DRM_ERROR("%s: couldn't get buffer in DMAGETPTR\n",	\ -			   __FUNCTION__ );				\ +		DRM_ERROR("couldn't get buffer in DMAGETPTR\n");	\  		return -EAGAIN;					\  	}								\  	if (_buf->pending) {						\ -	        DRM_ERROR("%s: pending buf in DMAGETPTR\n",		\ -			   __FUNCTION__ );				\ +	        DRM_ERROR("pending buf in DMAGETPTR\n");		\  		return -EFAULT;					\  	}								\  	_buf->file_priv = file_priv;					\ @@ -886,173 +773,87 @@ do {								\  	_buf->used += 8;					\  } while (0)
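	/* Typical use of the DMA buffer macros above, sketched from the
	 * dispatch functions in mach64_state.c (the register writes shown
	 * are illustrative, not a verbatim call site):
	 *
	 *	DMALOCALS;
	 *
	 *	DMAGETPTR( file_priv, dev_priv, 2 );
	 *	DMAOUTREG( MACH64_DST_X_Y, (y << 16) | x );
	 *	DMAOUTREG( MACH64_DST_WIDTH_HEIGHT, (h << 16) | w );
	 *	DMAADVANCE( dev_priv, 1 );
	 */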
-				   __FUNCTION__, _buf->idx );				\ -			return ret;							\ -		}									\ -	}										\ -	_entry->discard = 1;								\ -} while(0) - -#define ADD_BUF_TO_RING( dev_priv )							\ -do {											\ -	int bytes, pages, remainder;							\ -	u32 address, page;								\ -	int i;										\ -											\ -	bytes = _buf->used;								\ -	address = GETBUFADDR( _buf );							\ -											\ -	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;		\ -											\ -	BEGIN_RING( pages * 4 );							\ -											\ -	for ( i = 0 ; i < pages-1 ; i++ ) {						\ -		page = address + i * MACH64_DMA_CHUNKSIZE;				\ -		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );			\ -		OUT_RING( page );							\ -		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );		\ -		OUT_RING( 0 );								\ -	}										\ -											\ -	/* generate the final descriptor for any remaining commands in this buffer */	\ -	page = address + i * MACH64_DMA_CHUNKSIZE;					\ -	remainder = bytes - i * MACH64_DMA_CHUNKSIZE;					\ -											\ -	/* Save dword offset of last descriptor for this buffer.			\ -	 * This is needed to check for completion of the buffer in freelist_get		\ -	 */										\ -	_entry->ring_ofs = RING_WRITE_OFS;						\ -											\ -	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );				\ -	OUT_RING( page );								\ -	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );		\ -	OUT_RING( 0 );									\ -											\ -	ADVANCE_RING();									\ -} while(0) - -#define DMAADVANCEHOSTDATA( dev_priv )							\ -do {											\ -	struct list_head *ptr;								\ -	RING_LOCALS;									\ -											\ -	if ( MACH64_VERBOSE ) {								\ -		DRM_INFO( "DMAADVANCEHOSTDATA() in %s\n", __FUNCTION__ );		\ -	}										\ -											\ -	if (_buf->used <= 0) {								\ -		DRM_ERROR( "DMAADVANCEHOSTDATA() in %s: sending empty buf %d\n",	\ -				   __FUNCTION__, _buf->idx );				\ -		return -EFAULT;							\ -	}										\ -	if (list_empty(&dev_priv->placeholders)) {					\ -		DRM_ERROR( "%s: empty placeholder list in DMAADVANCEHOSTDATA()\n",	\ -			   __FUNCTION__ );						\ -		return -EFAULT;							\ -	}										\ -											\ -        ptr = dev_priv->placeholders.next;						\ -	list_del(ptr);									\ -	_entry = list_entry(ptr, drm_mach64_freelist_t, list);				\ -	_entry->buf = _buf;								\ -	_entry->buf->pending = 1;							\ -	list_add_tail(ptr, &dev_priv->pending);						\ -	_entry->discard = 1;								\ -	ADD_HOSTDATA_BUF_TO_RING( dev_priv );						\ -} while (0) - -#define ADD_HOSTDATA_BUF_TO_RING( dev_priv )						 \ -do {											 \ -	int bytes, pages, remainder;							 \ -	u32 address, page;								 \ -	int i;										 \ -											 \ -	bytes = _buf->used - MACH64_HOSTDATA_BLIT_OFFSET;				 \ -	pages = (bytes + MACH64_DMA_CHUNKSIZE - 1) / MACH64_DMA_CHUNKSIZE;		 \ -	address = GETBUFADDR( _buf );							 \ -											 \ -	BEGIN_RING( 4 + pages * 4 );							 \ -											 \ -	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_ADDR );				 \ -	OUT_RING( address );								 \ -	OUT_RING( MACH64_HOSTDATA_BLIT_OFFSET | MACH64_DMA_HOLD_OFFSET );		 \ -	OUT_RING( 0 );									 \ -											 \ -	address += MACH64_HOSTDATA_BLIT_OFFSET;						 \ -											 \ -	for ( i = 0 ; i < pages-1 ; i++ ) {						 \ -		page = address + i * MACH64_DMA_CHUNKSIZE;				 \ -		OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );		 \ -		OUT_RING( page );							 \ -		OUT_RING( MACH64_DMA_CHUNKSIZE | MACH64_DMA_HOLD_OFFSET );		 \ -		OUT_RING( 0 );								 \ -	}										 \ -											 \ -	/* generate the final descriptor for 
any remaining commands in this buffer */	 \ -	page = address + i * MACH64_DMA_CHUNKSIZE;					 \ -	remainder = bytes - i * MACH64_DMA_CHUNKSIZE;					 \ -											 \ -	/* Save dword offset of last descriptor for this buffer.			 \ -	 * This is needed to check for completion of the buffer in freelist_get		 \ -	 */										 \ -	_entry->ring_ofs = RING_WRITE_OFS;						 \ -											 \ -	OUT_RING( MACH64_APERTURE_OFFSET + MACH64_BM_HOSTDATA );			 \ -	OUT_RING( page );								 \ -	OUT_RING( remainder | MACH64_DMA_HOLD_OFFSET | MACH64_DMA_EOL );		 \ -	OUT_RING( 0 );									 \ -											 \ -	ADVANCE_RING();									 \ -} while(0) +#define DMAADVANCE( dev_priv, _discard )				\ +	do {								\ +		struct list_head *ptr;					\ +		int ret;						\ +									\ +		if ( MACH64_VERBOSE ) {					\ +			DRM_INFO( "DMAADVANCE() in \n" );		\ +		}							\ +									\ +		if (_buf->used <= 0) {					\ +			DRM_ERROR( "DMAADVANCE(): sending empty buf %d\n", \ +				   _buf->idx );				\ +			return -EFAULT;					\ +		}							\ +		if (_buf->pending) {					\ +			/* This is a resued buffer, so we need to find it in the pending list */ \ +			if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \ +				DRM_ERROR( "DMAADVANCE(): couldn't find pending buf %d\n", _buf->idx );	\ +				return ret;				\ +			}						\ +			if (_entry->discard) {				\ +				DRM_ERROR( "DMAADVANCE(): sending discarded pending buf %d\n", _buf->idx ); \ +				return -EFAULT;				\ +			}						\ +		} else {						\ +			if (list_empty(&dev_priv->placeholders)) {	\ +				DRM_ERROR( "DMAADVANCE(): empty placeholder list\n"); \ +				return -EFAULT;				\ +			}						\ +			ptr = dev_priv->placeholders.next;		\ +			list_del(ptr);					\ +			_entry = list_entry(ptr, drm_mach64_freelist_t, list); \ +			_buf->pending = 1;				\ +			_entry->buf = _buf;				\ +			list_add_tail(ptr, &dev_priv->pending);		\ +		}							\ +		_entry->discard = (_discard);				\ +		if ((ret = mach64_add_buf_to_ring( dev_priv, _entry ))) \ +			return ret;					\ +	} while (0) + +#define DMADISCARDBUF()							\ +	do {								\ +		if (_entry == NULL) {					\ +			int ret;					\ +			if ((ret = mach64_find_pending_buf_entry(dev_priv, &_entry, _buf))) { \ +				DRM_ERROR( "couldn't find pending buf %d\n", \ +					   _buf->idx );			\ +				return ret;				\ +			}						\ +		}							\ +		_entry->discard = 1;					\ +	} while(0) + +#define DMAADVANCEHOSTDATA( dev_priv )					\ +	do {								\ +		struct list_head *ptr;					\ +		int ret;						\ +									\ +		if ( MACH64_VERBOSE ) {					\ +			DRM_INFO( "DMAADVANCEHOSTDATA() in \n" );	\ +		}							\ +									\ +		if (_buf->used <= 0) {					\ +			DRM_ERROR( "DMAADVANCEHOSTDATA(): sending empty buf %d\n", _buf->idx );	\ +			return -EFAULT;					\ +		}							\ +		if (list_empty(&dev_priv->placeholders)) {		\ +			DRM_ERROR( "empty placeholder list in DMAADVANCEHOSTDATA()\n" ); \ +			return -EFAULT;					\ +		}							\ +									\ +		ptr = dev_priv->placeholders.next;			\ +		list_del(ptr);						\ +		_entry = list_entry(ptr, drm_mach64_freelist_t, list);	\ +		_entry->buf = _buf;					\ +		_entry->buf->pending = 1;				\ +		list_add_tail(ptr, &dev_priv->pending);			\ +		_entry->discard = 1;					\ +		if ((ret = mach64_add_hostdata_buf_to_ring( dev_priv, _entry ))) \ +			return ret;					\ +	} while (0)  #endif				/* __MACH64_DRV_H__ */ diff --git a/shared-core/mach64_irq.c b/shared-core/mach64_irq.c index 4122dd91..2d522a6c 100644 --- a/shared-core/mach64_irq.c +++ b/shared-core/mach64_irq.c @@ -42,9 +42,8 @@  irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)  { 
-	struct drm_device *dev = (struct drm_device *) arg; -	drm_mach64_private_t *dev_priv = -	    (drm_mach64_private_t *) dev->dev_private; +	struct drm_device *dev = arg; +	drm_mach64_private_t *dev_priv = dev->dev_private;  	int status;  	status = MACH64_READ(MACH64_CRTC_INT_CNTL); @@ -62,74 +61,81 @@ irqreturn_t mach64_driver_irq_handler(DRM_IRQ_ARGS)  			     (status & ~MACH64_CRTC_INT_ACKS)  			     | MACH64_CRTC_VBLANK_INT); -		atomic_inc(&dev->vbl_received); -		DRM_WAKEUP(&dev->vbl_queue); -		drm_vbl_send_signals(dev); +		atomic_inc(&dev_priv->vbl_received); +		drm_handle_vblank(dev, 0);  		return IRQ_HANDLED;  	}  	return IRQ_NONE;  } -int mach64_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) +u32 mach64_get_vblank_counter(struct drm_device * dev, int crtc)  { -	unsigned int cur_vblank; -	int ret = 0; - -	/* Assume that the user has missed the current sequence number -	 * by about a day rather than she wants to wait for years -	 * using vertical blanks... -	 */ -	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, -		    (((cur_vblank = atomic_read(&dev->vbl_received)) -		      - *sequence) <= (1 << 23))); +	const drm_mach64_private_t *const dev_priv = dev->dev_private; +		 +	if (crtc != 0) { +		return 0; +	} +	 +	return atomic_read(&dev_priv->vbl_received); +} -	*sequence = cur_vblank; +int mach64_enable_vblank(struct drm_device * dev, int crtc) +{ +	drm_mach64_private_t *dev_priv = dev->dev_private; +	u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL); +	 +	if (crtc != 0) { +		DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", crtc); +		return 0; +	} +	 +	DRM_DEBUG("before enable vblank CRTC_INT_CTNL: 0x%08x\n", status); +	 +	/* Turn on VBLANK interrupt */ +	MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL) +		     | MACH64_CRTC_VBLANK_INT_EN); -	return ret; +	return 0;  } -/* drm_dma.h hooks -*/ -void mach64_driver_irq_preinstall(struct drm_device * dev) -{ -	drm_mach64_private_t *dev_priv = -	    (drm_mach64_private_t *) dev->dev_private; +void mach64_disable_vblank(struct drm_device * dev, int crtc) +{ +	drm_mach64_private_t *dev_priv = dev->dev_private;  	u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL); -	DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status); +	DRM_DEBUG("before disable vblank CRTC_INT_CTNL: 0x%08x\n", status);  	/* Disable and clear VBLANK interrupt */  	MACH64_WRITE(MACH64_CRTC_INT_CNTL, (status & ~MACH64_CRTC_VBLANK_INT_EN)  		     | MACH64_CRTC_VBLANK_INT);  } -void mach64_driver_irq_postinstall(struct drm_device * dev) +/* drm_dma.h hooks +*/ +void mach64_driver_irq_preinstall(struct drm_device * dev)  { -	drm_mach64_private_t *dev_priv = -	    (drm_mach64_private_t *) dev->dev_private; +	drm_mach64_private_t *dev_priv = dev->dev_private; -	/* Turn on VBLANK interrupt */ -	MACH64_WRITE(MACH64_CRTC_INT_CNTL, MACH64_READ(MACH64_CRTC_INT_CNTL) -		     | MACH64_CRTC_VBLANK_INT_EN); +	u32 status = MACH64_READ(MACH64_CRTC_INT_CNTL); -	DRM_DEBUG("after install CRTC_INT_CTNL: 0x%08x\n", -		  MACH64_READ(MACH64_CRTC_INT_CNTL)); +	DRM_DEBUG("before install CRTC_INT_CTNL: 0x%08x\n", status); +	mach64_disable_vblank(dev,0); +} + +int mach64_driver_irq_postinstall(struct drm_device * dev) +{ +	return drm_vblank_init(dev, 1);  }  void mach64_driver_irq_uninstall(struct drm_device * dev)  { -	drm_mach64_private_t *dev_priv = -	    (drm_mach64_private_t *) dev->dev_private; +	drm_mach64_private_t *dev_priv = dev->dev_private;  	if (!dev_priv)  		return; -	/* Disable and clear VBLANK interrupt */ -	
MACH64_WRITE(MACH64_CRTC_INT_CNTL, -		     (MACH64_READ(MACH64_CRTC_INT_CNTL) & -		      ~MACH64_CRTC_VBLANK_INT_EN) -		     | MACH64_CRTC_VBLANK_INT); +	mach64_disable_vblank(dev, 0);  	DRM_DEBUG("after uninstall CRTC_INT_CTNL: 0x%08x\n",  		  MACH64_READ(MACH64_CRTC_INT_CNTL)); diff --git a/shared-core/mach64_state.c b/shared-core/mach64_state.c index 89b6c6ce..c82f38bb 100644 --- a/shared-core/mach64_state.c +++ b/shared-core/mach64_state.c @@ -27,7 +27,7 @@   * Authors:   *    Gareth Hughes <gareth@valinux.com>   *    Leif Delgass <ldelgass@retinalburn.net> - *    Jos�Fonseca <j_r_fonseca@yahoo.co.uk> + *    José Fonseca <j_r_fonseca@yahoo.co.uk>   */  #include "drmP.h" @@ -95,7 +95,7 @@ static int mach64_emit_cliprect(struct drm_file *file_priv,  	drm_mach64_context_regs_t *regs = &sarea_priv->context_state;  	DMALOCALS; -	DRM_DEBUG("%s: box=%p\n", __FUNCTION__, box); +	DRM_DEBUG("box=%p\n", box);  	/* Get GL scissor */  	/* FIXME: store scissor in SAREA as a cliprect instead of in @@ -146,7 +146,7 @@ static __inline__ int mach64_emit_state(struct drm_file *file_priv,  	if (MACH64_VERBOSE) {  		mach64_print_dirty(__FUNCTION__, dirty);  	} else { -		DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty); +		DRM_DEBUG("dirty=0x%08x\n", dirty);  	}  	DMAGETPTR(file_priv, dev_priv, 17);	/* returns on failure to get buffer */ @@ -229,7 +229,7 @@ static int mach64_dma_dispatch_clear(struct drm_device * dev,  	int i;  	DMALOCALS; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	switch (dev_priv->fb_bpp) {  	case 16: @@ -368,7 +368,7 @@ static int mach64_dma_dispatch_swap(struct drm_device * dev,  	int i;  	DMALOCALS; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	switch (dev_priv->fb_bpp) {  	case 16: @@ -445,7 +445,7 @@ static int mach64_do_get_frames_queued(drm_mach64_private_t * dev_priv)  	int i, start;  	u32 head, tail, ofs; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	if (sarea_priv->frames_queued == 0)  		return 0; @@ -525,15 +525,14 @@ static __inline__ int copy_from_user_vertex(u32 *to,  				from += count;  				to += count;  			} else { -				DRM_ERROR("%s: Got bad command: 0x%04x\n", -					  __FUNCTION__, reg); +				DRM_ERROR("Got bad command: 0x%04x\n", reg);  				drm_free(orig_from, bytes, DRM_MEM_DRIVER);  				return -EACCES;  			}  		} else {  			DRM_ERROR -			    ("%s: Got bad command count(=%u) dwords remaining=%lu\n", -			     __FUNCTION__, count, n); +			    ("Got bad command count(=%u) dwords remaining=%lu\n", +			     count, n);  			drm_free(orig_from, bytes, DRM_MEM_DRIVER);  			return -EINVAL;  		} @@ -543,7 +542,7 @@ static __inline__ int copy_from_user_vertex(u32 *to,  	if (n == 0)  		return 0;  	else { -		DRM_ERROR("%s: Bad buf->used(=%lu)\n", __FUNCTION__, bytes); +		DRM_ERROR("Bad buf->used(=%lu)\n", bytes);  		return -EINVAL;  	}  } @@ -563,18 +562,22 @@ static int mach64_dma_dispatch_vertex(struct drm_device * dev,  	int verify_ret = 0;  	DMALOCALS; -	DRM_DEBUG("%s: buf=%p used=%lu nbox=%d\n", -		  __FUNCTION__, buf, used, sarea_priv->nbox); +	DRM_DEBUG("buf=%p used=%lu nbox=%d\n", +		  buf, used, sarea_priv->nbox);  	if (!used)  		goto _vertex_done;  	copy_buf = mach64_freelist_get(dev_priv);  	if (copy_buf == NULL) { -		DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__); +		DRM_ERROR("couldn't get buffer\n");  		return -EAGAIN;  	} +	/* Mach64's vertex data is actually register writes. To avoid security +	 * compromises these register writes have to be verified and copied from +	 * user space into a private DMA buffer. 
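+	 *
+	 * copy_from_user_vertex() does that verification: it walks the
+	 * submitted dwords command by command and rejects any write that
+	 * names a forbidden register (-EACCES) or carries a bogus dword
+	 * count (-EINVAL), so only vetted register writes reach the ring.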
+	 */  	verify_ret = copy_from_user_vertex(GETBUFPTR(copy_buf), buf, used);  	if (verify_ret != 0) { @@ -694,10 +697,20 @@ static int mach64_dma_dispatch_blit(struct drm_device * dev,  	copy_buf = mach64_freelist_get(dev_priv);  	if (copy_buf == NULL) { -		DRM_ERROR("%s: couldn't get buffer\n", __FUNCTION__); +		DRM_ERROR("couldn't get buffer\n");  		return -EAGAIN;  	} +	/* Copy the blit data from userspace. +	 * +	 * XXX: This is overkill. The most efficient solution would be having +	 * two sets of buffers (one set private for vertex data, the other set +	 * client-writable for blits). However, that would bring more complexity +	 * and would break backward compatibility. The solution currently +	 * implemented keeps all buffers private, which makes it possible to +	 * secure the driver without increasing complexity, at the cost of some +	 * speed in transferring data. +	 */  	verify_ret = copy_from_user_blit(GETBUFPTR(copy_buf), blit->buf, used);  	if (verify_ret != 0) { @@ -745,7 +758,7 @@ static int mach64_dma_dispatch_blit(struct drm_device * dev,  	DMAOUTREG(MACH64_DST_X_Y, (blit->y << 16) | blit->x);  	DMAOUTREG(MACH64_DST_WIDTH_HEIGHT, (blit->height << 16) | blit->width); -	DRM_DEBUG("%s: %lu bytes\n", __FUNCTION__, used); +	DRM_DEBUG("%lu bytes\n", used);  	/* Add the buffer to the queue */  	DMAADVANCEHOSTDATA(dev_priv); @@ -766,7 +779,7 @@ int mach64_dma_clear(struct drm_device *dev, void *data,  	drm_mach64_clear_t *clear = data;  	int ret; -	DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID); +	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -791,7 +804,7 @@ int mach64_dma_swap(struct drm_device *dev, void *data,  	drm_mach64_sarea_t *sarea_priv = dev_priv->sarea_priv;  	int ret; -	DRM_DEBUG("%s: pid=%d\n", __FUNCTION__, DRM_CURRENTPID); +	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -816,12 +829,12 @@ int mach64_dma_vertex(struct drm_device *dev, void *data,  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} -	DRM_DEBUG("%s: pid=%d buf=%p used=%lu discard=%d\n", -		  __FUNCTION__, DRM_CURRENTPID, +	DRM_DEBUG("pid=%d buf=%p used=%lu discard=%d\n", +		  DRM_CURRENTPID,  		  vertex->buf, vertex->used, vertex->discard);  	if (vertex->prim < 0 || vertex->prim > MACH64_PRIM_POLYGON) { @@ -868,10 +881,10 @@ int mach64_get_param(struct drm_device *dev, void *data,  	drm_mach64_getparam_t *param = data;  	int value; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} diff --git a/shared-core/mga_dma.c b/shared-core/mga_dma.c index a86dd31c..d56f4d7a 100644 --- a/shared-core/mga_dma.c +++ b/shared-core/mga_dma.c @@ -28,7 +28,7 @@  /**   * \file mga_dma.c   * DMA support for MGA G200 / G400. - *  + *   * \author Rickard E. 
(Rik) Faith <faith@valinux.com>   * \author Jeff Hartmann <jhartmann@valinux.com>   * \author Keith Whitwell <keith@tungstengraphics.com> @@ -46,7 +46,7 @@  #define MINIMAL_CLEANUP    0  #define FULL_CLEANUP       1 -static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup); +static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup);  /* ================================================================   * Engine control @@ -395,7 +395,7 @@ int mga_freelist_put(struct drm_device * dev, struct drm_buf * buf)  int mga_driver_load(struct drm_device *dev, unsigned long flags)  { -	drm_mga_private_t * dev_priv; +	drm_mga_private_t *dev_priv;  	dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);  	if (!dev_priv) @@ -420,7 +420,7 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)  /**   * Bootstrap the driver for AGP DMA. - *  + *   * \todo   * Investigate whether there is any benefit to storing the WARP microcode in   * AGP memory.  If not, the microcode may as well always be put in PCI @@ -436,10 +436,11 @@  static int mga_do_agp_dma_bootstrap(struct drm_device *dev,  				    drm_mga_dma_bootstrap_t * dma_bs)  { -	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; +	drm_mga_private_t *const dev_priv = +		(drm_mga_private_t *)dev->dev_private;  	unsigned int warp_size = mga_warp_microcode_size(dev_priv);  	int err; -	unsigned  offset; +	unsigned offset;  	const unsigned secondary_size = dma_bs->secondary_bin_count  		* dma_bs->secondary_bin_size;  	const unsigned agp_size = (dma_bs->agp_size << 20); @@ -481,11 +482,10 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,  		}  	} -  	/* Allocate and bind AGP memory. 
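The two-step allocate-then-bind handshake used here is the standard DRM AGP protocol, and the same pair of calls reappears in nouveau_mem_init_agp() later in this diff. Condensed into a standalone helper (a sketch; the helper name is invented, but the calls and structures are the ones used in the patch):

/* Sketch of the DRM AGP allocate/bind handshake. */
static int agp_alloc_and_bind(struct drm_device *dev, unsigned long size)
{
	struct drm_agp_buffer agp_req;
	struct drm_agp_binding bind_req;
	int err;

	memset(&agp_req, 0, sizeof(agp_req));
	agp_req.size = size;		/* bytes of AGP memory wanted */
	agp_req.type = 0;
	err = drm_agp_alloc(dev, &agp_req);
	if (err)
		return err;

	/* Binding attaches the allocation at an offset in the aperture. */
	bind_req.handle = agp_req.handle;
	bind_req.offset = 0;
	return drm_agp_bind(dev, &bind_req);
}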
*/  	agp_req.size = agp_size;  	agp_req.type = 0; -	err = drm_agp_alloc( dev, & agp_req ); +	err = drm_agp_alloc(dev, &agp_req);  	if (err) {  		dev_priv->agp_size = 0;  		DRM_ERROR("Unable to allocate %uMB AGP memory\n", @@ -511,36 +511,36 @@ int mga_do_agp_dma_bootstrap(struct drm_device *dev,  		warp_size = PAGE_SIZE;  	offset = 0; -	err = drm_addmap( dev, offset, warp_size, -			  _DRM_AGP, _DRM_READ_ONLY, & dev_priv->warp ); +	err = drm_addmap(dev, offset, warp_size, +			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->warp);  	if (err) {  		DRM_ERROR("Unable to map WARP microcode: %d\n", err);  		return err;  	}  	offset += warp_size; -	err = drm_addmap( dev, offset, dma_bs->primary_size, -			  _DRM_AGP, _DRM_READ_ONLY, & dev_priv->primary ); +	err = drm_addmap(dev, offset, dma_bs->primary_size, +			 _DRM_AGP, _DRM_READ_ONLY, &dev_priv->primary);  	if (err) {  		DRM_ERROR("Unable to map primary DMA region: %d\n", err);  		return err;  	}  	offset += dma_bs->primary_size; -	err = drm_addmap( dev, offset, secondary_size, -			  _DRM_AGP, 0, & dev->agp_buffer_map ); +	err = drm_addmap(dev, offset, secondary_size, +			 _DRM_AGP, 0, &dev->agp_buffer_map);  	if (err) {  		DRM_ERROR("Unable to map secondary DMA region: %d\n", err);  		return err;  	} -	(void) memset( &req, 0, sizeof(req) ); +	(void)memset(&req, 0, sizeof(req));  	req.count = dma_bs->secondary_bin_count;  	req.size = dma_bs->secondary_bin_size;  	req.flags = _DRM_AGP_BUFFER;  	req.agp_start = offset; -	err = drm_addbufs_agp( dev, & req ); +	err = drm_addbufs_agp(dev, &req);  	if (err) {  		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);  		return err; @@ -563,8 +563,8 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,  #endif  	offset += secondary_size; -	err = drm_addmap( dev, offset, agp_size - offset, -			  _DRM_AGP, 0, & dev_priv->agp_textures ); +	err = drm_addmap(dev, offset, agp_size - offset, +			 _DRM_AGP, 0, &dev_priv->agp_textures);  	if (err) {  		DRM_ERROR("Unable to map AGP texture region: %d\n", err);  		return err; @@ -591,7 +591,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,  /**   * Bootstrap the driver for PCI DMA. - *  + *   * \todo   * The algorithm for decreasing the size of the primary DMA buffer could be   * better.  The size should be rounded up to the nearest page size, then @@ -600,20 +600,21 @@   * \todo   * Determine whether the maximum address passed to drm_pci_alloc is correct.   * The same goes for drm_addbufs_pci. - *  + *   * \sa mga_do_dma_bootstrap, mga_do_agp_dma_bootstrap   */  static int mga_do_pci_dma_bootstrap(struct drm_device * dev,  				    drm_mga_dma_bootstrap_t * dma_bs)  { -	drm_mga_private_t * const dev_priv = (drm_mga_private_t *) dev->dev_private; +	drm_mga_private_t *const dev_priv = +		(drm_mga_private_t *) dev->dev_private;  	unsigned int warp_size = mga_warp_microcode_size(dev_priv);  	unsigned int primary_size;  	unsigned int bin_count;  	int err;  	struct drm_buf_desc req; -	 +  	if (dev->dma == NULL) {  		DRM_ERROR("dev->dma is NULL\n");  		return -EFAULT; @@ -639,9 +640,8 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,  	 * alignment of the primary or secondary DMA buffers.  	 
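The loop that opens the next hunk implements a simple back-off strategy: request the full primary buffer, then halve the size until drm_addmap() succeeds. Factored into a helper, the pattern looks like this (a sketch; the helper name and parameters are invented, the drm_addmap() call is the one the driver uses):

/* Halve the request until it fits; report the size actually mapped. */
static int addmap_backoff(struct drm_device *dev, unsigned int wanted,
			  drm_local_map_t **map, unsigned int *actual)
{
	unsigned int size;
	int err = -ENOMEM;

	for (size = wanted; size != 0; size >>= 1) {
		err = drm_addmap(dev, 0, size, _DRM_CONSISTENT,
				 _DRM_READ_ONLY, map);
		if (!err) {
			*actual = size;
			return 0;
		}
	}
	return err;	/* even the smallest request failed */
}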
*/ -	for ( primary_size = dma_bs->primary_size -	      ; primary_size != 0 -	      ; primary_size >>= 1 ) { +	for (primary_size = dma_bs->primary_size; primary_size != 0; +	     primary_size >>= 1) {  		/* The proper alignment for this mapping is 0x04 */  		err = drm_addmap(dev, 0, primary_size, _DRM_CONSISTENT,  				 _DRM_READ_ONLY, &dev_priv->primary); @@ -656,24 +656,23 @@  	if (dev_priv->primary->size != dma_bs->primary_size) {  		DRM_INFO("Primary DMA buffer size reduced from %u to %u.\n", -			 dma_bs->primary_size,  -			 (unsigned) dev_priv->primary->size); +			 dma_bs->primary_size, +			 (unsigned)dev_priv->primary->size);  		dma_bs->primary_size = dev_priv->primary->size;  	} -	for ( bin_count = dma_bs->secondary_bin_count -	      ; bin_count > 0  -	      ; bin_count-- ) { -		(void) memset( &req, 0, sizeof(req) ); +	for (bin_count = dma_bs->secondary_bin_count; bin_count > 0; +	     bin_count--) { +		(void)memset(&req, 0, sizeof(req));  		req.count = bin_count;  		req.size = dma_bs->secondary_bin_size; -		err = drm_addbufs_pci( dev, & req ); +		err = drm_addbufs_pci(dev, &req);  		if (!err) {  			break;  		}  	} -	 +  	if (bin_count == 0) {  		DRM_ERROR("Unable to add secondary DMA buffers: %d\n", err);  		return err; @@ -696,12 +695,12 @@ static int mga_do_pci_dma_bootstrap(struct drm_device * dev,  } -static int mga_do_dma_bootstrap(struct drm_device * dev, -				drm_mga_dma_bootstrap_t * dma_bs) +static int mga_do_dma_bootstrap(struct drm_device *dev, +				drm_mga_dma_bootstrap_t *dma_bs)  {  	const int is_agp = (dma_bs->agp_mode != 0) && drm_device_is_agp(dev);  	int err; -	drm_mga_private_t * const dev_priv = +	drm_mga_private_t *const dev_priv =  		(drm_mga_private_t *) dev->dev_private; @@ -710,17 +709,17 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,  	/* The first steps are the same for both PCI and AGP based DMA.  Map  	 * the card's MMIO registers and map a status page.  	 */ -	err = drm_addmap( dev, dev_priv->mmio_base, dev_priv->mmio_size, -			  _DRM_REGISTERS, _DRM_READ_ONLY, & dev_priv->mmio ); +	err = drm_addmap(dev, dev_priv->mmio_base, dev_priv->mmio_size, +			 _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);  	if (err) {  		DRM_ERROR("Unable to map MMIO region: %d\n", err);  		return err;  	} -	err = drm_addmap( dev, 0, SAREA_MAX, _DRM_SHM, -			  _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, -			  & dev_priv->status ); +	err = drm_addmap(dev, 0, SAREA_MAX, _DRM_SHM, +			 _DRM_READ_ONLY | _DRM_LOCKED | _DRM_KERNEL, +			 &dev_priv->status);  	if (err) {  		DRM_ERROR("Unable to map status region: %d\n", err);  		return err; @@ -736,7 +735,7 @@ static int mga_do_dma_bootstrap(struct drm_device * dev,  	if (is_agp) {  		err = mga_do_agp_dma_bootstrap(dev, dma_bs);  	} -	 +  	/* If we attempted to initialize the card for AGP DMA but failed,  	 * clean-up any mess that may have been created.  	 */ @@ -768,7 +767,7 @@ int mga_dma_bootstrap(struct drm_device *dev, void *data,  	drm_mga_dma_bootstrap_t *bootstrap = data;  	int err;  	static const int modes[] = { 0, 1, 2, 2, 4, 4, 4, 4 }; -	const drm_mga_private_t * const dev_priv =  +	const drm_mga_private_t *const dev_priv =  		(drm_mga_private_t *) dev->dev_private; @@ -829,7 +828,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)  		return -EINVAL;  	} -	if (! 
dev_priv->used_new_dma_init) { +	if (!dev_priv->used_new_dma_init) {  		dev_priv->dma_access = MGA_PAGPXFER;  		dev_priv->wagp_enable = MGA_WAGP_ENABLE; @@ -855,7 +854,8 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)  			return -EINVAL;  		}  		dev->agp_buffer_token = init->buffers_offset; -		dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset); +		dev->agp_buffer_map = +			drm_core_findmap(dev, init->buffers_offset);  		if (!dev->agp_buffer_map) {  			DRM_ERROR("failed to find dma buffer region!\n");  			return -EINVAL; @@ -898,10 +898,6 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)  	/* Init the primary DMA registers.  	 */  	MGA_WRITE(MGA_PRIMADDRESS, dev_priv->primary->offset | MGA_DMA_GENERAL); -#if 0 -	MGA_WRITE(MGA_PRIMPTR, virt_to_bus((void *)dev_priv->prim.status) | MGA_PRIMPTREN0 |	/* Soft trap, SECEND, SETUPEND */ -		  MGA_PRIMPTREN1);	/* DWGSYNC */ -#endif  	dev_priv->prim.start = (u8 *) dev_priv->primary->handle;  	dev_priv->prim.end = ((u8 *) dev_priv->primary->handle @@ -932,7 +928,7 @@ static int mga_do_init_dma(struct drm_device * dev, drm_mga_init_t * init)  	return 0;  } -static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup) +static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)  {  	int err = 0;  	DRM_DEBUG("\n"); @@ -951,7 +947,7 @@ static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup)  		    && (dev_priv->warp->type != _DRM_CONSISTENT))  			drm_core_ioremapfree(dev_priv->warp, dev); -		if ((dev_priv->primary != NULL)  +		if ((dev_priv->primary != NULL)  		    && (dev_priv->primary->type != _DRM_CONSISTENT))  			drm_core_ioremapfree(dev_priv->primary, dev); @@ -993,14 +989,15 @@ static int mga_do_cleanup_dma(struct drm_device * dev, int full_cleanup)  		memset(&dev_priv->prim, 0, sizeof(dev_priv->prim));  		dev_priv->warp_pipe = 0; -		memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); +		memset(dev_priv->warp_pipe_phys, 0, +		       sizeof(dev_priv->warp_pipe_phys));  		if (dev_priv->head != NULL) {  			mga_freelist_cleanup(dev);  		}  	} -	return 0; +	return err;  }  int mga_dma_init(struct drm_device *dev, void *data, @@ -1015,7 +1012,7 @@ int mga_dma_init(struct drm_device *dev, void *data,  	case MGA_INIT_DMA:  		err = mga_do_init_dma(dev, init);  		if (err) { -			(void) mga_do_cleanup_dma(dev, FULL_CLEANUP); +			(void)mga_do_cleanup_dma(dev, FULL_CLEANUP);  		}  		return err;  	case MGA_CLEANUP_DMA: @@ -1052,7 +1049,7 @@ int mga_dma_flush(struct drm_device *dev, void *data,  #if MGA_DMA_DEBUG  		int ret = mga_do_wait_for_idle(dev_priv);  		if (ret < 0) -			DRM_INFO("%s: -EBUSY\n", __FUNCTION__); +			DRM_INFO("-EBUSY\n");  		return ret;  #else  		return mga_do_wait_for_idle(dev_priv); diff --git a/shared-core/mga_drm.h b/shared-core/mga_drm.h index 15c2dea2..c03d3220 100644 --- a/shared-core/mga_drm.h +++ b/shared-core/mga_drm.h @@ -302,10 +302,10 @@ typedef struct drm_mga_init {  typedef struct drm_mga_dma_bootstrap {  	/**  	 * \name AGP texture region -	 *  +	 *  	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, these fields will  	 * be filled in with the actual AGP texture settings. -	 *  +	 *  	 * \warning  	 * If these fields are non-zero, but dma_mga_dma_bootstrap::agp_mode  	 * is zero, it means that PCI memory (most likely through the use of @@ -319,7 +319,7 @@ typedef struct drm_mga_dma_bootstrap {  	/**  	 * Requested size of the primary DMA region. 
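Read together, these bootstrap fields form a negotiation: user space proposes sizes, and the kernel writes back what it could actually provide. A hedged user-space sketch using libdrm's drmCommandWriteRead(); all of the requested values are arbitrary examples, and the surrounding function is invented:

#include <string.h>
#include "xf86drm.h"
#include "mga_drm.h"

int mga_bootstrap_example(int fd)
{
	drm_mga_dma_bootstrap_t bs;

	memset(&bs, 0, sizeof(bs));
	bs.primary_size = 1024 * 1024;		/* 1 MiB primary buffer */
	bs.secondary_bin_count = 128;		/* the kernel may lower this... */
	bs.secondary_bin_size = 64 * 1024;	/* ...but must not lower this */
	bs.agp_size = 64;			/* MiB of AGP aperture to use */
	bs.agp_mode = 4;			/* request AGP 4x; 0 forces PCI DMA */

	if (drmCommandWriteRead(fd, DRM_MGA_DMA_BOOTSTRAP, &bs, sizeof(bs)))
		return -1;

	/* On return the struct holds the achieved settings; bs.agp_mode == 0
	 * here means the kernel fell back to PCI DMA. */
	return 0;
}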
-	 *  +	 *  	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be  	 * filled in with the actual AGP mode.  If AGP was not available  	 */ @@ -328,18 +328,18 @@ typedef struct drm_mga_dma_bootstrap {  	/**  	 * Requested number of secondary DMA buffers. -	 *  +	 *  	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be  	 * filled in with the actual number of secondary DMA buffers  	 * allocated.  Particularly when PCI DMA is used, this may be  	 * (subtantially) less than the number requested.  	 */  	uint32_t secondary_bin_count; -	 -	 + +  	/**  	 * Requested size of each secondary DMA buffer. -	 *  +	 *  	 * While the kernel \b is free to reduce  	 * dma_mga_dma_bootstrap::secondary_bin_count, it is \b not allowed  	 * to reduce dma_mga_dma_bootstrap::secondary_bin_size. @@ -352,7 +352,7 @@ typedef struct drm_mga_dma_bootstrap {  	 * \c AGPSTAT2_2X, and \c AGPSTAT2_4X are supported.  If this value is  	 * zero, it means that PCI DMA should be used, even if AGP is  	 * possible. -	 *  +	 *  	 * On return from the DRM_MGA_DMA_BOOTSTRAP ioctl, this field will be  	 * filled in with the actual AGP mode.  If AGP was not available  	 * (i.e., PCI DMA was used), this value will be zero. diff --git a/shared-core/mga_drv.h b/shared-core/mga_drv.h index b961155a..bf3be808 100644 --- a/shared-core/mga_drv.h +++ b/shared-core/mga_drv.h @@ -109,7 +109,7 @@ typedef struct drm_mga_private {  	/**  	 * \name MMIO region parameters. -	 *  +	 *  	 * \sa drm_mga_private_t::mmio  	 */  	/*@{*/ @@ -144,7 +144,7 @@ typedef struct drm_mga_private {  	drm_local_map_t *warp;  	drm_local_map_t *primary;  	drm_local_map_t *agp_textures; -	 +  	unsigned long agp_handle;  	unsigned int agp_size;  } drm_mga_private_t; @@ -220,8 +220,8 @@ static inline u32 _MGA_READ(u32 * addr)  #define MGA_WRITE( reg, val )	DRM_WRITE32(dev_priv->mmio, (reg), (val))  #endif -#define DWGREG0 	0x1c00 -#define DWGREG0_END 	0x1dff +#define DWGREG0		0x1c00 +#define DWGREG0_END	0x1dff  #define DWGREG1		0x2c00  #define DWGREG1_END	0x2dff @@ -253,7 +253,7 @@ do {									\  		} else if ( dev_priv->prim.space <			\  			    dev_priv->prim.high_mark ) {		\  			if ( MGA_DMA_DEBUG )				\ -				DRM_INFO( "%s: wrap...\n", __FUNCTION__ );	\ +				DRM_INFO( "wrap...\n");		\  			return -EBUSY;			\  		}							\  	}								\ @@ -264,7 +264,7 @@ do {									\  	if ( test_bit( 0, &dev_priv->prim.wrapped ) ) {			\  		if ( mga_do_wait_for_idle( dev_priv ) < 0 ) {		\  			if ( MGA_DMA_DEBUG )				\ -				DRM_INFO( "%s: wrap...\n", __FUNCTION__ );	\ +				DRM_INFO( "wrap...\n");		\  			return -EBUSY;			\  		}							\  		mga_do_dma_wrap_end( dev_priv );			\ @@ -284,8 +284,7 @@ do {									\  #define BEGIN_DMA( n )							\  do {									\  	if ( MGA_VERBOSE ) {						\ -		DRM_INFO( "BEGIN_DMA( %d ) in %s\n",			\ -			  (n), __FUNCTION__ );				\ +		DRM_INFO( "BEGIN_DMA( %d )\n", (n) );		\  		DRM_INFO( "   space=0x%x req=0x%Zx\n",			\  			  dev_priv->prim.space, (n) * DMA_BLOCK_SIZE );	\  	}								\ @@ -296,7 +295,7 @@ do {									\  #define BEGIN_DMA_WRAP()						\  do {									\  	if ( MGA_VERBOSE ) {						\ -		DRM_INFO( "BEGIN_DMA() in %s\n", __FUNCTION__ );		\ +		DRM_INFO( "BEGIN_DMA()\n" );				\  		DRM_INFO( "   space=0x%x\n", dev_priv->prim.space );	\  	}								\  	prim = dev_priv->prim.start;					\ @@ -315,7 +314,7 @@ do {									\  #define FLUSH_DMA()							\  do {									\  	if ( 0 ) {							\ -		DRM_INFO( "%s:\n", __FUNCTION__ );				\ +		DRM_INFO( "\n" );					\  		DRM_INFO( "   tail=0x%06x head=0x%06lx\n",		\  			  
dev_priv->prim.tail,				\  			  MGA_READ( MGA_PRIMADDRESS ) -			\ @@ -398,22 +397,22 @@ do {									\  #define MGA_VINTCLR			(1 << 4)  #define MGA_VINTEN			(1 << 5) -#define MGA_ALPHACTRL 			0x2c7c -#define MGA_AR0 			0x1c60 -#define MGA_AR1 			0x1c64 -#define MGA_AR2 			0x1c68 -#define MGA_AR3 			0x1c6c -#define MGA_AR4 			0x1c70 -#define MGA_AR5 			0x1c74 -#define MGA_AR6 			0x1c78 +#define MGA_ALPHACTRL			0x2c7c +#define MGA_AR0				0x1c60 +#define MGA_AR1				0x1c64 +#define MGA_AR2				0x1c68 +#define MGA_AR3				0x1c6c +#define MGA_AR4				0x1c70 +#define MGA_AR5				0x1c74 +#define MGA_AR6				0x1c78  #define MGA_CXBNDRY			0x1c80 -#define MGA_CXLEFT 			0x1ca0 +#define MGA_CXLEFT			0x1ca0  #define MGA_CXRIGHT			0x1ca4 -#define MGA_DMAPAD 			0x1c54 -#define MGA_DSTORG 			0x2cb8 -#define MGA_DWGCTL 			0x1c00 +#define MGA_DMAPAD			0x1c54 +#define MGA_DSTORG			0x2cb8 +#define MGA_DWGCTL			0x1c00  #	define MGA_OPCOD_MASK			(15 << 0)  #	define MGA_OPCOD_TRAP			(4 << 0)  #	define MGA_OPCOD_TEXTURE_TRAP		(6 << 0) @@ -459,27 +458,27 @@ do {									\  #	define MGA_CLIPDIS			(1 << 31)  #define MGA_DWGSYNC			0x2c4c -#define MGA_FCOL 			0x1c24 -#define MGA_FIFOSTATUS 			0x1e10 -#define MGA_FOGCOL 			0x1cf4 +#define MGA_FCOL			0x1c24 +#define MGA_FIFOSTATUS			0x1e10 +#define MGA_FOGCOL			0x1cf4  #define MGA_FXBNDRY			0x1c84 -#define MGA_FXLEFT 			0x1ca8 +#define MGA_FXLEFT			0x1ca8  #define MGA_FXRIGHT			0x1cac -#define MGA_ICLEAR 			0x1e18 +#define MGA_ICLEAR			0x1e18  #	define MGA_SOFTRAPICLR			(1 << 0)  #	define MGA_VLINEICLR			(1 << 5) -#define MGA_IEN 			0x1e1c +#define MGA_IEN				0x1e1c  #	define MGA_SOFTRAPIEN			(1 << 0)  #	define MGA_VLINEIEN			(1 << 5) -#define MGA_LEN 			0x1c5c +#define MGA_LEN				0x1c5c  #define MGA_MACCESS			0x1c04 -#define MGA_PITCH 			0x1c8c -#define MGA_PLNWT 			0x1c1c -#define MGA_PRIMADDRESS 		0x1e58 +#define MGA_PITCH			0x1c8c +#define MGA_PLNWT			0x1c1c +#define MGA_PRIMADDRESS			0x1e58  #	define MGA_DMA_GENERAL			(0 << 0)  #	define MGA_DMA_BLIT			(1 << 0)  #	define MGA_DMA_VECTOR			(2 << 0) @@ -491,43 +490,43 @@ do {									\  #	define MGA_PRIMPTREN0			(1 << 0)  #	define MGA_PRIMPTREN1			(1 << 1) -#define MGA_RST 			0x1e40 +#define MGA_RST				0x1e40  #	define MGA_SOFTRESET			(1 << 0)  #	define MGA_SOFTEXTRST			(1 << 1) -#define MGA_SECADDRESS 			0x2c40 -#define MGA_SECEND 			0x2c44 -#define MGA_SETUPADDRESS 		0x2cd0 -#define MGA_SETUPEND 			0x2cd4 +#define MGA_SECADDRESS			0x2c40 +#define MGA_SECEND			0x2c44 +#define MGA_SETUPADDRESS		0x2cd0 +#define MGA_SETUPEND			0x2cd4  #define MGA_SGN				0x1c58  #define MGA_SOFTRAP			0x2c48 -#define MGA_SRCORG 			0x2cb4 +#define MGA_SRCORG			0x2cb4  #	define MGA_SRMMAP_MASK			(1 << 0)  #	define MGA_SRCMAP_FB			(0 << 0)  #	define MGA_SRCMAP_SYSMEM		(1 << 0)  #	define MGA_SRCACC_MASK			(1 << 1)  #	define MGA_SRCACC_PCI			(0 << 1)  #	define MGA_SRCACC_AGP			(1 << 1) -#define MGA_STATUS 			0x1e14 +#define MGA_STATUS			0x1e14  #	define MGA_SOFTRAPEN			(1 << 0)  #	define MGA_VSYNCPEN			(1 << 4)  #	define MGA_VLINEPEN			(1 << 5)  #	define MGA_DWGENGSTS			(1 << 16)  #	define MGA_ENDPRDMASTS			(1 << 17)  #define MGA_STENCIL			0x2cc8 -#define MGA_STENCILCTL 			0x2ccc +#define MGA_STENCILCTL			0x2ccc -#define MGA_TDUALSTAGE0 		0x2cf8 -#define MGA_TDUALSTAGE1 		0x2cfc -#define MGA_TEXBORDERCOL 		0x2c5c -#define MGA_TEXCTL 			0x2c30 +#define MGA_TDUALSTAGE0			0x2cf8 +#define MGA_TDUALSTAGE1			0x2cfc +#define MGA_TEXBORDERCOL		0x2c5c +#define MGA_TEXCTL			0x2c30  #define MGA_TEXCTL2			0x2c3c  #	define MGA_DUALTEX			(1 << 7)  #	define 
MGA_G400_TC2_MAGIC		(1 << 15)  #	define MGA_MAP1_ENABLE			(1 << 31) -#define MGA_TEXFILTER 			0x2c58 -#define MGA_TEXHEIGHT 			0x2c2c -#define MGA_TEXORG 			0x2c24 +#define MGA_TEXFILTER			0x2c58 +#define MGA_TEXHEIGHT			0x2c2c +#define MGA_TEXORG			0x2c24  #	define MGA_TEXORGMAP_MASK		(1 << 0)  #	define MGA_TEXORGMAP_FB			(0 << 0)  #	define MGA_TEXORGMAP_SYSMEM		(1 << 0) @@ -538,45 +537,45 @@ do {									\  #define MGA_TEXORG2			0x2ca8  #define MGA_TEXORG3			0x2cac  #define MGA_TEXORG4			0x2cb0 -#define MGA_TEXTRANS 			0x2c34 -#define MGA_TEXTRANSHIGH 		0x2c38 -#define MGA_TEXWIDTH 			0x2c28 - -#define MGA_WACCEPTSEQ 			0x1dd4 -#define MGA_WCODEADDR 			0x1e6c -#define MGA_WFLAG 			0x1dc4 -#define MGA_WFLAG1 			0x1de0 +#define MGA_TEXTRANS			0x2c34 +#define MGA_TEXTRANSHIGH		0x2c38 +#define MGA_TEXWIDTH			0x2c28 + +#define MGA_WACCEPTSEQ			0x1dd4 +#define MGA_WCODEADDR			0x1e6c +#define MGA_WFLAG			0x1dc4 +#define MGA_WFLAG1			0x1de0  #define MGA_WFLAGNB			0x1e64 -#define MGA_WFLAGNB1 			0x1e08 +#define MGA_WFLAGNB1			0x1e08  #define MGA_WGETMSB			0x1dc8 -#define MGA_WIADDR 			0x1dc0 +#define MGA_WIADDR			0x1dc0  #define MGA_WIADDR2			0x1dd8  #	define MGA_WMODE_SUSPEND		(0 << 0)  #	define MGA_WMODE_RESUME			(1 << 0)  #	define MGA_WMODE_JUMP			(2 << 0)  #	define MGA_WMODE_START			(3 << 0)  #	define MGA_WAGP_ENABLE			(1 << 2) -#define MGA_WMISC 			0x1e70 +#define MGA_WMISC			0x1e70  #	define MGA_WUCODECACHE_ENABLE		(1 << 0)  #	define MGA_WMASTER_ENABLE		(1 << 1)  #	define MGA_WCACHEFLUSH_ENABLE		(1 << 3)  #define MGA_WVRTXSZ			0x1dcc -#define MGA_YBOT 			0x1c9c -#define MGA_YDST 			0x1c90 +#define MGA_YBOT			0x1c9c +#define MGA_YDST			0x1c90  #define MGA_YDSTLEN			0x1c88  #define MGA_YDSTORG			0x1c94 -#define MGA_YTOP 			0x1c98 +#define MGA_YTOP			0x1c98 -#define MGA_ZORG 			0x1c0c +#define MGA_ZORG			0x1c0c  /* This finishes the current batch of commands   */ -#define MGA_EXEC 			0x0100 +#define MGA_EXEC			0x0100  /* AGP PLL encoding (for G200 only).   */ -#define MGA_AGP_PLL 			0x1e4c +#define MGA_AGP_PLL			0x1e4c  #	define MGA_AGP2XPLL_DISABLE		(0 << 0)  #	define MGA_AGP2XPLL_ENABLE		(1 << 0) diff --git a/shared-core/mga_irq.c b/shared-core/mga_irq.c index 0d4b473c..4fe86322 100644 --- a/shared-core/mga_irq.c +++ b/shared-core/mga_irq.c @@ -70,7 +70,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)  	/* SOFTRAP interrupt */  	if (status & MGA_SOFTRAPEN) {  		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); -		const u32 prim_end   = MGA_READ(MGA_PRIMEND); +		const u32 prim_end = MGA_READ(MGA_PRIMEND);  		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)  		/* In addition to clearing the interrupt-pending bit, we  		 * have to write to MGA_PRIMEND to re-start the DMA operation.  		 */ -		if ( (prim_start & ~0x03) != (prim_end & ~0x03) ) { +		if ((prim_start & ~0x03) != (prim_end & ~0x03)) {  			MGA_WRITE(MGA_PRIMEND, prim_end);  		} @@ -87,7 +87,9 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)  		handled = 1;  	} -	return (handled) ? 
IRQ_HANDLED : IRQ_NONE; +	if (handled) +		return IRQ_HANDLED; +	return IRQ_NONE;  }  int mga_enable_vblank(struct drm_device *dev, int crtc) @@ -175,6 +177,6 @@ void mga_driver_irq_uninstall(struct drm_device * dev)  	/* Disable *all* interrupts */  	MGA_WRITE(MGA_IEN, 0); -	 +  	dev->irq_enabled = 0;  } diff --git a/shared-core/mga_state.c b/shared-core/mga_state.c index 70b7caa0..2da108be 100644 --- a/shared-core/mga_state.c +++ b/shared-core/mga_state.c @@ -62,8 +62,7 @@ static void mga_emit_clip_rect(drm_mga_private_t * dev_priv,  	}  	DMA_BLOCK(MGA_DMAPAD, 0x00000000,  		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1, -		  MGA_YTOP, box->y1 * pitch, -		  MGA_YBOT, (box->y2 - 1) * pitch); +		  MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);  	ADVANCE_DMA();  } @@ -78,18 +77,15 @@ static __inline__ void mga_g200_emit_context(drm_mga_private_t * dev_priv)  	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,  		  MGA_MACCESS, ctx->maccess, -		  MGA_PLNWT, ctx->plnwt, -		  MGA_DWGCTL, ctx->dwgctl); +		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);  	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,  		  MGA_FOGCOL, ctx->fogcolor, -		  MGA_WFLAG, ctx->wflag, -		  MGA_ZORG, dev_priv->depth_offset); +		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);  	DMA_BLOCK(MGA_FCOL, ctx->fcol,  		  MGA_DMAPAD, 0x00000000, -		  MGA_DMAPAD, 0x00000000, -		  MGA_DMAPAD, 0x00000000); +		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);  	ADVANCE_DMA();  } @@ -162,8 +158,8 @@ static __inline__ void mga_g400_emit_tex0(drm_mga_private_t * dev_priv)  	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];  	DMA_LOCALS; -/*  	printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ -/*  	       tex->texctl, tex->texctl2); */ +/*	printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */ +/*	       tex->texctl, tex->texctl2); */  	BEGIN_DMA(6); @@ -206,8 +202,8 @@ static __inline__ void mga_g400_emit_tex1(drm_mga_private_t * dev_priv)  	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];  	DMA_LOCALS; -/*  	printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg,  */ -/*  	       tex->texctl, tex->texctl2); */ +/*	printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg,  */ +/*	       tex->texctl, tex->texctl2); */  	BEGIN_DMA(5); @@ -276,7 +272,7 @@ static __inline__ void mga_g400_emit_pipe(drm_mga_private_t * dev_priv)  	unsigned int pipe = sarea_priv->warp_pipe;  	DMA_LOCALS; -/*  	printk("mga_g400_emit_pipe %x\n", pipe); */ +/*	printk("mga_g400_emit_pipe %x\n", pipe); */  	BEGIN_DMA(10); @@ -648,7 +644,7 @@ static void mga_dma_dispatch_swap(struct drm_device * dev)  	FLUSH_DMA(); -	DRM_DEBUG("%s... done.\n", __FUNCTION__); +	DRM_DEBUG("... 
done.\n");  }  static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * buf) @@ -660,7 +656,7 @@ static void mga_dma_dispatch_vertex(struct drm_device * dev, struct drm_buf * bu  	u32 length = (u32) buf->used;  	int i = 0;  	DMA_LOCALS; -	DRM_DEBUG("vertex: buf=%d used=%d\n", buf->idx, buf->used); +	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);  	if (buf->used) {  		buf_priv->dispatched = 1; @@ -707,7 +703,7 @@ static void mga_dma_dispatch_indices(struct drm_device * dev, struct drm_buf * b  	u32 address = (u32) buf->bus_address;  	int i = 0;  	DMA_LOCALS; -	DRM_DEBUG("indices: buf=%d start=%d end=%d\n", buf->idx, start, end); +	DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);  	if (start != end) {  		buf_priv->dispatched = 1; @@ -992,7 +988,7 @@ static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *fi  #if 0  	if (mga_do_wait_for_idle(dev_priv) < 0) {  		if (MGA_DMA_DEBUG) -			DRM_INFO("%s: -EBUSY\n", __FUNCTION__); +			DRM_INFO("-EBUSY\n");  		return -EBUSY;  	}  #endif @@ -1051,7 +1047,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil  	int value;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -1083,7 +1079,7 @@ static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *fi  	DMA_LOCALS;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -1112,7 +1108,7 @@ static int mga_wait_fence(struct drm_device *dev, void *data, struct drm_file *f  	u32 *fence = data;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} diff --git a/shared-core/nouveau_dma.c b/shared-core/nouveau_dma.c index b33df588..dff786d4 100644 --- a/shared-core/nouveau_dma.c +++ b/shared-core/nouveau_dma.c @@ -133,10 +133,10 @@ nouveau_dma_channel_takedown(struct drm_device *dev)  #define RING_SKIPS 8 -#define READ_GET() ((NV_READ(NV03_FIFO_REGS_DMAGET(dchan->chan->id)) -         \ -		     dchan->chan->pushbuf_base) >> 2) +#define READ_GET() ((NV_READ(dchan->chan->get) -                               \ +		    dchan->chan->pushbuf_base) >> 2)  #define WRITE_PUT(val) do {                                                    \ -	NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id),                       \ +	NV_WRITE(dchan->chan->put,                                             \  		 ((val) << 2) + dchan->chan->pushbuf_base);                    \  } while(0) @@ -177,4 +177,3 @@ nouveau_dma_wait(struct drm_device *dev, int size)  	return 0;  } - diff --git a/shared-core/nouveau_dma.h b/shared-core/nouveau_dma.h index 5e51c1c4..ce3c58cb 100644 --- a/shared-core/nouveau_dma.h +++ b/shared-core/nouveau_dma.h @@ -89,10 +89,8 @@ typedef enum {  	if (dchan->cur != dchan->put) {                                        \  		DRM_MEMORYBARRIER();                                           \  		dchan->put = dchan->cur;                                       \ -		NV_WRITE(NV03_FIFO_REGS_DMAPUT(dchan->chan->id),               \ -			 (dchan->put<<2));                                     \ +		NV_WRITE(dchan->chan->put, dchan->put << 2);                   \  	}                                                                      \  } while(0)  #endif - diff --git a/shared-core/nouveau_drm.h b/shared-core/nouveau_drm.h 
index 988d467a..3bf40089 100644 --- a/shared-core/nouveau_drm.h +++ b/shared-core/nouveau_drm.h @@ -158,4 +158,3 @@ struct drm_nouveau_sarea {  #define DRM_NOUVEAU_MEM_FREE           0x09  #endif /* __NOUVEAU_DRM_H__ */ - diff --git a/shared-core/nouveau_drv.h b/shared-core/nouveau_drv.h index 95878345..4184aa5b 100644 --- a/shared-core/nouveau_drv.h +++ b/shared-core/nouveau_drv.h @@ -59,7 +59,7 @@ enum nouveau_flags {  };  #define NVOBJ_ENGINE_SW		0 -#define NVOBJ_ENGINE_GR  	1 +#define NVOBJ_ENGINE_GR		1  #define NVOBJ_ENGINE_INT	0xdeadbeef  #define NVOBJ_FLAG_ALLOW_NO_REFS	(1 << 0) @@ -106,11 +106,20 @@ struct nouveau_channel  	/* mapping of the regs controling the fifo */  	drm_local_map_t *regs; +	/* Fencing */ +	uint32_t next_sequence; +  	/* DMA push buffer */  	struct nouveau_gpuobj_ref *pushbuf;  	struct mem_block          *pushbuf_mem;  	uint32_t                   pushbuf_base; +	/* FIFO user control regs */ +	uint32_t user, user_size; +	uint32_t put; +	uint32_t get; +	uint32_t ref_cnt; +  	/* Notifier memory */  	struct mem_block *notifier_block;  	struct mem_block *notifier_heap; @@ -190,9 +199,13 @@ struct nouveau_fb_engine {  struct nouveau_fifo_engine {  	void *priv; +	int  channels; +  	int  (*init)(struct drm_device *);  	void (*takedown)(struct drm_device *); +	int  (*channel_id)(struct drm_device *); +  	int  (*create_context)(struct nouveau_channel *);  	void (*destroy_context)(struct nouveau_channel *);  	int  (*load_context)(struct nouveau_channel *); @@ -218,6 +231,7 @@ struct nouveau_engine {  	struct nouveau_fifo_engine    fifo;  }; +#define NOUVEAU_MAX_CHANNEL_NR 128  struct drm_nouveau_private {  	enum {  		NOUVEAU_CARD_INIT_DOWN, @@ -225,6 +239,8 @@ struct drm_nouveau_private {  		NOUVEAU_CARD_INIT_FAILED  	} init_state; +	int ttm; +  	/* the card type, takes NV_* as values */  	int card_type;  	/* exact chipset, derived from NV_PMC_BOOT_0 */ @@ -236,7 +252,7 @@ struct drm_nouveau_private {  	drm_local_map_t *ramin; /* NV40 onwards */  	int fifo_alloc_count; -	struct nouveau_channel *fifos[NV_MAX_FIFO_NUMBER]; +	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];  	struct nouveau_engine Engine;  	struct nouveau_drm_channel channel; @@ -344,6 +360,7 @@ extern struct mem_block* nouveau_mem_alloc(struct drm_device *,  					   int flags, struct drm_file *);  extern void nouveau_mem_free(struct drm_device *dev, struct mem_block*);  extern int  nouveau_mem_init(struct drm_device *); +extern int  nouveau_mem_init_ttm(struct drm_device *);  extern void nouveau_mem_close(struct drm_device *);  /* nouveau_notifier.c */ @@ -358,7 +375,6 @@ extern int  nouveau_ioctl_notifier_free(struct drm_device *, void *data,  /* nouveau_fifo.c */  extern int  nouveau_fifo_init(struct drm_device *); -extern int  nouveau_fifo_number(struct drm_device *);  extern int  nouveau_fifo_ctx_size(struct drm_device *);  extern void nouveau_fifo_cleanup(struct drm_device *, struct drm_file *);  extern int  nouveau_fifo_owner(struct drm_device *, struct drm_file *, @@ -446,12 +462,14 @@ extern int  nv40_fb_init(struct drm_device *);  extern void nv40_fb_takedown(struct drm_device *);  /* nv04_fifo.c */ +extern int  nv04_fifo_channel_id(struct drm_device *);  extern int  nv04_fifo_create_context(struct nouveau_channel *);  extern void nv04_fifo_destroy_context(struct nouveau_channel *);  extern int  nv04_fifo_load_context(struct nouveau_channel *);  extern int  nv04_fifo_save_context(struct nouveau_channel *);  /* nv10_fifo.c */ +extern int  nv10_fifo_channel_id(struct drm_device *);  extern int  
nv10_fifo_create_context(struct nouveau_channel *);  extern void nv10_fifo_destroy_context(struct nouveau_channel *);  extern int  nv10_fifo_load_context(struct nouveau_channel *); @@ -467,6 +485,7 @@ extern int  nv40_fifo_save_context(struct nouveau_channel *);  /* nv50_fifo.c */  extern int  nv50_fifo_init(struct drm_device *);  extern void nv50_fifo_takedown(struct drm_device *); +extern int  nv50_fifo_channel_id(struct drm_device *);  extern int  nv50_fifo_create_context(struct nouveau_channel *);  extern void nv50_fifo_destroy_context(struct nouveau_channel *);  extern int  nv50_fifo_load_context(struct nouveau_channel *); @@ -553,6 +572,13 @@ extern void nv04_timer_takedown(struct drm_device *);  extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,  				 unsigned long arg); +/* nouveau_buffer.c */ +extern struct drm_bo_driver nouveau_bo_driver; + +/* nouveau_fence.c */ +extern struct drm_fence_driver nouveau_fence_driver; +extern void nouveau_fence_handler(struct drm_device *dev, int channel); +  #if defined(__powerpc__)  #define NV_READ(reg)        in_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) )  #define NV_WRITE(reg,val)   out_be32((void __iomem *)(dev_priv->mmio)->handle + (reg) , (val) ) @@ -574,4 +600,3 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,  #define INSTANCE_WR(o,i,v) NV_WI32((o)->im_pramin->start + ((i)<<2), (v))  #endif /* __NOUVEAU_DRV_H__ */ - diff --git a/shared-core/nouveau_fifo.c b/shared-core/nouveau_fifo.c index f82d130b..d00f1938 100644 --- a/shared-core/nouveau_fifo.c +++ b/shared-core/nouveau_fifo.c @@ -1,4 +1,4 @@ -/*  +/*   * Copyright 2005-2006 Stephane Marchesin   * All Rights Reserved.   * @@ -28,22 +28,6 @@  #include "nouveau_drm.h" -/* returns the number of hw fifos */ -int nouveau_fifo_number(struct drm_device *dev) -{ -	struct drm_nouveau_private *dev_priv=dev->dev_private; -	switch(dev_priv->card_type) -	{ -		case NV_04: -		case NV_05: -			return 16; -		case NV_50: -			return 128; -		default: -			return 32; -	} -} -  /* returns the size of fifo context */  int nouveau_fifo_ctx_size(struct drm_device *dev)  { @@ -63,7 +47,7 @@ int nouveau_fifo_ctx_size(struct drm_device *dev)  /* voir nv_xaa.c : NVResetGraphics   * mémoire mappée par nv_driver.c : NVMapMem - * voir nv_driver.c : NVPreInit  + * voir nv_driver.c : NVPreInit   */  static int nouveau_fifo_instmem_configure(struct drm_device *dev) @@ -71,7 +55,7 @@ static int nouveau_fifo_instmem_configure(struct drm_device *dev)  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	NV_WRITE(NV03_PFIFO_RAMHT, -			(0x03 << 24) /* search 128 */ |  +			(0x03 << 24) /* search 128 */ |  			((dev_priv->ramht_bits - 9) << 16) |  			(dev_priv->ramht_offset >> 8)  			); @@ -166,7 +150,7 @@ int nouveau_fifo_init(struct drm_device *dev)  				      NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_4 |  #ifdef __BIG_ENDIAN  				      NV_PFIFO_CACHE1_BIG_ENDIAN | -#endif				       +#endif  				      0x00000000);  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001); @@ -282,18 +266,19 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,  	/*  	 * Alright, here is the full story -	 * Nvidia cards have multiple hw fifo contexts (praise them for that,  +	 * Nvidia cards have multiple hw fifo contexts (praise them for that,  	 * no complicated crash-prone context switches) -	 * We allocate a new context for each app and let it write to it directly  +	 * We allocate a new context for each app and let it write to it directly  	 * (woo, full userspace 
command submission !)  	 * When there are no more contexts, you lost  	 */ -	for(channel=0; channel<nouveau_fifo_number(dev); channel++) { +	for (channel = 0; channel < engine->fifo.channels; channel++) {  		if (dev_priv->fifos[channel] == NULL)  			break;  	} +  	/* no more fifos. you lost. */ -	if (channel==nouveau_fifo_number(dev)) +	if (channel == engine->fifo.channels)  		return -EINVAL;  	dev_priv->fifos[channel] = drm_calloc(1, sizeof(struct nouveau_channel), @@ -309,6 +294,28 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,  	DRM_INFO("Allocating FIFO number %d\n", channel); +	/* Locate channel's user control regs */ +	if (dev_priv->card_type < NV_40) { +		chan->user = NV03_USER(channel); +		chan->user_size = NV03_USER_SIZE; +		chan->put = NV03_USER_DMA_PUT(channel); +		chan->get = NV03_USER_DMA_GET(channel); +		chan->ref_cnt = NV03_USER_REF_CNT(channel); +	} else +	if (dev_priv->card_type < NV_50) { +		chan->user = NV40_USER(channel); +		chan->user_size = NV40_USER_SIZE; +		chan->put = NV40_USER_DMA_PUT(channel); +		chan->get = NV40_USER_DMA_GET(channel); +		chan->ref_cnt = NV40_USER_REF_CNT(channel); +	} else { +		chan->user = NV50_USER(channel); +		chan->user_size = NV50_USER_SIZE; +		chan->put = NV50_USER_DMA_PUT(channel); +		chan->get = NV50_USER_DMA_GET(channel); +		chan->ref_cnt = NV50_USER_REF_CNT(channel); +	} +  	/* Allocate space for per-channel fixed notifier memory */  	ret = nouveau_notifier_init_channel(chan);  	if (ret) { @@ -352,14 +359,11 @@ nouveau_fifo_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,  		return ret;  	} -	/* setup channel's default get/put values */ -	if (dev_priv->card_type < NV_50) { -		NV_WRITE(NV03_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base); -		NV_WRITE(NV03_FIFO_REGS_DMAGET(channel), chan->pushbuf_base); -	} else { -		NV_WRITE(NV50_FIFO_REGS_DMAPUT(channel), chan->pushbuf_base); -		NV_WRITE(NV50_FIFO_REGS_DMAGET(channel), chan->pushbuf_base); -	} +	/* setup channel's default get/put values +	 * XXX: quite possibly extremely pointless.. +	 */ +	NV_WRITE(chan->get, chan->pushbuf_base); +	NV_WRITE(chan->put, chan->pushbuf_base);  	/* If this is the first channel, setup PFIFO ourselves.  For any  	 * other case, the GPU will handle this when it switches contexts. @@ -398,9 +402,37 @@ void nouveau_fifo_free(struct nouveau_channel *chan)  	struct drm_device *dev = chan->dev;  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	struct nouveau_engine *engine = &dev_priv->Engine; +	uint64_t t_start;  	DRM_INFO("%s: freeing fifo %d\n", __func__, chan->id); +	/* Disable channel switching; if this channel isn't currently +	 * active, re-enable switching if there are still pending commands. +	 * We really should do a manual context switch here, but I'm +	 * not sure I trust our ability to do this reliably yet.. +	 */ +	NV_WRITE(NV03_PFIFO_CACHES, 0); +	if (engine->fifo.channel_id(dev) != chan->id && +	    NV_READ(chan->get) != NV_READ(chan->put)) { +		NV_WRITE(NV03_PFIFO_CACHES, 1); +	} + +	/* Give the channel a chance to idle, wait up to 2s (hopefully) */ +	t_start = engine->timer.read(dev); +	while (NV_READ(chan->get) != NV_READ(chan->put) || +	       NV_READ(NV03_PFIFO_CACHE1_GET) != +	       NV_READ(NV03_PFIFO_CACHE1_PUT)) { +		if (engine->timer.read(dev) - t_start > 2000000000ULL) { +			DRM_ERROR("Failed to idle channel %d before destroy. " +				  "Prepare for strangeness..\n", chan->id); +			break; +		} +	} + +	/*XXX: Maybe should wait for PGRAPH to finish with the stuff it fetched +	 *     from CACHE1 too? 
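The drain loop above is a bounded poll: spin on the channel's GET/PUT pointers and give up after two seconds measured through the PTIMER hook. The same idiom as a reusable helper (hypothetical; it relies only on the NV_READ and engine->timer.read() interfaces already used above, and ignores the CACHE1 pointers for brevity):

/* Bounded-poll sketch: wait until the channel's GET catches up with PUT,
 * or give up after `timeout` nanoseconds.  Returns 0 once idle. */
static int nouveau_channel_idle_poll(struct nouveau_channel *chan,
				     uint64_t timeout)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->Engine;
	uint64_t t_start = engine->timer.read(dev);

	while (NV_READ(chan->get) != NV_READ(chan->put)) {
		if (engine->timer.read(dev) - t_start > timeout)
			return -EBUSY;	/* channel wedged or very busy */
	}
	return 0;
}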
+	 */ +  	/* disable the fifo caches */  	NV_WRITE(NV03_PFIFO_CACHES, 0x00000000);  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUSH, NV_READ(NV04_PFIFO_CACHE1_DMA_PUSH)&(~0x1)); @@ -408,14 +440,12 @@ void nouveau_fifo_free(struct nouveau_channel *chan)  	NV_WRITE(NV04_PFIFO_CACHE1_PULL0, 0x00000000);  	/* stop the fifo, otherwise it could be running and -	 * it will crash when removing gpu objects */ -	if (dev_priv->card_type < NV_50) { -		NV_WRITE(NV03_FIFO_REGS_DMAPUT(chan->id), chan->pushbuf_base); -		NV_WRITE(NV03_FIFO_REGS_DMAGET(chan->id), chan->pushbuf_base); -	} else { -		NV_WRITE(NV50_FIFO_REGS_DMAPUT(chan->id), chan->pushbuf_base); -		NV_WRITE(NV50_FIFO_REGS_DMAGET(chan->id), chan->pushbuf_base); -	} +	 * it will crash when removing gpu objects +	 *XXX: from real-world evidence, absolutely useless.. +	 */ +	NV_WRITE(chan->get, chan->pushbuf_base); +	NV_WRITE(chan->put, chan->pushbuf_base); +  	// FIXME XXX needs more code  	engine->fifo.destroy_context(chan); @@ -451,10 +481,11 @@ void nouveau_fifo_free(struct nouveau_channel *chan)  void nouveau_fifo_cleanup(struct drm_device *dev, struct drm_file *file_priv)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine;  	int i;  	DRM_DEBUG("clearing FIFO enables from file_priv\n"); -	for(i = 0; i < nouveau_fifo_number(dev); i++) { +	for(i = 0; i < engine->fifo.channels; i++) {  		struct nouveau_channel *chan = dev_priv->fifos[i];  		if (chan && chan->file_priv == file_priv) @@ -467,8 +498,9 @@ nouveau_fifo_owner(struct drm_device *dev, struct drm_file *file_priv,  		   int channel)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine; -	if (channel >= nouveau_fifo_number(dev)) +	if (channel >= engine->fifo.channels)  		return 0;  	if (dev_priv->fifos[channel] == NULL)  		return 0; @@ -508,14 +540,8 @@ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,  	/* make the fifo available to user space */  	/* first, the fifo control regs */ -	init->ctrl = dev_priv->mmio->offset; -	if (dev_priv->card_type < NV_50) { -		init->ctrl      += NV03_FIFO_REGS(init->channel); -		init->ctrl_size  = NV03_FIFO_REGS_SIZE; -	} else { -		init->ctrl      += NV50_FIFO_REGS(init->channel); -		init->ctrl_size  = NV50_FIFO_REGS_SIZE; -	} +	init->ctrl = dev_priv->mmio->offset + chan->user; +	init->ctrl_size = chan->user_size;  	res = drm_addmap(dev, init->ctrl, init->ctrl_size, _DRM_REGISTERS,  			 0, &chan->regs);  	if (res != 0) diff --git a/shared-core/nouveau_irq.c b/shared-core/nouveau_irq.c index e4b76766..bceb81ab 100644 --- a/shared-core/nouveau_irq.c +++ b/shared-core/nouveau_irq.c @@ -70,6 +70,7 @@ static void  nouveau_fifo_irq_handler(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine;  	uint32_t status;  	while ((status = NV_READ(NV03_PFIFO_INTR_0))) { @@ -77,14 +78,13 @@ nouveau_fifo_irq_handler(struct drm_device *dev)  		NV_WRITE(NV03_PFIFO_CACHES, 0); -		chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & -				(nouveau_fifo_number(dev) - 1); +		chid = engine->fifo.channel_id(dev);  		get  = NV_READ(NV03_PFIFO_CACHE1_GET);  		if (status & NV_PFIFO_INTR_CACHE_ERROR) {  			uint32_t mthd, data;  			int ptr; -			 +  			ptr = get >> 2;  			if (dev_priv->card_type < NV_40) {  				mthd = NV_READ(NV04_PFIFO_CACHE1_METHOD(ptr)); @@ -117,7 +117,7 @@ nouveau_fifo_irq_handler(struct drm_device *dev)  		}  		if (status) { -			DRM_INFO("Unhandled PFIFO_INTR - 
0x%8x\n", status); +			DRM_INFO("Unhandled PFIFO_INTR - 0x%08x\n", status);  			NV_WRITE(NV03_PFIFO_INTR_0, status);  		} @@ -192,6 +192,7 @@ static int  nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine;  	int channel;  	if (dev_priv->card_type < NV_10) { @@ -236,8 +237,7 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)  		}  	} -	if (channel > nouveau_fifo_number(dev) || -	    dev_priv->fifos[channel] == NULL) { +	if (channel > engine->fifo.channels || !dev_priv->fifos[channel]) {  		DRM_ERROR("AIII, invalid/inactive channel id %d\n", channel);  		return -EINVAL;  	} @@ -246,39 +246,53 @@ nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)  	return 0;  } +struct nouveau_pgraph_trap { +	int channel; +	int class; +	int subc, mthd, size; +	uint32_t data, data2; +}; +  static void -nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id) +nouveau_graph_trap_info(struct drm_device *dev, +			struct nouveau_pgraph_trap *trap)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	uint32_t address; -	uint32_t channel, class; -	uint32_t method, subc, data, data2; -	uint32_t nsource, nstatus; - -	if (nouveau_graph_trapped_channel(dev, &channel)) -		channel = -1; -	data    = NV_READ(NV04_PGRAPH_TRAPPED_DATA); +	if (nouveau_graph_trapped_channel(dev, &trap->channel)) +		trap->channel = -1;  	address = NV_READ(NV04_PGRAPH_TRAPPED_ADDR); -	method  = address & 0x1FFC; + +	trap->mthd = address & 0x1FFC; +	trap->data = NV_READ(NV04_PGRAPH_TRAPPED_DATA);  	if (dev_priv->card_type < NV_10) { -		subc = (address >> 13) & 0x7; -		data2= 0; +		trap->subc  = (address >> 13) & 0x7;  	} else { -		subc = (address >> 16) & 0x7; -		data2= NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH); +		trap->subc  = (address >> 16) & 0x7; +		trap->data2 = NV_READ(NV10_PGRAPH_TRAPPED_DATA_HIGH);  	} -	nsource = NV_READ(NV03_PGRAPH_NSOURCE); -	nstatus = NV_READ(NV03_PGRAPH_NSTATUS); +  	if (dev_priv->card_type < NV_10) { -		class = NV_READ(0x400180 + subc*4) & 0xFF; +		trap->class = NV_READ(0x400180 + trap->subc*4) & 0xFF;  	} else if (dev_priv->card_type < NV_40) { -		class = NV_READ(0x400160 + subc*4) & 0xFFF; +		trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFF;  	} else if (dev_priv->card_type < NV_50) { -		class = NV_READ(0x400160 + subc*4) & 0xFFFF; +		trap->class = NV_READ(0x400160 + trap->subc*4) & 0xFFFF;  	} else { -		class = NV_READ(0x400814); +		trap->class = NV_READ(0x400814);  	} +} + +static void +nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id, +			     struct nouveau_pgraph_trap *trap) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; +	uint32_t nsource, nstatus; + +	nsource = NV_READ(NV03_PGRAPH_NSOURCE); +	nstatus = NV_READ(NV03_PGRAPH_NSTATUS);  	DRM_INFO("%s - nSource:", id);  	nouveau_print_bitfield_names(nsource, nouveau_nsource_names, @@ -293,54 +307,70 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id)  	printk("\n");  	DRM_INFO("%s - Ch %d/%d Class 0x%04x Mthd 0x%04x Data 0x%08x:0x%08x\n", -		 id, channel, subc, class, method, data2, data); +		 id, trap->channel, trap->subc, trap->class, trap->mthd, +		 trap->data2, trap->data);  }  static inline void  nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)  { -	struct drm_nouveau_private *dev_priv = dev->dev_private; -	int handled = 0; +	struct nouveau_pgraph_trap trap; +	int unhandled = 0; -	
DRM_DEBUG("PGRAPH notify interrupt\n"); -	if (dev_priv->card_type == NV_04 && -	    (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD)) { -		uint32_t class, mthd; +	nouveau_graph_trap_info(dev, &trap); +	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {  		/* NV4 (nvidia TNT 1) reports software methods with  		 * PGRAPH NOTIFY ILLEGAL_MTHD  		 */ -		mthd = NV_READ(NV04_PGRAPH_TRAPPED_ADDR) & 0x1FFC; -		class = NV_READ(NV04_PGRAPH_CTX_SWITCH1) & 0xFFF;  		DRM_DEBUG("Got NV04 software method method %x for class %#x\n", -			  mthd, class); +			  trap.mthd, trap.class); -		if (nouveau_sw_method_execute(dev, class, mthd)) { +		if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) {  			DRM_ERROR("Unable to execute NV04 software method %x "  				  "for object class %x. Please report.\n", -				  mthd, class); -		} else { -			handled = 1; +				  trap.mthd, trap.class); +			unhandled = 1;  		} +	} else { +		unhandled = 1;  	} -	if (!handled) -		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY"); +	if (unhandled) +		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);  }  static inline void  nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)  { -	nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR"); +	struct nouveau_pgraph_trap trap; +	int unhandled = 0; + +	nouveau_graph_trap_info(dev, &trap); + +	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) { +		if (trap.channel >= 0 && trap.mthd == 0x0150) { +			nouveau_fence_handler(dev, trap.channel); +		} else +		if (nouveau_sw_method_execute(dev, trap.class, trap.mthd)) { +			unhandled = 1; +		} +	} else { +		unhandled = 1; +	} + +	if (unhandled) +		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);  }  static inline void  nouveau_pgraph_intr_context_switch(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine;  	uint32_t chid; -	 -	chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1) & (nouveau_fifo_number(dev)-1); + +	chid = engine->fifo.channel_id(dev);  	DRM_DEBUG("PGRAPH context switch interrupt channel %x\n", chid);  	switch(dev_priv->card_type) { @@ -391,7 +421,7 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)  		}  		if (status) { -			DRM_INFO("Unhandled PGRAPH_INTR - 0x%8x\n", status); +			DRM_INFO("Unhandled PGRAPH_INTR - 0x%08x\n", status);  			NV_WRITE(NV03_PGRAPH_INTR, status);  		} @@ -447,4 +477,3 @@ nouveau_irq_handler(DRM_IRQ_ARGS)  	return IRQ_HANDLED;  } - diff --git a/shared-core/nouveau_mem.c b/shared-core/nouveau_mem.c index 448b69d3..80540a5c 100644 --- a/shared-core/nouveau_mem.c +++ b/shared-core/nouveau_mem.c @@ -159,7 +159,7 @@ int nouveau_mem_init_heap(struct mem_block **heap, uint64_t start,  	return 0;  } -/*  +/*   * Free all blocks associated with the releasing file_priv   */  void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap) @@ -189,7 +189,7 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)  	}  } -/*  +/*   * Cleanup everything   */  void nouveau_mem_takedown(struct mem_block **heap) @@ -288,7 +288,7 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)  			} else {  				uint64_t mem; -				mem = (NV_READ(NV04_FIFO_DATA) &  +				mem = (NV_READ(NV04_FIFO_DATA) &  				       NV10_FIFO_DATA_RAM_AMOUNT_MB_MASK) >>  				      NV10_FIFO_DATA_RAM_AMOUNT_MB_SHIFT;  				return mem*1024*1024; @@ -301,13 +301,11 @@ uint64_t nouveau_mem_fb_amount(struct drm_device *dev)  }  static int -nouveau_mem_init_agp(struct drm_device *dev) +nouveau_mem_init_agp(struct drm_device *dev, 
int ttm)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	struct drm_agp_info info;  	struct drm_agp_mode mode; -	struct drm_agp_buffer agp_req; -	struct drm_agp_binding bind_req;  	int ret;  	ret = drm_agp_acquire(dev); @@ -330,20 +328,25 @@ nouveau_mem_init_agp(struct drm_device *dev)  		return ret;  	} -	agp_req.size = info.aperture_size; -	agp_req.type = 0; -	ret = drm_agp_alloc(dev, &agp_req); -	if (ret) { -		DRM_ERROR("Unable to alloc AGP: %d\n", ret); -		return ret; -	} +	if (!ttm) { +		struct drm_agp_buffer agp_req; +		struct drm_agp_binding bind_req; -	bind_req.handle = agp_req.handle; -	bind_req.offset = 0; -	ret = drm_agp_bind(dev, &bind_req); -	if (ret) { -		DRM_ERROR("Unable to bind AGP: %d\n", ret); -		return ret; +		agp_req.size = info.aperture_size; +		agp_req.type = 0; +		ret = drm_agp_alloc(dev, &agp_req); +		if (ret) { +			DRM_ERROR("Unable to alloc AGP: %d\n", ret); +			return ret; +		} + +		bind_req.handle = agp_req.handle; +		bind_req.offset = 0; +		ret = drm_agp_bind(dev, &bind_req); +		if (ret) { +			DRM_ERROR("Unable to bind AGP: %d\n", ret); +			return ret; +		}  	}  	dev_priv->gart_info.type	= NOUVEAU_GART_AGP; @@ -352,6 +355,73 @@ nouveau_mem_init_agp(struct drm_device *dev)  	return 0;  } +#define HACK_OLD_MM +int +nouveau_mem_init_ttm(struct drm_device *dev) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; +	uint32_t vram_size, bar1_size; +	int ret; + +	dev_priv->agp_heap = dev_priv->pci_heap = dev_priv->fb_heap = NULL; +	dev_priv->fb_phys = drm_get_resource_start(dev, 1); +	dev_priv->gart_info.type = NOUVEAU_GART_NONE; + +	drm_bo_driver_init(dev); + +	/* non-mappable vram */ +	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev); +	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram; +	vram_size = dev_priv->fb_available_size >> PAGE_SHIFT; +	bar1_size = drm_get_resource_len(dev, 1) >> PAGE_SHIFT; +	if (bar1_size < vram_size) { +		if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0, +					  bar1_size, vram_size - bar1_size))) { +			DRM_ERROR("Failed PRIV0 mm init: %d\n", ret); +			return ret; +		} +		vram_size = bar1_size; +	} + +	/* mappable vram */ +#ifdef HACK_OLD_MM +	vram_size /= 4; +#endif +	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size))) { +		DRM_ERROR("Failed VRAM mm init: %d\n", ret); +		return ret; +	} + +	/* GART */ +#ifndef __powerpc__ +	if (drm_device_is_agp(dev) && dev->agp) { +		if ((ret = nouveau_mem_init_agp(dev, 1))) +			DRM_ERROR("Error initialising AGP: %d\n", ret); +	} +#endif + +	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) { +		if ((ret = nouveau_sgdma_init(dev))) +			DRM_ERROR("Error initialising PCI SGDMA: %d\n", ret); +	} + +	if ((ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, +				  dev_priv->gart_info.aper_size >> +				  PAGE_SHIFT))) { +		DRM_ERROR("Failed TT mm init: %d\n", ret); +		return ret; +	} + +#ifdef HACK_OLD_MM +	vram_size <<= PAGE_SHIFT; +	DRM_INFO("Old MM using %dKiB VRAM\n", (vram_size * 3) >> 10); +	if (nouveau_mem_init_heap(&dev_priv->fb_heap, vram_size, vram_size * 3)) +		return -ENOMEM; +#endif + +	return 0; +} +  int nouveau_mem_init(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -378,7 +448,7 @@ int nouveau_mem_init(struct drm_device *dev)  	DRM_DEBUG("Available VRAM: %dKiB\n", fb_size>>10);  	if (fb_size>256*1024*1024) { -		/* On cards with > 256Mb, you can't map everything. 
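Both the old dual-heap scheme here and the new nouveau_mem_init_ttm() above are answers to the same limit: only the VRAM behind the BAR can be CPU-mapped. A worked example of the TTM-side split, assuming 512 MiB of VRAM behind a 256 MiB BAR1 window and 4 KiB pages (illustrative numbers; the function is invented and the HACK_OLD_MM quartering is omitted):

/* Hypothetical fragment mirroring nouveau_mem_init_ttm() above. */
static int vram_split_example(struct drm_device *dev)
{
	uint32_t vram_size = (512u << 20) >> PAGE_SHIFT;	/* 131072 pages */
	uint32_t bar1_size = (256u << 20) >> PAGE_SHIFT;	/*  65536 pages */
	int ret;

	if (bar1_size < vram_size) {
		/* Pages 65536..131071: CPU-invisible VRAM, zone PRIV0. */
		ret = drm_bo_init_mm(dev, DRM_BO_MEM_PRIV0,
				     bar1_size, vram_size - bar1_size);
		if (ret)
			return ret;
		vram_size = bar1_size;	/* clamp mappable VRAM to BAR1 */
	}

	/* Pages 0..65535: CPU-mappable VRAM, zone VRAM. */
	return drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0, vram_size);
}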
* So we create a second FB heap for that type of memory */  		if (nouveau_mem_init_heap(&dev_priv->fb_heap,  					  0, 256*1024*1024)) @@ -392,11 +462,13 @@ int nouveau_mem_init(struct drm_device *dev)  		dev_priv->fb_nomap_heap=NULL;  	} +#ifndef __powerpc__  	/* Init AGP / NV50 PCIEGART */  	if (drm_device_is_agp(dev) && dev->agp) { -		if ((ret = nouveau_mem_init_agp(dev))) +		if ((ret = nouveau_mem_init_agp(dev, 0)))  			DRM_ERROR("Error initialising AGP: %d\n", ret);  	} +#endif  	/*Note: this is *not* just NV50 code, but only used on NV50 for now */  	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE && @@ -405,7 +477,7 @@ int nouveau_mem_init(struct drm_device *dev)  		if (!ret) {  			ret = nouveau_sgdma_nottm_hack_init(dev);  			if (ret) -				nouveau_sgdma_takedown(dev);  +				nouveau_sgdma_takedown(dev);  		}  		if (ret) @@ -417,7 +489,7 @@ int nouveau_mem_init(struct drm_device *dev)  					  0, dev_priv->gart_info.aper_size)) {  			if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {  				nouveau_sgdma_nottm_hack_takedown(dev); -				nouveau_sgdma_takedown(dev);  +				nouveau_sgdma_takedown(dev);  			}  		}  	} @@ -435,7 +507,7 @@ int nouveau_mem_init(struct drm_device *dev)  		} else {  			if (nouveau_mem_init_heap(&dev_priv->pci_heap, 0,  						  dev->sg->pages * PAGE_SIZE)) { -				DRM_ERROR("Unable to initialize pci_heap!");	 +				DRM_ERROR("Unable to initialize pci_heap!");  			}  		}  	} @@ -451,8 +523,8 @@ struct mem_block* nouveau_mem_alloc(struct drm_device *dev, int alignment,  	int type;  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	/*  -	 * Make things easier on ourselves: all allocations are page-aligned.  +	/* +	 * Make things easier on ourselves: all allocations are page-aligned.  	 * We need that to map allocated regions into the user space  	 */  	if (alignment < PAGE_SHIFT) @@ -534,7 +606,7 @@ alloc_ok:  			ret = drm_addmap(dev, block->start, block->size,  					 _DRM_SCATTER_GATHER, 0, &block->map); -		if (ret) {  +		if (ret) {  			nouveau_mem_free_block(block);  			return NULL;  		} @@ -547,7 +619,7 @@ alloc_ok:  		block->map_handle = entry->user_token;  	} -	DRM_DEBUG("allocated 0x%llx type=0x%08x\n", block->start, block->flags); +	DRM_DEBUG("allocated %d bytes at 0x%x type=0x%08x\n", block->size, block->start, block->flags);  	return block;  } @@ -604,5 +676,3 @@ int nouveau_ioctl_mem_free(struct drm_device *dev, void *data, struct drm_file *  	nouveau_mem_free(dev, block);  	return 0;  } - - diff --git a/shared-core/nouveau_notifier.c b/shared-core/nouveau_notifier.c index 31e2b244..82c8ab7d 100644 --- a/shared-core/nouveau_notifier.c +++ b/shared-core/nouveau_notifier.c @@ -115,7 +115,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,  		} else {  			target = NV_DMA_TARGET_AGP;  		} -	} else  +	} else  	if (chan->notifier_block->flags & NOUVEAU_MEM_PCI) {  		target = NV_DMA_TARGET_PCI_NONLINEAR;  	} else { @@ -163,4 +163,3 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data,  	return 0;  } - diff --git a/shared-core/nouveau_object.c b/shared-core/nouveau_object.c index fbce7702..b6bf759d 100644 --- a/shared-core/nouveau_object.c +++ b/shared-core/nouveau_object.c @@ -524,7 +524,7 @@ nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,  	struct nouveau_gpuobj_ref *ref;  	struct list_head *entry, *tmp; -	list_for_each_safe(entry, tmp, &chan->ramht_refs) {		 +	list_for_each_safe(entry, tmp, &chan->ramht_refs) {  		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);  		if (ref->handle 
== handle) { @@ -616,7 +616,7 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)     DMA objects are used to reference a piece of memory in the     framebuffer, PCI or AGP address space. Each object is 16 bytes big     and looks as follows: -    +     entry[0]     11:0  class (seems like I can always use 0 here)     12    page table present? @@ -648,7 +648,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	int ret;  	uint32_t is_scatter_gather = 0; -	 +  	/* Total number of pages covered by the request.  	 */  	const unsigned int page_count = (size + PAGE_SIZE - 1) / PAGE_SIZE; @@ -671,7 +671,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,          default:                  break;          } -	 +  	ret = nouveau_gpuobj_new(dev, chan,  				 is_scatter_gather ? ((page_count << 2) + 12) : nouveau_gpuobj_class_instmem_size(dev, class),  				 16, @@ -687,11 +687,11 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,  		adjust = offset &  0x00000fff;  		if (access != NV_DMA_ACCESS_RO)  				pte_flags |= (1<<1); -		 -		if ( ! is_scatter_gather )  + +		if ( ! is_scatter_gather )  			{  			frame  = offset & ~0x00000fff; -			 +  			INSTANCE_WR(*gpuobj, 0, ((1<<12) | (1<<13) |  					(adjust << 20) |  					 (access << 14) | @@ -701,7 +701,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,  			INSTANCE_WR(*gpuobj, 2, frame | pte_flags);  			INSTANCE_WR(*gpuobj, 3, frame | pte_flags);  			} -		else  +		else  			{  			/* Intial page entry in the scatter-gather area that  			 * corresponds to the base offset @@ -728,7 +728,7 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,  			/*write starting at the third dword*/  			instance_offset = 2; -  +  			/*for each PAGE, get its bus address, fill in the page table entry, and advance*/  			for (i = 0; i < page_count; i++) {  				if (dev->sg->busaddr[idx] == 0) { @@ -745,12 +745,12 @@ nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,  				}  				frame = (uint32_t) dev->sg->busaddr[idx]; -				INSTANCE_WR(*gpuobj, instance_offset,  +				INSTANCE_WR(*gpuobj, instance_offset,  					    frame | pte_flags); -  +  				idx++;  				instance_offset ++; - 			} +			}  			}  	} else {  		uint32_t flags0, flags5; @@ -848,7 +848,7 @@ nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,     entry[0]:     11:0  class  (maybe uses more bits here?)     17    user clip enable -   21:19 patch config  +   21:19 patch config     25    patch status valid ?     entry[1]:     15:0  DMA notifier  (maybe 20:0) @@ -986,7 +986,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,  	/* NV50 VM, point offset 0-512MiB at shared PCIEGART table  */  	if (dev_priv->card_type >= NV_50) {  		uint32_t vm_offset; -		 +  		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 
0x1400 : 0x200;  		vm_offset += chan->ramin->gpuobj->im_pramin->start;  		if ((ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000, @@ -1074,7 +1074,7 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)  	DRM_DEBUG("ch%d\n", chan->id); -	list_for_each_safe(entry, tmp, &chan->ramht_refs) {		 +	list_for_each_safe(entry, tmp, &chan->ramht_refs) {  		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);  		nouveau_gpuobj_ref_del(dev, &ref); @@ -1104,7 +1104,7 @@ int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,  	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);  	//FIXME: check args, only allow trusted objects to be created -	 +  	if (init->handle == ~0)  		return -EINVAL; @@ -1145,4 +1145,3 @@ int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,  	return 0;  } - diff --git a/shared-core/nouveau_reg.h b/shared-core/nouveau_reg.h index 4dc3b7fa..a2506146 100644 --- a/shared-core/nouveau_reg.h +++ b/shared-core/nouveau_reg.h @@ -45,16 +45,40 @@  #define NV_CLASS_NULL                                      0x00000030  #define NV_CLASS_DMA_IN_MEMORY                             0x0000003D +#define NV03_USER(i)                             (0x00800000+(i*NV03_USER_SIZE)) +#define NV03_USER__SIZE                                                       16 +#define NV10_USER__SIZE                                                       32 +#define NV03_USER_SIZE                                                0x00010000 +#define NV03_USER_DMA_PUT(i)                     (0x00800040+(i*NV03_USER_SIZE)) +#define NV03_USER_DMA_PUT__SIZE                                               16 +#define NV10_USER_DMA_PUT__SIZE                                               32 +#define NV03_USER_DMA_GET(i)                     (0x00800044+(i*NV03_USER_SIZE)) +#define NV03_USER_DMA_GET__SIZE                                               16 +#define NV10_USER_DMA_GET__SIZE                                               32 +#define NV03_USER_REF_CNT(i)                     (0x00800048+(i*NV03_USER_SIZE)) +#define NV03_USER_REF_CNT__SIZE                                               16 +#define NV10_USER_REF_CNT__SIZE                                               32 + +#define NV40_USER(i)                             (0x00c00000+(i*NV40_USER_SIZE)) +#define NV40_USER_SIZE                                                0x00001000 +#define NV40_USER_DMA_PUT(i)                     (0x00c00040+(i*NV40_USER_SIZE)) +#define NV40_USER_DMA_PUT__SIZE                                               32 +#define NV40_USER_DMA_GET(i)                     (0x00c00044+(i*NV40_USER_SIZE)) +#define NV40_USER_DMA_GET__SIZE                                               32 +#define NV40_USER_REF_CNT(i)                     (0x00c00048+(i*NV40_USER_SIZE)) +#define NV40_USER_REF_CNT__SIZE                                               32 + +#define NV50_USER(i)                             (0x00c00000+(i*NV50_USER_SIZE)) +#define NV50_USER_SIZE                                                0x00002000 +#define NV50_USER_DMA_PUT(i)                     (0x00c00040+(i*NV50_USER_SIZE)) +#define NV50_USER_DMA_PUT__SIZE                                              128 +#define NV50_USER_DMA_GET(i)                     (0x00c00044+(i*NV50_USER_SIZE)) +#define NV50_USER_DMA_GET__SIZE                                              128 +/*XXX: I don't think this actually exists.. 
*/ +#define NV50_USER_REF_CNT(i)                     (0x00c00048+(i*NV50_USER_SIZE)) +#define NV50_USER_REF_CNT__SIZE                                              128 +  #define NV03_FIFO_SIZE                                     0x8000UL -#define NV_MAX_FIFO_NUMBER                                 128 -#define NV03_FIFO_REGS_SIZE                                0x10000 -#define NV03_FIFO_REGS(i)                                  (0x00800000+i*NV03_FIFO_REGS_SIZE) -#    define NV03_FIFO_REGS_DMAPUT(i)                       (NV03_FIFO_REGS(i)+0x40) -#    define NV03_FIFO_REGS_DMAGET(i)                       (NV03_FIFO_REGS(i)+0x44) -#define NV50_FIFO_REGS_SIZE                                0x2000 -#define NV50_FIFO_REGS(i)                                  (0x00c00000+i*NV50_FIFO_REGS_SIZE) -#    define NV50_FIFO_REGS_DMAPUT(i)                       (NV50_FIFO_REGS(i)+0x40) -#    define NV50_FIFO_REGS_DMAGET(i)                       (NV50_FIFO_REGS(i)+0x44)  #define NV03_PMC_BOOT_0                                    0x00000000  #define NV03_PMC_BOOT_1                                    0x00000004 @@ -88,6 +112,9 @@  #define NV50_PUNK_BAR3_CTXDMA_VALID                           (1<<31)  #define NV50_PUNK_UNK1710                                  0x00001710 +#define NV04_PBUS_PCI_NV_1                                 0x00001804 +#define NV04_PBUS_PCI_NV_19                                0x0000184C +  #define NV04_PTIMER_INTR_0                                 0x00009100  #define NV04_PTIMER_INTR_EN_0                              0x00009140  #define NV04_PTIMER_NUMERATOR                              0x00009200 @@ -406,6 +433,12 @@  #define NV04_PFIFO_CACHE0_PULL1                            0x00003054  #define NV03_PFIFO_CACHE1_PUSH0                            0x00003200  #define NV03_PFIFO_CACHE1_PUSH1                            0x00003204 +#define NV03_PFIFO_CACHE1_PUSH1_DMA                            (1<<8) +#define NV40_PFIFO_CACHE1_PUSH1_DMA                           (1<<16) +#define NV03_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000000f +#define NV10_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000001f +#define NV50_PFIFO_CACHE1_PUSH1_CHID_MASK                  0x0000007f +#define NV03_PFIFO_CACHE1_PUT                              0x00003210  #define NV04_PFIFO_CACHE1_DMA_PUSH                         0x00003220  #define NV04_PFIFO_CACHE1_DMA_FETCH                        0x00003224  #    define NV_PFIFO_CACHE1_DMA_FETCH_TRIG_8_BYTES         0x00000000 @@ -550,4 +583,3 @@  #define NV40_RAMFC_UNK_48                                        0x48  #define NV40_RAMFC_UNK_4C                                        0x4C  #define NV40_RAMFC_UNK_50                                        0x50 - diff --git a/shared-core/nouveau_state.c b/shared-core/nouveau_state.c index c617bfd3..7086a0ab 100644 --- a/shared-core/nouveau_state.c +++ b/shared-core/nouveau_state.c @@ -1,4 +1,4 @@ -/*  +/*   * Copyright 2005 Stephane Marchesin   * All Rights Reserved.   * @@ -40,7 +40,7 @@ static int nouveau_init_card_mappings(struct drm_device *dev)  	/* map the mmio regs */  	ret = drm_addmap(dev, drm_get_resource_start(dev, 0), -			      drm_get_resource_len(dev, 0),  +			      drm_get_resource_len(dev, 0),  			      _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);  	if (ret) {  		DRM_ERROR("Unable to initialize the mmio mapping (%d). 
" @@ -88,7 +88,6 @@ static int nouveau_init_card_mappings(struct drm_device *dev)  static int nouveau_stub_init(struct drm_device *dev) { return 0; }  static void nouveau_stub_takedown(struct drm_device *dev) {} -static uint64_t nouveau_stub_timer_read(struct drm_device *dev) { return 0; }  static int nouveau_init_engine_ptrs(struct drm_device *dev)  { @@ -116,8 +115,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)  		engine->graph.destroy_context	= nv04_graph_destroy_context;  		engine->graph.load_context	= nv04_graph_load_context;  		engine->graph.save_context	= nv04_graph_save_context; +		engine->fifo.channels	= 16;  		engine->fifo.init	= nouveau_fifo_init;  		engine->fifo.takedown	= nouveau_stub_takedown; +		engine->fifo.channel_id		= nv04_fifo_channel_id;  		engine->fifo.create_context	= nv04_fifo_create_context;  		engine->fifo.destroy_context	= nv04_fifo_destroy_context;  		engine->fifo.load_context	= nv04_fifo_load_context; @@ -143,8 +144,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)  		engine->graph.destroy_context	= nv10_graph_destroy_context;  		engine->graph.load_context	= nv10_graph_load_context;  		engine->graph.save_context	= nv10_graph_save_context; +		engine->fifo.channels	= 32;  		engine->fifo.init	= nouveau_fifo_init;  		engine->fifo.takedown	= nouveau_stub_takedown; +		engine->fifo.channel_id		= nv10_fifo_channel_id;  		engine->fifo.create_context	= nv10_fifo_create_context;  		engine->fifo.destroy_context	= nv10_fifo_destroy_context;  		engine->fifo.load_context	= nv10_fifo_load_context; @@ -170,8 +173,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)  		engine->graph.destroy_context	= nv20_graph_destroy_context;  		engine->graph.load_context	= nv20_graph_load_context;  		engine->graph.save_context	= nv20_graph_save_context; +		engine->fifo.channels	= 32;  		engine->fifo.init	= nouveau_fifo_init;  		engine->fifo.takedown	= nouveau_stub_takedown; +		engine->fifo.channel_id		= nv10_fifo_channel_id;  		engine->fifo.create_context	= nv10_fifo_create_context;  		engine->fifo.destroy_context	= nv10_fifo_destroy_context;  		engine->fifo.load_context	= nv10_fifo_load_context; @@ -197,8 +202,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)  		engine->graph.destroy_context	= nv20_graph_destroy_context;  		engine->graph.load_context	= nv20_graph_load_context;  		engine->graph.save_context	= nv20_graph_save_context; +		engine->fifo.channels	= 32;  		engine->fifo.init	= nouveau_fifo_init;  		engine->fifo.takedown	= nouveau_stub_takedown; +		engine->fifo.channel_id		= nv10_fifo_channel_id;  		engine->fifo.create_context	= nv10_fifo_create_context;  		engine->fifo.destroy_context	= nv10_fifo_destroy_context;  		engine->fifo.load_context	= nv10_fifo_load_context; @@ -224,8 +231,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)  		engine->graph.destroy_context	= nv40_graph_destroy_context;  		engine->graph.load_context	= nv40_graph_load_context;  		engine->graph.save_context	= nv40_graph_save_context; +		engine->fifo.channels	= 32;  		engine->fifo.init	= nv40_fifo_init;  		engine->fifo.takedown	= nouveau_stub_takedown; +		engine->fifo.channel_id		= nv10_fifo_channel_id;  		engine->fifo.create_context	= nv40_fifo_create_context;  		engine->fifo.destroy_context	= nv40_fifo_destroy_context;  		engine->fifo.load_context	= nv40_fifo_load_context; @@ -241,9 +250,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)  		engine->instmem.unbind		= nv50_instmem_unbind;  		engine->mc.init		= 
nv50_mc_init;  		engine->mc.takedown	= nv50_mc_takedown; -		engine->timer.init	= nouveau_stub_init; -		engine->timer.read	= nouveau_stub_timer_read; -		engine->timer.takedown	= nouveau_stub_takedown; +		engine->timer.init	= nv04_timer_init; +		engine->timer.read	= nv04_timer_read; +		engine->timer.takedown	= nv04_timer_takedown;  		engine->fb.init		= nouveau_stub_init;  		engine->fb.takedown	= nouveau_stub_takedown;  		engine->graph.init	= nv50_graph_init; @@ -252,8 +261,10 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)  		engine->graph.destroy_context	= nv50_graph_destroy_context;  		engine->graph.load_context	= nv50_graph_load_context;  		engine->graph.save_context	= nv50_graph_save_context; +		engine->fifo.channels	= 128;  		engine->fifo.init	= nv50_fifo_init;  		engine->fifo.takedown	= nv50_fifo_takedown; +		engine->fifo.channel_id		= nv50_fifo_channel_id;  		engine->fifo.create_context	= nv50_fifo_create_context;  		engine->fifo.destroy_context	= nv50_fifo_destroy_context;  		engine->fifo.load_context	= nv50_fifo_load_context; @@ -278,18 +289,7 @@ nouveau_card_init(struct drm_device *dev)  	if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)  		return 0; - -	/* Map any PCI resources we need on the card */ -	ret = nouveau_init_card_mappings(dev); -	if (ret) return ret; - -#if defined(__powerpc__) -	/* Put the card in BE mode if it's not */ -	if (NV_READ(NV03_PMC_BOOT_1)) -		NV_WRITE(NV03_PMC_BOOT_1,0x00000001); - -	DRM_MEMORYBARRIER(); -#endif +	dev_priv->ttm = 0;  	/* Determine exact chipset we're running on */  	if (dev_priv->card_type < NV_10) @@ -315,8 +315,13 @@ nouveau_card_init(struct drm_device *dev)  	if (ret) return ret;  	/* Setup the memory manager */ -	ret = nouveau_mem_init(dev); -	if (ret) return ret; +	if (dev_priv->ttm) { +		ret = nouveau_mem_init_ttm(dev); +		if (ret) return ret; +	} else { +		ret = nouveau_mem_init(dev); +		if (ret) return ret; +	}  	ret = nouveau_gpuobj_init(dev);  	if (ret) return ret; @@ -405,9 +410,53 @@ void nouveau_preclose(struct drm_device *dev, struct drm_file *file_priv)  /* first module load, setup the mmio/fb mapping */  int nouveau_firstopen(struct drm_device *dev)  { +#if defined(__powerpc__) +	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct device_node *dn; +#endif +	int ret; +	/* Map any PCI resources we need on the card */ +	ret = nouveau_init_card_mappings(dev); +	if (ret) return ret; + +#if defined(__powerpc__) +	/* Put the card in BE mode if it's not */ +	if (NV_READ(NV03_PMC_BOOT_1)) +		NV_WRITE(NV03_PMC_BOOT_1,0x00000001); + +	DRM_MEMORYBARRIER(); +#endif + +#if defined(__linux__) && defined(__powerpc__) +	/* if we have an OF card, copy vbios to RAMIN */ +	dn = pci_device_to_OF_node(dev->pdev); +	if (dn) +	{ +		int size; +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)) +		const uint32_t *bios = of_get_property(dn, "NVDA,BMP", &size); +#else +		const uint32_t *bios = get_property(dn, "NVDA,BMP", &size); +#endif +		if (bios) +		{ +			int i; +			for(i=0;i<size;i+=4) +				NV_WI32(i, bios[i/4]); +			DRM_INFO("OF bios successfully copied (%d bytes)\n",size); +		} +		else +			DRM_INFO("Unable to get the OF bios\n"); +	} +	else +		DRM_INFO("Unable to get the OF node\n"); +#endif  	return 0;  } +#define NV40_CHIPSET_MASK 0x00000baf +#define NV44_CHIPSET_MASK 0x00005450 +  int nouveau_load(struct drm_device *dev, unsigned long flags)  {  	struct drm_nouveau_private *dev_priv; @@ -425,7 +474,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)  	DRM_DEBUG("vendor: 0x%X device: 0x%X class: 
0x%X\n", dev->pci_vendor, dev->pci_device, dev->pdev->class);  	/* Time to determine the card architecture */ -	regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);  +	regs = ioremap_nocache(pci_resource_start(dev->pdev, 0), 0x8);  	if (!regs) {  		DRM_ERROR("Could not ioremap to determine register\n");  		return -ENOMEM; @@ -449,12 +498,23 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)  	iounmap(regs); -	if (architecture >= 0x50) { +	if (architecture >= 0x80) {  		dev_priv->card_type = NV_50; -	} else if (architecture >= 0x44) { +	} else if (architecture >= 0x60) { +		/* FIXME we need to figure out who's who for NV6x */  		dev_priv->card_type = NV_44; +	} else if (architecture >= 0x50) { +		dev_priv->card_type = NV_50;  	} else if (architecture >= 0x40) { -		dev_priv->card_type = NV_40; +		uint8_t subarch = architecture & 0xf; +		/* Selection criteria borrowed from NV40EXA */ +		if (NV40_CHIPSET_MASK & (1 << subarch)) { +			dev_priv->card_type = NV_40; +		} else if (NV44_CHIPSET_MASK & (1 << subarch)) { +			dev_priv->card_type = NV_44; +		} else { +			dev_priv->card_type = NV_UNKNOWN; +		}  	} else if (architecture >= 0x30) {  		dev_priv->card_type = NV_30;  	} else if (architecture >= 0x20) { @@ -553,7 +613,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data, struct drm_file *  	case NOUVEAU_GETPARAM_PCI_PHYSICAL:  		if ( dev -> sg )  			getparam->value=(uint64_t) dev->sg->virtual; -		else  +		else  		     {  		     DRM_ERROR("Requested PCIGART address, while no PCIGART was created\n");  		     return -EINVAL; @@ -635,5 +695,3 @@ void nouveau_wait_for_idle(struct drm_device *dev)  	}  	}  } - - diff --git a/shared-core/nouveau_swmthd.c b/shared-core/nouveau_swmthd.c index 66ef6233..c3666bfd 100644 --- a/shared-core/nouveau_swmthd.c +++ b/shared-core/nouveau_swmthd.c @@ -189,5 +189,3 @@ static void nouveau_NV04_setcontext_sw_method(struct drm_device *dev, uint32_t o  	 return 1;   } - - diff --git a/shared-core/nouveau_swmthd.h b/shared-core/nouveau_swmthd.h index df8c7400..5b9409fb 100644 --- a/shared-core/nouveau_swmthd.h +++ b/shared-core/nouveau_swmthd.h @@ -31,4 +31,3 @@   */  int nouveau_sw_method_execute(struct drm_device *dev, uint32_t oclass, uint32_t method); /* execute the given software method, returns 0 on success */ - diff --git a/shared-core/nv04_fb.c b/shared-core/nv04_fb.c index 534fb50b..58a92470 100644 --- a/shared-core/nv04_fb.c +++ b/shared-core/nv04_fb.c @@ -21,4 +21,3 @@ void  nv04_fb_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv04_fifo.c b/shared-core/nv04_fifo.c index d750ced8..230c8e72 100644 --- a/shared-core/nv04_fifo.c +++ b/shared-core/nv04_fifo.c @@ -36,6 +36,15 @@  #define NV04_RAMFC__SIZE 32  int +nv04_fifo_channel_id(struct drm_device *dev) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; + +	return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & +			NV03_PFIFO_CACHE1_PUSH1_CHID_MASK); +} + +int  nv04_fifo_create_context(struct nouveau_channel *chan)  {  	struct drm_device *dev = chan->dev; @@ -71,7 +80,7 @@ nv04_fifo_destroy_context(struct nouveau_channel *chan)  {  	struct drm_device *dev = chan->dev;  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	 +  	NV_WRITE(NV04_PFIFO_MODE, NV_READ(NV04_PFIFO_MODE)&~(1<<chan->id));  	nouveau_gpuobj_ref_del(dev, &chan->ramfc); @@ -84,15 +93,16 @@ nv04_fifo_load_context(struct nouveau_channel *chan)  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	uint32_t tmp; -	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, (1<<8) | chan->id); +	
NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, +		 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET, RAMFC_RD(DMA_GET));  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT, RAMFC_RD(DMA_PUT)); -	 +  	tmp = RAMFC_RD(DMA_INSTANCE);  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16); -	 +  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_STATE, RAMFC_RD(DMA_STATE));  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_FETCH, RAMFC_RD(DMA_FETCH));  	NV_WRITE(NV04_PFIFO_CACHE1_ENGINE, RAMFC_RD(ENGINE)); @@ -123,7 +133,6 @@ nv04_fifo_save_context(struct nouveau_channel *chan)  	RAMFC_WR(DMA_FETCH, NV_READ(NV04_PFIFO_CACHE1_DMA_FETCH));  	RAMFC_WR(ENGINE, NV_READ(NV04_PFIFO_CACHE1_ENGINE));  	RAMFC_WR(PULL1_ENGINE, NV_READ(NV04_PFIFO_CACHE1_PULL1)); -	 +  	return 0;  } - diff --git a/shared-core/nv04_graph.c b/shared-core/nv04_graph.c index cffa3e4a..6caae257 100644 --- a/shared-core/nv04_graph.c +++ b/shared-core/nv04_graph.c @@ -1,4 +1,4 @@ -/*  +/*   * Copyright 2007 Stephane Marchesin   * All Rights Reserved.   * @@ -353,6 +353,7 @@ struct graph_state {  void nouveau_nv04_context_switch(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine;  	struct nouveau_channel *next, *last;  	int chid; @@ -370,7 +371,7 @@ void nouveau_nv04_context_switch(struct drm_device *dev)  		return;  	} -	chid = NV_READ(NV03_PFIFO_CACHE1_PUSH1)&(nouveau_fifo_number(dev)-1); +	chid = engine->fifo.channel_id(dev);  	next = dev_priv->fifos[chid];  	if (!next) { @@ -378,7 +379,7 @@ void nouveau_nv04_context_switch(struct drm_device *dev)  		return;  	} -	chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); +	chid = (NV_READ(NV04_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);  	last = dev_priv->fifos[chid];  	if (!last) { @@ -496,7 +497,7 @@ int nv04_graph_init(struct drm_device *dev) {  	/*haiku same*/  	/*NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xfad4ff31);*/ -	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0x10d4ff31); +	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xf0d4ff31);  	/*haiku and blob 10d4*/  	NV_WRITE(NV04_PGRAPH_STATE        , 0xFFFFFFFF); diff --git a/shared-core/nv04_instmem.c b/shared-core/nv04_instmem.c index 5a446450..804f9a75 100644 --- a/shared-core/nv04_instmem.c +++ b/shared-core/nv04_instmem.c @@ -33,6 +33,7 @@ static void  nv04_instmem_configure_fixed_tables(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine;  	/* FIFO hash table (RAMHT)  	 *   use 4k hash table at RAMIN+0x10000 @@ -61,8 +62,8 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev)  		case NV_40:  		case NV_44:  			dev_priv->ramfc_offset = 0x20000; -			dev_priv->ramfc_size   = nouveau_fifo_number(dev) * -				nouveau_fifo_ctx_size(dev); +			dev_priv->ramfc_size   = engine->fifo.channels * +						 nouveau_fifo_ctx_size(dev);  			break;  		case NV_30:  		case NV_20: @@ -72,8 +73,8 @@ nv04_instmem_configure_fixed_tables(struct drm_device *dev)  		case NV_04:  		default:  			dev_priv->ramfc_offset = 0x11400; -			dev_priv->ramfc_size   = nouveau_fifo_number(dev) * -				nouveau_fifo_ctx_size(dev); +			dev_priv->ramfc_size   = engine->fifo.channels * +						 nouveau_fifo_ctx_size(dev);  			break;  	}  	DRM_DEBUG("RAMFC offset=0x%x, size=%d\n", dev_priv->ramfc_offset, @@ -134,7 +135,7 @@ nv04_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)  		if (gpuobj->im_bound)  			dev_priv->Engine.instmem.unbind(dev, gpuobj);  		
gpuobj->im_backing = NULL; -	}	 +	}  }  int @@ -156,4 +157,3 @@ nv04_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)  	gpuobj->im_bound = 0;  	return 0;  } - diff --git a/shared-core/nv04_mc.c b/shared-core/nv04_mc.c index eee0c50c..766f3a33 100644 --- a/shared-core/nv04_mc.c +++ b/shared-core/nv04_mc.c @@ -7,12 +7,25 @@ int  nv04_mc_init(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	uint32_t saved_pci_nv_1, saved_pci_nv_19; + +	saved_pci_nv_1 = NV_READ(NV04_PBUS_PCI_NV_1); +	saved_pci_nv_19 = NV_READ(NV04_PBUS_PCI_NV_19); + +	/* clear busmaster bit */ +	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~(0x00000001 << 2)); +	/* clear SBA and AGP bits */ +	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19 & 0xfffff0ff);  	/* Power up everything, resetting each individual unit will  	 * be done later if needed.  	 */  	NV_WRITE(NV03_PMC_ENABLE, 0xFFFFFFFF); +	/* and restore (gives effect of resetting AGP) */ +	NV_WRITE(NV04_PBUS_PCI_NV_19, saved_pci_nv_19); +	NV_WRITE(NV04_PBUS_PCI_NV_1, saved_pci_nv_1); +  	return 0;  } @@ -20,4 +33,3 @@ void  nv04_mc_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv04_timer.c b/shared-core/nv04_timer.c index 08a27f4f..88dff36d 100644 --- a/shared-core/nv04_timer.c +++ b/shared-core/nv04_timer.c @@ -42,4 +42,3 @@ void  nv04_timer_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv10_fb.c b/shared-core/nv10_fb.c index 7fff5b3f..6e0773ac 100644 --- a/shared-core/nv10_fb.c +++ b/shared-core/nv10_fb.c @@ -23,4 +23,3 @@ void  nv10_fb_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv10_fifo.c b/shared-core/nv10_fifo.c index c86725d2..6d50b6ca 100644 --- a/shared-core/nv10_fifo.c +++ b/shared-core/nv10_fifo.c @@ -37,6 +37,15 @@  #define NV10_RAMFC__SIZE ((dev_priv->chipset) >= 0x17 ? 64 : 32)  int +nv10_fifo_channel_id(struct drm_device *dev) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; + +	return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & +			NV10_PFIFO_CACHE1_PUSH1_CHID_MASK); +} + +int  nv10_fifo_create_context(struct nouveau_channel *chan)  {  	struct drm_device *dev = chan->dev; @@ -87,7 +96,8 @@ nv10_fifo_load_context(struct nouveau_channel *chan)  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	uint32_t tmp; -	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1            , 0x00000100 | chan->id); +	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, +		 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_GET          , RAMFC_RD(DMA_GET));  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_PUT          , RAMFC_RD(DMA_PUT)); @@ -157,4 +167,3 @@ nv10_fifo_save_context(struct nouveau_channel *chan)  	return 0;  } - diff --git a/shared-core/nv10_graph.c b/shared-core/nv10_graph.c index c6319b8f..d0c2285f 100644 --- a/shared-core/nv10_graph.c +++ b/shared-core/nv10_graph.c @@ -1,4 +1,4 @@ -/*  +/*   * Copyright 2007 Matthieu CASTET <castet.matthieu@free.fr>   * All Rights Reserved.   
* @@ -692,6 +692,7 @@ int nv10_graph_save_context(struct nouveau_channel *chan)  void nouveau_nv10_context_switch(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv; +	struct nouveau_engine *engine;  	struct nouveau_channel *next, *last;  	int chid; @@ -708,8 +709,10 @@ void nouveau_nv10_context_switch(struct drm_device *dev)  		DRM_DEBUG("Invalid drm_nouveau_private->fifos\n");  		return;  	} +	engine = &dev_priv->Engine; -	chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20)&(nouveau_fifo_number(dev)-1); +	chid = (NV_READ(NV04_PGRAPH_TRAPPED_ADDR) >> 20) & +		(engine->fifo.channels - 1);  	next = dev_priv->fifos[chid];  	if (!next) { @@ -717,7 +720,8 @@ void nouveau_nv10_context_switch(struct drm_device *dev)  		return;  	} -	chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); +	chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) &  +		(engine->fifo.channels - 1);  	last = dev_priv->fifos[chid];  	if (!last) { @@ -732,7 +736,7 @@ void nouveau_nv10_context_switch(struct drm_device *dev)  	if (last) {  		nouveau_wait_for_idle(dev);  		nv10_graph_save_context(last); -	}	 +	}  	nouveau_wait_for_idle(dev); @@ -827,13 +831,14 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)  {  	struct drm_device *dev = chan->dev;  	struct drm_nouveau_private *dev_priv = dev->dev_private; +	struct nouveau_engine *engine = &dev_priv->Engine;  	struct graph_state* pgraph_ctx = chan->pgraph_ctx;  	int chid;  	drm_free(pgraph_ctx, sizeof(*pgraph_ctx), DRM_MEM_DRIVER);  	chan->pgraph_ctx = NULL; -	chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (nouveau_fifo_number(dev)-1); +	chid = (NV_READ(NV10_PGRAPH_CTX_USER) >> 24) & (engine->fifo.channels - 1);  	/* This code seems to corrupt the 3D pipe, but blob seems to do similar things ????  	 
*/ @@ -907,4 +912,3 @@ int nv10_graph_init(struct drm_device *dev) {  void nv10_graph_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv20_graph.c b/shared-core/nv20_graph.c index e6ccf672..37a147b5 100644 --- a/shared-core/nv20_graph.c +++ b/shared-core/nv20_graph.c @@ -642,6 +642,7 @@ int nv20_graph_load_context(struct nouveau_channel *chan)  	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);  	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_XFER,  		 NV20_PGRAPH_CHANNEL_CTX_XFER_LOAD); +	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100);  	nouveau_wait_for_idle(dev);  	return 0; @@ -667,10 +668,16 @@ int nv20_graph_save_context(struct nouveau_channel *chan)  static void nv20_graph_rdi(struct drm_device *dev) {  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	int i; +	int i, writecount = 32; +	uint32_t rdi_index = 0x2c80000; + +	if (dev_priv->chipset == 0x20) { +		rdi_index = 0x3d0000; +		writecount = 15; +	} -	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x2c80000); -	for (i = 0; i < 32; i++) +	NV_WRITE(NV10_PGRAPH_RDI_INDEX, rdi_index); +	for (i = 0; i < writecount; i++)  		NV_WRITE(NV10_PGRAPH_RDI_DATA, 0);  	nouveau_wait_for_idle(dev); @@ -706,7 +713,7 @@ int nv20_graph_init(struct drm_device *dev) {  	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0xFFFFFFFF);  	NV_WRITE(NV04_PGRAPH_DEBUG_0, 0x00000000);  	NV_WRITE(NV04_PGRAPH_DEBUG_1, 0x00118700); -	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF20E0435); /* 0x4 = auto ctx switch */ +	NV_WRITE(NV04_PGRAPH_DEBUG_3, 0xF3CE0475); /* 0x4 = auto ctx switch */  	NV_WRITE(NV10_PGRAPH_DEBUG_4, 0x00000000);  	NV_WRITE(0x40009C           , 0x00000040); @@ -718,9 +725,9 @@ int nv20_graph_init(struct drm_device *dev) {  		NV_WRITE(0x400098, 0x40000080);  		NV_WRITE(0x400B88, 0x000000ff);  	} else { -		NV_WRITE(0x400880, 0x00080000); +		NV_WRITE(0x400880, 0x00080000); /* 0x0008c7df */  		NV_WRITE(0x400094, 0x00000005); -		NV_WRITE(0x400B80, 0x45CAA208); +		NV_WRITE(0x400B80, 0x45CAA208); /* 0x45eae20e */  		NV_WRITE(0x400B84, 0x24000000);  		NV_WRITE(0x400098, 0x00000040);  		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00E00038); @@ -730,14 +737,30 @@ int nv20_graph_init(struct drm_device *dev) {  	}  	/* copy tile info from PFB */ -	for (i=0; i<NV10_PFB_TILE__SIZE; i++) { -		NV_WRITE(NV10_PGRAPH_TILE(i), NV_READ(NV10_PFB_TILE(i))); -		NV_WRITE(NV10_PGRAPH_TLIMIT(i), NV_READ(NV10_PFB_TLIMIT(i))); -		NV_WRITE(NV10_PGRAPH_TSIZE(i), NV_READ(NV10_PFB_TSIZE(i))); -		NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i))); +	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) { +		NV_WRITE(0x00400904 + i*0x10, NV_READ(NV10_PFB_TLIMIT(i))); +			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */ +		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0030+i*4); +		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TLIMIT(i))); +		NV_WRITE(0x00400908 + i*0x10, NV_READ(NV10_PFB_TSIZE(i))); +			/* which is NV40_PGRAPH_TSIZE0(i) ?? */ +		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0050+i*4); +		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TSIZE(i))); +		NV_WRITE(0x00400900 + i*0x10, NV_READ(NV10_PFB_TILE(i))); +			/* which is NV40_PGRAPH_TILE0(i) ?? 
*/ +		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0010+i*4); +		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(NV10_PFB_TILE(i))); +	} +	for (i = 0; i < 8; i++) { +		NV_WRITE(0x400980+i*4, NV_READ(0x100300+i*4)); +		NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA0090+i*4); +		NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100300+i*4));  	} +	NV_WRITE(0x4009a0, NV_READ(0x100324)); +	NV_WRITE(NV10_PGRAPH_RDI_INDEX, 0x00EA000C); +	NV_WRITE(NV10_PGRAPH_RDI_DATA, NV_READ(0x100324)); -	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); +	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);  	NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);  	NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001); @@ -832,7 +855,7 @@ int nv30_graph_init(struct drm_device *dev)  		NV_WRITE(NV10_PGRAPH_TSTATUS(i), NV_READ(NV10_PFB_TSTATUS(i)));  	} -	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10010100); +	NV_WRITE(NV10_PGRAPH_CTX_CONTROL, 0x10000100);  	NV_WRITE(NV10_PGRAPH_STATE      , 0xFFFFFFFF);  	NV_WRITE(NV04_PGRAPH_FIFO       , 0x00000001); @@ -865,4 +888,3 @@ int nv30_graph_init(struct drm_device *dev)  	return 0;  } - diff --git a/shared-core/nv40_fb.c b/shared-core/nv40_fb.c index 2cbb40e4..ceae8079 100644 --- a/shared-core/nv40_fb.c +++ b/shared-core/nv40_fb.c @@ -53,4 +53,3 @@ void  nv40_fb_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv40_fifo.c b/shared-core/nv40_fifo.c index ce3f8fdd..7f9d5e31 100644 --- a/shared-core/nv40_fifo.c +++ b/shared-core/nv40_fifo.c @@ -135,7 +135,9 @@ nv40_fifo_load_context(struct nouveau_channel *chan)  	NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, tmp);  	/* Set channel active, and in DMA mode */ -	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1  , 0x00010000 | chan->id); +	NV_WRITE(NV03_PFIFO_CACHE1_PUSH1, +		 NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id); +  	/* Reset DMA_CTL_AT_INFO to INVALID */  	tmp = NV_READ(NV04_PFIFO_CACHE1_DMA_CTL) & ~(1<<31);  	NV_WRITE(NV04_PFIFO_CACHE1_DMA_CTL, tmp); @@ -205,4 +207,3 @@ nv40_fifo_init(struct drm_device *dev)  	NV_WRITE(NV04_PFIFO_DMA_TIMESLICE, 0x2101ffff);  	return 0;  } - diff --git a/shared-core/nv40_graph.c b/shared-core/nv40_graph.c index 7ce4273d..fdf51519 100644 --- a/shared-core/nv40_graph.c +++ b/shared-core/nv40_graph.c @@ -304,7 +304,7 @@ nv43_graph_context_init(struct drm_device *dev, struct nouveau_gpuobj *ctx)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private;  	int i; -	 +  	INSTANCE_WR(ctx, 0x00000/4, ctx->im_pramin->start);  	INSTANCE_WR(ctx, 0x00024/4, 0x0000ffff);  	INSTANCE_WR(ctx, 0x00028/4, 0x0000ffff); @@ -1555,7 +1555,7 @@ nv40_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)  	tmp |= save ? 
NV40_PGRAPH_CTXCTL_0310_XFER_SAVE :  		      NV40_PGRAPH_CTXCTL_0310_XFER_LOAD;  	NV_WRITE(NV40_PGRAPH_CTXCTL_0310, tmp); -	 +  	tmp  = NV_READ(NV40_PGRAPH_CTXCTL_0304);  	tmp |= NV40_PGRAPH_CTXCTL_0304_XFER_CTX;  	NV_WRITE(NV40_PGRAPH_CTXCTL_0304, tmp); @@ -1877,35 +1877,35 @@ static uint32_t nv49_4b_ctx_voodoo[] ={  static uint32_t nv4a_ctx_voodoo[] = { -	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001,  -	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06,  -	0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080,  -	0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061,  -	0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d,  -	0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4,  -	0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e,  -	0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143,  -	0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10,  -	0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1,  -	0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b,  -	0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6,  -	0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700,  -	0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 0x00405884, 0x00600003,  -	0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a,  -	0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940,  -	0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00,  -	0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00,  -	0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300,  -	0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a,  -	0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100,  -	0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a,  -	0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000,  -	0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004,  -	0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080,  -	0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88,  -	0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000,  -	0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020,  -	0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05,  +	0x00400889, 0x00200000, 0x0060000a, 0x00200000, 0x00300000, 0x00800001, +	0x00700009, 0x0060000e, 0x00400d64, 0x00400d05, 0x00409965, 0x00409e06, +	0x0040ac68, 0x00200000, 0x0060000a, 0x00700000, 0x00106000, 0x00700080, +	0x004014e6, 0x007000a0, 0x00401a84, 0x00700082, 0x00600001, 0x00500061, +	0x00600002, 0x00401b68, 0x00500060, 0x00200001, 0x0060000a, 0x0011814d, +	0x00110158, 0x00105401, 0x0020003a, 0x00100051, 0x001040c5, 0x0010c1c4, +	0x001041c9, 0x0010c1dc, 0x00150210, 0x0012c225, 0x00108238, 0x0010823e, +	0x001242c0, 0x00200040, 0x00100280, 0x00128100, 0x00128120, 0x00128143, +	0x0011415f, 0x0010815c, 0x0010c140, 0x00104029, 0x00110400, 0x00104d10, +	0x001046ec, 0x00500060, 0x00403a87, 0x0060000d, 0x00407de6, 0x002000f1, +	0x0060000a, 0x00148653, 0x00104668, 0x0010c66d, 0x00120682, 0x0011068b, +	0x00168691, 0x001046ae, 0x001046b0, 0x001206b4, 0x001046c4, 0x001146c6, +	0x001646cc, 0x001186e6, 0x001046ed, 0x001246f0, 0x002000c0, 0x00100700, +	0x0010c3d7, 0x001043e1, 0x00500060, 0x00405800, 
0x00405884, 0x00600003, +	0x00500067, 0x00600008, 0x00500060, 0x00700082, 0x00200232, 0x0060000a, +	0x00104800, 0x00108901, 0x00104910, 0x00124920, 0x0020001f, 0x00100940, +	0x00140965, 0x00148a00, 0x00108a14, 0x00160b00, 0x00134b2c, 0x0010cd00, +	0x0010cd04, 0x0010cd08, 0x00104d80, 0x00104e00, 0x0012d600, 0x00105c00, +	0x00104f06, 0x002002c8, 0x0060000a, 0x00300000, 0x00200080, 0x00407300, +	0x00200084, 0x00800001, 0x00200510, 0x0060000a, 0x002037e0, 0x0040798a, +	0x00201320, 0x00800029, 0x00407d84, 0x00201560, 0x00800002, 0x00409100, +	0x00600006, 0x00700003, 0x00408ae6, 0x00700080, 0x0020007a, 0x0060000a, +	0x00104280, 0x002002c8, 0x0060000a, 0x00200004, 0x00800001, 0x00700000, +	0x00200000, 0x0060000a, 0x00106002, 0x0040ac84, 0x00700002, 0x00600004, +	0x0040ac68, 0x00700000, 0x00200000, 0x0060000a, 0x00106002, 0x00700080, +	0x00400a84, 0x00700002, 0x00400a68, 0x00500060, 0x00600007, 0x00409d88, +	0x0060000f, 0x00000000, 0x00500060, 0x00200000, 0x0060000a, 0x00700000, +	0x00106001, 0x00700083, 0x00910880, 0x00901ffe, 0x01940000, 0x00200020, +	0x0060000b, 0x00500069, 0x0060000c, 0x00401b68, 0x0040ae06, 0x0040af05,  	0x00600009, 0x00700005, 0x00700006, 0x0060000e, ~0  }; @@ -2026,7 +2026,7 @@ nv40_graph_init(struct drm_device *dev)  			NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, ctx_voodoo[i]);  			i++;  		} -	}	 +	}  	/* No context present currently */  	NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0x00000000); @@ -2221,4 +2221,3 @@ nv40_graph_init(struct drm_device *dev)  void nv40_graph_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv40_mc.c b/shared-core/nv40_mc.c index c7db9023..ead6f87f 100644 --- a/shared-core/nv40_mc.c +++ b/shared-core/nv40_mc.c @@ -36,4 +36,3 @@ void  nv40_mc_takedown(struct drm_device *dev)  {  } - diff --git a/shared-core/nv50_fifo.c b/shared-core/nv50_fifo.c index 7859544a..edf4edbf 100644 --- a/shared-core/nv50_fifo.c +++ b/shared-core/nv50_fifo.c @@ -28,9 +28,10 @@  #include "drm.h"  #include "nouveau_drv.h" -typedef struct { -	struct nouveau_gpuobj_ref *thingo; -} nv50_fifo_priv; +struct nv50_fifo_priv { +	struct nouveau_gpuobj_ref *thingo[2]; +	int cur_thingo; +};  #define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50) @@ -38,23 +39,23 @@ static void  nv50_fifo_init_thingo(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; -	struct nouveau_gpuobj_ref *thingo = priv->thingo; -	int i, fi=2; +	struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; +	struct nouveau_gpuobj_ref *cur; +	int i, nr;  	DRM_DEBUG("\n"); -	INSTANCE_WR(thingo->gpuobj, 0, 0x7e); -	INSTANCE_WR(thingo->gpuobj, 1, 0x7e); -	for (i = 1; i < 127; i++, fi) { +	cur = priv->thingo[priv->cur_thingo]; +	priv->cur_thingo = !priv->cur_thingo; + +	/* We never schedule channel 0 or 127 */ +	for (i = 1, nr = 0; i < 127; i++) {  		if (dev_priv->fifos[i]) { -			INSTANCE_WR(thingo->gpuobj, fi, i); -			fi++; +			INSTANCE_WR(cur->gpuobj, nr++, i);  		}  	} - -	NV_WRITE(0x32f4, thingo->instance >> 12); -	NV_WRITE(0x32ec, fi); +	NV_WRITE(0x32f4, cur->instance >> 12); +	NV_WRITE(0x32ec, nr);  	NV_WRITE(0x2500, 0x101);  } @@ -98,14 +99,12 @@ static void  nv50_fifo_init_reset(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	uint32_t pmc_e; +	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;  	DRM_DEBUG("\n"); -	pmc_e = NV_READ(NV03_PMC_ENABLE); -	NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PFIFO); -	pmc_e = NV_READ(NV03_PMC_ENABLE); -	NV_WRITE(NV03_PMC_ENABLE, pmc_e |  
NV_PMC_ENABLE_PFIFO); +	NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); +	NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |  pmc_e);  }  static void @@ -141,7 +140,7 @@ nv50_fifo_init_regs__nv(struct drm_device *dev)  	NV_WRITE(0x250c, 0x6f3cfc34);  } -static int +static void  nv50_fifo_init_regs(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; @@ -158,15 +157,13 @@ nv50_fifo_init_regs(struct drm_device *dev)  	/* Enable dummy channels setup by nv50_instmem.c */  	nv50_fifo_channel_enable(dev, 0, 1);  	nv50_fifo_channel_enable(dev, 127, 1); - -	return 0;  }  int  nv50_fifo_init(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	nv50_fifo_priv *priv; +	struct nv50_fifo_priv *priv;  	int ret;  	DRM_DEBUG("\n"); @@ -179,18 +176,23 @@ nv50_fifo_init(struct drm_device *dev)  	nv50_fifo_init_reset(dev);  	nv50_fifo_init_intr(dev); -	if ((ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, (128+2)*4, 0x1000, -				   NVOBJ_FLAG_ZERO_ALLOC, -				   &priv->thingo))) { -		DRM_ERROR("error creating thingo: %d\n", ret); +	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, +				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]); +	if (ret) { +		DRM_ERROR("error creating thingo0: %d\n", ret);  		return ret;  	} -	nv50_fifo_init_context_table(dev); +	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000, +				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]); +	if (ret) { +		DRM_ERROR("error creating thingo1: %d\n", ret); +		return ret; +	} +	nv50_fifo_init_context_table(dev);  	nv50_fifo_init_regs__nv(dev); -	if ((ret = nv50_fifo_init_regs(dev))) -		return ret; +	nv50_fifo_init_regs(dev);  	return 0;  } @@ -199,20 +201,30 @@ void  nv50_fifo_takedown(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv; +	struct nv50_fifo_priv *priv = dev_priv->Engine.fifo.priv;  	DRM_DEBUG("\n");  	if (!priv)  		return; -	nouveau_gpuobj_ref_del(dev, &priv->thingo); +	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]); +	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);  	dev_priv->Engine.fifo.priv = NULL;  	drm_free(priv, sizeof(*priv), DRM_MEM_DRIVER);  }  int +nv50_fifo_channel_id(struct drm_device *dev) +{ +	struct drm_nouveau_private *dev_priv = dev->dev_private; + +	return (NV_READ(NV03_PFIFO_CACHE1_PUSH1) & +			NV50_PFIFO_CACHE1_PUSH1_CHID_MASK); +} + +int  nv50_fifo_create_context(struct nouveau_channel *chan)  {  	struct drm_device *dev = chan->dev; @@ -225,18 +237,18 @@ nv50_fifo_create_context(struct nouveau_channel *chan)  	if (IS_G80) {  		uint32_t ramfc_offset = chan->ramin->gpuobj->im_pramin->start;  		uint32_t vram_offset = chan->ramin->gpuobj->im_backing->start; -		if ((ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, -						   vram_offset, 0x100, -						   NVOBJ_FLAG_ZERO_ALLOC | -						   NVOBJ_FLAG_ZERO_FREE, -						   &ramfc, &chan->ramfc))) -				return ret; +		ret = nouveau_gpuobj_new_fake(dev, ramfc_offset, vram_offset, +					      0x100, NVOBJ_FLAG_ZERO_ALLOC | +					      NVOBJ_FLAG_ZERO_FREE, &ramfc, +					      &chan->ramfc); +		if (ret) +			return ret;  	} else { -		if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, -						  256, -						  NVOBJ_FLAG_ZERO_ALLOC | -						  NVOBJ_FLAG_ZERO_FREE, -						  &chan->ramfc))) +		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256, +					     NVOBJ_FLAG_ZERO_ALLOC | +					     NVOBJ_FLAG_ZERO_FREE, +					     &chan->ramfc); +		if (ret)  			return ret;  		ramfc = 
chan->ramfc->gpuobj;  	} @@ -263,7 +275,8 @@ nv50_fifo_create_context(struct nouveau_channel *chan)  		INSTANCE_WR(ramfc, 0x98/4, chan->ramin->instance >> 12);  	} -	if ((ret = nv50_fifo_channel_enable(dev, chan->id, 0))) { +	ret = nv50_fifo_channel_enable(dev, chan->id, 0); +	if (ret) {  		DRM_ERROR("error enabling ch%d: %d\n", chan->id, ret);  		nouveau_gpuobj_ref_del(dev, &chan->ramfc);  		return ret; @@ -324,4 +337,3 @@ nv50_fifo_save_context(struct nouveau_channel *chan)  	DRM_ERROR("stub!\n");  	return 0;  } - diff --git a/shared-core/nv50_graph.c b/shared-core/nv50_graph.c index e5bbf65e..503f45dd 100644 --- a/shared-core/nv50_graph.c +++ b/shared-core/nv50_graph.c @@ -34,14 +34,12 @@ static void  nv50_graph_init_reset(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	uint32_t pmc_e; +	uint32_t pmc_e = NV_PMC_ENABLE_PGRAPH | (1 << 21);  	DRM_DEBUG("\n"); -	pmc_e = NV_READ(NV03_PMC_ENABLE); -	NV_WRITE(NV03_PMC_ENABLE, pmc_e & ~NV_PMC_ENABLE_PGRAPH); -	pmc_e = NV_READ(NV03_PMC_ENABLE); -	NV_WRITE(NV03_PMC_ENABLE, pmc_e |  NV_PMC_ENABLE_PGRAPH); +	NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) & ~pmc_e); +	NV_WRITE(NV03_PMC_ENABLE, NV_READ(NV03_PMC_ENABLE) |  pmc_e);  }  static void @@ -51,6 +49,7 @@ nv50_graph_init_intr(struct drm_device *dev)  	DRM_DEBUG("\n");  	NV_WRITE(NV03_PGRAPH_INTR, 0xffffffff); +	NV_WRITE(0x400138, 0xffffffff);  	NV_WRITE(NV40_PGRAPH_INTR_EN, 0xffffffff);  } @@ -146,12 +145,53 @@ static uint32_t nv84_ctx_voodoo[] = {  	0x00415e06, 0x00415f05, 0x0060000d, 0x00700005, 0x0070000d, 0x00700006,  	0x0070000b, 0x0070000e, 0x0070001c, 0x0060000c, ~0  }; +  +static uint32_t nv86_ctx_voodoo[] = { +	0x0070008e, 0x0070009c, 0x00200020, 0x00600008, 0x0050004c, 0x00400e89,  +	0x00200000, 0x00600007, 0x00300000, 0x00c000ff, 0x00200000, 0x008000ff,  +	0x00700009, 0x0040dd4d, 0x00402944, 0x00402905, 0x0040290d, 0x0040b906,  +	0x00600005, 0x004015c5, 0x00600011, 0x0040270b, 0x004021c5, 0x00700000,  +	0x00700081, 0x00600004, 0x0050004a, 0x00216d80, 0x00600007, 0x00c02801,  +	0x0020002e, 0x00800001, 0x005000cb, 0x0090ffff, 0x0091ffff, 0x00200020,  +	0x00600008, 0x0050004c, 0x00600009, 0x0040b945, 0x0040d44d, 0x0070009d,  +	0x00402dcf, 0x0070009f, 0x0050009f, 0x00402ac0, 0x00200200, 0x00600008,  +	0x00402a4f, 0x00402ac0, 0x004030cc, 0x00700081, 0x00200000, 0x00600006,  +	0x00700000, 0x00111bfc, 0x00700083, 0x00300000, 0x00216d80, 0x00600007,  +	0x00c00b01, 0x0020001e, 0x00800001, 0x005000cb, 0x00c000ff, 0x00700080,  +	0x00700083, 0x00200047, 0x00600006, 0x0011020a, 0x00200280, 0x00600007,  +	0x00300000, 0x00c000ff, 0x00c800ff, 0x0040c407, 0x00202916, 0x008000ff,  +	0x0040508c, 0x005000cb, 0x00a0023f, 0x00200040, 0x00600006, 0x0070000f,  +	0x00170202, 0x0011020a, 0x00200032, 0x0010020d, 0x001c0242, 0x00120302,  +	0x00140402, 0x00180500, 0x00130509, 0x00150550, 0x00110605, 0x0020000f,  +	0x00100607, 0x00110700, 0x00110900, 0x00120902, 0x00110a00, 0x00160b02,  +	0x00120b28, 0x00140b2b, 0x00110c01, 0x00111400, 0x00111405, 0x00111407,  +	0x00111409, 0x0011140b, 0x002000cb, 0x00101500, 0x0040790f, 0x0040794b,  +	0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x0070008f, 0x0040798c,  +	0x005000cb, 0x00000000, 0x0020002b, 0x00101a05, 0x00131c00, 0x00121c04,  +	0x00141c20, 0x00111c25, 0x00131c40, 0x00121c44, 0x00141c60, 0x00111c65,  +	0x00131f00, 0x00191f40, 0x004099e0, 0x002001d9, 0x00600006, 0x00200044,  +	0x00102080, 0x001120c6, 0x001520c9, 0x001920d0, 0x00122100, 0x00122103,  +	0x00162200, 0x00122207, 0x00112280, 0x00112300, 0x00112302, 
0x00122380,  +	0x0011238b, 0x00112394, 0x0011239c, 0x00000000, 0x0040a00f, 0x005000cb,  +	0x00214b40, 0x00600007, 0x00200442, 0x008800ff, 0x005000cb, 0x0040a387,  +	0x0060000a, 0x00000000, 0x0040b200, 0x007000a0, 0x00700080, 0x00200280,  +	0x00600007, 0x00200004, 0x00c000ff, 0x008000ff, 0x005000cb, 0x00700000,  +	0x00200000, 0x00600006, 0x00111bfe, 0x0040d44d, 0x00700000, 0x00200000,  +	0x00600006, 0x00111bfe, 0x00700080, 0x0070001d, 0x0040114d, 0x00700081,  +	0x00600004, 0x0050004a, 0x0040be88, 0x0060000b, 0x00200000, 0x00600006,  +	0x00700000, 0x0040d40b, 0x00111bfd, 0x0040424d, 0x00202916, 0x008000fd,  +	0x005000cb, 0x00c00002, 0x00200280, 0x00600007, 0x00200160, 0x00800002,  +	0x005000cb, 0x00c01802, 0x002027b6, 0x00800002, 0x005000cb, 0x00404e4d,  +	0x0060000b, 0x0040d24d, 0x00700001, 0x00700003, 0x0040d806, 0x0040d905,  +	0x0060000d, 0x00700005, 0x0070000d, 0x00700006, 0x0070000b, 0x0070000e,  +	0x0070001c, 0x0060000c, ~0 +}; -static void +static int  nv50_graph_init_ctxctl(struct drm_device *dev)  {  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	uint32_t *voodoo; +	uint32_t *voodoo = NULL;  	DRM_DEBUG("\n"); @@ -159,34 +199,42 @@ nv50_graph_init_ctxctl(struct drm_device *dev)  	case 0x84:  		voodoo = nv84_ctx_voodoo;  		break; +	case 0x86: +		voodoo = nv86_ctx_voodoo; +		break;  	default:  		DRM_ERROR("no voodoo for chipset NV%02x\n", dev_priv->chipset); -		break; +		return -EINVAL;  	} -	if (voodoo) { -		NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); -		while (*voodoo != ~0) { -			NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo); -			voodoo++; -		} +	NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0); +	while (*voodoo != ~0) { +		NV_WRITE(NV40_PGRAPH_CTXCTL_UCODE_DATA, *voodoo); +		voodoo++;  	}  	NV_WRITE(0x400320, 4);  	NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, 0);  	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, 0); + +	return 0;  } -int  +int  nv50_graph_init(struct drm_device *dev)  { +	int ret; +  	DRM_DEBUG("\n");  	nv50_graph_init_reset(dev);  	nv50_graph_init_intr(dev);  	nv50_graph_init_regs__nv(dev);  	nv50_graph_init_regs(dev); -	nv50_graph_init_ctxctl(dev); + +	ret = nv50_graph_init_ctxctl(dev); +	if (ret) +		return ret;  	return 0;  } @@ -209,11 +257,10 @@ nv50_graph_create_context(struct nouveau_channel *chan)  	DRM_DEBUG("ch%d\n", chan->id); -	if ((ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, -					  grctx_size, 0x1000, -					  NVOBJ_FLAG_ZERO_ALLOC | -					  NVOBJ_FLAG_ZERO_FREE, -					  &chan->ramin_grctx))) +	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000, +				     NVOBJ_FLAG_ZERO_ALLOC | +				     NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx); +	if (ret)  		return ret;  	hdr = IS_G80 ? 
0x200 : 0x20; @@ -225,11 +272,16 @@ nv50_graph_create_context(struct nouveau_channel *chan)  	INSTANCE_WR(ramin, (hdr + 0x10)/4, 0);  	INSTANCE_WR(ramin, (hdr + 0x14)/4, 0x00010000); -	if ((ret = engine->graph.load_context(chan))) { +	ret = engine->graph.load_context(chan); +	if (ret) {  		DRM_ERROR("Error hacking up initial context: %d\n", ret);  		return ret;  	} +	INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x00000/4, +		    chan->ramin->instance >> 12); +	INSTANCE_WR(chan->ramin_grctx->gpuobj, 0x0011c/4, 0x00000002); +  	return 0;  } @@ -259,10 +311,10 @@ nv50_graph_transfer_context(struct drm_device *dev, uint32_t inst, int save)  	DRM_DEBUG("inst=0x%08x, save=%d\n", inst, save);  	old_cp = NV_READ(NV20_PGRAPH_CHANNEL_CTX_POINTER); -	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst | (1<<31)); +	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);  	NV_WRITE(0x400824, NV_READ(0x400824) |  		 (save ? NV40_PGRAPH_CTXCTL_0310_XFER_SAVE : -		  	 NV40_PGRAPH_CTXCTL_0310_XFER_LOAD)); +			 NV40_PGRAPH_CTXCTL_0310_XFER_LOAD));  	NV_WRITE(NV40_PGRAPH_CTXCTL_0304, NV40_PGRAPH_CTXCTL_0304_XFER_CTX);  	for (i = 0; i < tv; i++) { @@ -286,7 +338,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)  {  	struct drm_device *dev = chan->dev;  	struct drm_nouveau_private *dev_priv = dev->dev_private; -	uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); +	uint32_t inst = chan->ramin->instance >> 12;  	int ret; (void)ret;  	DRM_DEBUG("ch%d\n", chan->id); @@ -298,7 +350,7 @@ nv50_graph_load_context(struct nouveau_channel *chan)  	NV_WRITE(NV20_PGRAPH_CHANNEL_CTX_POINTER, inst);  	NV_WRITE(0x400320, 4); -	NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst); +	NV_WRITE(NV40_PGRAPH_CTXCTL_CUR, inst | (1<<31));  	return 0;  } @@ -307,10 +359,9 @@ int  nv50_graph_save_context(struct nouveau_channel *chan)  {  	struct drm_device *dev = chan->dev; -	uint32_t inst = ((chan->ramin->instance >> 12) | (1<<31)); +	uint32_t inst = chan->ramin->instance >> 12;  	DRM_DEBUG("ch%d\n", chan->id);  	return nv50_graph_transfer_context(dev, inst, 1);  } - diff --git a/shared-core/nv50_instmem.c b/shared-core/nv50_instmem.c index 1eeb54df..9687ecbb 100644 --- a/shared-core/nv50_instmem.c +++ b/shared-core/nv50_instmem.c @@ -69,7 +69,11 @@ nv50_instmem_init(struct drm_device *dev)  		return -ENOMEM;  	dev_priv->Engine.instmem.priv = priv; -	/* Reserve the last MiB of VRAM, we should probably try to avoid  +	/* Save state, will restore at takedown. */ +	for (i = 0x1700; i <= 0x1710; i+=4) +		priv->save1700[(i-0x1700)/4] = NV_READ(i); + +	/* Reserve the last MiB of VRAM, we should probably try to avoid  	 * setting up the below tables over the top of the VBIOS image at  	 * some point.  	 
*/ @@ -144,7 +148,7 @@ nv50_instmem_init(struct drm_device *dev)  			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v | 1);  		else  			BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000009); -		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);  +		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);  	}  	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63); @@ -259,7 +263,7 @@ nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)  			dev_priv->Engine.instmem.unbind(dev, gpuobj);  		nouveau_mem_free(dev, gpuobj->im_backing);  		gpuobj->im_backing = NULL; -	}	 +	}  }  int @@ -317,4 +321,3 @@ nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)  	gpuobj->im_bound = 0;  	return 0;  } - diff --git a/shared-core/r128_cce.c b/shared-core/r128_cce.c index 5bed45bc..204ea37d 100644 --- a/shared-core/r128_cce.c +++ b/shared-core/r128_cce.c @@ -324,7 +324,7 @@ static void r128_cce_init_ring_buffer(struct drm_device * dev,  		ring_start = dev_priv->cce_ring->offset - dev->agp->base;  	else  #endif -		ring_start = dev_priv->cce_ring->offset -  +		ring_start = dev_priv->cce_ring->offset -  				(unsigned long)dev->sg->virtual;  	R128_WRITE(R128_PM4_BUFFER_OFFSET, ring_start | R128_AGP_OFFSET); @@ -649,7 +649,7 @@ int r128_cce_start(struct drm_device *dev, void *data, struct drm_file *file_pri  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (dev_priv->cce_running || dev_priv->cce_mode == R128_PM4_NONPM4) { -		DRM_DEBUG("%s while CCE running\n", __FUNCTION__); +		DRM_DEBUG("while CCE running\n");  		return 0;  	} @@ -708,7 +708,7 @@ int r128_cce_reset(struct drm_device *dev, void *data, struct drm_file *file_pri  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_DEBUG("%s called before init done\n", __FUNCTION__); +		DRM_DEBUG("called before init done\n");  		return -EINVAL;  	} diff --git a/shared-core/r128_drv.h b/shared-core/r128_drv.h index cb0c41cb..ab8b6297 100644 --- a/shared-core/r128_drv.h +++ b/shared-core/r128_drv.h @@ -120,7 +120,7 @@ typedef struct drm_r128_private {  	drm_local_map_t *cce_ring;  	drm_local_map_t *ring_rptr;  	drm_local_map_t *agp_textures; -	struct ati_pcigart_info gart_info; +	struct drm_ati_pcigart_info gart_info;  } drm_r128_private_t;  typedef struct drm_r128_buf_priv { @@ -465,8 +465,7 @@ do {									\  #define BEGIN_RING( n ) do {						\  	if ( R128_VERBOSE ) {						\ -		DRM_INFO( "BEGIN_RING( %d ) in %s\n",			\ -			   (n), __FUNCTION__ );				\ +		DRM_INFO( "BEGIN_RING( %d )\n", (n));			\  	}								\  	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {		\  		COMMIT_RING();						\ @@ -496,7 +495,7 @@ do {									\  			write * sizeof(u32) );				\  	}								\  	if (((dev_priv->ring.tail + _nr) & tail_mask) != write) {	\ -		DRM_ERROR( 						\ +		DRM_ERROR(						\  			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",	\  			((dev_priv->ring.tail + _nr) & tail_mask),	\  			write, __LINE__);				\ diff --git a/shared-core/r128_state.c b/shared-core/r128_state.c index b7f483ca..51a9afce 100644 --- a/shared-core/r128_state.c +++ b/shared-core/r128_state.c @@ -42,7 +42,7 @@ static void r128_emit_clip_rects(drm_r128_private_t * dev_priv,  {  	u32 aux_sc_cntl = 0x00000000;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING((count < 3 ? 
count : 3) * 5 + 2); @@ -85,7 +85,7 @@ static __inline__ void r128_emit_core(drm_r128_private_t * dev_priv)  	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;  	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(2); @@ -100,7 +100,7 @@ static __inline__ void r128_emit_context(drm_r128_private_t * dev_priv)  	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;  	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(13); @@ -126,7 +126,7 @@ static __inline__ void r128_emit_setup(drm_r128_private_t * dev_priv)  	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;  	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(3); @@ -142,7 +142,7 @@ static __inline__ void r128_emit_masks(drm_r128_private_t * dev_priv)  	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;  	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(5); @@ -161,7 +161,7 @@ static __inline__ void r128_emit_window(drm_r128_private_t * dev_priv)  	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;  	drm_r128_context_regs_t *ctx = &sarea_priv->context_state;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(2); @@ -178,7 +178,7 @@ static __inline__ void r128_emit_tex0(drm_r128_private_t * dev_priv)  	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[0];  	int i;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(7 + R128_MAX_TEXTURE_LEVELS); @@ -204,7 +204,7 @@ static __inline__ void r128_emit_tex1(drm_r128_private_t * dev_priv)  	drm_r128_texture_regs_t *tex = &sarea_priv->tex_state[1];  	int i;  	RING_LOCALS; -	DRM_DEBUG("    %s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(5 + R128_MAX_TEXTURE_LEVELS); @@ -226,7 +226,7 @@ static void r128_emit_state(drm_r128_private_t * dev_priv)  	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv;  	unsigned int dirty = sarea_priv->dirty; -	DRM_DEBUG("%s: dirty=0x%08x\n", __FUNCTION__, dirty); +	DRM_DEBUG("dirty=0x%08x\n", dirty);  	if (dirty & R128_UPLOAD_CORE) {  		r128_emit_core(dev_priv); @@ -362,7 +362,7 @@ static void r128_cce_dispatch_clear(struct drm_device * dev,  	unsigned int flags = clear->flags;  	int i;  	RING_LOCALS; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	if (dev_priv->page_flipping && dev_priv->current_page == 1) {  		unsigned int tmp = flags; @@ -466,7 +466,7 @@ static void r128_cce_dispatch_swap(struct drm_device * dev)  	struct drm_clip_rect *pbox = sarea_priv->boxes;  	int i;  	RING_LOCALS; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  #if R128_PERFORMANCE_BOXES  	/* Do some trivial performance monitoring... 
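/*
 * [editor's sketch, not part of the patch] The r128 hunks above and
 * below replace DRM_DEBUG("%s\n", __FUNCTION__) with DRM_DEBUG("\n").
 * The explicit function name is redundant when the debug macro itself
 * prepends __func__.  A minimal user-space sketch of such a macro
 * (names hypothetical; the in-tree DRM_DEBUG definition may differ):
 */
#include <stdio.h>

#define MY_DEBUG(fmt, ...) \
	fprintf(stderr, "[drm:%s] " fmt, __func__, ##__VA_ARGS__)

static void r128_demo_emit(void)
{
	/* Prints "[drm:r128_demo_emit] dirty=0x00000001" -- no "%s" needed. */
	MY_DEBUG("dirty=0x%08x\n", 0x1u);
}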
@@ -528,8 +528,7 @@ static void r128_cce_dispatch_flip(struct drm_device * dev)  {  	drm_r128_private_t *dev_priv = dev->dev_private;  	RING_LOCALS; -	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", -		  __FUNCTION__, +	DRM_DEBUG("page=%d pfCurrentPage=%d\n",  		  dev_priv->current_page, dev_priv->sarea_priv->pfCurrentPage);  #if R128_PERFORMANCE_BOXES @@ -1156,7 +1155,7 @@ static int r128_cce_dispatch_read_pixels(struct drm_device * dev,  	int count, *x, *y;  	int i, xbuf_size, ybuf_size;  	RING_LOCALS; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	count = depth->n;  	if (count > 4096 || count <= 0) @@ -1226,7 +1225,7 @@ static void r128_cce_dispatch_stipple(struct drm_device * dev, u32 * stipple)  	drm_r128_private_t *dev_priv = dev->dev_private;  	int i;  	RING_LOCALS; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	BEGIN_RING(33); @@ -1309,7 +1308,7 @@ static int r128_do_cleanup_pageflip(struct drm_device * dev)  static int r128_cce_flip(struct drm_device *dev, void *data, struct drm_file *file_priv)  {  	drm_r128_private_t *dev_priv = dev->dev_private; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1328,7 +1327,7 @@ static int r128_cce_swap(struct drm_device *dev, void *data, struct drm_file *fi  {  	drm_r128_private_t *dev_priv = dev->dev_private;  	drm_r128_sarea_t *sarea_priv = dev_priv->sarea_priv; -	DRM_DEBUG("%s\n", __FUNCTION__); +	DRM_DEBUG("\n");  	LOCK_TEST_WITH_RETURN(dev, file_priv); @@ -1356,7 +1355,7 @@ static int r128_cce_vertex(struct drm_device *dev, void *data, struct drm_file *  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -1412,7 +1411,7 @@ static int r128_cce_indices(struct drm_device *dev, void *data, struct drm_file  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -1557,11 +1556,11 @@ static int r128_cce_indirect(struct drm_device *dev, void *data, struct drm_file  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} -	DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", +	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",  		  indirect->idx, indirect->start, indirect->end,  		  indirect->discard); @@ -1622,7 +1621,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi  	int value;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} diff --git a/shared-core/r300_cmdbuf.c b/shared-core/r300_cmdbuf.c index fe46c2d2..a26a71d5 100644 --- a/shared-core/r300_cmdbuf.c +++ b/shared-core/r300_cmdbuf.c @@ -77,23 +77,31 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,  				return -EFAULT;  			} -			box.x1 = -			    (box.x1 + -			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; -			box.y1 = -			    (box.y1 + -			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; -			box.x2 = -			    (box.x2 + -			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; -			box.y2 = -			    (box.y2 + -			     R300_CLIPRECT_OFFSET) & R300_CLIPRECT_MASK; +			if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { +				box.x1 = (box.x1) & +					
R300_CLIPRECT_MASK; +				box.y1 = (box.y1) & +					R300_CLIPRECT_MASK; +				box.x2 = (box.x2) & +					R300_CLIPRECT_MASK; +				box.y2 = (box.y2) & +					R300_CLIPRECT_MASK; +			} else { +				box.x1 = (box.x1 + R300_CLIPRECT_OFFSET) & +					R300_CLIPRECT_MASK; +				box.y1 = (box.y1 + R300_CLIPRECT_OFFSET) & +					R300_CLIPRECT_MASK; +				box.x2 = (box.x2 + R300_CLIPRECT_OFFSET) & +					R300_CLIPRECT_MASK; +				box.y2 = (box.y2 + R300_CLIPRECT_OFFSET) & +					R300_CLIPRECT_MASK; +			}  			OUT_RING((box.x1 << R300_CLIPRECT_X_SHIFT) |  				 (box.y1 << R300_CLIPRECT_Y_SHIFT));  			OUT_RING((box.x2 << R300_CLIPRECT_X_SHIFT) |  				 (box.y2 << R300_CLIPRECT_Y_SHIFT)); +  		}  		OUT_RING_REG(R300_RE_CLIPRECT_CNTL, r300_cliprect_cntl[nr - 1]); @@ -133,9 +141,11 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv,  static u8 r300_reg_flags[0x10000 >> 2]; -void r300_init_reg_flags(void) +void r300_init_reg_flags(struct drm_device *dev)  {  	int i; +	drm_radeon_private_t *dev_priv = dev->dev_private; +  	memset(r300_reg_flags, 0, 0x10000 >> 2);  #define ADD_RANGE_MARK(reg, count,mark) \  		for(i=((reg)>>2);i<((reg)>>2)+(count);i++)\ @@ -230,6 +240,9 @@ void r300_init_reg_flags(void)  	ADD_RANGE(R300_VAP_INPUT_ROUTE_0_0, 8);  	ADD_RANGE(R300_VAP_INPUT_ROUTE_1_0, 8); +	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RV515) { +		ADD_RANGE(0x4074, 16); +	}  }  static __inline__ int r300_check_range(unsigned reg, int count) @@ -486,7 +499,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,  	if (cmd[0] & 0x8000) {  		u32 offset; -		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL  +		if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL  			      | RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {  			offset = cmd[2] << 10;  			ret = !radeon_check_offset(dev_priv, offset); @@ -504,7 +517,7 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,  				DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);  				return -EINVAL;  			} -			 +  		}  	} @@ -723,53 +736,53 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,  	u32 *ref_age_base;  	u32 i, buf_idx, h_pending;  	RING_LOCALS; -	 +  	if (cmdbuf->bufsz < sizeof(uint64_t) + header.scratch.n_bufs * sizeof(buf_idx) ) {  		return -EINVAL;  	} -	 +  	if (header.scratch.reg >= 5) {  		return -EINVAL;  	} -	 +  	dev_priv->scratch_ages[header.scratch.reg] ++; -	 +  	ref_age_base = (u32 *)(unsigned long)*((uint64_t *)cmdbuf->buf); -	 +  	cmdbuf->buf += sizeof(uint64_t);  	cmdbuf->bufsz -= sizeof(uint64_t); -	 +  	for (i=0; i < header.scratch.n_bufs; i++) {  		buf_idx = *(u32 *)cmdbuf->buf;  		buf_idx *= 2; /* 8 bytes per buf */ -		 +  		if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {  			return -EINVAL;  		} -					 +  		if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {  			return -EINVAL;  		} -					 +  		if (h_pending == 0) {  			return -EINVAL;  		} -					 +  		h_pending--; -						 +  		if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {  			return -EINVAL;  		} -					 +  		cmdbuf->buf += sizeof(buf_idx);  		cmdbuf->bufsz -= sizeof(buf_idx);  	} -	 +  	BEGIN_RING(2);  	OUT_RING( CP_PACKET0( RADEON_SCRATCH_REG0 + header.scratch.reg * 4, 0 ) );  	OUT_RING( dev_priv->scratch_ages[header.scratch.reg] );  	ADVANCE_RING(); -	 +  	return 0;  } @@ -918,7 +931,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,  				goto cleanup;  			}  			break; -			 +  		default:  			DRM_ERROR("bad cmd_type %i at 
%p\n",  				  header.header.cmd_type, diff --git a/shared-core/r300_reg.h b/shared-core/r300_reg.h index e59919be..29198c8a 100644 --- a/shared-core/r300_reg.h +++ b/shared-core/r300_reg.h @@ -856,13 +856,13 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.  #	define R300_TX_FORMAT_W8Z8Y8X8		    0xC  #	define R300_TX_FORMAT_W2Z10Y10X10	    0xD  #	define R300_TX_FORMAT_W16Z16Y16X16	    0xE -#	define R300_TX_FORMAT_DXT1	    	    0xF -#	define R300_TX_FORMAT_DXT3	    	    0x10 -#	define R300_TX_FORMAT_DXT5	    	    0x11 +#	define R300_TX_FORMAT_DXT1		    0xF +#	define R300_TX_FORMAT_DXT3		    0x10 +#	define R300_TX_FORMAT_DXT5		    0x11  #	define R300_TX_FORMAT_D3DMFT_CxV8U8	    0x12     /* no swizzle */ -#	define R300_TX_FORMAT_A8R8G8B8	    	    0x13     /* no swizzle */ -#	define R300_TX_FORMAT_B8G8_B8G8	    	    0x14     /* no swizzle */ -#	define R300_TX_FORMAT_G8R8_G8B8	    	    0x15     /* no swizzle */ +#	define R300_TX_FORMAT_A8R8G8B8		    0x13     /* no swizzle */ +#	define R300_TX_FORMAT_B8G8_B8G8		    0x14     /* no swizzle */ +#	define R300_TX_FORMAT_G8R8_G8B8		    0x15     /* no swizzle */  	/* 0x16 - some 16 bit green format.. ?? */  #	define R300_TX_FORMAT_UNK25		   (1 << 25) /* no swizzle */  #	define R300_TX_FORMAT_CUBIC_MAP		   (1 << 26) @@ -870,19 +870,19 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.  	/* gap */  	/* Floating point formats */  	/* Note - hardware supports both 16 and 32 bit floating point */ -#	define R300_TX_FORMAT_FL_I16	    	    0x18 -#	define R300_TX_FORMAT_FL_I16A16	    	    0x19 +#	define R300_TX_FORMAT_FL_I16		    0x18 +#	define R300_TX_FORMAT_FL_I16A16		    0x19  #	define R300_TX_FORMAT_FL_R16G16B16A16	    0x1A -#	define R300_TX_FORMAT_FL_I32	    	    0x1B -#	define R300_TX_FORMAT_FL_I32A32	    	    0x1C +#	define R300_TX_FORMAT_FL_I32		    0x1B +#	define R300_TX_FORMAT_FL_I32A32		    0x1C  #	define R300_TX_FORMAT_FL_R32G32B32A32	    0x1D  	/* alpha modes, convenience mostly */  	/* if you have alpha, pick constant appropriate to the  	   number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */ -# 	define R300_TX_FORMAT_ALPHA_1CH		    0x000 -# 	define R300_TX_FORMAT_ALPHA_2CH		    0x200 -# 	define R300_TX_FORMAT_ALPHA_4CH		    0x600 -# 	define R300_TX_FORMAT_ALPHA_NONE	    0xA00 +#	define R300_TX_FORMAT_ALPHA_1CH		    0x000 +#	define R300_TX_FORMAT_ALPHA_2CH		    0x200 +#	define R300_TX_FORMAT_ALPHA_4CH		    0x600 +#	define R300_TX_FORMAT_ALPHA_NONE	    0xA00  	/* Swizzling */  	/* constants */  #	define R300_TX_FORMAT_X		0 @@ -1363,11 +1363,11 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.  
#       define R300_RB3D_Z_DISABLED_2            0x00000014  #       define R300_RB3D_Z_TEST                  0x00000012  #       define R300_RB3D_Z_TEST_AND_WRITE        0x00000016 -#       define R300_RB3D_Z_WRITE_ONLY        	 0x00000006 +#       define R300_RB3D_Z_WRITE_ONLY		 0x00000006  #       define R300_RB3D_Z_TEST                  0x00000012  #       define R300_RB3D_Z_TEST_AND_WRITE        0x00000016 -#       define R300_RB3D_Z_WRITE_ONLY        	 0x00000006 +#       define R300_RB3D_Z_WRITE_ONLY		 0x00000006  #	define R300_RB3D_STENCIL_ENABLE		 0x00000001  #define R300_RB3D_ZSTENCIL_CNTL_1                   0x4F04 diff --git a/shared-core/radeon_cp.c b/shared-core/radeon_cp.c index 06861381..3eb81efc 100644 --- a/shared-core/radeon_cp.c +++ b/shared-core/radeon_cp.c @@ -695,7 +695,7 @@ static const u32 R300_cp_microcode[][2] = {  	{ 0x0000e571, 0x00000004 },  	{ 0x0000e572, 0x0000000c },  	{ 0x0000a000, 0x00000004 }, -	{ 0x0140a000, 0x00000004 },  +	{ 0x0140a000, 0x00000004 },  	{ 0x0000e568, 0x00000004 },  	{ 0x000c2000, 0x00000004 },  	{ 0x00000089, 0x00000018 }, @@ -816,6 +816,46 @@ static const u32 R300_cp_microcode[][2] = {  	{ 0000000000, 0000000000 },  }; +static u32 RADEON_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) +{ +	u32 ret; +	RADEON_WRITE(R520_MC_IND_INDEX, 0x7f0000 | (addr & 0xff)); +	ret = RADEON_READ(R520_MC_IND_DATA); +	RADEON_WRITE(R520_MC_IND_INDEX, 0); +	return ret; +} + +u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv) +{ +	 +	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) +		return RADEON_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); +	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) +		return RADEON_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); +	else +		return RADEON_READ(RADEON_MC_FB_LOCATION); +} + +static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc) +{ +	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) +		RADEON_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); +	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) +		RADEON_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); +	else +		RADEON_WRITE(RADEON_MC_FB_LOCATION, fb_loc); +} + +static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_loc) +{ +	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) +		RADEON_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); +	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) +		RADEON_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); +	else +		RADEON_WRITE(RADEON_MC_AGP_LOCATION, agp_loc); +} +  static int RADEON_READ_PLL(struct drm_device * dev, int addr)  {  	drm_radeon_private_t *dev_priv = dev->dev_private; @@ -1074,41 +1114,43 @@ static int radeon_do_engine_reset(struct drm_device * dev)  	radeon_do_pixcache_flush(dev_priv); -	clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); -	mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); - -	RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | -					    RADEON_FORCEON_MCLKA | -					    RADEON_FORCEON_MCLKB | -					    RADEON_FORCEON_YCLKA | -					    RADEON_FORCEON_YCLKB | -					    RADEON_FORCEON_MC | -					    RADEON_FORCEON_AIC)); - -	rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); - -	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | -					      RADEON_SOFT_RESET_CP | -					      RADEON_SOFT_RESET_HI | -					      RADEON_SOFT_RESET_SE | -					      RADEON_SOFT_RESET_RE | -					      RADEON_SOFT_RESET_PP | -					      RADEON_SOFT_RESET_E2 | -					      RADEON_SOFT_RESET_RB)); -	
RADEON_READ(RADEON_RBBM_SOFT_RESET); -	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & -					      ~(RADEON_SOFT_RESET_CP | -						RADEON_SOFT_RESET_HI | -						RADEON_SOFT_RESET_SE | -						RADEON_SOFT_RESET_RE | -						RADEON_SOFT_RESET_PP | -						RADEON_SOFT_RESET_E2 | -						RADEON_SOFT_RESET_RB))); -	RADEON_READ(RADEON_RBBM_SOFT_RESET); - -	RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); -	RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); -	RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); +	if ((dev_priv->flags & RADEON_FAMILY_MASK) < CHIP_RV515) { +		clock_cntl_index = RADEON_READ(RADEON_CLOCK_CNTL_INDEX); +		mclk_cntl = RADEON_READ_PLL(dev, RADEON_MCLK_CNTL); +		 +		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, (mclk_cntl | +						    RADEON_FORCEON_MCLKA | +						    RADEON_FORCEON_MCLKB | +						    RADEON_FORCEON_YCLKA | +						    RADEON_FORCEON_YCLKB | +						    RADEON_FORCEON_MC | +						    RADEON_FORCEON_AIC)); +		 +		rbbm_soft_reset = RADEON_READ(RADEON_RBBM_SOFT_RESET); +		 +		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset | +						      RADEON_SOFT_RESET_CP | +						      RADEON_SOFT_RESET_HI | +						      RADEON_SOFT_RESET_SE | +						      RADEON_SOFT_RESET_RE | +						      RADEON_SOFT_RESET_PP | +						      RADEON_SOFT_RESET_E2 | +						      RADEON_SOFT_RESET_RB)); +		RADEON_READ(RADEON_RBBM_SOFT_RESET); +		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, (rbbm_soft_reset & +						      ~(RADEON_SOFT_RESET_CP | +							RADEON_SOFT_RESET_HI | +							RADEON_SOFT_RESET_SE | +							RADEON_SOFT_RESET_RE | +							RADEON_SOFT_RESET_PP | +							RADEON_SOFT_RESET_E2 | +							RADEON_SOFT_RESET_RB))); +		RADEON_READ(RADEON_RBBM_SOFT_RESET); +		 +		RADEON_WRITE_PLL(RADEON_MCLK_CNTL, mclk_cntl); +		RADEON_WRITE(RADEON_CLOCK_CNTL_INDEX, clock_cntl_index); +		RADEON_WRITE(RADEON_RBBM_SOFT_RESET, rbbm_soft_reset); +	}  	/* Reset the CP ring */  	radeon_do_cp_reset(dev_priv); @@ -1127,21 +1169,21 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,  {  	u32 ring_start, cur_read_ptr;  	u32 tmp; -	 +  	/* Initialize the memory controller. With new memory map, the fb location  	 * is not changed, it should have been properly initialized already. Part  	 * of the problem is that the code below is bogus, assuming the GART is  	 * always appended to the fb which is not necessarily the case  	 */  	if (!dev_priv->new_memmap) -		RADEON_WRITE(RADEON_MC_FB_LOCATION, +		radeon_write_fb_location(dev_priv,  			     ((dev_priv->gart_vm_start - 1) & 0xffff0000)  			     | (dev_priv->fb_location >> 16));  #if __OS_HAS_AGP  	if (dev_priv->flags & RADEON_IS_AGP) {  		RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev->agp->base); -		RADEON_WRITE(RADEON_MC_AGP_LOCATION, +		radeon_write_agp_location(dev_priv,  			     (((dev_priv->gart_vm_start - 1 +  				dev_priv->gart_size) & 0xffff0000) |  			      (dev_priv->gart_vm_start >> 16))); @@ -1305,7 +1347,7 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)  		RADEON_WRITE(RADEON_AGP_BASE, (unsigned int)dev_priv->gart_vm_start);  		dev_priv->gart_size = 32*1024*1024; -		RADEON_WRITE(RADEON_MC_AGP_LOCATION, +		radeon_write_agp_location(dev_priv,  			     (((dev_priv->gart_vm_start - 1 +  			       dev_priv->gart_size) & 0xffff0000) |  			     (dev_priv->gart_vm_start >> 16))); @@ -1339,7 +1381,7 @@ static void radeon_set_pciegart(drm_radeon_private_t * dev_priv, int on)  				  dev_priv->gart_vm_start +  				  dev_priv->gart_size - 1); -		RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0);	/* ?? 
*/ +		radeon_write_agp_location(dev_priv, 0xffffffc0); /* ?? */  		RADEON_WRITE_PCIE(RADEON_PCIE_TX_GART_CNTL,  				  RADEON_PCIE_TX_GART_EN); @@ -1364,7 +1406,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)  		return;  	} - 	tmp = RADEON_READ(RADEON_AIC_CNTL); +	tmp = RADEON_READ(RADEON_AIC_CNTL);  	if (on) {  		RADEON_WRITE(RADEON_AIC_CNTL, @@ -1382,7 +1424,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)  		/* Turn off AGP aperture -- is this required for PCI GART?  		 */ -		RADEON_WRITE(RADEON_MC_AGP_LOCATION, 0xffffffc0);	/* ?? */ +		radeon_write_agp_location(dev_priv, 0xffffffc0);  		RADEON_WRITE(RADEON_AGP_COMMAND, 0);	/* clear AGP_COMMAND */  	} else {  		RADEON_WRITE(RADEON_AIC_CNTL, @@ -1590,10 +1632,9 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)  			  dev->agp_buffer_map->handle);  	} -	dev_priv->fb_location = (RADEON_READ(RADEON_MC_FB_LOCATION) -				 & 0xffff) << 16; -	dev_priv->fb_size =  -		((RADEON_READ(RADEON_MC_FB_LOCATION) & 0xffff0000u) + 0x10000) +	dev_priv->fb_location = (radeon_read_fb_location(dev_priv) & 0xffff) << 16; +	dev_priv->fb_size = +		((radeon_read_fb_location(dev_priv) & 0xffff0000u) + 0x10000)  		- dev_priv->fb_location;  	dev_priv->front_pitch_offset = (((dev_priv->front_pitch / 64) << 22) | @@ -1639,7 +1680,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)  			    ((base + dev_priv->gart_size) & 0xfffffffful) < base)  				base = dev_priv->fb_location  					- dev_priv->gart_size; -		}		 +		}  		dev_priv->gart_vm_start = base & 0xffc00000u;  		if (dev_priv->gart_vm_start != base)  			DRM_INFO("GART aligned down from 0x%08x to 0x%08x\n", @@ -1694,7 +1735,7 @@ static int radeon_do_init_cp(struct drm_device * dev, drm_radeon_init_t * init)  			dev_priv->gart_info.bus_addr =  			    dev_priv->pcigart_offset + dev_priv->fb_location;  			dev_priv->gart_info.mapping.offset = -			    dev_priv->gart_info.bus_addr; +			    dev_priv->pcigart_offset + dev_priv->fb_aper_offset;  			dev_priv->gart_info.mapping.size =  			    dev_priv->gart_info.table_size; @@ -1845,7 +1886,7 @@ int radeon_cp_init(struct drm_device *dev, void *data, struct drm_file *file_pri  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (init->func == RADEON_INIT_R300_CP) -		r300_init_reg_flags(); +		r300_init_reg_flags(dev);  	switch (init->func) {  	case RADEON_INIT_CP: @@ -1867,12 +1908,12 @@ int radeon_cp_start(struct drm_device *dev, void *data, struct drm_file *file_pr  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (dev_priv->cp_running) { -		DRM_DEBUG("%s while CP running\n", __FUNCTION__); +		DRM_DEBUG("while CP running\n");  		return 0;  	}  	if (dev_priv->cp_mode == RADEON_CSQ_PRIDIS_INDDIS) { -		DRM_DEBUG("%s called with bogus CP mode (%d)\n", -			  __FUNCTION__, dev_priv->cp_mode); +		DRM_DEBUG("called with bogus CP mode (%d)\n", +			  dev_priv->cp_mode);  		return 0;  	} @@ -1938,7 +1979,7 @@ void radeon_do_release(struct drm_device * dev)  				schedule();  #else  #if defined(__FreeBSD__) && __FreeBSD_version > 500000 -				msleep(&ret, &dev->dev_lock, PZERO, "rdnrel", +				mtx_sleep(&ret, &dev->dev_lock, PZERO, "rdnrel",  				       1);  #else  				tsleep(&ret, PZERO, "rdnrel", 1); @@ -1982,7 +2023,7 @@ int radeon_cp_reset(struct drm_device *dev, void *data, struct drm_file *file_pr  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_DEBUG("%s called before init done\n", __FUNCTION__); +		DRM_DEBUG("called before init done\n");  		return -EINVAL;  	
} @@ -2259,6 +2300,10 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)  	case CHIP_R350:  	case CHIP_R420:  	case CHIP_RV410: +	case CHIP_RV515: +	case CHIP_R520: +	case CHIP_RV570: +	case CHIP_R580:  		dev_priv->flags |= RADEON_HAS_HIERZ;  		break;  	default: @@ -2295,7 +2340,8 @@ int radeon_driver_firstopen(struct drm_device *dev)  	if (ret != 0)  		return ret; -	ret = drm_addmap(dev, drm_get_resource_start(dev, 0), +	dev_priv->fb_aper_offset = drm_get_resource_start(dev, 0); +	ret = drm_addmap(dev, dev_priv->fb_aper_offset,  			 drm_get_resource_len(dev, 0), _DRM_FRAME_BUFFER,  			 _DRM_WRITE_COMBINING, &map);  	if (ret != 0) diff --git a/shared-core/radeon_drm.h b/shared-core/radeon_drm.h index b0ef702b..0971f970 100644 --- a/shared-core/radeon_drm.h +++ b/shared-core/radeon_drm.h @@ -223,10 +223,10 @@ typedef union {  #define R300_CMD_CP_DELAY		5  #define R300_CMD_DMA_DISCARD		6  #define R300_CMD_WAIT			7 -#	define R300_WAIT_2D  		0x1 -#	define R300_WAIT_3D  		0x2 -#	define R300_WAIT_2D_CLEAN  	0x3 -#	define R300_WAIT_3D_CLEAN  	0x4 +#	define R300_WAIT_2D		0x1 +#	define R300_WAIT_3D		0x2 +#	define R300_WAIT_2D_CLEAN	0x3 +#	define R300_WAIT_3D_CLEAN	0x4  #define R300_CMD_SCRATCH		8  typedef union { @@ -656,6 +656,7 @@ typedef struct drm_radeon_indirect {  #define RADEON_PARAM_SCRATCH_OFFSET        11  #define RADEON_PARAM_CARD_TYPE             12  #define RADEON_PARAM_VBLANK_CRTC           13   /* VBLANK CRTC */ +#define RADEON_PARAM_FB_LOCATION           14   /* FB location */  typedef struct drm_radeon_getparam {  	int param; @@ -723,7 +724,7 @@ typedef struct drm_radeon_surface_free {  	unsigned int address;  } drm_radeon_surface_free_t; -#define	DRM_RADEON_VBLANK_CRTC1 	1 -#define	DRM_RADEON_VBLANK_CRTC2 	2 +#define	DRM_RADEON_VBLANK_CRTC1		1 +#define	DRM_RADEON_VBLANK_CRTC2		2  #endif diff --git a/shared-core/radeon_drv.h b/shared-core/radeon_drv.h index a71b0bee..bd51de14 100644 --- a/shared-core/radeon_drv.h +++ b/shared-core/radeon_drv.h @@ -124,6 +124,12 @@ enum radeon_family {  	CHIP_R420,  	CHIP_RV410,  	CHIP_RS400, +	CHIP_RV515, +	CHIP_R520, +	CHIP_RV530, +	CHIP_RV560, +	CHIP_RV570, +	CHIP_R580,  	CHIP_LAST,  }; @@ -291,11 +297,11 @@ typedef struct drm_radeon_private {  	int irq_enabled;  	struct radeon_surface surfaces[RADEON_MAX_SURFACES]; -	struct radeon_virt_surface virt_surfaces[2*RADEON_MAX_SURFACES]; +	struct radeon_virt_surface virt_surfaces[2 * RADEON_MAX_SURFACES];  	unsigned long pcigart_offset;  	unsigned int pcigart_offset_set; -	struct ati_pcigart_info gart_info; +	struct drm_ati_pcigart_info gart_info;  	u32 scratch_ages[5]; @@ -304,6 +310,7 @@ typedef struct drm_radeon_private {  	/* starting from here on, data is preserved accross an open */  	uint32_t flags;		/* see radeon_chip_flags */ +	unsigned long fb_aper_offset;  } drm_radeon_private_t; @@ -347,6 +354,7 @@ extern int radeon_cp_resume(struct drm_device *dev, void *data, struct drm_file  extern int radeon_engine_reset(struct drm_device *dev, void *data, struct drm_file *file_priv);  extern int radeon_fullscreen(struct drm_device *dev, void *data, struct drm_file *file_priv);  extern int radeon_cp_buffers(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv);  extern void radeon_freelist_reset(struct drm_device * dev);  extern struct drm_buf *radeon_freelist_get(struct drm_device * dev); @@ -392,11 +400,11 @@ extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,  					 unsigned long arg);  
/* r300_cmdbuf.c */ -extern void r300_init_reg_flags(void); +extern void r300_init_reg_flags(struct drm_device *dev);  extern int r300_do_cp_cmdbuf(struct drm_device *dev,  			     struct drm_file *file_priv, -			     drm_radeon_kcmd_buffer_t* cmdbuf); +			     drm_radeon_kcmd_buffer_t *cmdbuf);  /* Flags for stats.boxes   */ @@ -438,7 +446,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,  #define RADEON_PCIE_INDEX               0x0030  #define RADEON_PCIE_DATA                0x0034  #define RADEON_PCIE_TX_GART_CNTL	0x10 -#	define RADEON_PCIE_TX_GART_EN   	(1 << 0) +#	define RADEON_PCIE_TX_GART_EN		(1 << 0)  #	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_PASS_THRU (0<<1)  #	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_CLAMP_LO  (1<<1)  #	define RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD   (3<<1) @@ -448,7 +456,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,  #	define RADEON_PCIE_TX_GART_INVALIDATE_TLB	(1<<8)  #define RADEON_PCIE_TX_DISCARD_RD_ADDR_LO 0x11  #define RADEON_PCIE_TX_DISCARD_RD_ADDR_HI 0x12 -#define RADEON_PCIE_TX_GART_BASE  	0x13 +#define RADEON_PCIE_TX_GART_BASE	0x13  #define RADEON_PCIE_TX_GART_START_LO	0x14  #define RADEON_PCIE_TX_GART_START_HI	0x15  #define RADEON_PCIE_TX_GART_END_LO	0x16 @@ -463,6 +471,15 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,  #define RADEON_IGPGART_ENABLE           0x38  #define RADEON_IGPGART_UNK_39           0x39 +#define R520_MC_IND_INDEX 0x70 +#define R520_MC_IND_WR_EN (1<<24) +#define R520_MC_IND_DATA  0x74 + +#define RV515_MC_FB_LOCATION 0x01 +#define RV515_MC_AGP_LOCATION 0x02 + +#define R520_MC_FB_LOCATION 0x04 +#define R520_MC_AGP_LOCATION 0x05  #define RADEON_MPP_TB_CONFIG		0x01c0  #define RADEON_MEM_CNTL			0x0140 @@ -528,12 +545,12 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,  #define RADEON_GEN_INT_STATUS		0x0044  #	define RADEON_CRTC_VBLANK_STAT		(1 << 0) -#	define RADEON_CRTC_VBLANK_STAT_ACK   	(1 << 0) +#	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)  #	define RADEON_CRTC2_VBLANK_STAT		(1 << 9) -#	define RADEON_CRTC2_VBLANK_STAT_ACK   	(1 << 9) +#	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)  #	define RADEON_GUI_IDLE_INT_TEST_ACK     (1 << 19)  #	define RADEON_SW_INT_TEST		(1 << 25) -#	define RADEON_SW_INT_TEST_ACK   	(1 << 25) +#	define RADEON_SW_INT_TEST_ACK		(1 << 25)  #	define RADEON_SW_INT_FIRE		(1 << 26)  #define RADEON_HOST_PATH_CNTL		0x0130 @@ -652,30 +669,30 @@ extern int r300_do_cp_cmdbuf(struct drm_device *dev,   */  #define RADEON_RBBM_STATUS		0x0e40  /* Same as the previous RADEON_RBBM_STATUS; this is a mirror of that register.  
*/ -/* #define RADEON_RBBM_STATUS 		0x1740 */ +/* #define RADEON_RBBM_STATUS		0x1740 */  /* bits 6:0 are dword slots available in the cmd fifo */  #	define RADEON_RBBM_FIFOCNT_MASK		0x007f -#	define RADEON_HIRQ_ON_RBB 	(1 <<  8) -#	define RADEON_CPRQ_ON_RBB 	(1 <<  9) -#	define RADEON_CFRQ_ON_RBB 	(1 << 10) -#	define RADEON_HIRQ_IN_RTBUF 	(1 << 11) -#	define RADEON_CPRQ_IN_RTBUF 	(1 << 12) -#	define RADEON_CFRQ_IN_RTBUF 	(1 << 13) -#	define RADEON_PIPE_BUSY 	(1 << 14) -#	define RADEON_ENG_EV_BUSY 	(1 << 15) -#	define RADEON_CP_CMDSTRM_BUSY 	(1 << 16) -#	define RADEON_E2_BUSY 		(1 << 17) -#	define RADEON_RB2D_BUSY 	(1 << 18) -#	define RADEON_RB3D_BUSY 	(1 << 19) /* not used on r300 */ -#	define RADEON_VAP_BUSY 		(1 << 20) -#	define RADEON_RE_BUSY 		(1 << 21) /* not used on r300 */ -#	define RADEON_TAM_BUSY 		(1 << 22) /* not used on r300 */ -#	define RADEON_TDM_BUSY 		(1 << 23) /* not used on r300 */ -#	define RADEON_PB_BUSY 		(1 << 24) /* not used on r300 */ -#	define RADEON_TIM_BUSY 		(1 << 25) /* not used on r300 */ -#	define RADEON_GA_BUSY 		(1 << 26) -#	define RADEON_CBA2D_BUSY 	(1 << 27) -#	define RADEON_RBBM_ACTIVE 	(1 << 31) +#	define RADEON_HIRQ_ON_RBB	(1 <<  8) +#	define RADEON_CPRQ_ON_RBB	(1 <<  9) +#	define RADEON_CFRQ_ON_RBB	(1 << 10) +#	define RADEON_HIRQ_IN_RTBUF	(1 << 11) +#	define RADEON_CPRQ_IN_RTBUF	(1 << 12) +#	define RADEON_CFRQ_IN_RTBUF	(1 << 13) +#	define RADEON_PIPE_BUSY		(1 << 14) +#	define RADEON_ENG_EV_BUSY	(1 << 15) +#	define RADEON_CP_CMDSTRM_BUSY	(1 << 16) +#	define RADEON_E2_BUSY		(1 << 17) +#	define RADEON_RB2D_BUSY		(1 << 18) +#	define RADEON_RB3D_BUSY		(1 << 19) /* not used on r300 */ +#	define RADEON_VAP_BUSY		(1 << 20) +#	define RADEON_RE_BUSY		(1 << 21) /* not used on r300 */ +#	define RADEON_TAM_BUSY		(1 << 22) /* not used on r300 */ +#	define RADEON_TDM_BUSY		(1 << 23) /* not used on r300 */ +#	define RADEON_PB_BUSY		(1 << 24) /* not used on r300 */ +#	define RADEON_TIM_BUSY		(1 << 25) /* not used on r300 */ +#	define RADEON_GA_BUSY		(1 << 26) +#	define RADEON_CBA2D_BUSY	(1 << 27) +#	define RADEON_RBBM_ACTIVE	(1 << 31)  #define RADEON_RE_LINE_PATTERN		0x1cd0  #define RADEON_RE_MISC			0x26c4  #define RADEON_RE_TOP_LEFT		0x26c0 @@ -1092,6 +1109,13 @@ do {									\  	RADEON_WRITE( RADEON_PCIE_DATA, (val) );			\  } while (0) +#define RADEON_WRITE_MCIND( addr, val )					\ +	do {								\ +		RADEON_WRITE(R520_MC_IND_INDEX, 0xff0000 | ((addr) & 0xff));	\ +		RADEON_WRITE(R520_MC_IND_DATA, (val));			\ +		RADEON_WRITE(R520_MC_IND_INDEX, 0);	\ +	} while (0) +  #define CP_PACKET0( reg, n )						\  	(RADEON_CP_PACKET0 | ((n) << 16) | ((reg) >> 2))  #define CP_PACKET0_TABLE( reg, n )					\ @@ -1202,8 +1226,7 @@ do {									\  #define BEGIN_RING( n ) do {						\  	if ( RADEON_VERBOSE ) {						\ -		DRM_INFO( "BEGIN_RING( %d ) in %s\n",			\ -			   n, __FUNCTION__ );				\ +		DRM_INFO( "BEGIN_RING( %d )\n", (n));			\  	}								\  	if ( dev_priv->ring.space <= (n) * sizeof(u32) ) {		\  		COMMIT_RING();						\ @@ -1221,7 +1244,7 @@ do {									\  			  write, dev_priv->ring.tail );			\  	}								\  	if (((dev_priv->ring.tail + _nr) & mask) != write) {		\ -		DRM_ERROR( 						\ +		DRM_ERROR(						\  			"ADVANCE_RING(): mismatch: nr: %x write: %x line: %d\n",	\  			((dev_priv->ring.tail + _nr) & mask),		\  			write, __LINE__);						\ diff --git a/shared-core/radeon_irq.c b/shared-core/radeon_irq.c index 6ba3c147..79e4e866 100644 --- a/shared-core/radeon_irq.c +++ b/shared-core/radeon_irq.c @@ -181,7 +181,7 @@ u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) 
 	u32 crtc_cnt_reg, crtc_status_reg;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -209,7 +209,7 @@ int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_pr  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -231,7 +231,7 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr  	drm_radeon_irq_wait_t *irqwait = data;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} diff --git a/shared-core/radeon_mem.c b/shared-core/radeon_mem.c index 9947e940..1e582ee0 100644 --- a/shared-core/radeon_mem.c +++ b/shared-core/radeon_mem.c @@ -224,7 +224,7 @@ int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_p  	struct mem_block *block, **heap;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -259,7 +259,7 @@ int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_pr  	struct mem_block *block, **heap;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -285,7 +285,7 @@ int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *fi  	struct mem_block **heap;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} diff --git a/shared-core/radeon_state.c b/shared-core/radeon_state.c index e3aadfb9..6f2e05b3 100644 --- a/shared-core/radeon_state.c +++ b/shared-core/radeon_state.c @@ -898,7 +898,7 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,  			int w = pbox[i].x2 - x;  			int h = pbox[i].y2 - y; -			DRM_DEBUG("dispatch clear %d,%d-%d,%d flags 0x%x\n", +			DRM_DEBUG("%d,%d-%d,%d flags 0x%x\n",  				  x, y, w, h, flags);  			if (flags & RADEON_FRONT) { @@ -1368,7 +1368,7 @@ static void radeon_cp_dispatch_swap(struct drm_device * dev)  		int w = pbox[i].x2 - x;  		int h = pbox[i].y2 - y; -		DRM_DEBUG("dispatch swap %d,%d-%d,%d\n", x, y, w, h); +		DRM_DEBUG("%d,%d-%d,%d\n", x, y, w, h);  		BEGIN_RING(9); @@ -1422,8 +1422,7 @@ static void radeon_cp_dispatch_flip(struct drm_device * dev)  	int offset = (dev_priv->sarea_priv->pfCurrentPage == 1)  	    ? dev_priv->front_offset : dev_priv->back_offset;  	RING_LOCALS; -	DRM_DEBUG("%s: pfCurrentPage=%d\n", -		  __FUNCTION__, +	DRM_DEBUG("pfCurrentPage=%d\n",  		  dev_priv->sarea_priv->pfCurrentPage);  	/* Do some trivial performance monitoring... 
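/*
 * [editor's sketch, not part of the patch] The radeon_cp.c hunks above
 * add MCIND accessors because on RV515 and newer chips the
 * memory-controller registers (FB/AGP location) sit behind an
 * index/data pair (R520_MC_IND_INDEX at 0x70, R520_MC_IND_DATA at
 * 0x74) instead of being directly mapped.  A stand-alone sketch of
 * that index/data idiom over a fake mmio window (the 0x7f0000 and
 * 0xff0000 index masks are copied from the patch; everything else is
 * illustrative):
 */
#include <stdint.h>

static volatile uint32_t mmio[0x20];	/* stand-in for the register BAR */

static uint32_t mc_ind_read(uint32_t addr)
{
	mmio[0x70 / 4] = 0x7f0000 | (addr & 0xff);	/* select register */
	uint32_t val = mmio[0x74 / 4];			/* latch its value */
	mmio[0x70 / 4] = 0;				/* deselect */
	return val;
}

static void mc_ind_write(uint32_t addr, uint32_t val)
{
	mmio[0x70 / 4] = 0xff0000 | (addr & 0xff);	/* select, write path */
	mmio[0x74 / 4] = val;
	mmio[0x70 / 4] = 0;
}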
@@ -1562,7 +1561,7 @@ static void radeon_cp_dispatch_indirect(struct drm_device * dev,  {  	drm_radeon_private_t *dev_priv = dev->dev_private;  	RING_LOCALS; -	DRM_DEBUG("indirect: buf=%d s=0x%x e=0x%x\n", buf->idx, start, end); +	DRM_DEBUG("buf=%d s=0x%x e=0x%x\n", buf->idx, start, end);  	if (start != end) {  		int offset = (dev_priv->gart_buffers_offset @@ -1758,7 +1757,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,  			buf = radeon_freelist_get(dev);  		}  		if (!buf) { -			DRM_DEBUG("radeon_cp_dispatch_texture: EAGAIN\n"); +			DRM_DEBUG("EAGAIN\n");  			if (DRM_COPY_TO_USER(tex->image, image, sizeof(*image)))  				return -EFAULT;  			return -EAGAIN; @@ -2084,7 +2083,7 @@ static int radeon_surface_alloc(struct drm_device *dev, void *data, struct drm_f  	drm_radeon_surface_alloc_t *alloc = data;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -2100,7 +2099,7 @@ static int radeon_surface_free(struct drm_device *dev, void *data, struct drm_fi  	drm_radeon_surface_free_t *memfree = data;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -2215,7 +2214,7 @@ static int radeon_cp_vertex(struct drm_device *dev, void *data, struct drm_file  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -2299,7 +2298,7 @@ static int radeon_cp_indices(struct drm_device *dev, void *data, struct drm_file  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	}  	sarea_priv = dev_priv->sarea_priv; @@ -2437,11 +2436,11 @@ static int radeon_cp_indirect(struct drm_device *dev, void *data, struct drm_fil  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} -	DRM_DEBUG("indirect: idx=%d s=%d e=%d d=%d\n", +	DRM_DEBUG("idx=%d s=%d e=%d d=%d\n",  		  indirect->idx, indirect->start, indirect->end,  		  indirect->discard); @@ -2509,7 +2508,7 @@ static int radeon_cp_vertex2(struct drm_device *dev, void *data, struct drm_file  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -2814,7 +2813,7 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)  	drm_radeon_private_t *dev_priv = dev->dev_private;  	RING_LOCALS; -	DRM_DEBUG("%s: %x\n", __FUNCTION__, flags); +	DRM_DEBUG("%x\n", flags);  	switch (flags) {  	case RADEON_WAIT_2D:  		BEGIN_RING(2); @@ -2852,7 +2851,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file  	LOCK_TEST_WITH_RETURN(dev, file_priv);  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -3013,7 +3012,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil  	int value;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return 
-EINVAL;  	} @@ -3069,7 +3068,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil  			return -EINVAL;  		value = RADEON_SCRATCH_REG_OFFSET;  		break; -	 +  	case RADEON_PARAM_CARD_TYPE:  		if (dev_priv->flags & RADEON_IS_PCIE)  			value = RADEON_CARD_PCIE; @@ -3081,6 +3080,9 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil  	case RADEON_PARAM_VBLANK_CRTC:  		value = radeon_vblank_crtc_get(dev);  		break; +	case RADEON_PARAM_FB_LOCATION: +		value = radeon_read_fb_location(dev_priv); +		break;  	default:  		DRM_DEBUG( "Invalid parameter %d\n", param->param );  		return -EINVAL; @@ -3101,7 +3103,7 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil  	struct drm_radeon_driver_file_fields *radeon_priv;  	if (!dev_priv) { -		DRM_ERROR("%s called with no initialization\n", __FUNCTION__); +		DRM_ERROR("called with no initialization\n");  		return -EINVAL;  	} @@ -3154,7 +3156,7 @@ static int radeon_cp_setparam(struct drm_device *dev, void *data, struct drm_fil   *   * DRM infrastructure takes care of reclaiming dma buffers.   */ -void radeon_driver_preclose(struct drm_device * dev, +void radeon_driver_preclose(struct drm_device *dev,  			    struct drm_file *file_priv)  {  	if (dev->dev_private) { @@ -3166,7 +3168,7 @@ void radeon_driver_preclose(struct drm_device * dev,  	}  } -void radeon_driver_lastclose(struct drm_device * dev) +void radeon_driver_lastclose(struct drm_device *dev)  {  	if (dev->dev_private) {  		drm_radeon_private_t *dev_priv = dev->dev_private; @@ -3179,7 +3181,7 @@ void radeon_driver_lastclose(struct drm_device * dev)  	radeon_do_release(dev);  } -int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv) +int radeon_driver_open(struct drm_device *dev, struct drm_file *file_priv)  {  	drm_radeon_private_t *dev_priv = dev->dev_private;  	struct drm_radeon_driver_file_fields *radeon_priv; @@ -3201,7 +3203,7 @@ int radeon_driver_open(struct drm_device * dev, struct drm_file *file_priv)  	return 0;  } -void radeon_driver_postclose(struct drm_device * dev, struct drm_file *file_priv) +void radeon_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)  {  	struct drm_radeon_driver_file_fields *radeon_priv =  	    file_priv->driver_priv; diff --git a/shared-core/savage_bci.c b/shared-core/savage_bci.c index 32ac5ac2..4b8a89fe 100644 --- a/shared-core/savage_bci.c +++ b/shared-core/savage_bci.c @@ -364,7 +364,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)  	unsigned int cur = dev_priv->current_dma_page;  	unsigned int rest = SAVAGE_DMA_PAGE_SIZE -  		dev_priv->dma_pages[cur].used; -	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE-1) / +	unsigned int nr_pages = (n - rest + SAVAGE_DMA_PAGE_SIZE - 1) /  		SAVAGE_DMA_PAGE_SIZE;  	uint32_t *dma_ptr;  	unsigned int i; @@ -374,7 +374,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)  	if (cur + nr_pages < dev_priv->nr_dma_pages) {  		dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle + -		    cur*SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used; +		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;  		if (n < rest)  			rest = n;  		dev_priv->dma_pages[cur].used += rest; @@ -383,7 +383,7 @@ uint32_t *savage_dma_alloc(drm_savage_private_t *dev_priv, unsigned int n)  	} else {  		dev_priv->dma_flush(dev_priv);  		nr_pages = -		    (n + SAVAGE_DMA_PAGE_SIZE-1) / SAVAGE_DMA_PAGE_SIZE; +		    (n + SAVAGE_DMA_PAGE_SIZE - 1) / 
SAVAGE_DMA_PAGE_SIZE;  		for (i = cur; i < dev_priv->nr_dma_pages; ++i) {  			dev_priv->dma_pages[i].age = dev_priv->last_dma_age;  			dev_priv->dma_pages[i].used = 0; @@ -443,7 +443,7 @@ static void savage_dma_flush(drm_savage_private_t *dev_priv)  		uint32_t *dma_ptr = (uint32_t *)dev_priv->cmd_dma->handle +  		    cur * SAVAGE_DMA_PAGE_SIZE + dev_priv->dma_pages[cur].used;  		dev_priv->dma_pages[cur].used += pad; -		while(pad != 0) { +		while (pad != 0) {  			*dma_ptr++ = BCI_CMD_WAIT;  			pad--;  		} @@ -584,18 +584,18 @@ int savage_driver_firstopen(struct drm_device *dev)  			 * MTRRs. */  			dev_priv->mtrr[0].base = fb_base;  			dev_priv->mtrr[0].size = 0x01000000; -			dev_priv->mtrr[0].handle =  +			dev_priv->mtrr[0].handle =  			    drm_mtrr_add(dev_priv->mtrr[0].base,  					 dev_priv->mtrr[0].size, DRM_MTRR_WC); -			dev_priv->mtrr[1].base = fb_base+0x02000000; +			dev_priv->mtrr[1].base = fb_base + 0x02000000;  			dev_priv->mtrr[1].size = 0x02000000;  			dev_priv->mtrr[1].handle =  			    drm_mtrr_add(dev_priv->mtrr[1].base,  					 dev_priv->mtrr[1].size, DRM_MTRR_WC); -			dev_priv->mtrr[2].base = fb_base+0x04000000; +			dev_priv->mtrr[2].base = fb_base + 0x04000000;  			dev_priv->mtrr[2].size = 0x04000000;  			dev_priv->mtrr[2].handle = -			    drm_mtrr_add(dev_priv->mtrr[2].base,  +			    drm_mtrr_add(dev_priv->mtrr[2].base,  				         dev_priv->mtrr[2].size, DRM_MTRR_WC);  		} else {  			DRM_ERROR("strange pci_resource_len %08lx\n", @@ -615,7 +615,7 @@ int savage_driver_firstopen(struct drm_device *dev)  			 * aperture. */  			dev_priv->mtrr[0].base = fb_base;  			dev_priv->mtrr[0].size = 0x08000000; -			dev_priv->mtrr[0].handle =  +			dev_priv->mtrr[0].handle =  			    drm_mtrr_add(dev_priv->mtrr[0].base,  					 dev_priv->mtrr[0].size, DRM_MTRR_WC);  		} else { @@ -833,8 +833,8 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)  			depth_tile_format = SAVAGE_BD_TILE_DEST;  		}  		front_stride = dev_priv->front_pitch / (dev_priv->fb_bpp / 8); -		back_stride  = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); -		depth_stride =  +		back_stride = dev_priv->back_pitch / (dev_priv->fb_bpp / 8); +		depth_stride =  		    dev_priv->depth_pitch / (dev_priv->depth_bpp / 8);  		dev_priv->front_bd = front_stride | SAVAGE_BD_BW_DISABLE | @@ -888,7 +888,7 @@ static int savage_do_init_bci(struct drm_device *dev, drm_savage_init_t *init)  		return -ENOMEM;  	} -	if (savage_dma_init(dev_priv) <  0) { +	if (savage_dma_init(dev_priv) < 0) {  		DRM_ERROR("could not initialize command DMA\n");  		savage_do_cleanup_bci(dev);  		return -ENOMEM; @@ -983,7 +983,7 @@ static int savage_bci_event_wait(struct drm_device *dev, void *data, struct drm_  	 * - event counter wrapped since the event was emitted or  	 * - the hardware has advanced up to or over the event to wait for.  	 
*/ -	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e) ) +	if (event_w < hw_w || (event_w == hw_w && event_e <= hw_e))  		return 0;  	else  		return dev_priv->wait_evnt(dev_priv, event_e); @@ -1065,8 +1065,6 @@ void savage_reclaim_buffers(struct drm_device *dev, struct drm_file *file_priv)  	if (!dma->buflist)  		return; -	/*i830_flush_queue(dev);*/ -  	for (i = 0; i < dma->buf_count; i++) {  		struct drm_buf *buf = dma->buflist[i];  		drm_savage_buf_priv_t *buf_priv = buf->dev_private; @@ -1092,4 +1090,3 @@ struct drm_ioctl_desc savage_ioctls[] = {  };  int savage_max_ioctl = DRM_ARRAY_SIZE(savage_ioctls); - diff --git a/shared-core/savage_drv.h b/shared-core/savage_drv.h index d86bac04..b9124b1a 100644 --- a/shared-core/savage_drv.h +++ b/shared-core/savage_drv.h @@ -237,7 +237,7 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,   */  #define SAVAGE_STATUS_WORD0		0x48C00  #define SAVAGE_STATUS_WORD1		0x48C04 -#define SAVAGE_ALT_STATUS_WORD0 	0x48C60 +#define SAVAGE_ALT_STATUS_WORD0		0x48C60  #define SAVAGE_FIFO_USED_MASK_S3D	0x0001ffff  #define SAVAGE_FIFO_USED_MASK_S4	0x001fffff @@ -310,7 +310,7 @@ extern void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,  #define SAVAGE_DESTCTRL_S3D		0x34  #define SAVAGE_SCSTART_S3D		0x35  #define SAVAGE_SCEND_S3D		0x36 -#define SAVAGE_ZWATERMARK_S3D		0x37  +#define SAVAGE_ZWATERMARK_S3D		0x37  #define SAVAGE_DESTTEXRWWATERMARK_S3D	0x38  /* common stuff */  #define SAVAGE_VERTBUFADDR		0x3e diff --git a/shared-core/savage_state.c b/shared-core/savage_state.c index dd593340..1c5a0e2e 100644 --- a/shared-core/savage_state.c +++ b/shared-core/savage_state.c @@ -30,23 +30,23 @@ void savage_emit_clip_rect_s3d(drm_savage_private_t *dev_priv,  			       const struct drm_clip_rect *pbox)  {  	uint32_t scstart = dev_priv->state.s3d.new_scstart; -	uint32_t scend   = dev_priv->state.s3d.new_scend; +	uint32_t scend = dev_priv->state.s3d.new_scend;  	scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) | -		((uint32_t)pbox->x1 & 0x000007ff) |  +		((uint32_t)pbox->x1 & 0x000007ff) |  		(((uint32_t)pbox->y1 << 16) & 0x07ff0000); -	scend   = (scend   & ~SAVAGE_SCISSOR_MASK_S3D) | -		(((uint32_t)pbox->x2-1) & 0x000007ff) | -		((((uint32_t)pbox->y2-1) << 16) & 0x07ff0000); +	scend   = (scend & ~SAVAGE_SCISSOR_MASK_S3D) | +		(((uint32_t)pbox->x2 - 1) & 0x000007ff) | +		((((uint32_t)pbox->y2 - 1) << 16) & 0x07ff0000);  	if (scstart != dev_priv->state.s3d.scstart ||  	    scend   != dev_priv->state.s3d.scend) {  		DMA_LOCALS;  		BEGIN_DMA(4); -		DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); +		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);  		DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);  		DMA_WRITE(scstart);  		DMA_WRITE(scend);  		dev_priv->state.s3d.scstart = scstart; -		dev_priv->state.s3d.scend   = scend; +		dev_priv->state.s3d.scend = scend;  		dev_priv->waiting = 1;  		DMA_COMMIT();  	} @@ -61,13 +61,13 @@ void savage_emit_clip_rect_s4(drm_savage_private_t *dev_priv,  		((uint32_t)pbox->x1 & 0x000007ff) |  		(((uint32_t)pbox->y1 << 12) & 0x00fff000);  	drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) | -		(((uint32_t)pbox->x2-1) & 0x000007ff) | -		((((uint32_t)pbox->y2-1) << 12) & 0x00fff000); +		(((uint32_t)pbox->x2 - 1) & 0x000007ff) | +		((((uint32_t)pbox->y2 - 1) << 12) & 0x00fff000);  	if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||  	    drawctrl1 != dev_priv->state.s4.drawctrl1) {  		DMA_LOCALS;  		BEGIN_DMA(4); -		DMA_WRITE(BCI_CMD_WAIT|BCI_CMD_WAIT_3D); +		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);  		
DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);  		DMA_WRITE(drawctrl0);  		DMA_WRITE(drawctrl1); @@ -87,8 +87,8 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,  	}  	if (!(addr & 1)) { /* local */  		addr &= ~7; -		if (addr <  dev_priv->texture_offset || -		    addr >= dev_priv->texture_offset+dev_priv->texture_size) { +		if (addr < dev_priv->texture_offset || +		    addr >= dev_priv->texture_offset + dev_priv->texture_size) {  			DRM_ERROR  			    ("bad texAddr%d %08x (local addr out of range)\n",  			     unit, addr); @@ -114,10 +114,10 @@ static int savage_verify_texaddr(drm_savage_private_t *dev_priv, int unit,  }  #define SAVE_STATE(reg,where)			\ -	if(start <= reg && start+count > reg)	\ +	if(start <= reg && start + count > reg)	\  		dev_priv->state.where = regs[reg - start]  #define SAVE_STATE_MASK(reg,where,mask) do {			\ -	if(start <= reg && start+count > reg) {			\ +	if(start <= reg && start + count > reg) {			\  		uint32_t tmp;					\  		tmp = regs[reg - start];			\  		dev_priv->state.where = (tmp & (mask)) |	\ @@ -129,9 +129,9 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,  				   const uint32_t *regs)  {  	if (start < SAVAGE_TEXPALADDR_S3D || -	    start+count-1 > SAVAGE_DESTTEXRWWATERMARK_S3D) { +	    start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {  		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", -			  start, start+count-1); +			  start, start + count - 1);  		return -EINVAL;  	} @@ -142,7 +142,7 @@ static int savage_verify_state_s3d(drm_savage_private_t *dev_priv,  	/* if any texture regs were changed ... */  	if (start <= SAVAGE_TEXCTRL_S3D && -	    start+count > SAVAGE_TEXPALADDR_S3D) { +	    start + count > SAVAGE_TEXPALADDR_S3D) {  		/* ... check texture state */  		SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);  		SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr); @@ -161,9 +161,9 @@ static int savage_verify_state_s4(drm_savage_private_t *dev_priv,  	int ret = 0;  	if (start < SAVAGE_DRAWLOCALCTRL_S4 || -	    start+count-1 > SAVAGE_TEXBLENDCOLOR_S4) { +	    start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {  		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n", -			  start, start+count-1); +			  start, start + count - 1);  		return -EINVAL;  	} @@ -212,14 +212,14 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,  			return ret;  		/* scissor regs are emitted in savage_dispatch_draw */  		if (start < SAVAGE_SCSTART_S3D) { -			if (start+count > SAVAGE_SCEND_S3D+1) -				count2 = count - (SAVAGE_SCEND_S3D+1 - start); -			if (start+count > SAVAGE_SCSTART_S3D) +			if (start + count > SAVAGE_SCEND_S3D + 1) +				count2 = count - (SAVAGE_SCEND_S3D + 1 - start); +			if (start + count > SAVAGE_SCSTART_S3D)  				count = SAVAGE_SCSTART_S3D - start;  		} else if (start <= SAVAGE_SCEND_S3D) { -			if (start+count > SAVAGE_SCEND_S3D+1) { -				count -= SAVAGE_SCEND_S3D+1 - start; -				start = SAVAGE_SCEND_S3D+1; +			if (start + count > SAVAGE_SCEND_S3D + 1) { +				count -= SAVAGE_SCEND_S3D + 1 - start; +				start = SAVAGE_SCEND_S3D + 1;  			} else  				return 0;  		} @@ -229,24 +229,24 @@ static int savage_dispatch_state(drm_savage_private_t *dev_priv,  			return ret;  		/* scissor regs are emitted in savage_dispatch_draw */  		if (start < SAVAGE_DRAWCTRL0_S4) { -			if (start+count > SAVAGE_DRAWCTRL1_S4+1) +			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)  				count2 = count -  					 (SAVAGE_DRAWCTRL1_S4 + 1 - start); -			if (start+count > SAVAGE_DRAWCTRL0_S4) +			if (start + count > SAVAGE_DRAWCTRL0_S4)  				count = 
SAVAGE_DRAWCTRL0_S4 - start;
 		} else if (start <= SAVAGE_DRAWCTRL1_S4) {
-			if (start+count > SAVAGE_DRAWCTRL1_S4+1) {
-				count -= SAVAGE_DRAWCTRL1_S4+1 - start;
-				start = SAVAGE_DRAWCTRL1_S4+1;
+			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
+				count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
+				start = SAVAGE_DRAWCTRL1_S4 + 1;
 			} else
 				return 0;
 		}
 	}
 
-	bci_size = count + (count+254)/255 + count2 + (count2+254)/255;
+	bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
 
 	if (cmd_header->state.global) {
-		BEGIN_DMA(bci_size+1);
+		BEGIN_DMA(bci_size + 1);
 		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
 		dev_priv->waiting = 1;
 	} else {
@@ -286,8 +286,8 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
 	BCI_LOCALS;
 
 	if (!dmabuf) {
-	    DRM_ERROR("called without dma buffers!\n");
-	    return -EINVAL;
+		DRM_ERROR("called without dma buffers!\n");
+		return -EINVAL;
 	}
 
 	if (!n)
@@ -337,9 +337,9 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
 		}
 	}
 
-	if (start + n > dmabuf->total/32) {
+	if (start + n > dmabuf->total / 32) {
 		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
-			  start, start + n - 1, dmabuf->total/32);
+			  start, start + n - 1, dmabuf->total / 32);
 		return -EINVAL;
 	}
@@ -374,33 +374,33 @@ static int savage_dispatch_dma_prim(drm_savage_private_t *dev_priv,
 			/* Need to reorder indices for correct flat
 			 * shading while preserving the clock sense
 			 * for correct culling. Only on Savage3D. */
-			int reorder[3] = {-1, -1, -1};
-			reorder[start%3] = 2;
+			int reorder[3] = { -1, -1, -1 };
+			reorder[start % 3] = 2;
 
-			BEGIN_BCI((count+1+1)/2);
-			BCI_DRAW_INDICES_S3D(count, prim, start+2);
+			BEGIN_BCI((count + 1 + 1) / 2);
+			BCI_DRAW_INDICES_S3D(count, prim, start + 2);
 
-			for (i = start+1; i+1 < start+count; i += 2)
+			for (i = start + 1; i + 1 < start + count; i += 2)
 				BCI_WRITE((i + reorder[i % 3]) |
 					  ((i + 1 +
 					    reorder[(i + 1) % 3]) << 16));
-			if (i < start+count)
-				BCI_WRITE(i + reorder[i%3]);
+			if (i < start + count)
+				BCI_WRITE(i + reorder[i % 3]);
 		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
-			BEGIN_BCI((count+1+1)/2);
+			BEGIN_BCI((count + 1 + 1) / 2);
 			BCI_DRAW_INDICES_S3D(count, prim, start);
 
-			for (i = start+1; i+1 < start+count; i += 2)
-				BCI_WRITE(i | ((i+1) << 16));
-			if (i < start+count)
+			for (i = start + 1; i + 1 < start + count; i += 2)
+				BCI_WRITE(i | ((i + 1) << 16));
+			if (i < start + count)
 				BCI_WRITE(i);
 		} else {
-			BEGIN_BCI((count+2+1)/2);
+			BEGIN_BCI((count + 2 + 1) / 2);
 			BCI_DRAW_INDICES_S4(count, prim, skip);
 
-			for (i = start; i+1 < start+count; i += 2)
-				BCI_WRITE(i | ((i+1) << 16));
-			if (i < start+count)
+			for (i = start; i + 1 < start + count; i += 2)
+				BCI_WRITE(i | ((i + 1) << 16));
+			if (i < start + count)
 				BCI_WRITE(i);
 		}
@@ -479,9 +479,9 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
 		return -EINVAL;
 	}
 
-	if (start + n > vb_size / (vb_stride*4)) {
+	if (start + n > vb_size / (vb_stride * 4)) {
 		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
-			  start, start + n - 1, vb_size / (vb_stride*4));
+			  start, start + n - 1, vb_size / (vb_stride * 4));
 		return -EINVAL;
 	}
@@ -493,28 +493,28 @@ static int savage_dispatch_vb_prim(drm_savage_private_t *dev_priv,
 			/* Need to reorder vertices for correct flat
 			 * shading while preserving the clock sense
 			 * for correct culling. Only on Savage3D. */
-			int reorder[3] = {-1, -1, -1};
-			reorder[start%3] = 2;
+			int reorder[3] = { -1, -1, -1 };
+			reorder[start % 3] = 2;
 
-			BEGIN_DMA(count*vtx_size+1);
+			BEGIN_DMA(count * vtx_size + 1);
 			DMA_DRAW_PRIMITIVE(count, prim, skip);
 
-			for (i = start; i < start+count; ++i) {
+			for (i = start; i < start + count; ++i) {
 				unsigned int j = i + reorder[i % 3];
-				DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
 			}
 
 			DMA_COMMIT();
 		} else {
-			BEGIN_DMA(count*vtx_size+1);
+			BEGIN_DMA(count * vtx_size + 1);
 			DMA_DRAW_PRIMITIVE(count, prim, skip);
 
 			if (vb_stride == vtx_size) {
-				DMA_COPY(&vtxbuf[vb_stride*start],
-					 vtx_size*count);
+				DMA_COPY(&vtxbuf[vb_stride * start],
+					 vtx_size * count);
 			} else {
-				for (i = start; i < start+count; ++i) {
-					DMA_COPY(&vtxbuf[vb_stride*i],
+				for (i = start; i < start + count; ++i) {
+					DMA_COPY(&vtxbuf[vb_stride * i],
 						 vtx_size);
 				}
 			}
@@ -544,8 +544,8 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
 	BCI_LOCALS;
 
 	if (!dmabuf) {
-	    DRM_ERROR("called without dma buffers!\n");
-	    return -EINVAL;
+		DRM_ERROR("called without dma buffers!\n");
+		return -EINVAL;
 	}
 
 	if (!n)
@@ -623,9 +623,9 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
 		/* check indices */
 		for (i = 0; i < count; ++i) {
-			if (idx[i] > dmabuf->total/32) {
+			if (idx[i] > dmabuf->total / 32) {
 				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
-					  i, idx[i], dmabuf->total/32);
+					  i, idx[i], dmabuf->total / 32);
 				return -EINVAL;
 			}
 		}
@@ -634,31 +634,31 @@ static int savage_dispatch_dma_idx(drm_savage_private_t *dev_priv,
 			/* Need to reorder indices for correct flat
 			 * shading while preserving the clock sense
 			 * for correct culling. Only on Savage3D.
 			 */
-			int reorder[3] = {2, -1, -1};
+			int reorder[3] = { 2, -1, -1 };
 
-			BEGIN_BCI((count+1+1)/2);
+			BEGIN_BCI((count + 1 + 1) / 2);
 			BCI_DRAW_INDICES_S3D(count, prim, idx[2]);
 
-			for (i = 1; i+1 < count; i += 2)
+			for (i = 1; i + 1 < count; i += 2)
 				BCI_WRITE(idx[i + reorder[i % 3]] |
 					  (idx[i + 1 +
 					   reorder[(i + 1) % 3]] << 16));
 			if (i < count)
-				BCI_WRITE(idx[i + reorder[i%3]]);
+				BCI_WRITE(idx[i + reorder[i % 3]]);
 		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
-			BEGIN_BCI((count+1+1)/2);
+			BEGIN_BCI((count + 1 + 1) / 2);
 			BCI_DRAW_INDICES_S3D(count, prim, idx[0]);
 
-			for (i = 1; i+1 < count; i += 2)
-				BCI_WRITE(idx[i] | (idx[i+1] << 16));
+			for (i = 1; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
 			if (i < count)
 				BCI_WRITE(idx[i]);
 		} else {
-			BEGIN_BCI((count+2+1)/2);
+			BEGIN_BCI((count + 2 + 1) / 2);
 			BCI_DRAW_INDICES_S4(count, prim, skip);
 
-			for (i = 0; i+1 < count; i += 2)
-				BCI_WRITE(idx[i] | (idx[i+1] << 16));
+			for (i = 0; i + 1 < count; i += 2)
+				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
 			if (i < count)
 				BCI_WRITE(idx[i]);
 		}
@@ -743,9 +743,9 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
 		/* Check indices */
 		for (i = 0; i < count; ++i) {
-			if (idx[i] > vb_size / (vb_stride*4)) {
+			if (idx[i] > vb_size / (vb_stride * 4)) {
 				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
-					  i, idx[i],  vb_size / (vb_stride*4));
+					  i, idx[i],  vb_size / (vb_stride * 4));
 				return -EINVAL;
 			}
 		}
@@ -754,24 +754,24 @@ static int savage_dispatch_vb_idx(drm_savage_private_t *dev_priv,
 			/* Need to reorder vertices for correct flat
 			 * shading while preserving the clock sense
 			 * for correct culling. Only on Savage3D.
 			 */
-			int reorder[3] = {2, -1, -1};
+			int reorder[3] = { 2, -1, -1 };
 
-			BEGIN_DMA(count*vtx_size+1);
+			BEGIN_DMA(count * vtx_size + 1);
 			DMA_DRAW_PRIMITIVE(count, prim, skip);
 
 			for (i = 0; i < count; ++i) {
 				unsigned int j = idx[i + reorder[i % 3]];
-				DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
 			}
 
 			DMA_COMMIT();
 		} else {
-			BEGIN_DMA(count*vtx_size+1);
+			BEGIN_DMA(count * vtx_size + 1);
 			DMA_DRAW_PRIMITIVE(count, prim, skip);
 
 			for (i = 0; i < count; ++i) {
 				unsigned int j = idx[i];
-				DMA_COPY(&vtxbuf[vb_stride*j], vtx_size);
+				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
 			}
 
 			DMA_COMMIT();
@@ -823,12 +823,12 @@ static int savage_dispatch_clear(drm_savage_private_t *dev_priv,
 		x = boxes[i].x1, y = boxes[i].y1;
 		w = boxes[i].x2 - boxes[i].x1;
 		h = boxes[i].y2 - boxes[i].y1;
-		BEGIN_DMA(nbufs*6);
+		BEGIN_DMA(nbufs * 6);
 		for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
 			if (!(flags & buf))
 				continue;
 			DMA_WRITE(clear_cmd);
-			switch(buf) {
+			switch (buf) {
 			case SAVAGE_FRONT:
 				DMA_WRITE(dev_priv->front_offset);
 				DMA_WRITE(dev_priv->front_bd);
@@ -880,8 +880,8 @@ static int savage_dispatch_swap(drm_savage_private_t *dev_priv,
 		DMA_WRITE(dev_priv->back_bd);
 		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
 		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
-		DMA_WRITE(BCI_W_H(boxes[i].x2-boxes[i].x1,
-				  boxes[i].y2-boxes[i].y1));
+		DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
+				  boxes[i].y2 - boxes[i].y1));
 		DMA_COMMIT();
 	}
@@ -966,14 +966,14 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 	int ret = 0;
 
 	DRM_DEBUG("\n");
-	
+
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (dma && dma->buflist) {
 		if (cmdbuf->dma_idx > dma->buf_count) {
 			DRM_ERROR
 			    ("vertex buffer index %u out of range (0-%u)\n",
-			     cmdbuf->dma_idx, dma->buf_count-1);
+			     cmdbuf->dma_idx, dma->buf_count - 1);
 			return -EINVAL;
 		}
 		dmabuf = dma->buflist[cmdbuf->dma_idx];
@@ -1064,15 +1064,15 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 		case SAVAGE_CMD_DMA_PRIM:
 		case SAVAGE_CMD_VB_PRIM:
 			if (!first_draw_cmd)
-				first_draw_cmd = cmdbuf->cmd_addr-1;
+				first_draw_cmd = cmdbuf->cmd_addr - 1;
 			cmdbuf->cmd_addr += j;
 			i += j;
 			break;
 		default:
 			if (first_draw_cmd) {
-				ret = savage_dispatch_draw (
+				ret = savage_dispatch_draw(
 					dev_priv, first_draw_cmd,
-					cmdbuf->cmd_addr-1,
+					cmdbuf->cmd_addr - 1,
 					dmabuf, cmdbuf->vb_addr,
 					cmdbuf->vb_size,
 					cmdbuf->vb_stride,
@@ -1134,7 +1134,7 @@ int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_
 	}
 
 	if (first_draw_cmd) {
-		ret = savage_dispatch_draw (
+		ret = savage_dispatch_draw(
 			dev_priv, first_draw_cmd, cmdbuf->cmd_addr, dmabuf,
 			cmdbuf->vb_addr, cmdbuf->vb_size, cmdbuf->vb_stride,
 			cmdbuf->nbox, cmdbuf->box_addr);
diff --git a/shared-core/sis_drv.h b/shared-core/sis_drv.h
index a4a88fe1..db532f3c 100644
--- a/shared-core/sis_drv.h
+++ b/shared-core/sis_drv.h
@@ -84,8 +84,6 @@ extern int sis_final_context(struct drm_device * dev, int context);
 #endif
 
-
-
 extern struct drm_ioctl_desc sis_ioctls[];
 extern int sis_max_ioctl;
diff --git a/shared-core/via_3d_reg.h b/shared-core/via_3d_reg.h
index cf61bb51..462375d5 100644
--- a/shared-core/via_3d_reg.h
+++ b/shared-core/via_3d_reg.h
@@ -1643,7 +1643,6 @@
 #define HC_HAGPBpID_STOP         0x00000002
 #define HC_HAGPBpH_MASK          0x00ffffff
-
 #define VIA_VIDEO_HEADER5       0xFE040000
 #define VIA_VIDEO_HEADER6       0xFE050000
 #define VIA_VIDEO_HEADER7       0xFE060000
diff --git a/shared-core/via_dma.c b/shared-core/via_dma.c
index bd737a7e..431738a9 100644
--- a/shared-core/via_dma.c
+++ b/shared-core/via_dma.c
@@ -1,11 +1,11 @@
 /* via_dma.c -- DMA support for the VIA Unichrome/Pro
- * 
+ *
  * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
  * All Rights Reserved.
  *
  * Copyright 2004 Digeo, Inc., Palo Alto, CA, U.S.A.
 * All Rights Reserved.
- * 
+ *
 * Copyright 2004 The Unichrome project.
 * All Rights Reserved.
 *
@@ -23,14 +23,14 @@
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
- * Authors: 
- *    Tungsten Graphics, 
- *    Erdi Chen, 
+ * Authors:
+ *    Tungsten Graphics,
+ *    Erdi Chen,
 *    Thomas Hellstrom.
 */
@@ -47,18 +47,18 @@
 	dev_priv->dma_low +=8;					\
 }
 
-#define via_flush_write_combine() DRM_MEMORYBARRIER() 
+#define via_flush_write_combine() DRM_MEMORYBARRIER()
 
 #define VIA_OUT_RING_QW(w1,w2)			\
 	*vb++ = (w1);				\
 	*vb++ = (w2);				\
-	dev_priv->dma_low += 8; 
+	dev_priv->dma_low += 8;
 
-static void via_cmdbuf_start(drm_via_private_t * dev_priv);
-static void via_cmdbuf_pause(drm_via_private_t * dev_priv);
-static void via_cmdbuf_reset(drm_via_private_t * dev_priv);
-static void via_cmdbuf_rewind(drm_via_private_t * dev_priv);
-static int via_wait_idle(drm_via_private_t * dev_priv);
+static void via_cmdbuf_start(drm_via_private_t *dev_priv);
+static void via_cmdbuf_pause(drm_via_private_t *dev_priv);
+static void via_cmdbuf_reset(drm_via_private_t *dev_priv);
+static void via_cmdbuf_rewind(drm_via_private_t *dev_priv);
+static int via_wait_idle(drm_via_private_t *dev_priv);
 static void via_pad_cache(drm_via_private_t *dev_priv, int qwords);
@@ -70,9 +70,9 @@ static uint32_t via_cmdbuf_space(drm_via_private_t *dev_priv)
 {
 	uint32_t agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
 	uint32_t hw_addr = *(dev_priv->hw_addr_ptr) - agp_base;
-	
-	return ((hw_addr <= dev_priv->dma_low) ? 
-		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) : 
+
+	return ((hw_addr <= dev_priv->dma_low) ?
+		(dev_priv->dma_high + hw_addr - dev_priv->dma_low) :
 		(hw_addr - dev_priv->dma_low));
 }
@@ -110,7 +110,7 @@ via_cmdbuf_wait(drm_via_private_t * dev_priv, unsigned int size)
 		if (count-- == 0) {
 			DRM_ERROR
 			    ("via_cmdbuf_wait timed out hw %x cur_addr %x next_addr %x\n",
-			    hw_addr, cur_addr, next_addr);
+			     hw_addr, cur_addr, next_addr);
 			return -1;
 		}
 	} while ((cur_addr < hw_addr) && (next_addr >= hw_addr));
@@ -167,14 +167,12 @@ static int via_initialize(struct drm_device * dev,
 	}
 
 	if (dev_priv->ring.virtual_start != NULL) {
-		DRM_ERROR("%s called again without calling cleanup\n",
-			  __FUNCTION__);
+		DRM_ERROR("called again without calling cleanup\n");
 		return -EFAULT;
 	}
 
 	if (!dev->agp || !dev->agp->base) {
-		DRM_ERROR("%s called with no agp memory available\n", 
-			  __FUNCTION__);
+		DRM_ERROR("called with no agp memory available\n");
 		return -EFAULT;
 	}
@@ -257,8 +255,7 @@ static int via_dispatch_cmdbuffer(struct drm_device * dev, drm_via_cmdbuffer_t *
 	dev_priv = (drm_via_private_t *) dev->dev_private;
 
 	if (dev_priv->ring.virtual_start == NULL) {
-		DRM_ERROR("%s called without initializing AGP ring buffer.\n",
-			  __FUNCTION__);
+		DRM_ERROR("called without initializing AGP ring buffer.\n");
 		return -EFAULT;
 	}
@@ -327,8 +324,7 @@ static int via_cmdbuffer(struct drm_device *dev, void *data, struct drm_file *fi
 
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	DRM_DEBUG("via cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
-		  cmdbuf->size);
+	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
 
 	ret = via_dispatch_cmdbuffer(dev, cmdbuf);
 	if (ret) {
@@ -369,8 +365,7 @@ static int via_pci_cmdbuffer(struct drm_device *dev, void *data, struct drm_file
 
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
 
-	DRM_DEBUG("via_pci_cmdbuffer, buf %p size %lu\n", cmdbuf->buf,
-		  cmdbuf->size);
+	DRM_DEBUG("buf %p size %lu\n", cmdbuf->buf, cmdbuf->size);
 
 	ret = via_dispatch_pci_cmdbuffer(dev, cmdbuf);
 	if (ret) {
@@ -450,7 +445,7 @@ static int via_hook_segment(drm_via_private_t * dev_priv,
 
 
 
-static int via_wait_idle(drm_via_private_t * dev_priv)
+static int via_wait_idle(drm_via_private_t *dev_priv)
 {
 	int count = 10000000;
@@ -462,7 +457,7 @@ static int via_wait_idle(drm_via_private_t * dev_priv)
 	return count;
 }
 
-static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
+static uint32_t *via_align_cmd(drm_via_private_t *dev_priv, uint32_t cmd_type,
 			       uint32_t addr, uint32_t *cmd_addr_hi,
 			       uint32_t *cmd_addr_lo, int skip_wait)
 {
@@ -472,16 +467,17 @@ static uint32_t *via_align_cmd(drm_via_private_t * dev_priv, uint32_t cmd_type,
 	uint32_t qw_pad_count;
 
 	if (!skip_wait)
-		via_cmdbuf_wait(dev_priv, 2*CMDBUF_ALIGNMENT_SIZE);
+		via_cmdbuf_wait(dev_priv, 2 * CMDBUF_ALIGNMENT_SIZE);
 
 	vb = via_get_dma(dev_priv);
-	VIA_OUT_RING_QW( HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
-			 (VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16); 
+	VIA_OUT_RING_QW(HC_HEADER2 | ((VIA_REG_TRANSET >> 2) << 12) |
+			(VIA_REG_TRANSPACE >> 2), HC_ParaType_PreCR << 16);
+
 	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
 	qw_pad_count = (CMDBUF_ALIGNMENT_SIZE >> 3) -
 		((dev_priv->dma_low & CMDBUF_ALIGNMENT_MASK) >> 3);
-	cmd_addr = (addr) ? addr : 
+	cmd_addr = (addr) ? addr :
 		agp_base + dev_priv->dma_low - 8 + (qw_pad_count << 3);
 	addr_lo = ((HC_SubA_HAGPBpL << 24) | (cmd_type & HC_HAGPBpID_MASK) |
 		   (cmd_addr & HC_HAGPBpL_MASK));
@@ -514,8 +510,8 @@ static void via_cmdbuf_start(drm_via_private_t * dev_priv)
 	command = ((HC_SubA_HAGPCMNT << 24) | (start_addr >> 24) |
 		   ((end_addr & 0xff000000) >> 16));
 
-	dev_priv->last_pause_ptr = 
-		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, 
+	dev_priv->last_pause_ptr =
+		via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0,
 			      &pause_addr_hi, & pause_addr_lo, 1) - 1;
 
 	via_flush_write_combine();
@@ -557,8 +553,8 @@ static void via_pad_cache(drm_via_private_t *dev_priv, int qwords)
 
 	via_cmdbuf_wait(dev_priv, qwords + 2);
 	vb = via_get_dma(dev_priv);
-	VIA_OUT_RING_QW( HC_HEADER2, HC_ParaType_NotTex << 16);
-	via_align_buffer(dev_priv,vb,qwords);
+	VIA_OUT_RING_QW(HC_HEADER2, HC_ParaType_NotTex << 16);
+	via_align_buffer(dev_priv, vb, qwords);
 }
 
 static inline void via_dummy_bitblt(drm_via_private_t * dev_priv)
@@ -577,9 +573,9 @@ static void via_cmdbuf_jump(drm_via_private_t * dev_priv)
 	volatile uint32_t *last_pause_ptr;
 
 	agp_base = dev_priv->dma_offset + (uint32_t) dev_priv->agpAddr;
-	via_align_cmd(dev_priv,  HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
+	via_align_cmd(dev_priv, HC_HAGPBpID_JUMP, 0, &jump_addr_hi,
 		      &jump_addr_lo, 0);
-	
+
 	dev_priv->dma_wrap = dev_priv->dma_low;
@@ -594,19 +590,18 @@
 	via_dummy_bitblt(dev_priv);
 	via_dummy_bitblt(dev_priv);
 
-	last_pause_ptr = via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+	last_pause_ptr = via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
 				       &pause_addr_lo, 0) -1;
-	via_align_cmd(dev_priv,  HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
+	via_align_cmd(dev_priv, HC_HAGPBpID_PAUSE, 0, &pause_addr_hi,
 		      &pause_addr_lo, 0);
 
 	*last_pause_ptr = pause_addr_lo;
 
-	via_hook_segment( dev_priv, jump_addr_hi, jump_addr_lo, 0);
+	via_hook_segment(dev_priv, jump_addr_hi, jump_addr_lo, 0);
 }
 
-
 static void via_cmdbuf_rewind(drm_via_private_t * dev_priv)
 {
-	via_cmdbuf_jump(dev_priv); 
+	via_cmdbuf_jump(dev_priv);
 }
 
 static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
@@ -614,7 +609,7 @@ static void via_cmdbuf_flush(drm_via_private_t * dev_priv, uint32_t cmd_type)
 	uint32_t pause_addr_lo, pause_addr_hi;
 
 	via_align_cmd(dev_priv, cmd_type, 0, &pause_addr_hi, &pause_addr_lo, 0);
-	via_hook_segment( dev_priv, pause_addr_hi, pause_addr_lo, 0);
+	via_hook_segment(dev_priv, pause_addr_hi, pause_addr_lo, 0);
 }
@@ -640,20 +635,19 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
 	uint32_t tmp_size, count;
 	drm_via_private_t *dev_priv;
 
-	DRM_DEBUG("via cmdbuf_size\n");
+	DRM_DEBUG("\n");
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	dev_priv = (drm_via_private_t *) dev->dev_private;
 
 	if (dev_priv->ring.virtual_start == NULL) {
-		DRM_ERROR("%s called without initializing AGP ring buffer.\n",
-			  __FUNCTION__);
+		DRM_ERROR("called without initializing AGP ring buffer.\n");
 		return -EFAULT;
 	}
 
 	count = 1000000;
 	tmp_size = d_siz->size;
-	switch(d_siz->func) {
+	switch (d_siz->func) {
 	case VIA_CMDBUF_SPACE:
 		while (((tmp_size = via_cmdbuf_space(dev_priv)) < d_siz->size)
 		       && count--) {
@@ -687,12 +681,12 @@ static int via_cmdbuf_size(struct drm_device *dev, void *data, struct drm_file *
 }
 
 #ifndef VIA_HAVE_DMABLIT
-int 
+int
 via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
 	DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
 	return -EINVAL;
 }
-int 
+int
 via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) {
 	DRM_ERROR("PCI DMA BitBlt is not implemented for this system.\n");
 	return -EINVAL;
diff --git a/shared-core/via_drm.h b/shared-core/via_drm.h
index b15785b3..e6a8ec64 100644
--- a/shared-core/via_drm.h
+++ b/shared-core/via_drm.h
@@ -49,7 +49,7 @@
 #define VIA_DRM_DRIVER_PATCHLEVEL	1
 #define VIA_DRM_DRIVER_VERSION	  (((VIA_DRM_DRIVER_MAJOR) << 16) | (VIA_DRM_DRIVER_MINOR))
 
-#define VIA_NR_SAREA_CLIPRECTS 		8
+#define VIA_NR_SAREA_CLIPRECTS		8
 #define VIA_NR_XVMC_PORTS	       10
 #define VIA_NR_XVMC_LOCKS	       5
 #define VIA_MAX_CACHELINE_SIZE	  64
@@ -114,7 +114,7 @@
 #define VIA_MEM_VIDEO   0	/* matches drm constant */
 #define VIA_MEM_AGP     1	/* matches drm constant */
-#define VIA_MEM_SYSTEM  2		 
+#define VIA_MEM_SYSTEM  2
 #define VIA_MEM_MIXED   3
 #define VIA_MEM_UNKNOWN 4
@@ -203,7 +203,7 @@ typedef struct _drm_via_sarea {
 	unsigned int XvMCDisplaying[VIA_NR_XVMC_PORTS];
 	unsigned int XvMCSubPicOn[VIA_NR_XVMC_PORTS];
-	unsigned int XvMCCtxNoGrabbed;	/* Last context to hold decoder */	
+	unsigned int XvMCCtxNoGrabbed;	/* Last context to hold decoder */
 
 	/* Used by the 3d driver only at this point, for pageflipping:
 	 */
@@ -228,7 +228,7 @@ typedef enum {
 
 #define VIA_IRQ_FLAGS_MASK 0xF0000000
 
-enum drm_via_irqs{
+enum drm_via_irqs {
 	drm_via_irq_hqv0 = 0,
 	drm_via_irq_hqv1,
 	drm_via_irq_dma0_dd,
@@ -238,7 +238,7 @@ enum drm_via_irqs{
 	drm_via_irq_num
 };
 
-struct drm_via_wait_irq_request{
+struct drm_via_wait_irq_request {
 	unsigned irq;
 	via_irq_seq_type_t type;
 	uint32_t sequence;
@@ -250,14 +250,14 @@ typedef union drm_via_irqwait {
 	struct drm_wait_vblank_reply reply;
 } drm_via_irqwait_t;
 
-typedef struct drm_via_blitsync { 
+typedef struct drm_via_blitsync {
 	uint32_t sync_handle;
 	unsigned engine;
 } drm_via_blitsync_t;
 
-/* 
+/*
 * Below,"flags" is currently unused but will be used for possible future
- * extensions like kernel space bounce buffers for bad alignments and 
+ * extensions like kernel space bounce buffers for bad alignments and
 * blit engine busy-wait polling for better latency in the absence of
 * interrupts.
 */
@@ -270,12 +270,12 @@ typedef struct drm_via_dmablit {
 	uint32_t fb_stride;
 	unsigned char *mem_addr;
-	uint32_t  mem_stride;
-        
-	uint32_t  flags;
+	uint32_t mem_stride;
+
+	uint32_t flags;
 	int to_fb;
-	drm_via_blitsync_t sync;   
+	drm_via_blitsync_t sync;
 } drm_via_dmablit_t;
diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c
index 3a6f27fb..a802e4ae 100644
--- a/shared-core/via_drv.c
+++ b/shared-core/via_drv.c
@@ -74,7 +74,7 @@ static struct drm_bo_driver via_bo_driver = {
 	.fence_type = via_fence_types,
 	.invalidate_caches = via_invalidate_caches,
 	.init_mem_type = via_init_mem_type,
-	.evict_mask = via_evict_mask,
+	.evict_flags = via_evict_flags,
 	.move = NULL,
 };
 #endif
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index bb1c4857..8dd4a727 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -83,7 +83,7 @@ typedef struct drm_via_irq {
 	uint32_t enable_mask;
 	wait_queue_head_t irq_queue;
 } drm_via_irq_t;
-	
+
 typedef struct drm_via_private {
 	drm_via_sarea_t *sarea_priv;
 	drm_local_map_t *sarea;
@@ -111,8 +111,8 @@ typedef struct drm_via_private {
 	drm_via_irq_t via_irqs[VIA_NUM_IRQS];
 	unsigned num_irqs;
 	maskarray_t *irq_masks;
-	uint32_t irq_enable_mask; 
-	uint32_t irq_pending_mask;	
+	uint32_t irq_enable_mask;
+	uint32_t irq_pending_mask;
 	int *irq_map;
 
 	/* Memory manager stuff */
 #ifdef VIA_HAVE_CORE_MM
@@ -214,9 +214,9 @@ extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *fclass,
 extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
 			       struct drm_mem_type_manager *man);
-extern uint32_t via_evict_mask(struct drm_buffer_object *bo);
+extern uint64_t via_evict_flags(struct drm_buffer_object *bo);
 extern int via_move(struct drm_buffer_object *bo, int evict,
-	      	int no_wait, struct drm_bo_mem_reg *new_mem);
+		int no_wait, struct drm_bo_mem_reg *new_mem);
 #endif
 
 #endif
diff --git a/shared-core/via_irq.c b/shared-core/via_irq.c
index a1d33248..b8e652e6 100644
--- a/shared-core/via_irq.c
+++ b/shared-core/via_irq.c
@@ -63,7 +63,7 @@
 /*
 * Device-specific IRQs go here. This type might need to be extended with
 * the register if there are multiple IRQ control registers.
- * Currently we activate the HQV interrupts of  Unichrome Pro group A. 
+ * Currently we activate the HQV interrupts of  Unichrome Pro group A.
 */
 
 static maskarray_t via_pro_group_a_irqs[] = {
@@ -71,30 +71,29 @@ static maskarray_t via_pro_group_a_irqs[] = {
 	 0x00000000 },
 	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
 	 0x00000000 },
-	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 
+	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 };
-static int via_num_pro_group_a =
-    sizeof(via_pro_group_a_irqs)/sizeof(maskarray_t);
+static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
 static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
 
 static maskarray_t via_unichrome_irqs[] = {
-	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 
+	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
 };
-static int via_num_unichrome = sizeof(via_unichrome_irqs)/sizeof(maskarray_t);
+static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
 static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
 
-static unsigned time_diff(struct timeval *now,struct timeval *then) 
+static unsigned time_diff(struct timeval *now,struct timeval *then)
 {
-    return (now->tv_usec >= then->tv_usec) ?
-	now->tv_usec - then->tv_usec :
-	1000000 - (then->tv_usec - now->tv_usec);
+	return (now->tv_usec >= then->tv_usec) ?
+		now->tv_usec - then->tv_usec :
+		1000000 - (then->tv_usec - now->tv_usec);
 }
 
 u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
@@ -126,7 +125,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 			microtime(&cur_vblank);
 #endif
 			if (dev_priv->last_vblank_valid) {
-				dev_priv->usec_per_vblank = 
+				dev_priv->usec_per_vblank =
 					time_diff(&cur_vblank,
 						  &dev_priv->last_vblank) >> 4;
 			}
@@ -135,16 +134,16 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
 			DRM_DEBUG("US per vblank is: %u\n",
-				dev_priv->usec_per_vblank);
+				  dev_priv->usec_per_vblank);
 		}
 		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
-	
-	for (i=0; i<dev_priv->num_irqs; ++i) {
+
+	for (i = 0; i < dev_priv->num_irqs; ++i) {
 		if (status & cur_irq->pending_mask) {
-			atomic_inc( &cur_irq->irq_received );
-			DRM_WAKEUP( &cur_irq->irq_queue );
+			atomic_inc(&cur_irq->irq_received);
+			DRM_WAKEUP(&cur_irq->irq_queue);
 			handled = 1;
 #ifdef VIA_HAVE_DMABLIT
 			if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
@@ -156,7 +155,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 		}
 		cur_irq++;
 	}
-	
+
 	/* Acknowlege interrupts */
 	VIA_WRITE(VIA_REG_INTERRUPT, status);
@@ -174,7 +173,7 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
 	if (dev_priv) {
 		/* Acknowlege interrupts */
 		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status | 
+		VIA_WRITE(VIA_REG_INTERRUPT, status |
 			  dev_priv->irq_pending_mask);
 	}
 }
@@ -198,11 +197,6 @@ void via_disable_vblank(struct drm_device *dev, int crtc)
 {
 	if (crtc != 0)
 		DRM_ERROR("%s:  bad crtc %d\n", __FUNCTION__, crtc);
-
-	/*
-	 * FIXME: implement proper interrupt disable by using the vblank
-	 * counter register (if available).
-	 */
 }
 
 static int
@@ -216,24 +210,23 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
 	maskarray_t *masks;
 	int real_irq;
 
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	DRM_DEBUG("\n");
 
 	if (!dev_priv) {
-		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
+		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	if (irq >= drm_via_irq_num ) {
-		DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__,
-			  irq);
+	if (irq >= drm_via_irq_num) {
+		DRM_ERROR("Trying to wait on unknown irq %d\n", irq);
 		return -EINVAL;
 	}
-		
+
 	real_irq = dev_priv->irq_map[irq];
 
 	if (real_irq < 0) {
-		DRM_ERROR("%s Video IRQ %d not available on this hardware.\n",
-			  __FUNCTION__, irq);
+		DRM_ERROR("Video IRQ %d not available on this hardware.\n",
+			  irq);
 		return -EINVAL;
 	}
@@ -242,14 +235,14 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
 
 	if (masks[real_irq][2] && !force_sequence) {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
-			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) == 
+			    ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
 			     masks[irq][4]));
 		cur_irq_sequence = atomic_read(&cur_irq->irq_received);
 	} else {
 		DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
 			    (((cur_irq_sequence =
 			       atomic_read(&cur_irq->irq_received)) -
-			      *sequence) <= (1 << 23)));		
+			      *sequence) <= (1 << 23)));
 	}
 	*sequence = cur_irq_sequence;
 	return ret;
@@ -267,7 +260,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
 	drm_via_irq_t *cur_irq;
 	int i;
 
-	DRM_DEBUG("driver_irq_preinstall: dev_priv: %p\n", dev_priv);
+	DRM_DEBUG("dev_priv: %p\n", dev_priv);
 	if (dev_priv) {
 		cur_irq = dev_priv->via_irqs;
@@ -285,25 +278,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
 			dev_priv->irq_map = via_irqmap_unichrome;
 		}
 
-		for(i=0; i < dev_priv->num_irqs; ++i) {
+		for (i = 0; i < dev_priv->num_irqs; ++i) {
 			atomic_set(&cur_irq->irq_received, 0);
-			cur_irq->enable_mask = dev_priv->irq_masks[i][0]; 
+			cur_irq->enable_mask = dev_priv->irq_masks[i][0];
 			cur_irq->pending_mask = dev_priv->irq_masks[i][1];
-			DRM_INIT_WAITQUEUE( &cur_irq->irq_queue );
+			DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
 			dev_priv->irq_enable_mask |= cur_irq->enable_mask;
 			dev_priv->irq_pending_mask |= cur_irq->pending_mask;
 			cur_irq++;
-			
+
 			DRM_DEBUG("Initializing IRQ %d\n", i);
 		}
-			
+
 		dev_priv->last_vblank_valid = 0;
 
 		/* Clear VSync interrupt regs */
 		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status & 
+		VIA_WRITE(VIA_REG_INTERRUPT, status &
 			  ~(dev_priv->irq_enable_mask));
-		
+
 		/* Clear bits if they're already high */
 		viadrv_acknowledge_irqs(dev_priv);
 	}
@@ -334,7 +327,7 @@ void via_driver_irq_uninstall(struct drm_device * dev)
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
 
-	DRM_DEBUG("driver_irq_uninstall)\n");
+	DRM_DEBUG("\n");
 	if (dev_priv) {
 		/* Some more magic, oh for some data sheets ! */
@@ -343,7 +336,7 @@ void via_driver_irq_uninstall(struct drm_device * dev)
 		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
 
 		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status & 
+		VIA_WRITE(VIA_REG_INTERRUPT, status &
 			  ~(VIA_IRQ_VBLANK_ENABLE | dev_priv->irq_enable_mask));
 	}
 }
@@ -361,7 +354,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		return -EINVAL;
 
 	if (irqwait->request.irq >= dev_priv->num_irqs) {
-		DRM_ERROR("%s Trying to wait on unknown irq %d\n", __FUNCTION__, 
+		DRM_ERROR("Trying to wait on unknown irq %d\n",
 			  irqwait->request.irq);
 		return -EINVAL;
 	}
@@ -380,8 +373,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	}
 
 	if (irqwait->request.type & VIA_IRQ_SIGNAL) {
-		DRM_ERROR("%s Signals on Via IRQs not implemented yet.\n", 
-			  __FUNCTION__);
+		DRM_ERROR("Signals on Via IRQs not implemented yet.\n");
 		return -EINVAL;
 	}
diff --git a/shared-core/via_map.c b/shared-core/via_map.c
index 1623df68..11bfa551 100644
--- a/shared-core/via_map.c
+++ b/shared-core/via_map.c
@@ -30,7 +30,7 @@ static int via_do_init_map(struct drm_device * dev, drm_via_init_t * init)
 	drm_via_private_t *dev_priv = dev->dev_private;
 	int ret = 0;
 
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	DRM_DEBUG("\n");
 
 	dev_priv->sarea = drm_getsarea(dev);
 	if (!dev_priv->sarea) {
@@ -95,7 +95,7 @@ int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	drm_via_init_t *init = data;
 
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	DRM_DEBUG("\n");
 
 	switch (init->func) {
 	case VIA_INIT_MAP:
@@ -140,4 +140,3 @@ int via_driver_unload(struct drm_device *dev)
 
 	return 0;
 }
-
diff --git a/shared-core/via_verifier.c b/shared-core/via_verifier.c
index ded5c4e1..d2b69f74 100644
--- a/shared-core/via_verifier.c
+++ b/shared-core/via_verifier.c
@@ -77,7 +77,7 @@ typedef enum {
 /*
 * Associates each hazard above with a possible multi-command
 * sequence. For example an address that is split over multiple
- * commands and that needs to be checked at the first command 
+ * commands and that needs to be checked at the first command
 * that does not include any part of the address.
 */
@@ -249,10 +249,10 @@ eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
 * Partially stolen from drm_memory.h
 */
 
-static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
+static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
 							  unsigned long offset,
 							  unsigned long size,
-							  struct drm_device * dev)
+							  struct drm_device *dev)
 {
 #ifdef __linux__
 	struct drm_map_list *r_list;
@@ -283,10 +283,10 @@ static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
 }
 
 /*
- * Require that all AGP texture levels reside in the same AGP map which should 
+ * Require that all AGP texture levels reside in the same AGP map which should
 * be mappable by the client. This is not a big restriction.
- * FIXME: To actually enforce this security policy strictly, drm_rmmap 
- * would have to wait for dma quiescent before removing an AGP map. 
+ * FIXME: To actually enforce this security policy strictly, drm_rmmap
+ * would have to wait for dma quiescent before removing an AGP map.
 * The via_drm_lookup_agp_map call in reality seems to take
 * very little CPU time.
 */
@@ -451,15 +451,15 @@ investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
 	case check_texture_addr3:
 		cur_seq->unfinished = tex_address;
 		tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
-		if (tmp == 0 && 
+		if (tmp == 0 &&
 		    (cmd & HC_HTXnEnPit_MASK)) {
-			cur_seq->pitch[cur_seq->texture][tmp] = 
+			cur_seq->pitch[cur_seq->texture][tmp] =
 				(cmd & HC_HTXnLnPit_MASK);
 			cur_seq->tex_npot[cur_seq->texture] = 1;
 		} else {
 			cur_seq->pitch[cur_seq->texture][tmp] =
 				(cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
-			cur_seq->tex_npot[cur_seq->texture] = 0;			
+			cur_seq->tex_npot[cur_seq->texture] = 0;
 			if (cmd & 0x000FFFFF) {
 				DRM_ERROR
 					("Unimplemented texture level 0 pitch mode.\n");
@@ -1007,7 +1007,7 @@ via_verify_command_stream(const uint32_t * buf, unsigned int size,
 			state = via_check_vheader6(&buf, buf_end);
 			break;
 		case state_command:
-			if ((HALCYON_HEADER2 == (cmd = *buf)) && 
+			if ((HALCYON_HEADER2 == (cmd = *buf)) &&
 			    supported_3d)
 				state = state_header2;
 			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
diff --git a/shared-core/via_verifier.h b/shared-core/via_verifier.h
index dac1db91..abdaa653 100644
--- a/shared-core/via_verifier.h
+++ b/shared-core/via_verifier.h
@@ -27,25 +27,23 @@
 #define _VIA_VERIFIER_H_
 
 typedef enum {
-	no_sequence = 0, 
+	no_sequence = 0,
 	z_address,
 	dest_address,
 	tex_address
 } drm_via_sequence_t;
 
-
-
 typedef struct {
 	unsigned texture;
-	uint32_t z_addr; 
-	uint32_t d_addr; 
+	uint32_t z_addr;
+	uint32_t d_addr;
 	uint32_t t_addr[2][10];
 	uint32_t pitch[2][10];
 	uint32_t height[2][10];
-	uint32_t tex_level_lo[2]; 
+	uint32_t tex_level_lo[2];
 	uint32_t tex_level_hi[2];
 	uint32_t tex_palette_size[2];
-        uint32_t tex_npot[2];
+	uint32_t tex_npot[2];
 	drm_via_sequence_t unfinished;
 	int agp_texture;
 	int multitex;
@@ -56,9 +54,9 @@ typedef struct {
 	const uint32_t *buf_start;
 } drm_via_state_t;
 
-extern int via_verify_command_stream(const uint32_t * buf, unsigned int size, 
+extern int via_verify_command_stream(const uint32_t *buf, unsigned int size,
 				    struct drm_device *dev, int agp);
-extern int via_parse_command_stream(struct drm_device *dev, const uint32_t * buf,
+extern int via_parse_command_stream(struct drm_device *dev, const uint32_t *buf,
                                    unsigned int size);
 
 #endif
diff --git a/shared-core/via_video.c b/shared-core/via_video.c
index c15e75b5..6ec04ac1 100644
--- a/shared-core/via_video.c
+++ b/shared-core/via_video.c
@@ -33,7 +33,7 @@ void via_init_futex(drm_via_private_t * dev_priv)
 {
 	unsigned int i;
 
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	DRM_DEBUG("\n");
 
 	for (i = 0; i < VIA_NR_XVMC_LOCKS; ++i) {
 		DRM_INIT_WAITQUEUE(&(dev_priv->decoder_queue[i]));
@@ -73,7 +73,7 @@ int via_decoder_futex(struct drm_device *dev, void *data, struct drm_file *file_
 	drm_via_sarea_t *sAPriv = dev_priv->sarea_priv;
 	int ret = 0;
 
-	DRM_DEBUG("%s\n", __FUNCTION__);
+	DRM_DEBUG("\n");
 
 	if (fx->lock > VIA_NR_XVMC_LOCKS)
 		return -EFAULT;
diff --git a/shared-core/xgi_drm.h b/shared-core/xgi_drm.h
index de0fb532..ce584420 100644
--- a/shared-core/xgi_drm.h
+++ b/shared-core/xgi_drm.h
@@ -62,14 +62,14 @@ enum xgi_mem_location {
 struct xgi_mem_alloc {
 	/**
 	 * Memory region to be used for allocation.
-	 * 
+	 *
 	 * Must be one of XGI_MEMLOC_NON_LOCAL or XGI_MEMLOC_LOCAL.
 	 */
 	unsigned int location;
 
 	/**
 	 * Number of bytes request.
-	 * 
+	 *
 	 * On successful allocation, set to the actual number of bytes
 	 * allocated.
 	 */
@@ -87,7 +87,7 @@ struct xgi_mem_alloc {
 
 	/**
 	 * Magic handle used to release memory.
-	 * 
+	 *
 	 * See also DRM_XGI_FREE ioctl.
 	 */
 	__u32 index;
diff --git a/tests/ttmtest/src/ttmtest.c b/tests/ttmtest/src/ttmtest.c
index 606fb0cb..36df2428 100644
--- a/tests/ttmtest/src/ttmtest.c
+++ b/tests/ttmtest/src/ttmtest.c
@@ -35,6 +35,7 @@
 #include <X11/Xlib.h>
 #include <X11/Xutil.h>
+#include <stdint.h>
 #include <drm/drm.h>
 #include "xf86dri.h"
 #include "xf86drm.h"
@@ -176,13 +177,11 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
     /*
      * Test system memory objects.
      */
-
     oldTime = fastrdtsc();
-    BM_CKFATAL(drmBOCreate(ctx->drmFD, 0, size, 0, NULL,
-	    drm_bo_type_dc,
-	    DRM_BO_FLAG_READ |
-	    DRM_BO_FLAG_WRITE |
-			   DRM_BO_FLAG_MEM_LOCAL /*| DRM_BO_FLAG_NO_MOVE*/, 0, &buf));
+    BM_CKFATAL(drmBOCreate(ctx->drmFD, size, 0, NULL,
+			   DRM_BO_FLAG_READ |
+			   DRM_BO_FLAG_WRITE |
+			   DRM_BO_FLAG_MEM_LOCAL, 0, &buf));
     curTime = fastrdtsc();
     *ticks++ = time_diff(oldTime, curTime);
@@ -216,12 +215,12 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
     * Test TT bound buffer objects.
     */
 
-    BM_CKFATAL(drmGetLock(ctx->drmFD, ctx->hwContext, 0));
     oldTime = fastrdtsc();
-    BM_CKFATAL(drmBOValidate(ctx->drmFD, &buf,
-	    DRM_BO_FLAG_MEM_TT, DRM_BO_MASK_MEM, DRM_BO_HINT_DONT_FENCE));
+    BM_CKFATAL(drmBOSetStatus(ctx->drmFD, &buf,
+			     DRM_BO_FLAG_MEM_TT, 
+			     DRM_BO_MASK_MEM, 
+			      0,0,0));
     curTime = fastrdtsc();
-    BM_CKFATAL(drmUnlock(ctx->drmFD, ctx->hwContext));
     *ticks++ = time_diff(oldTime, curTime);
 
     oldTime = fastrdtsc();
@@ -247,10 +246,9 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
 
     BM_CKFATAL(drmBOUnmap(ctx->drmFD, &buf));
 
-    BM_CKFATAL(drmGetLock(ctx->drmFD, ctx->hwContext, 0));
     oldTime = fastrdtsc();
-    BM_CKFATAL(drmBOValidate(ctx->drmFD, &buf,
-	    DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEM, DRM_BO_HINT_DONT_FENCE));
+    BM_CKFATAL(drmBOSetStatus(ctx->drmFD, &buf,
+			     DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEM, 0, 0,0));
     curTime = fastrdtsc();
     *ticks++ = time_diff(oldTime, curTime);
@@ -259,15 +257,18 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
     */
 
     oldTime = fastrdtsc();
-    ret = drmBOValidate(ctx->drmFD, &buf,
-	DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING,
-	DRM_BO_MASK_MEMTYPE | DRM_BO_FLAG_FORCE_CACHING, DRM_BO_HINT_DONT_FENCE);
+    ret = drmBOSetStatus(ctx->drmFD, &buf,
+			 DRM_BO_FLAG_MEM_TT | 
+			 DRM_BO_FLAG_CACHED | 
+			 DRM_BO_FLAG_FORCE_CACHING,
+			 DRM_BO_MASK_MEMTYPE | 
+			 DRM_BO_FLAG_FORCE_CACHING,
+			 0, 0, 0);
     curTime = fastrdtsc();
-    drmUnlock(ctx->drmFD, ctx->hwContext);
 
     if (ret) {
 	printf("Couldn't bind cached. Probably no support\n");
-	BM_CKFATAL(drmBODestroy(ctx->drmFD, &buf));
+	BM_CKFATAL(drmBOUnreference(ctx->drmFD, &buf));
 	return 1;
     }
     *ticks++ = time_diff(oldTime, curTime);
@@ -295,7 +296,7 @@ benchmarkBuffer(TinyDRIContext * ctx, unsigned long size,
     *ticks++ = time_diff(oldTime, curTime);
 
     BM_CKFATAL(drmBOUnmap(ctx->drmFD, &buf));
-    BM_CKFATAL(drmBODestroy(ctx->drmFD, &buf));
+    BM_CKFATAL(drmBOUnreference(ctx->drmFD, &buf));
 
     return 0;
 }
diff --git a/tests/ttmtest/src/xf86dri.c b/tests/ttmtest/src/xf86dri.c
index ad92504e..5491473c 100644
--- a/tests/ttmtest/src/xf86dri.c
+++ b/tests/ttmtest/src/xf86dri.c
@@ -45,6 +45,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 #include <X11/Xlibint.h>
 #include <X11/extensions/Xext.h>
 #include <X11/extensions/extutil.h>
+#include <stdint.h>
 #include "xf86dristr.h"
 
 static XExtensionInfo _xf86dri_info_data;