/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
 * 
 * Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: 
 *    Thomas Hellstrom.
 *    Partially based on code obtained from Digeo Inc.
 */


/*
 * Unmaps the DMA mappings.
 * FIXME: Is this a no-op on x86? Also
 * FIXME: What happens if this is called while a pending blit has previously
 * done the same DMA mappings?
 */

#include "drmP.h"
#include "via_drm.h"
#include "via_drv.h"
#include "via_dmablit.h"

#include <linux/pagemap.h>

#define VIA_PGDN(x)             (((unsigned long)(x)) & PAGE_MASK)
#define VIA_PGOFF(x)            (((unsigned long)(x)) & ~PAGE_MASK)
#define VIA_PFN(x)              ((unsigned long)(x) >> PAGE_SHIFT)

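/*
 * One hardware DMA descriptor: the bus address and size of a chunk of the
 * system-memory buffer, the matching frame-buffer address, and the bus
 * address of the next descriptor in the chain.
 */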
typedef struct _drm_via_descriptor {
	uint32_t mem_addr;
	uint32_t dev_addr;
	uint32_t size;
	uint32_t next;
} drm_via_descriptor_t;


/*
 * Unmap the DMA pages and descriptor chain of a previously mapped blit.
 */

static void
via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	int num_desc = vsg->num_desc;
	unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
	unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
	/* Initially one past the last descriptor; decremented before use. */
	drm_via_descriptor_t *desc_ptr = (descriptor_this_page == 0) ? NULL :
		vsg->desc_pages[cur_descriptor_page] + descriptor_this_page;
	dma_addr_t next = vsg->chain_start;

	/*
	 * Walk the chain from the last-built descriptor backwards, unmapping
	 * both the descriptor itself and the memory chunk it describes.
	 */
	while (num_desc--) {
		if (descriptor_this_page-- == 0) {
			cur_descriptor_page--;
			descriptor_this_page = vsg->descriptors_per_page - 1;
			desc_ptr = vsg->desc_pages[cur_descriptor_page] +
				descriptor_this_page;
		} else {
			desc_ptr--;
		}
		dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
		dma_unmap_page(&pdev->dev, desc_ptr->mem_addr, desc_ptr->size, vsg->direction);
		next = (dma_addr_t) desc_ptr->next;
	}
}

/*
 * If mode == 0, count how many descriptors are needed.
 * If mode == 1, map the DMA pages for the device, build the descriptor chain
 * and map the descriptors themselves.
 * Descriptors are run in reverse order by the hardware, because we are not
 * allowed to update the 'next' field without syncing calls when a descriptor
 * is already mapped.
 */

static void
via_map_blit_for_device(struct pci_dev *pdev,
		   const drm_via_dmablit_t *xfer,
		   drm_via_sg_info_t *vsg, 
		   int mode)
{
	unsigned cur_descriptor_page = 0;
	unsigned num_descriptors_this_page = 0;
	unsigned char *mem_addr = xfer->mem_addr;
	unsigned char *cur_mem;
	unsigned char *first_addr = (unsigned char *)VIA_PGDN(mem_addr);
	uint32_t fb_addr = xfer->fb_addr;
	uint32_t cur_fb;
	unsigned long line_len;
	unsigned remaining_len;
	int num_desc = 0;
	int cur_line;
	dma_addr_t next = 0 | VIA_DMA_DPR_EC;
	drm_via_descriptor_t *desc_ptr = NULL;

	if (mode == 1) 
		desc_ptr = vsg->desc_pages[cur_descriptor_page];

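	/*
	 * Walk the blit line by line; each line is split into chunks that
	 * do not cross a page boundary, and each chunk gets one descriptor.
	 */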
	for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {

		line_len = xfer->line_length;
		cur_fb = fb_addr;
		cur_mem = mem_addr;
		
		while (line_len > 0) {

			remaining_len = min(PAGE_SIZE - VIA_PGOFF(cur_mem), line_len);
			line_len -= remaining_len;

			if (mode == 1) {
				desc_ptr->mem_addr =
					dma_map_page(&pdev->dev,
						     vsg->pages[VIA_PFN(cur_mem) -
								VIA_PFN(first_addr)],
						     VIA_PGOFF(cur_mem), remaining_len,
						     vsg->direction);
				desc_ptr->dev_addr = cur_fb;
				desc_ptr->size = remaining_len;
				desc_ptr->next = (uint32_t) next;
				next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
						      DMA_TO_DEVICE);
				desc_ptr++;
				if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
					num_descriptors_this_page = 0;
					desc_ptr = vsg->desc_pages[++cur_descriptor_page];
				}
			}

			num_desc++;
			cur_mem += remaining_len;
			cur_fb += remaining_len;
		}
		
		mem_addr += xfer->mem_stride;
		fb_addr += xfer->fb_stride;
	}

	if (mode == 1) {
		vsg->chain_start = next;
		vsg->state = dr_via_device_mapped;
	}
	vsg->num_desc = num_desc;
}

/*
 * Free up all resources for a blit. Usable even if the blit info has only
 * been partially built, as long as the state enum is consistent with the
 * actual state of the used resources.
 */


void
via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
	struct page *page;
	int i;

	switch (vsg->state) {
	case dr_via_device_mapped:
		via_unmap_blit_from_device(pdev, vsg);
		/* fall through */
	case dr_via_desc_pages_alloc:
		for (i = 0; i < vsg->num_desc_pages; ++i) {
			if (vsg->desc_pages[i] != NULL)
				free_page((unsigned long)vsg->desc_pages[i]);
		}
		kfree(vsg->desc_pages);
		/* fall through */
	case dr_via_pages_locked:
		for (i = 0; i < vsg->num_pages; ++i) {
			if (NULL != (page = vsg->pages[i])) {
				if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
					SetPageDirty(page);
				page_cache_release(page);
			}
		}
		/* fall through */
	case dr_via_pages_alloc:
		vfree(vsg->pages);
		/* fall through */
	default:
		vsg->state = dr_via_sg_init;
	}
	if (vsg->bounce_buffer) {
		vfree(vsg->bounce_buffer);
		vsg->bounce_buffer = NULL;
	}
	vsg->free_on_sequence = 0;
}

/*
 * Fire a blit engine.
 */

static void
via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

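	/*
	 * Reset the engine's address registers and pending status, select
	 * chained mode with a transfer-done interrupt, then point the engine
	 * at the descriptor chain and start the transfer.
	 */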
	VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD | 
		  VIA_DMA_CSR_DE);
	VIA_WRITE(VIA_PCI_DMA_MR0  + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
	VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
	VIA_WRITE(VIA_PCI_DMA_DPR0 + engine*0x10, vsg->chain_start);
	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DE | VIA_DMA_CSR_TS);
}

/*
 * Obtain a page pointer array and lock all pages into system memory. This
 * fails if the calling user does not have access to the submitted address
 * range.
 */

static int
via_lock_all_dma_pages(drm_via_sg_info_t *vsg,  drm_via_dmablit_t *xfer)
{
	int ret;
	unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
	vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) - 
		first_pfn + 1;
	
	if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
		return DRM_ERR(ENOMEM);
	memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
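	/* Pin the user buffer's pages so the device can safely DMA to/from them. */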
	down_read(&current->mm->mmap_sem);
	ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
			     vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE), 
			     0, vsg->pages, NULL);

	up_read(&current->mm->mmap_sem);
	if (ret != vsg->num_pages) {
		if (ret < 0) 
			return ret;
		vsg->state = dr_via_pages_locked;
		return DRM_ERR(EINVAL);
	}
	vsg->state = dr_via_pages_locked;
	DRM_DEBUG("DMA pages locked\n");
	return 0;
}

/*
 * Allocate DMA capable memory for the blit descriptor chain, and an array that keeps track of the
 * pages we allocate. We don't want to use kmalloc for the descriptor chain because it may be
 * quite large for some blits, and pages don't need to be contiguous.
 */

static int 
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
	int i;
	
	vsg->descriptors_per_page = PAGE_SIZE / sizeof(drm_via_descriptor_t);
	vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) / 
		vsg->descriptors_per_page;

	if (NULL ==  (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) 
		return DRM_ERR(ENOMEM);
	
	memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
	vsg->state = dr_via_desc_pages_alloc;
	for (i = 0; i < vsg->num_desc_pages; ++i) {
		if (NULL == (vsg->desc_pages[i] = 
			     (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
			return DRM_ERR(ENOMEM);
	}
	DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages,
		  vsg->num_desc);
	return 0;
}

/*
 * Abort the blit currently running on an engine.
 */

static void
via_abort_dmablit(drm_device_t *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TA);
}

/*
 * Turn a blit engine off.
 */

static void
via_dmablit_engine_off(drm_device_t *dev, int engine)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;

	VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD); 
}



/*
 * The dmablit part of the IRQ handler. Only reasonably fast things should
 * happen here. The rest, like unmapping and freeing memory for done blits,
 * is done in a separate workqueue task. Basically the task of the interrupt
 * handler is to submit a new blit to the engine, while the workqueue task
 * takes care of the processing associated with the old blit.
 */
		
void
via_dmablit_handler(drm_device_t *dev, int engine, int from_irq)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	int cur;
	int done_transfer;
	unsigned long irqsave = 0;
	uint32_t status = 0;

	DRM_DEBUG("DMA blit handler called. engine = %d, from_irq = %d, blitq = 0x%lx\n",
		  engine, from_irq, (unsigned long) blitq);

	if (from_irq) {
		spin_lock(&blitq->blit_lock);
	} else {
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

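	/*
	 * The blit is done if the engine reports transfer done (TD), or if an
	 * abort was requested and the engine has stopped (DE no longer set).
	 */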
	done_transfer = blitq->is_active &&
		((status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
	done_transfer = done_transfer || (blitq->aborting && !(status & VIA_DMA_CSR_DE));

	cur = blitq->cur;
	if (done_transfer) {

		blitq->blits[cur]->aborted = blitq->aborting;
		blitq->done_blit_handle++;
		DRM_WAKEUP(blitq->blit_queue + cur);		

		cur++;
		if (cur >= VIA_NUM_BLIT_SLOTS) 
			cur = 0;
		blitq->cur = cur;

		/*
		 * Clear transfer done flag.
		 */

		VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04,  VIA_DMA_CSR_TD);

		blitq->is_active = 0;
		blitq->aborting = 0;
		schedule_work(&blitq->wq);	

	} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {

		/*
		 * Abort transfer after one second.
		 */

		via_abort_dmablit(dev, engine);
		blitq->aborting = 1;
		blitq->end = jiffies + DRM_HZ;
	}
	  		
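	/*
	 * If the engine is now idle, fire the next queued blit, or turn the
	 * engine off if the queue is empty.
	 */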
	if (!blitq->is_active) {
		if (blitq->num_outstanding) {
			via_fire_dmablit(dev, blitq->blits[cur], engine);
			blitq->is_active = 1;
			blitq->cur = cur;
			blitq->num_outstanding--;
			blitq->end = jiffies + DRM_HZ;
			if (!timer_pending(&blitq->poll_timer)) {
				blitq->poll_timer.expires = jiffies+1;
				add_timer(&blitq->poll_timer);
			}
		} else {
			if (timer_pending(&blitq->poll_timer)) {
				del_timer(&blitq->poll_timer);
			}
			via_dmablit_engine_off(dev, engine);
		}
	}		

	if (from_irq) {
		spin_unlock(&blitq->blit_lock);
	} else {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	}
} 



/*
 * Check whether this blit is still active, performing necessary locking.
 */

static int
via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_queue_head_t **queue)
{
	unsigned long irqsave;
	uint32_t slot;
	int active;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	/*
	 * Allow for handle wraparounds: in modular 32-bit arithmetic, the
	 * blit is still active if done_blit_handle < handle <= cur_blit_handle.
	 */

	active = ((blitq->done_blit_handle - handle) > (1 << 23)) &&
		((blitq->cur_blit_handle - handle) <= (1 << 23));

	if (queue && active) {
		slot = handle - blitq->done_blit_handle + blitq->cur - 1;
		if (slot >= VIA_NUM_BLIT_SLOTS) {
			slot -= VIA_NUM_BLIT_SLOTS;
		}
		*queue = blitq->blit_queue + slot;
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return active;
}
	
/*
 * Sync. Wait up to three seconds for the blit to complete.
 */

static int
via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) 
{

	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
	wait_queue_head_t *queue;
	int ret = 0;

	if (via_dmablit_active(blitq, engine, handle, &queue)) {
		DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ, 
			    !via_dmablit_active(blitq, engine, handle, NULL));
	}
	DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
		  handle, engine, ret);
	
	return ret;
}


/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQs can and do work in parallel. If the hardware has
 * working IRQs, they will shorten the latency somewhat.
 */



static void
via_dmablit_timer(unsigned long data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	drm_device_t *dev = blitq->dev;
	int engine = (int)
		(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
		
	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine, 
		  (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);
	
	if (!timer_pending(&blitq->poll_timer)) {
		blitq->poll_timer.expires = jiffies+1;
		add_timer(&blitq->poll_timer);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);
	}
}




/*
 * Workqueue task that frees data and mappings associated with a blit.
 * Also wakes up waiting processes. Each of these tasks handles one
 * blit engine only, and is not necessarily run on every interrupt.
 */


static void 
via_dmablit_workqueue(void *data)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
	drm_device_t *dev = blitq->dev;
	unsigned long irqsave;
	drm_via_sg_info_t *cur_sg;
	int cur_released;
	
	
	DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long) 
		  (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	
	while (blitq->serviced != blitq->cur) {

		cur_released = blitq->serviced++;

		DRM_DEBUG("Releasing blit slot %d\n", cur_released);

		if (blitq->serviced >= VIA_NUM_BLIT_SLOTS) 
			blitq->serviced = 0;
		
		cur_sg = blitq->blits[cur_released];
		blitq->num_free++;
				
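		/* Don't hold the spinlock over the potentially slow free. */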
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
		
		DRM_WAKEUP(&blitq->busy_queue);
		
		via_free_sg_info(dev->pdev, cur_sg);
		kfree(cur_sg);
		
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
	

/*
 * Init all blit engines. Currently we use two, but some hardware has four.
 */


void
via_init_dmablit(drm_device_t *dev)
{
	int i, j;
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_blitq_t *blitq;

	pci_set_master(dev->pdev);	
	
	for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
		blitq = dev_priv->blit_queues + i;
		blitq->dev = dev;
		blitq->cur_blit_handle = 0;
		blitq->done_blit_handle = 0;
		blitq->head = 0;
		blitq->cur = 0;
		blitq->serviced = 0;
		blitq->num_free = VIA_NUM_BLIT_SLOTS;
		blitq->num_outstanding = 0;
		blitq->is_active = 0;
		blitq->aborting = 0;
		spin_lock_init(&blitq->blit_lock);
		for (j = 0; j < VIA_NUM_BLIT_SLOTS; ++j) {
			DRM_INIT_WAITQUEUE(blitq->blit_queue + j);
		}
		DRM_INIT_WAITQUEUE(&blitq->busy_queue);
		INIT_WORK(&blitq->wq, via_dmablit_workqueue, blitq);
		init_timer(&blitq->poll_timer);
		blitq->poll_timer.function = &via_dmablit_timer;
		blitq->poll_timer.data = (unsigned long) blitq;
	}	
}

/*
 * Build all info and do all mappings required for a blit.
 */
		

static int
via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
	int draw = xfer->to_fb;
	int ret = 0;
	
	vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	vsg->bounce_buffer = NULL;

	vsg->state = dr_via_sg_init;

	if (xfer->num_lines <= 0 || xfer->line_length <= 0) {
		DRM_ERROR("Zero size bitblt.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * The check below is a driver limitation, not a hardware one. We
	 * don't want to lock unused pages, and we don't want to incorporate
	 * the extra logic of avoiding them. Make sure there are none.
	 * (Not a big limitation anyway.)
	 */

	if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) {
		DRM_ERROR("Too large system memory stride. Stride: %d, "
			  "Length: %d\n", xfer->mem_stride, xfer->line_length);
		return DRM_ERR(EINVAL);
	}

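	/*
	 * If both source and destination are fully contiguous, collapse the
	 * whole transfer into a single long line.
	 */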
	if ((xfer->mem_stride == xfer->line_length) &&
	    (xfer->fb_stride == xfer->line_length)) {
		xfer->mem_stride *= xfer->num_lines;
		xfer->line_length = xfer->mem_stride;
		xfer->fb_stride = xfer->mem_stride;
		xfer->num_lines = 1;
	}

	/*
	 * Don't lock an arbitrarily large number of pages, since that causes a
	 * DoS security hole.
	 */

	if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
		DRM_ERROR("Too large PCI DMA bitblt.\n");
		return DRM_ERR(EINVAL);
	}		

	/*
	 * We allow a negative fb stride so that images can be flipped during
	 * the transfer.
	 */

	if (xfer->mem_stride < xfer->line_length ||
	    abs(xfer->fb_stride) < xfer->line_length) {
		DRM_ERROR("Invalid frame-buffer / memory stride.\n");
		return DRM_ERR(EINVAL);
	}

	/*
	 * A hardware bug seems to be worked around if system memory addresses start on
	 * 16 byte boundaries. This seems a bit restrictive however. VIA has been
	 * contacted about this. Meanwhile, impose the following restrictions:
	 */

#ifdef VIA_BUGFREE
	if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}
#else
	if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) ||
	    ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
		DRM_ERROR("Invalid DRM bitblt alignment.\n");
		return DRM_ERR(EINVAL);
	}	
#endif

	if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
		DRM_ERROR("Could not lock DMA pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}

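	/*
	 * Two passes: first count the descriptors needed (mode 0), then
	 * allocate descriptor pages and build and map the chain (mode 1).
	 */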
	via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
	if (0 != (ret = via_alloc_desc_pages(vsg))) {
		DRM_ERROR("Could not allocate DMA descriptor pages.\n");
		via_free_sg_info(dev->pdev, vsg);
		return ret;
	}
	via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
	
	return 0;
}
	

/*
 * Reserve one free slot in the blit queue. Will wait up to one second for one
 * to become available. Otherwise -EBUSY is returned.
 */

static int 
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
	int ret = 0;
	unsigned long irqsave;

	DRM_DEBUG("Num free is %d\n", blitq->num_free);
	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	while (blitq->num_free == 0) {
		spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

		DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0);
		if (ret) {
			return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret;
		}
		
		spin_lock_irqsave(&blitq->blit_lock, irqsave);
	}
	
	blitq->num_free--;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);

	return 0;
}

/*
 * Hand back a free slot if we changed our mind.
 */

static void 
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
	unsigned long irqsave;

	spin_lock_irqsave(&blitq->blit_lock, irqsave);
	blitq->num_free++;
	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	DRM_WAKEUP(&blitq->busy_queue);
}

/*
 * Grab a free slot. Build blit info and queue a blit.
 */


static int 
via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer)	 
{
	drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
	drm_via_sg_info_t *vsg;
	drm_via_blitq_t *blitq;
	int ret;
	int engine;
	unsigned long irqsave;

	if (dev_priv == NULL) {
		DRM_ERROR("Called without initialization.\n");
		return DRM_ERR(EINVAL);
	}

	engine = (xfer->to_fb) ? 0 : 1;
	blitq = dev_priv->blit_queues + engine;
	if (0 != (ret = via_dmablit_grab_slot(blitq, engine))) {
		return ret;
	}
	if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) {
		via_dmablit_release_slot(blitq);
		return DRM_ERR(ENOMEM);
	}
	if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) {
		via_dmablit_release_slot(blitq);
		kfree(vsg);
		return ret;
	}
	spin_lock_irqsave(&blitq->blit_lock, irqsave);

	blitq->blits[blitq->head++] = vsg;
	if (blitq->head >= VIA_NUM_BLIT_SLOTS) 
		blitq->head = 0;
	blitq->num_outstanding++;
	xfer->sync.sync_handle = ++blitq->cur_blit_handle; 

	spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
	xfer->sync.engine = engine;

	via_dmablit_handler(dev, engine, 0);

	return 0;
}

/*
 * Sync on a previously submitted blit. Note that the X server uses signals
 * extensively, and there is a high probability that this IOCTL will be
 * interrupted by a signal. In that case it returns with -EAGAIN for the
 * signal to be delivered. The caller should then reissue the IOCTL. This is
 * similar to what is being done for drmGetLock().
 */

int
via_dma_blit_sync( DRM_IOCTL_ARGS )
{
	drm_via_blitsync_t sync;
	int err;
	DRM_DEVICE;

	DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync));
	
	if (sync.engine >= VIA_NUM_BLIT_ENGINES) 
		return DRM_ERR(EINVAL);

	err = via_dmablit_sync(dev, sync.sync_handle, sync.engine);

	if (DRM_ERR(EINTR) == err)
		err = DRM_ERR(EAGAIN);

	return err;
}
	

/*
 * Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
 * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should 
 * be reissued. See the above IOCTL code.
 */

int 
via_dma_blit( DRM_IOCTL_ARGS )
{
	drm_via_dmablit_t xfer;
	int err;
	DRM_DEVICE;

	DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer));

	err = via_dmablit(dev, &xfer);

	DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer));

	return err;
}
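
/*
 * For reference, a rough sketch of the expected userspace calling sequence
 * (assuming the libdrm drmCommandWriteRead()/drmCommandWrite() wrappers and
 * the DRM_VIA_DMA_BLIT / DRM_VIA_BLIT_SYNC command numbers of the via DRM
 * interface). Both ioctls are reissued on -EAGAIN, as described above:
 *
 *	drm_via_dmablit_t xfer = { ... };
 *	drm_via_blitsync_t sync;
 *	int err;
 *
 *	do {
 *		err = drmCommandWriteRead(fd, DRM_VIA_DMA_BLIT,
 *					  &xfer, sizeof(xfer));
 *	} while (err == -EAGAIN);
 *
 *	if (err == 0) {
 *		sync = xfer.sync;
 *		do {
 *			err = drmCommandWrite(fd, DRM_VIA_BLIT_SYNC,
 *					      &sync, sizeof(sync));
 *		} while (err == -EAGAIN);
 *	}
 */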