author    Doug Rabson <dfr@freebsd.org>  2000-06-13 17:38:09 +0000
committer Doug Rabson <dfr@freebsd.org>  2000-06-13 17:38:09 +0000
commit    d399dbcd569a66f5bf4863ffa2aab95fa8ebd5fc (patch)
tree      b8593f0ebff44aae83b89416e193d6ffc1ad0f24 /bsd/mga
parent    2fbd4bf0189cf6d421000c7eea85fee3b9c79ed0 (diff)
Merged bsd-1-0-1
Diffstat (limited to 'bsd/mga')
-rw-r--r--  bsd/mga/Makefile         16
-rw-r--r--  bsd/mga/mga_bufs.c      604
-rw-r--r--  bsd/mga/mga_context.c   196
-rw-r--r--  bsd/mga/mga_dma.c      1106
-rw-r--r--  bsd/mga/mga_drv.c       714
-rw-r--r--  bsd/mga/mga_drv.h       420
-rw-r--r--  bsd/mga/mga_state.c    1074
7 files changed, 4130 insertions, 0 deletions
diff --git a/bsd/mga/Makefile b/bsd/mga/Makefile
new file mode 100644
index 00000000..226728b5
--- /dev/null
+++ b/bsd/mga/Makefile
@@ -0,0 +1,16 @@
+# $FreeBSD$
+
+KMOD = mga
+SRCS = mga_drv.c mga_context.c mga_state.c mga_bufs.c mga_dma.c
+SRCS += device_if.h bus_if.h pci_if.h
+CFLAGS += ${DEBUG_FLAGS} -I..
+KERN = /usr/src/sys
+KMODDEPS = drm
+
+@:
+ ln -sf /sys @
+
+machine:
+ ln -sf /sys/i386/include machine
+
+.include <bsd.kmod.mk>
diff --git a/bsd/mga/mga_bufs.c b/bsd/mga/mga_bufs.c
new file mode 100644
index 00000000..4ae05181
--- /dev/null
+++ b/bsd/mga/mga_bufs.c
@@ -0,0 +1,604 @@
+/* mga_bufs.c -- IOCTLs to manage buffers
+ * Created: Thu Jan 6 01:47:26 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+#include <sys/mman.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_map.h>
+
+
+static int
+mga_addbufs_agp(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ drm_buf_entry_t *entry;
+ drm_buf_t *buf;
+ unsigned long offset;
+ unsigned long agp_offset;
+ int count;
+ int order;
+ int size;
+ int alignment;
+ int page_order;
+ int total;
+ int byte_count;
+ int i;
+
+ if (!dma) return EINVAL;
+
+ request = *(drm_buf_desc_t *) data;
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+ agp_offset = request.agp_start;
+ alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) :size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+ byte_count = 0;
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %ld\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
+ if (dev->queue_count) return EBUSY; /* Not while in use */
+ simple_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ simple_unlock(&dev->count_lock);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ offset = 0;
+
+
+ while(entry->buf_count < count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+
+ DRM_DEBUG("offset : %ld\n", offset);
+
+ buf->offset = offset; /* Hrm */
+ buf->bus_address = dev->agp->base + agp_offset + offset;
+ buf->address = (void *)(agp_offset + offset + dev->agp->base);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+
+ buf->dev_private = drm_alloc(sizeof(drm_mga_buf_priv_t), DRM_MEM_BUFS);
+ buf->dev_priv_size = sizeof(drm_mga_buf_priv_t);
+
+#if DRM_DMA_HISTOGRAM
+ timespecclear(&buf->time_queued);
+ timespecclear(&buf->time_dispatched);
+ timespecclear(&buf->time_completed);
+ timespecclear(&buf->time_freed);
+#endif
+ offset = offset + alignment;
+ entry->buf_count++;
+ byte_count += PAGE_SIZE << page_order;
+
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+
+ DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
+
+ dma->byte_count += byte_count;
+
+ DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ *(drm_buf_desc_t *) data = request;
+
+ atomic_dec(&dev->buf_alloc);
+
+ DRM_DEBUG("count: %d\n", count);
+ DRM_DEBUG("order: %d\n", order);
+ DRM_DEBUG("size: %d\n", size);
+ DRM_DEBUG("agp_offset: %ld\n", agp_offset);
+ DRM_DEBUG("alignment: %d\n", alignment);
+ DRM_DEBUG("page_order: %d\n", page_order);
+ DRM_DEBUG("total: %d\n", total);
+ DRM_DEBUG("byte_count: %d\n", byte_count);
+
+ dma->flags = _DRM_DMA_USE_AGP;
+
+ DRM_DEBUG("dma->flags : %x\n", dma->flags);
+
+ return 0;
+}
+
+static int
+mga_addbufs_pci(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int count;
+ int order;
+ int size;
+ int total;
+ int page_order;
+ drm_buf_entry_t *entry;
+ unsigned long page;
+ drm_buf_t *buf;
+ int alignment;
+ unsigned long offset;
+ int i;
+ int byte_count;
+ int page_count;
+
+ if (!dma) return EINVAL;
+
+ request = *(drm_buf_desc_t *) data;
+
+ count = request.count;
+ order = drm_order(request.size);
+ size = 1 << order;
+
+ DRM_DEBUG("count = %d, size = %d (%d), order = %d, queue_count = %d\n",
+ request.count, request.size, size, order, dev->queue_count);
+
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
+ if (dev->queue_count) return EBUSY; /* Not while in use */
+
+ alignment = (request.flags & _DRM_PAGE_ALIGN) ? round_page(size) :size;
+ page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
+ total = PAGE_SIZE << page_order;
+
+ simple_lock(&dev->count_lock);
+ if (dev->buf_use) {
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ atomic_inc(&dev->buf_alloc);
+ simple_unlock(&dev->count_lock);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ entry = &dma->bufs[order];
+ if (entry->buf_count) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM; /* May only call once for each order */
+ }
+
+ entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ if (!entry->buflist) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM;
+ }
+ memset(entry->buflist, 0, count * sizeof(*entry->buflist));
+
+ entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
+ DRM_MEM_SEGS);
+ if (!entry->seglist) {
+ drm_free(entry->buflist,
+ count * sizeof(*entry->buflist),
+ DRM_MEM_BUFS);
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ atomic_dec(&dev->buf_alloc);
+ return ENOMEM;
+ }
+ memset(entry->seglist, 0, count * sizeof(*entry->seglist));
+
+ dma->pagelist = drm_realloc(dma->pagelist,
+ dma->page_count * sizeof(*dma->pagelist),
+ (dma->page_count + (count << page_order))
+ * sizeof(*dma->pagelist),
+ DRM_MEM_PAGES);
+ DRM_DEBUG("pagelist: %d entries\n",
+ dma->page_count + (count << page_order));
+
+
+ entry->buf_size = size;
+ entry->page_order = page_order;
+ byte_count = 0;
+ page_count = 0;
+ while (entry->buf_count < count) {
+ if (!(page = drm_alloc_pages(page_order, DRM_MEM_DMA))) break;
+ entry->seglist[entry->seg_count++] = page;
+ for (i = 0; i < (1 << page_order); i++) {
+ DRM_DEBUG("page %d @ 0x%08lx\n",
+ dma->page_count + page_count,
+ page + PAGE_SIZE * i);
+ dma->pagelist[dma->page_count + page_count++]
+ = page + PAGE_SIZE * i;
+ }
+ for (offset = 0;
+ offset + size <= total && entry->buf_count < count;
+ offset += alignment, ++entry->buf_count) {
+ buf = &entry->buflist[entry->buf_count];
+ buf->idx = dma->buf_count + entry->buf_count;
+ buf->total = alignment;
+ buf->order = order;
+ buf->used = 0;
+ buf->offset = (dma->byte_count + byte_count + offset);
+ buf->address = (void *)(page + offset);
+ buf->next = NULL;
+ buf->waiting = 0;
+ buf->pending = 0;
+ buf->dma_wait = 0;
+ buf->pid = 0;
+#if DRM_DMA_HISTOGRAM
+ timespecclear(&buf->time_queued);
+ timespecclear(&buf->time_dispatched);
+ timespecclear(&buf->time_completed);
+ timespecclear(&buf->time_freed);
+#endif
+ DRM_DEBUG("buffer %d @ %p\n",
+ entry->buf_count, buf->address);
+ }
+ byte_count += PAGE_SIZE << page_order;
+ }
+
+ dma->buflist = drm_realloc(dma->buflist,
+ dma->buf_count * sizeof(*dma->buflist),
+ (dma->buf_count + entry->buf_count)
+ * sizeof(*dma->buflist),
+ DRM_MEM_BUFS);
+ for (i = dma->buf_count; i < dma->buf_count + entry->buf_count; i++)
+ dma->buflist[i] = &entry->buflist[i - dma->buf_count];
+
+ dma->buf_count += entry->buf_count;
+ dma->seg_count += entry->seg_count;
+ dma->page_count += entry->seg_count << page_order;
+ dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);
+
+ drm_freelist_create(&entry->freelist, entry->buf_count);
+ for (i = 0; i < entry->buf_count; i++) {
+ drm_freelist_put(dev, &entry->freelist, &entry->buflist[i]);
+ }
+
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ request.count = entry->buf_count;
+ request.size = size;
+
+ *(drm_buf_desc_t *) data = request;
+
+ atomic_dec(&dev->buf_alloc);
+ return 0;
+}
+
+int
+mga_addbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_buf_desc_t request;
+
+ request = *(drm_buf_desc_t *) data;
+
+ if(request.flags & _DRM_AGP_BUFFER)
+ return mga_addbufs_agp(kdev, cmd, data, flags, p);
+ else
+ return mga_addbufs_pci(kdev, cmd, data, flags, p);
+}
+
+int
+mga_infobufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_info_t request;
+ int i;
+ int count;
+ int error;
+
+ if (!dma) return EINVAL;
+
+ simple_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ simple_unlock(&dev->count_lock);
+
+ request = *(drm_buf_info_t *) data;
+
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) ++count;
+ }
+
+ DRM_DEBUG("count = %d\n", count);
+
+ if (request.count >= count) {
+ for (i = 0, count = 0; i < DRM_MAX_ORDER+1; i++) {
+ if (dma->bufs[i].buf_count) {
+ error = copyout(&dma->bufs[i].buf_count,
+ &request.list[count].count,
+ sizeof(dma->bufs[0]
+ .buf_count));
+ if (error) return error;
+ error = copyout(&dma->bufs[i].buf_size,
+ &request.list[count].size,
+ sizeof(dma->bufs[0]
+ .buf_size));
+ if (error) return error;
+ error = copyout(&dma->bufs[i]
+ .freelist.low_mark,
+ &request.list[count].low_mark,
+ sizeof(dma->bufs[0]
+ .freelist.low_mark));
+ if (error) return error;
+ error = copyout(&dma->bufs[i]
+ .freelist.high_mark,
+ &request.list[count].high_mark,
+ sizeof(dma->bufs[0]
+ .freelist.high_mark));
+ if (error) return error;
+ DRM_DEBUG("%d %d %d %d %d\n",
+ i,
+ dma->bufs[i].buf_count,
+ dma->bufs[i].buf_size,
+ dma->bufs[i].freelist.low_mark,
+ dma->bufs[i].freelist.high_mark);
+ ++count;
+ }
+ }
+ }
+ request.count = count;
+
+ *(drm_buf_info_t *) data = request;
+
+ return 0;
+}
+
+int
+mga_markbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_desc_t request;
+ int order;
+ drm_buf_entry_t *entry;
+
+ if (!dma) return EINVAL;
+
+ request = *(drm_buf_desc_t *) data;
+
+ DRM_DEBUG("%d, %d, %d\n",
+ request.size, request.low_mark, request.high_mark);
+ order = drm_order(request.size);
+ if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return EINVAL;
+ entry = &dma->bufs[order];
+
+ if (request.low_mark < 0 || request.low_mark > entry->buf_count)
+ return EINVAL;
+ if (request.high_mark < 0 || request.high_mark > entry->buf_count)
+ return EINVAL;
+
+ entry->freelist.low_mark = request.low_mark;
+ entry->freelist.high_mark = request.high_mark;
+
+ return 0;
+}
+
+int
+mga_freebufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_free_t request;
+ int i;
+ int error;
+ int idx;
+ drm_buf_t *buf;
+
+ if (!dma) return EINVAL;
+
+ request = *(drm_buf_free_t *) data;
+
+ DRM_DEBUG("%d\n", request.count);
+ for (i = 0; i < request.count; i++) {
+ error = copyin(&request.list[i],
+ &idx,
+ sizeof(idx));
+ if (error) return error;
+ if (idx < 0 || idx >= dma->buf_count) {
+ DRM_ERROR("Index %d (of %d max)\n",
+ idx, dma->buf_count - 1);
+ return EINVAL;
+ }
+ buf = dma->buflist[idx];
+ if (buf->pid != p->p_pid) {
+ DRM_ERROR("Process %d freeing buffer owned by %d\n",
+ p->p_pid, buf->pid);
+ return EINVAL;
+ }
+ drm_free_buffer(dev, buf);
+ }
+
+ return 0;
+}
+
+int
+mga_mapbufs(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ const int zero = 0;
+ vm_offset_t virtual;
+ vm_offset_t address;
+ drm_buf_map_t request;
+ int i;
+
+ if (!dma) return EINVAL;
+
+ DRM_DEBUG("\n");
+
+ simple_lock(&dev->count_lock);
+ if (atomic_read(&dev->buf_alloc)) {
+ simple_unlock(&dev->count_lock);
+ DRM_DEBUG("Busy\n");
+ return EBUSY;
+ }
+ ++dev->buf_use; /* Can't allocate more after this call */
+ simple_unlock(&dev->count_lock);
+
+ request = *(drm_buf_map_t *) data;
+
+ DRM_DEBUG("mga_mapbufs\n");
+ DRM_DEBUG("dma->flags : %x\n", dma->flags);
+
+ if (request.count >= dma->buf_count) {
+ if(dma->flags & _DRM_DMA_USE_AGP) {
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_map_t *map = NULL;
+
+ map = dev->maplist[dev_priv->buffer_map_idx];
+ if (!map) {
+ DRM_DEBUG("map is null\n");
+ retcode = EINVAL;
+ goto done;
+ }
+
+ DRM_DEBUG("map->offset : %lx\n", map->offset);
+ DRM_DEBUG("map->size : %lx\n", map->size);
+ DRM_DEBUG("map->type : %d\n", map->type);
+ DRM_DEBUG("map->flags : %x\n", map->flags);
+ DRM_DEBUG("map->handle : %p\n", map->handle);
+ DRM_DEBUG("map->mtrr : %d\n", map->mtrr);
+ virtual = 0;
+ retcode = vm_mmap(&p->p_vmspace->vm_map,
+ &virtual,
+ map->size,
+ PROT_READ|PROT_WRITE, VM_PROT_ALL,
+ MAP_SHARED,
+ SLIST_FIRST(&kdev->si_hlist),
+ map->offset);
+ } else {
+ virtual = 0;
+ retcode = vm_mmap(&p->p_vmspace->vm_map,
+ &virtual,
+ round_page(dma->byte_count),
+ PROT_READ|PROT_WRITE, VM_PROT_ALL,
+ MAP_SHARED,
+ SLIST_FIRST(&kdev->si_hlist),
+ 0);
+ }
+ if (retcode) {
+ /* Real error */
+ DRM_DEBUG("mmap error\n");
+ goto done;
+ }
+ request.virtual = (void *)virtual;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ retcode = copyout(&dma->buflist[i]->idx,
+ &request.list[i].idx,
+ sizeof(request.list[0].idx));
+ if (retcode) goto done;
+ retcode = copyout(&dma->buflist[i]->total,
+ &request.list[i].total,
+ sizeof(request.list[0].total));
+ if (retcode) goto done;
+ retcode = copyout(&zero,
+ &request.list[i].used,
+ sizeof(request.list[0].used));
+ if (retcode) goto done;
+ address = virtual + dma->buflist[i]->offset;
+ retcode = copyout(&address,
+ &request.list[i].address,
+ sizeof(address));
+ if (retcode) goto done;
+ }
+ }
+ done:
+ request.count = dma->buf_count;
+ DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
+
+ *(drm_buf_map_t *) data = request;
+
+ DRM_DEBUG("retcode : %d\n", retcode);
+
+ return retcode;
+}
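
[Editor's note] The ioctl handlers above all follow the same convention: snapshot the user argument from `data`, operate on the copy, and write the copy back before returning, with copyin()/copyout() reserved for the user-supplied list pointers. The following is a minimal, hypothetical userspace sketch of driving mga_mapbufs(); it is not part of this commit and assumes the drm_buf_map_t fields used above (count, virtual, list[] with idx/total/used/address) and the DRM_IOCTL_MAP_BUFS request number from the DRM's drm.h.

    /* Hypothetical userspace sketch -- not part of this commit. */
    #include <sys/ioctl.h>
    #include <stdlib.h>
    #include "drm.h"

    static int map_all_buffers(int fd, int count)
    {
            drm_buf_map_t bm;

            bm.count = count;                       /* capacity of bm.list */
            bm.list = malloc(count * sizeof(*bm.list));
            if (bm.list == NULL)
                    return -1;
            if (ioctl(fd, DRM_IOCTL_MAP_BUFS, &bm) != 0)
                    return -1;
            /* bm.virtual is the base of the mapped DMA pool; each
             * bm.list[i].address is that buffer's user virtual address. */
            return bm.count;                        /* buffers actually mapped */
    }
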
diff --git a/bsd/mga/mga_context.c b/bsd/mga/mga_context.c
new file mode 100644
index 00000000..63f1b42b
--- /dev/null
+++ b/bsd/mga/mga_context.c
@@ -0,0 +1,196 @@
+/* mga_context.c -- IOCTLs for mga contexts
+ * Created: Mon Dec 13 09:51:35 1999 by faith@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+
+static int mga_alloc_queue(drm_device_t *dev)
+{
+ int temp = drm_ctxbitmap_next(dev);
+ DRM_DEBUG("mga_alloc_queue: %d\n", temp);
+ return temp;
+}
+
+int mga_context_switch(drm_device_t *dev, int old, int new)
+{
+ char buf[64];
+
+ atomic_inc(&dev->total_ctx);
+
+ if (test_and_set_bit(0, &dev->context_flag)) {
+ DRM_ERROR("Reentering -- FIXME\n");
+ return EBUSY;
+ }
+
+#if DRM_DMA_HISTOGRAM
+ getnanotime(&dev->ctx_start);
+#endif
+
+ DRM_DEBUG("Context switch from %d to %d\n", old, new);
+
+ if (new == dev->last_context) {
+ clear_bit(0, &dev->context_flag);
+ return 0;
+ }
+
+ if (drm_flags & DRM_FLAG_NOCTX) {
+ mga_context_switch_complete(dev, new);
+ } else {
+ sprintf(buf, "C %d %d\n", old, new);
+ drm_write_string(dev, buf);
+ }
+
+ return 0;
+}
+
+int mga_context_switch_complete(drm_device_t *dev, int new)
+{
+ dev->last_context = new; /* PRE/POST: This is the _only_ writer. */
+ dev->last_switch = ticks;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("Lock isn't held after context switch\n");
+ }
+
+ /* If a context switch is ever initiated
+ when the kernel holds the lock, release
+ that lock here. */
+#if DRM_DMA_HISTOGRAM
+ {
+ struct timespec ts;
+ getnanotime(&ts);
+ timespecsub(&ts, &dev->lck_start);
+ atomic_inc(&dev->histo.ctx[drm_histogram_slot(&ts)]);
+ }
+#endif
+ clear_bit(0, &dev->context_flag);
+ wakeup(&dev->context_wait);
+
+ return 0;
+}
+
+int
+mga_resctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_ctx_res_t res;
+ drm_ctx_t ctx;
+ int i, error;
+
+ DRM_DEBUG("%d\n", DRM_RESERVED_CONTEXTS);
+ res = *(drm_ctx_res_t *) data;
+ if (res.count >= DRM_RESERVED_CONTEXTS) {
+ memset(&ctx, 0, sizeof(ctx));
+ for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
+ ctx.handle = i;
+ error = copyout(&i, &res.contexts[i], sizeof(i));
+ if (error) return error;
+ }
+ }
+ res.count = DRM_RESERVED_CONTEXTS;
+ *(drm_ctx_res_t *) data = res;
+ return 0;
+}
+
+int
+mga_addctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ if ((ctx.handle = mga_alloc_queue(dev)) == DRM_KERNEL_CONTEXT) {
+ /* Skip kernel's context and get a new one. */
+ ctx.handle = mga_alloc_queue(dev);
+ }
+ if (ctx.handle == -1) {
+ DRM_DEBUG("Not enough free contexts.\n");
+ /* Should this return EBUSY instead? */
+ return ENOMEM;
+ }
+ DRM_DEBUG("%d\n", ctx.handle);
+ *(drm_ctx_t *) data = ctx;
+ return 0;
+}
+
+int
+mga_modctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ /* This does nothing for the mga */
+ return 0;
+}
+
+int mga_getctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ /* This is 0, because we don't handle any context flags */
+ ctx.flags = 0;
+ *(drm_ctx_t *) data = ctx;
+ return 0;
+}
+
+int mga_switchctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ return mga_context_switch(dev, dev->last_context, ctx.handle);
+}
+
+int mga_newctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ mga_context_switch_complete(dev, ctx.handle);
+
+ return 0;
+}
+
+int mga_rmctx(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_ctx_t ctx;
+
+ ctx = *(drm_ctx_t *) data;
+ DRM_DEBUG("%d\n", ctx.handle);
+ if(ctx.handle != DRM_KERNEL_CONTEXT) {
+ drm_ctxbitmap_free(dev, ctx.handle);
+ }
+
+ return 0;
+}
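
[Editor's note] Context handles here are just indices drawn from the DRM context bitmap; each handler copies a drm_ctx_t in and out, and only mga_switchctx()/mga_newctx() reach the switch logic above. A hypothetical client-side sketch follows; it is not part of this commit and assumes the drm_ctx_t definition and the ADD_CTX/RM_CTX requests that the ioctl table in mga_drv.c below wires to mga_addctx() and mga_rmctx().

    /* Hypothetical userspace sketch -- not part of this commit. */
    #include <sys/ioctl.h>
    #include <string.h>
    #include "drm.h"

    static int with_context(int fd)
    {
            drm_ctx_t ctx;

            memset(&ctx, 0, sizeof(ctx));
            if (ioctl(fd, DRM_IOCTL_ADD_CTX, &ctx) != 0)
                    return -1;      /* e.g. mga_addctx() returned ENOMEM */
            /* ctx.handle is now a context id distinct from
             * DRM_KERNEL_CONTEXT; it can be passed to DRM_IOCTL_SWITCH_CTX. */
            ioctl(fd, DRM_IOCTL_RM_CTX, &ctx);
            return 0;
    }
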
diff --git a/bsd/mga/mga_dma.c b/bsd/mga/mga_dma.c
new file mode 100644
index 00000000..7055d28d
--- /dev/null
+++ b/bsd/mga/mga_dma.c
@@ -0,0 +1,1106 @@
+/* mga_dma.c -- DMA support for mga g200/g400
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ * Keith Whitwell <keithw@valinux.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+
+#include <machine/bus.h>
+#include <machine/resource.h>
+#include <sys/rman.h>
+#include <vm/vm.h>
+#include <vm/pmap.h>
+
+#define MGA_REG(reg) 2
+#define MGA_BASE(reg) ((unsigned long) \
+ ((drm_device_t *)dev)->maplist[MGA_REG(reg)]->handle)
+#define MGA_ADDR(reg) (MGA_BASE(reg) + reg)
+#define MGA_DEREF(reg) *(__volatile__ int *)MGA_ADDR(reg)
+#define MGA_READ(reg) MGA_DEREF(reg)
+#define MGA_WRITE(reg,val) do { MGA_DEREF(reg) = val; } while (0)
+
+#define PDEA_pagpxfer_enable 0x2
+
+static int mga_flush_queue(drm_device_t *dev);
+
+static unsigned long mga_alloc_page(drm_device_t *dev)
+{
+ unsigned long address;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ address = (unsigned long) drm_alloc(PAGE_SIZE, DRM_MEM_DMA);
+ if(address == 0UL) {
+ return 0;
+ }
+
+ return address;
+}
+
+static void mga_free_page(drm_device_t *dev, unsigned long page)
+{
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if(page == 0UL) {
+ return;
+ }
+ drm_free((void *) page, PAGE_SIZE, DRM_MEM_DMA);
+ return;
+}
+
+static void mga_delay(void)
+{
+ return;
+}
+
+void mga_flush_write_combine(void)
+{
+ int xchangeDummy;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ __asm__ volatile(" push %%eax ; xchg %%eax, %0 ; pop %%eax" : : "m" (xchangeDummy));
+ __asm__ volatile(" push %%eax ; push %%ebx ; push %%ecx ; push %%edx ;"
+ " movl $0,%%eax ; cpuid ; pop %%edx ; pop %%ecx ; pop %%ebx ;"
+ " pop %%eax" : /* no outputs */ : /* no inputs */ );
+}
+
+/* These are two age tags that will never be sent to
+ * the hardware */
+#define MGA_BUF_USED 0xffffffff
+#define MGA_BUF_FREE 0
+
+static int mga_freelist_init(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_mga_freelist_t *item;
+ int i;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ dev_priv->head = drm_alloc(sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
+ if(dev_priv->head == NULL) return ENOMEM;
+ memset(dev_priv->head, 0, sizeof(drm_mga_freelist_t));
+ dev_priv->head->age = MGA_BUF_USED;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[ i ];
+ buf_priv = buf->dev_private;
+ item = drm_alloc(sizeof(drm_mga_freelist_t),
+ DRM_MEM_DRIVER);
+ if(item == NULL) return ENOMEM;
+ memset(item, 0, sizeof(drm_mga_freelist_t));
+ item->age = MGA_BUF_FREE;
+ item->prev = dev_priv->head;
+ item->next = dev_priv->head->next;
+ if(dev_priv->head->next != NULL)
+ dev_priv->head->next->prev = item;
+ if(item->next == NULL) dev_priv->tail = item;
+ item->buf = buf;
+ buf_priv->my_freelist = item;
+ buf_priv->discard = 0;
+ buf_priv->dispatched = 0;
+ dev_priv->head->next = item;
+ }
+
+ return 0;
+}
+
+static void mga_freelist_cleanup(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_mga_freelist_t *item;
+ drm_mga_freelist_t *prev;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ item = dev_priv->head;
+ while(item) {
+ prev = item;
+ item = item->next;
+ drm_free(prev, sizeof(drm_mga_freelist_t), DRM_MEM_DRIVER);
+ }
+
+ dev_priv->head = dev_priv->tail = NULL;
+}
+
+/* Frees dispatch lock */
+static __inline void mga_dma_quiescent(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned long end;
+ int i;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ end = ticks + (hz*3);
+ while(1) {
+ if(!test_and_set_bit(MGA_IN_DISPATCH,
+ &dev_priv->dispatch_status)) {
+ break;
+ }
+ if((signed)(end - ticks) <= 0) {
+ DRM_ERROR("irqs: %d wanted %d\n",
+ atomic_read(&dev->total_irq),
+ atomic_read(&dma->total_lost));
+ DRM_ERROR("lockup\n");
+ goto out_nolock;
+ }
+ for (i = 0 ; i < 2000 ; i++) mga_delay();
+ }
+ end = ticks + (hz*3);
+ DRM_DEBUG("quiescent status : %x\n", MGA_READ(MGAREG_STATUS));
+ while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
+ if((signed)(end - ticks) <= 0) {
+ DRM_ERROR("irqs: %d wanted %d\n",
+ atomic_read(&dev->total_irq),
+ atomic_read(&dma->total_lost));
+ DRM_ERROR("lockup\n");
+ goto out_status;
+ }
+ for (i = 0 ; i < 2000 ; i++) mga_delay();
+ }
+ sarea_priv->dirty |= MGA_DMA_FLUSH;
+
+out_status:
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
+out_nolock:
+}
+
+static void mga_reset_freelist(drm_device_t *dev)
+{
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ int i;
+
+ for (i = 0; i < dma->buf_count; i++) {
+ buf = dma->buflist[ i ];
+ buf_priv = buf->dev_private;
+ buf_priv->my_freelist->age = MGA_BUF_FREE;
+ }
+}
+
+/* Least recently used :
+ * These operations are not atomic b/c they are protected by the
+ * hardware lock */
+
+drm_buf_t *mga_freelist_get(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+ drm_mga_freelist_t *prev;
+ drm_mga_freelist_t *next;
+ static int failed = 0;
+ int ret, s;
+
+ DRM_DEBUG("%s : tail->age : %d last_prim_age : %d\n", __FUNCTION__,
+ dev_priv->tail->age, dev_priv->last_prim_age);
+
+ if(failed >= 1000 && dev_priv->tail->age >= dev_priv->last_prim_age) {
+ DRM_DEBUG("I'm waiting on the freelist!!! %d\n",
+ dev_priv->last_prim_age);
+ s = splsofttq();
+ set_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
+ for (;;) {
+ mga_dma_schedule(dev, 0);
+ if(!test_bit(MGA_IN_GETBUF,
+ &dev_priv->dispatch_status))
+ break;
+ atomic_inc(&dev->total_sleeps);
+ ret = tsleep(&dev_priv->buf_queue, PZERO|PCATCH,
+ "mgafg", 0);
+ if (ret) {
+ clear_bit(MGA_IN_GETBUF,
+ &dev_priv->dispatch_status);
+ splx(s);
+ goto failed_getbuf;
+ }
+ }
+ splx(s);
+ }
+
+ if(dev_priv->tail->age < dev_priv->last_prim_age) {
+ prev = dev_priv->tail->prev;
+ next = dev_priv->tail;
+ prev->next = NULL;
+ next->prev = next->next = NULL;
+ dev_priv->tail = prev;
+ next->age = MGA_BUF_USED;
+ failed = 0;
+ return next->buf;
+ }
+
+failed_getbuf:
+ failed++;
+ return NULL;
+}
+
+int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf)
+{
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_freelist_t *prev;
+ drm_mga_freelist_t *head;
+ drm_mga_freelist_t *next;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if(buf_priv->my_freelist->age == MGA_BUF_USED) {
+ /* Discarded buffer, put it on the tail */
+ next = buf_priv->my_freelist;
+ next->age = MGA_BUF_FREE;
+ prev = dev_priv->tail;
+ prev->next = next;
+ next->prev = prev;
+ next->next = NULL;
+ dev_priv->tail = next;
+ DRM_DEBUG("Discarded\n");
+ } else {
+ /* Normally aged buffer, put it on the head + 1,
+ * as the real head is a sentinel element
+ */
+ next = buf_priv->my_freelist;
+ head = dev_priv->head;
+ prev = head->next;
+ head->next = next;
+ prev->prev = next;
+ next->prev = head;
+ next->next = prev;
+ }
+
+ return 0;
+}
+
+static int mga_init_primary_bufs(drm_device_t *dev, drm_mga_init_t *init)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_prim_buf_t *prim_buffer;
+ int i, temp, size_of_buf;
+ int offset = init->reserved_map_agpstart;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ dev_priv->primary_size = ((init->primary_size + PAGE_SIZE - 1) /
+ PAGE_SIZE) * PAGE_SIZE;
+ size_of_buf = dev_priv->primary_size / MGA_NUM_PRIM_BUFS;
+ dev_priv->warp_ucode_size = init->warp_ucode_size;
+ dev_priv->prim_bufs = drm_alloc(sizeof(drm_mga_prim_buf_t *) *
+ (MGA_NUM_PRIM_BUFS + 1),
+ DRM_MEM_DRIVER);
+ if(dev_priv->prim_bufs == NULL) {
+ DRM_ERROR("Unable to allocate memory for prim_buf\n");
+ return ENOMEM;
+ }
+ memset(dev_priv->prim_bufs,
+ 0, sizeof(drm_mga_prim_buf_t *) * (MGA_NUM_PRIM_BUFS + 1));
+
+ temp = init->warp_ucode_size + dev_priv->primary_size;
+ temp = ((temp + PAGE_SIZE - 1) / PAGE_SIZE) * PAGE_SIZE;
+
+ dev_priv->ioremap = drm_ioremap(dev->agp->base + offset,
+ temp);
+ if(dev_priv->ioremap == NULL) {
+ DRM_DEBUG("Ioremap failed\n");
+ return ENOMEM;
+ }
+ dev_priv->wait_queue = 0;
+
+ for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
+ prim_buffer = drm_alloc(sizeof(drm_mga_prim_buf_t),
+ DRM_MEM_DRIVER);
+ if(prim_buffer == NULL) return ENOMEM;
+ memset(prim_buffer, 0, sizeof(drm_mga_prim_buf_t));
+ prim_buffer->phys_head = offset + dev->agp->base;
+ prim_buffer->current_dma_ptr =
+ prim_buffer->head =
+ (u_int32_t *) (dev_priv->ioremap +
+ offset -
+ init->reserved_map_agpstart);
+ prim_buffer->num_dwords = 0;
+ prim_buffer->max_dwords = size_of_buf / sizeof(u_int32_t);
+ prim_buffer->max_dwords -= 5; /* Leave room for the softrap */
+ prim_buffer->sec_used = 0;
+ prim_buffer->idx = i;
+ prim_buffer->prim_age = i + 1;
+ offset = offset + size_of_buf;
+ dev_priv->prim_bufs[i] = prim_buffer;
+ }
+ dev_priv->current_prim_idx = 0;
+ dev_priv->next_prim =
+ dev_priv->last_prim =
+ dev_priv->current_prim =
+ dev_priv->prim_bufs[0];
+ dev_priv->next_prim_age = 2;
+ dev_priv->last_prim_age = 1;
+ set_bit(MGA_BUF_IN_USE, &dev_priv->current_prim->buffer_status);
+ return 0;
+}
+
+static void mga_fire_primary(drm_device_t *dev, drm_mga_prim_buf_t *prim)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int use_agp = PDEA_pagpxfer_enable;
+ unsigned long end;
+ int i;
+ int next_idx;
+ PRIMLOCALS;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ dev_priv->last_prim = prim;
+
+ /* We never check for overflow, b/c there is always room */
+ PRIMPTR(prim);
+ if(num_dwords <= 0) {
+ DRM_DEBUG("num_dwords == 0 when dispatched\n");
+ goto out_prim_wait;
+ }
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_DMAPAD, 0);
+ PRIMOUTREG( MGAREG_SOFTRAP, 0);
+ PRIMFINISH(prim);
+
+ end = ticks + (hz*3);
+ if(sarea_priv->dirty & MGA_DMA_FLUSH) {
+ DRM_DEBUG("Dma top flush\n");
+ while((MGA_READ(MGAREG_STATUS) & 0x00030001) != 0x00020000) {
+ if((signed)(end - ticks) <= 0) {
+ DRM_ERROR("irqs: %d wanted %d\n",
+ atomic_read(&dev->total_irq),
+ atomic_read(&dma->total_lost));
+ DRM_ERROR("lockup in fire primary "
+ "(Dma Top Flush)\n");
+ goto out_prim_wait;
+ }
+
+ for (i = 0 ; i < 4096 ; i++) mga_delay();
+ }
+ sarea_priv->dirty &= ~(MGA_DMA_FLUSH);
+ } else {
+ DRM_DEBUG("Status wait\n");
+ while((MGA_READ(MGAREG_STATUS) & 0x00020001) != 0x00020000) {
+ if((signed)(end - ticks) <= 0) {
+ DRM_ERROR("irqs: %d wanted %d\n",
+ atomic_read(&dev->total_irq),
+ atomic_read(&dma->total_lost));
+ DRM_ERROR("lockup in fire primary "
+ "(Status Wait)\n");
+ goto out_prim_wait;
+ }
+
+ for (i = 0 ; i < 4096 ; i++) mga_delay();
+ }
+ }
+
+ mga_flush_write_combine();
+ atomic_inc(&dev_priv->pending_bufs);
+ MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
+ MGA_WRITE(MGAREG_PRIMEND, (phys_head + num_dwords * 4) | use_agp);
+ prim->num_dwords = 0;
+ sarea_priv->last_enqueue = prim->prim_age;
+
+ next_idx = prim->idx + 1;
+ if(next_idx >= MGA_NUM_PRIM_BUFS)
+ next_idx = 0;
+
+ dev_priv->next_prim = dev_priv->prim_bufs[next_idx];
+ return;
+
+ out_prim_wait:
+ prim->num_dwords = 0;
+ prim->sec_used = 0;
+ clear_bit(MGA_BUF_IN_USE, &prim->buffer_status);
+ wakeup(&dev_priv->wait_queue);
+ clear_bit(MGA_BUF_SWAP_PENDING, &prim->buffer_status);
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
+}
+
+int mga_advance_primary(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_prim_buf_t *prim_buffer;
+ drm_device_dma_t *dma = dev->dma;
+ int next_prim_idx;
+ int ret = 0;
+ int s;
+
+ /* This needs to reset the primary buffer if available,
+ * we should collect stats on how many times it bites
+ * its tail */
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ next_prim_idx = dev_priv->current_prim_idx + 1;
+ if(next_prim_idx >= MGA_NUM_PRIM_BUFS)
+ next_prim_idx = 0;
+ prim_buffer = dev_priv->prim_bufs[next_prim_idx];
+ set_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);
+
+ /* In use is cleared in interrupt handler */
+
+ s = splsofttq();
+ if(test_and_set_bit(MGA_BUF_IN_USE, &prim_buffer->buffer_status)) {
+ for (;;) {
+ mga_dma_schedule(dev, 0);
+ if(!test_and_set_bit(MGA_BUF_IN_USE,
+ &prim_buffer->buffer_status))
+ break;
+ atomic_inc(&dev->total_sleeps);
+ atomic_inc(&dma->total_missed_sched);
+ ret = tsleep(&dev_priv->wait_queue, PZERO|PCATCH,
+ "mgaap", 0);
+ if (ret)
+ break;
+ }
+ if(ret) {
+ splx(s);
+ return ret;
+ }
+ }
+ clear_bit(MGA_IN_WAIT, &dev_priv->dispatch_status);
+ splx(s);
+
+ /* This primary buffer is now free to use */
+ prim_buffer->current_dma_ptr = prim_buffer->head;
+ prim_buffer->num_dwords = 0;
+ prim_buffer->sec_used = 0;
+ prim_buffer->prim_age = dev_priv->next_prim_age++;
+ if(prim_buffer->prim_age == 0 || prim_buffer->prim_age == 0xffffffff) {
+ mga_flush_queue(dev);
+ mga_dma_quiescent(dev);
+ mga_reset_freelist(dev);
+ prim_buffer->prim_age = (dev_priv->next_prim_age += 2);
+ }
+
+ /* Reset all buffer status stuff */
+ clear_bit(MGA_BUF_NEEDS_OVERFLOW, &prim_buffer->buffer_status);
+ clear_bit(MGA_BUF_FORCE_FIRE, &prim_buffer->buffer_status);
+ clear_bit(MGA_BUF_SWAP_PENDING, &prim_buffer->buffer_status);
+
+ dev_priv->current_prim = prim_buffer;
+ dev_priv->current_prim_idx = next_prim_idx;
+ return 0;
+}
+
+/* More dynamic performance decisions */
+static __inline int mga_decide_to_fire(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if(test_bit(MGA_BUF_FORCE_FIRE, &dev_priv->next_prim->buffer_status)) {
+ atomic_inc(&dma->total_prio);
+ return 1;
+ }
+
+ if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
+ dev_priv->next_prim->num_dwords) {
+ atomic_inc(&dma->total_prio);
+ return 1;
+ }
+
+ if (test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
+ dev_priv->next_prim->num_dwords) {
+ atomic_inc(&dma->total_prio);
+ return 1;
+ }
+
+ if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS - 1) {
+ if(test_bit(MGA_BUF_SWAP_PENDING,
+ &dev_priv->next_prim->buffer_status)) {
+ atomic_inc(&dma->total_dmas);
+ return 1;
+ }
+ }
+
+ if(atomic_read(&dev_priv->pending_bufs) <= MGA_NUM_PRIM_BUFS / 2) {
+ if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 8) {
+ atomic_inc(&dma->total_hit);
+ return 1;
+ }
+ }
+
+ if(atomic_read(&dev_priv->pending_bufs) >= MGA_NUM_PRIM_BUFS / 2) {
+ if(dev_priv->next_prim->sec_used >= MGA_DMA_BUF_NR / 4) {
+ atomic_inc(&dma->total_missed_free);
+ return 1;
+ }
+ }
+
+ atomic_inc(&dma->total_tried);
+ return 0;
+}
+
+int mga_dma_schedule(drm_device_t *dev, int locked)
+{
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+
+ if (test_and_set_bit(0, &dev->dma_flag)) {
+ atomic_inc(&dma->total_missed_dma);
+ return EBUSY;
+ }
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ if (!dev_priv) {
+ DRM_DEBUG("dev_priv is not set\n");
+ return (0);
+ }
+
+ if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) ||
+ test_bit(MGA_IN_WAIT, &dev_priv->dispatch_status) ||
+ test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
+ locked = 1;
+ }
+
+ if (!locked &&
+ !drm_lock_take(&dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT)) {
+ atomic_inc(&dma->total_missed_lock);
+ clear_bit(0, &dev->dma_flag);
+ DRM_DEBUG("Not locked\n");
+ return EBUSY;
+ }
+ DRM_DEBUG("I'm locked\n");
+
+ if(!test_and_set_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status)) {
+ /* Fire dma buffer */
+ if(mga_decide_to_fire(dev)) {
+ DRM_DEBUG("idx :%d\n", dev_priv->next_prim->idx);
+ clear_bit(MGA_BUF_FORCE_FIRE,
+ &dev_priv->next_prim->buffer_status);
+ if(dev_priv->current_prim == dev_priv->next_prim) {
+ /* Schedule overflow for a later time */
+ set_bit(MGA_BUF_NEEDS_OVERFLOW,
+ &dev_priv->next_prim->buffer_status);
+ }
+ mga_fire_primary(dev, dev_priv->next_prim);
+ } else {
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
+ }
+ } else {
+ DRM_DEBUG("I can't get the dispatch lock\n");
+ }
+
+ if (!locked) {
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+ }
+
+ if(test_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status) &&
+ dev_priv->next_prim->num_dwords == 0 &&
+ atomic_read(&dev_priv->pending_bufs) == 0) {
+ /* Everything has been processed by the hardware */
+ clear_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
+ wakeup(&dev_priv->flush_queue);
+ }
+
+ if(test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status) &&
+ dev_priv->tail->age < dev_priv->last_prim_age) {
+ clear_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status);
+ DRM_DEBUG("Waking up buf queue\n");
+ wakeup(&dev_priv->buf_queue);
+ } else if (test_bit(MGA_IN_GETBUF, &dev_priv->dispatch_status)) {
+ DRM_DEBUG("Not waking buf_queue on %d %d\n",
+ atomic_read(&dev->total_irq),
+ dev_priv->last_prim_age);
+ }
+
+ clear_bit(0, &dev->dma_flag);
+ return 0;
+}
+
+static void mga_dma_service(void *arg)
+{
+ drm_device_t *dev = (drm_device_t *)arg;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ drm_mga_prim_buf_t *last_prim_buffer;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ atomic_inc(&dev->total_irq);
+ if((MGA_READ(MGAREG_STATUS) & 0x00000001) != 0x00000001) return;
+ MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
+ last_prim_buffer = dev_priv->last_prim;
+ last_prim_buffer->num_dwords = 0;
+ last_prim_buffer->sec_used = 0;
+ dev_priv->sarea_priv->last_dispatch =
+ dev_priv->last_prim_age = last_prim_buffer->prim_age;
+ clear_bit(MGA_BUF_IN_USE, &last_prim_buffer->buffer_status);
+ wakeup(&dev_priv->wait_queue);
+ clear_bit(MGA_BUF_SWAP_PENDING, &last_prim_buffer->buffer_status);
+ clear_bit(MGA_IN_DISPATCH, &dev_priv->dispatch_status);
+ atomic_dec(&dev_priv->pending_bufs);
+ taskqueue_enqueue(taskqueue_swi, &dev->task);
+}
+
+static void mga_dma_task_queue(void *device, int pending)
+{
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ mga_dma_schedule((drm_device_t *)device, 0);
+}
+
+int mga_dma_cleanup(drm_device_t *dev)
+{
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if(dev->dev_private) {
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+
+ if(dev_priv->ioremap) {
+ int temp = (dev_priv->warp_ucode_size +
+ dev_priv->primary_size +
+ PAGE_SIZE - 1) / PAGE_SIZE * PAGE_SIZE;
+
+ drm_ioremapfree((void *) dev_priv->ioremap, temp);
+ }
+ if(dev_priv->real_status_page != 0UL) {
+ mga_free_page(dev, dev_priv->real_status_page);
+ }
+ if(dev_priv->prim_bufs != NULL) {
+ int i;
+ for(i = 0; i < MGA_NUM_PRIM_BUFS; i++) {
+ if(dev_priv->prim_bufs[i] != NULL) {
+ drm_free(dev_priv->prim_bufs[i],
+ sizeof(drm_mga_prim_buf_t),
+ DRM_MEM_DRIVER);
+ }
+ }
+ drm_free(dev_priv->prim_bufs, sizeof(void *) *
+ (MGA_NUM_PRIM_BUFS + 1),
+ DRM_MEM_DRIVER);
+ }
+ if(dev_priv->head != NULL) {
+ mga_freelist_cleanup(dev);
+ }
+
+
+ drm_free(dev->dev_private, sizeof(drm_mga_private_t),
+ DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+ }
+
+ return 0;
+}
+
+static int mga_dma_initialize(drm_device_t *dev, drm_mga_init_t *init) {
+ drm_mga_private_t *dev_priv;
+ drm_map_t *sarea_map = NULL;
+ int i;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ dev_priv = drm_alloc(sizeof(drm_mga_private_t), DRM_MEM_DRIVER);
+ if(dev_priv == NULL) return ENOMEM;
+ dev->dev_private = (void *) dev_priv;
+
+ memset(dev_priv, 0, sizeof(drm_mga_private_t));
+
+ if((init->reserved_map_idx >= dev->map_count) ||
+ (init->buffer_map_idx >= dev->map_count)) {
+ mga_dma_cleanup(dev);
+ DRM_DEBUG("reserved_map or buffer_map are invalid\n");
+ return EINVAL;
+ }
+
+ dev_priv->reserved_map_idx = init->reserved_map_idx;
+ dev_priv->buffer_map_idx = init->buffer_map_idx;
+ sarea_map = dev->maplist[0];
+ dev_priv->sarea_priv = (drm_mga_sarea_t *)
+ ((u_int8_t *)sarea_map->handle +
+ init->sarea_priv_offset);
+
+ /* Scale primary size to the next page */
+ dev_priv->chipset = init->chipset;
+ dev_priv->frontOffset = init->frontOffset;
+ dev_priv->backOffset = init->backOffset;
+ dev_priv->depthOffset = init->depthOffset;
+ dev_priv->textureOffset = init->textureOffset;
+ dev_priv->textureSize = init->textureSize;
+ dev_priv->cpp = init->cpp;
+ dev_priv->sgram = init->sgram;
+ dev_priv->stride = init->stride;
+
+ dev_priv->mAccess = init->mAccess;
+ dev_priv->flush_queue = 0;
+ dev_priv->buf_queue = 0;
+ dev_priv->WarpPipe = -1;
+
+ DRM_DEBUG("chipset: %d ucode_size: %d backOffset: %x depthOffset: %x\n",
+ dev_priv->chipset, dev_priv->warp_ucode_size,
+ dev_priv->backOffset, dev_priv->depthOffset);
+ DRM_DEBUG("cpp: %d sgram: %d stride: %d maccess: %x\n",
+ dev_priv->cpp, dev_priv->sgram, dev_priv->stride,
+ dev_priv->mAccess);
+
+ memcpy(&dev_priv->WarpIndex, &init->WarpIndex,
+ sizeof(drm_mga_warp_index_t) * MGA_MAX_WARP_PIPES);
+
+ for (i = 0 ; i < MGA_MAX_WARP_PIPES ; i++)
+ DRM_DEBUG("warp pipe %d: installed: %d phys: %lx size: %x\n",
+ i,
+ dev_priv->WarpIndex[i].installed,
+ dev_priv->WarpIndex[i].phys_addr,
+ dev_priv->WarpIndex[i].size);
+
+ if(mga_init_primary_bufs(dev, init) != 0) {
+ DRM_ERROR("Can not initialize primary buffers\n");
+ mga_dma_cleanup(dev);
+ return ENOMEM;
+ }
+ dev_priv->real_status_page = mga_alloc_page(dev);
+ if(dev_priv->real_status_page == 0UL) {
+ mga_dma_cleanup(dev);
+ DRM_ERROR("Can not allocate status page\n");
+ return ENOMEM;
+ }
+
+ dev_priv->status_page = (void*)dev_priv->real_status_page; /* XXX wants nocache */
+#if 0
+ dev_priv->status_page =
+ ioremap_nocache(virt_to_bus((void *)dev_priv->real_status_page),
+ PAGE_SIZE);
+
+ if(dev_priv->status_page == NULL) {
+ mga_dma_cleanup(dev);
+ DRM_ERROR("Can not remap status page\n");
+ return ENOMEM;
+ }
+#endif
+
+ /* Write status page when SECEND or SOFTRAP occurs */
+ MGA_WRITE(MGAREG_PRIMPTR,
+ vtophys((void *)dev_priv->real_status_page) | 0x00000003);
+
+
+ /* Private is now filled in, initialize the hardware */
+ {
+ PRIMLOCALS;
+ PRIMGETPTR( dev_priv );
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0x0100);
+ PRIMOUTREG(MGAREG_SOFTRAP, 0);
+ /* Poll for the first buffer to ensure that
+ * the status register will be correct
+ */
+
+ mga_flush_write_combine();
+ MGA_WRITE(MGAREG_PRIMADDRESS, phys_head | TT_GENERAL);
+
+ MGA_WRITE(MGAREG_PRIMEND, ((phys_head + num_dwords * 4) |
+ PDEA_pagpxfer_enable));
+
+ while(MGA_READ(MGAREG_DWGSYNC) != 0x0100) ;
+ }
+
+ if(mga_freelist_init(dev) != 0) {
+ DRM_ERROR("Could not initialize freelist\n");
+ mga_dma_cleanup(dev);
+ return ENOMEM;
+ }
+ return 0;
+}
+
+int
+mga_dma_init(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_mga_init_t init;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ init = *(drm_mga_init_t *) data;
+
+ switch(init.func) {
+ case MGA_INIT_DMA:
+ return mga_dma_initialize(dev, &init);
+ case MGA_CLEANUP_DMA:
+ return mga_dma_cleanup(dev);
+ }
+
+ return EINVAL;
+}
+
+int mga_irq_install(drm_device_t *dev, int irq)
+{
+ int rid;
+ int retcode;
+
+ if (!irq) return EINVAL;
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ if (dev->irq) {
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+ return EBUSY;
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ DRM_DEBUG("install irq handler %d\n", irq);
+
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->dma->next_buffer = NULL;
+ dev->dma->next_queue = NULL;
+ dev->dma->this_buffer = NULL;
+ TASK_INIT(&dev->task, 0, mga_dma_task_queue, dev);
+
+ /* Before installing handler */
+ MGA_WRITE(MGAREG_IEN, 0);
+ /* Install handler */
+ rid = 0;
+ dev->irq = bus_alloc_resource(dev->device, SYS_RES_IRQ, &rid,
+ 0, ~0, 1, RF_SHAREABLE);
+ if (!dev->irq)
+ return ENOENT;
+
+ retcode = bus_setup_intr(dev->device, dev->irq, INTR_TYPE_TTY,
+ mga_dma_service, dev, &dev->irqh);
+ if (retcode) {
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
+ dev->irq = 0;
+ return retcode;
+ }
+
+ /* After installing handler */
+ MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
+ MGA_WRITE(MGAREG_IEN, 0x00000001);
+ return 0;
+}
+
+int mga_irq_uninstall(drm_device_t *dev)
+{
+ if (!dev->irq)
+ return EINVAL;
+
+ DRM_DEBUG("remove irq handler %ld\n", rman_get_start(dev->irq));
+ MGA_WRITE(MGAREG_ICLEAR, 0x00000001);
+ MGA_WRITE(MGAREG_IEN, 0);
+
+ bus_teardown_intr(dev->device, dev->irq, dev->irqh);
+ bus_release_resource(dev->device, SYS_RES_IRQ, 0, dev->irq);
+ dev->irq = 0;
+
+ return 0;
+}
+
+int mga_control(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_control_t ctl;
+
+ ctl = *(drm_control_t *) data;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ switch (ctl.func) {
+ case DRM_INST_HANDLER:
+ return mga_irq_install(dev, ctl.irq);
+ case DRM_UNINST_HANDLER:
+ return mga_irq_uninstall(dev);
+ default:
+ return EINVAL;
+ }
+}
+
+static int mga_flush_queue(drm_device_t *dev)
+{
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ int ret = 0;
+ int s;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if(dev_priv == NULL) {
+ return 0;
+ }
+
+ if(dev_priv->next_prim->num_dwords != 0) {
+ s = splsofttq();
+ set_bit(MGA_IN_FLUSH, &dev_priv->dispatch_status);
+ for (;;) {
+ mga_dma_schedule(dev, 0);
+ if (!test_bit(MGA_IN_FLUSH,
+ &dev_priv->dispatch_status))
+ break;
+ atomic_inc(&dev->total_sleeps);
+ ret = tsleep(&dev_priv->flush_queue, PZERO|PCATCH,
+ "mgafq", 0);
+ if (ret) {
+ clear_bit(MGA_IN_FLUSH,
+ &dev_priv->dispatch_status);
+ break;
+ }
+ }
+ splx(s);
+ }
+ return ret;
+}
+
+/* Must be called with the lock held */
+void mga_reclaim_buffers(drm_device_t *dev, pid_t pid)
+{
+ drm_device_dma_t *dma = dev->dma;
+ int i;
+
+ if (!dma) return;
+ if(dev->dev_private == NULL) return;
+ if(dma->buflist == NULL) return;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ mga_flush_queue(dev);
+
+ for (i = 0; i < dma->buf_count; i++) {
+ drm_buf_t *buf = dma->buflist[ i ];
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+
+ /* Only buffers that need to get reclaimed ever
+ * get set to free
+ */
+ if (buf->pid == pid && buf_priv) {
+ if(buf_priv->my_freelist->age == MGA_BUF_USED)
+ buf_priv->my_freelist->age = MGA_BUF_FREE;
+ }
+ }
+}
+
+int mga_lock(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ int ret = 0;
+ drm_lock_t lock;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ lock = *(drm_lock_t *) data;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ p->p_pid, lock.context);
+ return EINVAL;
+ }
+
+ DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
+ lock.context, p->p_pid, dev->lock.hw_lock->lock,
+ lock.flags);
+
+ if (lock.context < 0) {
+ return EINVAL;
+ }
+
+ /* Only one queue:
+ */
+
+ if (!ret) {
+ atomic_inc(&dev->lock.lock_queue);
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ ret = EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ lock.context)) {
+ dev->lock.pid = p->p_pid;
+ dev->lock.lock_time = ticks;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ ret = tsleep(&dev->lock.lock_queue, PZERO|PCATCH,
+ "mgal2", 0);
+ if (ret)
+ break;
+ }
+ atomic_dec(&dev->lock.lock_queue);
+ }
+
+ if (!ret) {
+ if (lock.flags & _DRM_LOCK_QUIESCENT) {
+ DRM_DEBUG("_DRM_LOCK_QUIESCENT\n");
+ mga_flush_queue(dev);
+ mga_dma_quiescent(dev);
+ }
+ }
+
+ DRM_DEBUG("%d %s\n", lock.context, ret ? "interrupted" : "has lock");
+ return ret;
+}
+
+int mga_flush_ioctl(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_lock_t lock;
+ drm_mga_private_t *dev_priv = (drm_mga_private_t *)dev->dev_private;
+ int s;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+ lock = *(drm_lock_t *) data;
+
+ if(!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_flush_ioctl called without lock held\n");
+ return EINVAL;
+ }
+
+ if(lock.flags & _DRM_LOCK_FLUSH || lock.flags & _DRM_LOCK_FLUSH_ALL) {
+ drm_mga_prim_buf_t *temp_buf =
+ dev_priv->prim_bufs[dev_priv->current_prim_idx];
+
+ if(temp_buf && temp_buf->num_dwords) {
+ s = splsofttq();
+ set_bit(MGA_BUF_FORCE_FIRE, &temp_buf->buffer_status);
+ mga_advance_primary(dev);
+ mga_dma_schedule(dev, 1);
+ splx(s);
+ }
+ }
+ if(lock.flags & _DRM_LOCK_QUIESCENT) {
+ mga_flush_queue(dev);
+ mga_dma_quiescent(dev);
+ }
+
+ return 0;
+}
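
[Editor's note] mga_lock() and mga_flush_ioctl() together define the userspace contract for draining the primary DMA ring: the client must already hold the hardware lock before requesting a flush, and _DRM_LOCK_QUIESCENT additionally waits for the engine to go idle. Below is a hypothetical sketch of that sequence, not part of this commit; drm_lock_t, the lock flags, and the ioctl numbers are the ones the table in mga_drv.c below binds to mga_lock, mga_flush_ioctl and mga_unlock.

    /* Hypothetical userspace sketch -- not part of this commit. */
    #include <sys/ioctl.h>
    #include "drm.h"

    static int flush_and_quiesce(int fd, int my_ctx)
    {
            drm_lock_t lk;

            lk.context = my_ctx;              /* must not be DRM_KERNEL_CONTEXT */
            lk.flags = _DRM_LOCK_QUIESCENT;   /* mga_lock() also drains DMA */
            if (ioctl(fd, DRM_IOCTL_LOCK, &lk) != 0)
                    return -1;
            lk.flags = _DRM_LOCK_FLUSH | _DRM_LOCK_QUIESCENT;
            if (ioctl(fd, DRM_IOCTL_MGA_FLUSH, &lk) != 0) /* lock must be held */
                    return -1;
            lk.flags = 0;
            return ioctl(fd, DRM_IOCTL_UNLOCK, &lk);
    }
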
diff --git a/bsd/mga/mga_drv.c b/bsd/mga/mga_drv.c
new file mode 100644
index 00000000..09937201
--- /dev/null
+++ b/bsd/mga/mga_drv.c
@@ -0,0 +1,714 @@
+/* mga_drv.c -- Matrox g200/g400 driver
+ * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ *
+ */
+
+#include "drmP.h"
+#include "mga_drv.h"
+
+#include <pci/pcivar.h>
+
+MODULE_DEPEND(mga, drm, 1, 1, 1);
+MODULE_DEPEND(mga, agp, 1, 1, 1);
+
+#define MGA_NAME "mga"
+#define MGA_DESC "Matrox g200/g400"
+#define MGA_DATE "19991213"
+#define MGA_MAJOR 1
+#define MGA_MINOR 0
+#define MGA_PATCHLEVEL 0
+
+drm_ctx_t mga_res_ctx;
+
+static int mga_probe(device_t dev)
+{
+ const char *s = 0;
+
+ switch (pci_get_devid(dev)) {
+ case 0x0525102b:
+ s = "Matrox MGA G400 AGP graphics accelerator";
+ break;
+
+ case 0x0521102b:
+ s = "Matrox MGA G200 AGP graphics accelerator";
+ break;
+ }
+
+ if (s) {
+ device_set_desc(dev, s);
+ return 0;
+ }
+
+ return ENXIO;
+}
+
+static int mga_attach(device_t dev)
+{
+ return mga_init(dev);
+}
+
+static int mga_detach(device_t dev)
+{
+ mga_cleanup(dev);
+ return 0;
+}
+
+static device_method_t mga_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, mga_probe),
+ DEVMETHOD(device_attach, mga_attach),
+ DEVMETHOD(device_detach, mga_detach),
+
+ { 0, 0 }
+};
+
+static driver_t mga_driver = {
+ "drm",
+ mga_methods,
+ sizeof(drm_device_t),
+};
+
+static devclass_t mga_devclass;
+#define MGA_SOFTC(unit) \
+ ((drm_device_t *) devclass_get_softc(mga_devclass, unit))
+
+DRIVER_MODULE(if_mga, pci, mga_driver, mga_devclass, 0, 0);
+
+#define CDEV_MAJOR 145
+ /* mga_drv.c */
+static struct cdevsw mga_cdevsw = {
+ /* open */ mga_open,
+ /* close */ mga_close,
+ /* read */ drm_read,
+ /* write */ drm_write,
+ /* ioctl */ mga_ioctl,
+ /* poll */ drm_poll,
+ /* mmap */ drm_mmap,
+ /* strategy */ nostrategy,
+ /* name */ "mga",
+ /* maj */ CDEV_MAJOR,
+ /* dump */ nodump,
+ /* psize */ nopsize,
+ /* flags */ D_TTY | D_TRACKCLOSE,
+ /* bmaj */ -1
+};
+
+static drm_ioctl_desc_t mga_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mga_version, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mga_control, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mga_addbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mga_markbufs, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mga_infobufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mga_lock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mga_unlock, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 },
+
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_swap_bufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_clear_bufs, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_VERTEX)] = { mga_vertex, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_FLUSH)] = { mga_flush_ioctl, 1, 0 },
+ [DRM_IOCTL_NR(DRM_IOCTL_MGA_INDICES)] = { mga_indices, 1, 0 },
+};
+
+#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls)
+
+static int mga_setup(drm_device_t *dev)
+{
+ int i;
+
+ device_busy(dev->device);
+
+ atomic_set(&dev->ioctl_count, 0);
+ atomic_set(&dev->vma_count, 0);
+ dev->buf_use = 0;
+ atomic_set(&dev->buf_alloc, 0);
+
+ drm_dma_setup(dev);
+
+ atomic_set(&dev->total_open, 0);
+ atomic_set(&dev->total_close, 0);
+ atomic_set(&dev->total_ioctl, 0);
+ atomic_set(&dev->total_irq, 0);
+ atomic_set(&dev->total_ctx, 0);
+ atomic_set(&dev->total_locks, 0);
+ atomic_set(&dev->total_unlocks, 0);
+ atomic_set(&dev->total_contends, 0);
+ atomic_set(&dev->total_sleeps, 0);
+
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ dev->magiclist[i].head = NULL;
+ dev->magiclist[i].tail = NULL;
+ }
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ dev->vmalist = NULL;
+ dev->lock.hw_lock = NULL;
+ dev->lock.lock_queue = 0;
+ dev->queue_count = 0;
+ dev->queue_reserved = 0;
+ dev->queue_slots = 0;
+ dev->queuelist = NULL;
+ dev->irq = 0;
+ dev->context_flag = 0;
+ dev->interrupt_flag = 0;
+ dev->dma_flag = 0;
+ dev->last_context = 0;
+ dev->last_switch = 0;
+ dev->last_checked = 0;
+ callout_init(&dev->timer);
+ dev->context_wait = 0;
+
+ timespecclear(&dev->ctx_start);
+ timespecclear(&dev->lck_start);
+
+ dev->buf_rp = dev->buf;
+ dev->buf_wp = dev->buf;
+ dev->buf_end = dev->buf + DRM_BSZ;
+ bzero(&dev->buf_sel, sizeof dev->buf_sel);
+ dev->buf_sigio = NULL;
+ dev->buf_readers = 0;
+ dev->buf_writers = 0;
+ dev->buf_selecting = 0;
+
+ DRM_DEBUG("\n");
+
+ /* The kernel's context could be created here, but is now created
+ in drm_dma_enqueue. This is more resource-efficient for
+ hardware that does not do DMA, but may mean that
+ drm_select_queue fails between the time the interrupt is
+ initialized and the time the queues are initialized. */
+
+ return 0;
+}
+
+
+static int mga_takedown(drm_device_t *dev)
+{
+ int i;
+ drm_magic_entry_t *pt, *next;
+ drm_map_t *map;
+ drm_vma_entry_t *vma, *vma_next;
+
+ DRM_DEBUG("\n");
+
+ if (dev->irq) mga_irq_uninstall(dev);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, curproc);
+ callout_stop(&dev->timer);
+
+ if (dev->devname) {
+ drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER);
+ dev->devname = NULL;
+ }
+
+ if (dev->unique) {
+ drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER);
+ dev->unique = NULL;
+ dev->unique_len = 0;
+ }
+ /* Clear pid list */
+ for (i = 0; i < DRM_HASH_SIZE; i++) {
+ for (pt = dev->magiclist[i].head; pt; pt = next) {
+ next = pt->next;
+ drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC);
+ }
+ dev->magiclist[i].head = dev->magiclist[i].tail = NULL;
+ }
+ /* Clear AGP information */
+ if (dev->agp) {
+ drm_agp_mem_t *entry;
+ drm_agp_mem_t *nexte;
+
+ /* Remove AGP resources, but leave dev->agp
+ intact until cleanup is called. */
+ for (entry = dev->agp->memory; entry; entry = nexte) {
+ nexte = entry->next;
+ if (entry->bound) drm_unbind_agp(entry->handle);
+ drm_free_agp(entry->handle, entry->pages);
+ drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
+ }
+ dev->agp->memory = NULL;
+
+ if (dev->agp->acquired)
+ agp_release(dev->agp->agpdev);
+
+ dev->agp->acquired = 0;
+ dev->agp->enabled = 0;
+ }
+ /* Clear vma list (only built for debugging) */
+ if (dev->vmalist) {
+ for (vma = dev->vmalist; vma; vma = vma_next) {
+ vma_next = vma->next;
+ drm_free(vma, sizeof(*vma), DRM_MEM_VMAS);
+ }
+ dev->vmalist = NULL;
+ }
+
+ /* Clear map area and mtrr information */
+ if (dev->maplist) {
+ for (i = 0; i < dev->map_count; i++) {
+ map = dev->maplist[i];
+ switch (map->type) {
+ case _DRM_REGISTERS:
+ case _DRM_FRAME_BUFFER:
+#ifdef CONFIG_MTRR
+ if (map->mtrr >= 0) {
+ int retcode;
+ retcode = mtrr_del(map->mtrr,
+ map->offset,
+ map->size);
+ DRM_DEBUG("mtrr_del = %d\n", retcode);
+ }
+#endif
+ drm_ioremapfree(map->handle, map->size);
+ break;
+ case _DRM_SHM:
+ drm_free_pages((unsigned long)map->handle,
+ drm_order(map->size)
+ - PAGE_SHIFT,
+ DRM_MEM_SAREA);
+ break;
+ case _DRM_AGP:
+ break;
+ }
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ }
+ drm_free(dev->maplist,
+ dev->map_count * sizeof(*dev->maplist),
+ DRM_MEM_MAPS);
+ dev->maplist = NULL;
+ dev->map_count = 0;
+ }
+
+ if (dev->queuelist) {
+ for (i = 0; i < dev->queue_count; i++) {
+ if (dev->queuelist[i]) {
+ drm_waitlist_destroy(&dev->queuelist[i]->waitlist);
+ drm_free(dev->queuelist[i],
+ sizeof(*dev->queuelist[0]),
+ DRM_MEM_QUEUES);
+ dev->queuelist[i] = NULL;
+ }
+ }
+ drm_free(dev->queuelist,
+ dev->queue_slots * sizeof(*dev->queuelist),
+ DRM_MEM_QUEUES);
+ dev->queuelist = NULL;
+ }
+
+ drm_dma_takedown(dev);
+
+ dev->queue_count = 0;
+ if (dev->lock.hw_lock) {
+ dev->lock.hw_lock = NULL; /* SHM removed */
+ dev->lock.pid = 0;
+ wakeup(&dev->lock.lock_queue);
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, curproc);
+
+ return 0;
+}
+
+/* mga_init is called via mga_attach at module load time. */
+
+int
+mga_init(device_t nbdev)
+{
+ int retcode;
+ drm_device_t *dev = device_get_softc(nbdev);
+
+ DRM_DEBUG("\n");
+
+ memset((void *)dev, 0, sizeof(*dev));
+ simple_lock_init(&dev->count_lock);
+ lockinit(&dev->dev_lock, PZERO, "drmlk", 0, 0);
+
+#if 0
+ drm_parse_options(mga);
+#endif
+ dev->device = nbdev;
+ dev->devnode = make_dev(&mga_cdevsw,
+ device_get_unit(nbdev),
+ DRM_DEV_UID,
+ DRM_DEV_GID,
+ DRM_DEV_MODE,
+ MGA_NAME);
+ dev->name = MGA_NAME;
+
+ DRM_DEBUG("doing mem init\n");
+ drm_mem_init();
+ DRM_DEBUG("doing proc init\n");
+ drm_sysctl_init(dev);
+ TAILQ_INIT(&dev->files);
+ DRM_DEBUG("doing agp init\n");
+ dev->agp = drm_agp_init();
+ if(dev->agp == NULL) {
+ DRM_INFO("The mga drm module requires the agp module"
+ " to function correctly\nPlease load the agp"
+ " module before you load the mga module\n");
+ drm_sysctl_cleanup(dev);
+ mga_takedown(dev);
+ return ENOMEM;
+ }
+#if 0
+ dev->agp->agp_mtrr = mtrr_add(dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size * 1024 * 1024,
+ MTRR_TYPE_WRCOMB,
+ 1);
+#endif
+ DRM_DEBUG("doing ctxbitmap init\n");
+ if((retcode = drm_ctxbitmap_init(dev))) {
+ DRM_ERROR("Cannot allocate memory for context bitmap.\n");
+ drm_sysctl_cleanup(dev);
+ mga_takedown(dev);
+ return retcode;
+ }
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ MGA_NAME,
+ MGA_MAJOR,
+ MGA_MINOR,
+ MGA_PATCHLEVEL,
+ MGA_DATE,
+ device_get_unit(nbdev));
+
+ return 0;
+}
+
+/* mga_cleanup is called via mga_detach at module unload time. */
+
+void mga_cleanup(device_t nbdev)
+{
+ drm_device_t *dev = device_get_softc(nbdev);
+
+ DRM_DEBUG("\n");
+
+ drm_sysctl_cleanup(dev);
+ destroy_dev(dev->devnode);
+
+ DRM_INFO("Module unloaded\n");
+ drm_ctxbitmap_cleanup(dev);
+ mga_dma_cleanup(dev);
+#if 0
+ if(dev->agp && dev->agp->agp_mtrr) {
+ int retval;
+ retval = mtrr_del(dev->agp->agp_mtrr,
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size * 1024*1024);
+ DRM_DEBUG("mtrr_del = %d\n", retval);
+ }
+#endif
+
+ mga_takedown(dev);
+ if (dev->agp) {
+ drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS);
+ dev->agp = NULL;
+ }
+}
+
+int
+mga_version(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_version_t version;
+ int len;
+
+ version = *(drm_version_t *) data;
+
+#define DRM_COPY(name,value) \
+ len = strlen(value); \
+ if (len > name##_len) len = name##_len; \
+ name##_len = strlen(value); \
+ if (len && name) { \
+ int error = copyout(value, name, len); \
+ if (error) return error; \
+ }
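+ /* For example, DRM_COPY(version.name, MGA_NAME) copies at most the
+  * caller-supplied version.name_len bytes of MGA_NAME into the user
+  * buffer version.name and reports the full string length back in
+  * version.name_len; a truncated copy is not NUL-terminated. */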
+
+ version.version_major = MGA_MAJOR;
+ version.version_minor = MGA_MINOR;
+ version.version_patchlevel = MGA_PATCHLEVEL;
+
+ DRM_COPY(version.name, MGA_NAME);
+ DRM_COPY(version.date, MGA_DATE);
+ DRM_COPY(version.desc, MGA_DESC);
+
+ *(drm_version_t *) data = version;
+ return 0;
+}
+
+int
+mga_open(dev_t kdev, int flags, int fmt, struct proc *p)
+{
+ drm_device_t *dev = MGA_SOFTC(minor(kdev));
+ int retcode = 0;
+
+ DRM_DEBUG("open_count = %d\n", dev->open_count);
+
+ device_busy(dev->device);
+ if (!(retcode = drm_open_helper(kdev, flags, fmt, p, dev))) {
+ atomic_inc(&dev->total_open);
+ simple_lock(&dev->count_lock);
+ if (!dev->open_count++) {
+ simple_unlock(&dev->count_lock);
+ retcode = mga_setup(dev);
+ } else {
+ simple_unlock(&dev->count_lock);
+ }
+ }
+ device_unbusy(dev->device);
+
+ return retcode;
+}
+
+int
+mga_close(dev_t kdev, int flags, int fmt, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_file_t *priv;
+ int retcode = 0;
+
+ DRM_DEBUG("pid = %d, open_count = %d\n",
+ p->p_pid, dev->open_count);
+
+ priv = drm_find_file_by_proc(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ if (dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)
+ && dev->lock.pid == p->p_pid) {
+ mga_reclaim_buffers(dev, priv->pid);
+ DRM_ERROR("Process %d dead, freeing lock for context %d\n",
+ p->p_pid,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ drm_lock_free(dev,
+ &dev->lock.hw_lock->lock,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+
+ /* FIXME: may require heavy-handed reset of
+ hardware at this point, possibly
+ processed via a callback to the X
+ server. */
+ } else if (dev->lock.hw_lock) {
+ /* The lock is required to reclaim buffers */
+ for (;;) {
+ if (!dev->lock.hw_lock) {
+ /* Device has been unregistered */
+ retcode = EINTR;
+ break;
+ }
+ if (drm_lock_take(&dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ dev->lock.pid = p->p_pid;
+ dev->lock.lock_time = ticks;
+ atomic_inc(&dev->total_locks);
+ break; /* Got lock */
+ }
+ /* Contention */
+ atomic_inc(&dev->total_sleeps);
+ retcode = tsleep(&dev->lock.lock_queue,
+ PZERO|PCATCH,
+ "drmlk2",
+ 0);
+ if (retcode)
+ break;
+ }
+ if(!retcode) {
+ mga_reclaim_buffers(dev, priv->pid);
+ drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT);
+ }
+ }
+ funsetown(dev->buf_sigio);
+
+ lockmgr(&dev->dev_lock, LK_EXCLUSIVE, 0, p);
+ priv = drm_find_file_by_proc(dev, p);
+ if (priv) {
+ priv->refs--;
+ if (!priv->refs) {
+ TAILQ_REMOVE(&dev->files, priv, link);
+ drm_free(priv, sizeof(*priv), DRM_MEM_FILES);
+ }
+ }
+ lockmgr(&dev->dev_lock, LK_RELEASE, 0, p);
+
+ atomic_inc(&dev->total_close);
+ simple_lock(&dev->count_lock);
+ if (!--dev->open_count) {
+ if (atomic_read(&dev->ioctl_count) || dev->blocked) {
+ DRM_ERROR("Device busy: %d %d\n",
+ atomic_read(&dev->ioctl_count),
+ dev->blocked);
+ simple_unlock(&dev->count_lock);
+ return EBUSY;
+ }
+ simple_unlock(&dev->count_lock);
+ device_unbusy(dev->device);
+ return mga_takedown(dev);
+ }
+ simple_unlock(&dev->count_lock);
+ return retcode;
+}
+
+
+/* mga_ioctl is called whenever a process performs an ioctl on the mga device node. */
+
+int
+mga_ioctl(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ int nr = DRM_IOCTL_NR(cmd);
+ drm_device_t *dev = kdev->si_drv1;
+ drm_file_t *priv;
+ int retcode = 0;
+ drm_ioctl_desc_t *ioctl;
+ d_ioctl_t *func;
+
+ DRM_DEBUG("dev=%p\n", dev);
+ priv = drm_find_file_by_proc(dev, p);
+ if (!priv) {
+ DRM_DEBUG("can't find authenticator\n");
+ return EINVAL;
+ }
+
+ atomic_inc(&dev->ioctl_count);
+ atomic_inc(&dev->total_ioctl);
+ ++priv->ioctl_count;
+
+ DRM_DEBUG("pid = %d, cmd = 0x%02lx, nr = 0x%02x, auth = %d\n",
+ p->p_pid, cmd, nr, priv->authenticated);
+
+ switch (cmd) {
+ case FIONBIO:
+ atomic_dec(&dev->ioctl_count);
+ return 0;
+
+ case FIOASYNC:
+ atomic_dec(&dev->ioctl_count);
+ dev->flags |= FASYNC;
+ return 0;
+
+ case FIOSETOWN:
+ atomic_dec(&dev->ioctl_count);
+ return fsetown(*(int *)data, &dev->buf_sigio);
+
+ case FIOGETOWN:
+ atomic_dec(&dev->ioctl_count);
+ *(int *) data = fgetown(dev->buf_sigio);
+ return 0;
+ }
+
+ if (nr >= MGA_IOCTL_COUNT) {
+ retcode = EINVAL;
+ } else {
+ ioctl = &mga_ioctls[nr];
+ func = ioctl->func;
+
+ if (!func) {
+ DRM_DEBUG("no function\n");
+ retcode = EINVAL;
+ } else if ((ioctl->root_only && suser(p))
+ || (ioctl->auth_needed && !priv->authenticated)) {
+ retcode = EACCES;
+ } else {
+ retcode = (func)(kdev, cmd, data, flags, p);
+ }
+ }
+
+ atomic_dec(&dev->ioctl_count);
+ return retcode;
+}
+
+int
+mga_unlock(dev_t kdev, u_long cmd, caddr_t data, int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_lock_t lock;
+ int s;
+
+ lock = *(drm_lock_t *) data;
+
+ if (lock.context == DRM_KERNEL_CONTEXT) {
+ DRM_ERROR("Process %d using kernel context %d\n",
+ p->p_pid, lock.context);
+ return EINVAL;
+ }
+
+ DRM_DEBUG("%d frees lock (%d holds)\n",
+ lock.context,
+ _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock));
+ atomic_inc(&dev->total_unlocks);
+ if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock))
+ atomic_inc(&dev->total_contends);
+ drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT);
+
+ s = splsofttq();
+ mga_dma_schedule(dev, 1);
+ splx(s);
+
+ if (drm_lock_free(dev, &dev->lock.hw_lock->lock,
+ DRM_KERNEL_CONTEXT)) {
+ DRM_ERROR("\n");
+ }
+
+ return 0;
+}
diff --git a/bsd/mga/mga_drv.h b/bsd/mga/mga_drv.h
new file mode 100644
index 00000000..05c4d9d2
--- /dev/null
+++ b/bsd/mga/mga_drv.h
@@ -0,0 +1,420 @@
+/* mga_drv.h -- Private header for the Matrox g200/g400 driver
+ * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
+ *
+ */
+
+#ifndef _MGA_DRV_H_
+#define _MGA_DRV_H_
+
+#define MGA_BUF_IN_USE 0
+#define MGA_BUF_SWAP_PENDING 1
+#define MGA_BUF_FORCE_FIRE 2
+#define MGA_BUF_NEEDS_OVERFLOW 3
+
+typedef struct {
+ u_int32_t buffer_status;
+ unsigned int num_dwords;
+ unsigned int max_dwords;
+ u_int32_t *current_dma_ptr;
+ u_int32_t *head;
+ u_int32_t phys_head;
+ unsigned int prim_age;
+ int sec_used;
+ int idx;
+} drm_mga_prim_buf_t;
+
+typedef struct _drm_mga_freelist {
+ unsigned int age;
+ drm_buf_t *buf;
+ struct _drm_mga_freelist *next;
+ struct _drm_mga_freelist *prev;
+} drm_mga_freelist_t;
+
+#define MGA_IN_DISPATCH 0
+#define MGA_IN_FLUSH 1
+#define MGA_IN_WAIT 2
+#define MGA_IN_GETBUF 3
+
+typedef struct _drm_mga_private {
+ u_int32_t dispatch_status;
+ unsigned int next_prim_age;
+ __volatile__ unsigned int last_prim_age;
+ int reserved_map_idx;
+ int buffer_map_idx;
+ drm_mga_sarea_t *sarea_priv;
+ int primary_size;
+ int warp_ucode_size;
+ int chipset;
+ unsigned int frontOffset;
+ unsigned int backOffset;
+ unsigned int depthOffset;
+ unsigned int textureOffset;
+ unsigned int textureSize;
+ int cpp;
+ unsigned int stride;
+ int sgram;
+ int use_agp;
+ drm_mga_warp_index_t WarpIndex[MGA_MAX_G400_PIPES];
+ unsigned int WarpPipe;
+ atomic_t pending_bufs;
+ void *status_page;
+ unsigned long real_status_page;
+ u_int8_t *ioremap;
+ drm_mga_prim_buf_t **prim_bufs;
+ drm_mga_prim_buf_t *next_prim;
+ drm_mga_prim_buf_t *last_prim;
+ drm_mga_prim_buf_t *current_prim;
+ int current_prim_idx;
+ drm_mga_freelist_t *head;
+ drm_mga_freelist_t *tail;
+ int flush_queue; /* Processes waiting until flush */
+ int wait_queue; /* Processes waiting until interrupt */
+ int buf_queue; /* Processes waiting for a free buf */
+ /* Some validated register values:
+ */
+ u_int32_t mAccess;
+} drm_mga_private_t;
+
+ /* mga_drv.c */
+extern int mga_init(device_t);
+extern void mga_cleanup(device_t);
+extern d_ioctl_t mga_version;
+extern d_open_t mga_open;
+extern d_close_t mga_close;
+extern d_ioctl_t mga_ioctl;
+extern d_ioctl_t mga_unlock;
+
+ /* mga_dma.c */
+extern int mga_dma_schedule(drm_device_t *dev, int locked);
+extern int mga_irq_install(drm_device_t *dev, int irq);
+extern int mga_irq_uninstall(drm_device_t *dev);
+extern d_ioctl_t mga_dma;
+extern d_ioctl_t mga_control;
+extern d_ioctl_t mga_lock;
+
+/* mga_dma_init does init and release */
+extern int mga_dma_cleanup(drm_device_t *dev);
+extern d_ioctl_t mga_dma_init;
+extern d_ioctl_t mga_flush_ioctl;
+extern void mga_flush_write_combine(void);
+extern unsigned int mga_create_sync_tag(drm_device_t *dev);
+extern drm_buf_t *mga_freelist_get(drm_device_t *dev);
+extern int mga_freelist_put(drm_device_t *dev, drm_buf_t *buf);
+extern int mga_advance_primary(drm_device_t *dev);
+extern void mga_reclaim_buffers(drm_device_t *dev, pid_t pid);
+
+ /* mga_bufs.c */
+extern d_ioctl_t mga_addbufs;
+extern d_ioctl_t mga_infobufs;
+extern d_ioctl_t mga_markbufs;
+extern d_ioctl_t mga_freebufs;
+extern d_ioctl_t mga_mapbufs;
+extern d_ioctl_t mga_addmap;
+ /* mga_state.c */
+extern d_ioctl_t mga_clear_bufs;
+extern d_ioctl_t mga_swap_bufs;
+extern d_ioctl_t mga_iload;
+extern d_ioctl_t mga_vertex;
+extern d_ioctl_t mga_indices;
+ /* mga_context.c */
+extern d_ioctl_t mga_resctx;
+extern d_ioctl_t mga_addctx;
+extern d_ioctl_t mga_modctx;
+extern d_ioctl_t mga_getctx;
+extern d_ioctl_t mga_switchctx;
+extern d_ioctl_t mga_newctx;
+extern d_ioctl_t mga_rmctx;
+
+extern int mga_context_switch(drm_device_t *dev, int old, int new);
+extern int mga_context_switch_complete(drm_device_t *dev, int new);
+
+
+typedef enum {
+ TT_GENERAL,
+ TT_BLIT,
+ TT_VECTOR,
+ TT_VERTEX
+} transferType_t;
+
+typedef struct {
+ drm_mga_freelist_t *my_freelist;
+ int discard;
+ int dispatched;
+} drm_mga_buf_priv_t;
+
+#define DWGREG0 0x1c00
+#define DWGREG0_END 0x1dff
+#define DWGREG1 0x2c00
+#define DWGREG1_END 0x2dff
+
+#define ISREG0(r) (r >= DWGREG0 && r <= DWGREG0_END)
+#define ADRINDEX0(r) (u_int8_t)((r - DWGREG0) >> 2)
+#define ADRINDEX1(r) (u_int8_t)(((r - DWGREG1) >> 2) | 0x80)
+#define ADRINDEX(r) (ISREG0(r) ? ADRINDEX0(r) : ADRINDEX1(r))
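+/* For example, ADRINDEX(MGAREG_DWGCTL) = (0x1c00 - DWGREG0) >> 2 = 0x00,
+ * while ADRINDEX(MGAREG_DSTORG) = ((0x2cb8 - DWGREG1) >> 2) | 0x80 = 0xae. */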
+
+#define MGA_VERBOSE 0
+#define MGA_NUM_PRIM_BUFS 8
+
+#define PRIMLOCALS u_int8_t tempIndex[4]; u_int32_t *dma_ptr; u_int32_t phys_head; \
+ int outcount, num_dwords
+
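+/* PRIM_OVERFLOW fires the current primary DMA buffer and advances to a new
+ * one when the buffer is flagged MGA_BUF_NEEDS_OVERFLOW, when fewer than
+ * `length' dwords of space remain in it, or when more than MGA_DMA_BUF_NR/2
+ * secondary buffers have already been queued behind it. */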
+#define PRIM_OVERFLOW(dev, dev_priv, length) do { \
+ drm_mga_prim_buf_t *tmp_buf = \
+ dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+ if( test_bit(MGA_BUF_NEEDS_OVERFLOW, \
+ &tmp_buf->buffer_status)) { \
+ mga_advance_primary(dev); \
+ mga_dma_schedule(dev, 1); \
+ } else if( tmp_buf->max_dwords - tmp_buf->num_dwords < length ||\
+ tmp_buf->sec_used > MGA_DMA_BUF_NR/2) { \
+ set_bit(MGA_BUF_FORCE_FIRE, &tmp_buf->buffer_status); \
+ mga_advance_primary(dev); \
+ mga_dma_schedule(dev, 1); \
+ } \
+} while(0)
+
+#define PRIMGETPTR(dev_priv) do { \
+ drm_mga_prim_buf_t *tmp_buf = \
+ dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+ if(MGA_VERBOSE) \
+ DRM_DEBUG("PRIMGETPTR in %s\n", __FUNCTION__); \
+ dma_ptr = tmp_buf->current_dma_ptr; \
+ num_dwords = tmp_buf->num_dwords; \
+ phys_head = tmp_buf->phys_head; \
+ outcount = 0; \
+} while(0)
+
+#define PRIMPTR(prim_buf) do { \
+ if(MGA_VERBOSE) \
+ DRM_DEBUG("PRIMPTR in %s\n", __FUNCTION__); \
+ dma_ptr = prim_buf->current_dma_ptr; \
+ num_dwords = prim_buf->num_dwords; \
+ phys_head = prim_buf->phys_head; \
+ outcount = 0; \
+} while(0)
+
+#define PRIMFINISH(prim_buf) do { \
+ if (MGA_VERBOSE) { \
+ DRM_DEBUG( "PRIMFINISH in %s\n", __FUNCTION__); \
+ if (outcount & 3) \
+ DRM_DEBUG(" --- truncation\n"); \
+ } \
+ prim_buf->num_dwords = num_dwords; \
+ prim_buf->current_dma_ptr = dma_ptr; \
+} while(0)
+
+#define PRIMADVANCE(dev_priv) do { \
+drm_mga_prim_buf_t *tmp_buf = \
+ dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+ if (MGA_VERBOSE) { \
+ DRM_DEBUG("PRIMADVANCE in %s\n", __FUNCTION__); \
+ if (outcount & 3) \
+ DRM_DEBUG(" --- truncation\n"); \
+ } \
+ tmp_buf->num_dwords = num_dwords; \
+ tmp_buf->current_dma_ptr = dma_ptr; \
+} while (0)
+
+#define PRIMUPDATE(dev_priv) do { \
+ drm_mga_prim_buf_t *tmp_buf = \
+ dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+ tmp_buf->sec_used++; \
+} while (0)
+
+#define AGEBUF(dev_priv, buf_priv) do { \
+ drm_mga_prim_buf_t *tmp_buf = \
+ dev_priv->prim_bufs[dev_priv->current_prim_idx]; \
+ buf_priv->my_freelist->age = tmp_buf->prim_age; \
+} while (0)
+
+
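+/* PRIMOUTREG queues one register write.  Every four calls emit a 5-dword
+ * group: a header dword built from the four 8-bit ADRINDEX codes collected
+ * in tempIndex, followed by the four register values.  Callers pad with
+ * MGAREG_DMAPAD writes so that each emit ends on a multiple of four. */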
+#define PRIMOUTREG(reg, val) do { \
+ tempIndex[outcount]=ADRINDEX(reg); \
+ dma_ptr[1+outcount] = val; \
+ if (MGA_VERBOSE) \
+ DRM_DEBUG(" PRIMOUT %d: 0x%x -- 0x%x\n", \
+ num_dwords + 1 + outcount, ADRINDEX(reg), val); \
+ if( ++outcount == 4) { \
+ outcount = 0; \
+ dma_ptr[0] = *(u_int32_t *)tempIndex; \
+ dma_ptr+=5; \
+ num_dwords += 5; \
+ } \
+}while (0)
+
+/* A reduced set of the mga registers.
+ */
+
+#define MGAREG_MGA_EXEC 0x0100
+#define MGAREG_ALPHACTRL 0x2c7c
+#define MGAREG_AR0 0x1c60
+#define MGAREG_AR1 0x1c64
+#define MGAREG_AR2 0x1c68
+#define MGAREG_AR3 0x1c6c
+#define MGAREG_AR4 0x1c70
+#define MGAREG_AR5 0x1c74
+#define MGAREG_AR6 0x1c78
+#define MGAREG_CXBNDRY 0x1c80
+#define MGAREG_CXLEFT 0x1ca0
+#define MGAREG_CXRIGHT 0x1ca4
+#define MGAREG_DMAPAD 0x1c54
+#define MGAREG_DSTORG 0x2cb8
+#define MGAREG_DWGCTL 0x1c00
+#define MGAREG_DWGSYNC 0x2c4c
+#define MGAREG_FCOL 0x1c24
+#define MGAREG_FIFOSTATUS 0x1e10
+#define MGAREG_FOGCOL 0x1cf4
+#define MGAREG_FXBNDRY 0x1c84
+#define MGAREG_FXLEFT 0x1ca8
+#define MGAREG_FXRIGHT 0x1cac
+#define MGAREG_ICLEAR 0x1e18
+#define MGAREG_IEN 0x1e1c
+#define MGAREG_LEN 0x1c5c
+#define MGAREG_MACCESS 0x1c04
+#define MGAREG_PITCH 0x1c8c
+#define MGAREG_PLNWT 0x1c1c
+#define MGAREG_PRIMADDRESS 0x1e58
+#define MGAREG_PRIMEND 0x1e5c
+#define MGAREG_PRIMPTR 0x1e50
+#define MGAREG_SECADDRESS 0x2c40
+#define MGAREG_SECEND 0x2c44
+#define MGAREG_SETUPADDRESS 0x2cd0
+#define MGAREG_SETUPEND 0x2cd4
+#define MGAREG_SOFTRAP 0x2c48
+#define MGAREG_SRCORG 0x2cb4
+#define MGAREG_STATUS 0x1e14
+#define MGAREG_STENCIL 0x2cc8
+#define MGAREG_STENCILCTL 0x2ccc
+#define MGAREG_TDUALSTAGE0 0x2cf8
+#define MGAREG_TDUALSTAGE1 0x2cfc
+#define MGAREG_TEXBORDERCOL 0x2c5c
+#define MGAREG_TEXCTL 0x2c30
+#define MGAREG_TEXCTL2 0x2c3c
+#define MGAREG_TEXFILTER 0x2c58
+#define MGAREG_TEXHEIGHT 0x2c2c
+#define MGAREG_TEXORG 0x2c24
+#define MGAREG_TEXORG1 0x2ca4
+#define MGAREG_TEXORG2 0x2ca8
+#define MGAREG_TEXORG3 0x2cac
+#define MGAREG_TEXORG4 0x2cb0
+#define MGAREG_TEXTRANS 0x2c34
+#define MGAREG_TEXTRANSHIGH 0x2c38
+#define MGAREG_TEXWIDTH 0x2c28
+#define MGAREG_WACCEPTSEQ 0x1dd4
+#define MGAREG_WCODEADDR 0x1e6c
+#define MGAREG_WFLAG 0x1dc4
+#define MGAREG_WFLAG1 0x1de0
+#define MGAREG_WFLAGNB 0x1e64
+#define MGAREG_WFLAGNB1 0x1e08
+#define MGAREG_WGETMSB 0x1dc8
+#define MGAREG_WIADDR 0x1dc0
+#define MGAREG_WIADDR2 0x1dd8
+#define MGAREG_WMISC 0x1e70
+#define MGAREG_WVRTXSZ 0x1dcc
+#define MGAREG_YBOT 0x1c9c
+#define MGAREG_YDST 0x1c90
+#define MGAREG_YDSTLEN 0x1c88
+#define MGAREG_YDSTORG 0x1c94
+#define MGAREG_YTOP 0x1c98
+#define MGAREG_ZORG 0x1c0c
+
+#define PDEA_pagpxfer_enable 0x2
+
+#define WIA_wmode_suspend 0x0
+#define WIA_wmode_start 0x3
+#define WIA_wagp_agp 0x4
+
+#define DC_opcod_line_open 0x0
+#define DC_opcod_autoline_open 0x1
+#define DC_opcod_line_close 0x2
+#define DC_opcod_autoline_close 0x3
+#define DC_opcod_trap 0x4
+#define DC_opcod_texture_trap 0x6
+#define DC_opcod_bitblt 0x8
+#define DC_opcod_iload 0x9
+#define DC_atype_rpl 0x0
+#define DC_atype_rstr 0x10
+#define DC_atype_zi 0x30
+#define DC_atype_blk 0x40
+#define DC_atype_i 0x70
+#define DC_linear_xy 0x0
+#define DC_linear_linear 0x80
+#define DC_zmode_nozcmp 0x0
+#define DC_zmode_ze 0x200
+#define DC_zmode_zne 0x300
+#define DC_zmode_zlt 0x400
+#define DC_zmode_zlte 0x500
+#define DC_zmode_zgt 0x600
+#define DC_zmode_zgte 0x700
+#define DC_solid_disable 0x0
+#define DC_solid_enable 0x800
+#define DC_arzero_disable 0x0
+#define DC_arzero_enable 0x1000
+#define DC_sgnzero_disable 0x0
+#define DC_sgnzero_enable 0x2000
+#define DC_shftzero_disable 0x0
+#define DC_shftzero_enable 0x4000
+#define DC_bop_SHIFT 16
+#define DC_trans_SHIFT 20
+#define DC_bltmod_bmonolef 0x0
+#define DC_bltmod_bmonowf 0x8000000
+#define DC_bltmod_bplan 0x2000000
+#define DC_bltmod_bfcol 0x4000000
+#define DC_bltmod_bu32bgr 0x6000000
+#define DC_bltmod_bu32rgb 0xe000000
+#define DC_bltmod_bu24bgr 0x16000000
+#define DC_bltmod_bu24rgb 0x1e000000
+#define DC_pattern_disable 0x0
+#define DC_pattern_enable 0x20000000
+#define DC_transc_disable 0x0
+#define DC_transc_enable 0x40000000
+#define DC_clipdis_disable 0x0
+#define DC_clipdis_enable 0x80000000
+
+#define SETADD_mode_vertlist 0x0
+
+
+#define MGA_CLEAR_CMD (DC_opcod_trap | DC_arzero_enable | \
+ DC_sgnzero_enable | DC_shftzero_enable | \
+ (0xC << DC_bop_SHIFT) | DC_clipdis_enable | \
+ DC_solid_enable | DC_transc_enable)
+
+
+#define MGA_COPY_CMD (DC_opcod_bitblt | DC_atype_rpl | DC_linear_xy | \
+ DC_solid_disable | DC_arzero_disable | \
+ DC_sgnzero_enable | DC_shftzero_enable | \
+ (0xC << DC_bop_SHIFT) | DC_bltmod_bfcol | \
+ DC_pattern_disable | DC_transc_disable | \
+ DC_clipdis_enable) \
+
+#define MGA_FLUSH_CMD (DC_opcod_texture_trap | (0xF << DC_trans_SHIFT) |\
+ DC_arzero_enable | DC_sgnzero_enable | \
+ DC_atype_i)
+
+#endif
diff --git a/bsd/mga/mga_state.c b/bsd/mga/mga_state.c
new file mode 100644
index 00000000..73029bbe
--- /dev/null
+++ b/bsd/mga/mga_state.c
@@ -0,0 +1,1074 @@
+/* mga_state.c -- State support for mga g200/g400
+ * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Jeff Hartmann <jhartmann@precisioninsight.com>
+ * Keith Whitwell <keithw@precisioninsight.com>
+ *
+ */
+
+#define __NO_VERSION__
+#include "drmP.h"
+#include "mga_drv.h"
+#include "drm.h"
+
+typedef u_int16_t u16;
+typedef u_int32_t u32;
+
+static void mgaEmitClipRect(drm_mga_private_t * dev_priv,
+ drm_clip_rect_t * box)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->ContextState;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ /* This takes 10 dwords */
+ PRIMGETPTR(dev_priv);
+
+ /* Force reset of dwgctl (eliminates clip disable) */
+#if 0
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0);
+ PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
+#else
+ PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
+ PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000);
+ PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
+ PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0x80000000);
+#endif
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_CXBNDRY, ((box->x2) << 16) | (box->x1));
+ PRIMOUTREG(MGAREG_YTOP, box->y1 * dev_priv->stride / 2);
+ PRIMOUTREG(MGAREG_YBOT, box->y2 * dev_priv->stride / 2);
+
+ PRIMADVANCE(dev_priv);
+}
+
+static void mgaEmitContext(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->ContextState;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ /* This takes a max of 15 dwords */
+ PRIMGETPTR(dev_priv);
+
+ PRIMOUTREG(MGAREG_DSTORG, regs[MGA_CTXREG_DSTORG]);
+ PRIMOUTREG(MGAREG_MACCESS, regs[MGA_CTXREG_MACCESS]);
+ PRIMOUTREG(MGAREG_PLNWT, regs[MGA_CTXREG_PLNWT]);
+ PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
+
+ PRIMOUTREG(MGAREG_ALPHACTRL, regs[MGA_CTXREG_ALPHACTRL]);
+ PRIMOUTREG(MGAREG_FOGCOL, regs[MGA_CTXREG_FOGCOLOR]);
+ PRIMOUTREG(MGAREG_WFLAG, regs[MGA_CTXREG_WFLAG]);
+ PRIMOUTREG(MGAREG_ZORG, dev_priv->depthOffset); /* invariant */
+
+ if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ PRIMOUTREG(MGAREG_WFLAG1, regs[MGA_CTXREG_WFLAG]);
+ PRIMOUTREG(MGAREG_TDUALSTAGE0, regs[MGA_CTXREG_TDUAL0]);
+ PRIMOUTREG(MGAREG_TDUALSTAGE1, regs[MGA_CTXREG_TDUAL1]);
+ PRIMOUTREG(MGAREG_FCOL, regs[MGA_CTXREG_FCOL]);
+ } else {
+ PRIMOUTREG(MGAREG_FCOL, regs[MGA_CTXREG_FCOL]);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ }
+
+ PRIMADVANCE(dev_priv);
+}
+
+static void mgaG200EmitTex(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->TexState[0];
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ PRIMGETPTR(dev_priv);
+
+ /* This takes 20 dwords */
+
+ PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2]);
+ PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
+ PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
+ PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
+
+ PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]);
+ PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]);
+ PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]);
+ PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]);
+
+ PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]);
+ PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]);
+ PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]);
+ PRIMOUTREG(0x2d00 + 24 * 4, regs[MGA_TEXREG_WIDTH]);
+
+ PRIMOUTREG(0x2d00 + 34 * 4, regs[MGA_TEXREG_HEIGHT]);
+ PRIMOUTREG(MGAREG_TEXTRANS, 0xffff);
+ PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ PRIMADVANCE(dev_priv);
+}
+
+static void mgaG400EmitTex0(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->TexState[0];
+ int multitex = sarea_priv->WarpPipe & MGA_T2;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ PRIMGETPTR(dev_priv);
+
+ /* This takes a max of 30 dwords */
+
+ PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000);
+ PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
+ PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
+ PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
+
+ PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]);
+ PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]);
+ PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]);
+ PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]);
+
+ PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]);
+ PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]);
+ PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]);
+ PRIMOUTREG(0x2d00 + 49 * 4, 0);
+
+ PRIMOUTREG(0x2d00 + 57 * 4, 0);
+ PRIMOUTREG(0x2d00 + 53 * 4, 0);
+ PRIMOUTREG(0x2d00 + 61 * 4, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ if (!multitex) {
+ PRIMOUTREG(0x2d00 + 52 * 4, 0x40);
+ PRIMOUTREG(0x2d00 + 60 * 4, 0x40);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ }
+
+ PRIMOUTREG(0x2d00 + 54 * 4, regs[MGA_TEXREG_WIDTH] | 0x40);
+ PRIMOUTREG(0x2d00 + 62 * 4, regs[MGA_TEXREG_HEIGHT] | 0x40);
+ PRIMOUTREG(MGAREG_TEXTRANS, 0xffff);
+ PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff);
+
+ PRIMADVANCE(dev_priv);
+}
+
+#define TMC_map1_enable 0x80000000
+
+static void mgaG400EmitTex1(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->TexState[1];
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ PRIMGETPTR(dev_priv);
+
+ /* This takes 25 dwords */
+
+ PRIMOUTREG(MGAREG_TEXCTL2,
+ regs[MGA_TEXREG_CTL2] | TMC_map1_enable | 0x00008000);
+ PRIMOUTREG(MGAREG_TEXCTL, regs[MGA_TEXREG_CTL]);
+ PRIMOUTREG(MGAREG_TEXFILTER, regs[MGA_TEXREG_FILTER]);
+ PRIMOUTREG(MGAREG_TEXBORDERCOL, regs[MGA_TEXREG_BORDERCOL]);
+
+ PRIMOUTREG(MGAREG_TEXORG, regs[MGA_TEXREG_ORG]);
+ PRIMOUTREG(MGAREG_TEXORG1, regs[MGA_TEXREG_ORG1]);
+ PRIMOUTREG(MGAREG_TEXORG2, regs[MGA_TEXREG_ORG2]);
+ PRIMOUTREG(MGAREG_TEXORG3, regs[MGA_TEXREG_ORG3]);
+
+ PRIMOUTREG(MGAREG_TEXORG4, regs[MGA_TEXREG_ORG4]);
+ PRIMOUTREG(MGAREG_TEXWIDTH, regs[MGA_TEXREG_WIDTH]);
+ PRIMOUTREG(MGAREG_TEXHEIGHT, regs[MGA_TEXREG_HEIGHT]);
+ PRIMOUTREG(0x2d00 + 49 * 4, 0);
+
+ PRIMOUTREG(0x2d00 + 57 * 4, 0);
+ PRIMOUTREG(0x2d00 + 53 * 4, 0);
+ PRIMOUTREG(0x2d00 + 61 * 4, 0);
+ PRIMOUTREG(0x2d00 + 52 * 4, regs[MGA_TEXREG_WIDTH] | 0x40);
+
+ PRIMOUTREG(0x2d00 + 60 * 4, regs[MGA_TEXREG_HEIGHT] | 0x40);
+ PRIMOUTREG(MGAREG_TEXTRANS, 0xffff);
+ PRIMOUTREG(MGAREG_TEXTRANSHIGH, 0xffff);
+ PRIMOUTREG(MGAREG_TEXCTL2, regs[MGA_TEXREG_CTL2] | 0x00008000);
+
+ PRIMADVANCE(dev_priv);
+}
+
+#define EMIT_PIPE 50
+static void mgaG400EmitPipe(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int pipe = sarea_priv->WarpPipe;
+ float fParam = 12800.0f;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ PRIMGETPTR(dev_priv);
+
+ /* This takes 50 dwords */
+
+ /* Establish vertex size.
+ */
+ PRIMOUTREG(MGAREG_WIADDR2, WIA_wmode_suspend);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ if (pipe & MGA_T2) {
+ PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001e09);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x1e000000);
+ } else {
+ if (dev_priv->WarpPipe & MGA_T2) {
+ /* Flush the WARP pipe */
+ PRIMOUTREG(MGAREG_YDST, 0);
+ PRIMOUTREG(MGAREG_FXLEFT, 0);
+ PRIMOUTREG(MGAREG_FXRIGHT, 1);
+ PRIMOUTREG(MGAREG_DWGCTL, MGA_FLUSH_CMD);
+
+ PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 1);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGSYNC, 0x7000);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ PRIMOUTREG(MGAREG_TEXCTL2, 0 | 0x00008000);
+ PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0);
+ PRIMOUTREG(MGAREG_TEXCTL2, 0x80 | 0x00008000);
+ PRIMOUTREG(MGAREG_LEN + MGAREG_MGA_EXEC, 0);
+ }
+
+ PRIMOUTREG(MGAREG_WVRTXSZ, 0x00001807);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0);
+ PRIMOUTREG(MGAREG_WACCEPTSEQ, 0x18000000);
+ }
+
+ PRIMOUTREG(MGAREG_WFLAG, 0);
+ PRIMOUTREG(MGAREG_WFLAG1, 0);
+ PRIMOUTREG(0x2d00 + 56 * 4, *((u32 *) (&fParam)));
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+
+ PRIMOUTREG(0x2d00 + 49 * 4, 0); /* Tex stage 0 */
+ PRIMOUTREG(0x2d00 + 57 * 4, 0); /* Tex stage 0 */
+ PRIMOUTREG(0x2d00 + 53 * 4, 0); /* Tex stage 1 */
+ PRIMOUTREG(0x2d00 + 61 * 4, 0); /* Tex stage 1 */
+
+ PRIMOUTREG(0x2d00 + 54 * 4, 0x40); /* Tex stage 0 : w */
+ PRIMOUTREG(0x2d00 + 62 * 4, 0x40); /* Tex stage 0 : h */
+ PRIMOUTREG(0x2d00 + 52 * 4, 0x40); /* Tex stage 1 : w */
+ PRIMOUTREG(0x2d00 + 60 * 4, 0x40); /* Tex stage 1 : h */
+
+ /* DMA padding required due to hw bug */
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_WIADDR2,
+ (u32) (dev_priv->WarpIndex[pipe].
+ phys_addr | WIA_wmode_start | WIA_wagp_agp));
+ PRIMADVANCE(dev_priv);
+}
+
+static void mgaG200EmitPipe(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int pipe = sarea_priv->WarpPipe;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ PRIMGETPTR(dev_priv);
+
+ /* This takes 15 dwords */
+
+ PRIMOUTREG(MGAREG_WIADDR, WIA_wmode_suspend);
+ PRIMOUTREG(MGAREG_WVRTXSZ, 7);
+ PRIMOUTREG(MGAREG_WFLAG, 0);
+ PRIMOUTREG(0x2d00 + 24 * 4, 0); /* tex w/h */
+
+ PRIMOUTREG(0x2d00 + 25 * 4, 0x100);
+ PRIMOUTREG(0x2d00 + 34 * 4, 0); /* tex w/h */
+ PRIMOUTREG(0x2d00 + 42 * 4, 0xFFFF);
+ PRIMOUTREG(0x2d00 + 60 * 4, 0xFFFF);
+
+ /* DMA padding required due to hw bug */
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_DMAPAD, 0xffffffff);
+ PRIMOUTREG(MGAREG_WIADDR,
+ (u32) (dev_priv->WarpIndex[pipe].
+ phys_addr | WIA_wmode_start | WIA_wagp_agp));
+
+ PRIMADVANCE(dev_priv);
+}
+
+static void mgaEmitState(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int dirty = sarea_priv->dirty;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ int multitex = sarea_priv->WarpPipe & MGA_T2;
+
+ if (sarea_priv->WarpPipe != dev_priv->WarpPipe) {
+ mgaG400EmitPipe(dev_priv);
+ dev_priv->WarpPipe = sarea_priv->WarpPipe;
+ }
+
+ if (dirty & MGA_UPLOAD_CTX) {
+ mgaEmitContext(dev_priv);
+ sarea_priv->dirty &= ~MGA_UPLOAD_CTX;
+ }
+
+ if (dirty & MGA_UPLOAD_TEX0) {
+ mgaG400EmitTex0(dev_priv);
+ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+ }
+
+ if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
+ mgaG400EmitTex1(dev_priv);
+ sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
+ }
+ } else {
+ if (sarea_priv->WarpPipe != dev_priv->WarpPipe) {
+ mgaG200EmitPipe(dev_priv);
+ dev_priv->WarpPipe = sarea_priv->WarpPipe;
+ }
+
+ if (dirty & MGA_UPLOAD_CTX) {
+ mgaEmitContext(dev_priv);
+ sarea_priv->dirty &= ~MGA_UPLOAD_CTX;
+ }
+
+ if (dirty & MGA_UPLOAD_TEX0) {
+ mgaG200EmitTex(dev_priv);
+ sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
+ }
+ }
+}
+
+
+/* Disallow all write destinations except the front and backbuffer.
+ */
+static int mgaVerifyContext(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->ContextState;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if (regs[MGA_CTXREG_DSTORG] != dev_priv->frontOffset &&
+ regs[MGA_CTXREG_DSTORG] != dev_priv->backOffset) {
+ DRM_DEBUG("BAD DSTORG: %x (front %x, back %x)\n\n",
+ regs[MGA_CTXREG_DSTORG], dev_priv->frontOffset,
+ dev_priv->backOffset);
+ regs[MGA_CTXREG_DSTORG] = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Disallow texture reads from PCI space.
+ */
+static int mgaVerifyTex(drm_mga_private_t * dev_priv, int unit)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if ((sarea_priv->TexState[unit][MGA_TEXREG_ORG] & 0x3) == 0x1) {
+ DRM_DEBUG("BAD TEXREG_ORG: %x, unit %d\n",
+ sarea_priv->TexState[unit][MGA_TEXREG_ORG],
+ unit);
+ sarea_priv->TexState[unit][MGA_TEXREG_ORG] = 0;
+ return -1;
+ }
+
+ return 0;
+}
+
+static int mgaVerifyState(drm_mga_private_t * dev_priv)
+{
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int dirty = sarea_priv->dirty;
+ int rv = 0;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+ if (dirty & MGA_UPLOAD_CTX)
+ rv |= mgaVerifyContext(dev_priv);
+
+ if (dirty & MGA_UPLOAD_TEX0)
+ rv |= mgaVerifyTex(dev_priv, 0);
+
+ if (dev_priv->chipset == MGA_CARD_TYPE_G400) {
+ if (dirty & MGA_UPLOAD_TEX1)
+ rv |= mgaVerifyTex(dev_priv, 1);
+
+ if (dirty & MGA_UPLOAD_PIPE)
+ rv |= (sarea_priv->WarpPipe > MGA_MAX_G400_PIPES);
+ } else {
+ if (dirty & MGA_UPLOAD_PIPE)
+ rv |= (sarea_priv->WarpPipe > MGA_MAX_G200_PIPES);
+ }
+
+ return rv == 0;
+}
+
+static int mgaVerifyIload(drm_mga_private_t * dev_priv,
+ unsigned long bus_address,
+ unsigned int dstOrg, int length)
+{
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if (dstOrg < dev_priv->textureOffset ||
+ dstOrg + length >
+ (dev_priv->textureOffset + dev_priv->textureSize)) {
+ return EINVAL;
+ }
+ if (length % 64) {
+ return EINVAL;
+ }
+ return 0;
+}
+
+/* This copies a 64-byte-aligned AGP region to the framebuffer
+ * with a standard blit; the ioctl needs to do the checking. */
+
+static void mga_dma_dispatch_tex_blit(drm_device_t * dev,
+ unsigned long bus_address,
+ int length, unsigned int destOrg)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ int use_agp = PDEA_pagpxfer_enable | 0x00000001;
+ u16 y2;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ y2 = length / 64;
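+ /* The linear AGP region is copied as a rectangle 64 pixels wide:
+  * PITCH and AR5 are programmed to 64, x spans 0..63 (AR0 = 63,
+  * FXBNDRY = 63 << 16), and y2 = length / 64 supplies the row count,
+  * which is why mgaVerifyIload() rejects lengths that are not
+  * multiples of 64. */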
+
+ PRIM_OVERFLOW(dev, dev_priv, 30);
+ PRIMGETPTR(dev_priv);
+
+ PRIMOUTREG(MGAREG_DSTORG, destOrg);
+ PRIMOUTREG(MGAREG_MACCESS, 0x00000000);
+ DRM_DEBUG("srcorg : %lx\n", bus_address | use_agp);
+ PRIMOUTREG(MGAREG_SRCORG, (u32) bus_address | use_agp);
+ PRIMOUTREG(MGAREG_AR5, 64);
+
+ PRIMOUTREG(MGAREG_PITCH, 64);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD);
+
+ PRIMOUTREG(MGAREG_AR0, 63);
+ PRIMOUTREG(MGAREG_AR3, 0);
+ PRIMOUTREG(MGAREG_FXBNDRY, (63 << 16));
+ PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC, y2);
+
+ PRIMOUTREG(MGAREG_SRCORG, 0);
+ PRIMOUTREG(MGAREG_PITCH, dev_priv->stride / dev_priv->cpp);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMADVANCE(dev_priv);
+}
+
+static void mga_dma_dispatch_vertex(drm_device_t * dev, drm_buf_t * buf)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned long address = (unsigned long) buf->bus_address;
+ int length = buf->used;
+ int use_agp = PDEA_pagpxfer_enable;
+ int i = 0;
+ int primary_needed;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ DRM_DEBUG("dispatch vertex %d addr 0x%lx, "
+ "length 0x%x nbox %d dirty %x\n",
+ buf->idx, address, length,
+ sarea_priv->nbox, sarea_priv->dirty);
+
+ DRM_DEBUG("used : %d, total : %d\n", buf->used, buf->total);
+
+ if (buf->used) {
+ /* WARNING: if you change any of the state functions verify
+ * these numbers (Overestimating this doesn't hurt).
+ */
+ buf_priv->dispatched = 1;
+ primary_needed = (50 + 15 + 15 + 30 + 25 +
+ 10 + 15 * MGA_NR_SAREA_CLIPRECTS);
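+ /* The per-cliprect term covers the 10-dword mgaEmitClipRect emit
+  * plus the 5-dword SECADDRESS/SECEND group issued for each box
+  * below; the fixed terms overestimate the worst-case mgaEmitState
+  * emit. */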
+ PRIM_OVERFLOW(dev, dev_priv, primary_needed);
+ mgaEmitState(dev_priv);
+
+ do {
+ if (i < sarea_priv->nbox) {
+ DRM_DEBUG("idx %d Emit box %d/%d:"
+ "%d,%d - %d,%d\n",
+ buf->idx,
+ i, sarea_priv->nbox,
+ sarea_priv->boxes[i].x1,
+ sarea_priv->boxes[i].y1,
+ sarea_priv->boxes[i].x2,
+ sarea_priv->boxes[i].y2);
+
+ mgaEmitClipRect(dev_priv,
+ &sarea_priv->boxes[i]);
+ }
+
+ PRIMGETPTR(dev_priv);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_SECADDRESS,
+ ((u32) address) | TT_VERTEX);
+ PRIMOUTREG(MGAREG_SECEND,
+ (((u32) (address + length)) | use_agp));
+ PRIMADVANCE(dev_priv);
+ } while (++i < sarea_priv->nbox);
+ }
+
+ if (buf_priv->discard) {
+ if (buf_priv->dispatched == 1)
+ AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
+ mga_freelist_put(dev, buf);
+ }
+
+
+}
+
+
+static void mga_dma_dispatch_indices(drm_device_t * dev,
+ drm_buf_t * buf,
+ unsigned int start, unsigned int end)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_buf_priv_t *buf_priv = buf->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int address = (unsigned int) buf->bus_address;
+ int use_agp = PDEA_pagpxfer_enable;
+ int i = 0;
+ int primary_needed;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ DRM_DEBUG("dispatch indices %d addr 0x%x, "
+ "start 0x%x end 0x%x nbox %d dirty %x\n",
+ buf->idx, address, start, end,
+ sarea_priv->nbox, sarea_priv->dirty);
+
+ if (start != end) {
+ /* WARNING: if you change any of the state functions verify
+ * these numbers (Overestimating this doesn't hurt).
+ */
+ buf_priv->dispatched = 1;
+ primary_needed = (50 + 15 + 15 + 30 + 25 +
+ 10 + 15 * MGA_NR_SAREA_CLIPRECTS);
+ PRIM_OVERFLOW(dev, dev_priv, primary_needed);
+ mgaEmitState(dev_priv);
+
+ do {
+ if (i < sarea_priv->nbox) {
+ DRM_DEBUG("idx %d Emit box %d/%d:"
+ "%d,%d - %d,%d\n",
+ buf->idx,
+ i, sarea_priv->nbox,
+ sarea_priv->boxes[i].x1,
+ sarea_priv->boxes[i].y1,
+ sarea_priv->boxes[i].x2,
+ sarea_priv->boxes[i].y2);
+
+ mgaEmitClipRect(dev_priv,
+ &sarea_priv->boxes[i]);
+ }
+
+ PRIMGETPTR(dev_priv);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_SETUPADDRESS,
+ ((address + start) |
+ SETADD_mode_vertlist));
+ PRIMOUTREG(MGAREG_SETUPEND,
+ ((address + end) | use_agp));
+ PRIMADVANCE(dev_priv);
+ } while (++i < sarea_priv->nbox);
+ }
+ if (buf_priv->discard) {
+ if (buf_priv->dispatched == 1)
+ AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
+ mga_freelist_put(dev, buf);
+ }
+}
+
+
+static void mga_dma_dispatch_clear(drm_device_t * dev, int flags,
+ unsigned int clear_color,
+ unsigned int clear_zval)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->ContextState;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ unsigned int cmd;
+ int i;
+ int primary_needed;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if (dev_priv->sgram)
+ cmd = MGA_CLEAR_CMD | DC_atype_blk;
+ else
+ cmd = MGA_CLEAR_CMD | DC_atype_rstr;
+
+ primary_needed = nbox * 70;
+ if (primary_needed == 0)
+ primary_needed = 70;
+ PRIM_OVERFLOW(dev, dev_priv, primary_needed);
+ PRIMGETPTR(dev_priv);
+
+ for (i = 0; i < nbox; i++) {
+ unsigned int height = pbox[i].y2 - pbox[i].y1;
+
+ DRM_DEBUG("dispatch clear %d,%d-%d,%d flags %x!\n",
+ pbox[i].x1, pbox[i].y1, pbox[i].x2,
+ pbox[i].y2, flags);
+
+ if (flags & MGA_FRONT) {
+ DRM_DEBUG("clear front\n");
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_YDSTLEN,
+ (pbox[i].y1 << 16) | height);
+ PRIMOUTREG(MGAREG_FXBNDRY,
+ (pbox[i].x2 << 16) | pbox[i].x1);
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_FCOL, clear_color);
+ PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset);
+ PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd);
+ }
+
+ if (flags & MGA_BACK) {
+ DRM_DEBUG("clear back\n");
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_YDSTLEN,
+ (pbox[i].y1 << 16) | height);
+ PRIMOUTREG(MGAREG_FXBNDRY,
+ (pbox[i].x2 << 16) | pbox[i].x1);
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_FCOL, clear_color);
+ PRIMOUTREG(MGAREG_DSTORG, dev_priv->backOffset);
+ PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd);
+ }
+
+ if (flags & MGA_DEPTH) {
+ DRM_DEBUG("clear depth\n");
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_YDSTLEN,
+ (pbox[i].y1 << 16) | height);
+ PRIMOUTREG(MGAREG_FXBNDRY,
+ (pbox[i].x2 << 16) | pbox[i].x1);
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_FCOL, clear_zval);
+ PRIMOUTREG(MGAREG_DSTORG, dev_priv->depthOffset);
+ PRIMOUTREG(MGAREG_DWGCTL + MGAREG_MGA_EXEC, cmd);
+ }
+ }
+
+ /* Force reset of DWGCTL */
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
+ PRIMADVANCE(dev_priv);
+}
+
+static void mga_dma_dispatch_swap(drm_device_t * dev)
+{
+ drm_mga_private_t *dev_priv = dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ unsigned int *regs = sarea_priv->ContextState;
+ int nbox = sarea_priv->nbox;
+ drm_clip_rect_t *pbox = sarea_priv->boxes;
+ int i;
+ int primary_needed;
+ PRIMLOCALS;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ primary_needed = nbox * 5;
+ primary_needed += 60;
+ PRIM_OVERFLOW(dev, dev_priv, primary_needed);
+ PRIMGETPTR(dev_priv);
+
+ PRIMOUTREG(MGAREG_DSTORG, dev_priv->frontOffset);
+ PRIMOUTREG(MGAREG_MACCESS, dev_priv->mAccess);
+ PRIMOUTREG(MGAREG_SRCORG, dev_priv->backOffset);
+ PRIMOUTREG(MGAREG_AR5, dev_priv->stride / 2);
+
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DWGCTL, MGA_COPY_CMD);
+
+ for (i = 0; i < nbox; i++) {
+ unsigned int h = pbox[i].y2 - pbox[i].y1;
+ unsigned int start = pbox[i].y1 * dev_priv->stride / 2;
+
+ DRM_DEBUG("dispatch swap %d,%d-%d,%d!\n",
+ pbox[i].x1, pbox[i].y1, pbox[i].x2, pbox[i].y2);
+
+ PRIMOUTREG(MGAREG_AR0, start + pbox[i].x2 - 1);
+ PRIMOUTREG(MGAREG_AR3, start + pbox[i].x1);
+ PRIMOUTREG(MGAREG_FXBNDRY,
+ pbox[i].x1 | ((pbox[i].x2 - 1) << 16));
+ PRIMOUTREG(MGAREG_YDSTLEN + MGAREG_MGA_EXEC,
+ (pbox[i].y1 << 16) | h);
+ }
+
+ /* Force reset of DWGCTL */
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_DMAPAD, 0);
+ PRIMOUTREG(MGAREG_SRCORG, 0);
+ PRIMOUTREG(MGAREG_DWGCTL, regs[MGA_CTXREG_DWGCTL]);
+
+ PRIMADVANCE(dev_priv);
+}
+
+int mga_clear_bufs(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_mga_clear_t clear;
+ int s;
+
+ clear = *(drm_mga_clear_t *) data;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_clear_bufs called without lock held\n");
+ return EINVAL;
+ }
+
+ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+ /* Make sure we restore the 3D state next time.
+ */
+ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX;
+ mga_dma_dispatch_clear(dev, clear.flags,
+ clear.clear_color, clear.clear_depth);
+ PRIMUPDATE(dev_priv);
+ mga_flush_write_combine();
+ s = splsofttq();
+ mga_dma_schedule(dev, 1);
+ splx(s);
+ return 0;
+}
+
+int mga_swap_bufs(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ int s;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_swap_bufs called without lock held\n");
+ return EINVAL;
+ }
+
+ if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
+ sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;
+
+ /* Make sure we restore the 3D state next time.
+ */
+ dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CTX;
+ mga_dma_dispatch_swap(dev);
+ PRIMUPDATE(dev_priv);
+ set_bit(MGA_BUF_SWAP_PENDING,
+ &dev_priv->current_prim->buffer_status);
+ mga_flush_write_combine();
+ s = splsofttq();
+ mga_dma_schedule(dev, 1);
+ splx(s);
+ return 0;
+}
+
+int mga_iload(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+ drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ drm_mga_iload_t iload;
+ unsigned long bus_address;
+ int s;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ DRM_DEBUG("Starting Iload\n");
+ iload = *(drm_mga_iload_t *) data;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_iload called without lock held\n");
+ return EINVAL;
+ }
+
+ buf = dma->buflist[iload.idx];
+ buf_priv = buf->dev_private;
+ bus_address = buf->bus_address;
+ DRM_DEBUG("bus_address %lx, length %d, destorg : %x\n",
+ bus_address, iload.length, iload.destOrg);
+
+ if (mgaVerifyIload(dev_priv,
+ bus_address, iload.destOrg, iload.length)) {
+ mga_freelist_put(dev, buf);
+ return EINVAL;
+ }
+
+ sarea_priv->dirty |= MGA_UPLOAD_CTX;
+
+ mga_dma_dispatch_tex_blit(dev, bus_address, iload.length,
+ iload.destOrg);
+ AGEBUF(dev_priv, buf_priv);
+ buf_priv->discard = 1;
+ mga_freelist_put(dev, buf);
+ mga_flush_write_combine();
+ s = splsofttq();
+ mga_dma_schedule(dev, 1);
+ splx(s);
+ return 0;
+}
+
+int mga_vertex(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ drm_mga_vertex_t vertex;
+
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ vertex = *(drm_mga_vertex_t *) data;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_vertex called without lock held\n");
+ return EINVAL;
+ }
+
+ DRM_DEBUG("mga_vertex\n");
+
+ buf = dma->buflist[vertex.idx];
+ buf_priv = buf->dev_private;
+
+ buf->used = vertex.used;
+ buf_priv->discard = vertex.discard;
+
+ if (!mgaVerifyState(dev_priv)) {
+ if (vertex.discard) {
+ if (buf_priv->dispatched == 1)
+ AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
+ mga_freelist_put(dev, buf);
+ }
+ DRM_DEBUG("bad state\n");
+ return EINVAL;
+ }
+
+ mga_dma_dispatch_vertex(dev, buf);
+
+ PRIMUPDATE(dev_priv);
+ mga_flush_write_combine();
+ mga_dma_schedule(dev, 1);
+ return 0;
+}
+
+
+int mga_indices(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_mga_private_t *dev_priv =
+ (drm_mga_private_t *) dev->dev_private;
+ drm_device_dma_t *dma = dev->dma;
+ drm_buf_t *buf;
+ drm_mga_buf_priv_t *buf_priv;
+ drm_mga_indices_t indices;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ indices = *(drm_mga_indices_t *) data;
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_indices called without lock held\n");
+ return EINVAL;
+ }
+
+ DRM_DEBUG("mga_indices\n");
+
+ buf = dma->buflist[indices.idx];
+ buf_priv = buf->dev_private;
+
+ buf_priv->discard = indices.discard;
+
+ if (!mgaVerifyState(dev_priv)) {
+ if (indices.discard) {
+ if (buf_priv->dispatched == 1)
+ AGEBUF(dev_priv, buf_priv);
+ buf_priv->dispatched = 0;
+ mga_freelist_put(dev, buf);
+ }
+ return EINVAL;
+ }
+
+ mga_dma_dispatch_indices(dev, buf, indices.start, indices.end);
+
+ PRIMUPDATE(dev_priv);
+ mga_flush_write_combine();
+ mga_dma_schedule(dev, 1);
+ return 0;
+}
+
+
+
+static int
+mga_dma_get_buffers(drm_device_t * dev, drm_dma_t * d, struct proc *p)
+{
+ int i, error;
+ drm_buf_t *buf;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ for (i = d->granted_count; i < d->request_count; i++) {
+ buf = mga_freelist_get(dev);
+ if (!buf)
+ break;
+ buf->pid = p->p_pid;
+ error = copyout(&buf->idx,
+ &d->request_indices[i],
+ sizeof(buf->idx));
+ if (error) return error;
+ error = copyout(&buf->total,
+ &d->request_sizes[i],
+ sizeof(buf->total));
+ if (error) return error;
+ ++d->granted_count;
+ }
+ return 0;
+}
+
+int mga_dma(dev_t kdev, u_long cmd, caddr_t data,
+ int flags, struct proc *p)
+{
+ drm_device_t *dev = kdev->si_drv1;
+ drm_device_dma_t *dma = dev->dma;
+ int retcode = 0;
+ drm_dma_t d;
+ DRM_DEBUG("%s\n", __FUNCTION__);
+
+ d = *(drm_dma_t *) data;
+ DRM_DEBUG("%d %d: %d send, %d req\n",
+ p->p_pid, d.context, d.send_count, d.request_count);
+
+ if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
+ DRM_ERROR("mga_dma called without lock held\n");
+ return EINVAL;
+ }
+
+ /* Please don't send us buffers.
+ */
+ if (d.send_count != 0) {
+ DRM_ERROR
+ ("Process %d trying to send %d buffers via drmDMA\n",
+ p->p_pid, d.send_count);
+ return EINVAL;
+ }
+
+ /* We'll send you buffers.
+ */
+ if (d.request_count < 0 || d.request_count > dma->buf_count) {
+ DRM_ERROR
+ ("Process %d trying to get %d buffers (of %d max)\n",
+ p->p_pid, d.request_count, dma->buf_count);
+ return EINVAL;
+ }
+
+ d.granted_count = 0;
+
+ if (d.request_count) {
+ retcode = mga_dma_get_buffers(dev, &d, p);
+ }
+
+ DRM_DEBUG("%d returning, granted = %d\n",
+ p->p_pid, d.granted_count);
+ *(drm_dma_t *) data = d;
+ return retcode;
+}
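
The mga_dma handler above only hands buffers out: a client fills in a drm_dma_t with a request count and result arrays, and mga_dma_get_buffers() writes back the granted indices and sizes. The fragment below is a minimal, untested sketch of that calling convention from user space. It assumes the character device created by make_dev() shows up as /dev/mga, that drm_dma_t and DRM_IOCTL_DMA come from the same drm.h the driver uses, and it deliberately omits the hardware-lock acquisition that mga_dma requires before it will grant anything.

    #include <sys/ioctl.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include "drm.h"                /* drm_dma_t, DRM_IOCTL_DMA */

    #define MAX_BUFS 16

    /* Ask the driver for up to MAX_BUFS DMA buffers in context `ctx'.
     * Returns the number granted, or -1 on error. */
    int request_mga_buffers(int ctx)
    {
            int indices[MAX_BUFS], sizes[MAX_BUFS], i;
            drm_dma_t d;
            int fd = open("/dev/mga", O_RDWR);      /* node name assumed */

            if (fd < 0)
                    return -1;

            /* NOTE: a real client must hold the DRM hardware lock here;
             * mga_dma returns EINVAL otherwise. */
            memset(&d, 0, sizeof(d));
            d.context = ctx;
            d.send_count = 0;               /* the driver refuses sent buffers */
            d.request_count = MAX_BUFS;     /* must not exceed dma->buf_count */
            d.request_indices = indices;    /* filled by mga_dma_get_buffers */
            d.request_sizes = sizes;

            if (ioctl(fd, DRM_IOCTL_DMA, &d) < 0) {
                    close(fd);
                    return -1;
            }

            for (i = 0; i < d.granted_count; i++)
                    printf("granted buffer %d, %d bytes\n", indices[i], sizes[i]);

            close(fd);
            return d.granted_count;
    }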