author    | Kevin E Martin <kem@kem.org> | 2000-02-22 15:43:59 +0000
committer | Kevin E Martin <kem@kem.org> | 2000-02-22 15:43:59 +0000
commit    | 7a9b291ab55dd9dcfeb35217f6105ad57c94f433
tree      | 8941caba3a974912d8abc9f2f7b307c82ef5a54a /linux-core
parent    | 9a1197da5cd84624f5b0741e0a20fee60eb8b4f1
Import of XFree86 3.9.18
Diffstat (limited to 'linux-core')
-rw-r--r-- | linux-core/Makefile.kernel |   9
-rw-r--r-- | linux-core/i810_dma.c      | 762
-rw-r--r-- | linux-core/i810_drv.c      | 583
-rw-r--r-- | linux-core/i810_drv.h      |  76
-rw-r--r-- | linux-core/mga_drv.c       | 576
5 files changed, 2005 insertions, 1 deletion
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index c286d8fd..44d75c48 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -9,7 +9,7 @@ # Note 2! The CFLAGS definitions are now inherited from the # parent makes.. # -# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.2 1999/09/27 14:59:24 dawes Exp $ +# $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/Makefile.kernel,v 1.5 2000/02/14 06:27:25 martin Exp $ L_TARGET := libdrm.a @@ -22,7 +22,14 @@ ifdef CONFIG_DRM_GAMMA M_OBJS += gamma.o endif +ifdef CONFIG_DRM_TDFX +M_OBJS += tdfx.o +endif + include $(TOPDIR)/Rules.make gamma.o: gamma_drv.o gamma_dma.o $(L_TARGET) $(LD) $(LD_RFLAG) -r -o $@ gamma_drv.o gamma_dma.o -L. -ldrm + +tdfx.o: tdfx_drv.o tdfx_context.o $(L_TARGET) + $(LD) $(LD_RFLAG) -r -o $@ tdfx_drv.o tdfx_context.o -L. -ldrm diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c new file mode 100644 index 00000000..09959b65 --- /dev/null +++ b/linux-core/i810_dma.c @@ -0,0 +1,762 @@ +/* i810_dma.c -- DMA support for the i810 -*- linux-c -*- + * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. 
(Rik) Faith <faith@precisioninsight.com> + * Jeff Hartmann <jhartmann@precisioninsight.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_dma.c,v 1.1 2000/02/11 17:26:04 dawes Exp $ + * + */ + +#define __NO_VERSION__ +#include "drmP.h" +#include "i810_drv.h" + +#include <linux/interrupt.h> /* For task queue support */ + +#define I810_REG(reg) 2 +#define I810_BASE(reg) ((unsigned long) \ + dev->maplist[I810_REG(reg)]->handle) +#define I810_ADDR(reg) (I810_BASE(reg) + reg) +#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg) +#define I810_READ(reg) I810_DEREF(reg) +#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0) + +void i810_dma_init(drm_device_t *dev) +{ + printk(KERN_INFO "i810_dma_init\n"); +} + +void i810_dma_cleanup(drm_device_t *dev) +{ + printk(KERN_INFO "i810_dma_cleanup\n"); +} + +static inline void i810_dma_dispatch(drm_device_t *dev, unsigned long address, + unsigned long length) +{ + printk(KERN_INFO "i810_dma_dispatch\n"); +} + +static inline void i810_dma_quiescent(drm_device_t *dev) +{ +} + +static inline void i810_dma_ready(drm_device_t *dev) +{ + i810_dma_quiescent(dev); + printk(KERN_INFO "i810_dma_ready\n"); +} + +static inline int i810_dma_is_ready(drm_device_t *dev) +{ + + i810_dma_quiescent(dev); + + printk(KERN_INFO "i810_dma_is_ready\n"); + return 1; +} + + +static void i810_dma_service(int irq, void *device, struct pt_regs *regs) +{ + drm_device_t *dev = (drm_device_t *)device; + drm_device_dma_t *dma = dev->dma; + + atomic_inc(&dev->total_irq); + if (i810_dma_is_ready(dev)) { + /* Free previous buffer */ + if (test_and_set_bit(0, &dev->dma_flag)) { + atomic_inc(&dma->total_missed_free); + return; + } + if (dma->this_buffer) { + drm_free_buffer(dev, dma->this_buffer); + dma->this_buffer = NULL; + } + clear_bit(0, &dev->dma_flag); + + /* Dispatch new buffer */ + queue_task(&dev->tq, &tq_immediate); + mark_bh(IMMEDIATE_BH); + } +} + +/* Only called by i810_dma_schedule. 
*/ +static int i810_do_dma(drm_device_t *dev, int locked) +{ + unsigned long address; + unsigned long length; + drm_buf_t *buf; + int retcode = 0; + drm_device_dma_t *dma = dev->dma; +#if DRM_DMA_HISTOGRAM + cycles_t dma_start, dma_stop; +#endif + + if (test_and_set_bit(0, &dev->dma_flag)) { + atomic_inc(&dma->total_missed_dma); + return -EBUSY; + } + +#if DRM_DMA_HISTOGRAM + dma_start = get_cycles(); +#endif + + if (!dma->next_buffer) { + DRM_ERROR("No next_buffer\n"); + clear_bit(0, &dev->dma_flag); + return -EINVAL; + } + + buf = dma->next_buffer; + address = (unsigned long)buf->bus_address; + length = buf->used; + + + DRM_DEBUG("context %d, buffer %d (%ld bytes)\n", + buf->context, buf->idx, length); + + if (buf->list == DRM_LIST_RECLAIM) { + drm_clear_next_buffer(dev); + drm_free_buffer(dev, buf); + clear_bit(0, &dev->dma_flag); + return -EINVAL; + } + + if (!length) { + DRM_ERROR("0 length buffer\n"); + drm_clear_next_buffer(dev); + drm_free_buffer(dev, buf); + clear_bit(0, &dev->dma_flag); + return 0; + } + + if (!i810_dma_is_ready(dev)) { + clear_bit(0, &dev->dma_flag); + return -EBUSY; + } + + if (buf->while_locked) { + if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) { + DRM_ERROR("Dispatching buffer %d from pid %d" + " \"while locked\", but no lock held\n", + buf->idx, buf->pid); + } + } else { + if (!locked && !drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + atomic_inc(&dma->total_missed_lock); + clear_bit(0, &dev->dma_flag); + return -EBUSY; + } + } + + if (dev->last_context != buf->context + && !(dev->queuelist[buf->context]->flags + & _DRM_CONTEXT_PRESERVED)) { + /* PRE: dev->last_context != buf->context */ + if (drm_context_switch(dev, dev->last_context, buf->context)) { + drm_clear_next_buffer(dev); + drm_free_buffer(dev, buf); + } + retcode = -EBUSY; + goto cleanup; + + /* POST: we will wait for the context + switch and will dispatch on a later call + when dev->last_context == buf->context. + NOTE WE HOLD THE LOCK THROUGHOUT THIS + TIME! 
*/ + } + + drm_clear_next_buffer(dev); + buf->pending = 1; + buf->waiting = 0; + buf->list = DRM_LIST_PEND; +#if DRM_DMA_HISTOGRAM + buf->time_dispatched = get_cycles(); +#endif + + i810_dma_dispatch(dev, address, length); + drm_free_buffer(dev, dma->this_buffer); + dma->this_buffer = buf; + + atomic_add(length, &dma->total_bytes); + atomic_inc(&dma->total_dmas); + + if (!buf->while_locked && !dev->context_flag && !locked) { + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } +cleanup: + + clear_bit(0, &dev->dma_flag); + +#if DRM_DMA_HISTOGRAM + dma_stop = get_cycles(); + atomic_inc(&dev->histo.dma[drm_histogram_slot(dma_stop - dma_start)]); +#endif + + return retcode; +} + +static void i810_dma_schedule_timer_wrapper(unsigned long dev) +{ + i810_dma_schedule((drm_device_t *)dev, 0); +} + +static void i810_dma_schedule_tq_wrapper(void *dev) +{ + i810_dma_schedule(dev, 0); +} + +int i810_dma_schedule(drm_device_t *dev, int locked) +{ + int next; + drm_queue_t *q; + drm_buf_t *buf; + int retcode = 0; + int processed = 0; + int missed; + int expire = 20; + drm_device_dma_t *dma = dev->dma; +#if DRM_DMA_HISTOGRAM + cycles_t schedule_start; +#endif + + if (test_and_set_bit(0, &dev->interrupt_flag)) { + /* Not reentrant */ + atomic_inc(&dma->total_missed_sched); + return -EBUSY; + } + missed = atomic_read(&dma->total_missed_sched); + +#if DRM_DMA_HISTOGRAM + schedule_start = get_cycles(); +#endif + +again: + if (dev->context_flag) { + clear_bit(0, &dev->interrupt_flag); + return -EBUSY; + } + if (dma->next_buffer) { + /* Unsent buffer that was previously + selected, but that couldn't be sent + because the lock could not be obtained + or the DMA engine wasn't ready. Try + again. */ + atomic_inc(&dma->total_tried); + if (!(retcode = i810_do_dma(dev, locked))) { + atomic_inc(&dma->total_hit); + ++processed; + } + } else { + do { + next = drm_select_queue(dev, + i810_dma_schedule_timer_wrapper); + if (next >= 0) { + q = dev->queuelist[next]; + buf = drm_waitlist_get(&q->waitlist); + dma->next_buffer = buf; + dma->next_queue = q; + if (buf && buf->list == DRM_LIST_RECLAIM) { + drm_clear_next_buffer(dev); + drm_free_buffer(dev, buf); + } + } + } while (next >= 0 && !dma->next_buffer); + if (dma->next_buffer) { + if (!(retcode = i810_do_dma(dev, locked))) { + ++processed; + } + } + } + + if (--expire) { + if (missed != atomic_read(&dma->total_missed_sched)) { + atomic_inc(&dma->total_lost); + if (i810_dma_is_ready(dev)) goto again; + } + if (processed && i810_dma_is_ready(dev)) { + atomic_inc(&dma->total_lost); + processed = 0; + goto again; + } + } + + clear_bit(0, &dev->interrupt_flag); + +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.schedule[drm_histogram_slot(get_cycles() + - schedule_start)]); +#endif + return retcode; +} + +static int i810_dma_priority(drm_device_t *dev, drm_dma_t *d) +{ + unsigned long address; + unsigned long length; + int must_free = 0; + int retcode = 0; + int i; + int idx; + drm_buf_t *buf; + drm_buf_t *last_buf = NULL; + drm_device_dma_t *dma = dev->dma; + DECLARE_WAITQUEUE(entry, current); + + /* Turn off interrupt handling */ + while (test_and_set_bit(0, &dev->interrupt_flag)) { + schedule(); + if (signal_pending(current)) return -EINTR; + } + if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) { + while (!drm_lock_take(&dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + schedule(); + if (signal_pending(current)) { + clear_bit(0, &dev->interrupt_flag); + return -EINTR; + } + } + ++must_free; + } + atomic_inc(&dma->total_prio); + 
+ for (i = 0; i < d->send_count; i++) { + idx = d->send_indices[i]; + if (idx < 0 || idx >= dma->buf_count) { + DRM_ERROR("Index %d (of %d max)\n", + d->send_indices[i], dma->buf_count - 1); + continue; + } + buf = dma->buflist[ idx ]; + if (buf->pid != current->pid) { + DRM_ERROR("Process %d using buffer owned by %d\n", + current->pid, buf->pid); + retcode = -EINVAL; + goto cleanup; + } + if (buf->list != DRM_LIST_NONE) { + DRM_ERROR("Process %d using %d's buffer on list %d\n", + current->pid, buf->pid, buf->list); + retcode = -EINVAL; + goto cleanup; + } + /* This isn't a race condition on + buf->list, since our concern is the + buffer reclaim during the time the + process closes the /dev/drm? handle, so + it can't also be doing DMA. */ + buf->list = DRM_LIST_PRIO; + buf->used = d->send_sizes[i]; + buf->context = d->context; + buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED; + address = (unsigned long)buf->address; + length = buf->used; + if (!length) { + DRM_ERROR("0 length buffer\n"); + } + if (buf->pending) { + DRM_ERROR("Sending pending buffer:" + " buffer %d, offset %d\n", + d->send_indices[i], i); + retcode = -EINVAL; + goto cleanup; + } + if (buf->waiting) { + DRM_ERROR("Sending waiting buffer:" + " buffer %d, offset %d\n", + d->send_indices[i], i); + retcode = -EINVAL; + goto cleanup; + } + buf->pending = 1; + + if (dev->last_context != buf->context + && !(dev->queuelist[buf->context]->flags + & _DRM_CONTEXT_PRESERVED)) { + add_wait_queue(&dev->context_wait, &entry); + current->state = TASK_INTERRUPTIBLE; + /* PRE: dev->last_context != buf->context */ + drm_context_switch(dev, dev->last_context, + buf->context); + /* POST: we will wait for the context + switch and will dispatch on a later call + when dev->last_context == buf->context. + NOTE WE HOLD THE LOCK THROUGHOUT THIS + TIME! 
*/ + schedule(); + current->state = TASK_RUNNING; + remove_wait_queue(&dev->context_wait, &entry); + if (signal_pending(current)) { + retcode = -EINTR; + goto cleanup; + } + if (dev->last_context != buf->context) { + DRM_ERROR("Context mismatch: %d %d\n", + dev->last_context, + buf->context); + } + } + +#if DRM_DMA_HISTOGRAM + buf->time_queued = get_cycles(); + buf->time_dispatched = buf->time_queued; +#endif + i810_dma_dispatch(dev, address, length); + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + + atomic_add(length, &dma->total_bytes); + atomic_inc(&dma->total_dmas); + + if (last_buf) { + drm_free_buffer(dev, last_buf); + } + last_buf = buf; + } + + +cleanup: + if (last_buf) { + i810_dma_ready(dev); + drm_free_buffer(dev, last_buf); + } + + if (must_free && !dev->context_flag) { + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } + clear_bit(0, &dev->interrupt_flag); + return retcode; +} + +static int i810_dma_send_buffers(drm_device_t *dev, drm_dma_t *d) +{ + DECLARE_WAITQUEUE(entry, current); + drm_buf_t *last_buf = NULL; + int retcode = 0; + drm_device_dma_t *dma = dev->dma; + + if (d->flags & _DRM_DMA_BLOCK) { + last_buf = dma->buflist[d->send_indices[d->send_count-1]]; + add_wait_queue(&last_buf->dma_wait, &entry); + } + + if ((retcode = drm_dma_enqueue(dev, d))) { + if (d->flags & _DRM_DMA_BLOCK) + remove_wait_queue(&last_buf->dma_wait, &entry); + return retcode; + } + + i810_dma_schedule(dev, 0); + + if (d->flags & _DRM_DMA_BLOCK) { + DRM_DEBUG("%d waiting\n", current->pid); + current->state = TASK_INTERRUPTIBLE; + for (;;) { + if (!last_buf->waiting + && !last_buf->pending) + break; /* finished */ + schedule(); + if (signal_pending(current)) { + retcode = -EINTR; /* Can't restart */ + break; + } + } + current->state = TASK_RUNNING; + DRM_DEBUG("%d running\n", current->pid); + remove_wait_queue(&last_buf->dma_wait, &entry); + if (!retcode + || (last_buf->list==DRM_LIST_PEND && !last_buf->pending)) { + if (!waitqueue_active(&last_buf->dma_wait)) { + drm_free_buffer(dev, last_buf); + } + } + if (retcode) { + DRM_ERROR("ctx%d w%d p%d c%d i%d l%d %d/%d\n", + d->context, + last_buf->waiting, + last_buf->pending, + DRM_WAITCOUNT(dev, d->context), + last_buf->idx, + last_buf->list, + last_buf->pid, + current->pid); + } + } + return retcode; +} + +int i810_dma(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_device_dma_t *dma = dev->dma; + int retcode = 0; + drm_dma_t d; + + printk("i810_dma start\n"); + copy_from_user_ret(&d, (drm_dma_t *)arg, sizeof(d), -EFAULT); + DRM_DEBUG("%d %d: %d send, %d req\n", + current->pid, d.context, d.send_count, d.request_count); + + if (d.context == DRM_KERNEL_CONTEXT || d.context >= dev->queue_slots) { + DRM_ERROR("Process %d using context %d\n", + current->pid, d.context); + return -EINVAL; + } + + if (d.send_count < 0 || d.send_count > dma->buf_count) { + DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n", + current->pid, d.send_count, dma->buf_count); + return -EINVAL; + } + if (d.request_count < 0 || d.request_count > dma->buf_count) { + DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n", + current->pid, d.request_count, dma->buf_count); + return -EINVAL; + } + + if (d.send_count) { +#if 0 + if (d.flags & _DRM_DMA_PRIORITY) + retcode = i810_dma_priority(dev, &d); + else + retcode = i810_dma_send_buffers(dev, &d); 
+#endif + printk("i810_dma priority\n"); + + retcode = i810_dma_priority(dev, &d); + } + + d.granted_count = 0; + + if (!retcode && d.request_count) { + retcode = drm_dma_get_buffers(dev, &d); + } + + DRM_DEBUG("%d returning, granted = %d\n", + current->pid, d.granted_count); + copy_to_user_ret((drm_dma_t *)arg, &d, sizeof(d), -EFAULT); + + printk("i810_dma end (granted)\n"); + return retcode; +} + +int i810_irq_install(drm_device_t *dev, int irq) +{ + int retcode; + + if (!irq) return -EINVAL; + + down(&dev->struct_sem); + if (dev->irq) { + up(&dev->struct_sem); + return -EBUSY; + } + dev->irq = irq; + up(&dev->struct_sem); + + DRM_DEBUG("%d\n", irq); + + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; + + dev->dma->next_buffer = NULL; + dev->dma->next_queue = NULL; + dev->dma->this_buffer = NULL; + + dev->tq.next = NULL; + dev->tq.sync = 0; + dev->tq.routine = i810_dma_schedule_tq_wrapper; + dev->tq.data = dev; + + + /* Before installing handler */ + /* TODO */ + /* Install handler */ + if ((retcode = request_irq(dev->irq, + i810_dma_service, + 0, + dev->devname, + dev))) { + down(&dev->struct_sem); + dev->irq = 0; + up(&dev->struct_sem); + return retcode; + } + + /* After installing handler */ + /* TODO */ + return 0; +} + +int i810_irq_uninstall(drm_device_t *dev) +{ + int irq; + + down(&dev->struct_sem); + irq = dev->irq; + dev->irq = 0; + up(&dev->struct_sem); + + if (!irq) return -EINVAL; + + DRM_DEBUG("%d\n", irq); + + /* TODO : Disable interrupts */ + free_irq(irq, dev); + + return 0; +} + + +int i810_control(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_control_t ctl; + int retcode; + + printk(KERN_INFO "i810_control\n"); + i810_dma_init(dev); + + copy_from_user_ret(&ctl, (drm_control_t *)arg, sizeof(ctl), -EFAULT); + + switch (ctl.func) { + case DRM_INST_HANDLER: + if ((retcode = i810_irq_install(dev, ctl.irq))) + return retcode; + break; + case DRM_UNINST_HANDLER: + if ((retcode = i810_irq_uninstall(dev))) + return retcode; + break; + default: + return -EINVAL; + } + return 0; +} + +int i810_lock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + DECLARE_WAITQUEUE(entry, current); + int ret = 0; + drm_lock_t lock; + drm_queue_t *q; +#if DRM_DMA_HISTOGRAM + cycles_t start; + + dev->lck_start = start = get_cycles(); +#endif + + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + if (lock.context < 0 || lock.context >= dev->queue_count) { + return -EINVAL; + } + q = dev->queuelist[lock.context]; + + ret = drm_flush_block_and_flush(dev, lock.context, lock.flags); + + if (!ret) { + if (_DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) + != lock.context) { + long j = jiffies - dev->lock.lock_time; + + if (j > 0 && j <= DRM_LOCK_SLICE) { + /* Can't take lock if we just had it and + there is contention. 
*/ + current->state = TASK_INTERRUPTIBLE; + schedule_timeout(j); + } + } + add_wait_queue(&dev->lock.lock_queue, &entry); + for (;;) { + if (!dev->lock.hw_lock) { + /* Device has been unregistered */ + ret = -EINTR; + break; + } + if (drm_lock_take(&dev->lock.hw_lock->lock, + lock.context)) { + dev->lock.pid = current->pid; + dev->lock.lock_time = jiffies; + atomic_inc(&dev->total_locks); + atomic_inc(&q->total_locks); + break; /* Got lock */ + } + + /* Contention */ + atomic_inc(&dev->total_sleeps); + current->state = TASK_INTERRUPTIBLE; + schedule(); + if (signal_pending(current)) { + ret = -ERESTARTSYS; + break; + } + } + current->state = TASK_RUNNING; + remove_wait_queue(&dev->lock.lock_queue, &entry); + } + + drm_flush_unblock(dev, lock.context, lock.flags); /* cleanup phase */ + + if (!ret) { + if (lock.flags & _DRM_LOCK_READY) + i810_dma_ready(dev); + if (lock.flags & _DRM_LOCK_QUIESCENT) + i810_dma_quiescent(dev); + } + +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.lacq[drm_histogram_slot(get_cycles() - start)]); +#endif + + return ret; +} diff --git a/linux-core/i810_drv.c b/linux-core/i810_drv.c new file mode 100644 index 00000000..f33153a3 --- /dev/null +++ b/linux-core/i810_drv.c @@ -0,0 +1,583 @@ +/* i810_drv.c -- I810 driver -*- linux-c -*- + * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. 
(Rik) Faith <faith@precisioninsight.com> + * Jeff Hartmann <jhartmann@precisioninsight.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.c,v 1.1 2000/02/11 17:26:05 dawes Exp $ + * + */ + +#define EXPORT_SYMTAB +#include "drmP.h" +#include "i810_drv.h" +EXPORT_SYMBOL(i810_init); +EXPORT_SYMBOL(i810_cleanup); + +#define I810_NAME "i810" +#define I810_DESC "Matrox g200/g400" +#define I810_DATE "19991213" +#define I810_MAJOR 0 +#define I810_MINOR 0 +#define I810_PATCHLEVEL 1 + +static drm_device_t i810_device; +drm_ctx_t i810_res_ctx; + +static struct file_operations i810_fops = { + open: i810_open, + flush: drm_flush, + release: i810_release, + ioctl: i810_ioctl, + mmap: drm_mmap, + read: drm_read, + fasync: drm_fasync, +}; + +static struct miscdevice i810_misc = { + minor: MISC_DYNAMIC_MINOR, + name: I810_NAME, + fops: &i810_fops, +}; + +static drm_ioctl_desc_t i810_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { i810_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { i810_control, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { i810_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { i810_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { i810_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { i810_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { i810_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { drm_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { drm_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { drm_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { drm_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { drm_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { drm_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { drm_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { i810_dma, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { i810_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { i810_unlock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, +}; + +#define I810_IOCTL_COUNT DRM_ARRAY_SIZE(i810_ioctls) + +#ifdef MODULE +int init_module(void); +void cleanup_module(void); +static char *i810 = NULL; + +MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas."); +MODULE_DESCRIPTION("Intel I810"); +MODULE_PARM(i810, "s"); + +/* init_module is called when insmod is used to load the module */ + +int 
init_module(void) +{ + printk("doing i810_init()\n"); + return i810_init(); +} + +/* cleanup_module is called when rmmod is used to unload the module */ + +void cleanup_module(void) +{ + i810_cleanup(); +} +#endif + +#ifndef MODULE +/* i810_setup is called by the kernel to parse command-line options passed + * via the boot-loader (e.g., LILO). It calls the insmod option routine, + * drm_parse_drm. + * + * This is not currently supported, since it requires changes to + * linux/init/main.c. */ + + +void __init i810_setup(char *str, int *ints) +{ + if (ints[0] != 0) { + DRM_ERROR("Illegal command line format, ignored\n"); + return; + } + drm_parse_options(str); +} +#endif + +static int i810_setup(drm_device_t *dev) +{ + int i; + + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); + dev->buf_use = 0; + atomic_set(&dev->buf_alloc, 0); + + drm_dma_setup(dev); + + atomic_set(&dev->total_open, 0); + atomic_set(&dev->total_close, 0); + atomic_set(&dev->total_ioctl, 0); + atomic_set(&dev->total_irq, 0); + atomic_set(&dev->total_ctx, 0); + atomic_set(&dev->total_locks, 0); + atomic_set(&dev->total_unlocks, 0); + atomic_set(&dev->total_contends, 0); + atomic_set(&dev->total_sleeps, 0); + + for (i = 0; i < DRM_HASH_SIZE; i++) { + dev->magiclist[i].head = NULL; + dev->magiclist[i].tail = NULL; + } + dev->maplist = NULL; + dev->map_count = 0; + dev->vmalist = NULL; + dev->lock.hw_lock = NULL; + init_waitqueue_head(&dev->lock.lock_queue); + dev->queue_count = 0; + dev->queue_reserved = 0; + dev->queue_slots = 0; + dev->queuelist = NULL; + dev->irq = 0; + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; + dev->last_context = 0; + dev->last_switch = 0; + dev->last_checked = 0; + init_timer(&dev->timer); + init_waitqueue_head(&dev->context_wait); +#if DRM_DMA_HISTO + memset(&dev->histo, 0, sizeof(dev->histo)); +#endif + dev->ctx_start = 0; + dev->lck_start = 0; + + dev->buf_rp = dev->buf; + dev->buf_wp = dev->buf; + dev->buf_end = dev->buf + DRM_BSZ; + dev->buf_async = NULL; + init_waitqueue_head(&dev->buf_readers); + init_waitqueue_head(&dev->buf_writers); + + DRM_DEBUG("\n"); + + /* The kernel's context could be created here, but is now created + in drm_dma_enqueue. This is more resource-efficient for + hardware that does not do DMA, but may mean that + drm_select_queue fails between the time the interrupt is + initialized and the time the queues are initialized. */ + + return 0; +} + + +static int i810_takedown(drm_device_t *dev) +{ + int i; + drm_magic_entry_t *pt, *next; + drm_map_t *map; + drm_vma_entry_t *vma, *vma_next; + + DRM_DEBUG("\n"); + + if (dev->irq) i810_irq_uninstall(dev); + + down(&dev->struct_sem); + del_timer(&dev->timer); + + if (dev->devname) { + drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); + dev->devname = NULL; + } + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + /* Clear pid list */ + for (i = 0; i < DRM_HASH_SIZE; i++) { + for (pt = dev->magiclist[i].head; pt; pt = next) { + next = pt->next; + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + } + dev->magiclist[i].head = dev->magiclist[i].tail = NULL; + } + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *entry; + drm_agp_mem_t *nexte; + + /* Remove AGP resources, but leave dev->agp + intact until r128_cleanup is called. 
*/ + for (entry = dev->agp->memory; entry; entry = nexte) { + nexte = entry->next; + if (entry->bound) drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + } + dev->agp->memory = NULL; + + if (dev->agp->acquired && drm_agp.release) + (*drm_agp.release)(); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } + /* Clear vma list (only built for debugging) */ + if (dev->vmalist) { + for (vma = dev->vmalist; vma; vma = vma_next) { + vma_next = vma->next; + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + dev->vmalist = NULL; + } + + /* Clear map area and mtrr information */ + if (dev->maplist) { + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: +#ifdef CONFIG_MTRR + if (map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, + map->offset, + map->size); + DRM_DEBUG("mtrr_del = %d\n", retcode); + } +#endif + drm_ioremapfree(map->handle, map->size); + break; + case _DRM_SHM: + drm_free_pages((unsigned long)map->handle, + drm_order(map->size) + - PAGE_SHIFT, + DRM_MEM_SAREA); + break; + case _DRM_AGP: + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } + drm_free(dev->maplist, + dev->map_count * sizeof(*dev->maplist), + DRM_MEM_MAPS); + dev->maplist = NULL; + dev->map_count = 0; + } + + if (dev->queuelist) { + for (i = 0; i < dev->queue_count; i++) { + drm_waitlist_destroy(&dev->queuelist[i]->waitlist); + if (dev->queuelist[i]) { + drm_free(dev->queuelist[i], + sizeof(*dev->queuelist[0]), + DRM_MEM_QUEUES); + dev->queuelist[i] = NULL; + } + } + drm_free(dev->queuelist, + dev->queue_slots * sizeof(*dev->queuelist), + DRM_MEM_QUEUES); + dev->queuelist = NULL; + } + + drm_dma_takedown(dev); + + dev->queue_count = 0; + if (dev->lock.hw_lock) { + dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.pid = 0; + wake_up_interruptible(&dev->lock.lock_queue); + } + up(&dev->struct_sem); + + return 0; +} + +/* i810_init is called via init_module at module load time, or via + * linux/init/main.c (this is not currently supported). */ + +int i810_init(void) +{ + int retcode; + drm_device_t *dev = &i810_device; + + DRM_DEBUG("\n"); + + memset((void *)dev, 0, sizeof(*dev)); + dev->count_lock = SPIN_LOCK_UNLOCKED; + sema_init(&dev->struct_sem, 1); + +#ifdef MODULE + drm_parse_options(i810); +#endif + printk("doing misc_register\n"); + if ((retcode = misc_register(&i810_misc))) { + DRM_ERROR("Cannot register \"%s\"\n", I810_NAME); + return retcode; + } + dev->device = MKDEV(MISC_MAJOR, i810_misc.minor); + dev->name = I810_NAME; + + printk("doing mem init\n"); + drm_mem_init(); + printk("doing proc init\n"); + drm_proc_init(dev); + printk("doing agp init\n"); + dev->agp = drm_agp_init(); + printk("doing ctxbitmap init\n"); + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&i810_misc); + i810_takedown(dev); + return retcode; + } +#if 0 + printk("doing i810_dma_init\n"); + i810_dma_init(dev); +#endif + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + I810_NAME, + I810_MAJOR, + I810_MINOR, + I810_PATCHLEVEL, + I810_DATE, + i810_misc.minor); + + return 0; +} + +/* i810_cleanup is called via cleanup_module at module unload time. 
*/ + +void i810_cleanup(void) +{ + drm_device_t *dev = &i810_device; + + DRM_DEBUG("\n"); + + drm_proc_cleanup(); + if (misc_deregister(&i810_misc)) { + DRM_ERROR("Cannot unload module\n"); + } else { + DRM_INFO("Module unloaded\n"); + } + drm_ctxbitmap_cleanup(dev); + i810_dma_cleanup(dev); + i810_takedown(dev); + if (dev->agp) { + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +} + +int i810_version(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_version_t version; + int len; + + copy_from_user_ret(&version, + (drm_version_t *)arg, + sizeof(version), + -EFAULT); + +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + copy_to_user_ret(name, value, len, -EFAULT); \ + } + + version.version_major = I810_MAJOR; + version.version_minor = I810_MINOR; + version.version_patchlevel = I810_PATCHLEVEL; + + DRM_COPY(version.name, I810_NAME); + DRM_COPY(version.date, I810_DATE); + DRM_COPY(version.desc, I810_DESC); + + copy_to_user_ret((drm_version_t *)arg, + &version, + sizeof(version), + -EFAULT); + return 0; +} + +int i810_open(struct inode *inode, struct file *filp) +{ + drm_device_t *dev = &i810_device; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_open_helper(inode, filp, dev))) { + MOD_INC_USE_COUNT; + atomic_inc(&dev->total_open); + spin_lock(&dev->count_lock); + if (!dev->open_count++) { + spin_unlock(&dev->count_lock); + return i810_setup(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} + +int i810_release(struct inode *inode, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_release(inode, filp))) { + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return i810_takedown(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} + +/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. 
*/ + +int i810_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int nr = DRM_IOCTL_NR(cmd); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_ioctl_desc_t *ioctl; + drm_ioctl_t *func; + + atomic_inc(&dev->ioctl_count); + atomic_inc(&dev->total_ioctl); + ++priv->ioctl_count; + + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", + current->pid, cmd, nr, dev->device, priv->authenticated); + + if (nr >= I810_IOCTL_COUNT) { + retcode = -EINVAL; + } else { + ioctl = &i810_ioctls[nr]; + func = ioctl->func; + + if (!func) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) + || (ioctl->auth_needed && !priv->authenticated)) { + retcode = -EACCES; + } else { + retcode = (func)(inode, filp, cmd, arg); + } + } + + atomic_dec(&dev->ioctl_count); + return retcode; +} + +int i810_unlock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_lock_t lock; + + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d frees lock (%d holds)\n", + lock.context, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + atomic_inc(&dev->total_unlocks); + if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) + atomic_inc(&dev->total_contends); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + i810_dma_schedule(dev, 1); + if (!dev->context_flag) { + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + } +#if DRM_DMA_HISTOGRAM + atomic_inc(&dev->histo.lhld[drm_histogram_slot(get_cycles() + - dev->lck_start)]); +#endif + + return 0; +} diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h new file mode 100644 index 00000000..0f5f42bb --- /dev/null +++ b/linux-core/i810_drv.h @@ -0,0 +1,76 @@ +/* i810_drv.h -- Private header for the Matrox g200/g400 driver -*- linux-c -*- + * Created: Mon Dec 13 01:50:01 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. 
(Rik) Faith <faith@precisioninsight.com> + * Jeff Hartmann <jhartmann@precisioninsight.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/i810_drv.h,v 1.1 2000/02/11 17:26:05 dawes Exp $ + */ + +#ifndef _I810_DRV_H_ +#define _I810_DRV_H_ + + /* i810_drv.c */ +extern int i810_init(void); +extern void i810_cleanup(void); +extern int i810_version(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_open(struct inode *inode, struct file *filp); +extern int i810_release(struct inode *inode, struct file *filp); +extern int i810_ioctl(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_unlock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + /* i810_dma.c */ +extern int i810_dma_schedule(drm_device_t *dev, int locked); +extern int i810_dma(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_irq_install(drm_device_t *dev, int irq); +extern int i810_irq_uninstall(drm_device_t *dev); +extern int i810_control(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_lock(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern void i810_dma_init(drm_device_t *dev); +extern void i810_dma_cleanup(drm_device_t *dev); + + + /* i810_bufs.c */ +extern int i810_addbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_infobufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_markbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_freebufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_mapbufs(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); +extern int i810_addmap(struct inode *inode, struct file *filp, + unsigned int cmd, unsigned long arg); + + +#endif diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c new file mode 100644 index 00000000..8bc25617 --- /dev/null +++ b/linux-core/mga_drv.c @@ -0,0 +1,576 @@ +/* mga_drv.c -- Matrox g200/g400 driver -*- linux-c -*- + * Created: Mon Dec 13 01:56:22 1999 by jhartmann@precisioninsight.com + * + * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + * + * Authors: Rickard E. (Rik) Faith <faith@precisioninsight.com> + * Jeff Hartmann <jhartmann@precisioninsight.com> + * + * $XFree86: xc/programs/Xserver/hw/xfree86/os-support/linux/drm/kernel/mga_drv.c,v 1.1 2000/02/11 17:26:07 dawes Exp $ + * + */ + +#define EXPORT_SYMTAB +#include "drmP.h" +#include "mga_drv.h" +EXPORT_SYMBOL(mga_init); +EXPORT_SYMBOL(mga_cleanup); + +#define MGA_NAME "mga" +#define MGA_DESC "Matrox g200/g400" +#define MGA_DATE "19991213" +#define MGA_MAJOR 0 +#define MGA_MINOR 0 +#define MGA_PATCHLEVEL 1 + +static drm_device_t mga_device; +drm_ctx_t mga_res_ctx; + +static struct file_operations mga_fops = { + open: mga_open, + flush: drm_flush, + release: mga_release, + ioctl: mga_ioctl, + mmap: drm_mmap, + read: drm_read, + fasync: drm_fasync, +}; + +static struct miscdevice mga_misc = { + minor: MISC_DYNAMIC_MINOR, + name: MGA_NAME, + fops: &mga_fops, +}; + +static drm_ioctl_desc_t mga_ioctls[] = { + [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = { mga_version, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = { drm_getunique, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = { drm_getmagic, 0, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = { drm_irq_busid, 0, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = { drm_setunique, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = { drm_block, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = { drm_unblock, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = { mga_control, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = { drm_authmagic, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = { drm_addmap, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = { mga_addbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = { mga_markbufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = { mga_infobufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = { mga_mapbufs, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = { mga_freebufs, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = { mga_addctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = { mga_rmctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = { mga_modctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = { mga_getctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = { mga_switchctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = { mga_newctx, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = { mga_resctx, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = { drm_adddraw, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = { drm_rmdraw, 1, 1 }, + + [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = { mga_dma, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = { mga_lock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = { mga_unlock, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = { drm_finish, 1, 0 }, + + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = { drm_agp_acquire, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = { drm_agp_release, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = { drm_agp_enable, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = { drm_agp_info, 1, 0 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = { drm_agp_alloc, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = { drm_agp_free, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = { drm_agp_bind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = { drm_agp_unbind, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_INIT)] = { mga_dma_init, 1, 1 }, + 
[DRM_IOCTL_NR(DRM_IOCTL_MGA_SWAP)] = { mga_clear_bufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_CLEAR)] = { mga_swap_bufs, 1, 1 }, + [DRM_IOCTL_NR(DRM_IOCTL_MGA_ILOAD)] = { mga_iload, 1, 1 }, +}; + +#define MGA_IOCTL_COUNT DRM_ARRAY_SIZE(mga_ioctls) + +#ifdef MODULE +int init_module(void); +void cleanup_module(void); +static char *mga = NULL; + +MODULE_AUTHOR("Precision Insight, Inc., Cedar Park, Texas."); +MODULE_DESCRIPTION("Matrox g200/g400"); +MODULE_PARM(mga, "s"); + +/* init_module is called when insmod is used to load the module */ + +int init_module(void) +{ + DRM_DEBUG("doing mga_init()\n"); + return mga_init(); +} + +/* cleanup_module is called when rmmod is used to unload the module */ + +void cleanup_module(void) +{ + mga_cleanup(); +} +#endif + +#ifndef MODULE +/* mga_setup is called by the kernel to parse command-line options passed + * via the boot-loader (e.g., LILO). It calls the insmod option routine, + * drm_parse_drm. + * + * This is not currently supported, since it requires changes to + * linux/init/main.c. */ + + +void __init mga_setup(char *str, int *ints) +{ + if (ints[0] != 0) { + DRM_ERROR("Illegal command line format, ignored\n"); + return; + } + drm_parse_options(str); +} +#endif + +static int mga_setup(drm_device_t *dev) +{ + int i; + + atomic_set(&dev->ioctl_count, 0); + atomic_set(&dev->vma_count, 0); + dev->buf_use = 0; + atomic_set(&dev->buf_alloc, 0); + + drm_dma_setup(dev); + + atomic_set(&dev->total_open, 0); + atomic_set(&dev->total_close, 0); + atomic_set(&dev->total_ioctl, 0); + atomic_set(&dev->total_irq, 0); + atomic_set(&dev->total_ctx, 0); + atomic_set(&dev->total_locks, 0); + atomic_set(&dev->total_unlocks, 0); + atomic_set(&dev->total_contends, 0); + atomic_set(&dev->total_sleeps, 0); + + for (i = 0; i < DRM_HASH_SIZE; i++) { + dev->magiclist[i].head = NULL; + dev->magiclist[i].tail = NULL; + } + dev->maplist = NULL; + dev->map_count = 0; + dev->vmalist = NULL; + dev->lock.hw_lock = NULL; + init_waitqueue_head(&dev->lock.lock_queue); + dev->queue_count = 0; + dev->queue_reserved = 0; + dev->queue_slots = 0; + dev->queuelist = NULL; + dev->irq = 0; + dev->context_flag = 0; + dev->interrupt_flag = 0; + dev->dma_flag = 0; + dev->last_context = 0; + dev->last_switch = 0; + dev->last_checked = 0; + init_timer(&dev->timer); + init_waitqueue_head(&dev->context_wait); + + dev->ctx_start = 0; + dev->lck_start = 0; + + dev->buf_rp = dev->buf; + dev->buf_wp = dev->buf; + dev->buf_end = dev->buf + DRM_BSZ; + dev->buf_async = NULL; + init_waitqueue_head(&dev->buf_readers); + init_waitqueue_head(&dev->buf_writers); + + DRM_DEBUG("\n"); + + /* The kernel's context could be created here, but is now created + in drm_dma_enqueue. This is more resource-efficient for + hardware that does not do DMA, but may mean that + drm_select_queue fails between the time the interrupt is + initialized and the time the queues are initialized. 
*/ + + return 0; +} + + +static int mga_takedown(drm_device_t *dev) +{ + int i; + drm_magic_entry_t *pt, *next; + drm_map_t *map; + drm_vma_entry_t *vma, *vma_next; + + DRM_DEBUG("\n"); + + if (dev->irq) mga_irq_uninstall(dev); + + down(&dev->struct_sem); + del_timer(&dev->timer); + + if (dev->devname) { + drm_free(dev->devname, strlen(dev->devname)+1, DRM_MEM_DRIVER); + dev->devname = NULL; + } + + if (dev->unique) { + drm_free(dev->unique, strlen(dev->unique)+1, DRM_MEM_DRIVER); + dev->unique = NULL; + dev->unique_len = 0; + } + /* Clear pid list */ + for (i = 0; i < DRM_HASH_SIZE; i++) { + for (pt = dev->magiclist[i].head; pt; pt = next) { + next = pt->next; + drm_free(pt, sizeof(*pt), DRM_MEM_MAGIC); + } + dev->magiclist[i].head = dev->magiclist[i].tail = NULL; + } + /* Clear AGP information */ + if (dev->agp) { + drm_agp_mem_t *entry; + drm_agp_mem_t *nexte; + + /* Remove AGP resources, but leave dev->agp + intact until cleanup is called. */ + for (entry = dev->agp->memory; entry; entry = nexte) { + nexte = entry->next; + if (entry->bound) drm_unbind_agp(entry->memory); + drm_free_agp(entry->memory, entry->pages); + drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); + } + dev->agp->memory = NULL; + + if (dev->agp->acquired && drm_agp.release) + (*drm_agp.release)(); + + dev->agp->acquired = 0; + dev->agp->enabled = 0; + } + /* Clear vma list (only built for debugging) */ + if (dev->vmalist) { + for (vma = dev->vmalist; vma; vma = vma_next) { + vma_next = vma->next; + drm_free(vma, sizeof(*vma), DRM_MEM_VMAS); + } + dev->vmalist = NULL; + } + + /* Clear map area and mtrr information */ + if (dev->maplist) { + for (i = 0; i < dev->map_count; i++) { + map = dev->maplist[i]; + switch (map->type) { + case _DRM_REGISTERS: + case _DRM_FRAME_BUFFER: +#ifdef CONFIG_MTRR + if (map->mtrr >= 0) { + int retcode; + retcode = mtrr_del(map->mtrr, + map->offset, + map->size); + DRM_DEBUG("mtrr_del = %d\n", retcode); + } +#endif + drm_ioremapfree(map->handle, map->size); + break; + case _DRM_SHM: + drm_free_pages((unsigned long)map->handle, + drm_order(map->size) + - PAGE_SHIFT, + DRM_MEM_SAREA); + break; + case _DRM_AGP: + break; + } + drm_free(map, sizeof(*map), DRM_MEM_MAPS); + } + drm_free(dev->maplist, + dev->map_count * sizeof(*dev->maplist), + DRM_MEM_MAPS); + dev->maplist = NULL; + dev->map_count = 0; + } + + if (dev->queuelist) { + for (i = 0; i < dev->queue_count; i++) { + drm_waitlist_destroy(&dev->queuelist[i]->waitlist); + if (dev->queuelist[i]) { + drm_free(dev->queuelist[i], + sizeof(*dev->queuelist[0]), + DRM_MEM_QUEUES); + dev->queuelist[i] = NULL; + } + } + drm_free(dev->queuelist, + dev->queue_slots * sizeof(*dev->queuelist), + DRM_MEM_QUEUES); + dev->queuelist = NULL; + } + + drm_dma_takedown(dev); + + dev->queue_count = 0; + if (dev->lock.hw_lock) { + dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.pid = 0; + wake_up_interruptible(&dev->lock.lock_queue); + } + up(&dev->struct_sem); + + return 0; +} + +/* mga_init is called via init_module at module load time, or via + * linux/init/main.c (this is not currently supported). 
*/ + +int mga_init(void) +{ + int retcode; + drm_device_t *dev = &mga_device; + + DRM_DEBUG("\n"); + + memset((void *)dev, 0, sizeof(*dev)); + dev->count_lock = SPIN_LOCK_UNLOCKED; + sema_init(&dev->struct_sem, 1); + +#ifdef MODULE + drm_parse_options(mga); +#endif + DRM_DEBUG("doing misc_register\n"); + if ((retcode = misc_register(&mga_misc))) { + DRM_ERROR("Cannot register \"%s\"\n", MGA_NAME); + return retcode; + } + dev->device = MKDEV(MISC_MAJOR, mga_misc.minor); + dev->name = MGA_NAME; + + DRM_DEBUG("doing mem init\n"); + drm_mem_init(); + DRM_DEBUG("doing proc init\n"); + drm_proc_init(dev); + DRM_DEBUG("doing agp init\n"); + dev->agp = drm_agp_init(); + DRM_DEBUG("doing ctxbitmap init\n"); + if((retcode = drm_ctxbitmap_init(dev))) { + DRM_ERROR("Cannot allocate memory for context bitmap.\n"); + drm_proc_cleanup(); + misc_deregister(&mga_misc); + mga_takedown(dev); + return retcode; + } + + DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", + MGA_NAME, + MGA_MAJOR, + MGA_MINOR, + MGA_PATCHLEVEL, + MGA_DATE, + mga_misc.minor); + + return 0; +} + +/* mga_cleanup is called via cleanup_module at module unload time. */ + +void mga_cleanup(void) +{ + drm_device_t *dev = &mga_device; + + DRM_DEBUG("\n"); + + drm_proc_cleanup(); + if (misc_deregister(&mga_misc)) { + DRM_ERROR("Cannot unload module\n"); + } else { + DRM_INFO("Module unloaded\n"); + } + drm_ctxbitmap_cleanup(dev); + mga_dma_cleanup(dev); + mga_takedown(dev); + if (dev->agp) { + drm_free(dev->agp, sizeof(*dev->agp), DRM_MEM_AGPLISTS); + dev->agp = NULL; + } +} + +int mga_version(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_version_t version; + int len; + + copy_from_user_ret(&version, + (drm_version_t *)arg, + sizeof(version), + -EFAULT); + +#define DRM_COPY(name,value) \ + len = strlen(value); \ + if (len > name##_len) len = name##_len; \ + name##_len = strlen(value); \ + if (len && name) { \ + copy_to_user_ret(name, value, len, -EFAULT); \ + } + + version.version_major = MGA_MAJOR; + version.version_minor = MGA_MINOR; + version.version_patchlevel = MGA_PATCHLEVEL; + + DRM_COPY(version.name, MGA_NAME); + DRM_COPY(version.date, MGA_DATE); + DRM_COPY(version.desc, MGA_DESC); + + copy_to_user_ret((drm_version_t *)arg, + &version, + sizeof(version), + -EFAULT); + return 0; +} + +int mga_open(struct inode *inode, struct file *filp) +{ + drm_device_t *dev = &mga_device; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_open_helper(inode, filp, dev))) { + MOD_INC_USE_COUNT; + atomic_inc(&dev->total_open); + spin_lock(&dev->count_lock); + if (!dev->open_count++) { + spin_unlock(&dev->count_lock); + return mga_setup(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} + +int mga_release(struct inode *inode, struct file *filp) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + + DRM_DEBUG("open_count = %d\n", dev->open_count); + if (!(retcode = drm_release(inode, filp))) { + MOD_DEC_USE_COUNT; + atomic_inc(&dev->total_close); + spin_lock(&dev->count_lock); + if (!--dev->open_count) { + if (atomic_read(&dev->ioctl_count) || dev->blocked) { + DRM_ERROR("Device busy: %d %d\n", + atomic_read(&dev->ioctl_count), + dev->blocked); + spin_unlock(&dev->count_lock); + return -EBUSY; + } + spin_unlock(&dev->count_lock); + return mga_takedown(dev); + } + spin_unlock(&dev->count_lock); + } + return retcode; +} + +/* drm_ioctl is called whenever a process performs an ioctl on /dev/drm. 
*/ + +int mga_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + int nr = DRM_IOCTL_NR(cmd); + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + int retcode = 0; + drm_ioctl_desc_t *ioctl; + drm_ioctl_t *func; + + atomic_inc(&dev->ioctl_count); + atomic_inc(&dev->total_ioctl); + ++priv->ioctl_count; + + DRM_DEBUG("pid = %d, cmd = 0x%02x, nr = 0x%02x, dev 0x%x, auth = %d\n", + current->pid, cmd, nr, dev->device, priv->authenticated); + + if (nr >= MGA_IOCTL_COUNT) { + retcode = -EINVAL; + } else { + ioctl = &mga_ioctls[nr]; + func = ioctl->func; + + if (!func) { + DRM_DEBUG("no function\n"); + retcode = -EINVAL; + } else if ((ioctl->root_only && !capable(CAP_SYS_ADMIN)) + || (ioctl->auth_needed && !priv->authenticated)) { + retcode = -EACCES; + } else { + retcode = (func)(inode, filp, cmd, arg); + } + } + + atomic_dec(&dev->ioctl_count); + return retcode; +} + +int mga_unlock(struct inode *inode, struct file *filp, unsigned int cmd, + unsigned long arg) +{ + drm_file_t *priv = filp->private_data; + drm_device_t *dev = priv->dev; + drm_lock_t lock; + + copy_from_user_ret(&lock, (drm_lock_t *)arg, sizeof(lock), -EFAULT); + + if (lock.context == DRM_KERNEL_CONTEXT) { + DRM_ERROR("Process %d using kernel context %d\n", + current->pid, lock.context); + return -EINVAL; + } + + DRM_DEBUG("%d frees lock (%d holds)\n", + lock.context, + _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); + atomic_inc(&dev->total_unlocks); + if (_DRM_LOCK_IS_CONT(dev->lock.hw_lock->lock)) + atomic_inc(&dev->total_contends); + drm_lock_transfer(dev, &dev->lock.hw_lock->lock, DRM_KERNEL_CONTEXT); + mga_dma_schedule(dev, 1); + + if (drm_lock_free(dev, &dev->lock.hw_lock->lock, + DRM_KERNEL_CONTEXT)) { + DRM_ERROR("\n"); + } + + return 0; +} |