Diffstat (limited to 'linux-core/drm_fops.c')
-rw-r--r-- | linux-core/drm_fops.c | 315
1 files changed, 315 insertions, 0 deletions
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 0ee303df..fea01130 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -37,6 +37,156 @@
 #include "drmP.h"
 #include <linux/poll.h>
 
+static int drm_setup( drm_device_t *dev )
+{
+        int i;
+
+        if (dev->fn_tbl->presetup)
+                dev->fn_tbl->presetup(dev);
+
+        atomic_set( &dev->ioctl_count, 0 );
+        atomic_set( &dev->vma_count, 0 );
+        dev->buf_use = 0;
+        atomic_set( &dev->buf_alloc, 0 );
+
+        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+        {
+                i = drm_dma_setup( dev );
+                if ( i < 0 )
+                        return i;
+        }
+
+        for ( i = 0 ; i < DRM_ARRAY_SIZE(dev->counts) ; i++ )
+                atomic_set( &dev->counts[i], 0 );
+
+        for ( i = 0 ; i < DRM_HASH_SIZE ; i++ ) {
+                dev->magiclist[i].head = NULL;
+                dev->magiclist[i].tail = NULL;
+        }
+
+        dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist),
+                                 DRM_MEM_CTXLIST);
+        if(dev->ctxlist == NULL) return -ENOMEM;
+        memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
+        INIT_LIST_HEAD(&dev->ctxlist->head);
+
+        dev->vmalist = NULL;
+        dev->sigdata.lock = dev->lock.hw_lock = NULL;
+        init_waitqueue_head( &dev->lock.lock_queue );
+        dev->queue_count = 0;
+        dev->queue_reserved = 0;
+        dev->queue_slots = 0;
+        dev->queuelist = NULL;
+        dev->irq_enabled = 0;
+        dev->context_flag = 0;
+        dev->interrupt_flag = 0;
+        dev->dma_flag = 0;
+        dev->last_context = 0;
+        dev->last_switch = 0;
+        dev->last_checked = 0;
+        init_waitqueue_head( &dev->context_wait );
+        dev->if_version = 0;
+
+        dev->ctx_start = 0;
+        dev->lck_start = 0;
+
+        dev->buf_rp = dev->buf;
+        dev->buf_wp = dev->buf;
+        dev->buf_end = dev->buf + DRM_BSZ;
+        dev->buf_async = NULL;
+        init_waitqueue_head( &dev->buf_readers );
+        init_waitqueue_head( &dev->buf_writers );
+
+        DRM_DEBUG( "\n" );
+
+        /*
+         * The kernel's context could be created here, but is now created
+         * in drm_dma_enqueue.  This is more resource-efficient for
+         * hardware that does not do DMA, but may mean that
+         * drm_select_queue fails between the time the interrupt is
+         * initialized and the time the queues are initialized.
+         */
+        if (dev->fn_tbl->postsetup)
+                dev->fn_tbl->postsetup(dev);
+
+        return 0;
+}
+
+
+/**
+ * Open file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * Searches the DRM device with the same minor number, calls open_helper(), and
+ * increments the device open count. If the open count was previous at zero,
+ * i.e., it's the first that the device is open, then calls setup().
+ */
+int drm_open( struct inode *inode, struct file *filp )
+{
+        drm_device_t *dev = NULL;
+        int minor = iminor(inode);
+        int retcode = 0;
+
+        if (!((minor >= 0) && (minor < cards_limit)))
+                return -ENODEV;
+
+        dev = drm_minors[minor].dev;
+        if (!dev)
+                return -ENODEV;
+
+        retcode = drm_open_helper( inode, filp, dev );
+        if ( !retcode ) {
+                atomic_inc( &dev->counts[_DRM_STAT_OPENS] );
+                spin_lock( &dev->count_lock );
+                if ( !dev->open_count++ ) {
+                        spin_unlock( &dev->count_lock );
+                        return drm_setup( dev );
+                }
+                spin_unlock( &dev->count_lock );
+        }
+
+        return retcode;
+}
+EXPORT_SYMBOL(drm_open);
+
+/**
+ * File \c open operation.
+ *
+ * \param inode device inode.
+ * \param filp file pointer.
+ *
+ * Puts the dev->fops corresponding to the device minor number into
+ * \p filp, call the \c open method, and restore the file operations.
+ */
+int drm_stub_open(struct inode *inode, struct file *filp)
+{
+        drm_device_t *dev = NULL;
+        int minor = iminor(inode);
+        int err = -ENODEV;
+        struct file_operations *old_fops;
+
+        DRM_DEBUG("\n");
+
+        if (!((minor >= 0) && (minor < cards_limit)))
+                return -ENODEV;
+
+        dev = drm_minors[minor].dev;
+        if (!dev)
+                return -ENODEV;
+
+        old_fops = filp->f_op;
+        filp->f_op = fops_get(&dev->fn_tbl->fops);
+        if (filp->f_op->open && (err = filp->f_op->open(inode, filp))) {
+                fops_put(filp->f_op);
+                filp->f_op = fops_get(old_fops);
+        }
+        fops_put(old_fops);
+
+        return err;
+}
 
 /**
  * Called whenever a process opens /dev/drm.
@@ -128,3 +278,168 @@ int drm_fasync(int fd, struct file *filp, int on)
         return 0;
 }
 EXPORT_SYMBOL(drm_fasync);
+
+/**
+ * Release file.
+ *
+ * \param inode device inode
+ * \param filp file pointer.
+ * \return zero on success or a negative number on failure.
+ *
+ * If the hardware lock is held then free it, and take it again for the kernel
+ * context since it's necessary to reclaim buffers. Unlink the file private
+ * data from its list and free it. Decreases the open count and if it reaches
+ * zero calls takedown().
+ */
+int drm_release( struct inode *inode, struct file *filp )
+{
+        drm_file_t *priv = filp->private_data;
+        drm_device_t *dev;
+        int retcode = 0;
+
+        lock_kernel();
+        dev = priv->dev;
+
+        DRM_DEBUG( "open_count = %d\n", dev->open_count );
+
+        if (dev->fn_tbl->prerelease)
+                dev->fn_tbl->prerelease(dev, filp);
+
+        /* ========================================================
+         * Begin inline drm_release
+         */
+
+        DRM_DEBUG( "pid = %d, device = 0x%lx, open_count = %d\n",
+                   current->pid, (long)old_encode_dev(dev->device), dev->open_count );
+
+        if ( priv->lock_count && dev->lock.hw_lock &&
+             _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) &&
+             dev->lock.filp == filp ) {
+                DRM_DEBUG( "File %p released, freeing lock for context %d\n",
+                           filp,
+                           _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
+
+                if (dev->fn_tbl->release)
+                        dev->fn_tbl->release(dev, filp);
+
+                drm_lock_free( dev, &dev->lock.hw_lock->lock,
+                               _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock) );
+
+                /* FIXME: may require heavy-handed reset of
+                   hardware at this point, possibly
+                   processed via a callback to the X
+                   server. */
+        }
+        else if ( dev->fn_tbl->release && priv->lock_count && dev->lock.hw_lock ) {
+                /* The lock is required to reclaim buffers */
+                DECLARE_WAITQUEUE( entry, current );
+
+                add_wait_queue( &dev->lock.lock_queue, &entry );
+                for (;;) {
+                        current->state = TASK_INTERRUPTIBLE;
+                        if ( !dev->lock.hw_lock ) {
+                                /* Device has been unregistered */
+                                retcode = -EINTR;
+                                break;
+                        }
+                        if ( drm_lock_take( &dev->lock.hw_lock->lock,
+                                            DRM_KERNEL_CONTEXT ) ) {
+                                dev->lock.filp = filp;
+                                dev->lock.lock_time = jiffies;
+                                atomic_inc( &dev->counts[_DRM_STAT_LOCKS] );
+                                break;  /* Got lock */
+                        }
+                        /* Contention */
+                        schedule();
+                        if ( signal_pending( current ) ) {
+                                retcode = -ERESTARTSYS;
+                                break;
+                        }
+                }
+                current->state = TASK_RUNNING;
+                remove_wait_queue( &dev->lock.lock_queue, &entry );
+                if( !retcode ) {
+                        if (dev->fn_tbl->release)
+                                dev->fn_tbl->release(dev, filp);
+                        drm_lock_free( dev, &dev->lock.hw_lock->lock,
+                                       DRM_KERNEL_CONTEXT );
+                }
+        }
+
+        if (drm_core_check_feature(dev, DRIVER_HAVE_DMA))
+        {
+                dev->fn_tbl->reclaim_buffers(filp);
+        }
+
+        drm_fasync( -1, filp, 0 );
+
+        down( &dev->ctxlist_sem );
+        if ( !list_empty( &dev->ctxlist->head ) ) {
+                drm_ctx_list_t *pos, *n;
+
+                list_for_each_entry_safe( pos, n, &dev->ctxlist->head, head ) {
+                        if ( pos->tag == priv &&
+                             pos->handle != DRM_KERNEL_CONTEXT ) {
+                                if (dev->fn_tbl->context_dtor)
+                                        dev->fn_tbl->context_dtor(dev, pos->handle);
+
+                                drm_ctxbitmap_free( dev, pos->handle );
+
+                                list_del( &pos->head );
+                                drm_free( pos, sizeof(*pos), DRM_MEM_CTXLIST );
+                                --dev->ctx_count;
+                        }
+                }
+        }
+        up( &dev->ctxlist_sem );
+
+        down( &dev->struct_sem );
+        if ( priv->remove_auth_on_close == 1 ) {
+                drm_file_t *temp = dev->file_first;
+                while ( temp ) {
+                        temp->authenticated = 0;
+                        temp = temp->next;
+                }
+        }
+        if ( priv->prev ) {
+                priv->prev->next = priv->next;
+        } else {
+                dev->file_first = priv->next;
+        }
+        if ( priv->next ) {
+                priv->next->prev = priv->prev;
+        } else {
+                dev->file_last = priv->prev;
+        }
+        up( &dev->struct_sem );
+
+        if (dev->fn_tbl->free_filp_priv)
+                dev->fn_tbl->free_filp_priv( dev, priv );
+        drm_free( priv, sizeof(*priv), DRM_MEM_FILES );
+
+        /* ========================================================
+         * End inline drm_release
+         */
+
+        atomic_inc( &dev->counts[_DRM_STAT_CLOSES] );
+        spin_lock( &dev->count_lock );
+        if ( !--dev->open_count ) {
+                if ( atomic_read( &dev->ioctl_count ) || dev->blocked ) {
+                        DRM_ERROR( "Device busy: %d %d\n",
+                                   atomic_read( &dev->ioctl_count ),
+                                   dev->blocked );
+                        spin_unlock( &dev->count_lock );
+                        unlock_kernel();
+                        return -EBUSY;
+                }
+                spin_unlock( &dev->count_lock );
+                unlock_kernel();
+                return drm_takedown( dev );
+        }
+        spin_unlock( &dev->count_lock );
+
+        unlock_kernel();
+
+        return retcode;
+}
+EXPORT_SYMBOL(drm_release);