Diffstat (limited to 'linux-core')
84 files changed, 5894 insertions, 3246 deletions
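Much of the churn below comes from two mechanical conversions: the remaining drm_*_t typedefs give way to plain struct tags, and every ioctl handler moves from the old (inode, filp, cmd, arg) prototype to one that receives the DRM device, a kernel-space copy of the argument data, and the caller's struct drm_file. A minimal sketch of the new handler shape — the foo_getparam handler and its argument struct are hypothetical, not part of the patch:

#include "drmP.h"

/* Hypothetical per-ioctl argument struct, for illustration only. */
struct drm_foo_getparam {
	int param;
	int value;
};

/* Old convention, for contrast:
 *	int (*ioctl)(struct inode *inode, struct file *filp,
 *		     unsigned int cmd, unsigned long arg);
 * where each handler dug the drm_file out of filp->private_data (and the
 * device from it) and did its own copy_from_user()/copy_to_user() on arg.
 */
static int foo_getparam(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	/* drm_ioctl() has already copied the user argument into kernel
	 * space; the handler just casts the data pointer. */
	struct drm_foo_getparam *args = data;

	args->value = 0;	/* results written back through *args are
				 * copied out to user space on success */
	return 0;
}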
diff --git a/linux-core/Makefile b/linux-core/Makefile index 1758777c..1cdf3b30 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -58,7 +58,7 @@ endif # Modules for all architectures MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \ - mach64.o nv.o nouveau.o + mach64.o nv.o nouveau.o xgi.o # Modules only for ix86 architectures ifneq (,$(findstring 86,$(MACHINE))) @@ -91,6 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS) NVHEADERS = nv_drv.h $(DRMHEADERS) FFBHEADERS = ffb_drv.h $(DRMHEADERS) NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS) +XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS) PROGS = dristat drmstat @@ -268,6 +269,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif +EXTRA_CFLAGS += -g -O0 # Start with all modules turned off. CONFIG_DRM_GAMMA := n @@ -284,6 +286,7 @@ CONFIG_DRM_VIA := n CONFIG_DRM_MACH64 := n CONFIG_DRM_NV := n CONFIG_DRM_NOUVEAU := n +CONFIG_DRM_XGI := n # Enable module builds for the modules requested/supported. @@ -320,6 +323,9 @@ endif ifneq (,$(findstring nouveau,$(DRM_MODULES))) CONFIG_DRM_NOUVEAU := m endif +ifneq (,$(findstring xgi,$(DRM_MODULES))) +CONFIG_DRM_XGI := m +endif # These require AGP support @@ -347,6 +353,7 @@ $(via-objs): $(VIAHEADERS) $(mach64-objs): $(MACH64HEADERS) $(nv-objs): $(NVHEADERS) $(nouveau-objs): $(NOUVEAUHEADERS) +$(xgi-objs): $(XGIHEADERS) endif diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index ac613d2e..a7ab1f1d 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -24,12 +24,14 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \ intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o \ + nouveau_sgdma.o nouveau_dma.o \ nv04_timer.o \ nv04_mc.o nv40_mc.o nv50_mc.o \ nv04_fb.o nv10_fb.o nv40_fb.o \ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \ nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \ - nv40_graph.o nv50_graph.o + nv40_graph.o nv50_graph.o \ + nv04_instmem.o nv50_instmem.o radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o sis-objs := sis_drv.o sis_mm.o ffb-objs := ffb_drv.o ffb_context.o @@ -38,6 +40,8 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \ via_video.o via_dmablit.o via_fence.o via_buffer.o mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o nv-objs := nv_drv.o +xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \ + xgi_fence.o ifeq ($(CONFIG_COMPAT),y) drm-objs += drm_ioc32.o @@ -46,6 +50,7 @@ mga-objs += mga_ioc32.o r128-objs += r128_ioc32.o i915-objs += i915_ioc32.o nouveau-objs += nouveau_ioc32.o +xgi-objs += xgi_ioc32.o endif obj-m += drm.o @@ -62,3 +67,4 @@ obj-$(CONFIG_DRM_VIA) += via.o obj-$(CONFIG_DRM_MACH64)+= mach64.o obj-$(CONFIG_DRM_NV) += nv.o obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o +obj-$(CONFIG_DRM_XGI) += xgi.o diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c index 524618a8..7241c2a8 100644 --- a/linux-core/ati_pcigart.c +++ b/linux-core/ati_pcigart.c @@ -81,9 +81,9 @@ static void drm_ati_free_pcigart_table(void *address, int order) free_pages((unsigned long)address, order); } -int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int 
drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; unsigned long pages; int i; int order; @@ -132,9 +132,9 @@ int drm_ati_pcigart_cleanup(drm_device_t *dev, drm_ati_pcigart_info *gart_info) } EXPORT_SYMBOL(drm_ati_pcigart_cleanup); -int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info) +int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info) { - drm_sg_mem_t *entry = dev->sg; + struct drm_sg_mem *entry = dev->sg; void *address = NULL; unsigned long pages; u32 *pci_gart, page_base, bus_address = 0; diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 510de608..64f9a63f 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -84,6 +84,8 @@ #include "drm_os_linux.h" #include "drm_hashtab.h" +struct drm_file; + /* If you want the memory alloc debug functionality, change define below */ /* #define DEBUG_MEMORY */ @@ -250,15 +252,15 @@ * Test that the hardware lock is held by the caller, returning otherwise. * * \param dev DRM device. - * \param filp file pointer of the caller. + * \param file_priv DRM file private pointer of the caller. */ -#define LOCK_TEST_WITH_RETURN( dev, filp ) \ +#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \ do { \ if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ - dev->lock.filp != filp ) { \ + dev->lock.file_priv != file_priv ) { \ DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ - dev->lock.filp, filp ); \ + dev->lock.file_priv, file_priv ); \ return -EINVAL; \ } \ } while (0) @@ -275,16 +277,19 @@ do { \ return -EFAULT; \ } +struct drm_device; +struct drm_file; + /** * Ioctl function type. * - * \param inode device inode. - * \param filp file pointer. - * \param cmd command. - * \param arg argument. + * \param dev DRM device structure + * \param data pointer to kernel-space stored data, copied in and out according + * to ioctl description. + * \param file_priv DRM file private pointer. */ -typedef int drm_ioctl_t(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +typedef int drm_ioctl_t(struct drm_device *dev, void *data, + struct drm_file *file_priv); typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, unsigned long arg); @@ -293,31 +298,34 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd, #define DRM_MASTER 0x2 #define DRM_ROOT_ONLY 0x4 -typedef struct drm_ioctl_desc { +struct drm_ioctl_desc { + unsigned int cmd; drm_ioctl_t *func; int flags; -} drm_ioctl_desc_t; - -typedef struct drm_devstate { - pid_t owner; /**< X server pid holding x_lock */ -} drm_devstate_t; +}; +/** + * Creates a driver or general drm_ioctl_desc array entry for the given + * ioctl, for use by drm_ioctl(). + */ +#define DRM_IOCTL_DEF(ioctl, func, flags) \ + [DRM_IOCTL_NR(ioctl)] = {ioctl, func, flags} -typedef struct drm_magic_entry { +struct drm_magic_entry { struct list_head head; - drm_hash_item_t hash_item; + struct drm_hash_item hash_item; struct drm_file *priv; -} drm_magic_entry_t; +}; -typedef struct drm_vma_entry { +struct drm_vma_entry { struct list_head head; struct vm_area_struct *vma; pid_t pid; -} drm_vma_entry_t; +}; /** * DMA buffer. 
*/ -typedef struct drm_buf { +struct drm_buf { int idx; /**< Index into master buflist */ int total; /**< Buffer size */ int order; /**< log-base-2(total) */ @@ -329,7 +337,7 @@ typedef struct drm_buf { __volatile__ int waiting; /**< On kernel DMA queue */ __volatile__ int pending; /**< On hardware DMA queue */ wait_queue_head_t dma_wait; /**< Processes waiting */ - struct file *filp; /**< Pointer to holding file descr */ + struct drm_file *file_priv; /**< Private of holding file descr */ int context; /**< Kernel queue for this buffer */ int while_locked; /**< Dispatch this buffer while locked */ enum { @@ -343,30 +351,30 @@ typedef struct drm_buf { int dev_priv_size; /**< Size of buffer private storage */ void *dev_private; /**< Per-buffer private storage */ -} drm_buf_t; +}; /** bufs is one longer than it has to be */ -typedef struct drm_waitlist { +struct drm_waitlist { int count; /**< Number of possible buffers */ - drm_buf_t **bufs; /**< List of pointers to buffers */ - drm_buf_t **rp; /**< Read pointer */ - drm_buf_t **wp; /**< Write pointer */ - drm_buf_t **end; /**< End pointer */ + struct drm_buf **bufs; /**< List of pointers to buffers */ + struct drm_buf **rp; /**< Read pointer */ + struct drm_buf **wp; /**< Write pointer */ + struct drm_buf **end; /**< End pointer */ spinlock_t read_lock; spinlock_t write_lock; -} drm_waitlist_t; +}; -typedef struct drm_freelist { +struct drm_freelist { int initialized; /**< Freelist in use */ atomic_t count; /**< Number of free buffers */ - drm_buf_t *next; /**< End pointer */ + struct drm_buf *next; /**< End pointer */ wait_queue_head_t waiting; /**< Processes waiting on free bufs */ int low_mark; /**< Low water mark */ int high_mark; /**< High water mark */ atomic_t wfh; /**< If waiting for high mark */ spinlock_t lock; -} drm_freelist_t; +}; typedef struct drm_dma_handle { dma_addr_t busaddr; @@ -377,15 +385,15 @@ typedef struct drm_dma_handle { /** * Buffer entry. There is one of this for each buffer size order. 
*/ -typedef struct drm_buf_entry { +struct drm_buf_entry { int buf_size; /**< size */ int buf_count; /**< number of buffers */ - drm_buf_t *buflist; /**< buffer list */ + struct drm_buf *buflist; /**< buffer list */ int seg_count; int page_order; - drm_dma_handle_t **seglist; - drm_freelist_t freelist; -} drm_buf_entry_t; + struct drm_dma_handle **seglist; + struct drm_freelist freelist; +}; /* * This should be small enough to allow the use of kmalloc for hash tables @@ -393,15 +401,15 @@ typedef struct drm_buf_entry { */ #define DRM_FILE_HASH_ORDER 8 -typedef enum{ +enum drm_ref_type { _DRM_REF_USE=0, _DRM_REF_TYPE1, _DRM_NO_REF_TYPES -} drm_ref_t; +}; /** File private data */ -typedef struct drm_file { +struct drm_file { int authenticated; int master; int minor; @@ -424,14 +432,15 @@ typedef struct drm_file { struct list_head refd_objects; struct list_head user_objects; - drm_open_hash_t refd_object_hash[_DRM_NO_REF_TYPES]; + struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES]; + struct file *filp; void *driver_priv; struct list_head fbs; -} drm_file_t; +}; /** Wait queue */ -typedef struct drm_queue { +struct drm_queue { atomic_t use_count; /**< Outstanding uses (+1) */ atomic_t finalization; /**< Finalization in progress */ atomic_t block_count; /**< Count of processes waiting */ @@ -444,33 +453,34 @@ typedef struct drm_queue { atomic_t total_flushed; /**< Total flushes statistic */ atomic_t total_locks; /**< Total locks statistics */ #endif - drm_ctx_flags_t flags; /**< Context preserving and 2D-only */ - drm_waitlist_t waitlist; /**< Pending buffers */ + enum drm_ctx_flags flags; /**< Context preserving and 2D-only */ + struct drm_waitlist waitlist; /**< Pending buffers */ wait_queue_head_t flush_queue; /**< Processes waiting until flush */ -} drm_queue_t; +}; /** * Lock data. */ -typedef struct drm_lock_data { - drm_hw_lock_t *hw_lock; /**< Hardware lock */ - struct file *filp; /**< File descr of lock holder (0=kernel) */ +struct drm_lock_data { + struct drm_hw_lock *hw_lock; /**< Hardware lock */ + /** Private of lock holder's file (NULL=kernel) */ + struct drm_file *file_priv; wait_queue_head_t lock_queue; /**< Queue of blocked processes */ unsigned long lock_time; /**< Time of last lock in jiffies */ spinlock_t spinlock; uint32_t kernel_waiters; uint32_t user_waiters; int idle_has_lock; -} drm_lock_data_t; +}; /** * DMA data. */ -typedef struct drm_device_dma { +struct drm_device_dma { - drm_buf_entry_t bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ + struct drm_buf_entry bufs[DRM_MAX_ORDER + 1]; /**< buffers, grouped by their size order */ int buf_count; /**< total number of buffers */ - drm_buf_t **buflist; /**< Vector of pointers into drm_device_dma::bufs */ + struct drm_buf **buflist; /**< Vector of pointers into drm_device_dma::bufs */ int seg_count; int page_count; /**< number of pages */ unsigned long *pagelist; /**< page list */ @@ -482,25 +492,25 @@ typedef struct drm_device_dma { _DRM_DMA_USE_PCI_RO = 0x08 } flags; -} drm_device_dma_t; +}; /** * AGP memory entry. Stored as a doubly linked list. */ -typedef struct drm_agp_mem { +struct drm_agp_mem { unsigned long handle; /**< handle */ DRM_AGP_MEM *memory; unsigned long bound; /**< address */ int pages; struct list_head head; -} drm_agp_mem_t; +}; /** * AGP data. * * \sa drm_agp_init)() and drm_device::agp. 
*/ -typedef struct drm_agp_head { +struct drm_agp_head { DRM_AGP_KERN agp_info; /**< AGP device information */ struct list_head memory; unsigned long mode; /**< AGP mode */ @@ -513,30 +523,30 @@ typedef struct drm_agp_head { int agp_mtrr; int cant_use_aperture; unsigned long page_mask; -} drm_agp_head_t; +}; /** * Scatter-gather memory. */ -typedef struct drm_sg_mem { +struct drm_sg_mem { unsigned long handle; void *virtual; int pages; struct page **pagelist; dma_addr_t *busaddr; -} drm_sg_mem_t; +}; -typedef struct drm_sigdata { +struct drm_sigdata { int context; - drm_hw_lock_t *lock; -} drm_sigdata_t; + struct drm_hw_lock *lock; +}; /* * Generic memory manager structs */ -typedef struct drm_mm_node { +struct drm_mm_node { struct list_head fl_entry; struct list_head ml_entry; int free; @@ -544,46 +554,42 @@ typedef struct drm_mm_node { unsigned long size; struct drm_mm *mm; void *private; -} drm_mm_node_t; +}; -typedef struct drm_mm { +struct drm_mm { struct list_head fl_entry; struct list_head ml_entry; -} drm_mm_t; +}; /** * Mappings list */ -typedef struct drm_map_list { +struct drm_map_list { struct list_head head; /**< list head */ - drm_hash_item_t hash; - drm_map_t *map; /**< mapping */ - drm_u64_t user_token; - drm_mm_node_t *file_offset_node; -} drm_map_list_t; + struct drm_hash_item hash; + struct drm_map *map; /**< mapping */ + uint64_t user_token; + struct drm_mm_node *file_offset_node; +}; -typedef drm_map_t drm_local_map_t; +typedef struct drm_map drm_local_map_t; /** * Context handle list */ -typedef struct drm_ctx_list { +struct drm_ctx_list { struct list_head head; /**< list head */ drm_context_t handle; /**< context handle */ - drm_file_t *tag; /**< associated fd private data */ -} drm_ctx_list_t; - -struct drm_ctx_sarea_list { - drm_map_t *map; + struct drm_file *tag; /**< associated fd private data */ }; -typedef struct drm_vbl_sig { +struct drm_vbl_sig { struct list_head head; unsigned int sequence; struct siginfo info; struct task_struct *task; -} drm_vbl_sig_t; +}; /* location of GART table */ #define DRM_ATI_GART_MAIN 1 @@ -593,17 +599,13 @@ typedef struct drm_vbl_sig { #define DRM_ATI_GART_PCIE 2 #define DRM_ATI_GART_IGP 3 -typedef struct ati_pcigart_info { +struct ati_pcigart_info { int gart_table_location; int gart_reg_if; void *addr; dma_addr_t bus_addr; drm_local_map_t mapping; int table_size; -} drm_ati_pcigart_info; - -struct drm_drawable_list { - drm_drawable_info_t info; }; #include "drm_objects.h" @@ -614,16 +616,15 @@ struct drm_drawable_list { * in this family */ -struct drm_device; struct drm_driver { int (*load) (struct drm_device *, unsigned long flags); int (*firstopen) (struct drm_device *); - int (*open) (struct drm_device *, drm_file_t *); - void (*preclose) (struct drm_device *, struct file * filp); - void (*postclose) (struct drm_device *, drm_file_t *); + int (*open) (struct drm_device *, struct drm_file *); + void (*preclose) (struct drm_device *, struct drm_file *file_priv); + void (*postclose) (struct drm_device *, struct drm_file *); void (*lastclose) (struct drm_device *); int (*unload) (struct drm_device *); - int (*dma_ioctl) (DRM_IOCTL_ARGS); + int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv); void (*dma_ready) (struct drm_device *); int (*dma_quiescent) (struct drm_device *); int (*context_ctor) (struct drm_device * dev, int context); @@ -653,14 +654,15 @@ struct drm_driver { void (*irq_preinstall) (struct drm_device * dev); void (*irq_postinstall) (struct drm_device * dev); void (*irq_uninstall) 
(struct drm_device * dev); - void (*reclaim_buffers) (struct drm_device *dev, struct file * filp); + void (*reclaim_buffers) (struct drm_device *dev, + struct drm_file *file_priv); void (*reclaim_buffers_locked) (struct drm_device *dev, - struct file * filp); + struct drm_file *file_priv); void (*reclaim_buffers_idlelocked) (struct drm_device *dev, - struct file * filp); - unsigned long (*get_map_ofs) (drm_map_t * map); + struct drm_file *file_priv); + unsigned long (*get_map_ofs) (struct drm_map * map); unsigned long (*get_reg_ofs) (struct drm_device * dev); - void (*set_version) (struct drm_device * dev, drm_set_version_t * sv); + void (*set_version) (struct drm_device * dev, struct drm_set_version * sv); /* FB routines, if present */ int (*fb_probe)(struct drm_device *dev, struct drm_crtc *crtc); @@ -679,7 +681,7 @@ struct drm_driver { /* variables */ u32 driver_features; int dev_priv_size; - drm_ioctl_desc_t *ioctls; + struct drm_ioctl_desc *ioctls; int num_ioctls; struct file_operations fops; struct pci_driver pci_driver; @@ -690,20 +692,20 @@ struct drm_driver { * that may contain multiple heads. Embed one per head of these in the * private drm_device structure. */ -typedef struct drm_head { +struct drm_head { int minor; /**< Minor device number */ struct drm_device *dev; struct proc_dir_entry *dev_root; /**< proc directory entry */ dev_t device; /**< Device number for mknod */ struct class_device *dev_class; -} drm_head_t; +}; /** * DRM device structure. This structure represent a complete card that * may contain multiple heads. */ -typedef struct drm_device { +struct drm_device { char *unique; /**< Unique identifier: e.g., busid */ int unique_len; /**< Length of unique field */ char *devname; /**< For /proc/interrupts */ @@ -729,14 +731,14 @@ typedef struct drm_device { /** \name Performance counters */ /*@{ */ unsigned long counters; - drm_stat_type_t types[15]; + enum drm_stat_type types[15]; atomic_t counts[15]; /*@} */ /** \name Authentication */ /*@{ */ struct list_head filelist; - drm_open_hash_t magiclist; + struct drm_open_hash magiclist; struct list_head magicfree; /*@} */ @@ -744,9 +746,9 @@ typedef struct drm_device { /*@{ */ struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ - drm_open_hash_t map_hash; /**< User token hash table for maps */ - drm_mm_t offset_manager; /**< User token manager */ - drm_open_hash_t object_hash; /**< User token hash table for objects */ + struct drm_open_hash map_hash; /**< User token hash table for maps */ + struct drm_mm offset_manager; /**< User token manager */ + struct drm_open_hash object_hash; /**< User token hash table for objects */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ struct page *ttm_dummy_page; @@ -759,7 +761,7 @@ typedef struct drm_device { struct idr ctx_idr; struct list_head vmalist; /**< List of vmas (for debugging) */ - drm_lock_data_t lock; /**< Information on hardware lock */ + struct drm_lock_data lock; /**< Information on hardware lock */ /*@} */ /** \name DMA queues (contexts) */ @@ -767,8 +769,8 @@ typedef struct drm_device { int queue_count; /**< Number of active DMA queues */ int queue_reserved; /**< Number of reserved DMA queues */ int queue_slots; /**< Actual length of queuelist */ - drm_queue_t **queuelist; /**< Vector of pointers to DMA queues */ - drm_device_dma_t *dma; /**< Optional pointer for DMA support */ + struct drm_queue **queuelist; /**< Vector of pointers to DMA queues */ + struct drm_device_dma *dma; /**< Optional 
pointer for DMA support */ /*@} */ /** \name Context support */ @@ -808,7 +810,7 @@ typedef struct drm_device { wait_queue_head_t buf_readers; /**< Processes waiting to read */ wait_queue_head_t buf_writers; /**< Processes waiting to ctx switch */ - drm_agp_head_t *agp; /**< AGP data */ + struct drm_agp_head *agp; /**< AGP data */ struct pci_dev *pdev; /**< PCI device structure */ int pci_vendor; /**< PCI vendor id */ @@ -816,18 +818,18 @@ typedef struct drm_device { #ifdef __alpha__ struct pci_controller *hose; #endif - drm_sg_mem_t *sg; /**< Scatter gather memory */ + struct drm_sg_mem *sg; /**< Scatter gather memory */ void *dev_private; /**< device private data */ - drm_sigdata_t sigdata; /**< For block_all_signals */ + struct drm_sigdata sigdata; /**< For block_all_signals */ sigset_t sigmask; struct drm_driver *driver; drm_local_map_t *agp_buffer_map; unsigned int agp_buffer_token; - drm_head_t primary; /**< primary screen head */ + struct drm_head primary; /**< primary screen head */ - drm_fence_manager_t fm; - drm_buffer_manager_t bm; + struct drm_fence_manager fm; + struct drm_buffer_manager bm; /** \name Drawable information */ /*@{ */ @@ -837,15 +839,15 @@ typedef struct drm_device { /* DRM mode setting */ struct drm_mode_config mode_config; -} drm_device_t; +}; #if __OS_HAS_AGP -typedef struct drm_agp_ttm_backend { - drm_ttm_backend_t backend; +struct drm_agp_ttm_backend { + struct drm_ttm_backend backend; DRM_AGP_MEM *mem; struct agp_bridge_data *bridge; int populated; -} drm_agp_ttm_backend_t; +}; #endif @@ -920,10 +922,12 @@ extern void drm_exit(struct drm_driver *driver); extern void drm_cleanup_pci(struct pci_dev *pdev); extern int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); +extern long drm_unlocked_ioctl(struct file *filp, + unsigned int cmd, unsigned long arg); extern long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); -extern int drm_lastclose(drm_device_t * dev); +extern int drm_lastclose(struct drm_device *dev); /* Device support (drm_fops.h) */ extern int drm_open(struct inode *inode, struct file *filp); @@ -934,7 +938,7 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait); /* Mapping support (drm_vm.h) */ extern int drm_mmap(struct file *filp, struct vm_area_struct *vma); -extern unsigned long drm_core_get_map_ofs(drm_map_t * map); +extern unsigned long drm_core_get_map_ofs(struct drm_map * map); extern unsigned long drm_core_get_reg_ofs(struct drm_device *dev); extern pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma); @@ -947,174 +951,179 @@ extern void *drm_calloc(size_t nmemb, size_t size, int area); extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); extern unsigned long drm_alloc_pages(int order, int area); extern void drm_free_pages(unsigned long address, int order, int area); -extern DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type); +extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); extern int drm_unbind_agp(DRM_AGP_MEM * handle); extern void drm_free_memctl(size_t size); extern int drm_alloc_memctl(size_t size); -extern void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t *high_threshold); +extern void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold); extern void 
drm_init_memctl(size_t low_threshold, size_t high_threshold, size_t unit_size); /* Misc. IOCTL support (drm_ioctl.h) */ -extern int drm_irq_by_busid(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_getunique(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_setunique(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_getmap(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_getclient(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_getstats(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_setversion(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_noop(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getunique(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_setunique(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getmap(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getclient(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getstats(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_setversion(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Context IOCTL support (drm_context.h) */ -extern int drm_resctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_addctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_modctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_getctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_switchctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_newctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_rmctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); - -extern int drm_ctxbitmap_init(drm_device_t * dev); -extern void drm_ctxbitmap_cleanup(drm_device_t * dev); -extern void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle); - -extern int drm_setsareactx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_getsareactx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_resctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_addctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_modctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_switchctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_newctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_rmctx(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +extern int drm_ctxbitmap_init(struct drm_device 
*dev); +extern void drm_ctxbitmap_cleanup(struct drm_device *dev); +extern void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle); + +extern int drm_setsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_getsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Drawable IOCTL support (drm_drawable.h) */ -extern int drm_adddraw(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_rmdraw(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_update_drawable_info(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, - drm_drawable_t id); -extern void drm_drawable_free_all(drm_device_t *dev); +extern int drm_adddraw(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_rmdraw(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_update_drawable_info(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, + drm_drawable_t id); +extern void drm_drawable_free_all(struct drm_device *dev); /* Authentication IOCTL support (drm_auth.h) */ -extern int drm_getmagic(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_authmagic(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_getmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* Locking IOCTL support (drm_lock.h) */ -extern int drm_lock(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_unlock(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_lock_take(drm_lock_data_t *lock_data, unsigned int context); -extern int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context); -extern void drm_idlelock_take(drm_lock_data_t *lock_data); -extern void drm_idlelock_release(drm_lock_data_t *lock_data); +extern int drm_lock(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_unlock(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); +extern int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context); +extern void drm_idlelock_take(struct drm_lock_data *lock_data); +extern void drm_idlelock_release(struct drm_lock_data *lock_data); /* * These are exported to drivers so that they can implement fencing using * DMA quiscent + idle. DMA quiescent usually requires the hardware lock. 
*/ -extern int drm_i_have_hw_lock(struct file *filp); -extern int drm_kernel_take_hw_lock(struct file *filp); +extern int drm_i_have_hw_lock(struct drm_device *dev, + struct drm_file *file_priv); /* Buffer management support (drm_bufs.h) */ -extern int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addbufs_fb (drm_device_t * dev, drm_buf_desc_t * request); -extern int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr); -extern int drm_addmap_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_rmmap(drm_device_t *dev, drm_local_map_t *map); -extern int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map); -extern int drm_rmmap_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_addbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_infobufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_markbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_freebufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mapbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addbufs_fb (struct drm_device *dev, struct drm_buf_desc * request); +extern int drm_addmap(struct drm_device *dev, unsigned int offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr); +extern int drm_addmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_rmmap(struct drm_device *dev, drm_local_map_t *map); +extern int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map); +extern int drm_rmmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_addbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_infobufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_markbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_freebufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_mapbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern int drm_order(unsigned long size); -extern unsigned long drm_get_resource_start(drm_device_t *dev, +extern unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource); -extern unsigned long drm_get_resource_len(drm_device_t *dev, +extern unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource); +extern struct drm_map_list *drm_find_matching_map(struct drm_device *dev, + drm_local_map_t *map); + /* DMA support (drm_dma.h) */ -extern int drm_dma_setup(drm_device_t * dev); -extern void drm_dma_takedown(drm_device_t * dev); -extern void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf); -extern void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp); +extern int drm_dma_setup(struct drm_device *dev); +extern void drm_dma_takedown(struct drm_device *dev); 
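For reference, a minimal sketch of a driver using the in-kernel map helpers declared just above under the new naming; the foo driver, its use of PCI resource 0 for the register BAR, and the _DRM_KERNEL flag are assumptions made only for the example:

#include "drmP.h"

static int foo_map_mmio(struct drm_device *dev, drm_local_map_t **mmio)
{
	unsigned long base = drm_get_resource_start(dev, 0);
	unsigned long size = drm_get_resource_len(dev, 0);

	/* drm_addmap() now takes enum drm_map_type / enum drm_map_flags
	 * directly instead of the removed *_t aliases. */
	return drm_addmap(dev, base, size, _DRM_REGISTERS, _DRM_KERNEL,
			  mmio);
}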
+extern void drm_free_buffer(struct drm_device *dev, struct drm_buf * buf); +extern void drm_core_reclaim_buffers(struct drm_device *dev, + struct drm_file *filp); /* IRQ support (drm_irq.h) */ -extern int drm_control(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_control(struct drm_device *dev, void *data, + struct drm_file *file_priv); extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); -extern int drm_irq_uninstall(drm_device_t *dev); -extern void drm_driver_irq_preinstall(drm_device_t * dev); -extern void drm_driver_irq_postinstall(drm_device_t * dev); -extern void drm_driver_irq_uninstall(drm_device_t * dev); - -extern int drm_wait_vblank(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_vblank_wait(drm_device_t * dev, unsigned int *vbl_seq); -extern void drm_vbl_send_signals(drm_device_t * dev); -extern void drm_locked_tasklet(drm_device_t *dev, void(*func)(drm_device_t*)); +extern int drm_irq_install(struct drm_device *dev); +extern int drm_irq_uninstall(struct drm_device *dev); +extern void drm_driver_irq_preinstall(struct drm_device *dev); +extern void drm_driver_irq_postinstall(struct drm_device *dev); +extern void drm_driver_irq_uninstall(struct drm_device *dev); + +extern int drm_wait_vblank(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); +extern void drm_vbl_send_signals(struct drm_device *dev); +extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); /* AGP/GART support (drm_agpsupport.h) */ -extern drm_agp_head_t *drm_agp_init(drm_device_t *dev); -extern int drm_agp_acquire(drm_device_t * dev); -extern int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_agp_release(drm_device_t *dev); -extern int drm_agp_release_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode); -extern int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info); -extern int drm_agp_info_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request); -extern int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request); -extern int drm_agp_free_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request); -extern int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request); -extern int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern struct drm_agp_head *drm_agp_init(struct drm_device *dev); +extern int drm_agp_acquire(struct drm_device *dev); +extern int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_agp_release(struct drm_device *dev); +extern int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_agp_enable(struct drm_device 
*dev, struct drm_agp_mode mode); +extern int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info); +extern int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request); +extern int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request); +extern int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request); +extern int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request); +extern int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) extern DRM_AGP_MEM *drm_agp_allocate_memory(size_t pages, u32 type); #else @@ -1123,22 +1132,22 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size extern int drm_agp_free_memory(DRM_AGP_MEM * handle); extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); -extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev); +extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev); /* Stub support (drm_stub.h) */ extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver); -extern int drm_put_dev(drm_device_t * dev); -extern int drm_put_head(drm_head_t * head); +extern int drm_put_dev(struct drm_device *dev); +extern int drm_put_head(struct drm_head * head); extern unsigned int drm_debug; /* 1 to enable debug output */ extern unsigned int drm_cards_limit; -extern drm_head_t **drm_heads; -extern struct drm_sysfs_class *drm_class; +extern struct drm_head **drm_heads; +extern struct class *drm_class; extern struct proc_dir_entry *drm_proc_root; extern drm_local_map_t *drm_getsarea(struct drm_device *dev); /* Proc support (drm_proc.h) */ -extern int drm_proc_init(drm_device_t * dev, +extern int drm_proc_init(struct drm_device *dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root); @@ -1147,47 +1156,47 @@ extern int drm_proc_cleanup(int minor, struct proc_dir_entry *dev_root); /* Scatter Gather Support (drm_scatter.h) */ -extern void drm_sg_cleanup(drm_sg_mem_t * entry); -extern int drm_sg_alloc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_sg_free(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern void drm_sg_cleanup(struct drm_sg_mem * entry); +extern int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request); +extern int drm_sg_free(struct drm_device *dev, void *data, + struct drm_file *file_priv); /* ATI PCIGART support (ati_pcigart.h) */ -extern int drm_ati_pcigart_init(drm_device_t * dev, drm_ati_pcigart_info *gart_info); -extern int drm_ati_pcigart_cleanup(drm_device_t * dev, drm_ati_pcigart_info *gart_info); +extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info); +extern int 
drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info); -extern drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, +extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size, size_t align, dma_addr_t maxaddr); -extern void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); -extern void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah); +extern void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); +extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah); /* sysfs support (drm_sysfs.c) */ struct drm_sysfs_class; -extern struct drm_sysfs_class *drm_sysfs_create(struct module *owner, - char *name); -extern void drm_sysfs_destroy(struct drm_sysfs_class *cs); -extern struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head); +extern struct class *drm_sysfs_create(struct module *owner, char *name); +extern void drm_sysfs_destroy(struct class *cs); +extern struct class_device *drm_sysfs_device_add(struct class *cs, + struct drm_head * head); extern void drm_sysfs_device_remove(struct class_device *class_dev); /* * Basic memory manager support (drm_mm.c) */ -extern drm_mm_node_t * drm_mm_get_block(drm_mm_node_t * parent, unsigned long size, +extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment); -extern void drm_mm_put_block(drm_mm_node_t *cur); -extern drm_mm_node_t *drm_mm_search_free(const drm_mm_t *mm, unsigned long size, +extern void drm_mm_put_block(struct drm_mm_node *cur); +extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, unsigned alignment, int best_match); -extern int drm_mm_init(drm_mm_t *mm, unsigned long start, unsigned long size); -extern void drm_mm_takedown(drm_mm_t *mm); -extern int drm_mm_clean(drm_mm_t *mm); -extern unsigned long drm_mm_tail_space(drm_mm_t *mm); -extern int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size); -extern int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size); - -static inline drm_mm_t *drm_get_mm(drm_mm_node_t *block) +extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); +extern void drm_mm_takedown(struct drm_mm *mm); +extern int drm_mm_clean(struct drm_mm *mm); +extern unsigned long drm_mm_tail_space(struct drm_mm *mm); +extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); +extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); + +static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) { return block->mm; } @@ -1198,14 +1207,14 @@ extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, unsigned int token) { - drm_map_list_t *_entry; + struct drm_map_list *_entry; list_for_each_entry(_entry, &dev->maplist, head) if (_entry->user_token == token) return _entry->map; return NULL; } -static __inline__ int drm_device_is_agp(drm_device_t *dev) +static __inline__ int drm_device_is_agp(struct drm_device *dev) { if ( dev->driver->device_is_agp != NULL ) { int err = (*dev->driver->device_is_agp)( dev ); @@ -1218,7 +1227,7 @@ static __inline__ int drm_device_is_agp(drm_device_t *dev) return pci_find_capability(dev->pdev, PCI_CAP_ID_AGP); } -static __inline__ int drm_device_is_pcie(drm_device_t *dev) +static __inline__ int drm_device_is_pcie(struct drm_device *dev) { return pci_find_capability(dev->pdev, 
PCI_CAP_ID_EXP); } @@ -1279,5 +1288,19 @@ static inline void drm_ctl_free(void *pt, size_t size, int area) /*@}*/ +/** Type for the OS's non-sleepable mutex lock */ +#define DRM_SPINTYPE spinlock_t +/** + * Initialize the lock for use. name is an optional string describing the + * lock + */ +#define DRM_SPININIT(l,name) spin_lock_init(l) +#define DRM_SPINUNINIT(l) +#define DRM_SPINLOCK(l) spin_lock(l) +#define DRM_SPINUNLOCK(l) spin_unlock(l) +#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags); +#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags); +#define DRM_SPINLOCK_ASSERT(l) do {} while (0) + #endif /* __KERNEL__ */ #endif diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c index f134563a..4618823c 100644 --- a/linux-core/drm_agpsupport.c +++ b/linux-core/drm_agpsupport.c @@ -40,7 +40,7 @@ * Get AGP information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a (output) drm_agp_info structure. * \return zero on success or a negative number on failure. @@ -48,7 +48,7 @@ * Verifies the AGP device has been initialized and acquired and fills in the * drm_agp_info structure with the information in drm_agp_head::agp_info. */ -int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info) +int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info) { DRM_AGP_KERN *kern; @@ -70,20 +70,16 @@ int drm_agp_info(drm_device_t * dev, drm_agp_info_t *info) } EXPORT_SYMBOL(drm_agp_info); -int drm_agp_info_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_info_t info; + struct drm_agp_info *info = data; int err; - err = drm_agp_info(dev, &info); + err = drm_agp_info(dev, info); if (err) return err; - - if (copy_to_user((drm_agp_info_t __user *) arg, &info, sizeof(info))) - return -EFAULT; + return 0; } @@ -96,7 +92,7 @@ int drm_agp_info_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ -int drm_agp_acquire(drm_device_t * dev) +int drm_agp_acquire(struct drm_device * dev) { #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) int retcode; @@ -123,7 +119,7 @@ EXPORT_SYMBOL(drm_agp_acquire); * Acquire the AGP device (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or a negative number on failure. @@ -131,12 +127,10 @@ EXPORT_SYMBOL(drm_agp_acquire); * Verifies the AGP device hasn't been acquired before and calls * \c agp_backend_acquire. */ -int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_acquire_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - - return drm_agp_acquire( (drm_device_t *) priv->head->dev ); + return drm_agp_acquire( (struct drm_device *) file_priv->head->dev ); } /** @@ -147,7 +141,7 @@ int drm_agp_acquire_ioctl(struct inode *inode, struct file *filp, * * Verifies the AGP device has been acquired and calls \c agp_backend_release. 
*/ -int drm_agp_release(drm_device_t *dev) +int drm_agp_release(struct drm_device *dev) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -162,12 +156,9 @@ int drm_agp_release(drm_device_t *dev) } EXPORT_SYMBOL(drm_agp_release); -int drm_agp_release_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_release_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - return drm_agp_release(dev); } @@ -181,7 +172,7 @@ int drm_agp_release_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device has been acquired but not enabled, and calls * \c agp_enable. */ -int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) +int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode) { if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -192,31 +183,24 @@ int drm_agp_enable(drm_device_t *dev, drm_agp_mode_t mode) #else agp_enable(dev->agp->bridge, mode.mode); #endif - dev->agp->base = dev->agp->agp_info.aper_base; dev->agp->enabled = 1; return 0; } EXPORT_SYMBOL(drm_agp_enable); -int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_enable_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_mode_t mode; - + struct drm_agp_mode *mode = data; - if (copy_from_user(&mode, (drm_agp_mode_t __user *) arg, sizeof(mode))) - return -EFAULT; - - return drm_agp_enable(dev, mode); + return drm_agp_enable(dev, *mode); } /** * Allocate AGP memory. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv file private pointer. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. @@ -224,9 +208,9 @@ int drm_agp_enable_ioctl(struct inode *inode, struct file *filp, * Verifies the AGP device is present and has been acquired, allocates the * memory via alloc_agp() and creates a drm_agp_mem entry for it. 
*/ -int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; DRM_AGP_MEM *memory; unsigned long pages; u32 type; @@ -259,35 +243,12 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request) EXPORT_SYMBOL(drm_agp_alloc); -int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; - drm_agp_buffer_t __user *argp = (void __user *)arg; - int err; - - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; + struct drm_agp_buffer *request = data; - err = drm_agp_alloc(dev, &request); - if (err) - return err; - - if (copy_to_user(argp, &request, sizeof(request))) { - drm_agp_mem_t *entry; - list_for_each_entry(entry, &dev->agp->memory, head) { - if (entry->handle == request.handle) - break; - } - list_del(&entry->head); - drm_free_agp(entry->memory, entry->pages); - drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS); - return -EFAULT; - } - - return 0; + return drm_agp_alloc(dev, request); } /** @@ -299,10 +260,10 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp, * * Walks through drm_agp_head::memory until finding a matching handle. */ -static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, +static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device * dev, unsigned long handle) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; list_for_each_entry(entry, &dev->agp->memory, head) { if (entry->handle == handle) @@ -315,7 +276,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, * Unbind AGP memory from the GATT (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. * \return zero on success or a negative number on failure. @@ -323,9 +284,9 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev, * Verifies the AGP device is present and acquired, looks-up the AGP memory * entry and passes it to the unbind_agp() function. */ -int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int ret; if (!dev->agp || !dev->agp->acquired) @@ -342,18 +303,12 @@ int drm_agp_unbind(drm_device_t *dev, drm_agp_binding_t *request) EXPORT_SYMBOL(drm_agp_unbind); -int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_unbind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; - - if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) - return -EFAULT; + struct drm_agp_binding *request = data; - return drm_agp_unbind(dev, &request); + return drm_agp_unbind(dev, request); } @@ -361,7 +316,7 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * Bind AGP memory into the GATT (ioctl) * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_binding structure. 
* \return zero on success or a negative number on failure. @@ -370,9 +325,9 @@ int drm_agp_unbind_ioctl(struct inode *inode, struct file *filp, * is currently bound into the GATT. Looks-up the AGP memory entry and passes * it to bind_agp() function. */ -int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) +int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int retcode; int page; @@ -393,18 +348,12 @@ int drm_agp_bind(drm_device_t *dev, drm_agp_binding_t *request) EXPORT_SYMBOL(drm_agp_bind); -int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_bind_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_binding_t request; + struct drm_agp_binding *request = data; - if (copy_from_user - (&request, (drm_agp_binding_t __user *) arg, sizeof(request))) - return -EFAULT; - - return drm_agp_bind(dev, &request); + return drm_agp_bind(dev, request); } @@ -412,7 +361,7 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * Free AGP memory (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_agp_buffer structure. * \return zero on success or a negative number on failure. @@ -422,9 +371,9 @@ int drm_agp_bind_ioctl(struct inode *inode, struct file *filp, * unbind_agp(). Frees it via free_agp() as well as the entry itself * and unlinks from the doubly linked list it's inserted in. */ -int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request) +int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request) { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; if (!dev->agp || !dev->agp->acquired) return -EINVAL; @@ -443,18 +392,12 @@ EXPORT_SYMBOL(drm_agp_free); -int drm_agp_free_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_agp_free_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_agp_buffer_t request; - - if (copy_from_user - (&request, (drm_agp_buffer_t __user *) arg, sizeof(request))) - return -EFAULT; + struct drm_agp_buffer *request = data; - return drm_agp_free(dev, &request); + return drm_agp_free(dev, request); } @@ -467,9 +410,9 @@ int drm_agp_free_ioctl(struct inode *inode, struct file *filp, * via the inter_module_* functions. Creates and initializes a drm_agp_head * structure. */ -drm_agp_head_t *drm_agp_init(drm_device_t *dev) +struct drm_agp_head *drm_agp_init(struct drm_device *dev) { - drm_agp_head_t *head = NULL; + struct drm_agp_head *head = NULL; if (!(head = drm_alloc(sizeof(*head), DRM_MEM_AGPLISTS))) return NULL; @@ -497,6 +440,7 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev) INIT_LIST_HEAD(&head->memory); head->cant_use_aperture = head->agp_info.cant_use_aperture; head->page_mask = head->agp_info.page_mask; + head->base = head->agp_info.aper_base; return head; } @@ -554,16 +498,16 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) #define AGP_REQUIRED_MAJOR 0 #define AGP_REQUIRED_MINOR 102 -static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) { +static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) { return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 
0 : 1); } -static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, +static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages, struct page **pages) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); struct page **cur_page, **last_page = pages + num_pages; DRM_AGP_MEM *mem; @@ -590,12 +534,12 @@ static int drm_agp_populate(drm_ttm_backend_t *backend, unsigned long num_pages, return 0; } -static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, +static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, unsigned long offset, int cached) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; int ret; @@ -612,10 +556,10 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, return ret; } -static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_DEBUG("drm_agp_unbind_ttm\n"); if (agp_be->mem->is_bound) @@ -624,10 +568,10 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { return 0; } -static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) { - drm_agp_ttm_backend_t *agp_be = - container_of(backend, drm_agp_ttm_backend_t, backend); + struct drm_agp_ttm_backend *agp_be = + container_of(backend, struct drm_agp_ttm_backend, backend); DRM_AGP_MEM *mem = agp_be->mem; DRM_DEBUG("drm_agp_clear_ttm\n"); @@ -640,13 +584,13 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { agp_be->mem = NULL; } -static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; if (backend) { DRM_DEBUG("drm_agp_destroy_ttm\n"); - agp_be = container_of(backend, drm_agp_ttm_backend_t, backend); + agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); if (agp_be) { if (agp_be->mem) { backend->func->clear(backend); @@ -656,7 +600,7 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { } } -static drm_ttm_backend_func_t agp_ttm_backend = +static struct drm_ttm_backend_func agp_ttm_backend = { .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, .populate = drm_agp_populate, @@ -666,10 +610,10 @@ static drm_ttm_backend_func_t agp_ttm_backend = .destroy = drm_agp_destroy_ttm, }; -drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev) +struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev) { - drm_agp_ttm_backend_t *agp_be; + struct drm_agp_ttm_backend *agp_be; struct agp_kern_info *info; if (!dev->agp) { diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c index 6948d858..e35e8b6d 100644 --- a/linux-core/drm_auth.c +++ b/linux-core/drm_auth.c @@ -45,15 +45,15 @@ * the one with matching magic number, while holding the drm_device::struct_mutex * lock. 
*/ -static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) +static struct drm_file *drm_find_file(struct drm_device * dev, drm_magic_t magic) { - drm_file_t *retval = NULL; - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_file *retval = NULL; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; - mutex_lock(&dev->struct_mutex); - if (!drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) { - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + mutex_lock(&dev->struct_mutex); + if (!drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) { + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); retval = pt->priv; } mutex_unlock(&dev->struct_mutex); @@ -71,10 +71,10 @@ static drm_file_t *drm_find_file(drm_device_t * dev, drm_magic_t magic) * associated the magic number hash key in drm_device::magiclist, while holding * the drm_device::struct_mutex lock. */ -static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, +static int drm_add_magic(struct drm_device * dev, struct drm_file * priv, drm_magic_t magic) { - drm_magic_entry_t *entry; + struct drm_magic_entry *entry; DRM_DEBUG("%d\n", magic); @@ -101,10 +101,10 @@ static int drm_add_magic(drm_device_t *dev, drm_file_t *priv, * Searches and unlinks the entry in drm_device::magiclist with the magic * number hash key, while holding the drm_device::struct_mutex lock. */ -static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) +static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic) { - drm_magic_entry_t *pt; - drm_hash_item_t *hash; + struct drm_magic_entry *pt; + struct drm_hash_item *hash; DRM_DEBUG("%d\n", magic); @@ -113,7 +113,7 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) mutex_unlock(&dev->struct_mutex); return -EINVAL; } - pt = drm_hash_entry(hash, drm_magic_entry_t, hash_item); + pt = drm_hash_entry(hash, struct drm_magic_entry, hash_item); drm_ht_remove_item(&dev->magiclist, hash); list_del(&pt->head); mutex_unlock(&dev->struct_mutex); @@ -127,42 +127,38 @@ static int drm_remove_magic(drm_device_t * dev, drm_magic_t magic) * Get a unique magic number (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a resulting drm_auth structure. * \return zero on success, or a negative number on failure. * * If there is a magic number in drm_file::magic then use it, otherwise * searches an unique non-zero magic number and add it associating it with \p - * filp. + * file_priv. 
*/ -int drm_getmagic(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv) { static drm_magic_t sequence = 0; static DEFINE_SPINLOCK(lock); - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; + struct drm_auth *auth = data; /* Find unique magic */ - if (priv->magic) { - auth.magic = priv->magic; + if (file_priv->magic) { + auth->magic = file_priv->magic; } else { do { spin_lock(&lock); if (!sequence) ++sequence; /* reserve 0 */ - auth.magic = sequence++; + auth->magic = sequence++; spin_unlock(&lock); - } while (drm_find_file(dev, auth.magic)); - priv->magic = auth.magic; - drm_add_magic(dev, priv, auth.magic); + } while (drm_find_file(dev, auth->magic)); + file_priv->magic = auth->magic; + drm_add_magic(dev, file_priv, auth->magic); } - DRM_DEBUG("%u\n", auth.magic); - if (copy_to_user((drm_auth_t __user *) arg, &auth, sizeof(auth))) - return -EFAULT; + DRM_DEBUG("%u\n", auth->magic); + return 0; } @@ -170,27 +166,23 @@ int drm_getmagic(struct inode *inode, struct file *filp, * Authenticate with a magic. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_auth structure. * \return zero if authentication successed, or a negative number otherwise. * - * Checks if \p filp is associated with the magic number passed in \arg. + * Checks if \p file_priv is associated with the magic number passed in \arg. */ -int drm_authmagic(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_authmagic(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_auth_t auth; - drm_file_t *file; - - if (copy_from_user(&auth, (drm_auth_t __user *) arg, sizeof(auth))) - return -EFAULT; - DRM_DEBUG("%u\n", auth.magic); - if ((file = drm_find_file(dev, auth.magic))) { + struct drm_auth *auth = data; + struct drm_file *file; + + DRM_DEBUG("%u\n", auth->magic); + if ((file = drm_find_file(dev, auth->magic))) { file->authenticated = 1; - drm_remove_magic(dev, auth.magic); + drm_remove_magic(dev, auth->magic); return 0; } return -EINVAL; diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index fd097968..89062f11 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -49,10 +49,10 @@ * */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo); -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); +static void drm_bo_destroy_locked(struct drm_buffer_object * bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -63,9 +63,9 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. 
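/*
 * Sketch of the ioctl-handler conversion that runs through this diff
 * (drm_agp_bind_ioctl, drm_agp_free_ioctl, drm_getmagic, drm_authmagic and
 * the drm_bo / drm_mm ioctls below).  The old (inode, filp, cmd, arg)
 * handlers dug the device out of filp->private_data and did their own
 * copy_from_user()/copy_to_user(); the new (dev, data, file_priv) handlers
 * assume the DRM core has already marshalled the argument, so they only
 * cast it.  The types below are local stand-ins, not the real definitions.
 */
struct drm_device;                              /* opaque stand-in */
struct drm_file;                                /* opaque stand-in */
struct drm_agp_binding { unsigned long handle; unsigned long offset; };

static int drm_agp_bind_stub(struct drm_device *dev,
                             struct drm_agp_binding *request)
{
        (void)dev;
        (void)request;
        return 0;                               /* stands in for drm_agp_bind() */
}

/* New-style handler: no user-space copying inside the handler itself. */
int example_agp_bind_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_agp_binding *request = data;

        (void)file_priv;
        return drm_agp_bind_stub(dev, request);
}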
*/ -void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); DRM_ASSERT_LOCKED(&bo->mutex); @@ -74,14 +74,13 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) list_add_tail(&bo->pinned_lru, &man->pinned); } -void drm_bo_add_to_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); - if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)) - || bo->mem.mem_type != bo->pinned_mem_type) { + if (!bo->pinned || bo->mem.mem_type != bo->pinned_mem_type) { man = &bo->dev->bm.man[bo->mem.mem_type]; list_add_tail(&bo->lru, &man->lru); } else { @@ -89,7 +88,7 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) } } -static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) +static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -112,7 +111,7 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) return 0; } -static void drm_bo_vm_post_move(drm_buffer_object_t * bo) +static void drm_bo_vm_post_move(struct drm_buffer_object * bo) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -133,9 +132,9 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo) * Call bo->mutex locked. */ -static int drm_bo_add_ttm(drm_buffer_object_t * bo) +static int drm_bo_add_ttm(struct drm_buffer_object * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; bo->ttm = NULL; @@ -164,16 +163,16 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) return ret; } -static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, +static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int evict, int no_wait) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); int new_is_pci = drm_mem_reg_is_pci(dev, mem); - drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; - drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; + struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; int ret = 0; if (old_is_pci || new_is_pci) @@ -201,9 +200,9 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { - drm_bo_mem_reg_t *old_mem = &bo->mem; - uint32_t save_flags = old_mem->flags; - uint32_t save_mask = old_mem->mask; + struct drm_bo_mem_reg *old_mem = &bo->mem; + uint64_t save_flags = old_mem->flags; + uint64_t save_mask = old_mem->mask; *old_mem = *mem; mem->mm_node = NULL; @@ -266,7 +265,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, * Wait until the buffer is idle. 
*/ -int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, +int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait) { int ret; @@ -292,10 +291,10 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } -static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) +static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; if (bo->fence) { if (bm->nice_mode) { @@ -327,10 +326,10 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) * fence object and removing from lru lists and memory managers. */ -static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) +static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -389,10 +388,10 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) * to the buffer object. Then destroy it. */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo) +static void drm_bo_destroy_locked(struct drm_buffer_object * bo) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -438,19 +437,19 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) * Call dev->struct_mutex locked. */ -static void drm_bo_delayed_delete(drm_device_t * dev, int remove_all) +static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; struct list_head *list, *next; list_for_each_safe(list, next, &bm->ddestroy) { - entry = list_entry(list, drm_buffer_object_t, ddestroy); + entry = list_entry(list, struct drm_buffer_object, ddestroy); nentry = NULL; if (next != &bm->ddestroy) { - nentry = list_entry(next, drm_buffer_object_t, + nentry = list_entry(next, struct drm_buffer_object, ddestroy); atomic_inc(&nentry->usage); } @@ -470,12 +469,12 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) #endif { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) - drm_device_t *dev = (drm_device_t *) data; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = (struct drm_device *) data; + struct drm_buffer_manager *bm = &dev->bm; #else - drm_buffer_manager_t *bm = - container_of(work, drm_buffer_manager_t, wq.work); - drm_device_t *dev = container_of(bm, drm_device_t, bm); + struct drm_buffer_manager *bm = + container_of(work, struct drm_buffer_manager, wq.work); + struct drm_device *dev = container_of(bm, struct drm_device, bm); #endif DRM_DEBUG("Delayed delete Worker\n"); @@ -493,7 +492,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) +void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; bo = NULL; @@ -506,10 +505,11 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) } EXPORT_SYMBOL(drm_bo_usage_deref_locked); -static void 
drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) +static void drm_bo_base_deref_locked(struct drm_file * file_priv, + struct drm_user_object * uo) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -517,10 +517,10 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo) drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) +void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; - drm_device_t *dev = tmp_bo->dev; + struct drm_device *dev = tmp_bo->dev; *bo = NULL; if (atomic_dec_and_test(&tmp_bo->usage)) { @@ -530,22 +530,23 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) mutex_unlock(&dev->struct_mutex); } } +EXPORT_SYMBOL(drm_bo_usage_deref_unlocked); /* * Note. The caller has to register (if applicable) * and deregister fence object usage. */ -int drm_fence_buffer_objects(drm_file_t * priv, +int drm_fence_buffer_objects(struct drm_file * file_priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence) + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence) { - drm_device_t *dev = priv->head->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; uint32_t fence_type = 0; int count = 0; int ret = 0; @@ -603,7 +604,7 @@ int drm_fence_buffer_objects(drm_file_t * priv, l = f_list.next; while (l != &f_list) { prefetch(l->next); - entry = list_entry(l, drm_buffer_object_t, lru); + entry = list_entry(l, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -636,12 +637,12 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); * bo->mutex locked */ -static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, +static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, int no_wait) { int ret = 0; - drm_device_t *dev = bo->dev; - drm_bo_mem_reg_t evict_mem; + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg evict_mem; /* * Someone might have modified the buffer before we took the buffer mutex. @@ -706,14 +707,18 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, return ret; } -static int drm_bo_mem_force_space(drm_device_t * dev, - drm_bo_mem_reg_t * mem, +/** + * Repeatedly evict memory from the LRU for @mem_type until we create enough + * space, or we've evicted everything and there isn't enough space. 
+ */ +static int drm_bo_mem_force_space(struct drm_device * dev, + struct drm_bo_mem_reg * mem, uint32_t mem_type, int no_wait) { - drm_mm_node_t *node; - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *entry; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_mm_node *node; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *entry; + struct drm_mem_type_manager *man = &bm->man[mem_type]; struct list_head *lru; unsigned long num_pages = mem->num_pages; int ret; @@ -729,11 +734,11 @@ static int drm_bo_mem_force_space(drm_device_t * dev, if (lru->next == lru) break; - entry = list_entry(lru->next, drm_buffer_object_t, lru); + entry = list_entry(lru->next, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); - BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)); + BUG_ON(entry->pinned); ret = drm_bo_evict(entry, mem_type, no_wait); mutex_unlock(&entry->mutex); @@ -755,7 +760,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev, return 0; } -static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, +static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, uint32_t mem_type, uint32_t mask, uint32_t * res_mask) { @@ -792,12 +797,20 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, return 1; } -int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait) +/** + * Creates space for memory region @mem according to its type. + * + * This function first searches for free space in compatible memory types in + * the priority order defined by the driver. If free space isn't found, then + * drm_bo_mem_force_space is attempted in priority order to evict and find + * space. + */ +int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; @@ -807,7 +820,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, int type_found = 0; int type_ok = 0; int has_eagain = 0; - drm_mm_node_t *node = NULL; + struct drm_mm_node *node = NULL; int ret; mem->mm_node = NULL; @@ -884,8 +897,8 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(drm_buffer_object_t * bo, - uint32_t new_mask, uint32_t hint) +static int drm_bo_new_mask(struct drm_buffer_object * bo, + uint64_t new_mask, uint32_t hint) { uint32_t new_props; @@ -893,18 +906,6 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, DRM_ERROR("User buffers are not supported yet\n"); return -EINVAL; } - if (bo->type == drm_bo_type_fake && - !(new_mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))) { - DRM_ERROR("Fake buffers must be pinned.\n"); - return -EINVAL; - } - - if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) { - DRM_ERROR - ("DRM_BO_FLAG_NO_EVICT is only available to priviliged " - "processes\n"); - return -EPERM; - } new_props = new_mask & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ); @@ -922,25 +923,25 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, * Call dev->struct_mutex locked. 
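/*
 * Rough shape of the placement policy described in the new comments above
 * (simplified; the real drm_bo_mem_space() also checks per-type
 * compatibility masks): try each memory type in the driver's priority order
 * for free space first, and only if that fails retry while evicting LRU
 * buffers, which is what drm_bo_mem_force_space() does for one type.  The
 * helpers here are stand-ins, not DRM functions; definitions are omitted.
 */
#include <stddef.h>

struct mm_node;                                 /* opaque stand-in */

extern struct mm_node *try_alloc(unsigned type, unsigned long num_pages);
extern struct mm_node *evict_until_fits(unsigned type, unsigned long num_pages);

struct mm_node *place_buffer(const unsigned *prios, unsigned num_prios,
                             unsigned long num_pages)
{
        struct mm_node *node = NULL;
        unsigned i;

        for (i = 0; i < num_prios && !node; ++i)        /* free-space pass */
                node = try_alloc(prios[i], num_pages);

        for (i = 0; i < num_prios && !node; ++i)        /* eviction pass */
                node = evict_until_fits(prios[i], num_pages);

        return node;                                    /* NULL maps to -ENOMEM */
}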
*/ -drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, +struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv, uint32_t handle, int check_owner) { - drm_user_object_t *uo; - drm_buffer_object_t *bo; + struct drm_user_object *uo; + struct drm_buffer_object *bo; - uo = drm_lookup_user_object(priv, handle); + uo = drm_lookup_user_object(file_priv, handle); if (!uo || (uo->type != drm_buffer_type)) { DRM_ERROR("Could not find buffer object 0x%08x\n", handle); return NULL; } - if (check_owner && priv != uo->owner) { - if (!drm_lookup_ref_object(priv, uo, _DRM_REF_USE)) + if (check_owner && file_priv != uo->owner) { + if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE)) return NULL; } - bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + bo = drm_user_object_entry(uo, struct drm_buffer_object, base); atomic_inc(&bo->usage); return bo; } @@ -951,9 +952,9 @@ drm_buffer_object_t *drm_lookup_buffer_object(drm_file_t * priv, * Doesn't do any fence flushing as opposed to the drm_bo_busy function. */ -static int drm_bo_quick_busy(drm_buffer_object_t * bo) +static int drm_bo_quick_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -971,9 +972,9 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. */ -static int drm_bo_busy(drm_buffer_object_t * bo) +static int drm_bo_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -991,7 +992,7 @@ static int drm_bo_busy(drm_buffer_object_t * bo) return 0; } -static int drm_bo_read_cached(drm_buffer_object_t * bo) +static int drm_bo_read_cached(struct drm_buffer_object * bo) { int ret = 0; @@ -1005,7 +1006,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) * Wait until a buffer is unmapped. */ -static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) +static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) { int ret = 0; @@ -1021,7 +1022,7 @@ static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) return ret; } -static int drm_bo_check_unfenced(drm_buffer_object_t * bo) +static int drm_bo_check_unfenced(struct drm_buffer_object * bo) { int ret; @@ -1043,7 +1044,7 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo) * the buffer "unfenced" after validating, but before fencing. */ -static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, +static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, int eagain_if_wait) { int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); @@ -1076,8 +1077,8 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, * Bo locked. */ -static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, - drm_bo_arg_reply_t * rep) +static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, + struct drm_bo_info_rep *rep) { rep->handle = bo->base.hash.key; rep->flags = bo->mem.flags; @@ -1103,17 +1104,17 @@ static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, * unregistered. 
*/ -static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, +static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle, uint32_t map_flags, unsigned hint, - drm_bo_arg_reply_t * rep) + struct drm_bo_info_rep *rep) { - drm_buffer_object_t *bo; - drm_device_t *dev = priv->head->dev; + struct drm_buffer_object *bo; + struct drm_device *dev = file_priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) @@ -1170,7 +1171,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, } mutex_lock(&dev->struct_mutex); - ret = drm_add_ref_object(priv, &bo->base, _DRM_REF_TYPE1); + ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); mutex_unlock(&dev->struct_mutex); if (ret) { if (atomic_add_negative(-1, &bo->mapped)) @@ -1184,28 +1185,28 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) +static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle) { - drm_device_t *dev = priv->head->dev; - drm_buffer_object_t *bo; - drm_ref_object_t *ro; + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; + struct drm_ref_object *ro; int ret = 0; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); if (!bo) { ret = -EINVAL; goto out; } - ro = drm_lookup_ref_object(priv, &bo->base, _DRM_REF_TYPE1); + ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1); if (!ro) { ret = -EINVAL; goto out; } - drm_remove_ref_object(priv, ro); + drm_remove_ref_object(file_priv, ro); drm_bo_usage_deref_locked(&bo); out: mutex_unlock(&dev->struct_mutex); @@ -1216,12 +1217,12 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle) * Call struct-sem locked. */ -static void drm_buffer_user_object_unmap(drm_file_t * priv, - drm_user_object_t * uo, - drm_ref_t action) +static void drm_buffer_user_object_unmap(struct drm_file *file_priv, + struct drm_user_object * uo, + enum drm_ref_type action) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); /* * We DON'T want to take the bo->lock here, because we want to @@ -1239,13 +1240,13 @@ static void drm_buffer_user_object_unmap(drm_file_t * priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. */ -int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; /* * Flush outstanding fences. 
*/ @@ -1301,7 +1302,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, return ret; } -static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) +static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) { uint32_t flag_diff = (mem->mask ^ mem->flags); @@ -1319,10 +1320,10 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) return 1; } -static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) +static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; @@ -1352,7 +1353,8 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) return 0; } - DRM_ERROR("Illegal fake buffer flags 0x%08x\n", mem->mask); + DRM_ERROR("Illegal fake buffer flags 0x%016llx\n", + (unsigned long long) mem->mask); return -EINVAL; } @@ -1360,23 +1362,52 @@ static int drm_bo_check_fake(drm_device_t * dev, drm_bo_mem_reg_t * mem) * bo locked. */ -static int drm_buffer_object_validate(drm_buffer_object_t * bo, +static int drm_buffer_object_validate(struct drm_buffer_object * bo, + uint32_t fence_class, int move_unfenced, int no_wait) { - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + uint32_t ftype; int ret; - DRM_DEBUG("New flags 0x%08x, Old flags 0x%08x\n", bo->mem.mask, - bo->mem.flags); - ret = - driver->fence_type(bo, &bo->fence_class, &bo->fence_type); + DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n", + (unsigned long long) bo->mem.mask, + (unsigned long long) bo->mem.flags); + + ret = driver->fence_type(bo, &ftype); + if (ret) { DRM_ERROR("Driver did not support given buffer permissions\n"); return ret; } + if (bo->pinned && bo->pinned_mem_type != bo->mem.mem_type) { + DRM_ERROR("Attempt to validate pinned buffer into different memory " + "type\n"); + return -EINVAL; + } + + /* + * We're switching command submission mechanism, + * or cannot simply rely on the hardware serializing for us. + * + * Wait for buffer idle. + */ + + if ((fence_class != bo->fence_class) || + ((ftype ^ bo->fence_type) & bo->fence_type)) { + + ret = drm_bo_wait(bo, 0, 0, no_wait); + + if (ret) + return ret; + + } + + bo->fence_class = fence_class; + bo->fence_type = ftype; ret = drm_bo_wait_unmapped(bo, no_wait); if (ret) return ret; @@ -1402,37 +1433,6 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, } /* - * Pinned buffers. - */ - - if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) { - bo->pinned_mem_type = bo->mem.mem_type; - mutex_lock(&dev->struct_mutex); - list_del_init(&bo->pinned_lru); - drm_bo_add_to_pinned_lru(bo); - - if (bo->pinned_node != bo->mem.mm_node) { - if (bo->pinned_node != NULL) - drm_mm_put_block(bo->pinned_node); - bo->pinned_node = bo->mem.mm_node; - } - - mutex_unlock(&dev->struct_mutex); - - } else if (bo->pinned_node != NULL) { - - mutex_lock(&dev->struct_mutex); - - if (bo->pinned_node != bo->mem.mm_node) - drm_mm_put_block(bo->pinned_node); - - list_del_init(&bo->pinned_lru); - bo->pinned_node = NULL; - mutex_unlock(&dev->struct_mutex); - - } - - /* * We might need to add a TTM. 
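/*
 * The idle-wait test added to drm_buffer_object_validate() above,
 * (ftype ^ bo->fence_type) & bo->fence_type, is the bit expression
 * old & ~new: it is non-zero exactly when the previously negotiated fence
 * type carries a bit that the newly computed type no longer has; the patch
 * treats either that or a fence-class switch as a reason to wait for the
 * buffer to go idle.  A stand-alone check of the identity:
 */
#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t old_type = 0x5;        /* bits 0 and 2 were in use */
        uint32_t new_type = 0x6;        /* bits 1 and 2 will be used */

        /* bit 0 is being dropped, so the expression is non-zero */
        assert(((new_type ^ old_type) & old_type) == (old_type & ~new_type));
        assert(((new_type ^ old_type) & old_type) == 0x1);
        return 0;
}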
*/ @@ -1466,17 +1466,19 @@ static int drm_buffer_object_validate(drm_buffer_object_t * bo, return 0; } -static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, - uint32_t flags, uint32_t mask, uint32_t hint, - drm_bo_arg_reply_t * rep) +static int drm_bo_handle_validate(struct drm_file *file_priv, + uint32_t handle, + uint32_t fence_class, + uint64_t flags, uint64_t mask, uint32_t hint, + struct drm_bo_info_rep *rep) { - struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) { return -EINVAL; @@ -1494,7 +1496,8 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, goto out; ret = - drm_buffer_object_validate(bo, !(hint & DRM_BO_HINT_DONT_FENCE), + drm_buffer_object_validate(bo, fence_class, + !(hint & DRM_BO_HINT_DONT_FENCE), no_wait); drm_bo_fill_rep_arg(bo, rep); @@ -1506,14 +1509,18 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle, return ret; } -static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, - drm_bo_arg_reply_t * rep) +/** + * Fills out the generic buffer object ioctl reply with the information for + * the BO with id of handle. + */ +static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle, + struct drm_bo_info_rep *rep) { - struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) { @@ -1528,16 +1535,17 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle, return 0; } -static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, - uint32_t hint, drm_bo_arg_reply_t * rep) +static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle, + uint32_t hint, + struct drm_bo_info_rep *rep) { - struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_device *dev = file_priv->head->dev; + struct drm_buffer_object *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; mutex_lock(&dev->struct_mutex); - bo = drm_lookup_buffer_object(priv, handle, 1); + bo = drm_lookup_buffer_object(file_priv, handle, 1); mutex_unlock(&dev->struct_mutex); if (!bo) { @@ -1560,17 +1568,18 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle, return ret; } -int drm_buffer_object_create(drm_device_t *dev, +int drm_buffer_object_create(struct drm_device *dev, unsigned long size, - drm_bo_type_t type, - uint32_t mask, + enum drm_bo_type type, + uint64_t mask, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, - drm_buffer_object_t ** buf_obj) + struct drm_buffer_object ** buf_obj) { - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *bo; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *bo; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret = 0; unsigned long num_pages; @@ -1615,8 +1624,8 @@ int drm_buffer_object_create(drm_device_t *dev, bo->buffer_start = buffer_start; } bo->priv_flags = 0; - bo->mem.flags = 0; - bo->mem.mask = 0; + bo->mem.flags = 0ULL; + bo->mem.mask = 0ULL; atomic_inc(&bm->count); 
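/*
 * The flags/mask fields widen from 32 to 64 bits in this diff (uint64_t
 * save_flags/save_mask, the 0ULL initialisers above, and the 0x%016llx
 * DRM_ERROR/DRM_DEBUG formats with an explicit cast).  The cast matters
 * because uint64_t is not guaranteed to be unsigned long long on every
 * architecture; a minimal user-space illustration of the same point:
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask = UINT64_C(1) << 53;      /* some high flag bit */

        /* the style used in the patch: %llx plus an explicit cast */
        printf("flags 0x%016llx\n", (unsigned long long)mask);

        /* the portable user-space alternative from <inttypes.h> */
        printf("flags 0x%016" PRIx64 "\n", mask);
        return 0;
}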
ret = drm_bo_new_mask(bo, mask, hint); @@ -1630,10 +1639,28 @@ int drm_buffer_object_create(drm_device_t *dev, if (ret) goto out_err; } - ret = drm_buffer_object_validate(bo, 0, hint & DRM_BO_HINT_DONT_BLOCK); + + bo->fence_class = 0; + ret = driver->fence_type(bo, &bo->fence_type); + if (ret) { + DRM_ERROR("Driver did not support given buffer permissions\n"); + goto out_err; + } + + if (bo->type == drm_bo_type_fake) { + ret = drm_bo_check_fake(dev, &bo->mem); + if (ret) + goto out_err; + } + + ret = drm_bo_add_ttm(bo); if (ret) goto out_err; + mutex_lock(&dev->struct_mutex); + drm_bo_add_to_lru(bo); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&bo->mutex); *buf_obj = bo; return 0; @@ -1646,14 +1673,14 @@ int drm_buffer_object_create(drm_device_t *dev, } EXPORT_SYMBOL(drm_buffer_object_create); -static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, - int shareable) +int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); - ret = drm_add_user_object(priv, &bo->base, shareable); + ret = drm_add_user_object(file_priv, &bo->base, shareable); if (ret) goto out; @@ -1666,22 +1693,25 @@ static int drm_bo_add_user_object(drm_file_t * priv, drm_buffer_object_t * bo, mutex_unlock(&dev->struct_mutex); return ret; } +EXPORT_SYMBOL(drm_bo_add_user_object); -static int drm_bo_lock_test(drm_device_t * dev, struct file *filp) +static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv) { - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return 0; } -int drm_bo_ioctl(DRM_IOCTL_ARGS) +int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_bo_arg_t arg; - drm_bo_arg_request_t *req = &arg.d.req; - drm_bo_arg_reply_t rep; - unsigned long next; - drm_user_object_t *uo; - drm_buffer_object_t *entry; + struct drm_bo_op_arg curarg; + struct drm_bo_op_arg *arg = data; + struct drm_bo_op_req *req = &arg->d.req; + struct drm_bo_info_rep rep; + unsigned long next = 0; + void __user *curuserarg = NULL; + int ret; + + DRM_DEBUG("drm_bo_op_ioctl\n"); if (!dev->bm.initialized) { DRM_ERROR("Buffer object manager is not initialized.\n"); @@ -1689,140 +1719,388 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS) } do { - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + if (next != 0) { + curuserarg = (void __user *)next; + if (copy_from_user(&curarg, curuserarg, + sizeof(curarg)) != 0) + return -EFAULT; + arg = &curarg; + } - if (arg.handled) { - data = arg.next; + if (arg->handled) { + next = arg->next; continue; } - - rep.ret = 0; + req = &arg->d.req; + ret = 0; switch (req->op) { - case drm_bo_create: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) - break; - rep.ret = - drm_buffer_object_create(priv->head->dev, - req->size, - req->type, - req->mask, - req->hint, - req->page_alignment, - req->buffer_start, &entry); - if (rep.ret) - break; - - rep.ret = - drm_bo_add_user_object(priv, entry, - req-> - mask & - DRM_BO_FLAG_SHAREABLE); - if (rep.ret) - drm_bo_usage_deref_unlocked(&entry); - - if (rep.ret) - break; - - mutex_lock(&entry->mutex); - drm_bo_fill_rep_arg(entry, &rep); - mutex_unlock(&entry->mutex); - break; - case drm_bo_unmap: - rep.ret = drm_buffer_object_unmap(priv, req->handle); - break; - case drm_bo_map: - rep.ret = drm_buffer_object_map(priv, req->handle, - req->mask, - req->hint, &rep); - break; 
- case drm_bo_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, req->handle); - if (!uo || (uo->type != drm_buffer_type) - || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - rep.ret = -EINVAL; - break; - } - rep.ret = drm_remove_user_object(priv, uo); - mutex_unlock(&dev->struct_mutex); - break; - case drm_bo_reference: - rep.ret = drm_user_object_ref(priv, req->handle, - drm_buffer_type, &uo); - if (rep.ret) - break; - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_unreference: - rep.ret = drm_user_object_unref(priv, req->handle, - drm_buffer_type); - break; case drm_bo_validate: - rep.ret = drm_bo_lock_test(dev, filp); - - if (rep.ret) + ret = drm_bo_lock_test(dev, file_priv); + if (ret) break; - rep.ret = - drm_bo_handle_validate(priv, req->handle, req->mask, - req->arg_handle, req->hint, - &rep); + ret = drm_bo_handle_validate(file_priv, req->bo_req.handle, + req->bo_req.fence_class, + req->bo_req.flags, + req->bo_req.mask, + req->bo_req.hint, + &rep); break; case drm_bo_fence: - rep.ret = drm_bo_lock_test(dev, filp); - if (rep.ret) - break; - /**/ break; - case drm_bo_info: - rep.ret = drm_bo_handle_info(priv, req->handle, &rep); - break; - case drm_bo_wait_idle: - rep.ret = drm_bo_handle_wait(priv, req->handle, - req->hint, &rep); + ret = -EINVAL; + DRM_ERROR("Function is not implemented yet.\n"); break; case drm_bo_ref_fence: - rep.ret = -EINVAL; + ret = -EINVAL; DRM_ERROR("Function is not implemented yet.\n"); + break; default: - rep.ret = -EINVAL; + ret = -EINVAL; } - next = arg.next; + next = arg->next; /* * A signal interrupted us. Make sure the ioctl is restartable. */ - if (rep.ret == -EAGAIN) + if (ret == -EAGAIN) return -EAGAIN; - arg.handled = 1; - arg.d.rep = rep; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); - data = next; - } while (data); + arg->handled = 1; + arg->d.rep.ret = ret; + arg->d.rep.bo_info = rep; + if (arg != data) { + if (copy_to_user(curuserarg, &curarg, + sizeof(curarg)) != 0) + return -EFAULT; + } + } while (next != 0); + return 0; +} + +int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_create_arg *arg = data; + struct drm_bo_create_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_buffer_object *entry; + int ret = 0; + + DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n", + (int)(req->size / 1024), req->page_alignment * 4, req->type); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + if (req->type == drm_bo_type_fake) + LOCK_TEST_WITH_RETURN(dev, file_priv); + + ret = drm_buffer_object_create(file_priv->head->dev, + req->size, req->type, req->mask, + req->hint, req->page_alignment, + req->buffer_start, &entry); + if (ret) + goto out; + + ret = drm_bo_add_user_object(file_priv, entry, + req->mask & DRM_BO_FLAG_SHAREABLE); + if (ret) { + drm_bo_usage_deref_unlocked(&entry); + goto out; + } + + mutex_lock(&entry->mutex); + drm_bo_fill_rep_arg(entry, rep); + mutex_unlock(&entry->mutex); + +out: + return ret; +} + + +int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_handle_arg *arg = data; + struct drm_user_object *uo; + int ret = 0; + + DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + uo 
= drm_lookup_user_object(file_priv, arg->handle); + if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) { + mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(file_priv, uo); + mutex_unlock(&dev->struct_mutex); + + return ret; +} + +int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + + DRM_DEBUG("drm_bo_map_ioctl: buffer %d\n", req->handle); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_buffer_object_map(file_priv, req->handle, req->mask, + req->hint, rep); + if (ret) + return ret; + + return 0; +} + +int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_handle_arg *arg = data; + int ret; + + DRM_DEBUG("drm_bo_unmap_ioctl: buffer %d\n", arg->handle); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_buffer_object_unmap(file_priv, arg->handle); + return ret; +} + + +int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_user_object *uo; + int ret; + + DRM_DEBUG("drm_bo_reference_ioctl: buffer %d\n", req->handle); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_user_object_ref(file_priv, req->handle, + drm_buffer_type, &uo); + if (ret) + return ret; + + ret = drm_bo_handle_info(file_priv, req->handle, rep); + if (ret) + return ret; + + return 0; +} + +int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_handle_arg *arg = data; + int ret = 0; + + DRM_DEBUG("drm_bo_unreference_ioctl: buffer %d\n", arg->handle); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type); + return ret; +} + +int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_reference_info_arg *arg = data; + struct drm_bo_handle_arg *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + + DRM_DEBUG("drm_bo_info_ioctl: buffer %d\n", req->handle); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_bo_handle_info(file_priv, req->handle, rep); + if (ret) + return ret; + + return 0; +} + +int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_map_wait_idle_arg *arg = data; + struct drm_bo_info_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + int ret; + + DRM_DEBUG("drm_bo_wait_idle_ioctl: buffer %d\n", req->handle); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + ret = drm_bo_handle_wait(file_priv, req->handle, + req->hint, rep); + if (ret) + return ret; + return 0; } /** + * Pins or unpins the given buffer object in the given memory area. + * + * Pinned buffers will not be evicted from or move within their memory area. 
+ * Must be called with the hardware lock held for pinning. + */ +int +drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, + int pin) +{ + int ret = 0; + + mutex_lock(&bo->mutex); + if (bo->pinned == pin) { + mutex_unlock(&bo->mutex); + return 0; + } + + if (pin) { + ret = drm_bo_wait_unfenced(bo, 0, 0); + if (ret) { + mutex_unlock(&bo->mutex); + return ret; + } + + /* Validate the buffer into its pinned location, with no + * pending fence. + */ + ret = drm_buffer_object_validate(bo, bo->fence_class, 0, 0); + if (ret) { + mutex_unlock(&bo->mutex); + return ret; + } + + /* Pull the buffer off of the LRU and add it to the pinned + * list + */ + bo->pinned_mem_type = bo->mem.mem_type; + mutex_lock(&dev->struct_mutex); + list_del_init(&bo->lru); + list_del_init(&bo->pinned_lru); + drm_bo_add_to_pinned_lru(bo); + + if (bo->pinned_node != bo->mem.mm_node) { + if (bo->pinned_node != NULL) + drm_mm_put_block(bo->pinned_node); + bo->pinned_node = bo->mem.mm_node; + } + + bo->pinned = pin; + mutex_unlock(&dev->struct_mutex); + + } else { + mutex_lock(&dev->struct_mutex); + + /* Remove our buffer from the pinned list */ + if (bo->pinned_node != bo->mem.mm_node) + drm_mm_put_block(bo->pinned_node); + + list_del_init(&bo->pinned_lru); + bo->pinned_node = NULL; + bo->pinned = pin; + mutex_unlock(&dev->struct_mutex); + } + mutex_unlock(&bo->mutex); + return 0; +} +EXPORT_SYMBOL(drm_bo_set_pin); + +int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_bo_set_pin_arg *arg = data; + struct drm_bo_set_pin_req *req = &arg->d.req; + struct drm_bo_info_rep *rep = &arg->d.rep; + struct drm_buffer_object *bo; + int ret; + + DRM_DEBUG("drm_bo_set_pin_ioctl: buffer %d, pin %d\n", + req->handle, req->pin); + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized.\n"); + return -EINVAL; + } + + if (req->pin < 0 || req->pin > 1) { + DRM_ERROR("Bad arguments to set_pin\n"); + return -EINVAL; + } + + if (req->pin) + LOCK_TEST_WITH_RETURN(dev, file_priv); + + mutex_lock(&dev->struct_mutex); + bo = drm_lookup_buffer_object(file_priv, req->handle, 1); + mutex_unlock(&dev->struct_mutex); + if (!bo) { + return -EINVAL; + } + + ret = drm_bo_set_pin(dev, bo, req->pin); + if (ret) { + drm_bo_usage_deref_unlocked(&bo); + return ret; + } + + drm_bo_fill_rep_arg(bo, rep); + drm_bo_usage_deref_unlocked(&bo); + + return 0; +} + + +/** *Clean the unfenced list and put on regular LRU. *This is part of the memory manager cleanup and should only be *called with the DRI lock held. *Call dev->struct_sem locked. 
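/*
 * Hypothetical driver-side use of the drm_bo_set_pin() interface introduced
 * above; this call sequence is not part of the patch and assumes the
 * in-tree DRM headers.  Instead of validating a buffer with
 * DRM_BO_FLAG_NO_EVICT/NO_MOVE in its mask, as the removed code did,
 * callers pin an existing buffer explicitly, with the hardware lock held
 * for the pin as the new comment requires.
 */
static int example_pin_for_scanout(struct drm_device *dev,
                                   struct drm_buffer_object *bo)
{
        int ret;

        /* Pin: validates the buffer in place, pulls it off the eviction
         * LRU and parks it on the pinned list until it is unpinned. */
        ret = drm_bo_set_pin(dev, bo, 1);
        if (ret)
                return ret;

        /* ... program the hardware with the buffer's now-fixed offset ... */

        return drm_bo_set_pin(dev, bo, 0);      /* unpin when finished */
}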
*/ -static void drm_bo_clean_unfenced(drm_device_t *dev) +static void drm_bo_clean_unfenced(struct drm_device *dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; struct list_head *head, *list; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; head = &bm->unfenced; list = head->next; while(list != head) { prefetch(list->next); - entry = list_entry(list, drm_buffer_object_t, lru); + entry = list_entry(list, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); @@ -1837,11 +2115,11 @@ static void drm_bo_clean_unfenced(drm_device_t *dev) } } -static int drm_bo_leave_list(drm_buffer_object_t * bo, +static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, int free_pinned, int allow_errors) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; int ret = 0; mutex_lock(&bo->mutex); @@ -1863,11 +2141,10 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, mutex_unlock(&dev->struct_mutex); } - if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) { - DRM_ERROR("A DRM_BO_NO_EVICT buffer present at " + if (bo->pinned) { + DRM_ERROR("A pinned buffer was present at " "cleanup. Removing flag and evicting.\n"); - bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT; - bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT; + bo->pinned = 0; } if (bo->mem.mem_type == mem_type) @@ -1888,20 +2165,20 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, } -static drm_buffer_object_t *drm_bo_entry(struct list_head *list, +static struct drm_buffer_object *drm_bo_entry(struct list_head *list, int pinned_list) { if (pinned_list) - return list_entry(list, drm_buffer_object_t, pinned_lru); + return list_entry(list, struct drm_buffer_object, pinned_lru); else - return list_entry(list, drm_buffer_object_t, lru); + return list_entry(list, struct drm_buffer_object, lru); } /* * dev->struct_mutex locked. */ -static int drm_bo_force_list_clean(drm_device_t * dev, +static int drm_bo_force_list_clean(struct drm_device * dev, struct list_head *head, unsigned mem_type, int free_pinned, @@ -1909,7 +2186,7 @@ static int drm_bo_force_list_clean(drm_device_t * dev, int pinned_list) { struct list_head *list, *next, *prev; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; int ret; int do_restart; @@ -1966,10 +2243,10 @@ restart: return 0; } -int drm_bo_clean_mm(drm_device_t * dev, unsigned mem_type) +int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -2009,11 +2286,11 @@ EXPORT_SYMBOL(drm_bo_clean_mm); *point since we have the hardware lock. 
*/ -static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) +static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) { int ret; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); @@ -2035,13 +2312,13 @@ static int drm_bo_lock_mm(drm_device_t * dev, unsigned mem_type) return ret; } -int drm_bo_init_mm(drm_device_t * dev, +int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); @@ -2084,12 +2361,12 @@ EXPORT_SYMBOL(drm_bo_init_mm); * any clients still running when we set the initialized flag to zero. */ -int drm_bo_driver_finish(drm_device_t * dev) +int drm_bo_driver_finish(struct drm_device * dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2136,10 +2413,10 @@ int drm_bo_driver_finish(drm_device_t * dev) } EXPORT_SYMBOL(drm_bo_driver_finish); -int drm_bo_driver_init(drm_device_t * dev) +int drm_bo_driver_init(struct drm_device * dev) { - drm_bo_driver_t *driver = dev->driver->bo_driver; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; mutex_lock(&dev->bm.init_mutex); @@ -2151,8 +2428,7 @@ int drm_bo_driver_init(drm_device_t * dev) * Initialize the system memory buffer type. * Other types need to be driver / IOCTL initialized. */ - - ret = drm_bo_init_mm(dev, 0, 0, 0); + ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0); if (ret) goto out_unlock; @@ -2175,81 +2451,150 @@ int drm_bo_driver_init(drm_device_t * dev) EXPORT_SYMBOL(drm_bo_driver_init); -int drm_mm_init_ioctl(DRM_IOCTL_ARGS) +int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; + struct drm_mm_init_arg *arg = data; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; - int ret = 0; - drm_mm_init_arg_t arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + DRM_DEBUG("drm_mm_init_ioctl: type %d, 0x%08llx offset, %dkb\n", + arg->mem_type, arg->p_offset * PAGE_SIZE, (int)(arg->p_size * 4)); if (!driver) { DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); + ret = -EINVAL; + if (arg->magic != DRM_BO_INIT_MAGIC) { + DRM_ERROR("You are using an old libdrm that is not compatible with\n" + "\tthe kernel DRM module. Please upgrade your libdrm.\n"); + return -EINVAL; + } + if (arg->major != DRM_BO_INIT_MAJOR) { + DRM_ERROR("libdrm and kernel DRM buffer object interface major\n" + "\tversion don't match. 
Got %d, expected %d,\n", + arg->major, DRM_BO_INIT_MAJOR); + return -EINVAL; + } + if (arg->minor > DRM_BO_INIT_MINOR) { + DRM_ERROR("libdrm expects a newer DRM buffer object interface.\n" + "\tlibdrm buffer object interface version is %d.%d.\n" + "\tkernel DRM buffer object interface version is %d.%d\n", + arg->major, arg->minor, DRM_BO_INIT_MAJOR, DRM_BO_INIT_MINOR); + return -EINVAL; + } - switch (arg.req.op) { - case mm_init: - ret = -EINVAL; - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized.\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR - ("System memory buffers already initialized.\n"); - break; - } - ret = drm_bo_init_mm(dev, arg.req.mem_type, - arg.req.p_offset, arg.req.p_size); - break; - case mm_takedown: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = -EINVAL; - if (!bm->initialized) { - DRM_ERROR("DRM memory manager was not initialized\n"); - break; - } - if (arg.req.mem_type == 0) { - DRM_ERROR("No takedown for System memory buffers.\n"); - break; - } - ret = 0; - if (drm_bo_clean_mm(dev, arg.req.mem_type)) { - DRM_ERROR("Memory manager type %d not clean. " - "Delaying takedown\n", arg.req.mem_type); - } - break; - case mm_lock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = drm_bo_lock_mm(dev, arg.req.mem_type); - break; - case mm_unlock: - LOCK_TEST_WITH_RETURN(dev, filp); - mutex_lock(&dev->bm.init_mutex); - mutex_lock(&dev->struct_mutex); - ret = 0; - break; - default: - DRM_ERROR("Function not implemented yet\n"); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized.\n"); + goto out; + } + if (arg->mem_type == 0) { + DRM_ERROR("System memory buffers already initialized.\n"); + goto out; + } + ret = drm_bo_init_mm(dev, arg->mem_type, + arg->p_offset, arg->p_size); + +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + return 0; +} + +int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_mm_type_arg *arg = data; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + DRM_DEBUG("drm_mm_takedown_ioctl: %d type\n", arg->mem_type); + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + LOCK_TEST_WITH_RETURN(dev, file_priv); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = -EINVAL; + if (!bm->initialized) { + DRM_ERROR("DRM memory manager was not initialized\n"); + goto out; + } + if (arg->mem_type == 0) { + DRM_ERROR("No takedown for System memory buffers.\n"); + goto out; + } + ret = 0; + if (drm_bo_clean_mm(dev, arg->mem_type)) { + DRM_ERROR("Memory manager type %d not clean. 
" + "Delaying takedown\n", arg->mem_type); + } +out: + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + return 0; +} + +int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_mm_type_arg *arg = data; + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + DRM_DEBUG("drm_mm_lock_ioctl: %d type\n", arg->mem_type); + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); return -EINVAL; } + LOCK_TEST_WITH_RETURN(dev, file_priv); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = drm_bo_lock_mm(dev, arg->mem_type); + mutex_unlock(&dev->struct_mutex); + mutex_unlock(&dev->bm.init_mutex); + if (ret) + return ret; + + return 0; +} + +int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_bo_driver *driver = dev->driver->bo_driver; + int ret; + + DRM_DEBUG("drm_mm_unlock_ioctl\n"); + + if (!driver) { + DRM_ERROR("Buffer objects are not supported by this driver\n"); + return -EINVAL; + } + + LOCK_TEST_WITH_RETURN(dev, file_priv); + mutex_lock(&dev->bm.init_mutex); + mutex_lock(&dev->struct_mutex); + ret = 0; + mutex_unlock(&dev->struct_mutex); mutex_unlock(&dev->bm.init_mutex); if (ret) return ret; - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return 0; } @@ -2257,10 +2602,10 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(drm_device_t * dev, drm_bo_mem_reg_t * mem) +int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { if (mem->mem_type == DRM_BO_MEM_LOCAL) @@ -2291,13 +2636,13 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. */ -int drm_bo_pci_offset(drm_device_t * dev, - drm_bo_mem_reg_t * mem, +int drm_bo_pci_offset(struct drm_device *dev, + struct drm_bo_mem_reg *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; *bus_size = 0; if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) @@ -2320,9 +2665,9 @@ int drm_bo_pci_offset(drm_device_t * dev, * Call bo->mutex locked. 
*/ -void drm_bo_unmap_virtual(drm_buffer_object_t * bo) +void drm_bo_unmap_virtual(struct drm_buffer_object * bo) { - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT; @@ -2332,11 +2677,11 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); if (list->user_token) { @@ -2358,11 +2703,11 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_bo_usage_deref_locked(&bo); } -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) { - drm_map_list_t *list = &bo->map_list; + struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; - drm_device_t *dev = bo->dev; + struct drm_device *dev = bo->dev; DRM_ASSERT_LOCKED(&dev->struct_mutex); list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ); @@ -2394,7 +2739,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) return -ENOMEM; } - list->user_token = ((drm_u64_t) list->hash.key) << PAGE_SHIFT; + list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT; return 0; } diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 28c1509e..21ab4bbb 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -35,9 +35,9 @@ * have not been requested to free also pinned regions. */ -static void drm_bo_free_old_node(drm_buffer_object_t * bo) +static void drm_bo_free_old_node(struct drm_buffer_object * bo) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); @@ -48,11 +48,11 @@ static void drm_bo_free_old_node(drm_buffer_object_t * bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; int ret; @@ -102,11 +102,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ -int drm_mem_reg_ioremap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void **virtual) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -138,11 +138,11 @@ EXPORT_SYMBOL(drm_mem_reg_ioremap); * Call bo->mutex locked. 
*/ -void drm_mem_reg_iounmap(drm_device_t * dev, drm_bo_mem_reg_t * mem, +void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void *virtual) { - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm; + struct drm_mem_type_manager *man; bm = &dev->bm; man = &bm->man[mem->mem_type]; @@ -166,7 +166,7 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page) return 0; } -static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -184,7 +184,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) return 0; } -static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -202,14 +202,14 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) return 0; } -int drm_bo_move_memcpy(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_memcpy(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_device_t *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; - drm_bo_mem_reg_t old_copy = *old_mem; + struct drm_device *dev = bo->dev; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; + struct drm_bo_mem_reg old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -283,12 +283,12 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. */ -int drm_buffer_object_transfer(drm_buffer_object_t * bo, - drm_buffer_object_t ** new_obj) +int drm_buffer_object_transfer(struct drm_buffer_object * bo, + struct drm_buffer_object ** new_obj) { - drm_buffer_object_t *fbo; - drm_device_t *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_object *fbo; + struct drm_device *dev = bo->dev; + struct drm_buffer_manager *bm = &dev->bm; fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) @@ -325,20 +325,20 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, * We cannot restart until it has finished. 
*/ -int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, +int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, - uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) + uint32_t fence_flags, struct drm_bo_mem_reg * new_mem) { - drm_device_t *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_device *dev = bo->dev; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_bo_mem_reg *old_mem = &bo->mem; int ret; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; - drm_buffer_object_t *old_obj; + struct drm_buffer_object *old_obj; if (bo->fence) drm_fence_usage_deref_unlocked(&bo->fence); diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c index 38f0dd3a..dc7f342a 100644 --- a/linux-core/drm_bufs.c +++ b/linux-core/drm_bufs.c @@ -36,22 +36,21 @@ #include <linux/vmalloc.h> #include "drmP.h" -unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_start(struct drm_device *dev, unsigned int resource) { return pci_resource_start(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_start); -unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource) +unsigned long drm_get_resource_len(struct drm_device *dev, unsigned int resource) { return pci_resource_len(dev->pdev, resource); } EXPORT_SYMBOL(drm_get_resource_len); -static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, - drm_local_map_t *map) +struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && map->type == entry->map->type && ((entry->map->offset == map->offset) || @@ -62,8 +61,9 @@ static drm_map_list_t *drm_find_matching_map(drm_device_t *dev, return NULL; } +EXPORT_SYMBOL(drm_find_matching_map); -static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, +static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash, unsigned long user_token, int hashed_handle) { int use_hashed_handle; @@ -92,7 +92,7 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, * Ioctl to specify a range of memory that is available for mapping by a non-root process. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_map structure. * \return zero on success or a negative value on error. @@ -101,12 +101,13 @@ static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash, * type. Adds the map to the map list drm_device::maplist. Adds MTRR's where * applicable and if supported by the kernel. 
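In the _DRM_AGP case of drm_addmap_core just below, the AGP base is only added when the requested offset does not already fall inside the aperture. A compilable sketch of that bounds test, with illustrative names rather than the kernel's:

    /* Nonzero when offset already lies inside
     * [agp_base, agp_base + aper_size_mb MB), in which case the AGP base
     * must not be added a second time. */
    static int offset_in_agp_aperture(unsigned long offset,
                                      unsigned long agp_base,
                                      unsigned long aper_size_mb)
    {
            unsigned long aper_bytes = aper_size_mb * 1024 * 1024;

            return offset >= agp_base &&
                   offset <= agp_base + aper_bytes - 1;
    }
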
*/ -static int drm_addmap_core(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_map_list_t ** maplist) +static int drm_addmap_core(struct drm_device *dev, unsigned int offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, + struct drm_map_list **maplist) { - drm_map_t *map; - drm_map_list_t *list; + struct drm_map *map; + struct drm_map_list *list; drm_dma_handle_t *dmah; unsigned long user_token; int ret; @@ -212,7 +213,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, } break; case _DRM_AGP: { - drm_agp_mem_t *entry; + struct drm_agp_mem *entry; int valid = 0; if (!drm_core_has_AGP(dev)) { @@ -222,11 +223,17 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, #ifdef __alpha__ map->offset += dev->hose->mem_space->start; #endif - /* Note: dev->agp->base may actually be 0 when the DRM - * is not in control of AGP space. But if user space is - * it should already have added the AGP base itself. + /* In some cases (i810 driver), user space may have already + * added the AGP base itself, because dev->agp->base previously + * only got set during AGP enable. So, only add the base + * address if the map's offset isn't already within the + * aperture. */ - map->offset += dev->agp->base; + if (map->offset < dev->agp->base || + map->offset > dev->agp->base + + dev->agp->agp_info.aper_size * 1024 * 1024 - 1) { + map->offset += dev->agp->base; + } map->mtrr = dev->agp->agp_mtrr; /* for getmap */ /* This assumes the DRM is in total control of AGP space. @@ -310,11 +317,11 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset, return 0; } -int drm_addmap(drm_device_t * dev, unsigned int offset, - unsigned int size, drm_map_type_t type, - drm_map_flags_t flags, drm_local_map_t ** map_ptr) +int drm_addmap(struct drm_device *dev, unsigned int offset, + unsigned int size, enum drm_map_type type, + enum drm_map_flags flags, drm_local_map_t ** map_ptr) { - drm_map_list_t *list; + struct drm_map_list *list; int rc; rc = drm_addmap_core(dev, offset, size, type, flags, &list); @@ -325,38 +332,24 @@ int drm_addmap(drm_device_t * dev, unsigned int offset, EXPORT_SYMBOL(drm_addmap); -int drm_addmap_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_addmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t map; - drm_map_list_t *maplist; - drm_map_t __user *argp = (void __user *)arg; + struct drm_map *map = data; + struct drm_map_list *maplist; int err; - if (!(filp->f_mode & 3)) - return -EACCES; /* Require read/write */ - - if (copy_from_user(&map, argp, sizeof(map))) { - return -EFAULT; - } - - if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP)) + if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP)) return -EPERM; - err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags, - &maplist); + err = drm_addmap_core(dev, map->offset, map->size, map->type, + map->flags, &maplist); if (err) return err; - if (copy_to_user(argp, maplist->map, sizeof(drm_map_t))) - return -EFAULT; - /* avoid a warning on 64-bit, this casting isn't very nice, but the API is set so too late */ - if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle)) - return -EFAULT; + map->handle = (void *)(unsigned long)maplist->user_token; return 0; } @@ -365,9 +358,9 @@ int drm_addmap_ioctl(struct inode *inode, struct 
file *filp, * isn't in use. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. - * \param arg pointer to a drm_map_t structure. + * \param arg pointer to a struct drm_map structure. * \return zero on success or a negative value on error. * * Searches the map on drm_device::maplist, removes it from the list, see if @@ -376,9 +369,9 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp, * * \sa drm_addmap */ -int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map) { - drm_map_list_t *r_list = NULL, *list_t; + struct drm_map_list *r_list = NULL, *list_t; drm_dma_handle_t dmah; int found = 0; @@ -434,7 +427,7 @@ int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map) } EXPORT_SYMBOL(drm_rmmap_locked); -int drm_rmmap(drm_device_t *dev, drm_local_map_t *map) +int drm_rmmap(struct drm_device *dev, drm_local_map_t *map) { int ret; @@ -455,24 +448,18 @@ EXPORT_SYMBOL(drm_rmmap); * gets used by drivers that the server doesn't need to care about. This seems * unlikely. */ -int drm_rmmap_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_rmmap_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t request; + struct drm_map *request = data; drm_local_map_t *map = NULL; - drm_map_list_t *r_list; + struct drm_map_list *r_list; int ret; - if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) { - return -EFAULT; - } - mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map && - r_list->user_token == (unsigned long)request.handle && + r_list->user_token == (unsigned long)request->handle && r_list->map->flags & _DRM_REMOVABLE) { map = r_list->map; break; @@ -487,11 +474,6 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, return -EINVAL; } - if (!map) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - /* Register and framebuffer maps are permanent */ if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) { mutex_unlock(&dev->struct_mutex); @@ -513,7 +495,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp, * * Frees any pages and buffers associated with the given entry. */ -static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) +static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry) { int i; @@ -550,20 +532,20 @@ static void drm_cleanup_buf_error(drm_device_t * dev, drm_buf_entry_t * entry) /** * Add AGP buffers for DMA transfers * - * \param dev drm_device_t to which the buffers are to be added. - * \param request pointer to a drm_buf_desc_t describing the request. + * \param dev struct drm_device to which the buffers are to be added. + * \param request pointer to a struct drm_buf_desc describing the request. * \return zero on success or a negative number on failure. * * After some sanity checks creates a drm_buf structure for each buffer and * reallocates the buffer list of the same size order to accommodate the new * buffers. 
*/ -int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; - drm_agp_mem_t *agp_entry; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_entry *entry; + struct drm_agp_mem *agp_entry; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -574,7 +556,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) int total; int byte_count; int i, valid; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!dma) return -EINVAL; @@ -667,7 +649,7 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -728,24 +710,24 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request) EXPORT_SYMBOL(drm_addbufs_agp); #endif /* __OS_HAS_AGP */ -int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int count; int order; int size; int total; int page_order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; drm_dma_handle_t *dmah; - drm_buf_t *buf; + struct drm_buf *buf; int alignment; unsigned long offset; int i; int byte_count; int page_count; unsigned long *temp_pagelist; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL; @@ -878,7 +860,7 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, @@ -954,11 +936,11 @@ int drm_addbufs_pci(drm_device_t * dev, drm_buf_desc_t * request) } EXPORT_SYMBOL(drm_addbufs_pci); -static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) +static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request) { - drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_entry *entry; + struct drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -969,7 +951,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; @@ -1056,7 +1038,7 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -1116,11 +1098,11 @@ static int drm_addbufs_sg(drm_device_t * dev, drm_buf_desc_t * request) return 0; } -int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) +int drm_addbufs_fb(struct drm_device *dev, struct drm_buf_desc *request) { - drm_device_dma_t *dma = dev->dma; - drm_buf_entry_t *entry; - drm_buf_t *buf; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_entry *entry; + struct 
drm_buf *buf; unsigned long offset; unsigned long agp_offset; int count; @@ -1131,7 +1113,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) int total; int byte_count; int i; - drm_buf_t **temp_buflist; + struct drm_buf **temp_buflist; if (!drm_core_check_feature(dev, DRIVER_FB_DMA)) return -EINVAL; @@ -1217,7 +1199,7 @@ int drm_addbufs_fb(drm_device_t * dev, drm_buf_desc_t * request) buf->waiting = 0; buf->pending = 0; init_waitqueue_head(&buf->dma_wait); - buf->filp = NULL; + buf->file_priv = NULL; buf->dev_priv_size = dev->driver->dev_priv_size; buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS); @@ -1282,9 +1264,9 @@ EXPORT_SYMBOL(drm_addbufs_fb); * Add buffers for DMA transfers (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. - * \param arg pointer to a drm_buf_desc_t request. + * \param arg pointer to a struct drm_buf_desc request. * \return zero on success or a negative number on failure. * * According with the memory type specified in drm_buf_desc::flags and the @@ -1292,39 +1274,27 @@ EXPORT_SYMBOL(drm_addbufs_fb); * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent * PCI memory respectively. */ -int drm_addbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_addbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_buf_desc_t request; - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_buf_desc *request = data; int ret; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; - if (copy_from_user(&request, (drm_buf_desc_t __user *) arg, - sizeof(request))) - return -EFAULT; - #if __OS_HAS_AGP - if (request.flags & _DRM_AGP_BUFFER) - ret = drm_addbufs_agp(dev, &request); + if (request->flags & _DRM_AGP_BUFFER) + ret = drm_addbufs_agp(dev, request); else #endif - if (request.flags & _DRM_SG_BUFFER) - ret = drm_addbufs_sg(dev, &request); - else if (request.flags & _DRM_FB_BUFFER) - ret = drm_addbufs_fb(dev, &request); + if (request->flags & _DRM_SG_BUFFER) + ret = drm_addbufs_sg(dev, request); + else if (request->flags & _DRM_FB_BUFFER) + ret = drm_addbufs_fb(dev, request); else - ret = drm_addbufs_pci(dev, &request); + ret = drm_addbufs_pci(dev, request); - if (ret == 0) { - if (copy_to_user((void __user *) arg, &request, - sizeof(request))) { - ret = -EFAULT; - } - } return ret; } @@ -1336,7 +1306,7 @@ int drm_addbufs(struct inode *inode, struct file *filp, * large buffers can be used for image transfer). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_info structure. * \return zero on success or a negative number on failure. @@ -1345,14 +1315,11 @@ int drm_addbufs(struct inode *inode, struct file *filp, * lock, preventing of allocating more buffers after this call. Information * about each requested buffer is then copied into user space. 
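The buffer ioctls converted above (drm_addmap_ioctl, drm_rmmap_ioctl, drm_addbufs) all take the new (dev, data, file_priv) form: "data" arrives as a kernel-space copy of the argument, and results written into it reach userspace without any copy_to_user in the handler, which implies the common ioctl dispatcher now performs the copies once. A minimal sketch of the shape such a handler takes, assuming the drmP.h definitions in this tree (the handler itself is hypothetical):

    #include "drmP.h"

    /* Hypothetical handler in the new style: "data" is already a
     * kernel-space copy of the ioctl argument, and whatever is written
     * into it is copied back to userspace by the caller. */
    static int drm_example_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
    {
            struct drm_buf_desc *request = data;

            if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                    return -EINVAL;

            request->count = 0;
            return 0;
    }
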
*/ -int drm_infobufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_infobufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_info_t request; - drm_buf_info_t __user *argp = (void __user *)arg; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_info *request = data; int i; int count; @@ -1370,9 +1337,6 @@ int drm_infobufs(struct inode *inode, struct file *filp, ++dev->buf_use; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) ++count; @@ -1380,13 +1344,13 @@ int drm_infobufs(struct inode *inode, struct file *filp, DRM_DEBUG("count = %d\n", count); - if (request.count >= count) { + if (request->count >= count) { for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) { if (dma->bufs[i].buf_count) { - drm_buf_desc_t __user *to = - &request.list[count]; - drm_buf_entry_t *from = &dma->bufs[i]; - drm_freelist_t *list = &dma->bufs[i].freelist; + struct drm_buf_desc __user *to = + &request->list[count]; + struct drm_buf_entry *from = &dma->bufs[i]; + struct drm_freelist *list = &dma->bufs[i].freelist; if (copy_to_user(&to->count, &from->buf_count, sizeof(from->buf_count)) || @@ -1411,10 +1375,7 @@ int drm_infobufs(struct inode *inode, struct file *filp, } } } - request.count = count; - - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; + request->count = count; return 0; } @@ -1423,7 +1384,7 @@ int drm_infobufs(struct inode *inode, struct file *filp, * Specifies a low and high water mark for buffer allocation * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg a pointer to a drm_buf_desc structure. * \return zero on success or a negative number on failure. @@ -1433,15 +1394,13 @@ int drm_infobufs(struct inode *inode, struct file *filp, * * \note This ioctl is deprecated and mostly never used. 
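drm_markbufs below converts the requested byte size into a power-of-two bucket with drm_order() before range-checking it against DRM_MIN_ORDER and DRM_MAX_ORDER. As a freestanding illustration of that mapping (not the kernel's implementation), the smallest order n with 2^n >= size can be computed as:

    /* Smallest n such that (1 << n) >= size; e.g. 4096 -> 12, 4097 -> 13. */
    static int buf_size_order(unsigned long size)
    {
            int order = 0;

            while ((1UL << order) < size)
                    order++;
            return order;
    }
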
*/ -int drm_markbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_markbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_desc_t request; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_desc *request = data; int order; - drm_buf_entry_t *entry; + struct drm_buf_entry *entry; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1449,24 +1408,20 @@ int drm_markbufs(struct inode *inode, struct file *filp, if (!dma) return -EINVAL; - if (copy_from_user(&request, - (drm_buf_desc_t __user *) arg, sizeof(request))) - return -EFAULT; - DRM_DEBUG("%d, %d, %d\n", - request.size, request.low_mark, request.high_mark); - order = drm_order(request.size); + request->size, request->low_mark, request->high_mark); + order = drm_order(request->size); if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER) return -EINVAL; entry = &dma->bufs[order]; - if (request.low_mark < 0 || request.low_mark > entry->buf_count) + if (request->low_mark < 0 || request->low_mark > entry->buf_count) return -EINVAL; - if (request.high_mark < 0 || request.high_mark > entry->buf_count) + if (request->high_mark < 0 || request->high_mark > entry->buf_count) return -EINVAL; - entry->freelist.low_mark = request.low_mark; - entry->freelist.high_mark = request.high_mark; + entry->freelist.low_mark = request->low_mark; + entry->freelist.high_mark = request->high_mark; return 0; } @@ -1475,7 +1430,7 @@ int drm_markbufs(struct inode *inode, struct file *filp, * Unreserve the buffers in list, previously reserved using drmDMA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_free structure. * \return zero on success or a negative number on failure. @@ -1483,16 +1438,14 @@ int drm_markbufs(struct inode *inode, struct file *filp, * Calls free_buffer() for each used buffer. * This function is primarily used for debugging. 
*/ -int drm_freebufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_freebufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_free_t request; + struct drm_device_dma *dma = dev->dma; + struct drm_buf_free *request = data; int i; int idx; - drm_buf_t *buf; + struct drm_buf *buf; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) return -EINVAL; @@ -1500,13 +1453,9 @@ int drm_freebufs(struct inode *inode, struct file *filp, if (!dma) return -EINVAL; - if (copy_from_user(&request, - (drm_buf_free_t __user *) arg, sizeof(request))) - return -EFAULT; - - DRM_DEBUG("%d\n", request.count); - for (i = 0; i < request.count; i++) { - if (copy_from_user(&idx, &request.list[i], sizeof(idx))) + DRM_DEBUG("%d\n", request->count); + for (i = 0; i < request->count; i++) { + if (copy_from_user(&idx, &request->list[i], sizeof(idx))) return -EFAULT; if (idx < 0 || idx >= dma->buf_count) { DRM_ERROR("Index %d (of %d max)\n", @@ -1514,7 +1463,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, return -EINVAL; } buf = dma->buflist[idx]; - if (buf->filp != filp) { + if (buf->file_priv != file_priv) { DRM_ERROR("Process %d freeing buffer not owned\n", current->pid); return -EINVAL; @@ -1529,7 +1478,7 @@ int drm_freebufs(struct inode *inode, struct file *filp, * Maps all of the DMA buffers into client-virtual space (ioctl). * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg pointer to a drm_buf_map structure. * \return zero on success or a negative number on failure. @@ -1539,18 +1488,15 @@ int drm_freebufs(struct inode *inode, struct file *filp, * offset equal to 0, which drm_mmap() interpretes as PCI buffers and calls * drm_mmap_dma(). 
*/ -int drm_mapbufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mapbufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; - drm_buf_map_t __user *argp = (void __user *)arg; + struct drm_device_dma *dma = dev->dma; int retcode = 0; const int zero = 0; unsigned long virtual; unsigned long address; - drm_buf_map_t request; + struct drm_buf_map *request = data; int i; if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA)) @@ -1567,16 +1513,13 @@ int drm_mapbufs(struct inode *inode, struct file *filp, dev->buf_use++; /* Can't allocate more after this call */ spin_unlock(&dev->count_lock); - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - - if (request.count >= dma->buf_count) { + if (request->count >= dma->buf_count) { if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) || (drm_core_check_feature(dev, DRIVER_SG) && (dma->flags & _DRM_DMA_USE_SG)) || (drm_core_check_feature(dev, DRIVER_FB_DMA) && (dma->flags & _DRM_DMA_USE_FB))) { - drm_map_t *map = dev->agp_buffer_map; + struct drm_map *map = dev->agp_buffer_map; unsigned long token = dev->agp_buffer_token; if (!map) { @@ -1584,14 +1527,14 @@ int drm_mapbufs(struct inode *inode, struct file *filp, goto done; } down_write(&current->mm->mmap_sem); - virtual = do_mmap(filp, 0, map->size, + virtual = do_mmap(file_priv->filp, 0, map->size, PROT_READ | PROT_WRITE, MAP_SHARED, token); up_write(&current->mm->mmap_sem); } else { down_write(&current->mm->mmap_sem); - virtual = do_mmap(filp, 0, dma->byte_count, + virtual = do_mmap(file_priv->filp, 0, dma->byte_count, PROT_READ | PROT_WRITE, MAP_SHARED, 0); up_write(&current->mm->mmap_sem); @@ -1601,28 +1544,28 @@ int drm_mapbufs(struct inode *inode, struct file *filp, retcode = (signed long)virtual; goto done; } - request.virtual = (void __user *)virtual; + request->virtual = (void __user *)virtual; for (i = 0; i < dma->buf_count; i++) { - if (copy_to_user(&request.list[i].idx, + if (copy_to_user(&request->list[i].idx, &dma->buflist[i]->idx, - sizeof(request.list[0].idx))) { + sizeof(request->list[0].idx))) { retcode = -EFAULT; goto done; } - if (copy_to_user(&request.list[i].total, + if (copy_to_user(&request->list[i].total, &dma->buflist[i]->total, - sizeof(request.list[0].total))) { + sizeof(request->list[0].total))) { retcode = -EFAULT; goto done; } - if (copy_to_user(&request.list[i].used, + if (copy_to_user(&request->list[i].used, &zero, sizeof(zero))) { retcode = -EFAULT; goto done; } address = virtual + dma->buflist[i]->offset; /* *** */ - if (copy_to_user(&request.list[i].address, + if (copy_to_user(&request->list[i].address, &address, sizeof(address))) { retcode = -EFAULT; goto done; @@ -1630,11 +1573,8 @@ int drm_mapbufs(struct inode *inode, struct file *filp, } } done: - request.count = dma->buf_count; - DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode); - - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; + request->count = dma->buf_count; + DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode); return retcode; } diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 08d20d06..e51aedb7 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -196,15 +196,16 @@ static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, return ret; } -static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, + +static struct page
*drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) { unsigned long address = data->address; - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -261,7 +262,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -350,11 +351,11 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; mutex_lock(&bo->mutex); @@ -394,7 +395,7 @@ out_unlock: int drm_bo_map_bound(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; int ret = 0; unsigned long bus_base; unsigned long bus_offset; @@ -405,7 +406,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) BUG_ON(ret); if (bus_size) { - drm_mem_type_manager_t *man = &bo->dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type]; unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT; pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma); ret = io_remap_pfn_range(vma, vma->vm_start, pfn, @@ -417,7 +418,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) } -int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n_entry; vma_entry_t *v_entry; @@ -453,7 +454,7 @@ int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) return 0; } -void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n; vma_entry_t *v_entry, *v_n; @@ -485,7 +486,7 @@ void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) -int drm_bo_lock_kmm(drm_buffer_object_t * bo) +int drm_bo_lock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; int lock_ok = 1; @@ -517,7 +518,7 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo) return -EAGAIN; } -void drm_bo_unlock_kmm(drm_buffer_object_t * bo) +void drm_bo_unlock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; @@ -528,7 +529,7 @@ void drm_bo_unlock_kmm(drm_buffer_object_t * bo) } } -int drm_bo_remap_bound(drm_buffer_object_t *bo) +int drm_bo_remap_bound(struct drm_buffer_object *bo) { vma_entry_t *v_entry; int ret = 0; @@ -544,7 +545,7 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo) return ret; } -void drm_bo_finish_unmap(drm_buffer_object_t *bo) +void drm_bo_finish_unmap(struct drm_buffer_object *bo) { vma_entry_t *v_entry; @@ -677,4 +678,51 @@ void idr_remove_all(struct idr *idp) idp->layers 
= 0; } EXPORT_SYMBOL(idr_remove_all); + +#endif /* DRM_IDR_COMPAT_FN */ + + + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +/** + * idr_replace - replace pointer for given id + * @idp: idr handle + * @ptr: pointer you want associated with the id + * @id: lookup key + * + * Replace the pointer registered with an id and return the old value. + * A -ENOENT return indicates that @id was not found. + * A -EINVAL return indicates that @id was not within valid constraints. + * + * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove(). + */ +void *idr_replace(struct idr *idp, void *ptr, int id) +{ + int n; + struct idr_layer *p, *old_p; + + n = idp->layers * IDR_BITS; + p = idp->top; + + id &= MAX_ID_MASK; + + if (id >= (1 << n)) + return ERR_PTR(-EINVAL); + + n -= IDR_BITS; + while ((n > 0) && p) { + p = p->ary[(id >> n) & IDR_MASK]; + n -= IDR_BITS; + } + + n = id & IDR_MASK; + if (unlikely(p == NULL || !test_bit(n, &p->bitmap))) + return ERR_PTR(-ENOENT); + + old_p = p->ary[n]; + p->ary[n] = ptr; + + return (void *)old_p; +} +EXPORT_SYMBOL(idr_replace); #endif diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index 1cf7f165..e906a539 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -313,12 +313,23 @@ extern int drm_bo_map_bound(struct vm_area_struct *vma); #endif -/* fixme when functions are upstreamed */ +/* fixme when functions are upstreamed - upstreamed for 2.6.23 */ +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) #define DRM_IDR_COMPAT_FN +#endif #ifdef DRM_IDR_COMPAT_FN int idr_for_each(struct idr *idp, int (*fn)(int id, void *p, void *data), void *data); void idr_remove_all(struct idr *idp); #endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)) +void *idr_replace(struct idr *idp, void *ptr, int id); +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) +typedef _Bool bool; +#endif + #endif diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c index 101a298c..7854e89c 100644 --- a/linux-core/drm_context.c +++ b/linux-core/drm_context.c @@ -56,19 +56,11 @@ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex * lock. */ -void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) +void drm_ctxbitmap_free(struct drm_device *dev, int ctx_handle) { - struct drm_ctx_sarea_list *ctx; - mutex_lock(&dev->struct_mutex); - ctx = idr_find(&dev->ctx_idr, ctx_handle); - if (ctx) { - idr_remove(&dev->ctx_idr, ctx_handle); - drm_free(ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - } else - DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle); + idr_remove(&dev->ctx_idr, ctx_handle); mutex_unlock(&dev->struct_mutex); - return; } /** @@ -80,24 +72,19 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle) * Allocate a new idr from drm_device::ctx_idr while holding the * drm_device::struct_mutex lock. 
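The idr_replace() compatibility function added above is what lets drm_setsareactx, further down in drm_context.c, store the SAREA map directly in the context idr. A short sketch of the expected calling pattern, assuming the drmP.h definitions in this tree (the helper itself is hypothetical):

    #include "drmP.h"

    /* Hypothetical helper: swap the SAREA map stored under ctx_id.
     * idr_replace() hands back the old pointer on success, or an
     * ERR_PTR-encoded error if ctx_id was never allocated. */
    static int ctx_replace_sarea(struct drm_device *dev, int ctx_id,
                                 struct drm_map *map)
    {
            void *old = idr_replace(&dev->ctx_idr, map, ctx_id);

            if (IS_ERR(old))
                    return -EINVAL;
            return 0;
    }
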
*/ -static int drm_ctxbitmap_next(drm_device_t * dev) +static int drm_ctxbitmap_next(struct drm_device *dev) { int new_id; int ret; - struct drm_ctx_sarea_list *new_ctx; - - new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - if (!new_ctx) - return -1; again: if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); return -ENOMEM; } mutex_lock(&dev->struct_mutex); - ret = idr_get_new_above(&dev->ctx_idr, new_ctx, DRM_RESERVED_CONTEXTS, &new_id); + ret = idr_get_new_above(&dev->ctx_idr, NULL, + DRM_RESERVED_CONTEXTS, &new_id); if (ret == -EAGAIN) { mutex_unlock(&dev->struct_mutex); goto again; @@ -114,21 +101,12 @@ again: * * Initialise the drm_device::ctx_idr */ -int drm_ctxbitmap_init(drm_device_t * dev) +int drm_ctxbitmap_init(struct drm_device *dev) { idr_init(&dev->ctx_idr); return 0; } - - -static int drm_ctx_sarea_free(int id, void *p, void *data) -{ - struct drm_ctx_sarea_list *ctx_entry = p; - drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST); - return 0; -} - /** * Context bitmap cleanup. * @@ -137,10 +115,9 @@ static int drm_ctx_sarea_free(int id, void *p, void *data) * Free all idr members using drm_ctx_sarea_free helper function * while holding the drm_device::struct_mutex lock. */ -void drm_ctxbitmap_cleanup(drm_device_t * dev) +void drm_ctxbitmap_cleanup(struct drm_device *dev) { mutex_lock(&dev->struct_mutex); - idr_for_each(&dev->ctx_idr, drm_ctx_sarea_free, NULL); idr_remove_all(&dev->ctx_idr); mutex_unlock(&dev->struct_mutex); } @@ -155,7 +132,7 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev) * Get per-context SAREA. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. @@ -163,44 +140,34 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev) * Gets the map from drm_device::ctx_idr with the handle specified and * returns its handle. */ -int drm_getsareactx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_getsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t __user *argp = (void __user *)arg; - drm_ctx_priv_map_t request; - drm_map_t *map; - drm_map_list_t *_entry; - struct drm_ctx_sarea_list *ctx_sarea; - - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; + struct drm_ctx_priv_map *request = data; + struct drm_map *map; + struct drm_map_list *_entry; mutex_lock(&dev->struct_mutex); - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); - if (!ctx_sarea) { + map = idr_find(&dev->ctx_idr, request->ctx_id); + if (!map) { mutex_unlock(&dev->struct_mutex); return -EINVAL; } - map = ctx_sarea->map; mutex_unlock(&dev->struct_mutex); - request.handle = NULL; + request->handle = NULL; list_for_each_entry(_entry, &dev->maplist, head) { if (_entry->map == map) { - request.handle = + request->handle = (void *)(unsigned long)_entry->user_token; break; } } - if (request.handle == NULL) + if (request->handle == NULL) return -EINVAL; - if (copy_to_user(argp, &request, sizeof(request))) - return -EFAULT; return 0; } @@ -208,7 +175,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp, * Set per-context SAREA. * * \param inode device inode. 
- * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_priv_map structure. * \return zero on success or a negative number on failure. @@ -216,24 +183,17 @@ int drm_getsareactx(struct inode *inode, struct file *filp, * Searches the mapping specified in \p arg and update the entry in * drm_device::ctx_idr with it. */ -int drm_setsareactx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_setsareactx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_priv_map_t request; - drm_map_t *map = NULL; - drm_map_list_t *r_list = NULL; - struct drm_ctx_sarea_list *ctx_sarea; - - if (copy_from_user(&request, - (drm_ctx_priv_map_t __user *) arg, sizeof(request))) - return -EFAULT; + struct drm_ctx_priv_map *request = data; + struct drm_map *map = NULL; + struct drm_map_list *r_list = NULL; mutex_lock(&dev->struct_mutex); list_for_each_entry(r_list, &dev->maplist, head) { if (r_list->map - && r_list->user_token == (unsigned long) request.handle) + && r_list->user_token == (unsigned long) request->handle) goto found; } bad: @@ -245,14 +205,11 @@ int drm_setsareactx(struct inode *inode, struct file *filp, if (!map) goto bad; - mutex_lock(&dev->struct_mutex); - - ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id); - if (!ctx_sarea) + if (IS_ERR(idr_replace(&dev->ctx_idr, map, request->ctx_id))) goto bad; - ctx_sarea->map = map; mutex_unlock(&dev->struct_mutex); + return 0; } @@ -272,7 +229,7 @@ int drm_setsareactx(struct inode *inode, struct file *filp, * * Attempt to set drm_device::context_flag. */ -static int drm_context_switch(drm_device_t * dev, int old, int new) +static int drm_context_switch(struct drm_device *dev, int old, int new) { if (test_and_set_bit(0, &dev->context_flag)) { DRM_ERROR("Reentering -- FIXME\n"); @@ -300,7 +257,7 @@ static int drm_context_switch(drm_device_t * dev, int old, int new) * hardware lock is held, clears the drm_device::context_flag and wakes up * drm_device::context_wait. */ -static int drm_context_switch_complete(drm_device_t * dev, int new) +static int drm_context_switch_complete(struct drm_device *dev, int new) { dev->last_context = new; /* PRE/POST: This is the _only_ writer. */ dev->last_switch = jiffies; @@ -322,34 +279,28 @@ static int drm_context_switch_complete(drm_device_t * dev, int new) * Reserve contexts. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx_res structure. * \return zero on success or a negative number on failure. 
*/ -int drm_resctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_resctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_ctx_res_t res; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; + struct drm_ctx_res *res = data; + struct drm_ctx ctx; int i; - if (copy_from_user(&res, argp, sizeof(res))) - return -EFAULT; - - if (res.count >= DRM_RESERVED_CONTEXTS) { + if (res->count >= DRM_RESERVED_CONTEXTS) { memset(&ctx, 0, sizeof(ctx)); for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) { ctx.handle = i; - if (copy_to_user(&res.contexts[i], &ctx, sizeof(ctx))) + if (copy_to_user(&res->contexts[i], &ctx, sizeof(ctx))) return -EFAULT; } } - res.count = DRM_RESERVED_CONTEXTS; + res->count = DRM_RESERVED_CONTEXTS; - if (copy_to_user(argp, &res, sizeof(res))) - return -EFAULT; return 0; } @@ -357,40 +308,34 @@ int drm_resctx(struct inode *inode, struct file *filp, * Add context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Get a new handle for the context and copy to userspace. */ -int drm_addctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_addctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_list_t *ctx_entry; - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; - - if (copy_from_user(&ctx, argp, sizeof(ctx))) - return -EFAULT; + struct drm_ctx_list *ctx_entry; + struct drm_ctx *ctx = data; - ctx.handle = drm_ctxbitmap_next(dev); - if (ctx.handle == DRM_KERNEL_CONTEXT) { + ctx->handle = drm_ctxbitmap_next(dev); + if (ctx->handle == DRM_KERNEL_CONTEXT) { /* Skip kernel's context and get a new one. */ - ctx.handle = drm_ctxbitmap_next(dev); + ctx->handle = drm_ctxbitmap_next(dev); } - DRM_DEBUG("%d\n", ctx.handle); - if (ctx.handle == -1) { + DRM_DEBUG("%d\n", ctx->handle); + if (ctx->handle == -1) { DRM_DEBUG("Not enough free contexts.\n"); /* Should this return -EBUSY instead? */ return -ENOMEM; } - if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_ctor) - if (!dev->driver->context_ctor(dev, ctx.handle)) { + if (!dev->driver->context_ctor(dev, ctx->handle)) { DRM_DEBUG("Running out of ctxs or memory.\n"); return -ENOMEM; } @@ -403,21 +348,18 @@ int drm_addctx(struct inode *inode, struct file *filp, } INIT_LIST_HEAD(&ctx_entry->head); - ctx_entry->handle = ctx.handle; - ctx_entry->tag = priv; + ctx_entry->handle = ctx->handle; + ctx_entry->tag = file_priv; mutex_lock(&dev->ctxlist_mutex); list_add(&ctx_entry->head, &dev->ctxlist); ++dev->ctx_count; mutex_unlock(&dev->ctxlist_mutex); - if (copy_to_user(argp, &ctx, sizeof(ctx))) - return -EFAULT; return 0; } -int drm_modctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_modctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { /* This does nothing */ return 0; @@ -427,25 +369,18 @@ int drm_modctx(struct inode *inode, struct file *filp, * Get context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. 
*/ -int drm_getctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_getctx(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_ctx_t __user *argp = (void __user *)arg; - drm_ctx_t ctx; - - if (copy_from_user(&ctx, argp, sizeof(ctx))) - return -EFAULT; + struct drm_ctx *ctx = data; /* This is 0, because we don't handle any context flags */ - ctx.flags = 0; + ctx->flags = 0; - if (copy_to_user(argp, &ctx, sizeof(ctx))) - return -EFAULT; return 0; } @@ -453,50 +388,40 @@ int drm_getctx(struct inode *inode, struct file *filp, * Switch context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch(). */ -int drm_switchctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_switchctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; - - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) - return -EFAULT; + struct drm_ctx *ctx = data; - DRM_DEBUG("%d\n", ctx.handle); - return drm_context_switch(dev, dev->last_context, ctx.handle); + DRM_DEBUG("%d\n", ctx->handle); + return drm_context_switch(dev, dev->last_context, ctx->handle); } /** * New context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * Calls context_switch_complete(). */ -int drm_newctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_newctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; + struct drm_ctx *ctx = data; - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) - return -EFAULT; - - DRM_DEBUG("%d\n", ctx.handle); - drm_context_switch_complete(dev, ctx.handle); + DRM_DEBUG("%d\n", ctx->handle); + drm_context_switch_complete(dev, ctx->handle); return 0; } @@ -505,39 +430,34 @@ int drm_newctx(struct inode *inode, struct file *filp, * Remove context. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument pointing to a drm_ctx structure. * \return zero on success or a negative number on failure. * * If not the special kernel context, calls ctxbitmap_free() to free the specified context. 
*/ -int drm_rmctx(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_rmctx(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ctx_t ctx; - - if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) - return -EFAULT; + struct drm_ctx *ctx = data; - DRM_DEBUG("%d\n", ctx.handle); - if (ctx.handle == DRM_KERNEL_CONTEXT + 1) { - priv->remove_auth_on_close = 1; + DRM_DEBUG("%d\n", ctx->handle); + if (ctx->handle == DRM_KERNEL_CONTEXT + 1) { + file_priv->remove_auth_on_close = 1; } - if (ctx.handle != DRM_KERNEL_CONTEXT) { + if (ctx->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) - dev->driver->context_dtor(dev, ctx.handle); - drm_ctxbitmap_free(dev, ctx.handle); + dev->driver->context_dtor(dev, ctx->handle); + drm_ctxbitmap_free(dev, ctx->handle); } mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->handle == ctx.handle) { + if (pos->handle == ctx->handle) { list_del(&pos->head); drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST); --dev->ctx_count; diff --git a/linux-core/drm_crtc.c b/linux-core/drm_crtc.c index df00a7d2..ed5f1dfa 100644 --- a/linux-core/drm_crtc.c +++ b/linux-core/drm_crtc.c @@ -117,7 +117,7 @@ struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev, * RETURNS: * Pointer to new framebuffer or NULL on error. */ -struct drm_framebuffer *drm_framebuffer_create(drm_device_t *dev) +struct drm_framebuffer *drm_framebuffer_create(struct drm_device *dev) { struct drm_framebuffer *fb; @@ -153,7 +153,7 @@ EXPORT_SYMBOL(drm_framebuffer_create); */ void drm_framebuffer_destroy(struct drm_framebuffer *fb) { - drm_device_t *dev = fb->dev; + struct drm_device *dev = fb->dev; struct drm_crtc *crtc; /* remove from any CRTC */ @@ -183,7 +183,7 @@ EXPORT_SYMBOL(drm_framebuffer_destroy); * RETURNS: * Pointer to new CRTC object or NULL on error. */ -struct drm_crtc *drm_crtc_create(drm_device_t *dev, +struct drm_crtc *drm_crtc_create(struct drm_device *dev, const struct drm_crtc_funcs *funcs) { struct drm_crtc *crtc; @@ -216,7 +216,7 @@ EXPORT_SYMBOL(drm_crtc_create); */ void drm_crtc_destroy(struct drm_crtc *crtc) { - drm_device_t *dev = crtc->dev; + struct drm_device *dev = crtc->dev; if (crtc->funcs->cleanup) (*crtc->funcs->cleanup)(crtc); @@ -243,7 +243,7 @@ EXPORT_SYMBOL(drm_crtc_destroy); bool drm_crtc_in_use(struct drm_crtc *crtc) { struct drm_output *output; - drm_device_t *dev = crtc->dev; + struct drm_device *dev = crtc->dev; /* FIXME: Locking around list access? */ list_for_each_entry(output, &dev->mode_config.output_list, head) if (output->crtc == crtc) @@ -369,7 +369,7 @@ void drm_crtc_probe_output_modes(struct drm_device *dev, int maxX, int maxY) bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mode, int x, int y) { - drm_device_t *dev = crtc->dev; + struct drm_device *dev = crtc->dev; struct drm_display_mode *adjusted_mode, saved_mode; int saved_x, saved_y; bool didLock = false; @@ -549,7 +549,7 @@ EXPORT_SYMBOL(drm_mode_remove); * RETURNS: * Pointer to the new output or NULL on error. 
*/ -struct drm_output *drm_output_create(drm_device_t *dev, +struct drm_output *drm_output_create(struct drm_device *dev, const struct drm_output_funcs *funcs, const char *name) { @@ -698,7 +698,7 @@ EXPORT_SYMBOL(drm_mode_destroy); * Initialize @dev's mode_config structure, used for tracking the graphics * configuration of @dev. */ -void drm_mode_config_init(drm_device_t *dev) +void drm_mode_config_init(struct drm_device *dev) { mutex_init(&dev->mode_config.mutex); INIT_LIST_HEAD(&dev->mode_config.fb_list); @@ -724,10 +724,10 @@ EXPORT_SYMBOL(drm_mode_config_init); * RETURNS: * Zero on success, -EINVAL if the handle couldn't be found. */ -static int drm_get_buffer_object(drm_device_t *dev, struct drm_buffer_object **bo, unsigned long handle) +static int drm_get_buffer_object(struct drm_device *dev, struct drm_buffer_object **bo, unsigned long handle) { - drm_user_object_t *uo; - drm_hash_item_t *hash; + struct drm_user_object *uo; + struct drm_hash_item *hash; int ret; *bo = NULL; @@ -740,13 +740,13 @@ static int drm_get_buffer_object(drm_device_t *dev, struct drm_buffer_object **b goto out_err; } - uo = drm_hash_entry(hash, drm_user_object_t, hash); + uo = drm_hash_entry(hash, struct drm_user_object, hash); if (uo->type != drm_buffer_type) { ret = -EINVAL; goto out_err; } - *bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + *bo = drm_user_object_entry(uo, struct drm_buffer_object, base); ret = 0; out_err: mutex_unlock(&dev->struct_mutex); @@ -760,7 +760,7 @@ out_err: * LOCKING: * Caller must hold mode config lock. */ -static void drm_pick_crtcs (drm_device_t *dev) +static void drm_pick_crtcs (struct drm_device *dev) { int c, o; struct drm_output *output, *output_equal; @@ -853,7 +853,7 @@ clone: * RETURNS: * Zero if everything went ok, nonzero otherwise. */ -bool drm_initial_config(drm_device_t *dev, bool can_grow) +bool drm_initial_config(struct drm_device *dev, bool can_grow) { struct drm_output *output; int ret = false; @@ -891,7 +891,7 @@ EXPORT_SYMBOL(drm_initial_config); * * FIXME: cleanup any dangling user buffer objects too */ -void drm_mode_config_cleanup(drm_device_t *dev) +void drm_mode_config_cleanup(struct drm_device *dev) { struct drm_output *output, *ot; struct drm_crtc *crtc, *ct; @@ -942,7 +942,7 @@ EXPORT_SYMBOL(drm_mode_config_cleanup); */ int drm_crtc_set_config(struct drm_crtc *crtc, struct drm_mode_crtc *crtc_info, struct drm_display_mode *new_mode, struct drm_output **output_set, struct drm_framebuffer *fb) { - drm_device_t *dev = crtc->dev; + struct drm_device *dev = crtc->dev; struct drm_crtc **save_crtcs, *new_crtc; bool save_enabled = crtc->enabled; bool changed; @@ -1085,12 +1085,10 @@ void drm_crtc_convert_umode(struct drm_display_mode *out, struct drm_mode_modein * RETURNS: * Zero on success, errno on failure. */ -int drm_mode_getresources(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_getresources(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_card_res __user *argp = (void __user *)arg; + struct drm_mode_card_res __user *argp = (void __user *)data; struct drm_mode_card_res card_res; struct list_head *lh; struct drm_framebuffer *fb; @@ -1236,12 +1234,10 @@ out_unlock: * RETURNS: * Zero on success, errno on failure. 
*/ -int drm_mode_getcrtc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_getcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_crtc __user *argp = (void __user *)arg; + struct drm_mode_crtc __user *argp = (void __user *)data; struct drm_mode_crtc crtc_resp; struct drm_crtc *crtc; struct drm_output *output; @@ -1300,12 +1296,10 @@ out: * RETURNS: * Zero on success, errno on failure. */ -int drm_mode_getoutput(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_getoutput(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_get_output __user *argp = (void __user *)arg; + struct drm_mode_get_output __user *argp = (void __user *)data; struct drm_mode_get_output out_resp; struct drm_output *output; struct drm_display_mode *mode; @@ -1390,12 +1384,10 @@ out_unlock: * RETURNS: * Zero on success, errno on failure. */ -int drm_mode_setcrtc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_setcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_crtc __user *argp = (void __user *)arg; + struct drm_mode_crtc __user *argp = (void __user *)data; struct drm_mode_crtc crtc_req; struct drm_crtc *crtc; struct drm_output **output_set = NULL, *output; @@ -1508,12 +1500,10 @@ out: * RETURNS: * Zero on success, errno on failure. */ -int drm_mode_addfb(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_addfb(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - struct drm_file *priv = filp->private_data; - struct drm_device *dev = priv->head->dev; - struct drm_mode_fb_cmd __user *argp = (void __user *)arg; + struct drm_mode_fb_cmd __user *argp = (void __user *)data; struct drm_mode_fb_cmd r; struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; @@ -1560,7 +1550,7 @@ int drm_mode_addfb(struct inode *inode, struct file *filp, r.buffer_id = fb->id; - list_add(&fb->filp_head, &priv->fbs); + list_add(&fb->filp_head, &file_priv->fbs); if (copy_to_user(argp, &r, sizeof(r))) { ret = -EFAULT; @@ -1595,13 +1585,11 @@ out: * RETURNS: * Zero on success, errno on failure. */ -int drm_mode_rmfb(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_rmfb(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; struct drm_framebuffer *fb = 0; - uint32_t id = arg; + uint32_t id = data; int ret = 0; mutex_lock(&dev->mode_config.mutex); @@ -1644,12 +1632,10 @@ out: * RETURNS: * Zero on success, errno on failure. 
*/ -int drm_mode_getfb(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_fb_cmd __user *argp = (void __user *)arg; + struct drm_mode_fb_cmd __user *argp = (void __user *)data; struct drm_mode_fb_cmd r; struct drm_framebuffer *fb; int ret = 0; @@ -1696,8 +1682,8 @@ out: */ void drm_fb_release(struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; struct drm_framebuffer *fb, *tfb; mutex_lock(&dev->mode_config.mutex); @@ -1724,12 +1710,10 @@ void drm_fb_release(struct file *filp) * writes new mode id into arg. * Zero on success, errno on failure. */ -int drm_mode_addmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_addmode(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_modeinfo __user *argp = (void __user *)arg; + struct drm_mode_modeinfo __user *argp = (void __user *)data; struct drm_mode_modeinfo new_mode; struct drm_display_mode *user_mode; int ret = 0; @@ -1774,12 +1758,10 @@ out: * RETURNS: * Zero on success, errno on failure. */ -int drm_mode_rmmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_rmmode(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - uint32_t id = arg; + uint32_t id = (uint32_t)data; struct drm_display_mode *mode, *t; int ret = -EINVAL; @@ -1827,12 +1809,10 @@ out: * RETURNS: * Zero on success, errno on failure. */ -int drm_mode_attachmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_attachmode(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_mode_cmd __user *argp = (void __user *)arg; + struct drm_mode_mode_cmd __user *argp = (void __user *)data; struct drm_mode_mode_cmd mode_cmd; struct drm_output *output; struct drm_display_mode *mode; @@ -1884,12 +1864,10 @@ out: * RETURNS: * Zero on success, errno on failure. 
*/ -int drm_mode_detachmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_mode_detachmode(struct drm_device *dev, + void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - struct drm_mode_mode_cmd __user *argp = (void __user *)arg; + struct drm_mode_mode_cmd __user *argp = (void __user *)data; struct drm_mode_mode_cmd mode_cmd; struct drm_output *output; struct drm_display_mode *mode; diff --git a/linux-core/drm_crtc.h b/linux-core/drm_crtc.h index 75db5b05..d97cd196 100644 --- a/linux-core/drm_crtc.h +++ b/linux-core/drm_crtc.h @@ -528,29 +528,29 @@ extern bool drm_crtc_set_mode(struct drm_crtc *crtc, struct drm_display_mode *mo int x, int y); /* IOCTLs */ -extern int drm_mode_getresources(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); - -extern int drm_mode_getcrtc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_getoutput(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_setcrtc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_addfb(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_rmfb(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_getfb(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_addmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_rmmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_attachmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); -extern int drm_mode_detachmode(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +extern int drm_mode_getresources(struct drm_device *dev, + void *data, struct drm_file *file_priv); + +extern int drm_mode_getcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_getoutput(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_setcrtc(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_addfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_rmfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_getfb(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_addmode(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_rmmode(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_attachmode(struct drm_device *dev, + void *data, struct drm_file *file_priv); +extern int drm_mode_detachmode(struct drm_device *dev, + void *data, struct drm_file *file_priv); #endif /* __DRM_CRTC_H__ */ diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c index a7eee1a4..7cc44193 100644 --- a/linux-core/drm_dma.c +++ b/linux-core/drm_dma.c @@ -43,7 +43,7 @@ * * Allocate and initialize a drm_device_dma structure. 
*/ -int drm_dma_setup(drm_device_t * dev) +int drm_dma_setup(struct drm_device * dev) { int i; @@ -67,9 +67,9 @@ int drm_dma_setup(drm_device_t * dev) * Free all pages associated with DMA buffers, the buffers and pages lists, and * finally the the drm_device::dma structure itself. */ -void drm_dma_takedown(drm_device_t * dev) +void drm_dma_takedown(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, j; if (!dma) @@ -129,14 +129,14 @@ void drm_dma_takedown(drm_device_t * dev) * * Resets the fields of \p buf. */ -void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) +void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf) { if (!buf) return; buf->waiting = 0; buf->pending = 0; - buf->filp = NULL; + buf->file_priv = NULL; buf->used = 0; if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) @@ -148,19 +148,20 @@ void drm_free_buffer(drm_device_t * dev, drm_buf_t * buf) /** * Reclaim the buffers. * - * \param filp file pointer. + * \param file_priv DRM file private. * - * Frees each buffer associated with \p filp not already on the hardware. + * Frees each buffer associated with \p file_priv not already on the hardware. */ -void drm_core_reclaim_buffers(drm_device_t *dev, struct file *filp) +void drm_core_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) return; for (i = 0; i < dma->buf_count; i++) { - if (dma->buflist[i]->filp == filp) { + if (dma->buflist[i]->file_priv == file_priv) { switch (dma->buflist[i]->list) { case DRM_LIST_NONE: drm_free_buffer(dev, dma->buflist[i]); diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c index eb44a189..1839c576 100644 --- a/linux-core/drm_drawable.c +++ b/linux-core/drm_drawable.c @@ -40,28 +40,21 @@ /** * Allocate drawable ID and memory to store information about it. */ -int drm_adddraw(DRM_IOCTL_ARGS) +int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; unsigned long irqflags; - struct drm_drawable_list *draw_info; - drm_draw_t draw; + struct drm_draw *draw = data; int new_id = 0; int ret; - draw_info = drm_calloc(1, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); - if (!draw_info) - return -ENOMEM; - again: if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) { DRM_ERROR("Out of memory expanding drawable idr\n"); - drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); return -ENOMEM; } spin_lock_irqsave(&dev->drw_lock, irqflags); - ret = idr_get_new_above(&dev->drw_idr, draw_info, 1, &new_id); + ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id); if (ret == -EAGAIN) { spin_unlock_irqrestore(&dev->drw_lock, irqflags); goto again; @@ -69,11 +62,9 @@ again: spin_unlock_irqrestore(&dev->drw_lock, irqflags); - draw.handle = new_id; - - DRM_DEBUG("%d\n", draw.handle); + draw->handle = new_id; - DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw)); + DRM_DEBUG("%d\n", draw->handle); return 0; } @@ -81,73 +72,64 @@ again: /** * Free drawable ID and memory to store information about it. 
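drm_adddraw() above now hands IDs out of dev->drw_idr directly instead of allocating a drm_drawable_list wrapper first. The idr_pre_get()/idr_get_new_above() pair must be retried when the preallocation is consumed before the spinlock is taken; a stand-alone sketch of that retry idiom (helper name and parameters are illustrative, not from the patch):

#include <linux/idr.h>
#include <linux/spinlock.h>

/* Allocate a new ID >= 1 in 'idr', storing 'ptr' against it.
 * Returns the ID on success or a negative errno. */
static int alloc_handle(struct idr *idr, spinlock_t *lock, void *ptr)
{
	unsigned long flags;
	int id, ret;

again:
	/* Preallocate outside the spinlock; may sleep. */
	if (idr_pre_get(idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	spin_lock_irqsave(lock, flags);
	ret = idr_get_new_above(idr, ptr, 1, &id);
	spin_unlock_irqrestore(lock, flags);

	if (ret == -EAGAIN)	/* preallocation was used up; try again */
		goto again;
	if (ret)
		return ret;

	return id;
}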
*/ -int drm_rmdraw(DRM_IOCTL_ARGS) +int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_draw_t draw; + struct drm_draw *draw = data; unsigned long irqflags; - struct drm_drawable_list *draw_info; - - DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data, - sizeof(draw)); - - draw_info = idr_find(&dev->drw_idr, draw.handle); - if (!draw_info) { - DRM_DEBUG("No such drawable %d\n", draw.handle); - return -EINVAL; - } spin_lock_irqsave(&dev->drw_lock, irqflags); - idr_remove(&dev->drw_idr, draw.handle); - drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + drm_free(drm_get_drawable_info(dev, draw->handle), + sizeof(struct drm_drawable_info), DRM_MEM_BUFS); + + idr_remove(&dev->drw_idr, draw->handle); spin_unlock_irqrestore(&dev->drw_lock, irqflags); - DRM_DEBUG("%d\n", draw.handle); + DRM_DEBUG("%d\n", draw->handle); return 0; } -int drm_update_drawable_info(DRM_IOCTL_ARGS) { - DRM_DEVICE; - drm_update_draw_t update; +int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct drm_update_draw *update = data; unsigned long irqflags; - drm_drawable_info_t *info; - drm_clip_rect_t *rects; - struct drm_drawable_list *draw_info; + struct drm_clip_rect *rects; + struct drm_drawable_info *info; int err; - DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data, - sizeof(update)); - - draw_info = idr_find(&dev->drw_idr, update.handle); - if (!draw_info) { - DRM_ERROR("No such drawable %d\n", update.handle); - return DRM_ERR(EINVAL); + info = idr_find(&dev->drw_idr, update->handle); + if (!info) { + info = drm_calloc(1, sizeof(*info), DRM_MEM_BUFS); + if (!info) + return -ENOMEM; + if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) { + DRM_ERROR("No such drawable %d\n", update->handle); + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + return -EINVAL; + } } - info = &draw_info->info; - - switch (update.type) { + switch (update->type) { case DRM_DRAWABLE_CLIPRECTS: - if (update.num != info->num_rects) { - rects = drm_alloc(update.num * sizeof(drm_clip_rect_t), + if (update->num != info->num_rects) { + rects = drm_alloc(update->num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } else rects = info->rects; - if (update.num && !rects) { + if (update->num && !rects) { DRM_ERROR("Failed to allocate cliprect memory\n"); - err = DRM_ERR(ENOMEM); + err = -ENOMEM; goto error; } - if (update.num && DRM_COPY_FROM_USER(rects, - (drm_clip_rect_t __user *) - (unsigned long)update.data, - update.num * + if (update->num && DRM_COPY_FROM_USER(rects, + (struct drm_clip_rect __user *) + (unsigned long)update->data, + update->num * sizeof(*rects))) { DRM_ERROR("Failed to copy cliprects from userspace\n"); - err = DRM_ERR(EFAULT); + err = -EFAULT; goto error; } @@ -155,27 +137,27 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) { if (rects != info->rects) { drm_free(info->rects, info->num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); } info->rects = rects; - info->num_rects = update.num; + info->num_rects = update->num; spin_unlock_irqrestore(&dev->drw_lock, irqflags); DRM_DEBUG("Updated %d cliprects for drawable %d\n", - info->num_rects, update.handle); + info->num_rects, update->handle); break; default: - DRM_ERROR("Invalid update type %d\n", update.type); - return DRM_ERR(EINVAL); + DRM_ERROR("Invalid update type %d\n", update->type); + return -EINVAL; } return 0; error: if (rects != info->rects) - drm_free(rects, update.num * 
sizeof(drm_clip_rect_t), + drm_free(rects, update->num * sizeof(struct drm_clip_rect), DRM_MEM_BUFS); return err; @@ -184,28 +166,26 @@ error: /** * Caller must hold the drawable spinlock! */ -drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) { - struct drm_drawable_list *draw_info; - draw_info = idr_find(&dev->drw_idr, id); - if (!draw_info) { - DRM_DEBUG("No such drawable %d\n", id); - return NULL; - } - - return &draw_info->info; +struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id) +{ + return idr_find(&dev->drw_idr, id); } EXPORT_SYMBOL(drm_get_drawable_info); static int drm_drawable_free(int idr, void *p, void *data) { - struct drm_drawable_list *drw_entry = p; - drm_free(drw_entry->info.rects, drw_entry->info.num_rects * - sizeof(drm_clip_rect_t), DRM_MEM_BUFS); - drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS); + struct drm_drawable_info *info = p; + + if (info) { + drm_free(info->rects, info->num_rects * + sizeof(struct drm_clip_rect), DRM_MEM_BUFS); + drm_free(info, sizeof(*info), DRM_MEM_BUFS); + } + return 0; } -void drm_drawable_free_all(drm_device_t *dev) +void drm_drawable_free_all(struct drm_device *dev) { idr_for_each(&dev->drw_idr, drm_drawable_free, NULL); idr_remove_all(&dev->drw_idr); diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 0b4f8095..9004d535 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -48,92 +48,119 @@ #include "drmP.h" #include "drm_core.h" -static void drm_cleanup(drm_device_t * dev); +static void drm_cleanup(struct drm_device * dev); int drm_fb_loaded = 0; -static int drm_version(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg); +static int drm_version(struct drm_device *dev, void *data, + struct drm_file *file_priv); /** Ioctl table */ -static drm_ioctl_desc_t drm_ioctls[] = { - [DRM_IOCTL_NR(DRM_IOCTL_VERSION)] = {drm_version, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE)] = {drm_getunique, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAGIC)] = {drm_getmagic, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_IRQ_BUSID)] = {drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP)] = {drm_getmap, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CLIENT)] = {drm_getclient, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_STATS)] = {drm_getstats, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_SET_VERSION)] = {drm_setversion, DRM_MASTER|DRM_ROOT_ONLY}, - - [DRM_IOCTL_NR(DRM_IOCTL_SET_UNIQUE)] = {drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_BLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_UNBLOCK)] = {drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AUTH_MAGIC)] = {drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_MAP)] = {drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_MAP)] = {drm_rmmap_ioctl, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_SET_SAREA_CTX)] = {drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_SAREA_CTX)] = {drm_getsareactx, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_CTX)] = {drm_addctx, DRM_AUTH|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_CTX)] = {drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MOD_CTX)] = {drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_GET_CTX)] = {drm_getctx, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_SWITCH_CTX)] = {drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_NEW_CTX)] = 
{drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RES_CTX)] = {drm_resctx, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_DRAW)] = {drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_RM_DRAW)] = {drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - - [DRM_IOCTL_NR(DRM_IOCTL_LOCK)] = {drm_lock, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_UNLOCK)] = {drm_unlock, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_FINISH)] = {drm_noop, DRM_AUTH}, - - [DRM_IOCTL_NR(DRM_IOCTL_ADD_BUFS)] = {drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MARK_BUFS)] = {drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_INFO_BUFS)] = {drm_infobufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_MAP_BUFS)] = {drm_mapbufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_FREE_BUFS)] = {drm_freebufs, DRM_AUTH}, +static struct drm_ioctl_desc drm_ioctls[] = { + DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0), + DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_MAP, drm_getmap, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CLIENT, drm_getclient, 0), + DRM_IOCTL_DEF(DRM_IOCTL_GET_STATS, drm_getstats, 0), + DRM_IOCTL_DEF(DRM_IOCTL_SET_VERSION, drm_setversion, DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_UNIQUE, drm_setunique, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_BLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_UNBLOCK, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AUTH_MAGIC, drm_authmagic, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_MAP, drm_addmap_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_MAP, drm_rmmap_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_SET_SAREA_CTX, drm_setsareactx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_SAREA_CTX, drm_getsareactx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_CTX, drm_addctx, DRM_AUTH|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_CTX, drm_rmctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MOD_CTX, drm_modctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_GET_CTX, drm_getctx, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_SWITCH_CTX, drm_switchctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_FINISH, drm_noop, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_ADD_BUFS, drm_addbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MARK_BUFS, drm_markbufs, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_INFO_BUFS, drm_infobufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MAP_BUFS, drm_mapbufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FREE_BUFS, drm_freebufs, DRM_AUTH), /* The DRM_IOCTL_DMA ioctl should be defined by the driver. 
*/ - [DRM_IOCTL_NR(DRM_IOCTL_DMA)] = {NULL, DRM_AUTH}, + DRM_IOCTL_DEF(DRM_IOCTL_DMA, NULL, DRM_AUTH), - [DRM_IOCTL_NR(DRM_IOCTL_CONTROL)] = {drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #if __OS_HAS_AGP - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ACQUIRE)] = {drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_RELEASE)] = {drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ENABLE)] = {drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_INFO)] = {drm_agp_info_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_ALLOC)] = {drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_FREE)] = {drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_BIND)] = {drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_AGP_UNBIND)] = {drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), #endif - [DRM_IOCTL_NR(DRM_IOCTL_SG_ALLOC)] = {drm_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_SG_FREE)] = {drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - - [DRM_IOCTL_NR(DRM_IOCTL_WAIT_VBLANK)] = {drm_wait_vblank, 0}, - [DRM_IOCTL_NR(DRM_IOCTL_FENCE)] = {drm_fence_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_BUFOBJ)] = {drm_bo_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_IOCTL_MM_INIT)] = {drm_mm_init_ioctl, - DRM_AUTH }, - - [DRM_IOCTL_NR(DRM_IOCTL_UPDATE_DRAW)] = {drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETRESOURCES)] = {drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETCRTC)] = {drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETOUTPUT)] = {drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_SETCRTC)] = {drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDFB)] = {drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_RMFB)] = {drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_GETFB)] = {drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_ADDMODE)] = {drm_mode_addmode, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_RMMODE)] = {drm_mode_rmmode, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_ATTACHMODE)] = {drm_mode_attachmode, DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_IOCTL_MODE_DETACHMODE)] = {drm_mode_detachmode, DRM_MASTER|DRM_ROOT_ONLY}, + DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, 
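The core ioctl table above switches from open-coded designated initializers to a DRM_IOCTL_DEF() macro, so every entry carries the complete ioctl command word alongside its handler and permission flags. A plausible shape for the descriptor and macro, assuming field names this hunk does not show (the real definitions live in drmP.h and may differ):

/* Handler type in the new (dev, data, file_priv) style. */
typedef int drm_ioctl_t(struct drm_device *dev, void *data,
			struct drm_file *file_priv);

struct drm_ioctl_desc {
	unsigned int cmd;	/* full command word, including size/direction bits */
	drm_ioctl_t *func;	/* handler, or NULL for driver-defined entries */
	int flags;		/* DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY ... */
};

/* Index the table by ioctl number, but keep the whole command word so the
 * dispatcher can derive the copy size and direction from it. */
#define DRM_IOCTL_DEF(ioctl, _func, _flags)				\
	[DRM_IOCTL_NR(ioctl)] = { .cmd = ioctl, .func = _func, .flags = _flags }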
drm_wait_vblank, 0), + // DRM_IOCTL_DEF(DRM_IOCTL_BUFOBJ, drm_bo_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, + DRM_AUTH ), + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDMODE, drm_mode_addmode, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMMODE, drm_mode_rmmode, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode, DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode, DRM_MASTER|DRM_ROOT_ONLY), + // DRM_IOCTL_DEF(DRM_IOCTL_BUFOBJ, drm_bo_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + + + DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_FLUSH, drm_fence_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_WAIT, drm_fence_wait_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_EMIT, drm_fence_emit_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH), + + DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_UNREFERENCE, drm_bo_unreference_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_OP, drm_bo_op_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_INFO, drm_bo_info_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_WAIT_IDLE, drm_bo_wait_idle_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_IOCTL_BO_SET_PIN, drm_bo_set_pin_ioctl, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -148,11 +175,11 @@ static drm_ioctl_desc_t drm_ioctls[] = { * * \sa drm_device */ -int drm_lastclose(drm_device_t * dev) +int drm_lastclose(struct drm_device * dev) { - drm_magic_entry_t *pt, *next; - drm_map_list_t *r_list, *list_t; - drm_vma_entry_t *vma, *vma_temp; + struct drm_magic_entry *pt, *next; + struct drm_map_list *r_list, *list_t; + struct drm_vma_entry *vma, *vma_temp; int i; DRM_DEBUG("\n"); @@ -198,7 +225,7 @@ int 
drm_lastclose(drm_device_t * dev) /* Clear AGP information */ if (drm_core_has_AGP(dev) && dev->agp) { - drm_agp_mem_t *entry, *tempe; + struct drm_agp_mem *entry, *tempe; /* Remove AGP resources, but leave dev->agp intact until drv_cleanup is called. */ @@ -252,8 +279,9 @@ int drm_lastclose(drm_device_t * dev) if (drm_core_check_feature(dev, DRIVER_HAVE_DMA)) drm_dma_takedown(dev); - if (dev->lock.filp) { - dev->lock.filp = NULL; + if (dev->lock.hw_lock) { + dev->sigdata.lock = dev->lock.hw_lock = NULL; /* SHM removed */ + dev->lock.file_priv = NULL; wake_up_interruptible(&dev->lock.lock_queue); } dev->dev_mapping = NULL; @@ -265,7 +293,7 @@ int drm_lastclose(drm_device_t * dev) void drm_cleanup_pci(struct pci_dev *pdev) { - drm_device_t *dev = pci_get_drvdata(pdev); + struct drm_device *dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, NULL); pci_release_regions(pdev); @@ -319,7 +347,7 @@ int drm_init(struct drm_driver *driver, } if (!drm_fb_loaded) - pci_register_driver(&driver->pci_driver); + return pci_register_driver(&driver->pci_driver); else { for (i = 0; pciidlist[i].vendor != 0; i++) { pid = &pciidlist[i]; @@ -351,7 +379,7 @@ EXPORT_SYMBOL(drm_init); * * \sa drm_init */ -static void drm_cleanup(drm_device_t * dev) +static void drm_cleanup(struct drm_device * dev) { DRM_DEBUG("\n"); @@ -400,8 +428,8 @@ static void drm_cleanup(drm_device_t * dev) void drm_exit(struct drm_driver *driver) { int i; - drm_device_t *dev = NULL; - drm_head_t *head; + struct drm_device *dev = NULL; + struct drm_head *head; DRM_DEBUG("\n"); if (drm_fb_loaded) { @@ -519,34 +547,26 @@ module_exit(drm_core_exit); * Get version information * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_version structure. * \return zero on success or negative number on failure. * * Fills in the version information in \p arg. */ -static int drm_version(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int drm_version(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_version_t __user *argp = (void __user *)arg; - drm_version_t version; + struct drm_version *version = data; int len; - if (copy_from_user(&version, argp, sizeof(version))) - return -EFAULT; + version->version_major = dev->driver->major; + version->version_minor = dev->driver->minor; + version->version_patchlevel = dev->driver->patchlevel; + DRM_COPY(version->name, dev->driver->name); + DRM_COPY(version->date, dev->driver->date); + DRM_COPY(version->desc, dev->driver->desc); - version.version_major = dev->driver->major; - version.version_minor = dev->driver->minor; - version.version_patchlevel = dev->driver->patchlevel; - DRM_COPY(version.name, dev->driver->name); - DRM_COPY(version.date, dev->driver->date); - DRM_COPY(version.desc, dev->driver->desc); - - if (copy_to_user(argp, &version, sizeof(version))) - return -EFAULT; return 0; } @@ -554,31 +574,43 @@ static int drm_version(struct inode *inode, struct file *filp, * Called whenever a process performs an ioctl on /dev/drm. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. 
* * Looks up the ioctl function in the ::ioctls table, checking for root * previleges if so required, and dispatches to the respective function. + * + * Copies data in and out according to the size and direction given in cmd, + * which must match the ioctl cmd known by the kernel. The kernel uses a 512 + * byte stack buffer to store the ioctl arguments in kernel space. Should we + * ever need much larger ioctl arguments, we may need to allocate memory. */ int drm_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_ioctl_desc_t *ioctl; + return drm_unlocked_ioctl(filp, cmd, arg); +} +EXPORT_SYMBOL(drm_ioctl); + +long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) +{ + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; + struct drm_ioctl_desc *ioctl; drm_ioctl_t *func; unsigned int nr = DRM_IOCTL_NR(cmd); int retcode = -EINVAL; + char kdata[512]; atomic_inc(&dev->ioctl_count); atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); - ++priv->ioctl_count; + ++file_priv->ioctl_count; DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", - current->pid, cmd, nr, (long)old_encode_dev(priv->head->device), - priv->authenticated); + current->pid, cmd, nr, (long)old_encode_dev(file_priv->head->device), + file_priv->authenticated); if ((nr >= DRM_CORE_IOCTL_COUNT) && ((nr < DRM_COMMAND_BASE) || (nr >= DRM_COMMAND_END))) @@ -588,35 +620,63 @@ int drm_ioctl(struct inode *inode, struct file *filp, ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) ioctl = &drm_ioctls[nr]; - else + else { + retcode = -EINVAL; goto err_i1; - + } +#if 0 + /* + * This check is disabled, because driver private ioctl->cmd + * are not the ioctl commands with size and direction bits but + * just the indices. The DRM core ioctl->cmd are the proper ioctl + * commands. The drivers' ioctl tables need to be fixed. + */ + if (ioctl->cmd != cmd) { + retcode = -EINVAL; + goto err_i1; + } +#endif func = ioctl->func; /* is there a local override? 
*/ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) func = dev->driver->dma_ioctl; + if (cmd & IOC_IN) { + if (copy_from_user(kdata, (void __user *)arg, + _IOC_SIZE(cmd)) != 0) { + retcode = -EACCES; + goto err_i1; + } + } + if (!func) { DRM_DEBUG("no function\n"); retcode = -EINVAL; } else if (((ioctl->flags & DRM_ROOT_ONLY) && !capable(CAP_SYS_ADMIN)) || - ((ioctl->flags & DRM_AUTH) && !priv->authenticated) || - ((ioctl->flags & DRM_MASTER) && !priv->master)) { + ((ioctl->flags & DRM_AUTH) && !file_priv->authenticated) || + ((ioctl->flags & DRM_MASTER) && !file_priv->master)) { retcode = -EACCES; } else { - retcode = func(inode, filp, cmd, arg); + retcode = func(dev, kdata, file_priv); + } + + if (cmd & IOC_OUT) { + if (copy_to_user((void __user *)arg, kdata, + _IOC_SIZE(cmd)) != 0) + retcode = -EACCES; } + err_i1: atomic_dec(&dev->ioctl_count); if (retcode) - DRM_DEBUG("ret = %x\n", retcode); + DRM_DEBUG("ret = %d\n", retcode); return retcode; } -EXPORT_SYMBOL(drm_ioctl); +EXPORT_SYMBOL(drm_unlocked_ioctl); drm_local_map_t *drm_getsarea(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; list_for_each_entry(entry, &dev->maplist, head) { if (entry->map && entry->map->type == _DRM_SHM && diff --git a/linux-core/drm_edid.c b/linux-core/drm_edid.c index 0d067929..d7d3939a 100644 --- a/linux-core/drm_edid.c +++ b/linux-core/drm_edid.c @@ -94,7 +94,7 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev, * if this is the display's preferred timing, and we'll use it to indicate * to the other layers that this mode is desired. */ -struct drm_display_mode *drm_mode_detailed(drm_device_t *dev, +struct drm_display_mode *drm_mode_detailed(struct drm_device *dev, struct detailed_timing *timing) { struct drm_display_mode *mode; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index 5215feb6..2f16f7ef 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -34,17 +34,17 @@ * Typically called by the IRQ handler. 
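As the hunk above shows, drm_ioctl() is now a trivial wrapper around drm_unlocked_ioctl(), which derives the copy size and direction from the command word itself: IOC_IN arguments are copied into a 512-byte on-stack kdata buffer before dispatch, and IOC_OUT arguments are copied back afterwards. For that to work, userspace must issue commands whose _IOC_SIZE matches the argument struct; a hedged userspace-side illustration with an invented command number:

#include <stdint.h>
#include <sys/ioctl.h>

struct drm_foo_arg {			/* hypothetical argument struct */
	uint32_t handle;
	uint32_t flags;
};

/* 'd' is the DRM ioctl magic; 0x42 is an invented command number. */
#define DRM_IOCTL_FOO	_IOWR('d', 0x42, struct drm_foo_arg)

static int do_foo(int drm_fd, uint32_t handle)
{
	struct drm_foo_arg arg = { .handle = handle, .flags = 0 };

	/* _IOC_SIZE(DRM_IOCTL_FOO) == sizeof(arg), well under the kernel's
	 * 512-byte kdata buffer, and _IOWR sets both IOC_IN and IOC_OUT. */
	return ioctl(drm_fd, DRM_IOCTL_FOO, &arg);
}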
*/ -void drm_fence_handler(drm_device_t * dev, uint32_t class, +void drm_fence_handler(struct drm_device * dev, uint32_t class, uint32_t sequence, uint32_t type) { int wake = 0; uint32_t diff; uint32_t relevant; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; struct list_head *head; - drm_fence_object_t *fence, *next; + struct drm_fence_object *fence, *next; int found = 0; int is_exe = (type & DRM_FENCE_TYPE_EXE); int ge_last_exe; @@ -114,9 +114,9 @@ void drm_fence_handler(drm_device_t * dev, uint32_t class, EXPORT_SYMBOL(drm_fence_handler); -static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) +static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -124,11 +124,11 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring) write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; DRM_ASSERT_LOCKED(&dev->struct_mutex); *fence = NULL; @@ -142,11 +142,11 @@ void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) } } -void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; *fence = NULL; if (atomic_dec_and_test(&tmp_fence->usage)) { @@ -180,22 +180,22 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, } -static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base) +static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) { - drm_fence_object_t *fence = - drm_user_object_entry(base, drm_fence_object_t, base); + struct drm_fence_object *fence = + drm_user_object_entry(base, struct drm_fence_object, base); drm_fence_usage_deref_locked(&fence); } -int drm_fence_object_signaled(drm_fence_object_t * fence, +int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t mask, int poke_flush) { unsigned long flags; int signaled; struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; if (poke_flush) driver->poke_flush(dev, fence->class); @@ -207,8 +207,8 @@ int drm_fence_object_signaled(drm_fence_object_t * fence, return signaled; } -static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, - drm_fence_driver_t * driver, uint32_t sequence) +static void drm_fence_flush_exe(struct drm_fence_class_manager * fc, + struct drm_fence_driver * driver, uint32_t sequence) { uint32_t diff; @@ -224,13 +224,13 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_flush(drm_fence_object_t * fence, +int 
drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; unsigned long flags; if (type & ~fence->type) { @@ -262,14 +262,14 @@ int drm_fence_object_flush(drm_fence_object_t * fence, * wrapped around and reused. */ -void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) +void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t diff; write_lock_irqsave(&fm->lock, flags); @@ -290,7 +290,7 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) mutex_unlock(&dev->struct_mutex); return; } - fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring)); + fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring)); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); @@ -302,13 +302,13 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence) EXPORT_SYMBOL(drm_fence_flush_old); -static int drm_fence_lazy_wait(drm_fence_object_t *fence, +static int drm_fence_lazy_wait(struct drm_fence_object *fence, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; int signaled; unsigned long _end = jiffies + 3*DRM_HZ; int ret = 0; @@ -336,11 +336,11 @@ static int drm_fence_lazy_wait(drm_fence_object_t *fence, return 0; } -int drm_fence_object_wait(drm_fence_object_t * fence, +int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_driver *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; int signaled; @@ -403,13 +403,13 @@ int drm_fence_object_wait(drm_fence_object_t * fence, return 0; } -int drm_fence_object_emit(drm_fence_object_t * fence, +int drm_fence_object_emit(struct drm_fence_object * fence, uint32_t fence_flags, uint32_t class, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; unsigned long flags; uint32_t sequence; uint32_t native_type; @@ -435,14 +435,14 @@ int drm_fence_object_emit(drm_fence_object_t * fence, return 0; } 
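Besides the typedef-to-struct renames, drm_fence_manager_init() above now takes fm->lock with write_lock_irqsave(), since drm_fence_handler() acquires the same rwlock from interrupt context. For orientation, a sketch of how the in-kernel fence entry points seen in this file fit together; class, flags and error handling are simplified and the wrapper function is invented:

/* Sketch only: emit a fence on class 0 and wait for it, using the entry
 * points shown in this file. Flag and type values are illustrative. */
static int foo_sync(struct drm_device *dev)
{
	struct drm_fence_object *fence;
	int ret;

	ret = drm_fence_object_create(dev, 0 /* class */, DRM_FENCE_TYPE_EXE,
				      0 /* flags */, &fence);
	if (ret)
		return ret;

	ret = drm_fence_object_emit(fence, 0, 0 /* class */, DRM_FENCE_TYPE_EXE);
	if (!ret)
		ret = drm_fence_object_wait(fence, 1 /* lazy */, 0, fence->type);

	/* Drop our reference; the object is freed once nobody else holds one. */
	drm_fence_usage_deref_unlocked(&fence);
	return ret;
}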
-static int drm_fence_object_init(drm_device_t * dev, uint32_t class, +static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, uint32_t fence_flags, - drm_fence_object_t * fence) + struct drm_fence_object * fence) { int ret = 0; unsigned long flags; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; mutex_lock(&dev->struct_mutex); atomic_set(&fence->usage, 1); @@ -471,10 +471,10 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class, return ret; } -int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence, +int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -491,12 +491,12 @@ out: } EXPORT_SYMBOL(drm_fence_add_user_object); -int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, - unsigned flags, drm_fence_object_t ** c_fence) +int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type, + unsigned flags, struct drm_fence_object ** c_fence) { - drm_fence_object_t *fence; + struct drm_fence_object *fence; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if (!fence) @@ -514,15 +514,16 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type, EXPORT_SYMBOL(drm_fence_object_create); -void drm_fence_manager_init(drm_device_t * dev) +void drm_fence_manager_init(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *class; - drm_fence_driver_t *fed = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *class; + struct drm_fence_driver *fed = dev->driver->fence_driver; int i; + unsigned long flags; rwlock_init(&fm->lock); - write_lock(&fm->lock); + write_lock_irqsave(&fm->lock, flags); fm->initialized = 0; if (!fed) goto out_unlock; @@ -541,18 +542,18 @@ void drm_fence_manager_init(drm_device_t * dev) atomic_set(&fm->count, 0); out_unlock: - write_unlock(&fm->lock); + write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_manager_takedown(drm_device_t * dev) +void drm_fence_manager_takedown(struct drm_device * dev) { } -drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) +struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { - drm_device_t *dev = priv->head->dev; - drm_user_object_t *uo; - drm_fence_object_t *fence; + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_fence_object *fence; mutex_lock(&dev->struct_mutex); uo = drm_lookup_user_object(priv, handle); @@ -560,19 +561,17 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle) mutex_unlock(&dev->struct_mutex); return NULL; } - fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base)); + fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); mutex_unlock(&dev->struct_mutex); return fence; } -int drm_fence_ioctl(DRM_IOCTL_ARGS) +int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_arg_t arg; - drm_fence_object_t *fence; - drm_user_object_t *uo; + struct drm_fence_manager *fm = &dev->fm; + struct 
drm_fence_arg *arg = data; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -581,100 +580,265 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS) return -EINVAL; } - DRM_COPY_FROM_USER_IOCTL(arg, (void __user *)data, sizeof(arg)); - switch (arg.op) { - case drm_fence_create: - if (arg.flags & DRM_FENCE_FLAG_EMIT) - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_object_create(dev, arg.class, - arg.type, arg.flags, &fence); - if (ret) - return ret; - ret = drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) { - drm_fence_usage_deref_unlocked(&fence); - return ret; - } - arg.handle = fence->base.hash.key; + if (arg->flags & DRM_FENCE_FLAG_EMIT) + LOCK_TEST_WITH_RETURN(dev, file_priv); + ret = drm_fence_object_create(dev, arg->class, + arg->type, arg->flags, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(file_priv, fence, + arg->flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) { + drm_fence_usage_deref_unlocked(&fence); + return ret; + } + + /* + * usage > 0. No need to lock dev->struct_mutex; + */ + + arg->handle = fence->base.hash.key; - break; - case drm_fence_destroy: - mutex_lock(&dev->struct_mutex); - uo = drm_lookup_user_object(priv, arg.handle); - if (!uo || (uo->type != drm_fence_type) || uo->owner != priv) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } - ret = drm_remove_user_object(priv, uo); + read_lock_irqsave(&fm->lock, flags); + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + +int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_user_object *uo; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + mutex_lock(&dev->struct_mutex); + uo = drm_lookup_user_object(file_priv, arg->handle); + if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) { mutex_unlock(&dev->struct_mutex); + return -EINVAL; + } + ret = drm_remove_user_object(file_priv, uo); + mutex_unlock(&dev->struct_mutex); + return ret; +} + + +int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + struct drm_user_object *uo; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo); + if (ret) return ret; - case drm_fence_reference: - ret = - drm_user_object_ref(priv, arg.handle, drm_fence_type, &uo); - if (ret) - return ret; - fence = drm_lookup_fence_object(priv, arg.handle); - break; - case drm_fence_unreference: - ret = drm_user_object_unref(priv, arg.handle, drm_fence_type); - return ret; - case drm_fence_signaled: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - break; - case drm_fence_flush: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = drm_fence_object_flush(fence, arg.type); - break; - case drm_fence_wait: - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = - drm_fence_object_wait(fence, - arg.flags & 
DRM_FENCE_FLAG_WAIT_LAZY, - 0, arg.type); - break; - case drm_fence_emit: - LOCK_TEST_WITH_RETURN(dev, filp); - fence = drm_lookup_fence_object(priv, arg.handle); - if (!fence) - return -EINVAL; - ret = drm_fence_object_emit(fence, arg.flags, arg.class, - arg.type); - break; - case drm_fence_buffers: - if (!dev->bm.initialized) { - DRM_ERROR("Buffer object manager is not initialized\n"); - return -EINVAL; - } - LOCK_TEST_WITH_RETURN(dev, filp); - ret = drm_fence_buffer_objects(priv, NULL, arg.flags, - NULL, &fence); - if (ret) - return ret; - ret = drm_fence_add_user_object(priv, fence, - arg.flags & - DRM_FENCE_FLAG_SHAREABLE); - if (ret) - return ret; - arg.handle = fence->base.hash.key; - break; - default: + fence = drm_lookup_fence_object(file_priv, arg->handle); + + read_lock_irqsave(&fm->lock, flags); + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + + +int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + return drm_user_object_unref(file_priv, arg->handle, drm_fence_type); +} + +int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + + read_lock_irqsave(&fm->lock, flags); + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + +int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); return -EINVAL; } + + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_flush(fence, arg->type); + + read_lock_irqsave(&fm->lock, flags); + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + + +int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_wait(fence, + arg->flags & DRM_FENCE_FLAG_WAIT_LAZY, + 0, arg->type); + + read_lock_irqsave(&fm->lock, flags); + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + 
read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + + +int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + LOCK_TEST_WITH_RETURN(dev, file_priv); + fence = drm_lookup_fence_object(file_priv, arg->handle); + if (!fence) + return -EINVAL; + ret = drm_fence_object_emit(fence, arg->flags, arg->class, + arg->type); + + read_lock_irqsave(&fm->lock, flags); + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; + read_unlock_irqrestore(&fm->lock, flags); + drm_fence_usage_deref_unlocked(&fence); + + return ret; +} + +int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + int ret; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_arg *arg = data; + struct drm_fence_object *fence; + unsigned long flags; + ret = 0; + + if (!fm->initialized) { + DRM_ERROR("The DRM driver does not support fencing.\n"); + return -EINVAL; + } + + if (!dev->bm.initialized) { + DRM_ERROR("Buffer object manager is not initialized\n"); + return -EINVAL; + } + LOCK_TEST_WITH_RETURN(dev, file_priv); + ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags, + NULL, &fence); + if (ret) + return ret; + ret = drm_fence_add_user_object(file_priv, fence, + arg->flags & + DRM_FENCE_FLAG_SHAREABLE); + if (ret) + return ret; + + arg->handle = fence->base.hash.key; + read_lock_irqsave(&fm->lock, flags); - arg.class = fence->class; - arg.type = fence->type; - arg.signaled = fence->signaled; + arg->class = fence->class; + arg->type = fence->type; + arg->signaled = fence->signaled; read_unlock_irqrestore(&fm->lock, flags); drm_fence_usage_deref_unlocked(&fence); - DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg)); return ret; } diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c index 9116685c..643205c8 100644 --- a/linux-core/drm_fops.c +++ b/linux-core/drm_fops.c @@ -39,9 +39,9 @@ #include <linux/poll.h> static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev); + struct drm_device * dev); -static int drm_setup(drm_device_t * dev) +static int drm_setup(struct drm_device * dev) { drm_local_map_t *map; int i; @@ -128,7 +128,7 @@ static int drm_setup(drm_device_t * dev) */ int drm_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int retcode = 0; @@ -176,7 +176,7 @@ EXPORT_SYMBOL(drm_open); */ int drm_stub_open(struct inode *inode, struct file *filp) { - drm_device_t *dev = NULL; + struct drm_device *dev = NULL; int minor = iminor(inode); int err = -ENODEV; const struct file_operations *old_fops; @@ -232,10 +232,10 @@ static int drm_cpu_valid(void) * filp and add it into the double linked list in \p dev. 
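Each of the new per-operation fence ioctls above (create, reference, signaled, flush, wait, emit, buffers) ends with the same epilogue: read-lock fm->lock, copy class, type and signaled back into the argument, unlock, then drop the fence reference. If that repetition ever becomes a maintenance burden it could be factored into a small helper along these lines (purely illustrative; no such helper exists in this patch):

/* Hypothetical helper: fill the user-visible fields of 'arg' from 'fence'
 * under the fence manager lock, then release the caller's reference.
 * The caller must not touch 'fence' after this returns. */
static void drm_fence_fill_arg_and_deref(struct drm_device *dev,
					 struct drm_fence_object *fence,
					 struct drm_fence_arg *arg)
{
	struct drm_fence_manager *fm = &dev->fm;
	unsigned long flags;

	read_lock_irqsave(&fm->lock, flags);
	arg->class = fence->class;
	arg->type = fence->type;
	arg->signaled = fence->signaled;
	read_unlock_irqrestore(&fm->lock, flags);

	drm_fence_usage_deref_unlocked(&fence);
}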
*/ static int drm_open_helper(struct inode *inode, struct file *filp, - drm_device_t * dev) + struct drm_device * dev) { int minor = iminor(inode); - drm_file_t *priv; + struct drm_file *priv; int ret; int i,j; @@ -252,6 +252,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp, memset(priv, 0, sizeof(*priv)); filp->private_data = priv; + priv->filp = filp; priv->uid = current->euid; priv->pid = current->pid; priv->minor = minor; @@ -321,8 +322,8 @@ static int drm_open_helper(struct inode *inode, struct file *filp, /** No-op. */ int drm_fasync(int fd, struct file *filp, int on) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int retcode; DRM_DEBUG("fd = %d, device = 0x%lx\n", fd, @@ -336,10 +337,10 @@ EXPORT_SYMBOL(drm_fasync); static void drm_object_release(struct file *filp) { - drm_file_t *priv = filp->private_data; + struct drm_file *priv = filp->private_data; struct list_head *head; - drm_user_object_t *user_object; - drm_ref_object_t *ref_object; + struct drm_user_object *user_object; + struct drm_ref_object *ref_object; int i; /* @@ -352,7 +353,7 @@ static void drm_object_release(struct file *filp) { head = &priv->refd_objects; while (head->next != head) { - ref_object = list_entry(head->next, drm_ref_object_t, list); + ref_object = list_entry(head->next, struct drm_ref_object, list); drm_remove_ref_object(priv, ref_object); head = &priv->refd_objects; } @@ -363,7 +364,7 @@ static void drm_object_release(struct file *filp) { head = &priv->user_objects; while (head->next != head) { - user_object = list_entry(head->next, drm_user_object_t, list); + user_object = list_entry(head->next, struct drm_user_object, list); drm_remove_user_object(priv, user_object); head = &priv->user_objects; } @@ -377,7 +378,7 @@ static void drm_object_release(struct file *filp) { * Release file. * * \param inode device inode - * \param filp file pointer. + * \param file_priv DRM file private. * \return zero on success or a negative number on failure. 
* * If the hardware lock is held then free it, and take it again for the kernel @@ -387,29 +388,28 @@ static void drm_object_release(struct file *filp) { */ int drm_release(struct inode *inode, struct file *filp) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *file_priv = filp->private_data; + struct drm_device *dev = file_priv->head->dev; int retcode = 0; lock_kernel(); - dev = priv->head->dev; DRM_DEBUG("open_count = %d\n", dev->open_count); if (dev->driver->preclose) - dev->driver->preclose(dev, filp); + dev->driver->preclose(dev, file_priv); /* ======================================================== * Begin inline drm_release */ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", - current->pid, (long)old_encode_dev(priv->head->device), + current->pid, (long)old_encode_dev(file_priv->head->device), dev->open_count); if (dev->driver->reclaim_buffers_locked && dev->lock.hw_lock) { - if (drm_i_have_hw_lock(filp)) { - dev->driver->reclaim_buffers_locked(dev, filp); + if (drm_i_have_hw_lock(dev, file_priv)) { + dev->driver->reclaim_buffers_locked(dev, file_priv); } else { unsigned long _end=jiffies + 3*DRM_HZ; int locked = 0; @@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp) "\tI will go on reclaiming the buffers anyway.\n"); } - dev->driver->reclaim_buffers_locked(dev, filp); + dev->driver->reclaim_buffers_locked(dev, file_priv); drm_idlelock_release(&dev->lock); } } @@ -443,12 +443,12 @@ int drm_release(struct inode *inode, struct file *filp) if (dev->driver->reclaim_buffers_idlelocked && dev->lock.hw_lock) { drm_idlelock_take(&dev->lock); - dev->driver->reclaim_buffers_idlelocked(dev, filp); + dev->driver->reclaim_buffers_idlelocked(dev, file_priv); drm_idlelock_release(&dev->lock); } - if (drm_i_have_hw_lock(filp)) { + if (drm_i_have_hw_lock(dev, file_priv)) { DRM_DEBUG("File %p released, freeing lock for context %d\n", filp, _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock)); @@ -459,7 +459,7 @@ int drm_release(struct inode *inode, struct file *filp) if (drm_core_check_feature(dev, DRIVER_HAVE_DMA) && !dev->driver->reclaim_buffers_locked) { - dev->driver->reclaim_buffers(dev, filp); + dev->driver->reclaim_buffers(dev, file_priv); } drm_fasync(-1, filp, 0); @@ -467,10 +467,10 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->ctxlist_mutex); if (!list_empty(&dev->ctxlist)) { - drm_ctx_list_t *pos, *n; + struct drm_ctx_list *pos, *n; list_for_each_entry_safe(pos, n, &dev->ctxlist, head) { - if (pos->tag == priv && + if (pos->tag == file_priv && pos->handle != DRM_KERNEL_CONTEXT) { if (dev->driver->context_dtor) dev->driver->context_dtor(dev, @@ -489,18 +489,18 @@ int drm_release(struct inode *inode, struct file *filp) mutex_lock(&dev->struct_mutex); drm_fb_release(filp); drm_object_release(filp); - if (priv->remove_auth_on_close == 1) { - drm_file_t *temp; + if (file_priv->remove_auth_on_close == 1) { + struct drm_file *temp; list_for_each_entry(temp, &dev->filelist, lhead) temp->authenticated = 0; } - list_del(&priv->lhead); + list_del(&file_priv->lhead); mutex_unlock(&dev->struct_mutex); if (dev->driver->postclose) - dev->driver->postclose(dev, priv); - drm_free(priv, sizeof(*priv), DRM_MEM_FILES); + dev->driver->postclose(dev, file_priv); + drm_free(file_priv, sizeof(*file_priv), DRM_MEM_FILES); /* ======================================================== * End inline drm_release diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c index 6f17e114..a8ec8468 100644 --- 
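drm_object_release() above tears down a client's object state in two passes: referencing objects first, then the user objects the file still owns. The loops re-read the list head after each removal rather than using list_for_each(), since the removal helpers may change the list beyond the single entry being removed. A condensed sketch built only from the structures and helpers visible in that hunk:

static void drm_object_release_sketch(struct drm_file *priv)
{
	struct drm_ref_object *ref_object;
	struct drm_user_object *user_object;
	struct list_head *head;

	/* Drop reference objects first, restarting from the head each time. */
	head = &priv->refd_objects;
	while (head->next != head) {
		ref_object = list_entry(head->next, struct drm_ref_object, list);
		drm_remove_ref_object(priv, ref_object);
		head = &priv->refd_objects;
	}

	/* Then remove the user objects still owned by this file. */
	head = &priv->user_objects;
	while (head->next != head) {
		user_object = list_entry(head->next, struct drm_user_object, list);
		drm_remove_user_object(priv, user_object);
		head = &priv->user_objects;
	}
}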
a/linux-core/drm_hashtab.c +++ b/linux-core/drm_hashtab.c @@ -36,7 +36,7 @@ #include "drm_hashtab.h" #include <linux/hash.h> -int drm_ht_create(drm_open_hash_t * ht, unsigned int order) +int drm_ht_create(struct drm_open_hash * ht, unsigned int order) { unsigned int i; @@ -63,9 +63,9 @@ int drm_ht_create(drm_open_hash_t * ht, unsigned int order) return 0; } -void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) +void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list; unsigned int hashed_key; @@ -75,15 +75,15 @@ void drm_ht_verbose_list(drm_open_hash_t * ht, unsigned long key) DRM_DEBUG("Key is 0x%08lx, Hashed key is 0x%08x\n", key, hashed_key); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); DRM_DEBUG("count %d, key: 0x%08lx\n", count++, entry->key); } } -static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, +static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht, unsigned long key) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list; unsigned int hashed_key; @@ -91,7 +91,7 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, hashed_key = hash_long(key, ht->order); h_list = &ht->table[hashed_key]; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return list; if (entry->key > key) @@ -100,9 +100,9 @@ static struct hlist_node *drm_ht_find_key(drm_open_hash_t * ht, return NULL; } -int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) +int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item) { - drm_hash_item_t *entry; + struct drm_hash_item *entry; struct hlist_head *h_list; struct hlist_node *list, *parent; unsigned int hashed_key; @@ -112,7 +112,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) h_list = &ht->table[hashed_key]; parent = NULL; hlist_for_each(list, h_list) { - entry = hlist_entry(list, drm_hash_item_t, head); + entry = hlist_entry(list, struct drm_hash_item, head); if (entry->key == key) return -EINVAL; if (entry->key > key) @@ -131,7 +131,7 @@ int drm_ht_insert_item(drm_open_hash_t * ht, drm_hash_item_t * item) * Just insert an item and return any "bits" bit key that hasn't been * used before. 
*/ -int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, +int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item, unsigned long seed, int bits, int shift, unsigned long add) { @@ -155,8 +155,8 @@ int drm_ht_just_insert_please(drm_open_hash_t * ht, drm_hash_item_t * item, return 0; } -int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, - drm_hash_item_t ** item) +int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key, + struct drm_hash_item ** item) { struct hlist_node *list; @@ -164,11 +164,11 @@ int drm_ht_find_item(drm_open_hash_t * ht, unsigned long key, if (!list) return -EINVAL; - *item = hlist_entry(list, drm_hash_item_t, head); + *item = hlist_entry(list, struct drm_hash_item, head); return 0; } -int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) +int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key) { struct hlist_node *list; @@ -181,14 +181,14 @@ int drm_ht_remove_key(drm_open_hash_t * ht, unsigned long key) return -EINVAL; } -int drm_ht_remove_item(drm_open_hash_t * ht, drm_hash_item_t * item) +int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item) { hlist_del_init(&item->head); ht->fill--; return 0; } -void drm_ht_remove(drm_open_hash_t * ht) +void drm_ht_remove(struct drm_open_hash * ht) { if (ht->table) { if (ht->use_vmalloc) diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h index 613091c9..0f137677 100644 --- a/linux-core/drm_hashtab.h +++ b/linux-core/drm_hashtab.h @@ -37,31 +37,31 @@ #define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef struct drm_hash_item{ +struct drm_hash_item { struct hlist_node head; unsigned long key; -} drm_hash_item_t; +}; -typedef struct drm_open_hash{ +struct drm_open_hash { unsigned int size; unsigned int order; unsigned int fill; struct hlist_head *table; int use_vmalloc; -} drm_open_hash_t; +}; -extern int drm_ht_create(drm_open_hash_t *ht, unsigned int order); -extern int drm_ht_insert_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern int drm_ht_just_insert_please(drm_open_hash_t *ht, drm_hash_item_t *item, +extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order); +extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item, unsigned long seed, int bits, int shift, unsigned long add); -extern int drm_ht_find_item(drm_open_hash_t *ht, unsigned long key, drm_hash_item_t **item); +extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item); -extern void drm_ht_verbose_list(drm_open_hash_t *ht, unsigned long key); -extern int drm_ht_remove_key(drm_open_hash_t *ht, unsigned long key); -extern int drm_ht_remove_item(drm_open_hash_t *ht, drm_hash_item_t *item); -extern void drm_ht_remove(drm_open_hash_t *ht); +extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key); +extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item); +extern void drm_ht_remove(struct drm_open_hash *ht); #endif diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c index bbab3ea2..0188154e 100644 --- a/linux-core/drm_ioc32.c +++ b/linux-core/drm_ioc32.c @@ -82,7 +82,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd, unsigned long arg) { drm_version32_t v32; - drm_version_t __user 
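The drm_hashtab API converted above is used by embedding a struct drm_hash_item in the object being tracked and recovering the wrapper with drm_hash_entry(). A small usage sketch, assuming roughly 2^order buckets (as the hash_long(key, ht->order) calls suggest); struct my_node and its payload field are invented for illustration:

struct my_node {				/* invented wrapper object */
	struct drm_hash_item hash;		/* key + hlist linkage     */
	void *payload;
};

static int my_table_example(void)
{
	struct drm_open_hash ht;
	struct drm_hash_item *entry;
	struct my_node node;
	int ret;

	ret = drm_ht_create(&ht, 12);		/* presumably 2^12 buckets */
	if (ret)
		return ret;

	node.payload = NULL;
	node.hash.key = 0x1234;			/* caller chooses the key  */
	ret = drm_ht_insert_item(&ht, &node.hash);	/* -EINVAL on duplicates */
	if (ret)
		goto out;

	if (!drm_ht_find_item(&ht, 0x1234, &entry)) {
		struct my_node *found =
			drm_hash_entry(entry, struct my_node, hash);
		found->payload = &ht;		/* ... use the object ...  */
	}

	ret = drm_ht_remove_item(&ht, &node.hash);
out:
	drm_ht_remove(&ht);
	return ret;
}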
*version; + struct drm_version __user *version; int err; if (copy_from_user(&v32, (void __user *)arg, sizeof(v32))) @@ -129,7 +129,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; int err; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) @@ -159,7 +159,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, unsigned long arg) { drm_unique32_t uq32; - drm_unique_t __user *u; + struct drm_unique __user *u; if (copy_from_user(&uq32, (void __user *)arg, sizeof(uq32))) return -EFAULT; @@ -179,8 +179,8 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd, typedef struct drm_map32 { u32 offset; /**< Requested physical address (0 for SAREA)*/ u32 size; /**< Requested physical size (bytes) */ - drm_map_type_t type; /**< Type of memory to map */ - drm_map_flags_t flags; /**< Flags */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ u32 handle; /**< User-space: "Handle" to pass to mmap() */ int mtrr; /**< MTRR slot used */ } drm_map32_t; @@ -190,7 +190,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int idx, err; void *handle; @@ -228,7 +228,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd, { drm_map32_t __user *argp = (void __user *)arg; drm_map32_t m32; - drm_map_t __user *map; + struct drm_map __user *map; int err; void *handle; @@ -270,7 +270,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd, unsigned long arg) { drm_map32_t __user *argp = (void __user *)arg; - drm_map_t __user *map; + struct drm_map __user *map; u32 handle; if (get_user(handle, &argp->handle)) @@ -300,7 +300,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd, { drm_client32_t c32; drm_client32_t __user *argp = (void __user *)arg; - drm_client_t __user *client; + struct drm_client __user *client; int idx, err; if (get_user(idx, &argp->idx)) @@ -333,7 +333,7 @@ typedef struct drm_stats32 { u32 count; struct { u32 value; - drm_stat_type_t type; + enum drm_stat_type type; } data[15]; } drm_stats32_t; @@ -342,7 +342,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd, { drm_stats32_t s32; drm_stats32_t __user *argp = (void __user *)arg; - drm_stats_t __user *stats; + struct drm_stats __user *stats; int i, err; stats = compat_alloc_user_space(sizeof(*stats)); @@ -379,7 +379,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; int err; unsigned long agp_start; @@ -411,7 +411,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd, { drm_buf_desc32_t b32; drm_buf_desc32_t __user *argp = (void __user *)arg; - drm_buf_desc_t __user *buf; + struct drm_buf_desc __user *buf; if (copy_from_user(&b32, argp, sizeof(b32))) return -EFAULT; @@ -440,8 +440,8 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, drm_buf_info32_t req32; drm_buf_info32_t __user *argp = (void __user *)arg; drm_buf_desc32_t __user *to; - drm_buf_info_t __user *request; - drm_buf_desc_t __user *list; + struct drm_buf_info __user *request; + struct drm_buf_desc __user *list; size_t nbytes; int i, err; int count, actual; @@ -457,11 +457,11 @@ 
static int compat_drm_infobufs(struct file *file, unsigned int cmd, && !access_ok(VERIFY_WRITE, to, count * sizeof(drm_buf_desc32_t))) return -EFAULT; - nbytes = sizeof(*request) + count * sizeof(drm_buf_desc_t); + nbytes = sizeof(*request) + count * sizeof(struct drm_buf_desc); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_desc_t *) (request + 1); + list = (struct drm_buf_desc *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -477,7 +477,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&to[i], &list[i], - offsetof(drm_buf_desc_t, flags))) + offsetof(struct drm_buf_desc, flags))) return -EFAULT; if (__put_user(actual, &argp->count)) @@ -505,8 +505,8 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, drm_buf_map32_t __user *argp = (void __user *)arg; drm_buf_map32_t req32; drm_buf_pub32_t __user *list32; - drm_buf_map_t __user *request; - drm_buf_pub_t __user *list; + struct drm_buf_map __user *request; + struct drm_buf_pub __user *list; int i, err; int count, actual; size_t nbytes; @@ -519,11 +519,11 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count < 0) return -EINVAL; - nbytes = sizeof(*request) + count * sizeof(drm_buf_pub_t); + nbytes = sizeof(*request) + count * sizeof(struct drm_buf_pub); request = compat_alloc_user_space(nbytes); if (!access_ok(VERIFY_WRITE, request, nbytes)) return -EFAULT; - list = (drm_buf_pub_t *) (request + 1); + list = (struct drm_buf_pub *) (request + 1); if (__put_user(count, &request->count) || __put_user(list, &request->list)) @@ -539,7 +539,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd, if (count >= actual) for (i = 0; i < actual; ++i) if (__copy_in_user(&list32[i], &list[i], - offsetof(drm_buf_pub_t, address)) + offsetof(struct drm_buf_pub, address)) || __get_user(addr, &list[i].address) || __put_user((unsigned long)addr, &list32[i].address)) @@ -562,7 +562,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd, unsigned long arg) { drm_buf_free32_t req32; - drm_buf_free_t __user *request; + struct drm_buf_free __user *request; drm_buf_free32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -589,7 +589,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, unsigned long arg) { drm_ctx_priv_map32_t req32; - drm_ctx_priv_map_t __user *request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -610,7 +610,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd, static int compat_drm_getsareactx(struct file *file, unsigned int cmd, unsigned long arg) { - drm_ctx_priv_map_t __user *request; + struct drm_ctx_priv_map __user *request; drm_ctx_priv_map32_t __user *argp = (void __user *)arg; int err; unsigned int ctx_id; @@ -648,7 +648,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, { drm_ctx_res32_t __user *argp = (void __user *)arg; drm_ctx_res32_t res32; - drm_ctx_res_t __user *res; + struct drm_ctx_res __user *res; int err; if (copy_from_user(&res32, argp, sizeof(res32))) @@ -658,7 +658,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd, if (!access_ok(VERIFY_WRITE, res, sizeof(*res))) return -EFAULT; if (__put_user(res32.count, 
&res->count) - || __put_user((drm_ctx_t __user *)(unsigned long)res32.contexts, + || __put_user((struct drm_ctx __user *) (unsigned long)res32.contexts, &res->contexts)) return -EFAULT; @@ -679,7 +679,7 @@ typedef struct drm_dma32 { int send_count; /**< Number of buffers to send */ u32 send_indices; /**< List of handles to buffers */ u32 send_sizes; /**< Lengths of data to send */ - drm_dma_flags_t flags; /**< Flags */ + enum drm_dma_flags flags; /**< Flags */ int request_count; /**< Number of buffers requested */ int request_size; /**< Desired size for buffers */ u32 request_indices; /**< Buffer information */ @@ -692,7 +692,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd, { drm_dma32_t d32; drm_dma32_t __user *argp = (void __user *)arg; - drm_dma_t __user *d; + struct drm_dma __user *d; int err; if (copy_from_user(&d32, argp, sizeof(d32))) @@ -740,7 +740,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd, { drm_agp_mode32_t __user *argp = (void __user *)arg; drm_agp_mode32_t m32; - drm_agp_mode_t __user *mode; + struct drm_agp_mode __user *mode; if (get_user(m32.mode, &argp->mode)) return -EFAULT; @@ -772,7 +772,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd, { drm_agp_info32_t __user *argp = (void __user *)arg; drm_agp_info32_t i32; - drm_agp_info_t __user *info; + struct drm_agp_info __user *info; int err; info = compat_alloc_user_space(sizeof(*info)); @@ -813,7 +813,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd, { drm_agp_buffer32_t __user *argp = (void __user *)arg; drm_agp_buffer32_t req32; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -845,7 +845,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_buffer32_t __user *argp = (void __user *)arg; - drm_agp_buffer_t __user *request; + struct drm_agp_buffer __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); @@ -868,7 +868,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd, { drm_agp_binding32_t __user *argp = (void __user *)arg; drm_agp_binding32_t req32; - drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; if (copy_from_user(&req32, argp, sizeof(req32))) return -EFAULT; @@ -887,7 +887,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd, unsigned long arg) { drm_agp_binding32_t __user *argp = (void __user *)arg; - drm_agp_binding_t __user *request; + struct drm_agp_binding __user *request; u32 handle; request = compat_alloc_user_space(sizeof(*request)); @@ -910,7 +910,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; int err; unsigned long x; @@ -938,7 +938,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, unsigned long arg) { drm_scatter_gather32_t __user *argp = (void __user *)arg; - drm_scatter_gather_t __user *request; + struct drm_scatter_gather __user *request; unsigned long x; request = compat_alloc_user_space(sizeof(*request)); @@ -953,13 +953,13 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd, } struct drm_wait_vblank_request32 { - drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; u32 signal; }; struct drm_wait_vblank_reply32 { - 
drm_vblank_seq_type_t type; + enum drm_vblank_seq_type type; unsigned int sequence; s32 tval_sec; s32 tval_usec; @@ -975,7 +975,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd, { drm_wait_vblank32_t __user *argp = (void __user *)arg; drm_wait_vblank32_t req32; - drm_wait_vblank_t __user *request; + union drm_wait_vblank __user *request; int err; if (copy_from_user(&req32, argp, sizeof(req32))) @@ -1040,7 +1040,7 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = { * Called whenever a 32-bit process running under a 64-bit kernel * performs an ioctl on /dev/drm. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument. * \return zero on success or negative number on failure. @@ -1051,8 +1051,13 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) drm_ioctl_compat_t *fn; int ret; + + /* Assume that ioctls without an explicit compat routine will "just + * work". This may not always be a good assumption, but it's better + * than always failing. + */ if (nr >= DRM_ARRAY_SIZE(drm_compat_ioctls)) - return -ENOTTY; + return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); fn = drm_compat_ioctls[nr]; diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c index 97df972f..9d52fd8a 100644 --- a/linux-core/drm_ioctl.c +++ b/linux-core/drm_ioctl.c @@ -42,30 +42,24 @@ * Get the bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_unique structure. * \return zero on success or a negative number on failure. * * Copies the bus id from drm_device::unique into user space. */ -int drm_getunique(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_getunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_unique_t __user *argp = (void __user *)arg; - drm_unique_t u; + struct drm_unique *u = data; - if (copy_from_user(&u, argp, sizeof(u))) - return -EFAULT; - if (u.unique_len >= dev->unique_len) { - if (copy_to_user(u.unique, dev->unique, dev->unique_len)) + if (u->unique_len >= dev->unique_len) { + if (copy_to_user(u->unique, dev->unique, dev->unique_len)) return -EFAULT; } - u.unique_len = dev->unique_len; - if (copy_to_user(argp, &u, sizeof(u))) - return -EFAULT; + u->unique_len = dev->unique_len; + return 0; } @@ -73,7 +67,7 @@ int drm_getunique(struct inode *inode, struct file *filp, * Set the bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_unique structure. * \return zero on success or a negative number on failure. @@ -83,28 +77,23 @@ int drm_getunique(struct inode *inode, struct file *filp, * in interface version 1.1 and will return EBUSY when setversion has requested * version 1.1 or greater. 
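Most drm_ioc32.c handlers above follow one template: copy the packed 32-bit struct in, rebuild a native-layout copy with compat_alloc_user_space(), and forward to the regular ioctl path; the final hunk additionally makes unknown ioctl numbers fall through to drm_ioctl() instead of failing with -ENOTTY. A hypothetical wrapper in that style, assuming the surrounding drm_ioc32.c context (struct drm_example, drm_example32_t and DRM_IOCTL_EXAMPLE are invented names, and the forwarding call mirrors how the existing wrappers appear to hand off, which these hunks do not show in full):

#define DRM_IOCTL_EXAMPLE	0		/* placeholder ioctl number */

struct drm_example {				/* invented native layout   */
	unsigned int size;
	void __user *data;
};

typedef struct drm_example32 {			/* as 32-bit userspace sees it */
	u32 size;
	u32 data_ptr;				/* was: void __user *data   */
} drm_example32_t;

static int compat_drm_example(struct file *file, unsigned int cmd,
			      unsigned long arg)
{
	drm_example32_t req32;
	struct drm_example __user *request;

	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	/* Rebuild a native-layout argument in the 64-bit user address space. */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request)))
		return -EFAULT;
	if (__put_user(req32.size, &request->size)
	    || __put_user((void __user *)(unsigned long)req32.data_ptr,
			  &request->data))
		return -EFAULT;

	/* Hand the translated argument to the normal 64-bit ioctl path. */
	return drm_ioctl(file->f_dentry->d_inode, file, DRM_IOCTL_EXAMPLE,
			 (unsigned long)request);
}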
*/ -int drm_setunique(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_setunique(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_unique_t u; + struct drm_unique *u = data; int domain, bus, slot, func, ret; if (dev->unique_len || dev->unique) return -EBUSY; - if (copy_from_user(&u, (drm_unique_t __user *) arg, sizeof(u))) - return -EFAULT; - - if (!u.unique_len || u.unique_len > 1024) + if (!u->unique_len || u->unique_len > 1024) return -EINVAL; - dev->unique_len = u.unique_len; - dev->unique = drm_alloc(u.unique_len + 1, DRM_MEM_DRIVER); + dev->unique_len = u->unique_len; + dev->unique = drm_alloc(u->unique_len + 1, DRM_MEM_DRIVER); if (!dev->unique) return -ENOMEM; - if (copy_from_user(dev->unique, u.unique, dev->unique_len)) + if (copy_from_user(dev->unique, u->unique, dev->unique_len)) return -EFAULT; dev->unique[dev->unique_len] = '\0'; @@ -121,7 +110,7 @@ int drm_setunique(struct inode *inode, struct file *filp, */ ret = sscanf(dev->unique, "PCI:%d:%d:%d", &bus, &slot, &func); if (ret != 3) - return DRM_ERR(EINVAL); + return -EINVAL; domain = bus >> 8; bus &= 0xff; @@ -134,7 +123,7 @@ int drm_setunique(struct inode *inode, struct file *filp, return 0; } -static int drm_set_busid(drm_device_t * dev) +static int drm_set_busid(struct drm_device * dev) { int len; if (dev->unique != NULL) @@ -167,7 +156,7 @@ static int drm_set_busid(drm_device_t * dev) * Get a mapping information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_map structure. * @@ -176,21 +165,16 @@ static int drm_set_busid(drm_device_t * dev) * Searches for the mapping with the specified offset and copies its information * into userspace */ -int drm_getmap(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_getmap(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t __user *argp = (void __user *)arg; - drm_map_t map; - drm_map_list_t *r_list = NULL; + struct drm_map *map = data; + struct drm_map_list *r_list = NULL; struct list_head *list; int idx; int i; - if (copy_from_user(&map, argp, sizeof(map))) - return -EFAULT; - idx = map.offset; + idx = map->offset; mutex_lock(&dev->struct_mutex); if (idx < 0) { @@ -201,7 +185,7 @@ int drm_getmap(struct inode *inode, struct file *filp, i = 0; list_for_each(list, &dev->maplist) { if (i == idx) { - r_list = list_entry(list, drm_map_list_t, head); + r_list = list_entry(list, struct drm_map_list, head); break; } i++; @@ -211,16 +195,14 @@ int drm_getmap(struct inode *inode, struct file *filp, return -EINVAL; } - map.offset = r_list->map->offset; - map.size = r_list->map->size; - map.type = r_list->map->type; - map.flags = r_list->map->flags; - map.handle = (void *)(unsigned long) r_list->user_token; - map.mtrr = r_list->map->mtrr; + map->offset = r_list->map->offset; + map->size = r_list->map->size; + map->type = r_list->map->type; + map->flags = r_list->map->flags; + map->handle = (void *)(unsigned long) r_list->user_token; + map->mtrr = r_list->map->mtrr; mutex_unlock(&dev->struct_mutex); - if (copy_to_user(argp, &map, sizeof(map))) - return -EFAULT; return 0; } @@ -228,7 +210,7 @@ int drm_getmap(struct inode *inode, struct file *filp, * Get client information. 
* * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_client structure. * @@ -237,82 +219,68 @@ int drm_getmap(struct inode *inode, struct file *filp, * Searches for the client with the specified index and copies its information * into userspace */ -int drm_getclient(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_getclient(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_client_t __user *argp = (drm_client_t __user *)arg; - drm_client_t client; - drm_file_t *pt; + struct drm_client *client = data; + struct drm_file *pt; int idx; int i; - if (copy_from_user(&client, argp, sizeof(client))) - return -EFAULT; - idx = client.idx; + idx = client->idx; mutex_lock(&dev->struct_mutex); - - if (list_empty(&dev->filelist)) { - mutex_unlock(&dev->struct_mutex); - return -EINVAL; - } i = 0; list_for_each_entry(pt, &dev->filelist, lhead) { - if (i++ >= idx) - break; + if (i++ >= idx) { + client->auth = pt->authenticated; + client->pid = pt->pid; + client->uid = pt->uid; + client->magic = pt->magic; + client->iocs = pt->ioctl_count; + mutex_unlock(&dev->struct_mutex); + + return 0; + } } - - client.auth = pt->authenticated; - client.pid = pt->pid; - client.uid = pt->uid; - client.magic = pt->magic; - client.iocs = pt->ioctl_count; mutex_unlock(&dev->struct_mutex); - if (copy_to_user(argp, &client, sizeof(client))) - return -EFAULT; - return 0; + return -EINVAL; } /** * Get statistics information. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_stats structure. * * \return zero on success or a negative number on failure. */ -int drm_getstats(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_getstats(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_stats_t stats; + struct drm_stats *stats = data; int i; - memset(&stats, 0, sizeof(stats)); + memset(stats, 0, sizeof(stats)); mutex_lock(&dev->struct_mutex); for (i = 0; i < dev->counters; i++) { if (dev->types[i] == _DRM_STAT_LOCK) - stats.data[i].value - = (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); + stats->data[i].value = + (dev->lock.hw_lock ? dev->lock.hw_lock->lock : 0); else - stats.data[i].value = atomic_read(&dev->counts[i]); - stats.data[i].type = dev->types[i]; + stats->data[i].value = atomic_read(&dev->counts[i]); + stats->data[i].type = dev->types[i]; } - stats.count = dev->counters; + stats->count = dev->counters; mutex_unlock(&dev->struct_mutex); - if (copy_to_user((drm_stats_t __user *) arg, &stats, sizeof(stats))) - return -EFAULT; return 0; } @@ -320,39 +288,28 @@ int drm_getstats(struct inode *inode, struct file *filp, * Setversion ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. 
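The drm_ioctl.c conversions above all drop their copy_from_user()/copy_to_user() calls, which suggests the ioctl dispatcher now copies the user argument into the kernel buffer passed as 'data' and copies it back out after the handler returns. Under that assumption, a new-style handler reduces to a cast and direct field access; struct drm_example_info and the handler below are invented for illustration:

struct drm_example_info {			/* made-up ioctl argument */
	unsigned int handle;
	unsigned int flags;
};

static int example_info_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_example_info *info = data;	/* already copied in by the core */

	if (!info->handle)
		return -EINVAL;

	/* Whatever is written here goes back to userspace on return. */
	info->flags = file_priv->authenticated;
	return 0;
}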
* * Sets the requested interface version */ -int drm_setversion(DRM_IOCTL_ARGS) +int drm_setversion(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_set_version_t sv; - drm_set_version_t retv; - int if_version; - drm_set_version_t __user *argp = (void __user *)data; - - if (copy_from_user(&sv, argp, sizeof(sv))) - return -EFAULT; - - retv.drm_di_major = DRM_IF_MAJOR; - retv.drm_di_minor = DRM_IF_MINOR; - retv.drm_dd_major = dev->driver->major; - retv.drm_dd_minor = dev->driver->minor; - - if (copy_to_user(argp, &retv, sizeof(retv))) - return -EFAULT; - - if (sv.drm_di_major != -1) { - if (sv.drm_di_major != DRM_IF_MAJOR || - sv.drm_di_minor < 0 || sv.drm_di_minor > DRM_IF_MINOR) - return -EINVAL; - if_version = DRM_IF_VERSION(sv.drm_di_major, sv.drm_di_minor); + struct drm_set_version *sv = data; + int if_version, retcode = 0; + + if (sv->drm_di_major != -1) { + if (sv->drm_di_major != DRM_IF_MAJOR || + sv->drm_di_minor < 0 || sv->drm_di_minor > DRM_IF_MINOR) { + retcode = -EINVAL; + goto done; + } + if_version = DRM_IF_VERSION(sv->drm_di_major, + sv->drm_di_minor); dev->if_version = max(if_version, dev->if_version); - if (sv.drm_di_minor >= 1) { + if (sv->drm_di_minor >= 1) { /* * Version 1.1 includes tying of DRM to specific device */ @@ -360,20 +317,30 @@ int drm_setversion(DRM_IOCTL_ARGS) } } - if (sv.drm_dd_major != -1) { - if (sv.drm_dd_major != dev->driver->major || - sv.drm_dd_minor < 0 || sv.drm_dd_minor > dev->driver->minor) - return -EINVAL; + if (sv->drm_dd_major != -1) { + if (sv->drm_dd_major != dev->driver->major || + sv->drm_dd_minor < 0 || sv->drm_dd_minor > + dev->driver->minor) { + retcode = -EINVAL; + goto done; + } if (dev->driver->set_version) - dev->driver->set_version(dev, &sv); + dev->driver->set_version(dev, sv); } - return 0; + +done: + sv->drm_di_major = DRM_IF_MAJOR; + sv->drm_di_minor = DRM_IF_MINOR; + sv->drm_dd_major = dev->driver->major; + sv->drm_dd_minor = dev->driver->minor; + + return retcode; } /** No-op ioctl. */ -int drm_noop(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +int drm_noop(struct drm_device *dev, void *data, + struct drm_file *file_priv) { DRM_DEBUG("\n"); return 0; diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 88716712..25166b6f 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -41,7 +41,7 @@ * Get interrupt from bus id. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_irq_busid structure. * \return zero on success or a negative number on failure. @@ -50,30 +50,24 @@ * This IOCTL is deprecated, and will now return EINVAL for any busid not equal * to that of the device that this DRM instance attached to. 
*/ -int drm_irq_by_busid(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_irq_by_busid(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_irq_busid_t __user *argp = (void __user *)arg; - drm_irq_busid_t p; + struct drm_irq_busid *p = data; if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return -EINVAL; - if (copy_from_user(&p, argp, sizeof(p))) - return -EFAULT; - - if ((p.busnum >> 8) != drm_get_pci_domain(dev) || - (p.busnum & 0xff) != dev->pdev->bus->number || - p.devnum != PCI_SLOT(dev->pdev->devfn) || p.funcnum != PCI_FUNC(dev->pdev->devfn)) + if ((p->busnum >> 8) != drm_get_pci_domain(dev) || + (p->busnum & 0xff) != dev->pdev->bus->number || + p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) return -EINVAL; - p.irq = dev->irq; + p->irq = dev->irq; + + DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, + p->irq); - DRM_DEBUG("%d:%d:%d => IRQ %d\n", p.busnum, p.devnum, p.funcnum, p.irq); - if (copy_to_user(argp, &p, sizeof(p))) - return -EFAULT; return 0; } @@ -86,7 +80,7 @@ int drm_irq_by_busid(struct inode *inode, struct file *filp, * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions * before and after the installation. */ -static int drm_irq_install(drm_device_t * dev) +int drm_irq_install(struct drm_device * dev) { int ret; unsigned long sh_flags = 0; @@ -146,6 +140,7 @@ static int drm_irq_install(drm_device_t * dev) return 0; } +EXPORT_SYMBOL(drm_irq_install); /** * Uninstall the IRQ handler. @@ -154,7 +149,7 @@ static int drm_irq_install(drm_device_t * dev) * * Calls the driver's \c drm_driver_irq_uninstall() function, and stops the irq. */ -int drm_irq_uninstall(drm_device_t * dev) +int drm_irq_uninstall(struct drm_device * dev) { int irq_enabled; @@ -185,31 +180,27 @@ EXPORT_SYMBOL(drm_irq_uninstall); * IRQ control ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_control structure. * \return zero on success or a negative number on failure. * * Calls irq_install() or irq_uninstall() according to \p arg. */ -int drm_control(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_control(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_control_t ctl; + struct drm_control *ctl = data; /* if we haven't irq we fallback for compatibility reasons - this used to be a separate function in drm_dma.h */ - if (copy_from_user(&ctl, (drm_control_t __user *) arg, sizeof(ctl))) - return -EFAULT; - switch (ctl.func) { + switch (ctl->func) { case DRM_INST_HANDLER: if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) return 0; if (dev->if_version < DRM_IF_VERSION(1, 2) && - ctl.irq != dev->irq) + ctl->irq != dev->irq) return -EINVAL; return drm_irq_install(dev); case DRM_UNINST_HANDLER: @@ -225,7 +216,7 @@ int drm_control(struct inode *inode, struct file *filp, * Wait for VBLANK. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param data user argument, pointing to a drm_wait_vblank structure. * \return zero on success or a negative number on failure. 
@@ -240,12 +231,9 @@ int drm_control(struct inode *inode, struct file *filp, * * If a signal is not requested, then calls vblank_wait(). */ -int drm_wait_vblank(DRM_IOCTL_ARGS) +int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_wait_vblank_t __user *argp = (void __user *)data; - drm_wait_vblank_t vblwait; + union drm_wait_vblank *vblwait = data; struct timeval now; int ret = 0; unsigned int flags, seq; @@ -253,18 +241,15 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if ((!dev->irq) || (!dev->irq_enabled)) return -EINVAL; - if (copy_from_user(&vblwait, argp, sizeof(vblwait))) - return -EFAULT; - - if (vblwait.request.type & + if (vblwait->request.type & ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) { DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n", - vblwait.request.type, + vblwait->request.type, (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)); return -EINVAL; } - flags = vblwait.request.type & _DRM_VBLANK_FLAGS_MASK; + flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) @@ -273,10 +258,10 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 : &dev->vbl_received); - switch (vblwait.request.type & _DRM_VBLANK_TYPES_MASK) { + switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { case _DRM_VBLANK_RELATIVE: - vblwait.request.sequence += seq; - vblwait.request.type &= ~_DRM_VBLANK_RELATIVE; + vblwait->request.sequence += seq; + vblwait->request.type &= ~_DRM_VBLANK_RELATIVE; case _DRM_VBLANK_ABSOLUTE: break; default: @@ -284,15 +269,15 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) } if ((flags & _DRM_VBLANK_NEXTONMISS) && - (seq - vblwait.request.sequence) <= (1<<23)) { - vblwait.request.sequence = seq + 1; + (seq - vblwait->request.sequence) <= (1<<23)) { + vblwait->request.sequence = seq + 1; } if (flags & _DRM_VBLANK_SIGNAL) { unsigned long irqflags; struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_sigs2 : &dev->vbl_sigs; - drm_vbl_sig_t *vbl_sig; + struct drm_vbl_sig *vbl_sig; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -301,12 +286,13 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) * that case */ list_for_each_entry(vbl_sig, vbl_sigs, head) { - if (vbl_sig->sequence == vblwait.request.sequence - && vbl_sig->info.si_signo == vblwait.request.signal + if (vbl_sig->sequence == vblwait->request.sequence + && vbl_sig->info.si_signo == + vblwait->request.signal && vbl_sig->task == current) { spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - vblwait.reply.sequence = seq; + vblwait->reply.sequence = seq; goto done; } } @@ -322,14 +308,14 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) if (! 
(vbl_sig = - drm_alloc(sizeof(drm_vbl_sig_t), DRM_MEM_DRIVER))) { + drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { return -ENOMEM; } memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); - vbl_sig->sequence = vblwait.request.sequence; - vbl_sig->info.si_signo = vblwait.request.signal; + vbl_sig->sequence = vblwait->request.sequence; + vbl_sig->info.si_signo = vblwait->request.signal; vbl_sig->task = current; spin_lock_irqsave(&dev->vbl_lock, irqflags); @@ -338,25 +324,22 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - vblwait.reply.sequence = seq; + vblwait->reply.sequence = seq; } else { if (flags & _DRM_VBLANK_SECONDARY) { if (dev->driver->vblank_wait2) - ret = dev->driver->vblank_wait2(dev, &vblwait.request.sequence); + ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); } else if (dev->driver->vblank_wait) ret = dev->driver->vblank_wait(dev, - &vblwait.request.sequence); + &vblwait->request.sequence); do_gettimeofday(&now); - vblwait.reply.tval_sec = now.tv_sec; - vblwait.reply.tval_usec = now.tv_usec; + vblwait->reply.tval_sec = now.tv_sec; + vblwait->reply.tval_usec = now.tv_usec; } done: - if (copy_to_user(argp, &vblwait, sizeof(vblwait))) - return -EFAULT; - return ret; } @@ -369,7 +352,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS) * * If a signal is not requested, then calls vblank_wait(). */ -void drm_vbl_send_signals(drm_device_t * dev) +void drm_vbl_send_signals(struct drm_device * dev) { unsigned long flags; int i; @@ -377,7 +360,7 @@ void drm_vbl_send_signals(drm_device_t * dev) spin_lock_irqsave(&dev->vbl_lock, flags); for (i = 0; i < 2; i++) { - drm_vbl_sig_t *vbl_sig, *tmp; + struct drm_vbl_sig *vbl_sig, *tmp; struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs; unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 : &dev->vbl_received); @@ -413,7 +396,7 @@ EXPORT_SYMBOL(drm_vbl_send_signals); */ static void drm_locked_tasklet_func(unsigned long data) { - drm_device_t *dev = (drm_device_t*)data; + struct drm_device *dev = (struct drm_device*)data; unsigned long irqflags; spin_lock_irqsave(&dev->tasklet_lock, irqflags); @@ -450,7 +433,7 @@ static void drm_locked_tasklet_func(unsigned long data) * context, it must not make any assumptions about this. Also, the HW lock will * be held with the kernel context or any client context. */ -void drm_locked_tasklet(drm_device_t *dev, void (*func)(drm_device_t*)) +void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*)) { unsigned long irqflags; static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0); diff --git a/linux-core/drm_lock.c b/linux-core/drm_lock.c index f02df36b..b8e4a5d9 100644 --- a/linux-core/drm_lock.c +++ b/linux-core/drm_lock.c @@ -41,39 +41,33 @@ static int drm_notifier(void *priv); * Lock ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Add the current task to the lock wait queue, and attempt to take to lock. 
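The _DRM_VBLANK_NEXTONMISS handling above relies on unsigned wraparound: a requested sequence counts as already missed when it trails the current counter by no more than 2^23, in which case the request is bumped to seq + 1. The comparison in isolation, mirroring the kernel expression with the threshold interpretation spelled out:

/* Non-zero when 'requested' is at or behind 'current_seq' by at most 2^23
 * frames; larger differences are treated as sequences still in the future
 * (possibly because the 32-bit counter wrapped). */
static int vblank_seq_passed(unsigned int current_seq, unsigned int requested)
{
	return (current_seq - requested) <= (1 << 23);
}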
*/ -int drm_lock(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; DECLARE_WAITQUEUE(entry, current); - drm_lock_t lock; + struct drm_lock *lock = data; int ret = 0; - ++priv->lock_count; + ++file_priv->lock_count; - if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) - return -EFAULT; - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - current->pid, lock.context); + current->pid, lock->context); return -EINVAL; } DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n", - lock.context, current->pid, - dev->lock.hw_lock->lock, lock.flags); + lock->context, current->pid, + dev->lock.hw_lock->lock, lock->flags); if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE)) - if (lock.context < 0) + if (lock->context < 0) return -EINVAL; add_wait_queue(&dev->lock.lock_queue, &entry); @@ -87,8 +81,8 @@ int drm_lock(struct inode *inode, struct file *filp, ret = -EINTR; break; } - if (drm_lock_take(&dev->lock, lock.context)) { - dev->lock.filp = filp; + if (drm_lock_take(&dev->lock, lock->context)) { + dev->lock.file_priv = file_priv; dev->lock.lock_time = jiffies; atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); break; /* Got lock */ @@ -107,7 +101,8 @@ int drm_lock(struct inode *inode, struct file *filp, __set_current_state(TASK_RUNNING); remove_wait_queue(&dev->lock.lock_queue, &entry); - DRM_DEBUG( "%d %s\n", lock.context, ret ? "interrupted" : "has lock" ); + DRM_DEBUG("%d %s\n", lock->context, + ret ? "interrupted" : "has lock"); if (ret) return ret; sigemptyset(&dev->sigmask); @@ -115,24 +110,26 @@ int drm_lock(struct inode *inode, struct file *filp, sigaddset(&dev->sigmask, SIGTSTP); sigaddset(&dev->sigmask, SIGTTIN); sigaddset(&dev->sigmask, SIGTTOU); - dev->sigdata.context = lock.context; + dev->sigdata.context = lock->context; dev->sigdata.lock = dev->lock.hw_lock; block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask); - if (dev->driver->dma_ready && (lock.flags & _DRM_LOCK_READY)) + if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY)) dev->driver->dma_ready(dev); - if (dev->driver->dma_quiescent && (lock.flags & _DRM_LOCK_QUIESCENT)) { + if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT)) + { if (dev->driver->dma_quiescent(dev)) { - DRM_DEBUG( "%d waiting for DMA quiescent\n", lock.context); - return DRM_ERR(EBUSY); + DRM_DEBUG("%d waiting for DMA quiescent\n", + lock->context); + return -EBUSY; } } if (dev->driver->kernel_context_switch && - dev->last_context != lock.context) { + dev->last_context != lock->context) { dev->driver->kernel_context_switch(dev, dev->last_context, - lock.context); + lock->context); } return 0; @@ -142,27 +139,21 @@ int drm_lock(struct inode *inode, struct file *filp, * Unlock ioctl. * * \param inode device inode. - * \param filp file pointer. + * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Transfer and free the lock. 
*/ -int drm_unlock(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_lock_t lock; + struct drm_lock *lock = data; unsigned long irqflags; - if (copy_from_user(&lock, (drm_lock_t __user *) arg, sizeof(lock))) - return -EFAULT; - - if (lock.context == DRM_KERNEL_CONTEXT) { + if (lock->context == DRM_KERNEL_CONTEXT) { DRM_ERROR("Process %d using kernel context %d\n", - current->pid, lock.context); + current->pid, lock->context); return -EINVAL; } @@ -184,7 +175,7 @@ int drm_unlock(struct inode *inode, struct file *filp, if (dev->driver->kernel_context_switch_unlock) dev->driver->kernel_context_switch_unlock(dev); else { - if (drm_lock_free(&dev->lock,lock.context)) { + if (drm_lock_free(&dev->lock,lock->context)) { /* FIXME: Should really bail out here. */ } } @@ -202,7 +193,7 @@ int drm_unlock(struct inode *inode, struct file *filp, * * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction. */ -int drm_lock_take(drm_lock_data_t *lock_data, +int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; @@ -252,13 +243,13 @@ int drm_lock_take(drm_lock_data_t *lock_data, * Resets the lock file pointer. * Marks the lock as held by the given context, via the \p cmpxchg instruction. */ -static int drm_lock_transfer(drm_lock_data_t *lock_data, +static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; - lock_data->filp = NULL; + lock_data->file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; @@ -278,7 +269,7 @@ static int drm_lock_transfer(drm_lock_data_t *lock_data, * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task * waiting on the lock queue. */ -int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) +int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; @@ -320,7 +311,7 @@ int drm_lock_free(drm_lock_data_t *lock_data, unsigned int context) */ static int drm_notifier(void *priv) { - drm_sigdata_t *s = (drm_sigdata_t *) priv; + struct drm_sigdata *s = (struct drm_sigdata *) priv; unsigned int old, new, prev; /* Allow signal delivery if lock isn't held */ @@ -351,7 +342,7 @@ static int drm_notifier(void *priv) * having to worry about starvation. 
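drm_lock_transfer() above, and drm_lock_take()/drm_lock_free() whose bodies are only partly visible in these hunks, all update the shared hardware-lock word with the same cmpxchg retry loop, since userspace maps that word as well. The bare pattern, stripped of the context and _DRM_LOCK_HELD handling the real functions add:

static unsigned int lock_word_update(volatile unsigned int *lock,
				     unsigned int new)
{
	unsigned int old, prev;

	do {
		old = *lock;			/* snapshot current contents  */
		prev = cmpxchg(lock, old, new);	/* install only if unchanged  */
	} while (prev != old);			/* raced with another updater */

	return old;				/* e.g. to test _DRM_LOCK_HELD */
}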
*/ -void drm_idlelock_take(drm_lock_data_t *lock_data) +void drm_idlelock_take(struct drm_lock_data *lock_data) { int ret = 0; @@ -370,7 +361,7 @@ void drm_idlelock_take(drm_lock_data_t *lock_data) } EXPORT_SYMBOL(drm_idlelock_take); -void drm_idlelock_release(drm_lock_data_t *lock_data) +void drm_idlelock_release(struct drm_lock_data *lock_data) { unsigned int old, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; @@ -391,13 +382,12 @@ void drm_idlelock_release(drm_lock_data_t *lock_data) EXPORT_SYMBOL(drm_idlelock_release); -int drm_i_have_hw_lock(struct file *filp) +int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { - DRM_DEVICE; - return (priv->lock_count && dev->lock.hw_lock && + return (file_priv->lock_count && dev->lock.hw_lock && _DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) && - dev->lock.filp == filp); + dev->lock.file_priv == file_priv); } EXPORT_SYMBOL(drm_i_have_hw_lock); diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index b1423c12..f68a3a3e 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -38,9 +38,9 @@ static struct { spinlock_t lock; - drm_u64_t cur_used; - drm_u64_t low_threshold; - drm_u64_t high_threshold; + uint64_t cur_used; + uint64_t low_threshold; + uint64_t high_threshold; } drm_memctl = { .lock = SPIN_LOCK_UNLOCKED }; @@ -82,9 +82,9 @@ void drm_free_memctl(size_t size) } EXPORT_SYMBOL(drm_free_memctl); -void drm_query_memctl(drm_u64_t *cur_used, - drm_u64_t *low_threshold, - drm_u64_t *high_threshold) +void drm_query_memctl(uint64_t *cur_used, + uint64_t *low_threshold, + uint64_t *high_threshold) { spin_lock(&drm_memctl.lock); *cur_used = drm_memctl.cur_used; @@ -214,7 +214,7 @@ void drm_free_pages(unsigned long address, int order, int area) #if __OS_HAS_AGP static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { unsigned long *phys_addr_map, i, num_pages = PAGE_ALIGN(size) / PAGE_SIZE; @@ -258,12 +258,12 @@ static void *agp_remap(unsigned long offset, unsigned long size, /** Wrapper around agp_allocate_memory() */ #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,11) -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(pages, type); } #else -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { return drm_agp_allocate_memory(dev->agp->bridge, pages, type); } @@ -289,7 +289,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) #else /* __OS_HAS_AGP*/ static void *agp_remap(unsigned long offset, unsigned long size, - drm_device_t * dev) + struct drm_device * dev) { return NULL; } diff --git a/linux-core/drm_memory_debug.c b/linux-core/drm_memory_debug.c index c124f8f8..c196ee2b 100644 --- a/linux-core/drm_memory_debug.c +++ b/linux-core/drm_memory_debug.c @@ -291,7 +291,7 @@ void drm_free_pages(unsigned long address, int order, int area) #if __OS_HAS_AGP -DRM_AGP_MEM *drm_alloc_agp(drm_device_t *dev, int pages, u32 type) +DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type) { DRM_AGP_MEM *handle; diff --git a/linux-core/drm_memory_debug.h b/linux-core/drm_memory_debug.h index 9d0dedfb..b055ac00 100644 --- a/linux-core/drm_memory_debug.h +++ b/linux-core/drm_memory_debug.h @@ -277,7 +277,7 @@ void drm_free_pages (unsigned long address, int order, int area) { #if __OS_HAS_AGP -DRM_AGP_MEM *drm_alloc_agp (drm_device_t *dev, 
int pages, u32 type) { +DRM_AGP_MEM *drm_alloc_agp (struct drm_device *dev, int pages, u32 type) { DRM_AGP_MEM *handle; if (!pages) { diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c index 2caf596b..cf0d92fa 100644 --- a/linux-core/drm_mm.c +++ b/linux-core/drm_mm.c @@ -44,26 +44,26 @@ #include "drmP.h" #include <linux/slab.h> -unsigned long drm_mm_tail_space(drm_mm_t *mm) +unsigned long drm_mm_tail_space(struct drm_mm *mm) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return 0; return entry->size; } -int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return -ENOMEM; @@ -75,13 +75,13 @@ int drm_mm_remove_space_from_tail(drm_mm_t *mm, unsigned long size) } -static int drm_mm_create_tail_node(drm_mm_t *mm, +static int drm_mm_create_tail_node(struct drm_mm *mm, unsigned long start, unsigned long size) { - drm_mm_node_t *child; + struct drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -98,13 +98,13 @@ static int drm_mm_create_tail_node(drm_mm_t *mm, } -int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) +int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - drm_mm_node_t *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, drm_mm_node_t, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) { return drm_mm_create_tail_node(mm, entry->start + entry->size, size); } @@ -112,12 +112,12 @@ int drm_mm_add_space_to_tail(drm_mm_t *mm, unsigned long size) return 0; } -static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, +static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, unsigned long size) { - drm_mm_node_t *child; + struct drm_mm_node *child; - child = (drm_mm_node_t *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -137,12 +137,12 @@ static drm_mm_node_t *drm_mm_split_at_start(drm_mm_node_t *parent, return child; } -drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, +struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment) { - drm_mm_node_t *align_splitoff = NULL; - drm_mm_node_t *child; + struct drm_mm_node *align_splitoff = NULL; + struct drm_mm_node *child; unsigned tmp = 0; if (alignment) @@ -173,26 +173,26 @@ drm_mm_node_t *drm_mm_get_block(drm_mm_node_t * parent, * Otherwise add to the free stack. 
*/ -void drm_mm_put_block(drm_mm_node_t * cur) +void drm_mm_put_block(struct drm_mm_node * cur) { - drm_mm_t *mm = cur->mm; + struct drm_mm *mm = cur->mm; struct list_head *cur_head = &cur->ml_entry; struct list_head *root_head = &mm->ml_entry; - drm_mm_node_t *prev_node = NULL; - drm_mm_node_t *next_node; + struct drm_mm_node *prev_node = NULL; + struct drm_mm_node *next_node; int merged = 0; if (cur_head->prev != root_head) { - prev_node = list_entry(cur_head->prev, drm_mm_node_t, ml_entry); + prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); if (prev_node->free) { prev_node->size += cur->size; merged = 1; } } if (cur_head->next != root_head) { - next_node = list_entry(cur_head->next, drm_mm_node_t, ml_entry); + next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); if (next_node->free) { if (merged) { prev_node->size += next_node->size; @@ -217,14 +217,14 @@ void drm_mm_put_block(drm_mm_node_t * cur) } EXPORT_SYMBOL(drm_mm_put_block); -drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, +struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, unsigned long size, unsigned alignment, int best_match) { struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; - drm_mm_node_t *entry; - drm_mm_node_t *best; + struct drm_mm_node *entry; + struct drm_mm_node *best; unsigned long best_size; unsigned wasted; @@ -232,7 +232,7 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, best_size = ~0UL; list_for_each(list, free_stack) { - entry = list_entry(list, drm_mm_node_t, fl_entry); + entry = list_entry(list, struct drm_mm_node, fl_entry); wasted = 0; if (entry->size < size) @@ -258,14 +258,14 @@ drm_mm_node_t *drm_mm_search_free(const drm_mm_t * mm, return best; } -int drm_mm_clean(drm_mm_t * mm) +int drm_mm_clean(struct drm_mm * mm) { struct list_head *head = &mm->ml_entry; return (head->next->next == head); } -int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) +int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->ml_entry); INIT_LIST_HEAD(&mm->fl_entry); @@ -275,12 +275,12 @@ int drm_mm_init(drm_mm_t * mm, unsigned long start, unsigned long size) EXPORT_SYMBOL(drm_mm_init); -void drm_mm_takedown(drm_mm_t * mm) +void drm_mm_takedown(struct drm_mm * mm) { struct list_head *bnode = mm->fl_entry.next; - drm_mm_node_t *entry; + struct drm_mm_node *entry; - entry = list_entry(bnode, drm_mm_node_t, fl_entry); + entry = list_entry(bnode, struct drm_mm_node, fl_entry); if (entry->ml_entry.next != &mm->ml_entry || entry->fl_entry.next != &mm->fl_entry) { diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c index 567a7d2b..3d866333 100644 --- a/linux-core/drm_object.c +++ b/linux-core/drm_object.c @@ -30,10 +30,10 @@ #include "drmP.h" -int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -51,12 +51,12 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, return 0; } -drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) +struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key) { - drm_device_t *dev = priv->head->dev; - drm_hash_item_t *hash; + struct drm_device *dev = priv->head->dev; + struct drm_hash_item *hash; int ret; - drm_user_object_t *item; + 
struct drm_user_object *item; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -64,10 +64,10 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) if (ret) { return NULL; } - item = drm_hash_entry(hash, drm_user_object_t, hash); + item = drm_hash_entry(hash, struct drm_user_object, hash); if (priv != item->owner) { - drm_open_hash_t *ht = &priv->refd_object_hash[_DRM_REF_USE]; + struct drm_open_hash *ht = &priv->refd_object_hash[_DRM_REF_USE]; ret = drm_ht_find_item(ht, (unsigned long)item, &hash); if (ret) { DRM_ERROR("Object not registered for usage\n"); @@ -77,9 +77,9 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key) return item; } -static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) +static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item) { - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = priv->head->dev; int ret; if (atomic_dec_and_test(&item->refcount)) { @@ -90,7 +90,7 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item) } } -int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) +int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item) { DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -105,8 +105,8 @@ int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item) return 0; } -static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, - drm_ref_t action) +static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro, + enum drm_ref_type action) { int ret = 0; @@ -124,12 +124,12 @@ static int drm_object_ref_action(drm_file_t * priv, drm_user_object_t * ro, return ret; } -int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, - drm_ref_t ref_action) +int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object, + enum drm_ref_type ref_action) { int ret = 0; - drm_ref_object_t *item; - drm_open_hash_t *ht = &priv->refd_object_hash[ref_action]; + struct drm_ref_object *item; + struct drm_open_hash *ht = &priv->refd_object_hash[ref_action]; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); if (!referenced_object->shareable && priv != referenced_object->owner) { @@ -181,11 +181,11 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object, return ret; } -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, - drm_user_object_t * referenced_object, - drm_ref_t ref_action) +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, + enum drm_ref_type ref_action) { - drm_hash_item_t *hash; + struct drm_hash_item *hash; int ret; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); @@ -194,32 +194,32 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, if (ret) return NULL; - return drm_hash_entry(hash, drm_ref_object_t, hash); + return drm_hash_entry(hash, struct drm_ref_object, hash); } -static void drm_remove_other_references(drm_file_t * priv, - drm_user_object_t * ro) +static void drm_remove_other_references(struct drm_file * priv, + struct drm_user_object * ro) { int i; - drm_open_hash_t *ht; - drm_hash_item_t *hash; - drm_ref_object_t *item; + struct drm_open_hash *ht; + struct drm_hash_item *hash; + struct drm_ref_object *item; for (i = _DRM_REF_USE + 1; i < _DRM_NO_REF_TYPES; ++i) { ht = &priv->refd_object_hash[i]; while (!drm_ht_find_item(ht, (unsigned long)ro, &hash)) { - item = 
drm_hash_entry(hash, drm_ref_object_t, hash); + item = drm_hash_entry(hash, struct drm_ref_object, hash); drm_remove_ref_object(priv, item); } } } -void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) +void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item) { int ret; - drm_user_object_t *user_object = (drm_user_object_t *) item->hash.key; - drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action]; - drm_ref_t unref_action; + struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key; + struct drm_open_hash *ht = &priv->refd_object_hash[item->unref_action]; + enum drm_ref_type unref_action; DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex); unref_action = item->unref_action; @@ -244,12 +244,12 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item) } -int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type, drm_user_object_t ** object) +int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type, struct drm_user_object ** object) { - drm_device_t *dev = priv->head->dev; - drm_user_object_t *uo; - drm_hash_item_t *hash; + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_hash_item *hash; int ret; mutex_lock(&dev->struct_mutex); @@ -258,7 +258,7 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, DRM_ERROR("Could not find user object to reference.\n"); goto out_err; } - uo = drm_hash_entry(hash, drm_user_object_t, hash); + uo = drm_hash_entry(hash, struct drm_user_object, hash); if (uo->type != type) { ret = -EINVAL; goto out_err; @@ -274,12 +274,12 @@ int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, return ret; } -int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type) +int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type) { - drm_device_t *dev = priv->head->dev; - drm_user_object_t *uo; - drm_ref_object_t *ro; + struct drm_device *dev = priv->head->dev; + struct drm_user_object *uo; + struct drm_ref_object *ro; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index e0716aa4..2ddbe74b 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -39,14 +39,14 @@ struct drm_device; #define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member) -typedef enum { +enum drm_object_type { drm_fence_type, drm_buffer_type, drm_ttm_type /* * Add other user space object types here. */ -} drm_object_type_t; +}; /* * A user object is a structure that helps the drm give out user handles @@ -55,20 +55,20 @@ typedef enum { * Designed to be accessible using a user space 32-bit handle. 
*/ -typedef struct drm_user_object { - drm_hash_item_t hash; +struct drm_user_object { + struct drm_hash_item hash; struct list_head list; - drm_object_type_t type; + enum drm_object_type type; atomic_t refcount; int shareable; - drm_file_t *owner; - void (*ref_struct_locked) (drm_file_t * priv, + struct drm_file *owner; + void (*ref_struct_locked) (struct drm_file * priv, struct drm_user_object * obj, - drm_ref_t ref_action); - void (*unref) (drm_file_t * priv, struct drm_user_object * obj, - drm_ref_t unref_action); - void (*remove) (drm_file_t * priv, struct drm_user_object * obj); -} drm_user_object_t; + enum drm_ref_type ref_action); + void (*unref) (struct drm_file * priv, struct drm_user_object * obj, + enum drm_ref_type unref_action); + void (*remove) (struct drm_file * priv, struct drm_user_object * obj); +}; /* * A ref object is a structure which is used to @@ -77,24 +77,24 @@ typedef struct drm_user_object { * process exits. Designed to be accessible using a pointer to the _user_ object. */ -typedef struct drm_ref_object { - drm_hash_item_t hash; +struct drm_ref_object { + struct drm_hash_item hash; struct list_head list; atomic_t refcount; - drm_ref_t unref_action; -} drm_ref_object_t; + enum drm_ref_type unref_action; +}; /** * Must be called with the struct_mutex held. */ -extern int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item, +extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item, int shareable); /** * Must be called with the struct_mutex held. */ -extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, +extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key); /* @@ -104,23 +104,23 @@ extern drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. */ -extern int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item); +extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item); /* * Must be called with the struct_mutex held. May temporarily release it. */ -extern int drm_add_ref_object(drm_file_t * priv, - drm_user_object_t * referenced_object, - drm_ref_t ref_action); +extern int drm_add_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. */ -drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, - drm_user_object_t * referenced_object, - drm_ref_t ref_action); +struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv, + struct drm_user_object * referenced_object, + enum drm_ref_type ref_action); /* * Must be called with the struct_mutex held. * If "item" has been obtained by a call to drm_lookup_ref_object. You may not @@ -128,19 +128,19 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv, * This function may temporarily release the struct_mutex. 
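
[Editorial aside: with the typedefs gone, drivers embed struct drm_user_object directly in their own objects to get a user-space handle and per-file reference tracking. A hedged sketch of registering such an object; the wrapper type and function name are invented for illustration, and the only rule assumed is the one stated above, that drm_add_user_object() must be called with struct_mutex held.]

#include "drmP.h"	/* assumed to pull in the drm_objects.h declarations */

struct example_obj {
	struct drm_user_object base;	/* handle / refcount bookkeeping */
	/* driver-private payload would follow */
};

static int example_obj_register(struct drm_file *file_priv,
				struct example_obj *obj, int shareable)
{
	struct drm_device *dev = file_priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);	/* drm_add_user_object() requires it */
	ret = drm_add_user_object(file_priv, &obj->base, shareable);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
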
*/ -extern void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item); -extern int drm_user_object_ref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type, - drm_user_object_t ** object); -extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token, - drm_object_type_t type); +extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item); +extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type, + struct drm_user_object ** object); +extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, + enum drm_object_type type); /*************************************************** * Fence objects. (drm_fence.c) */ -typedef struct drm_fence_object { - drm_user_object_t base; +struct drm_fence_object { + struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -156,29 +156,29 @@ typedef struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; -} drm_fence_object_t; +}; #define _DRM_FENCE_CLASSES 8 #define _DRM_FENCE_TYPE_EXE 0x00 -typedef struct drm_fence_class_manager { +struct drm_fence_class_manager { struct list_head ring; uint32_t pending_flush; wait_queue_head_t fence_queue; int pending_exe_flush; uint32_t last_exe_flush; uint32_t exe_flush_sequence; -} drm_fence_class_manager_t; +}; -typedef struct drm_fence_manager { +struct drm_fence_manager { int initialized; rwlock_t lock; - drm_fence_class_manager_t class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; -} drm_fence_manager_t; +}; -typedef struct drm_fence_driver { +struct drm_fence_driver { uint32_t num_classes; uint32_t wrap_diff; uint32_t flush_diff; @@ -189,7 +189,7 @@ typedef struct drm_fence_driver { int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); void (*poke_flush) (struct drm_device * dev, uint32_t class); -} drm_fence_driver_t; +}; extern void drm_fence_handler(struct drm_device *dev, uint32_t class, uint32_t sequence, uint32_t type); @@ -197,23 +197,40 @@ extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); -extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(drm_fence_object_t * fence, +extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence); -extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence); +extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence); extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, struct drm_fence_object *src); -extern int drm_fence_object_wait(drm_fence_object_t * fence, +extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, - drm_fence_object_t ** c_fence); -extern int 
drm_fence_add_user_object(drm_file_t * priv, - drm_fence_object_t * fence, int shareable); -extern int drm_fence_ioctl(DRM_IOCTL_ARGS); - + struct drm_fence_object ** c_fence); +extern int drm_fence_add_user_object(struct drm_file * priv, + struct drm_fence_object * fence, int shareable); + +extern int drm_fence_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); +extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); /************************************************** *TTMs */ @@ -235,7 +252,7 @@ extern int drm_fence_ioctl(DRM_IOCTL_ARGS); #define DRM_BE_FLAG_BOUND_CACHED 0x00000002 struct drm_ttm_backend; -typedef struct drm_ttm_backend_func { +struct drm_ttm_backend_func { int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); int (*populate) (struct drm_ttm_backend * backend, unsigned long num_pages, struct page ** pages); @@ -244,16 +261,16 @@ typedef struct drm_ttm_backend_func { unsigned long offset, int cached); int (*unbind) (struct drm_ttm_backend * backend); void (*destroy) (struct drm_ttm_backend * backend); -} drm_ttm_backend_func_t; +}; -typedef struct drm_ttm_backend { +struct drm_ttm_backend { uint32_t flags; int mem_type; - drm_ttm_backend_func_t *func; -} drm_ttm_backend_t; + struct drm_ttm_backend_func *func; +}; -typedef struct drm_ttm { +struct drm_ttm { struct page **pages; uint32_t page_flags; unsigned long num_pages; @@ -262,7 +279,7 @@ typedef struct drm_ttm { struct drm_device *dev; int destroy; uint32_t mapping_offset; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; enum { ttm_bound, ttm_evicted, @@ -270,14 +287,14 @@ typedef struct drm_ttm { ttm_unpopulated, } state; -} drm_ttm_t; +}; -extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size); -extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset); -extern void drm_ttm_unbind(drm_ttm_t * ttm); -extern void drm_ttm_evict(drm_ttm_t * ttm); -extern void drm_ttm_fixup_caching(drm_ttm_t * ttm); -extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); +extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size); +extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset); +extern void drm_ttm_unbind(struct drm_ttm * ttm); +extern void drm_ttm_evict(struct drm_ttm * ttm); +extern void drm_ttm_fixup_caching(struct drm_ttm * ttm); +extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index); /* * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this, @@ -285,7 +302,7 @@ extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index); * when the last vma exits. 
*/ -extern int drm_destroy_ttm(drm_ttm_t * ttm); +extern int drm_destroy_ttm(struct drm_ttm * ttm); #define DRM_FLAG_MASKED(_old, _new, _mask) {\ (_old) ^= (((_old) ^ (_new)) & (_mask)); \ @@ -308,19 +325,19 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm); * Buffer objects. (drm_bo.c, drm_bo_move.c) */ -typedef struct drm_bo_mem_reg { - drm_mm_node_t *mm_node; +struct drm_bo_mem_reg { + struct drm_mm_node *mm_node; unsigned long size; unsigned long num_pages; uint32_t page_alignment; uint32_t mem_type; - uint32_t flags; - uint32_t mask; -} drm_bo_mem_reg_t; + uint64_t flags; + uint64_t mask; +}; -typedef struct drm_buffer_object { +struct drm_buffer_object { struct drm_device *dev; - drm_user_object_t base; + struct drm_user_object base; /* * If there is a possibility that the usage variable is zero, @@ -329,30 +346,31 @@ typedef struct drm_buffer_object { atomic_t usage; unsigned long buffer_start; - drm_bo_type_t type; + enum drm_bo_type type; unsigned long offset; atomic_t mapped; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; struct list_head lru; struct list_head ddestroy; uint32_t fence_type; uint32_t fence_class; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t priv_flags; wait_queue_head_t event_queue; struct mutex mutex; /* For pinned buffers */ - drm_mm_node_t *pinned_node; + int pinned; + struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; /* For vm */ - drm_ttm_t *ttm; - drm_map_list_t map_list; + struct drm_ttm *ttm; + struct drm_map_list map_list; uint32_t memory_type; unsigned long bus_offset; uint32_t vm_flags; @@ -364,15 +382,15 @@ typedef struct drm_buffer_object { struct list_head p_mm_list; #endif -} drm_buffer_object_t; +}; #define _DRM_BO_FLAG_UNFENCED 0x00000001 #define _DRM_BO_FLAG_EVICTED 0x00000002 -typedef struct drm_mem_type_manager { +struct drm_mem_type_manager { int has_type; int use_type; - drm_mm_t manager; + struct drm_mm manager; struct list_head lru; struct list_head pinned; uint32_t flags; @@ -380,7 +398,7 @@ typedef struct drm_mem_type_manager { unsigned long io_offset; unsigned long io_size; void *io_addr; -} drm_mem_type_manager_t; +}; #define _DRM_FLAG_MEMTYPE_FIXED 0x00000001 /* Fixed (on-card) PCI memory */ #define _DRM_FLAG_MEMTYPE_MAPPABLE 0x00000002 /* Memory mappable */ @@ -390,13 +408,13 @@ typedef struct drm_mem_type_manager { #define _DRM_FLAG_MEMTYPE_CMA 0x00000010 /* Can't map aperture */ #define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */ -typedef struct drm_buffer_manager { +struct drm_buffer_manager { struct mutex init_mutex; struct mutex evict_mutex; int nice_mode; int initialized; - drm_file_t *last_to_validate; - drm_mem_type_manager_t man[DRM_BO_MEM_TYPES]; + struct drm_file *last_to_validate; + struct drm_mem_type_manager man[DRM_BO_MEM_TYPES]; struct list_head unfenced; struct list_head ddestroy; #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) @@ -407,81 +425,104 @@ typedef struct drm_buffer_manager { uint32_t fence_type; unsigned long cur_pages; atomic_t count; -} drm_buffer_manager_t; +}; -typedef struct drm_bo_driver { +struct drm_bo_driver { const uint32_t *mem_type_prio; const uint32_t *mem_busy_prio; uint32_t num_mem_type_prio; uint32_t num_mem_busy_prio; - drm_ttm_backend_t *(*create_ttm_backend_entry) + struct drm_ttm_backend *(*create_ttm_backend_entry) (struct drm_device * dev); - int (*fence_type) (struct drm_buffer_object *bo, uint32_t * class, uint32_t * type); - int (*invalidate_caches) (struct drm_device * dev, uint32_t flags); + int 
(*fence_type) (struct drm_buffer_object *bo, uint32_t * type); + int (*invalidate_caches) (struct drm_device * dev, uint64_t flags); int (*init_mem_type) (struct drm_device * dev, uint32_t type, - drm_mem_type_manager_t * man); + struct drm_mem_type_manager * man); uint32_t(*evict_mask) (struct drm_buffer_object *bo); int (*move) (struct drm_buffer_object * bo, int evict, int no_wait, struct drm_bo_mem_reg * new_mem); -} drm_bo_driver_t; +}; /* * buffer objects (drm_bo.c) */ -extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, - unsigned long p_offset, unsigned long p_size); -extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, - drm_bo_type_t type, uint32_t mask, - uint32_t hint, uint32_t page_alignment, - unsigned long buffer_start, - drm_buffer_object_t ** buf_obj); -extern int drm_bo_ioctl(DRM_IOCTL_ARGS); -extern int drm_mm_init_ioctl(DRM_IOCTL_ARGS); +extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_bo_set_pin_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); + +extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); extern int drm_bo_driver_finish(struct drm_device *dev); extern int drm_bo_driver_init(struct drm_device *dev); extern int drm_bo_pci_offset(struct drm_device *dev, - drm_bo_mem_reg_t * mem, + struct drm_bo_mem_reg * mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size); -extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem); +extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem); -extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo); -extern int drm_fence_buffer_objects(drm_file_t * priv, +extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo); +extern int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence); -extern void drm_bo_add_to_lru(drm_buffer_object_t * bo); -extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence); +extern void drm_bo_add_to_lru(struct drm_buffer_object * bo); +extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait); -extern int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * 
mem, int no_wait); -extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +extern int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait); +extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); +extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, + enum drm_bo_type type, uint64_t mask, + uint32_t hint, uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **bo); +extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, + unsigned long p_offset, unsigned long p_size); extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); +extern int drm_bo_add_user_object(struct drm_file *file_priv, + struct drm_buffer_object *bo, int sharable); +extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); +extern int drm_bo_set_pin(struct drm_device *dev, + struct drm_buffer_object *bo, int pin); /* * Buffer object memory move helpers. * drm_bo_move.c */ -extern int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_memcpy(drm_buffer_object_t * bo, +extern int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_memcpy(struct drm_buffer_object * bo, int evict, - int no_wait, drm_bo_mem_reg_t * new_mem); -extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, + int no_wait, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, uint32_t fence_flags, - drm_bo_mem_reg_t * new_mem); + struct drm_bo_mem_reg * new_mem); + +extern int drm_mem_reg_ioremap(struct drm_device *dev, + struct drm_bo_mem_reg *mem, void **virtual); +extern void drm_mem_reg_iounmap(struct drm_device *dev, + struct drm_bo_mem_reg *mem, void *virtual); -extern int drm_mem_reg_ioremap(struct drm_device *dev, drm_bo_mem_reg_t * mem, +extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem, void **virtual); -extern void drm_mem_reg_iounmap(struct drm_device *dev, drm_bo_mem_reg_t * mem, +extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * mem, void *virtual); #ifdef CONFIG_DEBUG_MUTEXES #define DRM_ASSERT_LOCKED(_mutex) \ diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h index 2ea105c5..2688479a 100644 --- a/linux-core/drm_os_linux.h +++ b/linux-core/drm_os_linux.h @@ -6,11 +6,6 @@ #include <linux/interrupt.h> /* For task queue support */ #include <linux/delay.h> -/** File pointer type */ -#define DRMFILE struct file * -/** Ioctl arguments */ -#define DRM_IOCTL_ARGS struct inode *inode, struct file *filp, unsigned int cmd, unsigned long data -#define DRM_ERR(d) -(d) /** Current process ID */ #define DRM_CURRENTPID current->pid #define DRM_SUSER(p) capable(CAP_SYS_ADMIN) @@ -51,9 +46,6 @@ #define DRM_WRITEMEMORYBARRIER() wmb() /** Read/write memory barrier */ #define DRM_MEMORYBARRIER() mb() -/** DRM device local declaration */ -#define DRM_DEVICE drm_file_t *priv = filp->private_data; \ - drm_device_t *dev = priv->head->dev /** IRQ handler arguments and return type and values */ #define DRM_IRQ_ARGS int irq, void *arg @@ -93,14 +85,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define MTRR_TYPE_WRCOMB 1 #endif -/** For data going into the kernel 
through the ioctl argument */ -#define DRM_COPY_FROM_USER_IOCTL(arg1, arg2, arg3) \ - if ( copy_from_user(&arg1, arg2, arg3) ) \ - return -EFAULT -/** For data going from the kernel through the ioctl argument */ -#define DRM_COPY_TO_USER_IOCTL(arg1, arg2, arg3) \ - if ( copy_to_user(arg1, &arg2, arg3) ) \ - return -EFAULT /** Other copying of data to kernel space */ #define DRM_COPY_FROM_USER(arg1, arg2, arg3) \ copy_from_user(arg1, arg2, arg3) @@ -117,8 +101,6 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size) #define DRM_GET_USER_UNCHECKED(val, uaddr) \ __get_user(val, uaddr) -#define DRM_GET_PRIV_WITH_RETURN(_priv, _filp) _priv = _filp->private_data - #define DRM_HZ HZ #define DRM_WAIT_ON( ret, queue, timeout, condition ) \ diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c index 76252204..a608eed3 100644 --- a/linux-core/drm_pci.c +++ b/linux-core/drm_pci.c @@ -47,7 +47,7 @@ /** * \brief Allocate a PCI consistent memory block, for DMA. */ -drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align, +drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align, dma_addr_t maxaddr) { drm_dma_handle_t *dmah; @@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc); * * This function is for internal use in the Linux-specific DRM core code. */ -void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { unsigned long addr; size_t sz; @@ -167,7 +167,7 @@ void __drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) /** * \brief Free a PCI consistent memory block */ -void drm_pci_free(drm_device_t * dev, drm_dma_handle_t *dmah) +void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah) { __drm_pci_free(dev, dmah); kfree(dmah); diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index e93a0406..08bf99d6 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -90,7 +90,7 @@ static struct drm_proc_list { * "/proc/dri/%minor%/", and each entry in proc_list as * "/proc/dri/%minor%/%name%". 
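
[Editorial aside: the drm_os_linux.h removals above (DRM_IOCTL_ARGS, DRM_DEVICE, DRM_COPY_*_IOCTL) reflect the new ioctl calling convention used throughout this diff: the DRM core hands the handler a kernel-space copy of the argument and copies any results back. A sketch of a converted handler; example_ioctl and struct drm_example_arg are hypothetical names.]

/* Hypothetical argument struct; real drivers use their drm_*.h types. */
struct drm_example_arg {
	unsigned int handle;
};

/* New-style handler: dev and file_priv are passed explicitly, and 'data'
 * already points at a kernel copy of the argument, so the old DRM_DEVICE /
 * DRM_COPY_FROM_USER_IOCTL boilerplate disappears. */
static int example_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_example_arg *arg = data;

	/* operate on dev / file_priv / arg directly */
	arg->handle = 0;	/* results written here are copied back by the core */
	return 0;
}

The drm_sg_alloc()/drm_sg_alloc_ioctl() split in the drm_scatter.c hunks below is the in-tree instance of this pattern.
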
*/ -int drm_proc_init(drm_device_t * dev, int minor, +int drm_proc_init(struct drm_device * dev, int minor, struct proc_dir_entry *root, struct proc_dir_entry **dev_root) { struct proc_dir_entry *ent; @@ -165,7 +165,7 @@ int drm_proc_cleanup(int minor, struct proc_dir_entry *root, static int drm_name_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; if (offset > DRM_PROC_LIMIT) { @@ -207,10 +207,10 @@ static int drm_name_info(char *buf, char **start, off_t offset, int request, static int drm__vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_map_t *map; - drm_map_list_t *r_list; + struct drm_map *map; + struct drm_map_list *r_list; /* Hardcoded from _DRM_FRAME_BUFFER, _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, @@ -264,7 +264,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request, static int drm_vm_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -287,10 +287,10 @@ static int drm_vm_info(char *buf, char **start, off_t offset, int request, static int drm__queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; int i; - drm_queue_t *q; + struct drm_queue *q; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -337,7 +337,7 @@ static int drm__queues_info(char *buf, char **start, off_t offset, static int drm_queues_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -360,9 +360,9 @@ static int drm_queues_info(char *buf, char **start, off_t offset, int request, static int drm__bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma || offset > DRM_PROC_LIMIT) { @@ -409,7 +409,7 @@ static int drm__bufs_info(char *buf, char **start, off_t offset, int request, static int drm_bufs_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -432,13 +432,13 @@ static int drm_bufs_info(char *buf, char **start, off_t offset, int request, static int drm__objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_buffer_manager_t *bm = &dev->bm; - drm_fence_manager_t *fm = &dev->fm; - drm_u64_t used_mem; - drm_u64_t low_mem; - drm_u64_t high_mem; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_fence_manager *fm = &dev->fm; + uint64_t used_mem; + uint64_t low_mem; + uint64_t high_mem; if (offset > DRM_PROC_LIMIT) { @@ -496,7 +496,7 @@ static int drm__objects_info(char 
*buf, char **start, off_t offset, int request, static int drm_objects_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -519,9 +519,9 @@ static int drm_objects_info(char *buf, char **start, off_t offset, int request, static int drm__clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_file_t *priv; + struct drm_file *priv; if (offset > DRM_PROC_LIMIT) { *eof = 1; @@ -552,7 +552,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset, static int drm_clients_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); @@ -566,9 +566,9 @@ static int drm_clients_info(char *buf, char **start, off_t offset, static int drm__vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int len = 0; - drm_vma_entry_t *pt; + struct drm_vma_entry *pt; struct vm_area_struct *vma; #if defined(__i386__) unsigned int pgprot; @@ -625,7 +625,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request, static int drm_vma_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; int ret; mutex_lock(&dev->struct_mutex); diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c index e5c9f877..3c0f672e 100644 --- a/linux-core/drm_scatter.c +++ b/linux-core/drm_scatter.c @@ -36,7 +36,7 @@ #define DEBUG_SCATTER 0 -void drm_sg_cleanup(drm_sg_mem_t * entry) +void drm_sg_cleanup(struct drm_sg_mem *entry) { struct page *page; int i; @@ -55,6 +55,7 @@ void drm_sg_cleanup(drm_sg_mem_t * entry) entry->pages * sizeof(*entry->pagelist), DRM_MEM_PAGES); drm_free(entry, sizeof(*entry), DRM_MEM_SGLISTS); } +EXPORT_SYMBOL(drm_sg_cleanup); #ifdef _LP64 # define ScatterHandle(x) (unsigned int)((x >> 32) + (x & ((1L << 32) - 1))) @@ -62,14 +63,9 @@ void drm_sg_cleanup(drm_sg_mem_t * entry) # define ScatterHandle(x) (unsigned int)(x) #endif -int drm_sg_alloc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t __user *argp = (void __user *)arg; - drm_scatter_gather_t request; - drm_sg_mem_t *entry; + struct drm_sg_mem *entry; unsigned long pages, i, j; DRM_DEBUG("%s\n", __FUNCTION__); @@ -80,17 +76,13 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, if (dev->sg) return -EINVAL; - if (copy_from_user(&request, argp, sizeof(request))) - return -EFAULT; - entry = drm_alloc(sizeof(*entry), DRM_MEM_SGLISTS); if (!entry) return -ENOMEM; memset(entry, 0, sizeof(*entry)); - - pages = (request.size + PAGE_SIZE - 1) / PAGE_SIZE; - DRM_DEBUG("sg size=%ld pages=%ld\n", request.size, pages); + pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE; + DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages); entry->pages = pages; entry->pagelist = 
drm_alloc(pages * sizeof(*entry->pagelist), @@ -142,12 +134,7 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, SetPageReserved(entry->pagelist[j]); } - request.handle = entry->handle; - - if (copy_to_user(argp, &request, sizeof(request))) { - drm_sg_cleanup(entry); - return -EFAULT; - } + request->handle = entry->handle; dev->sg = entry; @@ -196,28 +183,32 @@ int drm_sg_alloc(struct inode *inode, struct file *filp, failed: drm_sg_cleanup(entry); return -ENOMEM; + } +EXPORT_SYMBOL(drm_sg_alloc); -int drm_sg_free(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +int drm_sg_alloc_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_scatter_gather_t request; - drm_sg_mem_t *entry; + struct drm_scatter_gather *request = data; + + return drm_sg_alloc(dev, request); + +} + +int drm_sg_free(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_scatter_gather *request = data; + struct drm_sg_mem *entry; if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL; - if (copy_from_user(&request, - (drm_scatter_gather_t __user *) arg, - sizeof(request))) - return -EFAULT; - entry = dev->sg; dev->sg = NULL; - if (!entry || entry->handle != request.handle) + if (!entry || entry->handle != request->handle) return -EINVAL; DRM_DEBUG("sg free virtual = %p\n", entry->virtual); diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index e15db6d6..118e82ae 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -38,13 +38,13 @@ #include "drm_sman.h" -typedef struct drm_owner_item { - drm_hash_item_t owner_hash; +struct drm_owner_item { + struct drm_hash_item owner_hash; struct list_head sman_list; struct list_head mem_blocks; -} drm_owner_item_t; +}; -void drm_sman_takedown(drm_sman_t * sman) +void drm_sman_takedown(struct drm_sman * sman) { drm_ht_remove(&sman->user_hash_tab); drm_ht_remove(&sman->owner_hash_tab); @@ -56,12 +56,12 @@ void drm_sman_takedown(drm_sman_t * sman) EXPORT_SYMBOL(drm_sman_takedown); int -drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order) { int ret = 0; - sman->mm = (drm_sman_mm_t *) drm_calloc(num_managers, sizeof(*sman->mm), + sman->mm = (struct drm_sman_mm *) drm_calloc(num_managers, sizeof(*sman->mm), DRM_MEM_MM); if (!sman->mm) { ret = -ENOMEM; @@ -88,8 +88,8 @@ EXPORT_SYMBOL(drm_sman_init); static void *drm_sman_mm_allocate(void *private, unsigned long size, unsigned alignment) { - drm_mm_t *mm = (drm_mm_t *) private; - drm_mm_node_t *tmp; + struct drm_mm *mm = (struct drm_mm *) private; + struct drm_mm_node *tmp; tmp = drm_mm_search_free(mm, size, alignment, 1); if (!tmp) { @@ -101,30 +101,30 @@ static void *drm_sman_mm_allocate(void *private, unsigned long size, static void drm_sman_mm_free(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; drm_mm_put_block(node); } static void drm_sman_mm_destroy(void *private) { - drm_mm_t *mm = (drm_mm_t *) private; + struct drm_mm *mm = (struct drm_mm *) private; drm_mm_takedown(mm); drm_free(mm, sizeof(*mm), DRM_MEM_MM); } static unsigned long drm_sman_mm_offset(void *private, void *ref) { - drm_mm_node_t *node = (drm_mm_node_t *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; return node->start; } int -drm_sman_set_range(drm_sman_t 
* sman, unsigned int manager, +drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size) { - drm_sman_mm_t *sman_mm; - drm_mm_t *mm; + struct drm_sman_mm *sman_mm; + struct drm_mm *mm; int ret; BUG_ON(manager >= sman->num_managers); @@ -153,8 +153,8 @@ drm_sman_set_range(drm_sman_t * sman, unsigned int manager, EXPORT_SYMBOL(drm_sman_set_range); int -drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, - drm_sman_mm_t * allocator) +drm_sman_set_manager(struct drm_sman * sman, unsigned int manager, + struct drm_sman_mm * allocator) { BUG_ON(manager >= sman->num_managers); sman->mm[manager] = *allocator; @@ -163,16 +163,16 @@ drm_sman_set_manager(drm_sman_t * sman, unsigned int manager, } EXPORT_SYMBOL(drm_sman_set_manager); -static drm_owner_item_t *drm_sman_get_owner_item(drm_sman_t * sman, +static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman, unsigned long owner) { int ret; - drm_hash_item_t *owner_hash_item; - drm_owner_item_t *owner_item; + struct drm_hash_item *owner_hash_item; + struct drm_owner_item *owner_item; ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item); if (!ret) { - return drm_hash_entry(owner_hash_item, drm_owner_item_t, + return drm_hash_entry(owner_hash_item, struct drm_owner_item, owner_hash); } @@ -194,14 +194,14 @@ out: return NULL; } -drm_memblock_item_t *drm_sman_alloc(drm_sman_t *sman, unsigned int manager, +struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager, unsigned long size, unsigned alignment, unsigned long owner) { void *tmp; - drm_sman_mm_t *sman_mm; - drm_owner_item_t *owner_item; - drm_memblock_item_t *memblock; + struct drm_sman_mm *sman_mm; + struct drm_owner_item *owner_item; + struct drm_memblock_item *memblock; BUG_ON(manager >= sman->num_managers); @@ -246,9 +246,9 @@ out: EXPORT_SYMBOL(drm_sman_alloc); -static void drm_sman_free(drm_memblock_item_t *item) +static void drm_sman_free(struct drm_memblock_item *item) { - drm_sman_t *sman = item->sman; + struct drm_sman *sman = item->sman; list_del(&item->owner_list); drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash); @@ -256,40 +256,40 @@ static void drm_sman_free(drm_memblock_item_t *item) drm_free(item, sizeof(*item), DRM_MEM_MM); } -int drm_sman_free_key(drm_sman_t *sman, unsigned int key) +int drm_sman_free_key(struct drm_sman *sman, unsigned int key) { - drm_hash_item_t *hash_item; - drm_memblock_item_t *memblock_item; + struct drm_hash_item *hash_item; + struct drm_memblock_item *memblock_item; if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item)) return -EINVAL; - memblock_item = drm_hash_entry(hash_item, drm_memblock_item_t, user_hash); + memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, user_hash); drm_sman_free(memblock_item); return 0; } EXPORT_SYMBOL(drm_sman_free_key); -static void drm_sman_remove_owner(drm_sman_t *sman, - drm_owner_item_t *owner_item) +static void drm_sman_remove_owner(struct drm_sman *sman, + struct drm_owner_item *owner_item) { list_del(&owner_item->sman_list); drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash); drm_free(owner_item, sizeof(*owner_item), DRM_MEM_MM); } -int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) +int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; - drm_owner_item_t *owner_item; + struct drm_hash_item *hash_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, 
&hash_item)) { return -1; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); if (owner_item->mem_blocks.next == &owner_item->mem_blocks) { drm_sman_remove_owner(sman, owner_item); return -1; @@ -300,10 +300,10 @@ int drm_sman_owner_clean(drm_sman_t *sman, unsigned long owner) EXPORT_SYMBOL(drm_sman_owner_clean); -static void drm_sman_do_owner_cleanup(drm_sman_t *sman, - drm_owner_item_t *owner_item) +static void drm_sman_do_owner_cleanup(struct drm_sman *sman, + struct drm_owner_item *owner_item) { - drm_memblock_item_t *entry, *next; + struct drm_memblock_item *entry, *next; list_for_each_entry_safe(entry, next, &owner_item->mem_blocks, owner_list) { @@ -312,28 +312,28 @@ static void drm_sman_do_owner_cleanup(drm_sman_t *sman, drm_sman_remove_owner(sman, owner_item); } -void drm_sman_owner_cleanup(drm_sman_t *sman, unsigned long owner) +void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner) { - drm_hash_item_t *hash_item; - drm_owner_item_t *owner_item; + struct drm_hash_item *hash_item; + struct drm_owner_item *owner_item; if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) { return; } - owner_item = drm_hash_entry(hash_item, drm_owner_item_t, owner_hash); + owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash); drm_sman_do_owner_cleanup(sman, owner_item); } EXPORT_SYMBOL(drm_sman_owner_cleanup); -void drm_sman_cleanup(drm_sman_t *sman) +void drm_sman_cleanup(struct drm_sman *sman) { - drm_owner_item_t *entry, *next; + struct drm_owner_item *entry, *next; unsigned int i; - drm_sman_mm_t *sman_mm; + struct drm_sman_mm *sman_mm; list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) { drm_sman_do_owner_cleanup(sman, entry); diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h index ddc732a1..39a39fef 100644 --- a/linux-core/drm_sman.h +++ b/linux-core/drm_sman.h @@ -50,7 +50,7 @@ * for memory management. */ -typedef struct drm_sman_mm { +struct drm_sman_mm { /* private info. If allocated, needs to be destroyed by the destroy function */ void *private; @@ -74,30 +74,30 @@ typedef struct drm_sman_mm { "alloc" function */ unsigned long (*offset) (void *private, void *ref); -} drm_sman_mm_t; +}; -typedef struct drm_memblock_item { +struct drm_memblock_item { struct list_head owner_list; - drm_hash_item_t user_hash; + struct drm_hash_item user_hash; void *mm_info; - drm_sman_mm_t *mm; + struct drm_sman_mm *mm; struct drm_sman *sman; -} drm_memblock_item_t; +}; -typedef struct drm_sman { - drm_sman_mm_t *mm; +struct drm_sman { + struct drm_sman_mm *mm; int num_managers; - drm_open_hash_t owner_hash_tab; - drm_open_hash_t user_hash_tab; + struct drm_open_hash owner_hash_tab; + struct drm_open_hash user_hash_tab; struct list_head owner_items; -} drm_sman_t; +}; /* * Take down a memory manager. This function should only be called after a * successful init and after a call to drm_sman_cleanup. */ -extern void drm_sman_takedown(drm_sman_t * sman); +extern void drm_sman_takedown(struct drm_sman * sman); /* * Allocate structures for a manager. 
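
[Editorial aside: the drm_sman conversion is again mechanical, but the calling sequence is easier to see outside the diff. A hedged sketch of the owner-tracked allocation flow (roughly how the sis/via drivers use it); the hash orders, sizes, use of the file pointer as owner key, and the function name are illustrative only.]

#include "drmP.h"
#include "drm_sman.h"

static int example_sman_flow(struct drm_file *file_priv,
			     unsigned long vram_pages, unsigned long num_pages)
{
	struct drm_sman sman;
	struct drm_memblock_item *item;
	int ret;

	ret = drm_sman_init(&sman, 1, 12, 8);	/* one manager; hash orders illustrative */
	if (ret)
		return ret;
	ret = drm_sman_set_range(&sman, 0, 0, vram_pages);
	if (ret)
		goto out;

	/* Allocations are tagged with an owner key, here the file pointer. */
	item = drm_sman_alloc(&sman, 0, num_pages, 0, (unsigned long)file_priv);
	if (item)
		drm_sman_free_key(&sman, item->user_hash.key);

	/* On file close, the owner's leftover blocks go away in one call. */
	drm_sman_owner_cleanup(&sman, (unsigned long)file_priv);
out:
	drm_sman_cleanup(&sman);
	drm_sman_takedown(&sman);
	return ret;
}
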
@@ -112,7 +112,7 @@ extern void drm_sman_takedown(drm_sman_t * sman); * */ -extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, +extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order); /* @@ -120,7 +120,7 @@ extern int drm_sman_init(drm_sman_t * sman, unsigned int num_managers, * manager unless a customized allogator is used. */ -extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, +extern int drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size); /* @@ -129,23 +129,23 @@ extern int drm_sman_set_range(drm_sman_t * sman, unsigned int manager, * so it can be destroyed after this call. */ -extern int drm_sman_set_manager(drm_sman_t * sman, unsigned int mananger, - drm_sman_mm_t * allocator); +extern int drm_sman_set_manager(struct drm_sman * sman, unsigned int mananger, + struct drm_sman_mm * allocator); /* * Allocate a memory block. Aligment is not implemented yet. */ -extern drm_memblock_item_t *drm_sman_alloc(drm_sman_t * sman, - unsigned int manager, - unsigned long size, - unsigned alignment, - unsigned long owner); +extern struct drm_memblock_item *drm_sman_alloc(struct drm_sman * sman, + unsigned int manager, + unsigned long size, + unsigned alignment, + unsigned long owner); /* * Free a memory block identified by its user hash key. */ -extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); +extern int drm_sman_free_key(struct drm_sman * sman, unsigned int key); /* * returns 1 iff there are no stale memory blocks associated with this owner. @@ -154,7 +154,7 @@ extern int drm_sman_free_key(drm_sman_t * sman, unsigned int key); * resources associated with owner. */ -extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); +extern int drm_sman_owner_clean(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with this owner. Note that this @@ -164,13 +164,13 @@ extern int drm_sman_owner_clean(drm_sman_t * sman, unsigned long owner); * is not going to be referenced anymore. */ -extern void drm_sman_owner_cleanup(drm_sman_t * sman, unsigned long owner); +extern void drm_sman_owner_cleanup(struct drm_sman * sman, unsigned long owner); /* * Frees all stale memory blocks associated with the memory manager. * See idling above. */ -extern void drm_sman_cleanup(drm_sman_t * sman); +extern void drm_sman_cleanup(struct drm_sman * sman); #endif diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 0cc8a105..db8c0bd8 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -50,11 +50,11 @@ MODULE_PARM_DESC(debug, "Enable debug output"); module_param_named(cards_limit, drm_cards_limit, int, 0444); module_param_named(debug, drm_debug, int, 0600); -drm_head_t **drm_heads; -struct drm_sysfs_class *drm_class; +struct drm_head **drm_heads; +struct class *drm_class; struct proc_dir_entry *drm_proc_root; -static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev, +static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { @@ -160,9 +160,9 @@ error_out_unreg: * create the proc init entry via proc_init(). 
This routines assigns * minor numbers to secondary heads of multi-headed cards */ -static int drm_get_head(drm_device_t * dev, drm_head_t * head) +static int drm_get_head(struct drm_device * dev, struct drm_head * head) { - drm_head_t **heads = drm_heads; + struct drm_head **heads = drm_heads; int ret; int minor; @@ -171,7 +171,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) for (minor = 0; minor < drm_cards_limit; minor++, heads++) { if (!*heads) { - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = dev, .device = MKDEV(DRM_MAJOR, minor), .minor = minor, @@ -202,7 +202,7 @@ static int drm_get_head(drm_device_t * dev, drm_head_t * head) err_g2: drm_proc_cleanup(minor, drm_proc_root, head->dev_root); err_g1: - *head = (drm_head_t) { + *head = (struct drm_head) { .dev = NULL}; return ret; } @@ -221,7 +221,7 @@ err_g1: int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver) { - drm_device_t *dev; + struct drm_device *dev; int ret; DRM_DEBUG("\n"); @@ -232,18 +232,22 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, if (!drm_fb_loaded) { pci_set_drvdata(pdev, dev); - pci_request_regions(pdev, driver->pci_driver.name); + ret = pci_request_regions(pdev, driver->pci_driver.name); + if (ret) + goto err_g1; } - pci_enable_device(pdev); + ret = pci_enable_device(pdev); + if (ret) + goto err_g2; pci_set_master(pdev); if ((ret = drm_fill_in_dev(dev, pdev, ent, driver))) { printk(KERN_ERR "DRM: fill_in_dev failed\n"); - goto err_g1; + goto err_g3; } if ((ret = drm_get_head(dev, &dev->primary))) - goto err_g1; + goto err_g3; DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n", driver->name, driver->major, driver->minor, driver->patchlevel, @@ -251,12 +255,16 @@ int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, return 0; -err_g1: - if (!drm_fb_loaded) { - pci_set_drvdata(pdev, NULL); - pci_release_regions(pdev); + err_g3: + if (!drm_fb_loaded) pci_disable_device(pdev); - } + err_g2: + if (!drm_fb_loaded) + pci_release_regions(pdev); + err_g1: + if (!drm_fb_loaded) + pci_set_drvdata(pdev, NULL); + drm_free(dev, sizeof(*dev), DRM_MEM_STUB); printk(KERN_ERR "DRM: drm_get_dev failed.\n"); return ret; @@ -274,7 +282,7 @@ EXPORT_SYMBOL(drm_get_dev); * "drm" data, otherwise unregisters the "drm" data, frees the dev list and * unregisters the character device. */ -int drm_put_dev(drm_device_t * dev) +int drm_put_dev(struct drm_device * dev) { DRM_DEBUG("release primary %s\n", dev->driver->pci_driver.name); @@ -302,7 +310,7 @@ int drm_put_dev(drm_device_t * dev) * last minor released. * */ -int drm_put_head(drm_head_t * head) +int drm_put_head(struct drm_head * head) { int minor = head->minor; @@ -311,7 +319,7 @@ int drm_put_head(drm_head_t * head) drm_proc_cleanup(minor, drm_proc_root, head->dev_root); drm_sysfs_device_remove(head->dev_class); - *head = (drm_head_t){.dev = NULL}; + *head = (struct drm_head){.dev = NULL}; drm_heads[minor] = NULL; return 0; diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index ace0778b..cf4349b0 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -1,3 +1,4 @@ + /* * drm_sysfs.c - Modifications to drm_sysfs_class.c to support * extra sysfs attribute from DRM. 
Normal drm_sysfs_class @@ -15,38 +16,8 @@ #include <linux/kdev_t.h> #include <linux/err.h> -#include "drmP.h" #include "drm_core.h" - -struct drm_sysfs_class { - struct class_device_attribute attr; - struct class class; -}; -#define to_drm_sysfs_class(d) container_of(d, struct drm_sysfs_class, class) - -struct simple_dev { - dev_t dev; - struct class_device class_dev; -}; -#define to_simple_dev(d) container_of(d, struct simple_dev, class_dev) - -static void release_simple_dev(struct class_device *class_dev) -{ - struct simple_dev *s_dev = to_simple_dev(class_dev); - kfree(s_dev); -} - -static ssize_t show_dev(struct class_device *class_dev, char *buf) -{ - struct simple_dev *s_dev = to_simple_dev(class_dev); - return print_dev_t(buf, s_dev->dev); -} - -static void drm_sysfs_class_release(struct class *class) -{ - struct drm_sysfs_class *cs = to_drm_sysfs_class(class); - kfree(cs); -} +#include "drmP.h" /* Display the version of drm_core. This doesn't work right in current design */ static ssize_t version_show(struct class *dev, char *buf) @@ -68,38 +39,27 @@ static CLASS_ATTR(version, S_IRUGO, version_show, NULL); * Note, the pointer created here is to be destroyed when finished by making a * call to drm_sysfs_destroy(). */ -struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) +struct class *drm_sysfs_create(struct module *owner, char *name) { - struct drm_sysfs_class *cs; - int retval; + struct class *class; + int err; - cs = kmalloc(sizeof(*cs), GFP_KERNEL); - if (!cs) { - retval = -ENOMEM; - goto error; + class = class_create(owner, name); + if (IS_ERR(class)) { + err = PTR_ERR(class); + goto err_out; } - memset(cs, 0x00, sizeof(*cs)); - - cs->class.name = name; - cs->class.class_release = drm_sysfs_class_release; - cs->class.release = release_simple_dev; - cs->attr.attr.name = "dev"; - cs->attr.attr.mode = S_IRUGO; - cs->attr.attr.owner = owner; - cs->attr.show = show_dev; - cs->attr.store = NULL; + err = class_create_file(class, &class_attr_version); + if (err) + goto err_out_class; - retval = class_register(&cs->class); - if (retval) - goto error; - class_create_file(&cs->class, &class_attr_version); + return class; - return cs; - - error: - kfree(cs); - return ERR_PTR(retval); +err_out_class: + class_destroy(class); +err_out: + return ERR_PTR(err); } /** @@ -109,17 +69,18 @@ struct drm_sysfs_class *drm_sysfs_create(struct module *owner, char *name) * Note, the pointer to be destroyed must have been created with a call to * drm_sysfs_create(). */ -void drm_sysfs_destroy(struct drm_sysfs_class *cs) +void drm_sysfs_destroy(struct class *class) { - if ((cs == NULL) || (IS_ERR(cs))) + if ((class == NULL) || (IS_ERR(class))) return; - class_unregister(&cs->class); + class_remove_file(class, &class_attr_version); + class_destroy(class); } static ssize_t show_dri(struct class_device *class_device, char *buf) { - drm_device_t * dev = ((drm_head_t *)class_get_devdata(class_device))->dev; + struct drm_device * dev = ((struct drm_head *)class_get_devdata(class_device))->dev; if (dev->driver->dri_library_name) return dev->driver->dri_library_name(dev, buf); return snprintf(buf, PAGE_SIZE, "%s\n", dev->driver->pci_driver.name); @@ -131,7 +92,7 @@ static struct class_device_attribute class_device_attrs[] = { /** * drm_sysfs_device_add - adds a class device to sysfs for a character driver - * @cs: pointer to the struct drm_sysfs_class that this device should be registered to. + * @cs: pointer to the struct class that this device should be registered to. 
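
[Editorial aside: the drm_sysfs.c rewrite above replaces the home-grown drm_sysfs_class with the stock struct class / class_device machinery. A hedged sketch of the registration sequence a caller would go through with the new interface; the function name is invented, error handling is trimmed, and the drm_sysfs_* prototypes are assumed to come via drmP.h.]

#include <linux/err.h>
#include "drmP.h"

static int example_sysfs_register(struct drm_head *head)
{
	struct class *cls;
	struct class_device *cdev;

	cls = drm_sysfs_create(THIS_MODULE, "drm");
	if (IS_ERR(cls))
		return PTR_ERR(cls);

	/* one class_device per head, created as card%d by drm_sysfs_device_add() */
	cdev = drm_sysfs_device_add(cls, head);
	if (IS_ERR(cdev)) {
		drm_sysfs_destroy(cls);
		return PTR_ERR(cdev);
	}

	/* teardown mirrors setup */
	drm_sysfs_device_remove(cdev);
	drm_sysfs_destroy(cls);
	return 0;
}
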
* @dev: the dev_t for the device to be added. * @device: a pointer to a struct device that is assiociated with this class device. * @fmt: string for the class device's name @@ -140,47 +101,42 @@ static struct class_device_attribute class_device_attrs[] = { * class. A "dev" file will be created, showing the dev_t for the device. The * pointer to the struct class_device will be returned from the call. Any further * sysfs files that might be required can be created using this pointer. - * Note: the struct drm_sysfs_class passed to this function must have previously been + * Note: the struct class passed to this function must have previously been * created with a call to drm_sysfs_create(). */ -struct class_device *drm_sysfs_device_add(struct drm_sysfs_class *cs, - drm_head_t * head) +struct class_device *drm_sysfs_device_add(struct class *cs, struct drm_head *head) { - struct simple_dev *s_dev = NULL; - int i, retval; - - if ((cs == NULL) || (IS_ERR(cs))) { - retval = -ENODEV; - goto error; + struct class_device *class_dev; + int i, j, err; + + class_dev = class_device_create(cs, NULL, + MKDEV(DRM_MAJOR, head->minor), + &(head->dev->pdev)->dev, + "card%d", head->minor); + if (IS_ERR(class_dev)) { + err = PTR_ERR(class_dev); + goto err_out; } - s_dev = kmalloc(sizeof(*s_dev), GFP_KERNEL); - if (!s_dev) { - retval = -ENOMEM; - goto error; - } - memset(s_dev, 0x00, sizeof(*s_dev)); + class_set_devdata(class_dev, head); - s_dev->dev = MKDEV(DRM_MAJOR, head->minor); - s_dev->class_dev.dev = &head->dev->pdev->dev; - s_dev->class_dev.class = &cs->class; - - snprintf(s_dev->class_dev.class_id, BUS_ID_SIZE, "card%d", head->minor); - retval = class_device_register(&s_dev->class_dev); - if (retval) - goto error; - - class_device_create_file(&s_dev->class_dev, &cs->attr); - class_set_devdata(&s_dev->class_dev, head); - - for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_create_file(&s_dev->class_dev, &class_device_attrs[i]); + for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) { + err = class_device_create_file(class_dev, + &class_device_attrs[i]); + if (err) + goto err_out_files; + } - return &s_dev->class_dev; + return class_dev; -error: - kfree(s_dev); - return ERR_PTR(retval); +err_out_files: + if (i > 0) + for (j = 0; j < i; j++) + class_device_remove_file(class_dev, + &class_device_attrs[i]); + class_device_unregister(class_dev); +err_out: + return ERR_PTR(err); } /** @@ -192,11 +148,9 @@ error: */ void drm_sysfs_device_remove(struct class_device *class_dev) { - struct simple_dev *s_dev = to_simple_dev(class_dev); int i; for (i = 0; i < ARRAY_SIZE(class_device_attrs); i++) - class_device_remove_file(&s_dev->class_dev, &class_device_attrs[i]); - - class_device_unregister(&s_dev->class_dev); + class_device_remove_file(class_dev, &class_device_attrs[i]); + class_device_unregister(class_dev); } diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 31503c9c..60c64cba 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -45,7 +45,7 @@ static void drm_ttm_cache_flush(void) * Use kmalloc if possible. Otherwise fall back to vmalloc. 
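A minimal sketch of the struct class based registration that replaces the hand-rolled drm_sysfs_class above: class_create() plus class_device_create(), with class_destroy()/class_device_unregister() available for the error paths. The class name, device number and return-value plumbing below are placeholders; only the API calls mirror the code above (2.6-era class_device interface).

/* Sketch only; assumes <linux/device.h> and <linux/kdev_t.h>. */
static struct class_device *example_register(struct class **out_class,
                                             dev_t devt, void *drvdata)
{
        struct class *cls;
        struct class_device *cdev;

        cls = class_create(THIS_MODULE, "example");
        if (IS_ERR(cls))
                return ERR_PTR(PTR_ERR(cls));

        cdev = class_device_create(cls, NULL, devt, NULL, "card%d", 0);
        if (IS_ERR(cdev)) {
                class_destroy(cls);
                return cdev;
        }

        class_set_devdata(cdev, drvdata);
        *out_class = cls;
        return cdev;
}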
*/ -static void ttm_alloc_pages(drm_ttm_t * ttm) +static void ttm_alloc_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); ttm->pages = NULL; @@ -66,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t * ttm) } } -static void ttm_free_pages(drm_ttm_t * ttm) +static void ttm_free_pages(struct drm_ttm * ttm) { unsigned long size = ttm->num_pages * sizeof(*ttm->pages); @@ -105,7 +105,7 @@ static struct page *drm_ttm_alloc_page(void) * for range of pages in a ttm. */ -static int drm_set_caching(drm_ttm_t * ttm, int noncached) +static int drm_set_caching(struct drm_ttm * ttm, int noncached) { int i; struct page **cur_page; @@ -142,12 +142,12 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached) * Free all resources associated with a ttm. */ -int drm_destroy_ttm(drm_ttm_t * ttm) +int drm_destroy_ttm(struct drm_ttm * ttm) { int i; struct page **cur_page; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return 0; @@ -159,7 +159,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm) } if (ttm->pages) { - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED) drm_set_caching(ttm, 0); @@ -191,10 +191,10 @@ int drm_destroy_ttm(drm_ttm_t * ttm) return 0; } -struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) +struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index) { struct page *p; - drm_buffer_manager_t *bm = &ttm->dev->bm; + struct drm_buffer_manager *bm = &ttm->dev->bm; p = ttm->pages[index]; if (!p) { @@ -207,11 +207,11 @@ struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index) return p; } -static int drm_ttm_populate(drm_ttm_t * ttm) +static int drm_ttm_populate(struct drm_ttm * ttm) { struct page *page; unsigned long i; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (ttm->state != ttm_unpopulated) return 0; @@ -231,10 +231,10 @@ static int drm_ttm_populate(drm_ttm_t * ttm) * Initialize a ttm. */ -drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) +struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size) { - drm_bo_driver_t *bo_driver = dev->driver->bo_driver; - drm_ttm_t *ttm; + struct drm_bo_driver *bo_driver = dev->driver->bo_driver; + struct drm_ttm *ttm; if (!bo_driver) return NULL; @@ -275,9 +275,9 @@ drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size) * Unbind a ttm region from the aperture. 
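For reference, a minimal sketch of the kmalloc-or-vmalloc fallback named in the comment above and used by ttm_alloc_pages()/ttm_free_pages(): small page arrays come from the slab allocator, larger ones from vmalloc, and the matching free routine is chosen by the same size test. The threshold and names below are illustrative only.

/* Sketch only; assumes <linux/slab.h>, <linux/vmalloc.h>, <linux/string.h>. */
static struct page **alloc_page_array(unsigned long num_pages)
{
        unsigned long size = num_pages * sizeof(struct page *);
        struct page **pages;

        if (size <= PAGE_SIZE)
                pages = kmalloc(size, GFP_KERNEL);
        else
                pages = vmalloc(size);

        if (pages)
                memset(pages, 0, size);
        return pages;
}

static void free_page_array(struct page **pages, unsigned long num_pages)
{
        unsigned long size = num_pages * sizeof(struct page *);

        if (size <= PAGE_SIZE)
                kfree(pages);
        else
                vfree(pages);
}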
*/ -void drm_ttm_evict(drm_ttm_t * ttm) +void drm_ttm_evict(struct drm_ttm * ttm) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; int ret; if (ttm->state == ttm_bound) { @@ -288,11 +288,11 @@ void drm_ttm_evict(drm_ttm_t * ttm) ttm->state = ttm_evicted; } -void drm_ttm_fixup_caching(drm_ttm_t * ttm) +void drm_ttm_fixup_caching(struct drm_ttm * ttm) { if (ttm->state == ttm_evicted) { - drm_ttm_backend_t *be = ttm->be; + struct drm_ttm_backend *be = ttm->be; if (be->func->needs_ub_cache_adjust(be)) { drm_set_caching(ttm, 0); } @@ -300,7 +300,7 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm) } } -void drm_ttm_unbind(drm_ttm_t * ttm) +void drm_ttm_unbind(struct drm_ttm * ttm) { if (ttm->state == ttm_bound) drm_ttm_evict(ttm); @@ -308,11 +308,11 @@ void drm_ttm_unbind(drm_ttm_t * ttm) drm_ttm_fixup_caching(ttm); } -int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset) +int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset) { int ret = 0; - drm_ttm_backend_t *be; + struct drm_ttm_backend *be; if (!ttm) return -EINVAL; diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 72d63c10..c4e790ef 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -85,11 +85,11 @@ pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; - drm_map_list_t *r_list; - drm_hash_item_t *hash; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_map *map = NULL; + struct drm_map_list *r_list; + struct drm_hash_item *hash; /* * Find the right map @@ -103,7 +103,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) goto vm_nopage_error; - r_list = drm_hash_entry(hash, drm_map_list_t, hash); + r_list = drm_hash_entry(hash, struct drm_map_list, hash); map = r_list->map; if (map && map->type == _DRM_AGP) { @@ -172,7 +172,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; unsigned long offset; unsigned long i; struct page *page; @@ -203,11 +203,11 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma, */ static void drm_vm_shm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; - drm_map_t *map; - drm_map_list_t *r_list; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; + struct drm_map *map; + struct drm_map_list *r_list; int found_maps = 0; DRM_DEBUG("0x%08lx,0x%08lx\n", @@ -285,9 +285,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma) static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_device_dma *dma = dev->dma; unsigned long 
offset; unsigned long page_nr; struct page *page; @@ -321,10 +321,10 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma, static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma, unsigned long address) { - drm_map_t *map = (drm_map_t *) vma->vm_private_data; - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_sg_mem_t *entry = dev->sg; + struct drm_map *map = (struct drm_map *) vma->vm_private_data; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_sg_mem *entry = dev->sg; unsigned long offset; unsigned long map_offset; unsigned long page_offset; @@ -418,9 +418,9 @@ static struct vm_operations_struct drm_vm_sg_ops = { */ static void drm_vm_open_locked(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *vma_entry; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *vma_entry; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -436,8 +436,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma) static void drm_vm_open(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; mutex_lock(&dev->struct_mutex); drm_vm_open_locked(vma); @@ -454,9 +454,9 @@ static void drm_vm_open(struct vm_area_struct *vma) */ static void drm_vm_close(struct vm_area_struct *vma) { - drm_file_t *priv = vma->vm_file->private_data; - drm_device_t *dev = priv->head->dev; - drm_vma_entry_t *pt, *temp; + struct drm_file *priv = vma->vm_file->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_vma_entry *pt, *temp; DRM_DEBUG("0x%08lx,0x%08lx\n", vma->vm_start, vma->vm_end - vma->vm_start); @@ -477,7 +477,7 @@ static void drm_vm_close(struct vm_area_struct *vma) /** * mmap DMA memory. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. * @@ -486,9 +486,9 @@ static void drm_vm_close(struct vm_area_struct *vma) */ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; - drm_device_dma_t *dma; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; + struct drm_device_dma *dma; unsigned long length = vma->vm_end - vma->vm_start; dev = priv->head->dev; @@ -524,7 +524,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma) return 0; } -unsigned long drm_core_get_map_ofs(drm_map_t * map) +unsigned long drm_core_get_map_ofs(struct drm_map * map) { return map->offset; } @@ -543,7 +543,7 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); /** * mmap DMA memory. * - * \param filp file pointer. + * \param file_priv DRM file private. * \param vma virtual memory area. * \return zero on success or a negative number on failure. 
* @@ -555,11 +555,11 @@ EXPORT_SYMBOL(drm_core_get_reg_ofs); */ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_map_t *map = NULL; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; + struct drm_map *map = NULL; unsigned long offset = 0; - drm_hash_item_t *hash; + struct drm_hash_item *hash; DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n", vma->vm_start, vma->vm_end, vma->vm_pgoff); @@ -585,7 +585,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) return -EINVAL; } - map = drm_hash_entry(hash, drm_map_list_t, hash)->map; + map = drm_hash_entry(hash, struct drm_map_list, hash)->map; if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) return -EPERM; @@ -676,8 +676,8 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) int drm_mmap(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev = priv->head->dev; int ret; mutex_lock(&dev->struct_mutex); @@ -713,11 +713,11 @@ EXPORT_SYMBOL(drm_mmap); static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, unsigned long address) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; - drm_ttm_t *ttm; - drm_device_t *dev; + struct drm_ttm *ttm; + struct drm_device *dev; unsigned long pfn; int err; unsigned long bus_base; @@ -766,7 +766,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma, page_offset = (address - vma->vm_start) >> PAGE_SHIFT; if (bus_size) { - drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type]; + struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type]; pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset; vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma); @@ -798,7 +798,7 @@ out_unlock: static void drm_bo_vm_open_locked(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; drm_vm_open_locked(vma); atomic_inc(&bo->usage); @@ -815,8 +815,8 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma) static void drm_bo_vm_open(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + struct drm_device *dev = bo->dev; mutex_lock(&dev->struct_mutex); drm_bo_vm_open_locked(vma); @@ -831,8 +831,8 @@ static void drm_bo_vm_open(struct vm_area_struct *vma) static void drm_bo_vm_close(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; - drm_device_t *dev = bo->dev; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; + struct drm_device *dev = bo->dev; drm_vm_close(vma); if (bo) { @@ -865,7 +865,7 @@ static struct vm_operations_struct drm_bo_vm_ops = { * mmap buffer object memory. * * \param vma virtual memory area. - * \param filp file pointer. + * \param file_priv DRM file private. * \param map The buffer object drm map. 
* \return zero on success or a negative number on failure. */ diff --git a/linux-core/ffb_context.c b/linux-core/ffb_context.c index e6ae60c3..586c3503 100644 --- a/linux-core/ffb_context.c +++ b/linux-core/ffb_context.c @@ -13,7 +13,7 @@ #include "drmP.h" #include "ffb_drv.h" -static int ffb_alloc_queue(drm_device_t * dev, int is_2d_only) { +static int ffb_alloc_queue(struct drm_device * dev, int is_2d_only) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int i; @@ -351,7 +351,7 @@ static void FFBWait(ffb_fbcPtr ffb) } while (--limit); } -int ffb_context_switch(drm_device_t * dev, int old, int new) { +int ffb_context_switch(struct drm_device * dev, int old, int new) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; #if DRM_DMA_HISTOGRAM @@ -401,7 +401,7 @@ int ffb_resctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; drm_ctx_t ctx; int idx; @@ -421,7 +421,7 @@ int ffb_addctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; struct ffb_hw_context *hwctx; drm_ctx_t ctx; @@ -449,7 +449,7 @@ int ffb_modctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; struct ffb_hw_context *hwctx; drm_ctx_t ctx; @@ -480,7 +480,7 @@ int ffb_getctx(struct inode * inode, struct file * filp, unsigned int cmd, int ffb_switchctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; drm_ctx_t ctx; if (copy_from_user(&ctx, (drm_ctx_t __user *) arg, sizeof(ctx))) @@ -504,7 +504,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, unsigned long arg) { drm_ctx_t ctx; drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->dev; + struct drm_device *dev = priv->dev; ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int idx; @@ -523,7 +523,7 @@ int ffb_rmctx(struct inode * inode, struct file * filp, unsigned int cmd, return 0; } -static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev) +static void ffb_driver_reclaim_buffers_locked(struct drm_device * dev) { ffb_dev_priv_t *fpriv = (ffb_dev_priv_t *) dev->dev_private; int context = _DRM_LOCKING_CONTEXT(dev->lock.hw_lock->lock); @@ -537,13 +537,13 @@ static void ffb_driver_reclaim_buffers_locked(drm_device_t * dev) } } -static void ffb_driver_lastclose(drm_device_t * dev) +static void ffb_driver_lastclose(struct drm_device * dev) { if (dev->dev_private) kfree(dev->dev_private); } -static void ffb_driver_unload(drm_device_t * dev) +static void ffb_driver_unload(struct drm_device * dev) { if (ffb_position != NULL) kfree(ffb_position); @@ -571,7 +571,7 @@ unsigned long ffb_driver_get_map_ofs(drm_map_t * map) return (map->offset & 0xffffffff); } -unsigned long ffb_driver_get_reg_ofs(drm_device_t * 
dev) +unsigned long ffb_driver_get_reg_ofs(struct drm_device * dev) { ffb_dev_priv_t *ffb_priv = (ffb_dev_priv_t *) dev->dev_private; diff --git a/linux-core/ffb_drv.c b/linux-core/ffb_drv.c index 9c88f061..f2b4cc7f 100644 --- a/linux-core/ffb_drv.c +++ b/linux-core/ffb_drv.c @@ -114,7 +114,7 @@ static void ffb_apply_upa_parent_ranges(int parent, return; } -static int ffb_init_one(drm_device_t *dev, int prom_node, int parent_node, +static int ffb_init_one(struct drm_device *dev, int prom_node, int parent_node, int instance) { struct linux_prom64_registers regs[2*PROMREG_MAX]; @@ -167,7 +167,7 @@ static int __init ffb_scan_siblings(int root, int instance) static drm_map_t *ffb_find_map(struct file *filp, unsigned long off) { drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_device *dev; drm_map_list_t *r_list; struct list_head *list; drm_map_t *map; @@ -237,10 +237,10 @@ unsigned long ffb_get_unmapped_area(struct file *filp, /* This functions must be here since it references drm_numdevs) * which drm_drv.h declares. */ -static int ffb_driver_firstopen(drm_device_t *dev) +static int ffb_driver_firstopen(struct drm_device *dev) { ffb_dev_priv_t *ffb_priv; - drm_device_t *temp_dev; + struct drm_device *temp_dev; int ret = 0; int i; diff --git a/linux-core/ffb_drv.h b/linux-core/ffb_drv.h index f76b0d92..bad3c94d 100644 --- a/linux-core/ffb_drv.h +++ b/linux-core/ffb_drv.h @@ -281,4 +281,4 @@ extern unsigned long ffb_get_unmapped_area(struct file *filp, unsigned long pgoff, unsigned long flags); extern unsigned long ffb_driver_get_map_ofs(drm_map_t *map) -extern unsigned long ffb_driver_get_reg_ofs(drm_device_t *dev) +extern unsigned long ffb_driver_get_reg_ofs(struct drm_device *dev) diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c index 49379434..7c37b4bb 100644 --- a/linux-core/i810_dma.c +++ b/linux-core/i810_dma.c @@ -46,9 +46,9 @@ #define I810_BUF_UNMAPPED 0 #define I810_BUF_MAPPED 1 -static inline void i810_print_status_page(drm_device_t * dev) +static inline void i810_print_status_page(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = dev->dev_private; u32 *temp = dev_priv->hw_status_page; int i; @@ -64,16 +64,16 @@ static inline void i810_print_status_page(drm_device_t * dev) } } -static drm_buf_t *i810_freelist_get(drm_device_t * dev) +static struct drm_buf *i810_freelist_get(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; int used; /* Linear search might not be the best solution */ for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; /* In use is already a pointer */ used = cmpxchg(buf_priv->in_use, I810_BUF_FREE, @@ -89,7 +89,7 @@ static drm_buf_t *i810_freelist_get(drm_device_t * dev) * yet, the hardware updates in use for us once its on the ring buffer. 
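For reference, a minimal sketch of the cmpxchg()-based claim used by the i810 freelist above: a buffer is handed out only if its in_use word can be atomically flipped from FREE to CLIENT, so concurrent callers (and the hardware, which writes the same word) never receive the same buffer twice. The constants and names are placeholders.

/* Sketch only; cmpxchg() returns the value that was previously stored. */
#define EX_BUF_FREE   0
#define EX_BUF_CLIENT 1

static int example_claim_buffer(int *in_use, int buf_count)
{
        int i;

        for (i = 0; i < buf_count; i++) {
                if (cmpxchg(&in_use[i], EX_BUF_FREE, EX_BUF_CLIENT)
                    == EX_BUF_FREE)
                        return i;       /* claimed buffer i */
        }
        return -1;                      /* nothing free */
}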
*/ -static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) +static int i810_freelist_put(struct drm_device * dev, struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used; @@ -106,10 +106,10 @@ static int i810_freelist_put(drm_device_t * dev, drm_buf_t * buf) static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev; + struct drm_file *priv = filp->private_data; + struct drm_device *dev; drm_i810_private_t *dev_priv; - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; lock_kernel(); @@ -139,10 +139,9 @@ static const struct file_operations i810_buffer_fops = { .fasync = drm_fasync, }; -static int i810_map_buffer(drm_buf_t * buf, struct file *filp) +static int i810_map_buffer(struct drm_buf * buf, struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; + struct drm_device *dev = file_priv->head->dev; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_private_t *dev_priv = dev->dev_private; const struct file_operations *old_fops; @@ -152,14 +151,14 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp) return -EINVAL; down_write(&current->mm->mmap_sem); - old_fops = filp->f_op; - filp->f_op = &i810_buffer_fops; + old_fops = file_priv->filp->f_op; + file_priv->filp->f_op = &i810_buffer_fops; dev_priv->mmap_buffer = buf; - buf_priv->virtual = (void *)do_mmap(filp, 0, buf->total, + buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total, PROT_READ | PROT_WRITE, MAP_SHARED, buf->bus_address); dev_priv->mmap_buffer = NULL; - filp->f_op = old_fops; + file_priv->filp->f_op = old_fops; if (IS_ERR(buf_priv->virtual)) { /* Real error */ DRM_ERROR("mmap error\n"); @@ -171,7 +170,7 @@ static int i810_map_buffer(drm_buf_t * buf, struct file *filp) return retcode; } -static int i810_unmap_buffer(drm_buf_t * buf) +static int i810_unmap_buffer(struct drm_buf * buf) { drm_i810_buf_priv_t *buf_priv = buf->dev_private; int retcode = 0; @@ -191,10 +190,10 @@ static int i810_unmap_buffer(drm_buf_t * buf) return retcode; } -static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, - struct file *filp) +static int i810_dma_get_buffer(struct drm_device * dev, drm_i810_dma_t * d, + struct drm_file *file_priv) { - drm_buf_t *buf; + struct drm_buf *buf; drm_i810_buf_priv_t *buf_priv; int retcode = 0; @@ -205,13 +204,13 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, return retcode; } - retcode = i810_map_buffer(buf, filp); + retcode = i810_map_buffer(buf, file_priv); if (retcode) { i810_freelist_put(dev, buf); DRM_ERROR("mapbuf failed, retcode %d\n", retcode); return retcode; } - buf->filp = filp; + buf->file_priv = file_priv; buf_priv = buf->dev_private; d->granted = 1; d->request_idx = buf->idx; @@ -221,9 +220,9 @@ static int i810_dma_get_buffer(drm_device_t * dev, drm_i810_dma_t * d, return retcode; } -static int i810_dma_cleanup(drm_device_t * dev) +static int i810_dma_cleanup(struct drm_device * dev) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; /* Make sure interrupts are disabled here because the uninstall ioctl * may not have been called from userspace and after dev_private @@ -252,7 +251,7 @@ static int i810_dma_cleanup(drm_device_t * dev) dev->dev_private = NULL; for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private;
if (buf_priv->kernel_virtual && buf->total) @@ -262,7 +261,7 @@ static int i810_dma_cleanup(drm_device_t * dev) return 0; } -static int i810_wait_ring(drm_device_t * dev, int n) +static int i810_wait_ring(struct drm_device * dev, int n) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -295,7 +294,7 @@ static int i810_wait_ring(drm_device_t * dev, int n) return iters; } -static void i810_kernel_lost_context(drm_device_t * dev) +static void i810_kernel_lost_context(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_ring_buffer_t *ring = &(dev_priv->ring); @@ -307,9 +306,9 @@ static void i810_kernel_lost_context(drm_device_t * dev) ring->space += ring->Size; } -static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) +static int i810_freelist_init(struct drm_device * dev, drm_i810_private_t * dev_priv) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int my_idx = 24; u32 *hw_status = (u32 *) (dev_priv->hw_status_page + my_idx); int i; @@ -320,7 +319,7 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) } for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; buf_priv->in_use = hw_status++; @@ -342,11 +341,11 @@ static int i810_freelist_init(drm_device_t * dev, drm_i810_private_t * dev_priv) return 0; } -static int i810_dma_initialize(drm_device_t * dev, +static int i810_dma_initialize(struct drm_device * dev, drm_i810_private_t * dev_priv, drm_i810_init_t * init) { - drm_map_list_t *r_list; + struct drm_map_list *r_list; memset(dev_priv, 0, sizeof(drm_i810_private_t)); list_for_each_entry(r_list, &dev->maplist, head) { @@ -399,7 +398,7 @@ static int i810_dma_initialize(drm_device_t * dev, i810_dma_cleanup(dev); DRM_ERROR("can not ioremap virtual address for" " ring buffer\n"); - return DRM_ERR(ENOMEM); + return -ENOMEM; } dev_priv->ring.virtual_start = dev_priv->ring.map.handle; @@ -449,99 +448,29 @@ static int i810_dma_initialize(drm_device_t * dev, return 0; } -/* i810 DRM version 1.1 used a smaller init structure with different - * ordering of values than is currently used (drm >= 1.2). There is - * no defined way to detect the XFree version to correct this problem, - * however by checking using this procedure we can detect the correct - * thing to do. - * - * #1 Read the Smaller init structure from user-space - * #2 Verify the overlay_physical is a valid physical address, or NULL - * If it isn't then we have a v1.1 client. Fix up params. - * If it is, then we have a 1.2 client... get the rest of the data. 
- */ -static int i810_dma_init_compat(drm_i810_init_t * init, unsigned long arg) +static int i810_dma_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - - /* Get v1.1 init data */ - if (copy_from_user(init, (drm_i810_pre12_init_t __user *) arg, - sizeof(drm_i810_pre12_init_t))) { - return -EFAULT; - } - - if ((!init->overlay_physical) || (init->overlay_physical > 4096)) { - - /* This is a v1.2 client, just get the v1.2 init data */ - DRM_INFO("Using POST v1.2 init.\n"); - if (copy_from_user(init, (drm_i810_init_t __user *) arg, - sizeof(drm_i810_init_t))) { - return -EFAULT; - } - } else { - - /* This is a v1.1 client, fix the params */ - DRM_INFO("Using PRE v1.2 init.\n"); - init->pitch_bits = init->h; - init->pitch = init->w; - init->h = init->overlay_physical; - init->w = init->overlay_offset; - init->overlay_physical = 0; - init->overlay_offset = 0; - } - - return 0; -} - -static int i810_dma_init(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) -{ - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_i810_private_t *dev_priv; - drm_i810_init_t init; + drm_i810_init_t *init = data; int retcode = 0; - /* Get only the init func */ - if (copy_from_user - (&init, (void __user *)arg, sizeof(drm_i810_init_func_t))) - return -EFAULT; - - switch (init.func) { - case I810_INIT_DMA: - /* This case is for backward compatibility. It - * handles XFree 4.1.0 and 4.2.0, and has to - * do some parameter checking as described below. - * It will someday go away. - */ - retcode = i810_dma_init_compat(&init, arg); - if (retcode) - return retcode; - - dev_priv = drm_alloc(sizeof(drm_i810_private_t), - DRM_MEM_DRIVER); - if (dev_priv == NULL) - return -ENOMEM; - retcode = i810_dma_initialize(dev, dev_priv, &init); - break; - - default: + switch (init->func) { case I810_INIT_DMA_1_4: DRM_INFO("Using v1.4 init.\n"); - if (copy_from_user(&init, (drm_i810_init_t __user *) arg, - sizeof(drm_i810_init_t))) { - return -EFAULT; - } dev_priv = drm_alloc(sizeof(drm_i810_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) return -ENOMEM; - retcode = i810_dma_initialize(dev, dev_priv, &init); + retcode = i810_dma_initialize(dev, dev_priv, init); break; case I810_CLEANUP_DMA: DRM_INFO("DMA Cleanup\n"); retcode = i810_dma_cleanup(dev); break; + default: + return -EINVAL; } return retcode; @@ -553,7 +482,7 @@ static int i810_dma_init(struct inode *inode, struct file *filp, * Use 'volatile' & local var tmp to force the emitted values to be * identical to the verified ones. */ -static void i810EmitContextVerified(drm_device_t * dev, +static void i810EmitContextVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -586,7 +515,7 @@ static void i810EmitContextVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) +static void i810EmitTexVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; int i, j = 0; @@ -619,7 +548,7 @@ static void i810EmitTexVerified(drm_device_t * dev, volatile unsigned int *code) /* Need to do some additional checking when setting the dest buffer. 
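For reference, a minimal sketch of the new ioctl handler shape that the i810 conversions above move to: the DRM core copies the user argument into data before the call (and back afterwards for read ioctls), so the handler only casts, validates and dispatches. The drm_example_vertex_t type and the dispatch step are hypothetical.

static int example_vertex_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_example_vertex_t *vertex = data;    /* hypothetical per-driver type */

        LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (vertex->idx < 0 || vertex->idx > dev->dma->buf_count)
                return -EINVAL;

        /* ... dispatch dev->dma->buflist[vertex->idx] here ... */
        return 0;
}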
*/ -static void i810EmitDestVerified(drm_device_t * dev, +static void i810EmitDestVerified(struct drm_device * dev, volatile unsigned int *code) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -654,7 +583,7 @@ static void i810EmitDestVerified(drm_device_t * dev, ADVANCE_LP_RING(); } -static void i810EmitState(drm_device_t * dev) +static void i810EmitState(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; @@ -685,14 +614,14 @@ static void i810EmitState(drm_device_t * dev) /* need to verify */ -static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, +static void i810_dma_dispatch_clear(struct drm_device * dev, int flags, unsigned int clear_color, unsigned int clear_zval) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -760,12 +689,12 @@ static void i810_dma_dispatch_clear(drm_device_t * dev, int flags, } } -static void i810_dma_dispatch_swap(drm_device_t * dev) +static void i810_dma_dispatch_swap(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; int nbox = sarea_priv->nbox; - drm_clip_rect_t *pbox = sarea_priv->boxes; + struct drm_clip_rect *pbox = sarea_priv->boxes; int pitch = dev_priv->pitch; int cpp = 2; int i; @@ -806,13 +735,13 @@ static void i810_dma_dispatch_swap(drm_device_t * dev) } } -static void i810_dma_dispatch_vertex(drm_device_t * dev, - drm_buf_t * buf, int discard, int used) +static void i810_dma_dispatch_vertex(struct drm_device * dev, + struct drm_buf * buf, int discard, int used) { drm_i810_private_t *dev_priv = dev->dev_private; drm_i810_buf_priv_t *buf_priv = buf->dev_private; drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv; - drm_clip_rect_t *box = sarea_priv->boxes; + struct drm_clip_rect *box = sarea_priv->boxes; int nbox = sarea_priv->nbox; unsigned long address = (unsigned long)buf->bus_address; unsigned long start = address - dev->agp->base; @@ -886,7 +815,7 @@ static void i810_dma_dispatch_vertex(drm_device_t * dev, } } -static void i810_dma_dispatch_flip(drm_device_t * dev) +static void i810_dma_dispatch_flip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; int pitch = dev_priv->pitch; @@ -933,7 +862,7 @@ static void i810_dma_dispatch_flip(drm_device_t * dev) } -static void i810_dma_quiescent(drm_device_t * dev) +static void i810_dma_quiescent(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; RING_LOCALS; @@ -952,10 +881,10 @@ static void i810_dma_quiescent(drm_device_t * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); } -static int i810_flush_queue(drm_device_t * dev) +static int i810_flush_queue(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i, ret = 0; RING_LOCALS; @@ -971,7 +900,7 @@ static int i810_flush_queue(drm_device_t * dev) i810_wait_ring(dev, dev_priv->ring.Size - 8); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; int used = cmpxchg(buf_priv->in_use, I810_BUF_HARDWARE, @@ -987,9 +916,10 @@ static int i810_flush_queue(drm_device_t * dev) } /* Must be called with the 
lock held */ -static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) +static void i810_reclaim_buffers(struct drm_device *dev, + struct drm_file *file_priv) { - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; int i; if (!dma) @@ -1002,10 +932,10 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) i810_flush_queue(dev); for (i = 0; i < dma->buf_count; i++) { - drm_buf_t *buf = dma->buflist[i]; + struct drm_buf *buf = dma->buflist[i]; drm_i810_buf_priv_t *buf_priv = buf->dev_private; - if (buf->filp == filp && buf_priv) { + if (buf->file_priv == file_priv && buf_priv) { int used = cmpxchg(buf_priv->in_use, I810_BUF_CLIENT, I810_BUF_FREE); @@ -1017,47 +947,38 @@ static void i810_reclaim_buffers(drm_device_t *dev, struct file *filp) } } -static int i810_flush_ioctl(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_flush_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); i810_flush_queue(dev); return 0; } -static int i810_dma_vertex(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_dma_vertex(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - drm_i810_vertex_t vertex; + drm_i810_vertex_t *vertex = data; - if (copy_from_user - (&vertex, (drm_i810_vertex_t __user *) arg, sizeof(vertex))) - return -EFAULT; - - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n", - vertex.idx, vertex.used, vertex.discard); + vertex->idx, vertex->used, vertex->discard); - if (vertex.idx < 0 || vertex.idx > dma->buf_count) + if (vertex->idx < 0 || vertex->idx > dma->buf_count) return -EINVAL; i810_dma_dispatch_vertex(dev, - dma->buflist[vertex.idx], - vertex.discard, vertex.used); + dma->buflist[vertex->idx], + vertex->discard, vertex->used); - atomic_add(vertex.used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); atomic_inc(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1065,48 +986,37 @@ static int i810_dma_vertex(struct inode *inode, struct file *filp, return 0; } -static int i810_clear_bufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_clear_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_i810_clear_t clear; - - if (copy_from_user - (&clear, (drm_i810_clear_t __user *) arg, sizeof(clear))) - return -EFAULT; + drm_i810_clear_t *clear = data; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); /* GH: Someone's doing nasty things... 
*/ if (!dev->dev_private) { return -EINVAL; } - i810_dma_dispatch_clear(dev, clear.flags, - clear.clear_color, clear.clear_depth); + i810_dma_dispatch_clear(dev, clear->flags, + clear->clear_color, clear->clear_depth); return 0; } -static int i810_swap_bufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_swap_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - DRM_DEBUG("i810_swap_bufs\n"); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); i810_dma_dispatch_swap(dev); return 0; } -static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int i810_getage(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) @@ -1116,52 +1026,45 @@ static int i810_getage(struct inode *inode, struct file *filp, unsigned int cmd, return 0; } -static int i810_getbuf(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int i810_getbuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; int retcode = 0; - drm_i810_dma_t d; + drm_i810_dma_t *d = data; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - if (copy_from_user(&d, (drm_i810_dma_t __user *) arg, sizeof(d))) - return -EFAULT; - - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); - d.granted = 0; + d->granted = 0; - retcode = i810_dma_get_buffer(dev, &d, filp); + retcode = i810_dma_get_buffer(dev, d, file_priv); DRM_DEBUG("i810_dma: %d returning %d, granted = %d\n", - current->pid, retcode, d.granted); + current->pid, retcode, d->granted); - if (copy_to_user((drm_dma_t __user *) arg, &d, sizeof(d))) - return -EFAULT; sarea_priv->last_dispatch = (int)hw_status[5]; return retcode; } -static int i810_copybuf(struct inode *inode, - struct file *filp, unsigned int cmd, unsigned long arg) +static int i810_copybuf(struct drm_device *dev, void *data, + struct drm_file *file_priv) { /* Never copy - 2.4.x doesn't need it */ return 0; } -static int i810_docopy(struct inode *inode, struct file *filp, unsigned int cmd, - unsigned long arg) +static int i810_docopy(struct drm_device *dev, void *data, + struct drm_file *file_priv) { /* Never copy - 2.4.x doesn't need it */ return 0; } -static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, +static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf, int used, unsigned int last_render) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1221,30 +1124,25 @@ static void i810_dma_dispatch_mc(drm_device_t * dev, drm_buf_t * buf, int used, ADVANCE_LP_RING(); } -static int i810_dma_mc(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_dma_mc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; - drm_device_dma_t *dma = dev->dma; + struct drm_device_dma *dma = dev->dma; 
drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; u32 *hw_status = dev_priv->hw_status_page; drm_i810_sarea_t *sarea_priv = (drm_i810_sarea_t *) dev_priv->sarea_priv; - drm_i810_mc_t mc; + drm_i810_mc_t *mc = data; - if (copy_from_user(&mc, (drm_i810_mc_t __user *) arg, sizeof(mc))) - return -EFAULT; + LOCK_TEST_WITH_RETURN(dev, file_priv); - LOCK_TEST_WITH_RETURN(dev, filp); - - if (mc.idx >= dma->buf_count || mc.idx < 0) + if (mc->idx >= dma->buf_count || mc->idx < 0) return -EINVAL; - i810_dma_dispatch_mc(dev, dma->buflist[mc.idx], mc.used, - mc.last_render); + i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, + mc->last_render); - atomic_add(mc.used, &dev->counts[_DRM_STAT_SECONDARY]); + atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); atomic_inc(&dev->counts[_DRM_STAT_DMA]); sarea_priv->last_enqueue = dev_priv->counter - 1; sarea_priv->last_dispatch = (int)hw_status[5]; @@ -1252,51 +1150,41 @@ static int i810_dma_mc(struct inode *inode, struct file *filp, return 0; } -static int i810_rstatus(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_rstatus(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; return (int)(((u32 *) (dev_priv->hw_status_page))[4]); } -static int i810_ov0_info(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_ov0_info(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - drm_i810_overlay_t data; + drm_i810_overlay_t *ov = data; + + ov->offset = dev_priv->overlay_offset; + ov->physical = dev_priv->overlay_physical; - data.offset = dev_priv->overlay_offset; - data.physical = dev_priv->overlay_physical; - if (copy_to_user - ((drm_i810_overlay_t __user *) arg, &data, sizeof(data))) - return -EFAULT; return 0; } -static int i810_fstatus(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_fstatus(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); return I810_READ(0x30008); } -static int i810_ov0_flip(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_ov0_flip(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_i810_private_t *dev_priv = (drm_i810_private_t *) dev->dev_private; - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); //Tell the overlay to update I810_WRITE(0x30000, dev_priv->overlay_physical | 0x80000000); @@ -1305,7 +1193,7 @@ static int i810_ov0_flip(struct inode *inode, struct file *filp, /* Not sure why this isn't set all the time: */ -static void i810_do_init_pageflip(drm_device_t * dev) +static void i810_do_init_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1315,7 +1203,7 @@ static void i810_do_init_pageflip(drm_device_t * dev) dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; } -static 
int i810_do_cleanup_pageflip(drm_device_t * dev) +static int i810_do_cleanup_pageflip(struct drm_device * dev) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1327,16 +1215,14 @@ static int i810_do_cleanup_pageflip(drm_device_t * dev) return 0; } -static int i810_flip_bufs(struct inode *inode, struct file *filp, - unsigned int cmd, unsigned long arg) +static int i810_flip_bufs(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - drm_file_t *priv = filp->private_data; - drm_device_t *dev = priv->head->dev; drm_i810_private_t *dev_priv = dev->dev_private; DRM_DEBUG("%s\n", __FUNCTION__); - LOCK_TEST_WITH_RETURN(dev, filp); + LOCK_TEST_WITH_RETURN(dev, file_priv); if (!dev_priv->page_flipping) i810_do_init_pageflip(dev); @@ -1345,7 +1231,7 @@ static int i810_flip_bufs(struct inode *inode, struct file *filp, return 0; } -int i810_driver_load(drm_device_t *dev, unsigned long flags) +int i810_driver_load(struct drm_device *dev, unsigned long flags) { /* i810 has 4 more counters */ dev->counters += 4; @@ -1357,12 +1243,12 @@ int i810_driver_load(drm_device_t *dev, unsigned long flags) return 0; } -void i810_driver_lastclose(drm_device_t * dev) +void i810_driver_lastclose(struct drm_device * dev) { i810_dma_cleanup(dev); } -void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) +void i810_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) { if (dev->dev_private) { drm_i810_private_t *dev_priv = dev->dev_private; @@ -1372,33 +1258,34 @@ void i810_driver_preclose(drm_device_t * dev, DRMFILE filp) } } -void i810_driver_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void i810_driver_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { - i810_reclaim_buffers(dev, filp); + i810_reclaim_buffers(dev, file_priv); } -int i810_driver_dma_quiescent(drm_device_t * dev) +int i810_driver_dma_quiescent(struct drm_device * dev) { i810_dma_quiescent(dev); return 0; } -drm_ioctl_desc_t i810_ioctls[] = { - [DRM_IOCTL_NR(DRM_I810_INIT)] = {i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I810_VERTEX)] = {i810_dma_vertex, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_CLEAR)] = {i810_clear_bufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FLUSH)] = {i810_flush_ioctl, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_GETAGE)] = {i810_getage, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_GETBUF)] = {i810_getbuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_SWAP)] = {i810_swap_bufs, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_COPY)] = {i810_copybuf, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_DOCOPY)] = {i810_docopy, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_OV0INFO)] = {i810_ov0_info, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FSTATUS)] = {i810_fstatus, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_OV0FLIP)] = {i810_ov0_flip, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_MC)] = {i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_I810_RSTATUS)] = {i810_rstatus, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_I810_FLIP)] = {i810_flip_bufs, DRM_AUTH} +struct drm_ioctl_desc i810_ioctls[] = { + DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_GETAGE, i810_getage, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_GETBUF, i810_getbuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_SWAP, i810_swap_bufs, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_COPY, i810_copybuf, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_DOCOPY, 
i810_docopy, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_OV0INFO, i810_ov0_info, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FSTATUS, i810_fstatus, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_OV0FLIP, i810_ov0_flip, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_MC, i810_dma_mc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_I810_RSTATUS, i810_rstatus, DRM_AUTH), + DRM_IOCTL_DEF(DRM_I810_FLIP, i810_flip_bufs, DRM_AUTH) }; int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); @@ -1414,7 +1301,7 @@ int i810_max_ioctl = DRM_ARRAY_SIZE(i810_ioctls); * \returns * A value of 1 is always retured to indictate every i810 is AGP. */ -int i810_driver_device_is_agp(drm_device_t * dev) +int i810_driver_device_is_agp(struct drm_device * dev) { return 1; } diff --git a/linux-core/i810_drm.h b/linux-core/i810_drm.h index beec4a2a..d803aeca 100644 --- a/linux-core/i810_drm.h +++ b/linux-core/i810_drm.h @@ -102,13 +102,8 @@ typedef enum _drm_i810_init_func { /* This is the init structure after v1.2 */ typedef struct _drm_i810_init { drm_i810_init_func_t func; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int ring_map_idx; - int buffer_map_idx; -#else unsigned int mmio_offset; unsigned int buffers_offset; -#endif int sarea_priv_offset; unsigned int ring_start; unsigned int ring_end; @@ -124,29 +119,6 @@ typedef struct _drm_i810_init { unsigned int pitch_bits; } drm_i810_init_t; -/* This is the init structure prior to v1.2 */ -typedef struct _drm_i810_pre12_init { - drm_i810_init_func_t func; -#if CONFIG_XFREE86_VERSION < XFREE86_VERSION(4,1,0,0) - int ring_map_idx; - int buffer_map_idx; -#else - unsigned int mmio_offset; - unsigned int buffers_offset; -#endif - int sarea_priv_offset; - unsigned int ring_start; - unsigned int ring_end; - unsigned int ring_size; - unsigned int front_offset; - unsigned int back_offset; - unsigned int depth_offset; - unsigned int w; - unsigned int h; - unsigned int pitch; - unsigned int pitch_bits; -} drm_i810_pre12_init_t; - /* Warning: If you change the SAREA structure you must change the Xserver * structure as well */ @@ -163,7 +135,7 @@ typedef struct _drm_i810_sarea { unsigned int dirty; unsigned int nbox; - drm_clip_rect_t boxes[I810_NR_SAREA_CLIPRECTS]; + struct drm_clip_rect boxes[I810_NR_SAREA_CLIPRECTS]; /* Maintain an LRU of contiguous regions of texture space. 
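A short sketch of declaring a driver ioctl table with the DRM_IOCTL_DEF() helper, matching the converted i810 table above; the DRM_EXAMPLE_* numbers and handlers are hypothetical, while the permission flags are the ones used above.

struct drm_ioctl_desc example_ioctls[] = {
        DRM_IOCTL_DEF(DRM_EXAMPLE_INIT, example_init_ioctl,
                      DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY),
        DRM_IOCTL_DEF(DRM_EXAMPLE_FLUSH, example_flush_ioctl, DRM_AUTH),
};

int example_max_ioctl = DRM_ARRAY_SIZE(example_ioctls);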
If * you think you own a region of texture memory, and it has an diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h index 69d79499..c525e165 100644 --- a/linux-core/i810_drv.h +++ b/linux-core/i810_drv.h @@ -77,8 +77,8 @@ typedef struct _drm_i810_ring_buffer { } drm_i810_ring_buffer_t; typedef struct drm_i810_private { - drm_map_t *sarea_map; - drm_map_t *mmio_map; + struct drm_map *sarea_map; + struct drm_map *mmio_map; drm_i810_sarea_t *sarea_priv; drm_i810_ring_buffer_t ring; @@ -88,7 +88,7 @@ typedef struct drm_i810_private { dma_addr_t dma_status_page; - drm_buf_t *mmap_buffer; + struct drm_buf *mmap_buffer; u32 front_di1, back_di1, zi1; @@ -115,17 +115,18 @@ typedef struct drm_i810_private { } drm_i810_private_t; /* i810_dma.c */ -extern int i810_driver_dma_quiescent(drm_device_t * dev); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, - struct file *filp); +extern int i810_driver_dma_quiescent(struct drm_device * dev); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv); extern int i810_driver_load(struct drm_device *, unsigned long flags); -extern void i810_driver_lastclose(drm_device_t * dev); -extern void i810_driver_preclose(drm_device_t * dev, DRMFILE filp); -extern void i810_driver_reclaim_buffers_locked(drm_device_t * dev, - struct file *filp); -extern int i810_driver_device_is_agp(drm_device_t * dev); - -extern drm_ioctl_desc_t i810_ioctls[]; +extern void i810_driver_lastclose(struct drm_device * dev); +extern void i810_driver_preclose(struct drm_device * dev, + struct drm_file *file_priv); +extern void i810_driver_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv); +extern int i810_driver_device_is_agp(struct drm_device * dev); + +extern struct drm_ioctl_desc i810_ioctls[]; extern int i810_max_ioctl; #define I810_BASE(reg) ((unsigned long) \ diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c index 6e93ae21..69a1aab7 100644 --- a/linux-core/i915_buffer.c +++ b/linux-core/i915_buffer.c @@ -33,14 +33,13 @@ #include "i915_drm.h" #include "i915_drv.h" -drm_ttm_backend_t *i915_create_ttm_backend_entry(drm_device_t * dev) +struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type) { - *class = 0; if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE)) *type = 3; else @@ -48,7 +47,7 @@ int i915_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) return 0; } -int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) +int i915_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Only emit once per batchbuffer submission. 
@@ -64,8 +63,8 @@ int i915_invalidate_caches(drm_device_t * dev, uint32_t flags) return i915_emit_mi_flush(dev, flush_cmd); } -int i915_init_mem_type(drm_device_t * dev, uint32_t type, - drm_mem_type_manager_t * man) +int i915_init_mem_type(struct drm_device * dev, uint32_t type, + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -109,7 +108,7 @@ int i915_init_mem_type(drm_device_t * dev, uint32_t type, return 0; } -uint32_t i915_evict_mask(drm_buffer_object_t *bo) +uint32_t i915_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: @@ -120,14 +119,14 @@ uint32_t i915_evict_mask(drm_buffer_object_t *bo) } } -static void i915_emit_copy_blit(drm_device_t * dev, +static void i915_emit_copy_blit(struct drm_device * dev, uint32_t src_offset, uint32_t dst_offset, uint32_t pages, int direction) { uint32_t cur_pages; uint32_t stride = PAGE_SIZE; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; RING_LOCALS; if (!dev_priv) @@ -154,10 +153,10 @@ static void i915_emit_copy_blit(drm_device_t * dev, return; } -static int i915_move_blit(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_blit(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; int dir = 0; if ((old_mem->mem_type == new_mem->mem_type) && @@ -184,11 +183,11 @@ static int i915_move_blit(drm_buffer_object_t * bo, * then blit and subsequently move out again. */ -static int i915_move_flip(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +static int i915_move_flip(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_device_t *dev = bo->dev; - drm_bo_mem_reg_t tmp_mem; + struct drm_device *dev = bo->dev; + struct drm_bo_mem_reg tmp_mem; int ret; tmp_mem = *new_mem; @@ -220,10 +219,10 @@ out_cleanup: return ret; } -int i915_move(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int i915_move(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mem_type == DRM_BO_MEM_LOCAL) { return drm_bo_move_memcpy(bo, evict, no_wait, new_mem); diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index e612a46c..27e95d65 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -40,7 +40,7 @@ static struct pci_device_id pciidlist[] = { }; #ifdef I915_HAVE_FENCE -static drm_fence_driver_t i915_fence_driver = { +static struct drm_fence_driver i915_fence_driver = { .num_classes = 1, .wrap_diff = (1U << (BREADCRUMB_BITS - 1)), .flush_diff = (1U << (BREADCRUMB_BITS - 2)), @@ -56,7 +56,7 @@ static drm_fence_driver_t i915_fence_driver = { static uint32_t i915_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL}; static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL}; -static drm_bo_driver_t i915_bo_driver = { +static struct drm_bo_driver i915_bo_driver = { .mem_type_prio = i915_mem_prios, .mem_busy_prio = i915_busy_prios, .num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t), diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c index 00873485..abea8ee1 100644 --- a/linux-core/i915_fence.c +++ b/linux-core/i915_fence.c @@ 
-38,12 +38,12 @@ * Implements an intel sync flush operation. */ -static void i915_perform_flush(drm_device_t * dev) +static void i915_perform_flush(struct drm_device * dev) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[0]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[0]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t flush_flags = 0; uint32_t flush_sequence = 0; uint32_t i_status; @@ -109,9 +109,9 @@ static void i915_perform_flush(drm_device_t * dev) } -void i915_poke_flush(drm_device_t * dev, uint32_t class) +void i915_poke_flush(struct drm_device * dev, uint32_t class) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -119,10 +119,10 @@ void i915_poke_flush(drm_device_t * dev, uint32_t class) write_unlock_irqrestore(&fm->lock, flags); } -int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { - drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; + struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private; if (!dev_priv) return -EINVAL; @@ -135,16 +135,16 @@ int i915_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, return 0; } -void i915_fence_handler(drm_device_t * dev) +void i915_fence_handler(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; write_lock(&fm->lock); i915_perform_flush(dev); write_unlock(&fm->lock); } -int i915_fence_has_irq(drm_device_t *dev, uint32_t class, uint32_t flags) +int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) { /* * We have an irq that tells us when we have a new breadcrumb. 
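The i915_fence.c hunks above are representative of the mechanical conversion running through this series: typedefs such as drm_device_t, drm_fence_manager_t and drm_fence_driver_t are spelled out as their struct tags, with no change in behaviour. A hypothetical helper, modelled on i915_poke_flush() above, showing only the pattern:

	static void foo_poke_flush(struct drm_device *dev)
	{
		/* formerly: drm_fence_manager_t *fm = &dev->fm; */
		struct drm_fence_manager *fm = &dev->fm;
		unsigned long flags;

		write_lock_irqsave(&fm->lock, flags);
		/* flush work would go here */
		write_unlock_irqrestore(&fm->lock, flags);
	}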
diff --git a/linux-core/intel_crt.c b/linux-core/intel_crt.c index cdfb314f..d2e1f95c 100644 --- a/linux-core/intel_crt.c +++ b/linux-core/intel_crt.c @@ -34,8 +34,8 @@ static void intel_crt_dpms(struct drm_output *output, int mode) { - drm_device_t *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = output->dev; + struct drm_i915_private *dev_priv = dev->dev_private; u32 temp; temp = I915_READ(ADPA); @@ -93,10 +93,10 @@ static void intel_crt_mode_set(struct drm_output *output, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - drm_device_t *dev = output->dev; + struct drm_device *dev = output->dev; struct drm_crtc *crtc = output->crtc; struct intel_crtc *intel_crtc = crtc->driver_private; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int dpll_md_reg; u32 adpa, dpll_md; @@ -139,8 +139,8 @@ static void intel_crt_mode_set(struct drm_output *output, */ static bool intel_crt_detect_hotplug(struct drm_output *output) { - drm_device_t *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = output->dev; + struct drm_i915_private *dev_priv = dev->dev_private; u32 temp; unsigned long timeout = jiffies + msecs_to_jiffies(1000); @@ -175,7 +175,7 @@ static bool intel_crt_detect_ddc(struct drm_output *output) static enum drm_output_status intel_crt_detect(struct drm_output *output) { - drm_device_t *dev = output->dev; + struct drm_device *dev = output->dev; if (IS_I945G(dev)| IS_I945GM(dev) || IS_I965G(dev)) { if (intel_crt_detect_hotplug(output)) @@ -221,7 +221,7 @@ static const struct drm_output_funcs intel_crt_output_funcs = { .cleanup = intel_crt_destroy, }; -void intel_crt_init(drm_device_t *dev) +void intel_crt_init(struct drm_device *dev) { struct drm_output *output; struct intel_output *intel_output; diff --git a/linux-core/intel_display.c b/linux-core/intel_display.c index 3fce0941..3ff9f822 100644 --- a/linux-core/intel_display.c +++ b/linux-core/intel_display.c @@ -170,7 +170,7 @@ static const intel_limit_t intel_limits[] = { static const intel_limit_t *intel_limit(struct drm_crtc *crtc) { - drm_device_t *dev = crtc->dev; + struct drm_device *dev = crtc->dev; const intel_limit_t *limit; if (IS_I9XX(dev)) { @@ -278,8 +278,8 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock) static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, int refclk, intel_clock_t *best_clock) { - drm_device_t *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; intel_clock_t clock; const intel_limit_t *limit = intel_limit(crtc); int err = target; @@ -334,9 +334,9 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target, } void -intel_set_vblank(drm_device_t *dev) +intel_set_vblank(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc; struct intel_crtc *intel_crtc; int vbl_pipe = 0; @@ -352,7 +352,7 @@ intel_set_vblank(drm_device_t *dev) i915_enable_interrupt(dev); } void -intel_wait_for_vblank(drm_device_t *dev) +intel_wait_for_vblank(struct drm_device *dev) { /* Wait for 20ms, i.e. one cycle at 50hz. 
*/ udelay(20000); @@ -361,8 +361,8 @@ intel_wait_for_vblank(drm_device_t *dev) void intel_pipe_set_base(struct drm_crtc *crtc, int x, int y) { - drm_device_t *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = crtc->driver_private; int pipe = intel_crtc->pipe; unsigned long Start, Offset; @@ -389,12 +389,12 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y) switch (pipe) { case 0: - dev_priv->sarea_priv->pipeA_x = x; - dev_priv->sarea_priv->pipeA_y = y; + dev_priv->sarea_priv->planeA_x = x; + dev_priv->sarea_priv->planeA_y = y; break; case 1: - dev_priv->sarea_priv->pipeB_x = x; - dev_priv->sarea_priv->pipeB_y = y; + dev_priv->sarea_priv->planeB_x = x; + dev_priv->sarea_priv->planeB_y = y; break; default: DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); @@ -410,8 +410,8 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y) */ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) { - drm_device_t *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = crtc->driver_private; int pipe = intel_crtc->pipe; int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B; @@ -513,12 +513,12 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) switch (pipe) { case 0: - dev_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; - dev_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; + dev_priv->sarea_priv->planeA_w = enabled ? crtc->mode.hdisplay : 0; + dev_priv->sarea_priv->planeA_h = enabled ? crtc->mode.vdisplay : 0; break; case 1: - dev_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; - dev_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; + dev_priv->sarea_priv->planeB_w = enabled ? crtc->mode.hdisplay : 0; + dev_priv->sarea_priv->planeB_h = enabled ? crtc->mode.vdisplay : 0; break; default: DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); @@ -576,7 +576,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, /** Returns the core display clock speed for i830 - i945 */ -static int intel_get_core_clock_speed(drm_device_t *dev) +static int intel_get_core_clock_speed(struct drm_device *dev) { /* Core clock values taken from the published datasheets. @@ -636,9 +636,9 @@ static int intel_get_core_clock_speed(drm_device_t *dev) * Return the pipe currently connected to the panel fitter, * or -1 if the panel fitter is not present or not in use */ -static int intel_panel_fitter_pipe (drm_device_t *dev) +static int intel_panel_fitter_pipe (struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 pfit_control; /* i830 doesn't have a panel fitter */ @@ -664,8 +664,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode, int x, int y) { - drm_device_t *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = crtc->driver_private; int pipe = intel_crtc->pipe; int fp_reg = (pipe == 0) ? 
FPA0 : FPB0; @@ -938,8 +938,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc, /** Loads the palette/gamma unit for the CRTC with the prepared values */ void intel_crtc_load_lut(struct drm_crtc *crtc) { - drm_device_t *dev = crtc->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = crtc->driver_private; int palreg = (intel_crtc->pipe == 0) ? PALETTE_A : PALETTE_B; int i; @@ -970,7 +970,7 @@ static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, /* Returns the clock of the currently programmed mode of the given pipe. */ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = crtc->driver_private; int pipe = intel_crtc->pipe; u32 dpll = I915_READ((pipe == 0) ? DPLL_A : DPLL_B); @@ -1045,10 +1045,10 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) } /** Returns the currently programmed mode of the given pipe. */ -struct drm_display_mode *intel_crtc_mode_get(drm_device_t *dev, +struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = crtc->driver_private; int pipe = intel_crtc->pipe; struct drm_display_mode *mode; @@ -1089,7 +1089,7 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { }; -void intel_crtc_init(drm_device_t *dev, int pipe) +void intel_crtc_init(struct drm_device *dev, int pipe) { struct drm_crtc *crtc; struct intel_crtc *intel_crtc; @@ -1115,7 +1115,7 @@ void intel_crtc_init(drm_device_t *dev, int pipe) crtc->driver_private = intel_crtc; } -struct drm_crtc *intel_get_crtc_from_pipe(drm_device_t *dev, int pipe) +struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe) { struct drm_crtc *crtc = NULL; @@ -1127,7 +1127,7 @@ struct drm_crtc *intel_get_crtc_from_pipe(drm_device_t *dev, int pipe) return crtc; } -int intel_output_clones(drm_device_t *dev, int type_mask) +int intel_output_clones(struct drm_device *dev, int type_mask) { int index_mask = 0; struct drm_output *output; @@ -1143,7 +1143,7 @@ int intel_output_clones(drm_device_t *dev, int type_mask) } -static void intel_setup_outputs(drm_device_t *dev) +static void intel_setup_outputs(struct drm_device *dev) { struct drm_output *output; @@ -1194,7 +1194,7 @@ static void intel_setup_outputs(drm_device_t *dev) } } -void intel_modeset_init(drm_device_t *dev) +void intel_modeset_init(struct drm_device *dev) { int num_pipe; int i; @@ -1223,7 +1223,7 @@ void intel_modeset_init(drm_device_t *dev) //drm_initial_config(dev, false); } -void intel_modeset_cleanup(drm_device_t *dev) +void intel_modeset_cleanup(struct drm_device *dev) { drm_mode_config_cleanup(dev); } diff --git a/linux-core/intel_drv.h b/linux-core/intel_drv.h index 0a03e37b..7de4b924 100644 --- a/linux-core/intel_drv.h +++ b/linux-core/intel_drv.h @@ -38,7 +38,7 @@ #define INTEL_DVO_CHIP_TVOUT 4 struct intel_i2c_chan { - drm_device_t *drm_dev; /* for getting at dev. private (mmio etc.) */ + struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) 
*/ u32 reg; /* GPIO reg */ struct i2c_adapter adapter; struct i2c_algo_bit_data algo; @@ -58,23 +58,23 @@ struct intel_crtc { u8 lut_r[256], lut_g[256], lut_b[256]; }; -struct intel_i2c_chan *intel_i2c_create(drm_device_t *dev, const u32 reg, +struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, const char *name); void intel_i2c_destroy(struct intel_i2c_chan *chan); int intel_ddc_get_modes(struct drm_output *output); extern bool intel_ddc_probe(struct drm_output *output); -extern void intel_crt_init(drm_device_t *dev); -extern void intel_sdvo_init(drm_device_t *dev, int output_device); -extern void intel_lvds_init(drm_device_t *dev); +extern void intel_crt_init(struct drm_device *dev); +extern void intel_sdvo_init(struct drm_device *dev, int output_device); +extern void intel_lvds_init(struct drm_device *dev); extern void intel_crtc_load_lut(struct drm_crtc *crtc); extern void intel_output_prepare (struct drm_output *output); extern void intel_output_commit (struct drm_output *output); -extern struct drm_display_mode *intel_crtc_mode_get(drm_device_t *dev, +extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, struct drm_crtc *crtc); -extern void intel_wait_for_vblank(drm_device_t *dev); -extern struct drm_crtc *intel_get_crtc_from_pipe(drm_device_t *dev, int pipe); +extern void intel_wait_for_vblank(struct drm_device *dev); +extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe); extern int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc); extern int intelfb_remove(struct drm_device *dev, struct drm_crtc *crtc); diff --git a/linux-core/intel_fb.c b/linux-core/intel_fb.c index 3bfbcda3..048295ab 100644 --- a/linux-core/intel_fb.c +++ b/linux-core/intel_fb.c @@ -446,7 +446,7 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc) struct device *device = &dev->pdev->dev; struct drm_framebuffer *fb; struct drm_display_mode *mode = crtc->desired_mode; - drm_buffer_object_t *fbo = NULL; + struct drm_buffer_object *fbo = NULL; int ret; info = framebuffer_alloc(sizeof(struct intelfb_par), device); @@ -468,13 +468,11 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc) fb->bits_per_pixel = 32; fb->pitch = fb->width * ((fb->bits_per_pixel + 1) / 8); fb->depth = 24; - ret = drm_buffer_object_create(dev, - fb->width * fb->height * 4, + ret = drm_buffer_object_create(dev, fb->width * fb->height * 4, drm_bo_type_kernel, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | - DRM_BO_FLAG_MEM_VRAM | /* FIXME! 
*/ - DRM_BO_FLAG_NO_MOVE, + DRM_BO_FLAG_MEM_VRAM, 0, 0, 0, &fbo); if (ret || !fbo) { @@ -483,6 +481,15 @@ int intelfb_probe(struct drm_device *dev, struct drm_crtc *crtc) framebuffer_release(info); return -EINVAL; } + + ret = drm_bo_set_pin(dev, fbo, 1); + if (ret) { + printk(KERN_ERR "failed to pin framebuffer, aborting\n"); + drm_framebuffer_destroy(fb); + framebuffer_release(info); + return -EINVAL; + } + fb->offset = fbo->offset; fb->bo = fbo; printk("allocated %dx%d fb: 0x%08lx, bo %p\n", fb->width, diff --git a/linux-core/intel_i2c.c b/linux-core/intel_i2c.c index d4cf7eef..b512e592 100644 --- a/linux-core/intel_i2c.c +++ b/linux-core/intel_i2c.c @@ -46,7 +46,7 @@ static int get_clock(void *data) { struct intel_i2c_chan *chan = data; - drm_i915_private_t *dev_priv = chan->drm_dev->dev_private; + struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; u32 val; val = I915_READ(chan->reg); @@ -56,7 +56,7 @@ static int get_clock(void *data) static int get_data(void *data) { struct intel_i2c_chan *chan = data; - drm_i915_private_t *dev_priv = chan->drm_dev->dev_private; + struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; u32 val; val = I915_READ(chan->reg); @@ -66,8 +66,8 @@ static int get_data(void *data) static void set_clock(void *data, int state_high) { struct intel_i2c_chan *chan = data; - drm_device_t *dev = chan->drm_dev; - drm_i915_private_t *dev_priv = chan->drm_dev->dev_private; + struct drm_device *dev = chan->drm_dev; + struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; u32 reserved = 0, clock_bits; /* On most chips, these bits must be preserved in software. */ @@ -87,8 +87,8 @@ static void set_clock(void *data, int state_high) static void set_data(void *data, int state_high) { struct intel_i2c_chan *chan = data; - drm_device_t *dev = chan->drm_dev; - drm_i915_private_t *dev_priv = chan->drm_dev->dev_private; + struct drm_device *dev = chan->drm_dev; + struct drm_i915_private *dev_priv = chan->drm_dev->dev_private; u32 reserved = 0, data_bits; /* On most chips, these bits must be preserved in software. */ @@ -127,7 +127,7 @@ static void set_data(void *data, int state_high) * %GPIOH * see PRM for details on how these different busses are used. 
*/ -struct intel_i2c_chan *intel_i2c_create(drm_device_t *dev, const u32 reg, +struct intel_i2c_chan *intel_i2c_create(struct drm_device *dev, const u32 reg, const char *name) { struct intel_i2c_chan *chan; diff --git a/linux-core/intel_lvds.c b/linux-core/intel_lvds.c index 942eb2ab..4f15c13a 100644 --- a/linux-core/intel_lvds.c +++ b/linux-core/intel_lvds.c @@ -43,7 +43,7 @@ */ static void intel_lvds_set_backlight(struct drm_device *dev, int level) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 blc_pwm_ctl; blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; @@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level) */ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; @@ -67,7 +67,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev) */ static void intel_lvds_set_power(struct drm_device *dev, bool on) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; u32 pp_status; if (on) { @@ -104,7 +104,7 @@ static void intel_lvds_dpms(struct drm_output *output, int mode) static void intel_lvds_save(struct drm_output *output) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->savePP_ON = I915_READ(LVDSPP_ON); dev_priv->savePP_OFF = I915_READ(LVDSPP_OFF); @@ -125,7 +125,7 @@ static void intel_lvds_save(struct drm_output *output) static void intel_lvds_restore(struct drm_output *output) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); I915_WRITE(LVDSPP_ON, dev_priv->savePP_ON); @@ -142,7 +142,7 @@ static int intel_lvds_mode_valid(struct drm_output *output, struct drm_display_mode *mode) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode; if (fixed_mode) { @@ -160,7 +160,7 @@ static bool intel_lvds_mode_fixup(struct drm_output *output, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = output->crtc->driver_private; struct drm_output *tmp_output; @@ -214,7 +214,7 @@ static bool intel_lvds_mode_fixup(struct drm_output *output, static void intel_lvds_prepare(struct drm_output *output) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & @@ -226,7 +226,7 @@ static void intel_lvds_prepare(struct drm_output *output) static void intel_lvds_commit( struct drm_output *output) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; if (dev_priv->backlight_duty_cycle == 0) dev_priv->backlight_duty_cycle = @@ -240,7 +240,7 @@ static void 
intel_lvds_mode_set(struct drm_output *output, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = output->crtc->driver_private; u32 pfit_control; @@ -285,7 +285,7 @@ static enum drm_output_status intel_lvds_detect(struct drm_output *output) static int intel_lvds_get_modes(struct drm_output *output) { struct drm_device *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; int ret = 0; ret = intel_ddc_get_modes(output); @@ -359,7 +359,7 @@ static const struct drm_output_funcs intel_lvds_output_funcs = { */ void intel_lvds_init(struct drm_device *dev) { - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_output *output; struct intel_output *intel_output; struct drm_display_mode *scan; /* *modes, *bios_mode; */ diff --git a/linux-core/intel_sdvo.c b/linux-core/intel_sdvo.c index 196298ff..11f3a0a6 100644 --- a/linux-core/intel_sdvo.c +++ b/linux-core/intel_sdvo.c @@ -62,8 +62,8 @@ struct intel_sdvo_priv { */ static void intel_sdvo_write_sdvox(struct drm_output *output, u32 val) { - drm_device_t *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = output->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = output->driver_private; struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; u32 bval = val, cval = val; @@ -567,8 +567,8 @@ static void intel_sdvo_mode_set(struct drm_output *output, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { - drm_device_t *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = output->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_crtc *crtc = output->crtc; struct intel_crtc *intel_crtc = crtc->driver_private; struct intel_output *intel_output = output->driver_private; @@ -698,8 +698,8 @@ static void intel_sdvo_mode_set(struct drm_output *output, static void intel_sdvo_dpms(struct drm_output *output, int mode) { - drm_device_t *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = output->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = output->driver_private; struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; u32 temp; @@ -745,8 +745,8 @@ static void intel_sdvo_dpms(struct drm_output *output, int mode) static void intel_sdvo_save(struct drm_output *output) { - drm_device_t *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = output->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = output->driver_private; struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; int o; @@ -782,8 +782,8 @@ static void intel_sdvo_save(struct drm_output *output) static void intel_sdvo_restore(struct drm_output *output) { - drm_device_t *dev = output->dev; - drm_i915_private_t *dev_priv = dev->dev_private; + struct drm_device *dev = output->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_output *intel_output = output->driver_private; struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; int o; @@ -941,7 +941,7 @@ static const struct drm_output_funcs intel_sdvo_output_funcs = { 
.cleanup = intel_sdvo_destroy }; -void intel_sdvo_init(drm_device_t *dev, int output_device) +void intel_sdvo_init(struct drm_device *dev, int output_device) { struct drm_output *output; struct intel_output *intel_output; diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c index ef6f1e44..1eb6d9e6 100644 --- a/linux-core/mga_drv.c +++ b/linux-core/mga_drv.c @@ -36,7 +36,7 @@ #include "drm_pciids.h" -static int mga_driver_device_is_agp(drm_device_t * dev); +static int mga_driver_device_is_agp(struct drm_device * dev); static struct pci_device_id pciidlist[] = { mga_PCI_IDS @@ -127,7 +127,7 @@ MODULE_LICENSE("GPL and additional rights"); * \returns * If the device is a PCI G450, zero is returned. Otherwise 2 is returned. */ -static int mga_driver_device_is_agp(drm_device_t * dev) +static int mga_driver_device_is_agp(struct drm_device * dev) { const struct pci_dev * const pdev = dev->pdev; diff --git a/linux-core/nouveau_dma.c b/linux-core/nouveau_dma.c new file mode 120000 index 00000000..f8e0bdc3 --- /dev/null +++ b/linux-core/nouveau_dma.c @@ -0,0 +1 @@ +../shared-core/nouveau_dma.c
\ No newline at end of file diff --git a/linux-core/nouveau_dma.h b/linux-core/nouveau_dma.h new file mode 120000 index 00000000..a545e387 --- /dev/null +++ b/linux-core/nouveau_dma.h @@ -0,0 +1 @@ +../shared-core/nouveau_dma.h
\ No newline at end of file diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c index ac030d89..6c73b0d3 100644 --- a/linux-core/nouveau_drv.c +++ b/linux-core/nouveau_drv.c @@ -32,7 +32,7 @@ static struct pci_device_id pciidlist[] = { nouveau_PCI_IDS }; -extern drm_ioctl_desc_t nouveau_ioctls[]; +extern struct drm_ioctl_desc nouveau_ioctls[]; extern int nouveau_max_ioctl; static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c new file mode 100644 index 00000000..97d5330b --- /dev/null +++ b/linux-core/nouveau_sgdma.c @@ -0,0 +1,335 @@ +#include "drmP.h" +#include "nouveau_drv.h" + +#define NV_CTXDMA_PAGE_SHIFT 12 +#define NV_CTXDMA_PAGE_SIZE (1 << NV_CTXDMA_PAGE_SHIFT) +#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1) + +struct nouveau_sgdma_be { + struct drm_ttm_backend backend; + struct drm_device *dev; + + int pages; + int pages_populated; + dma_addr_t *pagelist; + int is_bound; + + unsigned int pte_start; +}; + +static int +nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be) +{ + return ((be->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1); +} + +static int +nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages, + struct page **pages) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int p, d, o; + + DRM_DEBUG("num_pages = %ld\n", num_pages); + + if (nvbe->pagelist) + return -EINVAL; + nvbe->pages = (num_pages << PAGE_SHIFT) >> NV_CTXDMA_PAGE_SHIFT; + nvbe->pagelist = drm_alloc(nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + + nvbe->pages_populated = d = 0; + for (p = 0; p < num_pages; p++) { + for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) { + nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev, + pages[p], o, + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + if (pci_dma_mapping_error(nvbe->pagelist[d])) { + be->func->clear(be); + DRM_ERROR("pci_map_page failed\n"); + return -EINVAL; + } + nvbe->pages_populated = ++d; + } + } + + return 0; +} + +static void +nouveau_sgdma_clear(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + int d; + + DRM_DEBUG("\n"); + + if (nvbe && nvbe->pagelist) { + if (nvbe->is_bound) + be->func->unbind(be); + + for (d = 0; d < nvbe->pages_populated; d++) { + pci_unmap_page(nvbe->dev->pdev, nvbe->pagelist[d], + NV_CTXDMA_PAGE_SIZE, + PCI_DMA_BIDIRECTIONAL); + } + drm_free(nvbe->pagelist, nvbe->pages*sizeof(dma_addr_t), + DRM_MEM_PAGES); + } +} + +static int +nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start, + int cached) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + uint64_t offset = (pg_start << PAGE_SHIFT); + uint32_t i; + + DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached); + + if (offset & NV_CTXDMA_PAGE_MASK) + return -EINVAL; + nvbe->pte_start = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) + nvbe->pte_start += 2; /* skip ctxdma header */ + + for (i = nvbe->pte_start; i < nvbe->pte_start + nvbe->pages; i++) { + uint64_t pteval = nvbe->pagelist[i - nvbe->pte_start]; + + if (pteval & NV_CTXDMA_PAGE_MASK) { + DRM_ERROR("Bad pteval 0x%llx\n", pteval); + return -EINVAL; + } + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, i, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (i<<1)+0, pteval | 0x21); + INSTANCE_WR(gpuobj, (i<<1)+1, 
0x00000000); + } + } + + nvbe->is_bound = 1; + return 0; +} + +static int +nouveau_sgdma_unbind(struct drm_ttm_backend *be) +{ + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private; + + DRM_DEBUG("\n"); + + if (nvbe->is_bound) { + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + unsigned int pte; + + pte = nvbe->pte_start; + while (pte < (nvbe->pte_start + nvbe->pages)) { + uint64_t pteval = dev_priv->gart_info.sg_dummy_bus; + + if (dev_priv->card_type < NV_50) { + INSTANCE_WR(gpuobj, pte, pteval | 3); + } else { + INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010); + INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004); + } + + pte++; + } + + nvbe->is_bound = 0; + } + + return 0; +} + +static void +nouveau_sgdma_destroy(struct drm_ttm_backend *be) +{ + DRM_DEBUG("\n"); + if (be) { + struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be; + if (nvbe) { + if (nvbe->pagelist) + be->func->clear(be); + drm_ctl_free(nvbe, sizeof(*nvbe), DRM_MEM_TTM); + } + } +} + +static struct drm_ttm_backend_func nouveau_sgdma_backend = { + .needs_ub_cache_adjust = nouveau_sgdma_needs_ub_cache_adjust, + .populate = nouveau_sgdma_populate, + .clear = nouveau_sgdma_clear, + .bind = nouveau_sgdma_bind, + .unbind = nouveau_sgdma_unbind, + .destroy = nouveau_sgdma_destroy +}; + +struct drm_ttm_backend * +nouveau_sgdma_init_ttm(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_sgdma_be *nvbe; + + if (!dev_priv->gart_info.sg_ctxdma) + return NULL; + + nvbe = drm_ctl_calloc(1, sizeof(*nvbe), DRM_MEM_TTM); + if (!nvbe) + return NULL; + + nvbe->dev = dev; + + nvbe->backend.func = &nouveau_sgdma_backend; + nvbe->backend.mem_type = DRM_BO_MEM_TT; + + return &nvbe->backend; +} + +int +nouveau_sgdma_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = NULL; + uint32_t aper_size, obj_size; + int i, ret; + + if (dev_priv->card_type < NV_50) { + aper_size = (64 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 4; + obj_size += 8; /* ctxdma header */ + } else { + /* 1 entire VM page table */ + aper_size = (512 * 1024 * 1024); + obj_size = (aper_size >> NV_CTXDMA_PAGE_SHIFT) * 8; + } + + if ((ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16, + NVOBJ_FLAG_ALLOW_NO_REFS | + NVOBJ_FLAG_ZERO_ALLOC | + NVOBJ_FLAG_ZERO_FREE, &gpuobj))) { + DRM_ERROR("Error creating sgdma object: %d\n", ret); + return ret; + } + + if (dev_priv->card_type < NV_50) { + dev_priv->gart_info.sg_dummy_page = + alloc_page(GFP_KERNEL|__GFP_DMA32); + SetPageLocked(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_bus = + pci_map_page(dev->pdev, + dev_priv->gart_info.sg_dummy_page, 0, + PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + + /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and + * confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE + * on those cards? 
*/ + INSTANCE_WR(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY | + (1 << 12) /* PT present */ | + (0 << 13) /* PT *not* linear */ | + (NV_DMA_ACCESS_RW << 14) | + (NV_DMA_TARGET_PCI << 16)); + INSTANCE_WR(gpuobj, 1, aper_size - 1); + for (i=2; i<2+(aper_size>>12); i++) { + INSTANCE_WR(gpuobj, i, + dev_priv->gart_info.sg_dummy_bus | 3); + } + } else { + for (i=0; i<obj_size; i+=8) { + INSTANCE_WR(gpuobj, (i+0)/4, 0); //x00000010); + INSTANCE_WR(gpuobj, (i+4)/4, 0); //0x00000004); + } + } + + dev_priv->gart_info.type = NOUVEAU_GART_SGDMA; + dev_priv->gart_info.aper_base = 0; + dev_priv->gart_info.aper_size = aper_size; + dev_priv->gart_info.sg_ctxdma = gpuobj; + return 0; +} + +void +nouveau_sgdma_takedown(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + + if (dev_priv->gart_info.sg_dummy_page) { + pci_unmap_page(dev->pdev, dev_priv->gart_info.sg_dummy_bus, + NV_CTXDMA_PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); + unlock_page(dev_priv->gart_info.sg_dummy_page); + __free_page(dev_priv->gart_info.sg_dummy_page); + dev_priv->gart_info.sg_dummy_page = NULL; + dev_priv->gart_info.sg_dummy_bus = 0; + } + + nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma); +} + +int +nouveau_sgdma_nottm_hack_init(struct drm_device *dev) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct drm_ttm_backend *be; + struct drm_scatter_gather sgreq; + int ret; + + dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev); + if (!dev_priv->gart_info.sg_be) + return -ENOMEM; + be = dev_priv->gart_info.sg_be; + + /* Hack the aperture size down to the amount of system memory + * we're going to bind into it. + */ + if (dev_priv->gart_info.aper_size > 32*1024*1024) + dev_priv->gart_info.aper_size = 32*1024*1024; + + sgreq.size = dev_priv->gart_info.aper_size; + if ((ret = drm_sg_alloc(dev, &sgreq))) { + DRM_ERROR("drm_sg_alloc failed: %d\n", ret); + return ret; + } + dev_priv->gart_info.sg_handle = sgreq.handle; + + if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) { + DRM_ERROR("failed populate: %d\n", ret); + return ret; + } + + if ((ret = be->func->bind(be, 0, 0))) { + DRM_ERROR("failed bind: %d\n", ret); + return ret; + } + + return 0; +} + +void +nouveau_sgdma_nottm_hack_takedown(struct drm_device *dev) +{ +} + +int +nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page) +{ + struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma; + int pte; + + pte = (offset >> NV_CTXDMA_PAGE_SHIFT); + if (dev_priv->card_type < NV_50) { + *page = INSTANCE_RD(gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK; + return 0; + } + + DRM_ERROR("Unimplemented on NV50\n"); + return -EINVAL; +} + diff --git a/linux-core/nv04_instmem.c b/linux-core/nv04_instmem.c new file mode 120000 index 00000000..e720fb5b --- /dev/null +++ b/linux-core/nv04_instmem.c @@ -0,0 +1 @@ +../shared-core/nv04_instmem.c
\ No newline at end of file diff --git a/linux-core/nv50_instmem.c b/linux-core/nv50_instmem.c new file mode 120000 index 00000000..4e45344a --- /dev/null +++ b/linux-core/nv50_instmem.c @@ -0,0 +1 @@ +../shared-core/nv50_instmem.c
\ No newline at end of file diff --git a/linux-core/sis_drv.c b/linux-core/sis_drv.c index 114ec8f9..c9112c63 100644 --- a/linux-core/sis_drv.c +++ b/linux-core/sis_drv.c @@ -36,14 +36,14 @@ static struct pci_device_id pciidlist[] = { }; -static int sis_driver_load(drm_device_t *dev, unsigned long chipset) +static int sis_driver_load(struct drm_device *dev, unsigned long chipset) { drm_sis_private_t *dev_priv; int ret; dev_priv = drm_calloc(1, sizeof(drm_sis_private_t), DRM_MEM_DRIVER); if (dev_priv == NULL) - return DRM_ERR(ENOMEM); + return -ENOMEM; dev->dev_private = (void *)dev_priv; dev_priv->chipset = chipset; @@ -55,7 +55,7 @@ static int sis_driver_load(drm_device_t *dev, unsigned long chipset) return ret; } -static int sis_driver_unload(drm_device_t *dev) +static int sis_driver_unload(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c index 21c1f2d7..7e162a8e 100644 --- a/linux-core/sis_mm.c +++ b/linux-core/sis_mm.c @@ -81,19 +81,16 @@ unsigned long sis_sman_mm_offset(void *private, void *ref) #endif -static int sis_fb_init(DRM_IOCTL_ARGS) +static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_fb_t fb; + drm_sis_fb_t *fb = data; int ret; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_sis_fb_t __user *) data, sizeof(fb)); - mutex_lock(&dev->struct_mutex); #if defined(__linux__) && defined(CONFIG_FB_SIS) { - drm_sman_mm_t sman_mm; + struct drm_sman_mm sman_mm; sman_mm.private = (void *)0xFFFFFFFF; sman_mm.allocate = sis_sman_mm_allocate; sman_mm.free = sis_sman_mm_free; @@ -104,7 +101,7 @@ static int sis_fb_init(DRM_IOCTL_ARGS) } #else ret = drm_sman_set_range(&dev_priv->sman, VIDEO_TYPE, 0, - fb.size >> SIS_MM_ALIGN_SHIFT); + fb->size >> SIS_MM_ALIGN_SHIFT); #endif if (ret) { @@ -114,24 +111,21 @@ static int sis_fb_init(DRM_IOCTL_ARGS) } dev_priv->vram_initialized = 1; - dev_priv->vram_offset = fb.offset; + dev_priv->vram_offset = fb->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); return 0; } -static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv, - unsigned long data, int pool) +static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv, + void *data, int pool) { drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t __user *argp = (drm_sis_mem_t __user *) data; - drm_sis_mem_t mem; + drm_sis_mem_t *mem = data; int retval = 0; - drm_memblock_item_t *item; - - DRM_COPY_FROM_USER_IOCTL(mem, argp, sizeof(mem)); + struct drm_memblock_item *item; mutex_lock(&dev->struct_mutex); @@ -139,73 +133,65 @@ static int sis_drm_alloc(drm_device_t * dev, drm_file_t * priv, dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } - mem.size = (mem.size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; - item = drm_sman_alloc(&dev_priv->sman, pool, mem.size, 0, - (unsigned long)priv); + mem->size = (mem->size + SIS_MM_ALIGN_MASK) >> SIS_MM_ALIGN_SHIFT; + item = drm_sman_alloc(&dev_priv->sman, pool, mem->size, 0, + (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { - mem.offset = ((pool == 0) ? + mem->offset = ((pool == 0) ? 
dev_priv->vram_offset : dev_priv->agp_offset) + (item->mm-> offset(item->mm, item->mm_info) << SIS_MM_ALIGN_SHIFT); - mem.free = item->user_hash.key; - mem.size = mem.size << SIS_MM_ALIGN_SHIFT; + mem->free = item->user_hash.key; + mem->size = mem->size << SIS_MM_ALIGN_SHIFT; } else { - mem.offset = 0; - mem.size = 0; - mem.free = 0; - retval = DRM_ERR(ENOMEM); + mem->offset = 0; + mem->size = 0; + mem->free = 0; + retval = -ENOMEM; } - DRM_COPY_TO_USER_IOCTL(argp, mem, sizeof(mem)); - - DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem.size, - mem.offset); + DRM_DEBUG("alloc %d, size = %d, offset = %d\n", pool, mem->size, + mem->offset); return retval; } -static int sis_drm_free(DRM_IOCTL_ARGS) +static int sis_drm_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_mem_t mem; + drm_sis_mem_t *mem = data; int ret; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_sis_mem_t __user *) data, - sizeof(mem)); - mutex_lock(&dev->struct_mutex); - ret = drm_sman_free_key(&dev_priv->sman, mem.free); + ret = drm_sman_free_key(&dev_priv->sman, mem->free); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("free = 0x%lx\n", mem.free); + DRM_DEBUG("free = 0x%lx\n", mem->free); return ret; } -static int sis_fb_alloc(DRM_IOCTL_ARGS) +static int sis_fb_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - return sis_drm_alloc(dev, priv, data, VIDEO_TYPE); + return sis_drm_alloc(dev, file_priv, data, VIDEO_TYPE); } -static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_init(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; drm_sis_private_t *dev_priv = dev->dev_private; - drm_sis_agp_t agp; + drm_sis_agp_t *agp = data; int ret; dev_priv = dev->dev_private; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_sis_agp_t __user *) data, - sizeof(agp)); mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, AGP_TYPE, 0, - agp.size >> SIS_MM_ALIGN_SHIFT); + agp->size >> SIS_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("AGP memory manager initialisation error\n"); @@ -214,23 +200,23 @@ static int sis_ioctl_agp_init(DRM_IOCTL_ARGS) } dev_priv->agp_initialized = 1; - dev_priv->agp_offset = agp.offset; + dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; } -static int sis_ioctl_agp_alloc(DRM_IOCTL_ARGS) +static int sis_ioctl_agp_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - return sis_drm_alloc(dev, priv, data, AGP_TYPE); + return sis_drm_alloc(dev, file_priv, data, AGP_TYPE); } -static drm_local_map_t *sis_reg_init(drm_device_t *dev) +static drm_local_map_t *sis_reg_init(struct drm_device *dev) { - drm_map_list_t *entry; + struct drm_map_list *entry; drm_local_map_t *map; list_for_each_entry(entry, &dev->maplist, head) { @@ -245,7 +231,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev) } int -sis_idle(drm_device_t *dev) +sis_idle(struct drm_device *dev) { drm_sis_private_t *dev_priv = dev->dev_private; uint32_t idle_reg; @@ -314,13 +300,13 @@ void sis_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void sis_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { drm_sis_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = 
filp->private_data; mutex_lock(&dev->struct_mutex); - if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { + if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) { mutex_unlock(&dev->struct_mutex); return; } @@ -329,20 +315,18 @@ void sis_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) dev->driver->dma_quiescent(dev); } - drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); + drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); return; } -drm_ioctl_desc_t sis_ioctls[] = { - [DRM_IOCTL_NR(DRM_SIS_FB_ALLOC)] = {sis_fb_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_FREE)] = {sis_drm_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_AGP_INIT)] = - {sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY}, - [DRM_IOCTL_NR(DRM_SIS_AGP_ALLOC)] = {sis_ioctl_agp_alloc, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_AGP_FREE)] = {sis_drm_free, DRM_AUTH}, - [DRM_IOCTL_NR(DRM_SIS_FB_INIT)] = - {sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY} +struct drm_ioctl_desc sis_ioctls[] = { + DRM_IOCTL_DEF(DRM_SIS_FB_ALLOC, sis_fb_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_INIT, sis_ioctl_agp_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_SIS_AGP_ALLOC, sis_ioctl_agp_alloc, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_AGP_FREE, sis_drm_free, DRM_AUTH), + DRM_IOCTL_DEF(DRM_SIS_FB_INIT, sis_fb_init, DRM_AUTH | DRM_MASTER | DRM_ROOT_ONLY), }; int sis_max_ioctl = DRM_ARRAY_SIZE(sis_ioctls); diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c index ebc8c371..eb5ea826 100644 --- a/linux-core/via_buffer.c +++ b/linux-core/via_buffer.c @@ -32,19 +32,18 @@ #include "via_drm.h" #include "via_drv.h" -drm_ttm_backend_t *via_create_ttm_backend_entry(drm_device_t * dev) +struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev) { return drm_agp_init_ttm(dev); } -int via_fence_types(drm_buffer_object_t *bo, uint32_t * class, uint32_t * type) +int via_fence_types(struct drm_buffer_object *bo, uint32_t * type) { - *class = 0; *type = 3; return 0; } -int via_invalidate_caches(drm_device_t * dev, uint32_t flags) +int via_invalidate_caches(struct drm_device * dev, uint64_t flags) { /* * FIXME: Invalidate texture caches here. 
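The sis_mm.c conversion above, like the via_mm.c and via_dmablit.c conversions below, moves the driver to the new ioctl calling convention: a handler receives the device, a kernel-side copy of the argument block and the opening drm_file, so the DRM_DEVICE, DRM_COPY_FROM_USER_IOCTL and DRM_COPY_TO_USER_IOCTL boilerplate disappears and the table is built with DRM_IOCTL_DEF. A sketch of a handler in the new style; drm_foo_t, DRM_FOO_GET and foo_get are hypothetical names, not part of this patch:

	typedef struct {
		unsigned int value;
	} drm_foo_t;

	/* The core has already copied the caller's drm_foo_t into *data and,
	 * for ioctls that return data, copies it back after the handler runs. */
	static int foo_get(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
	{
		drm_foo_t *arg = data;

		arg->value = 0;		/* results are written straight into *data */
		return 0;
	}

	struct drm_ioctl_desc foo_ioctls[] = {
		DRM_IOCTL_DEF(DRM_FOO_GET, foo_get, DRM_AUTH),
	};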
@@ -54,14 +53,14 @@ int via_invalidate_caches(drm_device_t * dev, uint32_t flags) } -static int via_vram_info(drm_device_t *dev, +static int via_vram_info(struct drm_device *dev, unsigned long *offset, unsigned long *size) { struct pci_dev *pdev = dev->pdev; unsigned long flags; - int ret = DRM_ERR(EINVAL); + int ret = -EINVAL; int i; for (i=0; i<6; ++i) { flags = pci_resource_flags(pdev, i); @@ -82,8 +81,8 @@ static int via_vram_info(drm_device_t *dev, return 0; } -int via_init_mem_type(drm_device_t * dev, uint32_t type, - drm_mem_type_manager_t * man) +int via_init_mem_type(struct drm_device * dev, uint32_t type, + struct drm_mem_type_manager * man) { switch (type) { case DRM_BO_MEM_LOCAL: @@ -144,7 +143,7 @@ int via_init_mem_type(drm_device_t * dev, uint32_t type, return 0; } -uint32_t via_evict_mask(drm_buffer_object_t *bo) +uint32_t via_evict_mask(struct drm_buffer_object *bo) { switch (bo->mem.mem_type) { case DRM_BO_MEM_LOCAL: diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c index 2f508374..d44c26f4 100644 --- a/linux-core/via_dmablit.c +++ b/linux-core/via_dmablit.c @@ -206,7 +206,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg) */ static void -via_fire_dmablit(drm_device_t *dev, drm_via_sg_info_t *vsg, int engine) +via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -236,7 +236,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) first_pfn + 1; if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages))) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages); down_read(&current->mm->mmap_sem); ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr, @@ -248,7 +248,7 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) if (ret < 0) return ret; vsg->state = dr_via_pages_locked; - return DRM_ERR(EINVAL); + return -EINVAL; } vsg->state = dr_via_pages_locked; DRM_DEBUG("DMA pages locked\n"); @@ -271,14 +271,14 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) vsg->descriptors_per_page; if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL))) - return DRM_ERR(ENOMEM); + return -ENOMEM; memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages); vsg->state = dr_via_desc_pages_alloc; for (i=0; i<vsg->num_desc_pages; ++i) { if (NULL == (vsg->desc_pages[i] = (drm_via_descriptor_t *) __get_free_page(GFP_KERNEL))) - return DRM_ERR(ENOMEM); + return -ENOMEM; } DRM_DEBUG("Allocated %d pages for %d descriptors.\n", vsg->num_desc_pages, vsg->num_desc); @@ -286,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg) } static void -via_abort_dmablit(drm_device_t *dev, int engine) +via_abort_dmablit(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -294,7 +294,7 @@ via_abort_dmablit(drm_device_t *dev, int engine) } static void -via_dmablit_engine_off(drm_device_t *dev, int engine) +via_dmablit_engine_off(struct drm_device *dev, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -311,7 +311,7 @@ via_dmablit_engine_off(drm_device_t *dev, int engine) */ void -via_dmablit_handler(drm_device_t *dev, int engine, int from_irq) +via_dmablit_handler(struct drm_device *dev, int engine, int from_irq) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_blitq_t *blitq = dev_priv->blit_queues + engine;
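The via_buffer.c and via_dmablit.c hunks above also drop the DRM_ERR() wrapper in favour of plain negative errno returns, a convention applied throughout the VIA and SiS files in this series. Reduced to a hypothetical check:

	static int foo_validate(int num_lines)
	{
		if (num_lines <= 0)
			return -EINVAL;	/* previously: return DRM_ERR(EINVAL); */
		return 0;
	}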
@@ -432,7 +432,7 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que */ static int -via_dmablit_sync(drm_device_t *dev, uint32_t handle, int engine) +via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -465,7 +465,7 @@ static void via_dmablit_timer(unsigned long data) { drm_via_blitq_t *blitq = (drm_via_blitq_t *) data; - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; int engine = (int) (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues); @@ -509,7 +509,7 @@ via_dmablit_workqueue(struct work_struct *work) #else drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq); #endif - drm_device_t *dev = blitq->dev; + struct drm_device *dev = blitq->dev; unsigned long irqsave; drm_via_sg_info_t *cur_sg; int cur_released; @@ -552,7 +552,7 @@ via_dmablit_workqueue(struct work_struct *work) void -via_init_dmablit(drm_device_t *dev) +via_init_dmablit(struct drm_device *dev) { int i,j; drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; @@ -568,7 +568,7 @@ via_init_dmablit(drm_device_t *dev) blitq->head = 0; blitq->cur = 0; blitq->serviced = 0; - blitq->num_free = VIA_NUM_BLIT_SLOTS; + blitq->num_free = VIA_NUM_BLIT_SLOTS - 1; blitq->num_outstanding = 0; blitq->is_active = 0; blitq->aborting = 0; @@ -594,7 +594,7 @@ via_init_dmablit(drm_device_t *dev) static int -via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) +via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer) { int draw = xfer->to_fb; int ret = 0; @@ -606,7 +606,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t * if (xfer->num_lines <= 0 || xfer->line_length <= 0) { DRM_ERROR("Zero size bitblt.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -619,7 +619,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t * if ((xfer->mem_stride - xfer->line_length) >= PAGE_SIZE) { DRM_ERROR("Too large system memory stride. 
Stride: %d, " "Length: %d\n", xfer->mem_stride, xfer->line_length); - return DRM_ERR(EINVAL); + return -EINVAL; } if ((xfer->mem_stride == xfer->line_length) && @@ -637,7 +637,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t * if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) { DRM_ERROR("Too large PCI DMA bitblt.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -648,7 +648,7 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t * if (xfer->mem_stride < xfer->line_length || abs(xfer->fb_stride) < xfer->line_length) { DRM_ERROR("Invalid frame-buffer / memory stride.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } /* @@ -661,13 +661,13 @@ via_build_sg_info(drm_device_t *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t * if ((((unsigned long)xfer->mem_addr & 3) != ((unsigned long)xfer->fb_addr & 3)) || ((xfer->num_lines > 1) && ((xfer->mem_stride & 3) != (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } #else if ((((unsigned long)xfer->mem_addr & 15) || ((unsigned long)xfer->fb_addr & 3)) || ((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) { DRM_ERROR("Invalid DRM bitblt alignment.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } #endif @@ -707,7 +707,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine) DRM_WAIT_ON(ret, blitq->busy_queue, DRM_HZ, blitq->num_free > 0); if (ret) { - return (DRM_ERR(EINTR) == ret) ? DRM_ERR(EAGAIN) : ret; + return (-EINTR == ret) ? -EAGAIN : ret; } spin_lock_irqsave(&blitq->blit_lock, irqsave); @@ -740,7 +740,7 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq) static int -via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) +via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer) { drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private; drm_via_sg_info_t *vsg; @@ -751,7 +751,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) if (dev_priv == NULL) { DRM_ERROR("Called without initialization.\n"); - return DRM_ERR(EINVAL); + return -EINVAL; } engine = (xfer->to_fb) ? 
0 : 1; @@ -761,7 +761,7 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) } if (NULL == (vsg = kmalloc(sizeof(*vsg), GFP_KERNEL))) { via_dmablit_release_slot(blitq); - return DRM_ERR(ENOMEM); + return -ENOMEM; } if (0 != (ret = via_build_sg_info(dev, vsg, xfer))) { via_dmablit_release_slot(blitq); @@ -792,21 +792,18 @@ via_dmablit(drm_device_t *dev, drm_via_dmablit_t *xfer) */ int -via_dma_blit_sync( DRM_IOCTL_ARGS ) +via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_priv ) { - drm_via_blitsync_t sync; + drm_via_blitsync_t *sync = data; int err; - DRM_DEVICE; - DRM_COPY_FROM_USER_IOCTL(sync, (drm_via_blitsync_t *)data, sizeof(sync)); - - if (sync.engine >= VIA_NUM_BLIT_ENGINES) - return DRM_ERR(EINVAL); + if (sync->engine >= VIA_NUM_BLIT_ENGINES) + return -EINVAL; - err = via_dmablit_sync(dev, sync.sync_handle, sync.engine); + err = via_dmablit_sync(dev, sync->sync_handle, sync->engine); - if (DRM_ERR(EINTR) == err) - err = DRM_ERR(EAGAIN); + if (-EINTR == err) + err = -EAGAIN; return err; } @@ -819,17 +816,12 @@ via_dma_blit_sync( DRM_IOCTL_ARGS ) */ int -via_dma_blit( DRM_IOCTL_ARGS ) +via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv ) { - drm_via_dmablit_t xfer; + drm_via_dmablit_t *xfer = data; int err; - DRM_DEVICE; - - DRM_COPY_FROM_USER_IOCTL(xfer, (drm_via_dmablit_t __user *)data, sizeof(xfer)); - - err = via_dmablit(dev, &xfer); - DRM_COPY_TO_USER_IOCTL((void __user *)data, xfer, sizeof(xfer)); + err = via_dmablit(dev, xfer); return err; } diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h index f6ae03ec..726ad25d 100644 --- a/linux-core/via_dmablit.h +++ b/linux-core/via_dmablit.h @@ -59,7 +59,7 @@ typedef struct _drm_via_sg_info { } drm_via_sg_info_t; typedef struct _drm_via_blitq { - drm_device_t *dev; + struct drm_device *dev; uint32_t cur_blit_handle; uint32_t done_blit_handle; unsigned serviced; diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c index 02249939..a6d4ece9 100644 --- a/linux-core/via_fence.c +++ b/linux-core/via_fence.c @@ -39,10 +39,10 @@ */ -static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) +static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_class_manager_t *fc = &dev->fm.class[class]; + struct drm_fence_class_manager *fc = &dev->fm.class[class]; uint32_t pending_flush_types = 0; uint32_t signaled_flush_types = 0; uint32_t status; @@ -113,7 +113,7 @@ static uint32_t via_perform_flush(drm_device_t *dev, uint32_t class) * Emit a fence sequence. */ -int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, +int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; @@ -142,7 +142,7 @@ int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, *native_type = DRM_FENCE_TYPE_EXE; break; default: - ret = DRM_ERR(EINVAL); + ret = -EINVAL; break; } return ret; @@ -152,10 +152,10 @@ int via_fence_emit_sequence(drm_device_t * dev, uint32_t class, uint32_t flags, * Manual poll (from the fence manager). 
*/ -void via_poke_flush(drm_device_t * dev, uint32_t class) +void via_poke_flush(struct drm_device * dev, uint32_t class) { drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; uint32_t pending_flush; @@ -200,11 +200,11 @@ int via_fence_has_irq(struct drm_device * dev, uint32_t class, void via_fence_timer(unsigned long data) { - drm_device_t *dev = (drm_device_t *) data; + struct drm_device *dev = (struct drm_device *) data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; uint32_t pending_flush; - drm_fence_class_manager_t *fc = &dev->fm.class[0]; + struct drm_fence_class_manager *fc = &dev->fm.class[0]; if (!dev_priv) return; diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c index d97269f5..35ca6bfc 100644 --- a/linux-core/via_mm.c +++ b/linux-core/via_mm.c @@ -33,18 +33,15 @@ #define VIA_MM_ALIGN_SHIFT 4 #define VIA_MM_ALIGN_MASK ( (1 << VIA_MM_ALIGN_SHIFT) - 1) -int via_agp_init(DRM_IOCTL_ARGS) +int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_agp_t agp; + drm_via_agp_t *agp = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; - DRM_COPY_FROM_USER_IOCTL(agp, (drm_via_agp_t __user *) data, - sizeof(agp)); mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_AGP, 0, - agp.size >> VIA_MM_ALIGN_SHIFT); + agp->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("AGP memory manager initialisation error\n"); @@ -53,25 +50,22 @@ int via_agp_init(DRM_IOCTL_ARGS) } dev_priv->agp_initialized = 1; - dev_priv->agp_offset = agp.offset; + dev_priv->agp_offset = agp->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", agp.offset, agp.size); + DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size); return 0; } -int via_fb_init(DRM_IOCTL_ARGS) +int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; - drm_via_fb_t fb; + drm_via_fb_t *fb = data; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; int ret; - DRM_COPY_FROM_USER_IOCTL(fb, (drm_via_fb_t __user *) data, sizeof(fb)); - mutex_lock(&dev->struct_mutex); ret = drm_sman_set_range(&dev_priv->sman, VIA_MEM_VIDEO, 0, - fb.size >> VIA_MM_ALIGN_SHIFT); + fb->size >> VIA_MM_ALIGN_SHIFT); if (ret) { DRM_ERROR("VRAM memory manager initialisation error\n"); @@ -80,10 +74,10 @@ int via_fb_init(DRM_IOCTL_ARGS) } dev_priv->vram_initialized = 1; - dev_priv->vram_offset = fb.offset; + dev_priv->vram_offset = fb->offset; mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("offset = %u, size = %u", fb.offset, fb.size); + DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size); return 0; @@ -123,80 +117,71 @@ void via_lastclose(struct drm_device *dev) mutex_unlock(&dev->struct_mutex); } -int via_mem_alloc(DRM_IOCTL_ARGS) +int via_mem_alloc(struct drm_device *dev, void *data, + struct drm_file *file_priv) { - DRM_DEVICE; - - drm_via_mem_t mem; + drm_via_mem_t *mem = data; int retval = 0; - drm_memblock_item_t *item; + struct drm_memblock_item *item; drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; unsigned long tmpSize; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); - - if (mem.type > VIA_MEM_AGP) { + if (mem->type > VIA_MEM_AGP) { DRM_ERROR("Unknown memory type allocation\n"); - return 
DRM_ERR(EINVAL); + return -EINVAL; } mutex_lock(&dev->struct_mutex); - if (0 == ((mem.type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : + if (0 == ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_initialized : dev_priv->agp_initialized)) { DRM_ERROR ("Attempt to allocate from uninitialized memory manager.\n"); mutex_unlock(&dev->struct_mutex); - return DRM_ERR(EINVAL); + return -EINVAL; } - tmpSize = (mem.size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; - item = drm_sman_alloc(&dev_priv->sman, mem.type, tmpSize, 0, - (unsigned long)priv); + tmpSize = (mem->size + VIA_MM_ALIGN_MASK) >> VIA_MM_ALIGN_SHIFT; + item = drm_sman_alloc(&dev_priv->sman, mem->type, tmpSize, 0, + (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); if (item) { - mem.offset = ((mem.type == VIA_MEM_VIDEO) ? + mem->offset = ((mem->type == VIA_MEM_VIDEO) ? dev_priv->vram_offset : dev_priv->agp_offset) + (item->mm-> offset(item->mm, item->mm_info) << VIA_MM_ALIGN_SHIFT); - mem.index = item->user_hash.key; + mem->index = item->user_hash.key; } else { - mem.offset = 0; - mem.size = 0; - mem.index = 0; + mem->offset = 0; + mem->size = 0; + mem->index = 0; DRM_DEBUG("Video memory allocation failed\n"); - retval = DRM_ERR(ENOMEM); + retval = -ENOMEM; } - DRM_COPY_TO_USER_IOCTL((drm_via_mem_t __user *) data, mem, sizeof(mem)); return retval; } -int via_mem_free(DRM_IOCTL_ARGS) +int via_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { - DRM_DEVICE; drm_via_private_t *dev_priv = dev->dev_private; - drm_via_mem_t mem; + drm_via_mem_t *mem = data; int ret; - DRM_COPY_FROM_USER_IOCTL(mem, (drm_via_mem_t __user *) data, - sizeof(mem)); - mutex_lock(&dev->struct_mutex); - ret = drm_sman_free_key(&dev_priv->sman, mem.index); + ret = drm_sman_free_key(&dev_priv->sman, mem->index); mutex_unlock(&dev->struct_mutex); - DRM_DEBUG("free = 0x%lx\n", mem.index); + DRM_DEBUG("free = 0x%lx\n", mem->index); return ret; } -void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) +void via_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file *file_priv) { drm_via_private_t *dev_priv = dev->dev_private; - drm_file_t *priv = filp->private_data; mutex_lock(&dev->struct_mutex); - if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)priv)) { + if (drm_sman_owner_clean(&dev_priv->sman, (unsigned long)file_priv)) { mutex_unlock(&dev->struct_mutex); return; } @@ -205,7 +190,7 @@ void via_reclaim_buffers_locked(drm_device_t * dev, struct file *filp) dev->driver->dma_quiescent(dev); } - drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)priv); + drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long)file_priv); mutex_unlock(&dev->struct_mutex); return; } diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c new file mode 100644 index 00000000..261f4e13 --- /dev/null +++ b/linux-core/xgi_cmdlist.c @@ -0,0 +1,322 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
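via_mem_alloc() and via_reclaim_buffers_locked() above also switch the drm_sman owner key from the drm_file_t dug out of filp->private_data to the drm_file pointer that is now passed in directly; the new xgi driver added below uses the identical idiom. The pairing of per-client allocation and reclaim looks like this (a condensed sketch built from the calls used in this patch; 'pool' and 'size_in_units' are illustrative locals, not names from the patch):

/* Allocate, tagging the block with the opening drm_file as owner. */
struct drm_memblock_item *item;

mutex_lock(&dev->struct_mutex);
item = drm_sman_alloc(&dev_priv->sman, pool, size_in_units, 0,
                      (unsigned long) file_priv);
mutex_unlock(&dev->struct_mutex);
if (item == NULL)
        return -ENOMEM;

/* On file close, reclaim everything that client still owns. */
mutex_lock(&dev->struct_mutex);
if (!drm_sman_owner_clean(&dev_priv->sman, (unsigned long) file_priv)) {
        if (dev->driver->dma_quiescent)
                dev->driver->dma_quiescent(dev);
        drm_sman_owner_cleanup(&dev_priv->sman, (unsigned long) file_priv);
}
mutex_unlock(&dev->struct_mutex);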
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +static void xgi_emit_flush(struct xgi_info * info, bool stop); +static void xgi_emit_nop(struct xgi_info * info); +static unsigned int get_batch_command(enum xgi_batch_type type); +static void triggerHWCommandList(struct xgi_info * info); +static void xgi_cmdlist_reset(struct xgi_info * info); + + +/** + * Graphic engine register (2d/3d) acessing interface + */ +static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data) +{ +#ifdef XGI_MMIO_DEBUG + DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n", + map->handle, addr, data); +#endif + DRM_WRITE32(map, addr, data); +} + + +int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, + struct drm_file * filp) +{ + struct xgi_mem_alloc mem_alloc = { + .location = XGI_MEMLOC_NON_LOCAL, + .size = size, + }; + int err; + + err = xgi_alloc(info, &mem_alloc, filp); + if (err) { + return err; + } + + info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr); + info->cmdring.size = mem_alloc.size; + info->cmdring.ring_hw_base = mem_alloc.hw_addr; + info->cmdring.last_ptr = NULL; + info->cmdring.ring_offset = 0; + + return 0; +} + + +/** + * get_batch_command - Get the command ID for the current begin type. + * @type: Type of the current batch + * + * See section 3.2.2 "Begin" (page 15) of the 3D SPG. + * + * This function assumes that @type is on the range [0,3]. 
+ */ +unsigned int get_batch_command(enum xgi_batch_type type) +{ + static const unsigned int ports[4] = { + 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2 + }; + + return ports[type]; +} + + +int xgi_submit_cmdlist(struct drm_device * dev, void * data, + struct drm_file * filp) +{ + struct xgi_info *const info = dev->dev_private; + const struct xgi_cmd_info *const pCmdInfo = + (struct xgi_cmd_info *) data; + const unsigned int cmd = get_batch_command(pCmdInfo->type); + u32 begin[4]; + + + begin[0] = (cmd << 24) | BEGIN_VALID_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); + begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size; + begin[2] = pCmdInfo->hw_addr >> 4; + begin[3] = 0; + + if (info->cmdring.last_ptr == NULL) { + const unsigned int portOffset = BASE_3D_ENG + (cmd << 2); + + + /* Enable PCI Trigger Mode + */ + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | + M2REG_CLEAR_COUNTERS_MASK | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, + (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | + M2REG_PCI_TRIGGER_MODE_MASK); + + + /* Send PCI begin command + */ + dwWriteReg(info->mmio_map, portOffset, begin[0]); + dwWriteReg(info->mmio_map, portOffset + 4, begin[1]); + dwWriteReg(info->mmio_map, portOffset + 8, begin[2]); + dwWriteReg(info->mmio_map, portOffset + 12, begin[3]); + } else { + DRM_DEBUG("info->cmdring.last_ptr != NULL\n"); + + if (pCmdInfo->type == BTYPE_3D) { + xgi_emit_flush(info, FALSE); + } + + info->cmdring.last_ptr[1] = begin[1]; + info->cmdring.last_ptr[2] = begin[2]; + info->cmdring.last_ptr[3] = begin[3]; + DRM_WRITEMEMORYBARRIER(); + info->cmdring.last_ptr[0] = begin[0]; + + triggerHWCommandList(info); + } + + info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); + drm_fence_flush_old(info->dev, 0, info->next_sequence); + return 0; +} + + +/* + state: 0 - console + 1 - graphic + 2 - fb + 3 - logout +*/ +int xgi_state_change(struct xgi_info * info, unsigned int to, + unsigned int from) +{ +#define STATE_CONSOLE 0 +#define STATE_GRAPHIC 1 +#define STATE_FBTERM 2 +#define STATE_LOGOUT 3 +#define STATE_REBOOT 4 +#define STATE_SHUTDOWN 5 + + if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) { + DRM_INFO("Leaving graphical mode (probably VT switch)\n"); + } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) { + DRM_INFO("Entering graphical mode (probably VT switch)\n"); + xgi_cmdlist_reset(info); + } else if ((from == STATE_GRAPHIC) + && ((to == STATE_LOGOUT) + || (to == STATE_REBOOT) + || (to == STATE_SHUTDOWN))) { + DRM_INFO("Leaving graphical mode (probably X shutting down)\n"); + } else { + DRM_ERROR("Invalid state change.\n"); + return -EINVAL; + } + + return 0; +} + + +int xgi_state_change_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) +{ + struct xgi_state_info *const state = + (struct xgi_state_info *) data; + struct xgi_info *info = dev->dev_private; + + + return xgi_state_change(info, state->_toState, state->_fromState); +} + + +void xgi_cmdlist_reset(struct xgi_info * info) +{ + info->cmdring.last_ptr = NULL; + info->cmdring.ring_offset = 0; +} + + +void xgi_cmdlist_cleanup(struct xgi_info * info) +{ + if (info->cmdring.ring_hw_base != 0) { + /* If command lists have been issued, terminate the command + * list chain with a flush command. 
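xgi_submit_cmdlist() above and xgi_emit_flush()/xgi_emit_nop() below all chain batches the same way: a BEGIN packet is four dwords, where dword 0 holds the begin command in bits 31:24 plus the valid bit and sequence number, dword 1 holds BEGIN_LINK_ENABLE_MASK and the batch size, dword 2 holds the batch's hardware address shifted right by four, and dword 3 is zero. When linking onto a previous batch, dwords 1-3 are written first and dword 0 last behind a write barrier, since the valid bit in dword 0 is what lets the hardware follow the link. Condensed into one illustrative helper (not part of the patch, just a restatement of the linking step):

static void xgi_link_prev_batch(struct xgi_info *info, u32 *prev,
                                const u32 begin[4])
{
        prev[1] = begin[1];     /* BEGIN_LINK_ENABLE_MASK | size */
        prev[2] = begin[2];     /* hw_addr >> 4 */
        prev[3] = begin[3];
        DRM_WRITEMEMORYBARRIER();
        prev[0] = begin[0];     /* command, valid bit, sequence: written last */

        triggerHWCommandList(info);
}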
+ */ + if (info->cmdring.last_ptr != NULL) { + xgi_emit_flush(info, FALSE); + xgi_emit_nop(info); + } + + xgi_waitfor_pci_idle(info); + + (void) memset(&info->cmdring, 0, sizeof(info->cmdring)); + } +} + +static void triggerHWCommandList(struct xgi_info * info) +{ + static unsigned int s_triggerID = 1; + + dwWriteReg(info->mmio_map, + BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, + 0x05000000 + (0x0ffff & s_triggerID++)); +} + + +/** + * Emit a flush to the CRTL command stream. + * @info XGI info structure + * + * This function assumes info->cmdring.ptr is non-NULL. + */ +void xgi_emit_flush(struct xgi_info * info, bool stop) +{ + const u32 flush_command[8] = { + ((0x10 << 24) + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)), + BEGIN_LINK_ENABLE_MASK | (0x00004), + 0x00000000, 0x00000000, + + /* Flush the 2D engine with the default 32 clock delay. + */ + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + M2REG_FLUSH_ENGINE_COMMAND | M2REG_FLUSH_2D_ENGINE_MASK, + }; + const unsigned int flush_size = sizeof(flush_command); + u32 *batch_addr; + u32 hw_addr; + + /* check buf is large enough to contain a new flush batch */ + if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) { + info->cmdring.ring_offset = 0; + } + + hw_addr = info->cmdring.ring_hw_base + + info->cmdring.ring_offset; + batch_addr = info->cmdring.ptr + + (info->cmdring.ring_offset / 4); + + (void) memcpy(batch_addr, flush_command, flush_size); + + if (stop) { + *batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK; + } + + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4); + info->cmdring.last_ptr[2] = hw_addr >> 4; + info->cmdring.last_ptr[3] = 0; + DRM_WRITEMEMORYBARRIER(); + info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK); + + triggerHWCommandList(info); + + info->cmdring.ring_offset += flush_size; + info->cmdring.last_ptr = batch_addr; +} + + +/** + * Emit an empty command to the CRTL command stream. + * @info XGI info structure + * + * This function assumes info->cmdring.ptr is non-NULL. In addition, since + * this function emits a command that does not have linkage information, + * it sets info->cmdring.ptr to NULL. + */ +void xgi_emit_nop(struct xgi_info * info) +{ + info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + | (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence); + info->cmdring.last_ptr[2] = 0; + info->cmdring.last_ptr[3] = 0; + DRM_WRITEMEMORYBARRIER(); + info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24) + | (BEGIN_VALID_MASK); + + triggerHWCommandList(info); + + info->cmdring.last_ptr = NULL; +} + + +void xgi_emit_irq(struct xgi_info * info) +{ + if (info->cmdring.last_ptr == NULL) + return; + + xgi_emit_flush(info, TRUE); +} diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h new file mode 100644 index 00000000..f6f1c1ef --- /dev/null +++ b/linux-core/xgi_cmdlist.h @@ -0,0 +1,66 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_CMDLIST_H_ +#define _XGI_CMDLIST_H_ + +struct xgi_cmdring_info { + /** + * Kernel space pointer to the base of the command ring. + */ + u32 * ptr; + + /** + * Size, in bytes, of the command ring. + */ + unsigned int size; + + /** + * Base address of the command ring from the hardware's PoV. + */ + unsigned int ring_hw_base; + + u32 * last_ptr; + + /** + * Offset, in bytes, from the start of the ring to the next available + * location to store a command. + */ + unsigned int ring_offset; +}; + +struct xgi_info; +extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size, + struct drm_file * filp); + +extern int xgi_state_change(struct xgi_info * info, unsigned int to, + unsigned int from); + +extern void xgi_cmdlist_cleanup(struct xgi_info * info); + +extern void xgi_emit_irq(struct xgi_info * info); + +#endif /* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drm.h b/linux-core/xgi_drm.h new file mode 120000 index 00000000..677586d7 --- /dev/null +++ b/linux-core/xgi_drm.h @@ -0,0 +1 @@ +../shared-core/xgi_drm.h
\ No newline at end of file diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c new file mode 100644 index 00000000..bc6873a9 --- /dev/null +++ b/linux-core/xgi_drv.c @@ -0,0 +1,431 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "drmP.h" +#include "drm.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +#include "drm_pciids.h" + +static struct pci_device_id pciidlist[] = { + xgi_PCI_IDS +}; + +static struct drm_fence_driver xgi_fence_driver = { + .num_classes = 1, + .wrap_diff = BEGIN_BEGIN_IDENTIFICATION_MASK, + .flush_diff = BEGIN_BEGIN_IDENTIFICATION_MASK - 1, + .sequence_mask = BEGIN_BEGIN_IDENTIFICATION_MASK, + .lazy_capable = 1, + .emit = xgi_fence_emit_sequence, + .poke_flush = xgi_poke_flush, + .has_irq = xgi_fence_has_irq +}; + +int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); + +static struct drm_ioctl_desc xgi_ioctls[] = { + DRM_IOCTL_DEF(DRM_XGI_BOOTSTRAP, xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), + DRM_IOCTL_DEF(DRM_XGI_ALLOC, xgi_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER), +}; + +static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent); +static int xgi_driver_load(struct drm_device *dev, unsigned long flags); +static int xgi_driver_unload(struct drm_device *dev); +static void xgi_driver_lastclose(struct drm_device * dev); +static void xgi_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file * filp); +static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); + + +static struct drm_driver driver = { + .driver_features = + DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | + DRIVER_IRQ_SHARED | DRIVER_SG, + .dev_priv_size = sizeof(struct xgi_info), + .load = xgi_driver_load, + .unload = xgi_driver_unload, + .lastclose = xgi_driver_lastclose, + .dma_quiescent = NULL, + .irq_preinstall = NULL, + .irq_postinstall = NULL, + .irq_uninstall = NULL, + .irq_handler = xgi_kern_isr, + .reclaim_buffers = drm_core_reclaim_buffers, 
+ .reclaim_buffers_idlelocked = xgi_reclaim_buffers_locked, + .get_map_ofs = drm_core_get_map_ofs, + .get_reg_ofs = drm_core_get_reg_ofs, + .ioctls = xgi_ioctls, + .dma_ioctl = NULL, + + .fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .ioctl = drm_ioctl, + .mmap = drm_mmap, + .poll = drm_poll, + .fasync = drm_fasync, +#if defined(CONFIG_COMPAT) && LINUX_VERSION_CODE > KERNEL_VERSION(2,6,9) + .compat_ioctl = xgi_compat_ioctl, +#endif + }, + + .pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = probe, + .remove = __devexit_p(drm_cleanup_pci), + }, + + .fence_driver = &xgi_fence_driver, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + +}; + +static int probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_dev(pdev, ent, &driver); +} + + +static int __init xgi_init(void) +{ + driver.num_ioctls = xgi_max_ioctl; + return drm_init(&driver, pciidlist); +} + +static void __exit xgi_exit(void) +{ + drm_exit(&driver); +} + +module_init(xgi_init); +module_exit(xgi_exit); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); + + +void xgi_engine_init(struct xgi_info * info) +{ + u8 temp; + + + OUT3C5B(info->mmio_map, 0x11, 0x92); + + /* -------> copy from OT2D + * PCI Retry Control Register. + * disable PCI read retry & enable write retry in mem. (10xx xxxx)b + */ + temp = IN3X5B(info->mmio_map, 0x55); + OUT3X5B(info->mmio_map, 0x55, (temp & 0xbf) | 0x80); + + xgi_enable_ge(info); + + /* Enable linear addressing of the card. */ + temp = IN3X5B(info->mmio_map, 0x21); + OUT3X5B(info->mmio_map, 0x21, temp | 0x20); + + /* Enable 32-bit internal data path */ + temp = IN3X5B(info->mmio_map, 0x2A); + OUT3X5B(info->mmio_map, 0x2A, temp | 0x40); + + /* Enable PCI burst write ,disable burst read and enable MMIO. */ + /* + * 0x3D4.39 Enable PCI burst write, disable burst read and enable MMIO. 
+ * 7 ---- Pixel Data Format 1: big endian 0: little endian + * 6 5 4 3---- Memory Data with Big Endian Format, BE[3:0]# with Big Endian Format + * 2 ---- PCI Burst Write Enable + * 1 ---- PCI Burst Read Enable + * 0 ---- MMIO Control + */ + temp = IN3X5B(info->mmio_map, 0x39); + OUT3X5B(info->mmio_map, 0x39, (temp | 0x05) & 0xfd); + + /* enable GEIO decode */ + /* temp = IN3X5B(info->mmio_map, 0x29); + * OUT3X5B(info->mmio_map, 0x29, temp | 0x08); + */ + + /* Enable graphic engine I/O PCI retry function*/ + /* temp = IN3X5B(info->mmio_map, 0x62); + * OUT3X5B(info->mmio_map, 0x62, temp | 0x50); + */ + + /* protect all register except which protected by 3c5.0e.7 */ + /* OUT3C5B(info->mmio_map, 0x11, 0x87); */ +} + + +int xgi_bootstrap(struct drm_device * dev, void * data, + struct drm_file * filp) +{ + struct xgi_info *info = dev->dev_private; + struct xgi_bootstrap * bs = (struct xgi_bootstrap *) data; + struct drm_map_list *maplist; + int err; + + + DRM_SPININIT(&info->fence_lock, "fence lock"); + info->next_sequence = 0; + info->complete_sequence = 0; + + if (info->mmio_map == NULL) { + err = drm_addmap(dev, info->mmio.base, info->mmio.size, + _DRM_REGISTERS, _DRM_KERNEL, + &info->mmio_map); + if (err) { + DRM_ERROR("Unable to map MMIO region: %d\n", err); + return err; + } + + xgi_enable_mmio(info); + xgi_engine_init(info); + } + + + info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024; + + DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n", + (unsigned long) info->fb.base, info->fb.size); + + + if ((info->fb.base == 0) || (info->fb.size == 0)) { + DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); + return -EINVAL; + } + + + /* Init the resource manager */ + if (!info->fb_heap_initialized) { + err = xgi_fb_heap_init(info); + if (err) { + DRM_ERROR("Unable to initialize FB heap.\n"); + return err; + } + } + + + info->pcie.size = bs->gart.size; + + /* Init the resource manager */ + if (!info->pcie_heap_initialized) { + err = xgi_pcie_heap_init(info); + if (err) { + DRM_ERROR("Unable to initialize GART heap.\n"); + return err; + } + + /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ + err = xgi_cmdlist_initialize(info, 0x100000, filp); + if (err) { + DRM_ERROR("xgi_cmdlist_initialize() failed\n"); + return err; + } + } + + + if (info->pcie_map == NULL) { + err = drm_addmap(info->dev, 0, info->pcie.size, + _DRM_SCATTER_GATHER, _DRM_LOCKED, + & info->pcie_map); + if (err) { + DRM_ERROR("Could not add map for GART backing " + "store.\n"); + return err; + } + } + + + maplist = drm_find_matching_map(dev, info->pcie_map); + if (maplist == NULL) { + DRM_ERROR("Could not find GART backing store map.\n"); + return -EINVAL; + } + + bs->gart = *info->pcie_map; + bs->gart.handle = (void *)(unsigned long) maplist->user_token; + return 0; +} + + +void xgi_driver_lastclose(struct drm_device * dev) +{ + struct xgi_info * info = dev->dev_private; + + if (info != NULL) { + if (info->mmio_map != NULL) { + xgi_cmdlist_cleanup(info); + xgi_disable_ge(info); + xgi_disable_mmio(info); + } + + /* The core DRM lastclose routine will destroy all of our + * mappings for us. NULL out the pointers here so that + * xgi_bootstrap can do the right thing. 
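xgi_engine_init() above, and the MMIO/GE helpers in xgi_misc.c further down, lean on the IN3X5B/OUT3X5B, IN3CFB/OUT3CFB and OUT3C5B wrappers from xgi_regs.h for indexed VGA register access: write the index byte to the index port (0x3d4 for the CRT controller, 0x3ce for the graphics controller, 0x3c4 for the sequencer), then access the data port one byte above it. Their definitions fall outside the hunks shown in this diff; the 3x5 pair presumably amounts to something like the following sketch, which mirrors what xgi_enable_mmio() does by hand (an assumption about this tree, not a copy of it):

static inline void OUT3X5B(struct drm_map *map, u8 index, u8 data)
{
        DRM_WRITE8(map, 0x3d4, index);   /* select CRTC register */
        DRM_WRITE8(map, 0x3d5, data);    /* write its value */
}

static inline u8 IN3X5B(struct drm_map *map, u8 index)
{
        DRM_WRITE8(map, 0x3d4, index);
        return DRM_READ8(map, 0x3d5);
}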
+ */ + info->pcie_map = NULL; + info->mmio_map = NULL; + info->fb_map = NULL; + + if (info->pcie_heap_initialized) { + drm_ati_pcigart_cleanup(dev, &info->gart_info); + } + + if (info->fb_heap_initialized + || info->pcie_heap_initialized) { + drm_sman_cleanup(&info->sman); + + info->fb_heap_initialized = FALSE; + info->pcie_heap_initialized = FALSE; + } + } +} + + +void xgi_reclaim_buffers_locked(struct drm_device * dev, + struct drm_file * filp) +{ + struct xgi_info * info = dev->dev_private; + + mutex_lock(&info->dev->struct_mutex); + if (drm_sman_owner_clean(&info->sman, (unsigned long) filp)) { + mutex_unlock(&info->dev->struct_mutex); + return; + } + + if (dev->driver->dma_quiescent) { + dev->driver->dma_quiescent(dev); + } + + drm_sman_owner_cleanup(&info->sman, (unsigned long) filp); + mutex_unlock(&info->dev->struct_mutex); + return; +} + + +/* + * driver receives an interrupt if someone waiting, then hand it off. + */ +irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) +{ + struct drm_device *dev = (struct drm_device *) arg; + struct xgi_info *info = dev->dev_private; + const u32 irq_bits = DRM_READ32(info->mmio_map, + (0x2800 + + M2REG_AUTO_LINK_STATUS_ADDRESS)) + & (M2REG_ACTIVE_TIMER_INTERRUPT_MASK + | M2REG_ACTIVE_INTERRUPT_0_MASK + | M2REG_ACTIVE_INTERRUPT_2_MASK + | M2REG_ACTIVE_INTERRUPT_3_MASK); + + + if (irq_bits != 0) { + DRM_WRITE32(info->mmio_map, + 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, + M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits); + xgi_fence_handler(dev); + return IRQ_HANDLED; + } else { + return IRQ_NONE; + } +} + + +int xgi_driver_load(struct drm_device *dev, unsigned long flags) +{ + struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); + int err; + + if (!info) + return -ENOMEM; + + (void) memset(info, 0, sizeof(*info)); + dev->dev_private = info; + info->dev = dev; + + info->mmio.base = drm_get_resource_start(dev, 1); + info->mmio.size = drm_get_resource_len(dev, 1); + + DRM_INFO("mmio base: 0x%lx, size: 0x%x\n", + (unsigned long) info->mmio.base, info->mmio.size); + + + if ((info->mmio.base == 0) || (info->mmio.size == 0)) { + DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n", + (unsigned long) info->mmio.base, info->mmio.size); + err = -EINVAL; + goto fail; + } + + + info->fb.base = drm_get_resource_start(dev, 0); + info->fb.size = drm_get_resource_len(dev, 0); + + DRM_INFO("fb base: 0x%lx, size: 0x%x\n", + (unsigned long) info->fb.base, info->fb.size); + + + err = drm_sman_init(&info->sman, 2, 12, 8); + if (err) { + goto fail; + } + + + return 0; + +fail: + drm_free(info, sizeof(*info), DRM_MEM_DRIVER); + return err; +} + +int xgi_driver_unload(struct drm_device *dev) +{ + struct xgi_info * info = dev->dev_private; + + drm_sman_takedown(&info->sman); + drm_free(info, sizeof(*info), DRM_MEM_DRIVER); + dev->dev_private = NULL; + + return 0; +} diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h new file mode 100644 index 00000000..a68dc03b --- /dev/null +++ b/linux-core/xgi_drv.h @@ -0,0 +1,117 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_DRV_H_ +#define _XGI_DRV_H_ + +#include "drmP.h" +#include "drm.h" +#include "drm_sman.h" + +#define DRIVER_AUTHOR "Andrea Zhang <andrea_zhang@macrosynergy.com>" + +#define DRIVER_NAME "xgi" +#define DRIVER_DESC "XGI XP5 / XP10 / XG47" +#define DRIVER_DATE "20070918" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + +#include "xgi_cmdlist.h" +#include "xgi_drm.h" + +struct xgi_aperture { + dma_addr_t base; + unsigned int size; +}; + +struct xgi_info { + struct drm_device *dev; + + bool bootstrap_done; + + /* physical characteristics */ + struct xgi_aperture mmio; + struct xgi_aperture fb; + struct xgi_aperture pcie; + + struct drm_map *mmio_map; + struct drm_map *pcie_map; + struct drm_map *fb_map; + + /* look up table parameters */ + struct ati_pcigart_info gart_info; + unsigned int lutPageSize; + + struct drm_sman sman; + bool fb_heap_initialized; + bool pcie_heap_initialized; + + struct xgi_cmdring_info cmdring; + + DRM_SPINTYPE fence_lock; + unsigned complete_sequence; + unsigned next_sequence; +}; + +extern long xgi_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg); + +extern int xgi_fb_heap_init(struct xgi_info * info); + +extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + struct drm_file * filp); + +extern int xgi_free(struct xgi_info * info, unsigned long index, + struct drm_file * filp); + +extern int xgi_pcie_heap_init(struct xgi_info * info); + +extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address); + +extern void xgi_enable_mmio(struct xgi_info * info); +extern void xgi_disable_mmio(struct xgi_info * info); +extern void xgi_enable_ge(struct xgi_info * info); +extern void xgi_disable_ge(struct xgi_info * info); + +extern void xgi_poke_flush(struct drm_device * dev, uint32_t class); +extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, + uint32_t flags, uint32_t * sequence, uint32_t * native_type); +extern void xgi_fence_handler(struct drm_device * dev); +extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, + uint32_t flags); + +extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_submit_cmdlist(struct 
drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_state_change_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); + +#endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c new file mode 100644 index 00000000..2e2d0094 --- /dev/null +++ b/linux-core/xgi_fb.c @@ -0,0 +1,130 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_drv.h" + +#define XGI_FB_HEAP_START 0x1000000 + +int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, + struct drm_file * filp) +{ + struct drm_memblock_item *block; + const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL) + ? "on-card" : "GART"; + + + if ((alloc->location != XGI_MEMLOC_LOCAL) + && (alloc->location != XGI_MEMLOC_NON_LOCAL)) { + DRM_ERROR("Invalid memory pool (0x%08x) specified.\n", + alloc->location); + return -EINVAL; + } + + if ((alloc->location == XGI_MEMLOC_LOCAL) + ? 
!info->fb_heap_initialized : !info->pcie_heap_initialized) { + DRM_ERROR("Attempt to allocate from uninitialized memory " + "pool (0x%08x).\n", alloc->location); + return -EINVAL; + } + + mutex_lock(&info->dev->struct_mutex); + block = drm_sman_alloc(&info->sman, alloc->location, alloc->size, + 0, (unsigned long) filp); + mutex_unlock(&info->dev->struct_mutex); + + if (block == NULL) { + alloc->size = 0; + DRM_ERROR("%s memory allocation failed\n", mem_name); + return -ENOMEM; + } else { + alloc->offset = (*block->mm->offset)(block->mm, + block->mm_info); + alloc->hw_addr = alloc->offset; + alloc->index = block->user_hash.key; + + if (block->user_hash.key != (unsigned long) alloc->index) { + DRM_ERROR("%s truncated handle %lx for pool %d " + "offset %x\n", + __func__, block->user_hash.key, + alloc->location, alloc->offset); + } + + if (alloc->location == XGI_MEMLOC_NON_LOCAL) { + alloc->hw_addr += info->pcie.base; + } + + DRM_DEBUG("%s memory allocation succeeded: 0x%x\n", + mem_name, alloc->offset); + } + + return 0; +} + + +int xgi_alloc_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) +{ + struct xgi_info *info = dev->dev_private; + + return xgi_alloc(info, (struct xgi_mem_alloc *) data, filp); +} + + +int xgi_free(struct xgi_info * info, unsigned long index, + struct drm_file * filp) +{ + int err; + + mutex_lock(&info->dev->struct_mutex); + err = drm_sman_free_key(&info->sman, index); + mutex_unlock(&info->dev->struct_mutex); + + return err; +} + + +int xgi_free_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) +{ + struct xgi_info *info = dev->dev_private; + + return xgi_free(info, *(unsigned long *) data, filp); +} + + +int xgi_fb_heap_init(struct xgi_info * info) +{ + int err; + + mutex_lock(&info->dev->struct_mutex); + err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL, + XGI_FB_HEAP_START, + info->fb.size - XGI_FB_HEAP_START); + mutex_unlock(&info->dev->struct_mutex); + + info->fb_heap_initialized = (err == 0); + return err; +} diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c new file mode 100644 index 00000000..adedf300 --- /dev/null +++ b/linux-core/xgi_fence.c @@ -0,0 +1,127 @@ +/* + * (C) Copyright IBM Corporation 2007 + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
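xgi_alloc() above writes offset, hw_addr and index back into the same xgi_mem_alloc structure the client passed in, so user space receives the results through the ioctl's copy-out, and xgi_free_ioctl() takes just the returned index. A hedged sketch of the corresponding user-space calls through libdrm (the helper and error handling are illustrative; the structure, field and request names come from the patch's xgi_drm.h):

#include <string.h>
#include <xf86drm.h>
#include "xgi_drm.h"

/* Allocate 64 KiB from the GART pool, then free it again. */
static int xgi_alloc_free_example(int fd)
{
        struct xgi_mem_alloc alloc;
        unsigned long index;
        int err;

        memset(&alloc, 0, sizeof(alloc));
        alloc.location = XGI_MEMLOC_NON_LOCAL;
        alloc.size = 64 * 1024;

        err = drmCommandWriteRead(fd, DRM_XGI_ALLOC, &alloc, sizeof(alloc));
        if (err)
                return err;

        /* alloc.hw_addr is the address the GE sees; alloc.index is the
         * handle expected by DRM_XGI_FREE, which xgi_free_ioctl() reads
         * as an unsigned long.
         */
        index = alloc.index;
        return drmCommandWrite(fd, DRM_XGI_FREE, &index, sizeof(index));
}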
+ * + * Authors: + * Ian Romanick <idr@us.ibm.com> + */ + +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class) +{ + struct xgi_info * info = dev->dev_private; + struct drm_fence_class_manager * fc = &dev->fm.class[class]; + uint32_t pending_flush_types = 0; + uint32_t signaled_flush_types = 0; + + + if ((info == NULL) || (class != 0)) + return 0; + + DRM_SPINLOCK(&info->fence_lock); + + pending_flush_types = fc->pending_flush | + ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0); + + if (pending_flush_types) { + if (pending_flush_types & DRM_FENCE_TYPE_EXE) { + const u32 begin_id = DRM_READ32(info->mmio_map, + 0x2820) + & BEGIN_BEGIN_IDENTIFICATION_MASK; + + if (begin_id != info->complete_sequence) { + info->complete_sequence = begin_id; + signaled_flush_types |= DRM_FENCE_TYPE_EXE; + } + } + + if (signaled_flush_types) { + drm_fence_handler(dev, 0, info->complete_sequence, + signaled_flush_types); + } + } + + DRM_SPINUNLOCK(&info->fence_lock); + + return fc->pending_flush | + ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0); +} + + +int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, + uint32_t flags, uint32_t * sequence, + uint32_t * native_type) +{ + struct xgi_info * info = dev->dev_private; + + if ((info == NULL) || (class != 0)) + return -EINVAL; + + + DRM_SPINLOCK(&info->fence_lock); + info->next_sequence++; + if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) { + info->next_sequence = 1; + } + DRM_SPINUNLOCK(&info->fence_lock); + + + xgi_emit_irq(info); + + *sequence = (uint32_t) info->next_sequence; + *native_type = DRM_FENCE_TYPE_EXE; + + return 0; +} + + +void xgi_poke_flush(struct drm_device * dev, uint32_t class) +{ + struct drm_fence_manager * fm = &dev->fm; + unsigned long flags; + + + write_lock_irqsave(&fm->lock, flags); + xgi_do_flush(dev, class); + write_unlock_irqrestore(&fm->lock, flags); +} + + +void xgi_fence_handler(struct drm_device * dev) +{ + struct drm_fence_manager * fm = &dev->fm; + + + write_lock(&fm->lock); + xgi_do_flush(dev, 0); + write_unlock(&fm->lock); +} + + +int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags) +{ + return ((class == 0) && (flags == DRM_FENCE_TYPE_EXE)) ? 1 : 0; +} diff --git a/linux-core/xgi_ioc32.c b/linux-core/xgi_ioc32.c new file mode 100644 index 00000000..c54044fa --- /dev/null +++ b/linux-core/xgi_ioc32.c @@ -0,0 +1,140 @@ +/* + * (C) Copyright IBM Corporation 2007 + * Copyright (C) Paul Mackerras 2005. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL + * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Ian Romanick <idr@us.ibm.com> + */ + +#include <linux/compat.h> + +#include "drmP.h" +#include "drm.h" + +#include "xgi_drm.h" + +/* This is copied from drm_ioc32.c. + */ +struct drm_map32 { + u32 offset; /**< Requested physical address (0 for SAREA)*/ + u32 size; /**< Requested physical size (bytes) */ + enum drm_map_type type; /**< Type of memory to map */ + enum drm_map_flags flags; /**< Flags */ + u32 handle; /**< User-space: "Handle" to pass to mmap() */ + int mtrr; /**< MTRR slot used */ +}; + +struct drm32_xgi_bootstrap { + struct drm_map32 gart; +}; + + +extern int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); + +static int compat_xgi_bootstrap(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct drm32_xgi_bootstrap __user *const argp = (void __user *)arg; + struct drm32_xgi_bootstrap bs32; + struct xgi_bootstrap __user *bs; + int err; + void *handle; + + + if (copy_from_user(&bs32, argp, sizeof(bs32))) { + return -EFAULT; + } + + bs = compat_alloc_user_space(sizeof(*bs)); + if (!access_ok(VERIFY_WRITE, bs, sizeof(*bs))) { + return -EFAULT; + } + + if (__put_user(bs32.gart.offset, &bs->gart.offset) + || __put_user(bs32.gart.size, &bs->gart.size) + || __put_user(bs32.gart.type, &bs->gart.type) + || __put_user(bs32.gart.flags, &bs->gart.flags)) { + return -EFAULT; + } + + err = drm_ioctl(filp->f_dentry->d_inode, filp, XGI_IOCTL_BOOTSTRAP, + (unsigned long)bs); + if (err) { + return err; + } + + if (__get_user(bs32.gart.offset, &bs->gart.offset) + || __get_user(bs32.gart.mtrr, &bs->gart.mtrr) + || __get_user(handle, &bs->gart.handle)) { + return -EFAULT; + } + + bs32.gart.handle = (unsigned long)handle; + if (bs32.gart.handle != (unsigned long)handle && printk_ratelimit()) { + printk(KERN_ERR "%s truncated handle %p for type %d " + "offset %x\n", + __func__, handle, bs32.gart.type, bs32.gart.offset); + } + + if (copy_to_user(argp, &bs32, sizeof(bs32))) { + return -EFAULT; + } + + return 0; +} + + +drm_ioctl_compat_t *xgi_compat_ioctls[] = { + [DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap, +}; + +/** + * Called whenever a 32-bit process running under a 64-bit kernel + * performs an ioctl on /dev/dri/card<n>. + * + * \param filp file pointer. + * \param cmd command. + * \param arg user argument. + * \return zero on success or negative number on failure. + */ +long xgi_compat_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + const unsigned int nr = DRM_IOCTL_NR(cmd); + drm_ioctl_compat_t *fn = NULL; + int ret; + + if (nr < DRM_COMMAND_BASE) + return drm_compat_ioctl(filp, cmd, arg); + + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(xgi_compat_ioctls)) + fn = xgi_compat_ioctls[nr - DRM_COMMAND_BASE]; + + lock_kernel(); + ret = (fn != NULL) + ? (*fn)(filp, cmd, arg) + : drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg); + unlock_kernel(); + + return ret; +} diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c new file mode 100644 index 00000000..50a721c0 --- /dev/null +++ b/linux-core/xgi_misc.c @@ -0,0 +1,477 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. 
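Of the XGI ioctls, only DRM_XGI_BOOTSTRAP needs a 32-bit compatibility thunk: struct drm_map carries a pointer-sized handle, so its layout differs between 32-bit user space and a 64-bit kernel, which is exactly what the drm_map32 copy above papers over. The remaining ioctls use fixed-size fields and fall through to drm_ioctl() in xgi_compat_ioctl(). Extending the table later only takes another designated-initializer entry, along the lines of this sketch (the extra ioctl and handler are hypothetical):

drm_ioctl_compat_t *xgi_compat_ioctls[] = {
        [DRM_XGI_BOOTSTRAP] = compat_xgi_bootstrap,
        /* Only needed if a future ioctl also carries pointers or other
         * fields whose size depends on the user-space word size:
         *
         * [DRM_XGI_SOMETHING] = compat_xgi_something,
         */
};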
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#include "xgi_drv.h" +#include "xgi_regs.h" + +#include <linux/delay.h> + +/* + * irq functions + */ +#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff + +static unsigned int s_invalid_begin = 0; + +static bool xgi_validate_signal(struct drm_map * map) +{ + if (DRM_READ32(map, 0x2800) & 0x001c0000) { + u16 check; + + /* Check Read back status */ + DRM_WRITE8(map, 0x235c, 0x80); + check = DRM_READ16(map, 0x2360); + + if ((check & 0x3f) != ((check & 0x3f00) >> 8)) { + return FALSE; + } + + /* Check RO channel */ + DRM_WRITE8(map, 0x235c, 0x83); + check = DRM_READ16(map, 0x2360); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) { + return FALSE; + } + + /* Check RW channel */ + DRM_WRITE8(map, 0x235c, 0x88); + check = DRM_READ16(map, 0x2360); + if ((check & 0x0f) != ((check & 0xf0) >> 4)) { + return FALSE; + } + + /* Check RO channel outstanding */ + DRM_WRITE8(map, 0x235c, 0x8f); + check = DRM_READ16(map, 0x2360); + if (0 != (check & 0x3ff)) { + return FALSE; + } + + /* Check RW channel outstanding */ + DRM_WRITE8(map, 0x235c, 0x90); + check = DRM_READ16(map, 0x2360); + if (0 != (check & 0x3ff)) { + return FALSE; + } + + /* No pending PCIE request. GE stall. */ + } + + return TRUE; +} + + +static void xgi_ge_hang_reset(struct drm_map * map) +{ + int time_out = 0xffff; + + DRM_WRITE8(map, 0xb057, 8); + while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) { + while (0 != ((--time_out) & 0xfff)) + /* empty */ ; + + if (0 == time_out) { + u8 old_3ce; + u8 old_3cf; + u8 old_index; + u8 old_36; + + DRM_INFO("Can not reset back 0x%x!\n", + DRM_READ32(map, 0x2800)); + + DRM_WRITE8(map, 0xb057, 0); + + /* Have to use 3x5.36 to reset. 
*/ + /* Save and close dynamic gating */ + + old_3ce = DRM_READ8(map, 0x3ce); + DRM_WRITE8(map, 0x3ce, 0x2a); + old_3cf = DRM_READ8(map, 0x3cf); + DRM_WRITE8(map, 0x3cf, old_3cf & 0xfe); + + /* Reset GE */ + old_index = DRM_READ8(map, 0x3d4); + DRM_WRITE8(map, 0x3d4, 0x36); + old_36 = DRM_READ8(map, 0x3d5); + DRM_WRITE8(map, 0x3d5, old_36 | 0x10); + + while (0 != ((--time_out) & 0xfff)) + /* empty */ ; + + DRM_WRITE8(map, 0x3d5, old_36); + DRM_WRITE8(map, 0x3d4, old_index); + + /* Restore dynamic gating */ + DRM_WRITE8(map, 0x3cf, old_3cf); + DRM_WRITE8(map, 0x3ce, old_3ce); + break; + } + } + + DRM_WRITE8(map, 0xb057, 0); +} + + +bool xgi_ge_irq_handler(struct xgi_info * info) +{ + const u32 int_status = DRM_READ32(info->mmio_map, 0x2810); + bool is_support_auto_reset = FALSE; + + /* Check GE on/off */ + if (0 == (0xffffc0f0 & int_status)) { + if (0 != (0x1000 & int_status)) { + /* We got GE stall interrupt. + */ + DRM_WRITE32(info->mmio_map, 0x2810, + int_status | 0x04000000); + + if (is_support_auto_reset) { + static cycles_t last_tick; + static unsigned continue_int_count = 0; + + /* OE II is busy. */ + + if (!xgi_validate_signal(info->mmio_map)) { + /* Nothing but skip. */ + } else if (0 == continue_int_count++) { + last_tick = get_cycles(); + } else { + const cycles_t new_tick = get_cycles(); + if ((new_tick - last_tick) > + STALL_INTERRUPT_RESET_THRESHOLD) { + continue_int_count = 0; + } else if (continue_int_count >= 3) { + continue_int_count = 0; + + /* GE Hung up, need reset. */ + DRM_INFO("Reset GE!\n"); + + xgi_ge_hang_reset(info->mmio_map); + } + } + } + } else if (0 != (0x1 & int_status)) { + s_invalid_begin++; + DRM_WRITE32(info->mmio_map, 0x2810, + (int_status & ~0x01) | 0x04000000); + } + + return TRUE; + } + + return FALSE; +} + +bool xgi_crt_irq_handler(struct xgi_info * info) +{ + bool ret = FALSE; + u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); + + /* CRT1 interrupt just happened + */ + if (IN3CFB(info->mmio_map, 0x37) & 0x01) { + u8 op3cf_3d; + u8 op3cf_37; + + /* What happened? + */ + op3cf_37 = IN3CFB(info->mmio_map, 0x37); + + /* Clear CRT interrupt + */ + op3cf_3d = IN3CFB(info->mmio_map, 0x3d); + OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04)); + OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04)); + ret = TRUE; + } + DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); + + return (ret); +} + +bool xgi_dvi_irq_handler(struct xgi_info * info) +{ + bool ret = FALSE; + const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce); + + /* DVI interrupt just happened + */ + if (IN3CFB(info->mmio_map, 0x38) & 0x20) { + const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4); + u8 op3cf_39; + u8 op3cf_37; + u8 op3x5_5a; + + /* What happened? 
+ */ + op3cf_37 = IN3CFB(info->mmio_map, 0x37); + + /* Notify BIOS that DVI plug/unplug happened + */ + op3x5_5a = IN3X5B(info->mmio_map, 0x5a); + OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7); + + DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4); + + /* Clear DVI interrupt + */ + op3cf_39 = IN3CFB(info->mmio_map, 0x39); + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01)); + OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01)); + + ret = TRUE; + } + DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce); + + return (ret); +} + + +static void dump_reg_header(unsigned regbase) +{ + printk("\n=====xgi_dump_register========0x%x===============\n", + regbase); + printk(" 0 1 2 3 4 5 6 7 8 9 a b c d e f\n"); +} + + +static void dump_indexed_reg(struct xgi_info * info, unsigned regbase) +{ + unsigned i, j; + u8 temp; + + + dump_reg_header(regbase); + for (i = 0; i < 0x10; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + DRM_WRITE8(info->mmio_map, regbase - 1, + (i * 0x10) + j); + temp = DRM_READ8(info->mmio_map, regbase); + printk("%3x", temp); + } + printk("\n"); + } +} + + +static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range) +{ + unsigned i, j; + + + dump_reg_header(regbase); + for (i = 0; i < range; i++) { + printk("%1x ", i); + + for (j = 0; j < 0x10; j++) { + u8 temp = DRM_READ8(info->mmio_map, + regbase + (i * 0x10) + j); + printk("%3x", temp); + } + printk("\n"); + } +} + + +void xgi_dump_register(struct xgi_info * info) +{ + dump_indexed_reg(info, 0x3c5); + dump_indexed_reg(info, 0x3d5); + dump_indexed_reg(info, 0x3cf); + + dump_reg(info, 0xB000, 0x05); + dump_reg(info, 0x2200, 0x0B); + dump_reg(info, 0x2300, 0x07); + dump_reg(info, 0x2400, 0x10); + dump_reg(info, 0x2800, 0x10); +} + + +#define WHOLD_GE_STATUS 0x2800 + +/* Test everything except the "whole GE busy" bit, the "master engine busy" + * bit, and the reserved bits [26:21]. 
+ */ +#define IDLE_MASK ~((1U<<31) | (1U<<28) | (0x3f<<21)) + +void xgi_waitfor_pci_idle(struct xgi_info * info) +{ + unsigned int idleCount = 0; + u32 old_status = 0; + unsigned int same_count = 0; + + while (idleCount < 5) { + const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) + & IDLE_MASK; + + if (status == old_status) { + same_count++; + + if ((same_count % 100) == 0) { + DRM_ERROR("GE status stuck at 0x%08x for %u iterations!\n", + old_status, same_count); + } + } else { + old_status = status; + same_count = 0; + } + + if (status != 0) { + msleep(1); + idleCount = 0; + } else { + idleCount++; + } + } +} + + +void xgi_enable_mmio(struct xgi_info * info) +{ + u8 protect = 0; + u8 temp; + + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); + + DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20); + + /* Enable MMIO */ + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01); + + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); +} + + +void xgi_disable_mmio(struct xgi_info * info) +{ + u8 protect = 0; + u8 temp; + + /* Unprotect registers */ + DRM_WRITE8(info->mmio_map, 0x3C4, 0x11); + protect = DRM_READ8(info->mmio_map, 0x3C5); + DRM_WRITE8(info->mmio_map, 0x3C5, 0x92); + + /* Disable MMIO access */ + DRM_WRITE8(info->mmio_map, 0x3D4, 0x39); + temp = DRM_READ8(info->mmio_map, 0x3D5); + DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE); + + /* Protect registers */ + OUT3C5B(info->mmio_map, 0x11, protect); +} + + +void xgi_enable_ge(struct xgi_info * info) +{ + u8 bOld3cf2a; + int wait = 0; + + OUT3C5B(info->mmio_map, 0x11, 0x92); + + /* Save and close dynamic gating + */ + bOld3cf2a = IN3CFB(info->mmio_map, XGI_MISC_CTRL); + OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a & ~EN_GEPWM); + + /* Enable 2D and 3D GE + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Reset both 3D and 2D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, + (GE_ENABLE | GE_RESET | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Enable 2D engine only + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, GE_ENABLE); + + /* Enable 2D+3D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + /* Restore dynamic gating + */ + OUT3CFB(info->mmio_map, XGI_MISC_CTRL, bOld3cf2a); +} + + +void xgi_disable_ge(struct xgi_info * info) +{ + int wait = 0; + + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Reset both 3D and 2D engine + */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, + (GE_ENABLE | GE_RESET | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + OUT3X5B(info->mmio_map, XGI_GE_CNTL, (GE_ENABLE | GE_ENABLE_3D)); + + wait = 10; + while (wait--) { + DRM_READ8(info->mmio_map, 0x36); + } + + /* Disable 2D engine and 3D engine. 
+ */ + OUT3X5B(info->mmio_map, XGI_GE_CNTL, 0); +} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h new file mode 100644 index 00000000..af19a11a --- /dev/null +++ b/linux-core/xgi_misc.h @@ -0,0 +1,37 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_MISC_H_ +#define _XGI_MISC_H_ + +extern void xgi_dump_register(struct xgi_info * info); + +extern bool xgi_ge_irq_handler(struct xgi_info * info); +extern bool xgi_crt_irq_handler(struct xgi_info * info); +extern bool xgi_dvi_irq_handler(struct xgi_info * info); +extern void xgi_waitfor_pci_idle(struct xgi_info * info); + +#endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c new file mode 100644 index 00000000..a7d3ea24 --- /dev/null +++ b/linux-core/xgi_pcie.c @@ -0,0 +1,126 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. 
+ ***************************************************************************/ + +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" + +void xgi_gart_flush(struct drm_device *dev) +{ + struct xgi_info *const info = dev->dev_private; + u8 temp; + + DRM_MEMORYBARRIER(); + + /* Set GART in SFB */ + temp = DRM_READ8(info->mmio_map, 0xB00C); + DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02); + + /* Set GART base address to HW */ + DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr); + + /* Flush GART table. */ + DRM_WRITE8(info->mmio_map, 0xB03F, 0x40); + DRM_WRITE8(info->mmio_map, 0xB03F, 0x00); +} + + +int xgi_pcie_heap_init(struct xgi_info * info) +{ + u8 temp = 0; + int err; + struct drm_scatter_gather request; + + /* Get current FB aperture size */ + temp = IN3X5B(info->mmio_map, 0x27); + DRM_INFO("In3x5(0x27): 0x%x\n", temp); + + if (temp & 0x01) { /* 256MB; Jong 06/05/2006; 0x10000000 */ + info->pcie.base = 256 * 1024 * 1024; + } else { /* 128MB; Jong 06/05/2006; 0x08000000 */ + info->pcie.base = 128 * 1024 * 1024; + } + + + DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base); + + /* Get current lookup table page size */ + temp = DRM_READ8(info->mmio_map, 0xB00C); + if (temp & 0x04) { /* 8KB */ + info->lutPageSize = 8 * 1024; + } else { /* 4KB */ + info->lutPageSize = 4 * 1024; + } + + DRM_INFO("info->lutPageSize: 0x%x\n", info->lutPageSize); + + + request.size = info->pcie.size; + err = drm_sg_alloc(info->dev, & request); + if (err) { + DRM_ERROR("cannot allocate PCIE GART backing store! " + "size = %d\n", info->pcie.size); + return err; + } + + info->gart_info.gart_table_location = DRM_ATI_GART_MAIN; + info->gart_info.gart_reg_if = DRM_ATI_GART_PCI; + info->gart_info.table_size = info->dev->sg->pages * sizeof(u32); + + if (!drm_ati_pcigart_init(info->dev, &info->gart_info)) { + DRM_ERROR("failed to init PCI GART!\n"); + return -ENOMEM; + } + + + xgi_gart_flush(info->dev); + + mutex_lock(&info->dev->struct_mutex); + err = drm_sman_set_range(&info->sman, XGI_MEMLOC_NON_LOCAL, + 0, info->pcie.size); + mutex_unlock(&info->dev->struct_mutex); + if (err) { + drm_ati_pcigart_cleanup(info->dev, &info->gart_info); + } + + info->pcie_heap_initialized = (err == 0); + return err; +} + + +/** + * xgi_find_pcie_virt + * @address: GE HW address + * + * Returns the CPU virtual address. Assumes the scatter/gather backing store + * has one contiguous CPU mapping, so the aperture offset applies directly. + */ +void *xgi_find_pcie_virt(struct xgi_info * info, u32 address) +{ + const unsigned long offset = address - info->pcie.base; + + return ((u8 *) info->dev->sg->virtual) + offset; +} diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h new file mode 100644 index 00000000..5c0100a0 --- /dev/null +++ b/linux-core/xgi_regs.h @@ -0,0 +1,169 @@ +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan. + * + * All Rights Reserved.
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation on the rights to use, copy, modify, merge, + * publish, distribute, sublicense, and/or sell copies of the Software, + * and to permit persons to whom the Software is furnished to do so, + * subject to the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER + * DEALINGS IN THE SOFTWARE. + ***************************************************************************/ + +#ifndef _XGI_REGS_H_ +#define _XGI_REGS_H_ + +#include "drmP.h" +#include "drm.h" + +#define MAKE_MASK(bits) ((1U << (bits)) - 1) + +#define ONE_BIT_MASK MAKE_MASK(1) +#define TWENTY_BIT_MASK MAKE_MASK(20) +#define TWENTYONE_BIT_MASK MAKE_MASK(21) +#define TWENTYTWO_BIT_MASK MAKE_MASK(22) + + +/* Port 0x3d4/0x3d5, index 0x2a */ +#define XGI_INTERFACE_SEL 0x2a +#define DUAL_64BIT (1U<<7) +#define INTERNAL_32BIT (1U<<6) +#define EN_SEP_WR (1U<<5) +#define POWER_DOWN_SEL (1U<<4) +/*#define RESERVED_3 (1U<<3) */ +#define SUBS_MCLK_PCICLK (1U<<2) +#define MEM_SIZE_MASK (3<<0) +#define MEM_SIZE_32MB (0<<0) +#define MEM_SIZE_64MB (1<<0) +#define MEM_SIZE_128MB (2<<0) +#define MEM_SIZE_256MB (3<<0) + +/* Port 0x3d4/0x3d5, index 0x36 */ +#define XGI_GE_CNTL 0x36 +#define GE_ENABLE (1U<<7) +/*#define RESERVED_6 (1U<<6) */ +/*#define RESERVED_5 (1U<<5) */ +#define GE_RESET (1U<<4) +/*#define RESERVED_3 (1U<<3) */ +#define GE_ENABLE_3D (1U<<2) +/*#define RESERVED_1 (1U<<1) */ +/*#define RESERVED_0 (1U<<0) */ + +/* Port 0x3ce/0x3cf, index 0x2a */ +#define XGI_MISC_CTRL 0x2a +#define MOTION_VID_SUSPEND (1U<<7) +#define DVI_CRTC_TIMING_SEL (1U<<6) +#define LCD_SEL_CTL_NEW (1U<<5) +#define LCD_SEL_EXT_DELYCTRL (1U<<4) +#define REG_LCDDPARST (1U<<3) +#define LCD2DPAOFF (1U<<2) +/*#define RESERVED_1 (1U<<1) */ +#define EN_GEPWM (1U<<0) /* Enable GE power management */ + + +#define BASE_3D_ENG 0x2800 + +#define M2REG_FLUSH_ENGINE_ADDRESS 0x000 +#define M2REG_FLUSH_ENGINE_COMMAND 0x00 +#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21) +#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20) +#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK + +#define M2REG_RESET_ADDRESS 0x004 +#define M2REG_RESET_COMMAND 0x01 +#define M2REG_RESET_STATUS2_MASK (ONE_BIT_MASK<<10) +#define M2REG_RESET_STATUS1_MASK (ONE_BIT_MASK<<9) +#define M2REG_RESET_STATUS0_MASK (ONE_BIT_MASK<<8) +#define M2REG_RESET_3DENG_MASK (ONE_BIT_MASK<<4) +#define M2REG_RESET_2DENG_MASK (ONE_BIT_MASK<<2) + +/* Write register */ +#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x010 +#define M2REG_AUTO_LINK_SETTING_COMMAND 0x04 +#define M2REG_CLEAR_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11) +#define M2REG_CLEAR_INTERRUPT_3_MASK (ONE_BIT_MASK<<10) +#define M2REG_CLEAR_INTERRUPT_2_MASK (ONE_BIT_MASK<<9) +#define M2REG_CLEAR_INTERRUPT_0_MASK (ONE_BIT_MASK<<8) +#define 
M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4) +#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1) +#define M2REG_INVALID_LIST_AUTO_INTERRUPT_MASK (ONE_BIT_MASK<<0) + +/* Read register */ +#define M2REG_AUTO_LINK_STATUS_ADDRESS 0x010 +#define M2REG_AUTO_LINK_STATUS_COMMAND 0x04 +#define M2REG_ACTIVE_TIMER_INTERRUPT_MASK (ONE_BIT_MASK<<11) +#define M2REG_ACTIVE_INTERRUPT_3_MASK (ONE_BIT_MASK<<10) +#define M2REG_ACTIVE_INTERRUPT_2_MASK (ONE_BIT_MASK<<9) +#define M2REG_ACTIVE_INTERRUPT_0_MASK (ONE_BIT_MASK<<8) +#define M2REG_INVALID_LIST_AUTO_INTERRUPTED_MODE_MASK (ONE_BIT_MASK<<0) + +#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x014 +#define M2REG_PCI_TRIGGER_REGISTER_COMMAND 0x05 + + +/** + * Begin instruction, double-word 0 + */ +#define BEGIN_STOP_STORE_CURRENT_POINTER_MASK (ONE_BIT_MASK<<22) +#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20) +#define BEGIN_BEGIN_IDENTIFICATION_MASK TWENTY_BIT_MASK + +/** + * Begin instruction, double-word 1 + */ +#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31) +#define BEGIN_COMMAND_LIST_LENGTH_MASK TWENTYTWO_BIT_MASK + + +/* Hardware access functions */ +static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data) +{ + DRM_WRITE8(map, 0x3C4, index); + DRM_WRITE8(map, 0x3C5, data); +} + +static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data) +{ + DRM_WRITE8(map, 0x3D4, index); + DRM_WRITE8(map, 0x3D5, data); +} + +static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data) +{ + DRM_WRITE8(map, 0x3CE, index); + DRM_WRITE8(map, 0x3CF, data); +} + +static inline u8 IN3C5B(struct drm_map * map, u8 index) +{ + DRM_WRITE8(map, 0x3C4, index); + return DRM_READ8(map, 0x3C5); +} + +static inline u8 IN3X5B(struct drm_map * map, u8 index) +{ + DRM_WRITE8(map, 0x3D4, index); + return DRM_READ8(map, 0x3D5); +} + +static inline u8 IN3CFB(struct drm_map * map, u8 index) +{ + DRM_WRITE8(map, 0x3CE, index); + return DRM_READ8(map, 0x3CF); +} + +#endif
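The OUT3X5B()/IN3X5B() helpers at the end of xgi_regs.h implement the classic VGA indexed-register protocol: write the register index to the index port (0x3D4), then move the data through the neighbouring data port (0x3D5). As a rough illustration only, not driver code, the following user-space sketch models that protocol against an in-memory mock; mock_io, mock_crt, out3x5 and in3x5 are invented names.

#include <stdint.h>
#include <stdio.h>

#define MOCK_CRT_INDEX 0x3D4
#define MOCK_CRT_DATA  0x3D5

static uint8_t mock_io[0x400];	/* flat stand-in for the MMIO window */
static uint8_t mock_crt[0x100];	/* stand-in for the indexed register bank */

static void mock_write8(uint16_t port, uint8_t val)
{
	mock_io[port] = val;
	if (port == MOCK_CRT_DATA)
		mock_crt[mock_io[MOCK_CRT_INDEX]] = val;
}

static uint8_t mock_read8(uint16_t port)
{
	if (port == MOCK_CRT_DATA)
		return mock_crt[mock_io[MOCK_CRT_INDEX]];
	return mock_io[port];
}

/* Same shape as OUT3X5B()/IN3X5B(): select the index, then move the data. */
static void out3x5(uint8_t index, uint8_t data)
{
	mock_write8(MOCK_CRT_INDEX, index);
	mock_write8(MOCK_CRT_DATA, data);
}

static uint8_t in3x5(uint8_t index)
{
	mock_write8(MOCK_CRT_INDEX, index);
	return mock_read8(MOCK_CRT_DATA);
}

int main(void)
{
	/* Read-modify-write of one indexed register, mirroring the
	 * "read current value, OR in an enable bit" steps used by
	 * xgi_enable_mmio(); index 0x39 and bit 0 are example values. */
	uint8_t old = in3x5(0x39);

	out3x5(0x39, old | 0x01);
	printf("index 0x39: 0x%02x -> 0x%02x\n", old, in3x5(0x39));
	return 0;
}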
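The MAKE_MASK()/BEGIN_* definitions suggest how command double-words are assembled: truncate the identification field to its width, then OR in the single-bit flags. Below is a minimal sketch of that packing pattern under the bit layout given by BEGIN_VALID_MASK and BEGIN_BEGIN_IDENTIFICATION_MASK; pack_begin_dw0() is an invented helper, not part of the driver.

#include <stdint.h>
#include <stdio.h>

#define MAKE_MASK(bits)	((1U << (bits)) - 1)
#define ONE_BIT_MASK	MAKE_MASK(1)
#define TWENTY_BIT_MASK	MAKE_MASK(20)

#define BEGIN_VALID_MASK		(ONE_BIT_MASK << 20)
#define BEGIN_BEGIN_IDENTIFICATION_MASK	TWENTY_BIT_MASK

/* Invented helper: keep the low 20 id bits, then set the "valid" bit. */
static uint32_t pack_begin_dw0(uint32_t id)
{
	return BEGIN_VALID_MASK | (id & BEGIN_BEGIN_IDENTIFICATION_MASK);
}

int main(void)
{
	printf("dw0 = 0x%08x\n", (unsigned) pack_begin_dw0(0x12345));
	return 0;
}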
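xgi_waitfor_pci_idle() in xgi_misc.c only treats the engine as idle after five consecutive idle reads of the masked status register, and any busy read restarts the count. The sketch below models just that counting strategy in user space; fake_status() and REQUIRED_IDLE_READS stand in for the masked WHOLD_GE_STATUS read and are invented for illustration.

#include <stdint.h>
#include <stdio.h>

#define REQUIRED_IDLE_READS 5

/* Invented stand-in for the masked status read: reports the engine busy
 * for the first few polls, then idle forever after. */
static uint32_t fake_status(void)
{
	static int busy_polls = 3;

	return (busy_polls-- > 0) ? 0x1 : 0x0;
}

static void wait_for_idle(void)
{
	unsigned int idle_count = 0;
	unsigned int polls = 0;

	while (idle_count < REQUIRED_IDLE_READS) {
		polls++;
		if (fake_status() != 0)
			idle_count = 0;	/* still busy: restart the count */
		else
			idle_count++;
	}
	printf("idle after %u polls\n", polls);
}

int main(void)
{
	wait_for_idle();
	return 0;
}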
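xgi_find_pcie_virt() relies on the scatter/gather backing store having a single contiguous CPU mapping: a GE address is reduced to an offset from the aperture base, and that offset is applied to the CPU pointer. Here is a user-space sketch of the same arithmetic with an invented mock_pcie_heap structure; the range check is an addition for safety and is not performed by the driver routine itself.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_pcie_heap {
	uint32_t base;	/* GE-visible start of the PCIe aperture */
	uint32_t size;	/* aperture size in bytes */
	uint8_t *cpu;	/* one contiguous CPU mapping (the key assumption) */
};

/* GE address -> offset from the aperture base -> CPU pointer. */
static void *find_virt(const struct mock_pcie_heap *heap, uint32_t ge_addr)
{
	uint32_t offset = ge_addr - heap->base;

	if (offset >= heap->size)
		return NULL;
	return heap->cpu + offset;
}

int main(void)
{
	struct mock_pcie_heap heap = {
		.base = 128u * 1024 * 1024,	/* as in the 128MB aperture case */
		.size = 4096,
		.cpu = malloc(4096),
	};

	if (!heap.cpu)
		return 1;

	printf("GE 0x%08x -> CPU %p (mapping starts at %p)\n",
	       (unsigned) (heap.base + 0x100),
	       find_virt(&heap, heap.base + 0x100), (void *) heap.cpu);
	free(heap.cpu);
	return 0;
}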