author     Alan Hourihane <alanh@fairlite.demon.co.uk>   2007-06-29 20:09:44 +0100
committer  Alan Hourihane <alanh@fairlite.demon.co.uk>   2007-06-29 20:09:44 +0100
commit     8a78dead291ffdb5a8774419cdca369a1e27cad9 (patch)
tree       399b8378e32036bdc7dadc10c639465a8fb21681 /linux-core
parent     e79e2a58161d44754fd55507e155b7e12a09c4d2 (diff)
parent     a27af4c4a665864df09123f177ca7269e48f6171 (diff)
Merge branch 'master' of git+ssh://git.freedesktop.org/git/mesa/drm into modesetting-101
Conflicts:
	linux-core/drm_drv.c
	linux-core/drm_fops.c
	linux-core/drm_objects.h
	linux-core/drm_stub.c
	shared-core/i915_dma.c
Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile.kernel     |   7
-rw-r--r--  linux-core/ati_pcigart.c       |   6
-rw-r--r--  linux-core/drmP.h              |  54
-rw-r--r--  linux-core/drm_agpsupport.c    |  28
-rw-r--r--  linux-core/drm_bo.c            | 137
-rw-r--r--  linux-core/drm_bo_move.c       |   6
-rw-r--r--  linux-core/drm_bufs.c          |  43
-rw-r--r--  linux-core/drm_compat.c        | 123
-rw-r--r--  linux-core/drm_compat.h        |   9
-rw-r--r--  linux-core/drm_context.c       | 162
-rw-r--r--  linux-core/drm_drawable.c      | 226
-rw-r--r--  linux-core/drm_drv.c           |  93
-rw-r--r--  linux-core/drm_fence.c         | 151
-rw-r--r--  linux-core/drm_fops.c          |  47
-rw-r--r--  linux-core/drm_ioc32.c         |   1
-rw-r--r--  linux-core/drm_ioctl.c         |  14
-rw-r--r--  linux-core/drm_irq.c           |  20
-rw-r--r--  linux-core/drm_memory.c        |   2
-rw-r--r--  linux-core/drm_object.c        |   9
-rw-r--r--  linux-core/drm_objects.h       |  28
-rw-r--r--  linux-core/drm_pci.c           |   2
-rw-r--r--  linux-core/drm_proc.c          |  19
-rw-r--r--  linux-core/drm_stub.c          |  16
-rw-r--r--  linux-core/drm_vm.c            |  36
-rw-r--r--  linux-core/i810_dma.c          |   6
-rw-r--r--  linux-core/i915_drv.c          |   6
-rw-r--r--  linux-core/i915_fence.c        |   2
-rw-r--r--  linux-core/i915_ioc32.c        |   1
-rw-r--r--  linux-core/mga_ioc32.c         |   1
-rw-r--r--  linux-core/nouveau_ioc32.c     |   1
l---------  linux-core/nouveau_notifier.c  |   1
l---------  linux-core/nv04_fifo.c         |   1
l---------  linux-core/nv10_fifo.c         |   1
l---------  linux-core/nv40_fifo.c         |   1
l---------  linux-core/nv50_fifo.c         |   1
l---------  linux-core/nv50_graph.c        |   1
l---------  linux-core/nv50_mc.c           |   1
-rw-r--r--  linux-core/r128_ioc32.c        |   1
-rw-r--r--  linux-core/radeon_drv.c        |   3
-rw-r--r--  linux-core/radeon_ioc32.c      |  32
-rw-r--r--  linux-core/sis_mm.c            |   2
41 files changed, 627 insertions, 674 deletions
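The bulk of this merge is mechanical: open-coded prev/next pointers in structures such as drm_agp_mem_t, drm_vma_entry_t and drm_file_t are replaced by an embedded struct list_head, and dev->maplist, dev->ctxlist, dev->vmalist and the vblank signal lists become list heads held directly in drm_device. The fragment below is only an illustrative sketch of that kernel list idiom; mini_entry, mini_list and mini_demo are hypothetical names, not part of the patch.

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical structure standing in for drm_agp_mem_t, drm_vma_entry_t, ... */
    struct mini_entry {
    	struct list_head head;      /* embedded link, replaces *prev / *next */
    	int payload;
    };

    static LIST_HEAD(mini_list);        /* replaces a bare "first/last" pointer pair */

    static void mini_demo(void)
    {
    	struct mini_entry *e, *tmp;

    	e = kzalloc(sizeof(*e), GFP_KERNEL);
    	if (!e)
    		return;
    	list_add(&e->head, &mini_list);             /* was: hand-rolled insertion */

    	list_for_each_entry(e, &mini_list, head)    /* was: for (e = first; e; e = e->next) */
    		e->payload++;

    	list_for_each_entry_safe(e, tmp, &mini_list, head) {  /* safe while deleting */
    		list_del(&e->head);
    		kfree(e);
    	}
    }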
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index b4fe7fa8..ac613d2e 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -23,12 +23,13 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
i915_buffer.o intel_display.o intel_crt.o intel_lvds.o \
intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
- nouveau_object.o nouveau_irq.o \
+ nouveau_object.o nouveau_irq.o nouveau_notifier.o \
nv04_timer.o \
- nv04_mc.o nv40_mc.o \
+ nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
+ nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \
- nv40_graph.o
+ nv40_graph.o nv50_graph.o
radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index 52bf8922..524618a8 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -151,7 +151,8 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
+ order = drm_order((gart_info->table_size +
+ (PAGE_SIZE-1)) / PAGE_SIZE);
num_pages = 1 << order;
address = drm_ati_alloc_pcigart_table(order);
if (!address) {
@@ -169,7 +170,8 @@ int drm_ati_pcigart_init(drm_device_t *dev, drm_ati_pcigart_info *gart_info)
PCI_DMA_TODEVICE);
if (bus_address == 0) {
DRM_ERROR("unable to map PCIGART pages!\n");
- order = drm_order((gart_info->table_size + (PAGE_SIZE-1)) / PAGE_SIZE);
+ order = drm_order((gart_info->table_size +
+ (PAGE_SIZE-1)) / PAGE_SIZE);
drm_ati_free_pcigart_table(address, order);
address = NULL;
goto done;
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 4e36dab1..510de608 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -76,6 +76,7 @@
#include <asm/pgalloc.h>
#include "drm.h"
#include <linux/slab.h>
+#include <linux/idr.h>
#define __OS_HAS_AGP (defined(CONFIG_AGP) || (defined(CONFIG_AGP_MODULE) && defined(MODULE)))
#define __OS_HAS_MTRR (defined(CONFIG_MTRR))
@@ -302,19 +303,14 @@ typedef struct drm_devstate {
} drm_devstate_t;
typedef struct drm_magic_entry {
- drm_hash_item_t hash_item;
struct list_head head;
+ drm_hash_item_t hash_item;
struct drm_file *priv;
} drm_magic_entry_t;
-typedef struct drm_magic_head {
- struct drm_magic_entry *head;
- struct drm_magic_entry *tail;
-} drm_magic_head_t;
-
typedef struct drm_vma_entry {
+ struct list_head head;
struct vm_area_struct *vma;
- struct drm_vma_entry *next;
pid_t pid;
} drm_vma_entry_t;
@@ -413,8 +409,7 @@ typedef struct drm_file {
uid_t uid;
drm_magic_t magic;
unsigned long ioctl_count;
- struct drm_file *next;
- struct drm_file *prev;
+ struct list_head lhead;
struct drm_head *head;
int remove_auth_on_close;
unsigned long lock_count;
@@ -497,8 +492,7 @@ typedef struct drm_agp_mem {
DRM_AGP_MEM *memory;
unsigned long bound; /**< address */
int pages;
- struct drm_agp_mem *prev; /**< previous entry */
- struct drm_agp_mem *next; /**< next entry */
+ struct list_head head;
} drm_agp_mem_t;
/**
@@ -508,7 +502,7 @@ typedef struct drm_agp_mem {
*/
typedef struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
- drm_agp_mem_t *memory; /**< memory entries */
+ struct list_head memory;
unsigned long mode; /**< AGP mode */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
struct agp_bridge_data *bridge;
@@ -580,6 +574,10 @@ typedef struct drm_ctx_list {
drm_file_t *tag; /**< associated fd private data */
} drm_ctx_list_t;
+struct drm_ctx_sarea_list {
+ drm_map_t *map;
+};
+
typedef struct drm_vbl_sig {
struct list_head head;
unsigned int sequence;
@@ -604,6 +602,9 @@ typedef struct ati_pcigart_info {
int table_size;
} drm_ati_pcigart_info;
+struct drm_drawable_list {
+ drm_drawable_info_t info;
+};
#include "drm_objects.h"
@@ -734,15 +735,14 @@ typedef struct drm_device {
/** \name Authentication */
/*@{ */
- drm_file_t *file_first; /**< file list head */
- drm_file_t *file_last; /**< file list tail */
+ struct list_head filelist;
drm_open_hash_t magiclist;
struct list_head magicfree;
/*@} */
/** \name Memory management */
/*@{ */
- drm_map_list_t *maplist; /**< Linked list of regions */
+ struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
drm_open_hash_t map_hash; /**< User token hash table for maps */
drm_mm_t offset_manager; /**< User token manager */
@@ -752,14 +752,13 @@ typedef struct drm_device {
/** \name Context handle management */
/*@{ */
- drm_ctx_list_t *ctxlist; /**< Linked list of context handles */
+ struct list_head ctxlist; /**< Linked list of context handles */
int ctx_count; /**< Number of context handles */
struct mutex ctxlist_mutex; /**< For ctxlist */
- drm_map_t **context_sareas; /**< per-context SAREA's */
- int max_context;
+ struct idr ctx_idr;
- drm_vma_entry_t *vmalist; /**< List of vmas (for debugging) */
+ struct list_head vmalist; /**< List of vmas (for debugging) */
drm_lock_data_t lock; /**< Information on hardware lock */
/*@} */
@@ -795,8 +794,8 @@ typedef struct drm_device {
atomic_t vbl_received;
atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
spinlock_t vbl_lock;
- drm_vbl_sig_t vbl_sigs; /**< signal list to send on VBLANK */
- drm_vbl_sig_t vbl_sigs2; /**< signals to send on secondary VBLANK */
+ struct list_head vbl_sigs; /**< signal list to send on VBLANK */
+ struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
unsigned int vbl_pending;
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@@ -818,7 +817,6 @@ typedef struct drm_device {
struct pci_controller *hose;
#endif
drm_sg_mem_t *sg; /**< Scatter gather memory */
- unsigned long *ctx_bitmap; /**< context bitmap */
void *dev_private; /**< device private data */
drm_sigdata_t sigdata; /**< For block_all_signals */
sigset_t sigmask;
@@ -834,10 +832,7 @@ typedef struct drm_device {
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
- unsigned int drw_bitfield_length;
- u32 *drw_bitfield;
- unsigned int drw_info_length;
- drm_drawable_info_t **drw_info;
+ struct idr drw_idr;
/*@} */
/* DRM mode setting */
@@ -1018,6 +1013,7 @@ extern int drm_update_drawable_info(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev,
drm_drawable_t id);
+extern void drm_drawable_free_all(drm_device_t *dev);
/* Authentication IOCTL support (drm_auth.h) */
extern int drm_getmagic(struct inode *inode, struct file *filp,
@@ -1140,9 +1136,7 @@ extern struct drm_sysfs_class *drm_class;
extern struct proc_dir_entry *drm_proc_root;
extern drm_local_map_t *drm_getsarea(struct drm_device *dev);
-extern int drm_wait_on(drm_device_t *dev, wait_queue_head_t *queue,
- int timeout, int (*fn)(drm_device_t *dev, void *priv),
- void *priv);
+
/* Proc support (drm_proc.h) */
extern int drm_proc_init(drm_device_t * dev,
int minor,
@@ -1205,7 +1199,7 @@ static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
unsigned int token)
{
drm_map_list_t *_entry;
- list_for_each_entry(_entry, &dev->maplist->head, head)
+ list_for_each_entry(_entry, &dev->maplist, head)
if (_entry->user_token == token)
return _entry->map;
return NULL;
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 7d8f0737..f134563a 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -249,11 +249,7 @@ int drm_agp_alloc(drm_device_t *dev, drm_agp_buffer_t *request)
entry->memory = memory;
entry->bound = 0;
entry->pages = pages;
- entry->prev = NULL;
- entry->next = dev->agp->memory;
- if (dev->agp->memory)
- dev->agp->memory->prev = entry;
- dev->agp->memory = entry;
+ list_add(&entry->head, &dev->agp->memory);
request->handle = entry->handle;
request->physical = memory->physical;
@@ -280,10 +276,12 @@ int drm_agp_alloc_ioctl(struct inode *inode, struct file *filp,
return err;
if (copy_to_user(argp, &request, sizeof(request))) {
- drm_agp_mem_t *entry = dev->agp->memory;
-
- dev->agp->memory = entry->next;
- dev->agp->memory->prev = NULL;
+ drm_agp_mem_t *entry;
+ list_for_each_entry(entry, &dev->agp->memory, head) {
+ if (entry->handle == request.handle)
+ break;
+ }
+ list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
return -EFAULT;
@@ -306,7 +304,7 @@ static drm_agp_mem_t *drm_agp_lookup_entry(drm_device_t * dev,
{
drm_agp_mem_t *entry;
- for (entry = dev->agp->memory; entry; entry = entry->next) {
+ list_for_each_entry(entry, &dev->agp->memory, head) {
if (entry->handle == handle)
return entry;
}
@@ -435,13 +433,7 @@ int drm_agp_free(drm_device_t *dev, drm_agp_buffer_t *request)
if (entry->bound)
drm_unbind_agp(entry->memory);
- if (entry->prev)
- entry->prev->next = entry->next;
- else
- dev->agp->memory = entry->next;
-
- if (entry->next)
- entry->next->prev = entry->prev;
+ list_del(&entry->head);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
@@ -502,7 +494,7 @@ drm_agp_head_t *drm_agp_init(drm_device_t *dev)
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
}
- head->memory = NULL;
+ INIT_LIST_HEAD(&head->memory);
head->cant_use_aperture = head->agp_info.cant_use_aperture;
head->page_mask = head->agp_info.page_mask;
return head;
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 50f62dce..fd097968 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -67,6 +67,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo)
{
drm_mem_type_manager_t *man;
+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+ DRM_ASSERT_LOCKED(&bo->mutex);
+
man = &bo->dev->bm.man[bo->pinned_mem_type];
list_add_tail(&bo->pinned_lru, &man->pinned);
}
@@ -75,6 +78,8 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo)
{
drm_mem_type_manager_t *man;
+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+
if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|| bo->mem.mem_type != bo->pinned_mem_type) {
man = &bo->dev->bm.man[bo->mem.mem_type];
@@ -134,6 +139,8 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo)
int ret = 0;
bo->ttm = NULL;
+ DRM_ASSERT_LOCKED(&bo->mutex);
+
switch (bo->type) {
case drm_bo_type_dc:
bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
@@ -262,29 +269,25 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo,
int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
int no_wait)
{
-
- drm_fence_object_t *fence = bo->fence;
int ret;
- if (fence) {
- drm_device_t *dev = bo->dev;
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(dev, fence);
- bo->fence = NULL;
+ DRM_ASSERT_LOCKED(&bo->mutex);
+
+ if (bo->fence) {
+ if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
+ drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
if (no_wait) {
return -EBUSY;
}
ret =
- drm_fence_object_wait(dev, fence, lazy, ignore_signals,
+ drm_fence_object_wait(bo->fence, lazy, ignore_signals,
bo->fence_type);
if (ret)
return ret;
- drm_fence_usage_deref_unlocked(dev, fence);
- bo->fence = NULL;
-
+ drm_fence_usage_deref_unlocked(&bo->fence);
}
return 0;
}
@@ -312,10 +315,8 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors)
"Evicting buffer.\n");
}
}
- if (bo->fence) {
- drm_fence_usage_deref_unlocked(dev, bo->fence);
- bo->fence = NULL;
- }
+ if (bo->fence)
+ drm_fence_usage_deref_unlocked(&bo->fence);
}
return 0;
}
@@ -331,16 +332,17 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
atomic_inc(&bo->usage);
mutex_unlock(&dev->struct_mutex);
mutex_lock(&bo->mutex);
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
- if (bo->fence && drm_fence_object_signaled(bo->fence, bo->fence_type)) {
- drm_fence_usage_deref_locked(dev, bo->fence);
- bo->fence = NULL;
- }
+ if (bo->fence && drm_fence_object_signaled(bo->fence,
+ bo->fence_type, 0))
+ drm_fence_usage_deref_unlocked(&bo->fence);
if (bo->fence && remove_all)
(void)drm_bo_expire_fence(bo, 0);
@@ -371,7 +373,7 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all)
}
if (list_empty(&bo->ddestroy)) {
- drm_fence_object_flush(dev, bo->fence, bo->fence_type);
+ drm_fence_object_flush(bo->fence, bo->fence_type);
list_add_tail(&bo->ddestroy, &bm->ddestroy);
schedule_delayed_work(&bm->wq,
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
@@ -392,6 +394,8 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
drm_device_t *dev = bo->dev;
drm_buffer_manager_t *bm = &dev->bm;
+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
@@ -414,6 +418,7 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo)
atomic_dec(&bm->count);
+ BUG_ON(!list_empty(&bo->base.list));
drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
return;
@@ -488,10 +493,15 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-void drm_bo_usage_deref_locked(drm_buffer_object_t * bo)
+void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo)
{
- if (atomic_dec_and_test(&bo->usage)) {
- drm_bo_destroy_locked(bo);
+ struct drm_buffer_object *tmp_bo = *bo;
+ bo = NULL;
+
+ DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
+
+ if (atomic_dec_and_test(&tmp_bo->usage)) {
+ drm_bo_destroy_locked(tmp_bo);
}
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);
@@ -501,18 +511,22 @@ static void drm_bo_base_deref_locked(drm_file_t * priv, drm_user_object_t * uo)
drm_buffer_object_t *bo =
drm_user_object_entry(uo, drm_buffer_object_t, base);
+ DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
+
drm_bo_takedown_vm_locked(bo);
- drm_bo_usage_deref_locked(bo);
+ drm_bo_usage_deref_locked(&bo);
}
-static void drm_bo_usage_deref_unlocked(drm_buffer_object_t * bo)
+static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo)
{
- drm_device_t *dev = bo->dev;
+ struct drm_buffer_object *tmp_bo = *bo;
+ drm_device_t *dev = tmp_bo->dev;
- if (atomic_dec_and_test(&bo->usage)) {
+ *bo = NULL;
+ if (atomic_dec_and_test(&tmp_bo->usage)) {
mutex_lock(&dev->struct_mutex);
- if (atomic_read(&bo->usage) == 0)
- drm_bo_destroy_locked(bo);
+ if (atomic_read(&tmp_bo->usage) == 0)
+ drm_bo_destroy_locked(tmp_bo);
mutex_unlock(&dev->struct_mutex);
}
}
@@ -598,18 +612,17 @@ int drm_fence_buffer_objects(drm_file_t * priv,
if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
count++;
if (entry->fence)
- drm_fence_usage_deref_locked(dev, entry->fence);
- entry->fence = fence;
+ drm_fence_usage_deref_locked(&entry->fence);
+ entry->fence = drm_fence_reference_locked(fence);
DRM_FLAG_MASKED(entry->priv_flags, 0,
_DRM_BO_FLAG_UNFENCED);
DRM_WAKEUP(&entry->event_queue);
drm_bo_add_to_lru(entry);
}
mutex_unlock(&entry->mutex);
- drm_bo_usage_deref_locked(entry);
+ drm_bo_usage_deref_locked(&entry);
l = f_list.next;
}
- atomic_add(count, &fence->usage);
DRM_DEBUG("Fenced %d buffers\n", count);
out:
mutex_unlock(&dev->struct_mutex);
@@ -724,7 +737,7 @@ static int drm_bo_mem_force_space(drm_device_t * dev,
ret = drm_bo_evict(entry, mem_type, no_wait);
mutex_unlock(&entry->mutex);
- drm_bo_usage_deref_unlocked(entry);
+ drm_bo_usage_deref_unlocked(&entry);
if (ret)
return ret;
mutex_lock(&dev->struct_mutex);
@@ -944,10 +957,8 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
- drm_device_t *dev = bo->dev;
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(dev, fence);
- bo->fence = NULL;
+ if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+ drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
return 1;
@@ -966,16 +977,13 @@ static int drm_bo_busy(drm_buffer_object_t * bo)
BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
if (fence) {
- drm_device_t *dev = bo->dev;
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(dev, fence);
- bo->fence = NULL;
+ if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+ drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
- drm_fence_object_flush(dev, fence, DRM_FENCE_TYPE_EXE);
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(dev, fence);
- bo->fence = NULL;
+ drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
+ if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+ drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
return 1;
@@ -1172,7 +1180,7 @@ static int drm_buffer_object_map(drm_file_t * priv, uint32_t handle,
drm_bo_fill_rep_arg(bo, rep);
out:
mutex_unlock(&bo->mutex);
- drm_bo_usage_deref_unlocked(bo);
+ drm_bo_usage_deref_unlocked(&bo);
return ret;
}
@@ -1198,7 +1206,7 @@ static int drm_buffer_object_unmap(drm_file_t * priv, uint32_t handle)
}
drm_remove_ref_object(priv, ro);
- drm_bo_usage_deref_locked(bo);
+ drm_bo_usage_deref_locked(&bo);
out:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1462,11 +1470,14 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
uint32_t flags, uint32_t mask, uint32_t hint,
drm_bo_arg_reply_t * rep)
{
+ struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
int ret;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+ mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(priv, handle, 1);
+ mutex_unlock(&dev->struct_mutex);
if (!bo) {
return -EINVAL;
}
@@ -1491,16 +1502,20 @@ static int drm_bo_handle_validate(drm_file_t * priv, uint32_t handle,
mutex_unlock(&bo->mutex);
- drm_bo_usage_deref_unlocked(bo);
+ drm_bo_usage_deref_unlocked(&bo);
return ret;
}
static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
drm_bo_arg_reply_t * rep)
{
+ struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
+ mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(priv, handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+
if (!bo) {
return -EINVAL;
}
@@ -1509,18 +1524,22 @@ static int drm_bo_handle_info(drm_file_t * priv, uint32_t handle,
(void)drm_bo_busy(bo);
drm_bo_fill_rep_arg(bo, rep);
mutex_unlock(&bo->mutex);
- drm_bo_usage_deref_unlocked(bo);
+ drm_bo_usage_deref_unlocked(&bo);
return 0;
}
static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
uint32_t hint, drm_bo_arg_reply_t * rep)
{
+ struct drm_device *dev = priv->head->dev;
drm_buffer_object_t *bo;
int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
int ret;
+ mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(priv, handle, 1);
+ mutex_unlock(&dev->struct_mutex);
+
if (!bo) {
return -EINVAL;
}
@@ -1537,7 +1556,7 @@ static int drm_bo_handle_wait(drm_file_t * priv, uint32_t handle,
out:
mutex_unlock(&bo->mutex);
- drm_bo_usage_deref_unlocked(bo);
+ drm_bo_usage_deref_unlocked(&bo);
return ret;
}
@@ -1622,7 +1641,7 @@ int drm_buffer_object_create(drm_device_t *dev,
out_err:
mutex_unlock(&bo->mutex);
- drm_bo_usage_deref_unlocked(bo);
+ drm_bo_usage_deref_unlocked(&bo);
return ret;
}
EXPORT_SYMBOL(drm_buffer_object_create);
@@ -1700,7 +1719,7 @@ int drm_bo_ioctl(DRM_IOCTL_ARGS)
mask &
DRM_BO_FLAG_SHAREABLE);
if (rep.ret)
- drm_bo_usage_deref_unlocked(entry);
+ drm_bo_usage_deref_unlocked(&entry);
if (rep.ret)
break;
@@ -1928,7 +1947,7 @@ restart:
allow_errors);
mutex_lock(&dev->struct_mutex);
- drm_bo_usage_deref_locked(entry);
+ drm_bo_usage_deref_locked(&entry);
if (ret)
return ret;
@@ -1938,10 +1957,8 @@ restart:
do_restart = ((next->prev != list) && (next->prev != prev));
- if (nentry != NULL && do_restart) {
- drm_bo_usage_deref_locked(nentry);
- nentry = NULL;
- }
+ if (nentry != NULL && do_restart)
+ drm_bo_usage_deref_locked(&nentry);
if (do_restart)
goto restart;
@@ -2321,6 +2338,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
drm_local_map_t *map;
drm_device_t *dev = bo->dev;
+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
if (list->user_token) {
drm_ht_remove_item(&dev->map_hash, &list->hash);
list->user_token = 0;
@@ -2337,7 +2355,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo)
drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
list->map = NULL;
list->user_token = 0ULL;
- drm_bo_usage_deref_locked(bo);
+ drm_bo_usage_deref_locked(&bo);
}
static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
@@ -2346,6 +2364,7 @@ static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo)
drm_local_map_t *map;
drm_device_t *dev = bo->dev;
+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
if (!list->map)
return -ENOMEM;
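A recurring change in drm_bo.c (and in drm_fence.c further down) is that the usage-deref helpers now take a pointer to the caller's pointer and clear it, so call sites no longer need a separate "bo->fence = NULL;" after dropping a reference. The following is a minimal sketch of that idiom under generic, made-up names (refobj, refobj_deref); it is not the actual DRM code.

    #include <linux/slab.h>
    #include <asm/atomic.h>

    /* Hypothetical stand-in for drm_buffer_object_t / drm_fence_object_t. */
    struct refobj {
    	atomic_t usage;
    };

    static void refobj_deref(struct refobj **pobj)   /* note: ** rather than * */
    {
    	struct refobj *tmp = *pobj;

    	*pobj = NULL;                      /* caller's pointer is invalidated first */
    	if (atomic_dec_and_test(&tmp->usage))
    		kfree(tmp);
    }

    /* Call site: refobj_deref(&bo->fence); replaces the old two-step
     * "deref(dev, bo->fence); bo->fence = NULL;" pattern seen in the removed lines. */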
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index eaeef1b0..28c1509e 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -308,7 +308,7 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo,
INIT_LIST_HEAD(&fbo->p_mm_list);
#endif
- atomic_inc(&bo->fence->usage);
+ drm_fence_reference_unlocked(&fbo->fence, bo->fence);
fbo->pinned_node = NULL;
fbo->mem.mm_node->private = (void *)fbo;
atomic_set(&fbo->usage, 1);
@@ -341,7 +341,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
drm_buffer_object_t *old_obj;
if (bo->fence)
- drm_fence_usage_deref_unlocked(dev, bo->fence);
+ drm_fence_usage_deref_unlocked(&bo->fence);
ret = drm_fence_object_create(dev, fence_class, fence_type,
fence_flags | DRM_FENCE_FLAG_EMIT,
&bo->fence);
@@ -398,7 +398,7 @@ int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
drm_bo_add_to_lru(old_obj);
- drm_bo_usage_deref_locked(old_obj);
+ drm_bo_usage_deref_locked(&old_obj);
mutex_unlock(&dev->struct_mutex);
}
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 1ba53c8f..38f0dd3a 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -51,10 +51,8 @@ EXPORT_SYMBOL(drm_get_resource_len);
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
drm_local_map_t *map)
{
- struct list_head *list;
-
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
+ drm_map_list_t *entry;
+ list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
((entry->map->offset == map->offset) ||
((map->type == _DRM_SHM) && (map->flags&_DRM_CONTAINS_LOCK)))) {
@@ -237,14 +235,14 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
* skipped and we double check that dev->agp->memory is
* actually set as well as being invalid before EPERM'ing
*/
- for (entry = dev->agp->memory; entry; entry = entry->next) {
+ list_for_each_entry(entry, &dev->agp->memory, head) {
if ((map->offset >= entry->bound) &&
(map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
- if (dev->agp->memory && !valid) {
+ if (!list_empty(&dev->agp->memory) && !valid) {
drm_free(map, sizeof(*map), DRM_MEM_MAPS);
return -EPERM;
}
@@ -288,7 +286,7 @@ static int drm_addmap_core(drm_device_t * dev, unsigned int offset,
list->map = map;
mutex_lock(&dev->struct_mutex);
- list_add(&list->head, &dev->maplist->head);
+ list_add(&list->head, &dev->maplist);
/* Assign a 32-bit handle */
@@ -380,29 +378,28 @@ int drm_addmap_ioctl(struct inode *inode, struct file *filp,
*/
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
- struct list_head *list;
- drm_map_list_t *r_list = NULL;
+ drm_map_list_t *r_list = NULL, *list_t;
drm_dma_handle_t dmah;
+ int found = 0;
/* Find the list entry for the map and remove it */
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
-
+ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
- list_del(list);
+ list_del(&r_list->head);
drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
- drm_free(list, sizeof(*list), DRM_MEM_MAPS);
+ drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
+ found = 1;
break;
}
}
+ if (!found) {
+ return -EINVAL;
+ }
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
- if (list == (&dev->maplist->head)) {
- return -EINVAL;
- }
switch (map->type) {
case _DRM_REGISTERS:
@@ -465,7 +462,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
drm_device_t *dev = priv->head->dev;
drm_map_t request;
drm_local_map_t *map = NULL;
- struct list_head *list;
+ drm_map_list_t *r_list;
int ret;
if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request))) {
@@ -473,9 +470,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
-
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->user_token == (unsigned long)request.handle &&
r_list->map->flags & _DRM_REMOVABLE) {
@@ -487,7 +482,7 @@ int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
/* List has wrapped around to the head pointer, or its empty we didn't
* find anything.
*/
- if (list == (&dev->maplist->head)) {
+ if (list_empty(&dev->maplist) || !map) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
@@ -611,14 +606,14 @@ int drm_addbufs_agp(drm_device_t * dev, drm_buf_desc_t * request)
/* Make sure buffers are located in AGP memory that we own */
valid = 0;
- for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
+ list_for_each_entry(agp_entry, &dev->agp->memory, head) {
if ((agp_offset >= agp_entry->bound) &&
(agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
valid = 1;
break;
}
}
- if (dev->agp->memory && !valid) {
+ if (!list_empty(&dev->agp->memory) && !valid) {
DRM_DEBUG("zone invalid\n");
return -EINVAL;
}
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 9ac5658c..08d20d06 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -555,3 +555,126 @@ void drm_bo_finish_unmap(drm_buffer_object_t *bo)
#endif
+#ifdef DRM_IDR_COMPAT_FN
+/* only called when idp->lock is held */
+static void __free_layer(struct idr *idp, struct idr_layer *p)
+{
+ p->ary[0] = idp->id_free;
+ idp->id_free = p;
+ idp->id_free_cnt++;
+}
+
+static void free_layer(struct idr *idp, struct idr_layer *p)
+{
+ unsigned long flags;
+
+ /*
+ * Depends on the return element being zeroed.
+ */
+ spin_lock_irqsave(&idp->lock, flags);
+ __free_layer(idp, p);
+ spin_unlock_irqrestore(&idp->lock, flags);
+}
+
+/**
+ * idr_for_each - iterate through all stored pointers
+ * @idp: idr handle
+ * @fn: function to be called for each pointer
+ * @data: data passed back to callback function
+ *
+ * Iterate over the pointers registered with the given idr. The
+ * callback function will be called for each pointer currently
+ * registered, passing the id, the pointer and the data pointer passed
+ * to this function. It is not safe to modify the idr tree while in
+ * the callback, so functions such as idr_get_new and idr_remove are
+ * not allowed.
+ *
+ * We check the return of @fn each time. If it returns anything other
+ * than 0, we break out and return that value.
+ *
+ * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
+ */
+int idr_for_each(struct idr *idp,
+ int (*fn)(int id, void *p, void *data), void *data)
+{
+ int n, id, max, error = 0;
+ struct idr_layer *p;
+ struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+ max = 1 << n;
+
+ id = 0;
+ while (id < max) {
+ while (n > 0 && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+ p = p->ary[(id >> n) & IDR_MASK];
+ }
+
+ if (p) {
+ error = fn(id, (void *)p, data);
+ if (error)
+ break;
+ }
+
+ id += 1 << n;
+ while (n < fls(id)) {
+ n += IDR_BITS;
+ p = *--paa;
+ }
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(idr_for_each);
+
+/**
+ * idr_remove_all - remove all ids from the given idr tree
+ * @idp: idr handle
+ *
+ * idr_destroy() only frees up unused, cached idp_layers, but this
+ * function will remove all id mappings and leave all idp_layers
+ * unused.
+ *
+ * A typical clean-up sequence for objects stored in an idr tree will
+ * use idr_for_each() to free all objects, if necessary, then
+ * idr_remove_all() to remove all ids, and idr_destroy() to free
+ * up the cached idr_layers.
+ */
+void idr_remove_all(struct idr *idp)
+{
+ int n, id, max, error = 0;
+ struct idr_layer *p;
+ struct idr_layer *pa[MAX_LEVEL];
+ struct idr_layer **paa = &pa[0];
+
+ n = idp->layers * IDR_BITS;
+ p = idp->top;
+ max = 1 << n;
+
+ id = 0;
+ while (id < max && !error) {
+ while (n > IDR_BITS && p) {
+ n -= IDR_BITS;
+ *paa++ = p;
+ p = p->ary[(id >> n) & IDR_MASK];
+ }
+
+ id += 1 << n;
+ while (n < fls(id)) {
+ if (p) {
+ memset(p, 0, sizeof *p);
+ free_layer(idp, p);
+ }
+ n += IDR_BITS;
+ p = *--paa;
+ }
+ }
+ idp->top = NULL;
+ idp->layers = 0;
+}
+EXPORT_SYMBOL(idr_remove_all);
+#endif
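The idr_remove_all() comment above spells out the intended teardown order, and drm_ctxbitmap_cleanup() and drm_drawable_free_all() later in this patch follow it. Below is a generic sketch of that sequence; my_idr, my_obj and my_idr_teardown are illustrative names only, not part of the patch.

    #include <linux/idr.h>
    #include <linux/slab.h>

    static DEFINE_IDR(my_idr);          /* illustrative idr instance */

    struct my_obj {
    	int payload;
    };

    static int my_obj_free(int id, void *p, void *data)
    {
    	kfree(p);                   /* free each stored object ...           */
    	return 0;                   /* ... returning 0 keeps the walk going  */
    }

    static void my_idr_teardown(void)
    {
    	idr_for_each(&my_idr, my_obj_free, NULL);  /* 1. free the objects        */
    	idr_remove_all(&my_idr);                   /* 2. drop all id mappings    */
    	idr_destroy(&my_idr);                      /* 3. free cached idr_layers  */
    }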
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index bada1fdf..1cf7f165 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -312,4 +312,13 @@ extern int drm_bo_remap_bound(struct drm_buffer_object *bo);
extern int drm_bo_map_bound(struct vm_area_struct *vma);
#endif
+
+/* fixme when functions are upstreamed */
+#define DRM_IDR_COMPAT_FN
+#ifdef DRM_IDR_COMPAT_FN
+int idr_for_each(struct idr *idp,
+ int (*fn)(int id, void *p, void *data), void *data);
+void idr_remove_all(struct idr *idp);
+#endif
+
#endif
diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c
index 49042272..101a298c 100644
--- a/linux-core/drm_context.c
+++ b/linux-core/drm_context.c
@@ -53,25 +53,21 @@
* \param ctx_handle context handle.
*
* Clears the bit specified by \p ctx_handle in drm_device::ctx_bitmap and the entry
- * in drm_device::context_sareas, while holding the drm_device::struct_mutex
+ * in drm_device::ctx_idr, while holding the drm_device::struct_mutex
* lock.
*/
void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
{
- if (ctx_handle < 0)
- goto failed;
- if (!dev->ctx_bitmap)
- goto failed;
-
- if (ctx_handle < DRM_MAX_CTXBITMAP) {
- mutex_lock(&dev->struct_mutex);
- clear_bit(ctx_handle, dev->ctx_bitmap);
- dev->context_sareas[ctx_handle] = NULL;
- mutex_unlock(&dev->struct_mutex);
- return;
- }
- failed:
- DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
+ struct drm_ctx_sarea_list *ctx;
+
+ mutex_lock(&dev->struct_mutex);
+ ctx = idr_find(&dev->ctx_idr, ctx_handle);
+ if (ctx) {
+ idr_remove(&dev->ctx_idr, ctx_handle);
+ drm_free(ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
+ } else
+ DRM_ERROR("Attempt to free invalid context handle: %d\n", ctx_handle);
+ mutex_unlock(&dev->struct_mutex);
return;
}
@@ -81,62 +77,34 @@ void drm_ctxbitmap_free(drm_device_t * dev, int ctx_handle)
* \param dev DRM device.
* \return (non-negative) context handle on success or a negative number on failure.
*
- * Find the first zero bit in drm_device::ctx_bitmap and (re)allocates
- * drm_device::context_sareas to accommodate the new entry while holding the
+ * Allocate a new idr from drm_device::ctx_idr while holding the
* drm_device::struct_mutex lock.
*/
static int drm_ctxbitmap_next(drm_device_t * dev)
{
- int bit;
+ int new_id;
+ int ret;
+ struct drm_ctx_sarea_list *new_ctx;
- if (!dev->ctx_bitmap)
+ new_ctx = drm_calloc(1, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
+ if (!new_ctx)
return -1;
+again:
+ if (idr_pre_get(&dev->ctx_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Out of memory expanding drawable idr\n");
+ drm_free(new_ctx, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
+ return -ENOMEM;
+ }
mutex_lock(&dev->struct_mutex);
- bit = find_first_zero_bit(dev->ctx_bitmap, DRM_MAX_CTXBITMAP);
- if (bit < DRM_MAX_CTXBITMAP) {
- set_bit(bit, dev->ctx_bitmap);
- DRM_DEBUG("drm_ctxbitmap_next bit : %d\n", bit);
- if ((bit + 1) > dev->max_context) {
- dev->max_context = (bit + 1);
- if (dev->context_sareas) {
- drm_map_t **ctx_sareas;
-
- ctx_sareas = drm_realloc(dev->context_sareas,
- (dev->max_context -
- 1) *
- sizeof(*dev->
- context_sareas),
- dev->max_context *
- sizeof(*dev->
- context_sareas),
- DRM_MEM_MAPS);
- if (!ctx_sareas) {
- clear_bit(bit, dev->ctx_bitmap);
- mutex_unlock(&dev->struct_mutex);
- return -1;
- }
- dev->context_sareas = ctx_sareas;
- dev->context_sareas[bit] = NULL;
- } else {
- /* max_context == 1 at this point */
- dev->context_sareas =
- drm_alloc(dev->max_context *
- sizeof(*dev->context_sareas),
- DRM_MEM_MAPS);
- if (!dev->context_sareas) {
- clear_bit(bit, dev->ctx_bitmap);
- mutex_unlock(&dev->struct_mutex);
- return -1;
- }
- dev->context_sareas[bit] = NULL;
- }
- }
+ ret = idr_get_new_above(&dev->ctx_idr, new_ctx, DRM_RESERVED_CONTEXTS, &new_id);
+ if (ret == -EAGAIN) {
mutex_unlock(&dev->struct_mutex);
- return bit;
+ goto again;
}
+
mutex_unlock(&dev->struct_mutex);
- return -1;
+ return new_id;
}
/**
@@ -144,31 +112,20 @@ static int drm_ctxbitmap_next(drm_device_t * dev)
*
* \param dev DRM device.
*
- * Allocates and initialize drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Initialise the drm_device::ctx_idr
*/
int drm_ctxbitmap_init(drm_device_t * dev)
{
- int i;
- int temp;
+ idr_init(&dev->ctx_idr);
+ return 0;
+}
- mutex_lock(&dev->struct_mutex);
- dev->ctx_bitmap = (unsigned long *)drm_alloc(PAGE_SIZE,
- DRM_MEM_CTXBITMAP);
- if (dev->ctx_bitmap == NULL) {
- mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
- }
- memset((void *)dev->ctx_bitmap, 0, PAGE_SIZE);
- dev->context_sareas = NULL;
- dev->max_context = -1;
- mutex_unlock(&dev->struct_mutex);
- for (i = 0; i < DRM_RESERVED_CONTEXTS; i++) {
- temp = drm_ctxbitmap_next(dev);
- DRM_DEBUG("drm_ctxbitmap_init : %d\n", temp);
- }
+static int drm_ctx_sarea_free(int id, void *p, void *data)
+{
+ struct drm_ctx_sarea_list *ctx_entry = p;
+ drm_free(ctx_entry, sizeof(struct drm_ctx_sarea_list), DRM_MEM_CTXLIST);
return 0;
}
@@ -177,17 +134,14 @@ int drm_ctxbitmap_init(drm_device_t * dev)
*
* \param dev DRM device.
*
- * Frees drm_device::ctx_bitmap and drm_device::context_sareas, while holding
- * the drm_device::struct_mutex lock.
+ * Free all idr members using drm_ctx_sarea_free helper function
+ * while holding the drm_device::struct_mutex lock.
*/
void drm_ctxbitmap_cleanup(drm_device_t * dev)
{
mutex_lock(&dev->struct_mutex);
- if (dev->context_sareas)
- drm_free(dev->context_sareas,
- sizeof(*dev->context_sareas) *
- dev->max_context, DRM_MEM_MAPS);
- drm_free((void *)dev->ctx_bitmap, PAGE_SIZE, DRM_MEM_CTXBITMAP);
+ idr_for_each(&dev->ctx_idr, drm_ctx_sarea_free, NULL);
+ idr_remove_all(&dev->ctx_idr);
mutex_unlock(&dev->struct_mutex);
}
@@ -206,7 +160,7 @@ void drm_ctxbitmap_cleanup(drm_device_t * dev)
* \param arg user argument pointing to a drm_ctx_priv_map structure.
* \return zero on success or a negative number on failure.
*
- * Gets the map from drm_device::context_sareas with the handle specified and
+ * Gets the map from drm_device::ctx_idr with the handle specified and
* returns its handle.
*/
int drm_getsareactx(struct inode *inode, struct file *filp,
@@ -218,22 +172,24 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
drm_ctx_priv_map_t request;
drm_map_t *map;
drm_map_list_t *_entry;
+ struct drm_ctx_sarea_list *ctx_sarea;
if (copy_from_user(&request, argp, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
- if (dev->max_context < 0
- || request.ctx_id >= (unsigned)dev->max_context) {
+
+ ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
+ if (!ctx_sarea) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
+ map = ctx_sarea->map;
- map = dev->context_sareas[request.ctx_id];
mutex_unlock(&dev->struct_mutex);
request.handle = NULL;
- list_for_each_entry(_entry, &dev->maplist->head,head) {
+ list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
request.handle =
(void *)(unsigned long)_entry->user_token;
@@ -258,7 +214,7 @@ int drm_getsareactx(struct inode *inode, struct file *filp,
* \return zero on success or a negative number on failure.
*
* Searches the mapping specified in \p arg and update the entry in
- * drm_device::context_sareas with it.
+ * drm_device::ctx_idr with it.
*/
int drm_setsareactx(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg)
@@ -268,15 +224,14 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
drm_ctx_priv_map_t request;
drm_map_t *map = NULL;
drm_map_list_t *r_list = NULL;
- struct list_head *list;
+ struct drm_ctx_sarea_list *ctx_sarea;
if (copy_from_user(&request,
(drm_ctx_priv_map_t __user *) arg, sizeof(request)))
return -EFAULT;
mutex_lock(&dev->struct_mutex);
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map
&& r_list->user_token == (unsigned long) request.handle)
goto found;
@@ -289,11 +244,14 @@ int drm_setsareactx(struct inode *inode, struct file *filp,
map = r_list->map;
if (!map)
goto bad;
- if (dev->max_context < 0)
- goto bad;
- if (request.ctx_id >= (unsigned)dev->max_context)
+
+ mutex_lock(&dev->struct_mutex);
+
+ ctx_sarea = idr_find(&dev->ctx_idr, request.ctx_id);
+ if (!ctx_sarea)
goto bad;
- dev->context_sareas[request.ctx_id] = map;
+
+ ctx_sarea->map = map;
mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -449,7 +407,7 @@ int drm_addctx(struct inode *inode, struct file *filp,
ctx_entry->tag = priv;
mutex_lock(&dev->ctxlist_mutex);
- list_add(&ctx_entry->head, &dev->ctxlist->head);
+ list_add(&ctx_entry->head, &dev->ctxlist);
++dev->ctx_count;
mutex_unlock(&dev->ctxlist_mutex);
@@ -575,10 +533,10 @@ int drm_rmctx(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->ctxlist_mutex);
- if (!list_empty(&dev->ctxlist->head)) {
+ if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
- list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->handle == ctx.handle) {
list_del(&pos->head);
drm_free(pos, sizeof(*pos), DRM_MEM_CTXLIST);
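Both drm_ctxbitmap_next() above and drm_adddraw() in the next file allocate handles with the same two-step idr API of this era: idr_pre_get() to preallocate layer memory, then idr_get_new_above(), retrying on -EAGAIN. A generic sketch of that loop follows; alloc_id is a hypothetical helper, not code from the patch.

    #include <linux/idr.h>

    /* Illustrative only: allocate an id >= first_id for ptr in *idr,
     * mirroring the retry loop used by drm_ctxbitmap_next()/drm_adddraw(). */
    static int alloc_id(struct idr *idr, void *ptr, int first_id)
    {
    	int id, ret;

    again:
    	if (idr_pre_get(idr, GFP_KERNEL) == 0)   /* preallocate layer memory */
    		return -ENOMEM;

    	ret = idr_get_new_above(idr, ptr, first_id, &id);
    	if (ret == -EAGAIN)                      /* raced: preallocation consumed */
    		goto again;
    	if (ret)
    		return ret;

    	return id;                               /* new handle */
    }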
diff --git a/linux-core/drm_drawable.c b/linux-core/drm_drawable.c
index 0817e321..eb44a189 100644
--- a/linux-core/drm_drawable.c
+++ b/linux-core/drm_drawable.c
@@ -44,82 +44,35 @@ int drm_adddraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
unsigned long irqflags;
- int i, j;
- u32 *bitfield = dev->drw_bitfield;
- unsigned int bitfield_length = dev->drw_bitfield_length;
- drm_drawable_info_t **info = dev->drw_info;
- unsigned int info_length = dev->drw_info_length;
+ struct drm_drawable_list *draw_info;
drm_draw_t draw;
-
- for (i = 0, j = 0; i < bitfield_length; i++) {
- if (bitfield[i] == ~0)
- continue;
-
- for (; j < 8 * sizeof(*bitfield); j++)
- if (!(bitfield[i] & (1 << j)))
- goto done;
- }
-done:
-
- if (i == bitfield_length) {
- bitfield_length++;
-
- bitfield = drm_alloc(bitfield_length * sizeof(*bitfield),
- DRM_MEM_BUFS);
-
- if (!bitfield) {
- DRM_ERROR("Failed to allocate new drawable bitfield\n");
- return DRM_ERR(ENOMEM);
- }
-
- if (8 * sizeof(*bitfield) * bitfield_length > info_length) {
- info_length += 8 * sizeof(*bitfield);
-
- info = drm_alloc(info_length * sizeof(*info),
- DRM_MEM_BUFS);
-
- if (!info) {
- DRM_ERROR("Failed to allocate new drawable info"
- " array\n");
-
- drm_free(bitfield,
- bitfield_length * sizeof(*bitfield),
- DRM_MEM_BUFS);
- return DRM_ERR(ENOMEM);
- }
- }
-
- bitfield[i] = 0;
+ int new_id = 0;
+ int ret;
+
+ draw_info = drm_calloc(1, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
+ if (!draw_info)
+ return -ENOMEM;
+
+again:
+ if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
+ DRM_ERROR("Out of memory expanding drawable idr\n");
+ drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
+ return -ENOMEM;
}
- draw.handle = i * 8 * sizeof(*bitfield) + j + 1;
- DRM_DEBUG("%d\n", draw.handle);
-
spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- bitfield[i] |= 1 << j;
- info[draw.handle - 1] = NULL;
-
- if (bitfield != dev->drw_bitfield) {
- memcpy(bitfield, dev->drw_bitfield, dev->drw_bitfield_length *
- sizeof(*bitfield));
- drm_free(dev->drw_bitfield, sizeof(*bitfield) *
- dev->drw_bitfield_length, DRM_MEM_BUFS);
- dev->drw_bitfield = bitfield;
- dev->drw_bitfield_length = bitfield_length;
- }
-
- if (info != dev->drw_info) {
- memcpy(info, dev->drw_info, dev->drw_info_length *
- sizeof(*info));
- drm_free(dev->drw_info, sizeof(*info) * dev->drw_info_length,
- DRM_MEM_BUFS);
- dev->drw_info = info;
- dev->drw_info_length = info_length;
+ ret = idr_get_new_above(&dev->drw_idr, draw_info, 1, &new_id);
+ if (ret == -EAGAIN) {
+ spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ goto again;
}
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
+ draw.handle = new_id;
+
+ DRM_DEBUG("%d\n", draw.handle);
+
DRM_COPY_TO_USER_IOCTL((drm_draw_t __user *)data, draw, sizeof(draw));
return 0;
@@ -132,87 +85,24 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
{
DRM_DEVICE;
drm_draw_t draw;
- int id, idx;
- unsigned int shift;
unsigned long irqflags;
- u32 *bitfield = dev->drw_bitfield;
- unsigned int bitfield_length = dev->drw_bitfield_length;
- drm_drawable_info_t **info = dev->drw_info;
- unsigned int info_length = dev->drw_info_length;
+ struct drm_drawable_list *draw_info;
DRM_COPY_FROM_USER_IOCTL(draw, (drm_draw_t __user *) data,
sizeof(draw));
- id = draw.handle - 1;
- idx = id / (8 * sizeof(*bitfield));
- shift = id % (8 * sizeof(*bitfield));
-
- if (idx < 0 || idx >= bitfield_length ||
- !(bitfield[idx] & (1 << shift))) {
+ draw_info = idr_find(&dev->drw_idr, draw.handle);
+ if (!draw_info) {
DRM_DEBUG("No such drawable %d\n", draw.handle);
- return 0;
+ return -EINVAL;
}
spin_lock_irqsave(&dev->drw_lock, irqflags);
- bitfield[idx] &= ~(1 << shift);
+ idr_remove(&dev->drw_idr, draw.handle);
+ drm_free(draw_info, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
- if (info[id]) {
- drm_free(info[id]->rects, info[id]->num_rects *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
- drm_free(info[id], sizeof(**info), DRM_MEM_BUFS);
- }
-
- /* Can we shrink the arrays? */
- if (idx == bitfield_length - 1) {
- while (idx >= 0 && !bitfield[idx])
- --idx;
-
- bitfield_length = idx + 1;
-
- if (idx != id / (8 * sizeof(*bitfield)))
- bitfield = drm_alloc(bitfield_length *
- sizeof(*bitfield), DRM_MEM_BUFS);
-
- if (!bitfield && bitfield_length) {
- bitfield = dev->drw_bitfield;
- bitfield_length = dev->drw_bitfield_length;
- }
- }
-
- if (bitfield != dev->drw_bitfield) {
- info_length = 8 * sizeof(*bitfield) * bitfield_length;
-
- info = drm_alloc(info_length * sizeof(*info), DRM_MEM_BUFS);
-
- if (!info && info_length) {
- info = dev->drw_info;
- info_length = dev->drw_info_length;
- }
-
- spin_lock_irqsave(&dev->drw_lock, irqflags);
-
- memcpy(bitfield, dev->drw_bitfield, bitfield_length *
- sizeof(*bitfield));
- drm_free(dev->drw_bitfield, sizeof(*bitfield) *
- dev->drw_bitfield_length, DRM_MEM_BUFS);
- dev->drw_bitfield = bitfield;
- dev->drw_bitfield_length = bitfield_length;
-
- if (info != dev->drw_info) {
- memcpy(info, dev->drw_info, info_length *
- sizeof(*info));
- drm_free(dev->drw_info, sizeof(*info) *
- dev->drw_info_length, DRM_MEM_BUFS);
- dev->drw_info = info;
- dev->drw_info_length = info_length;
- }
-
- spin_unlock_irqrestore(&dev->drw_lock, irqflags);
- }
-
DRM_DEBUG("%d\n", draw.handle);
return 0;
}
@@ -220,36 +110,22 @@ int drm_rmdraw(DRM_IOCTL_ARGS)
int drm_update_drawable_info(DRM_IOCTL_ARGS) {
DRM_DEVICE;
drm_update_draw_t update;
- unsigned int id, idx, shift, bitfield_length = dev->drw_bitfield_length;
- u32 *bitfield = dev->drw_bitfield;
unsigned long irqflags;
drm_drawable_info_t *info;
drm_clip_rect_t *rects;
+ struct drm_drawable_list *draw_info;
int err;
DRM_COPY_FROM_USER_IOCTL(update, (drm_update_draw_t __user *) data,
sizeof(update));
- id = update.handle - 1;
- idx = id / (8 * sizeof(*bitfield));
- shift = id % (8 * sizeof(*bitfield));
-
- if (idx < 0 || idx >= bitfield_length ||
- !(bitfield[idx] & (1 << shift))) {
+ draw_info = idr_find(&dev->drw_idr, update.handle);
+ if (!draw_info) {
DRM_ERROR("No such drawable %d\n", update.handle);
return DRM_ERR(EINVAL);
}
- info = dev->drw_info[id];
-
- if (!info) {
- info = drm_calloc(1, sizeof(drm_drawable_info_t), DRM_MEM_BUFS);
-
- if (!info) {
- DRM_ERROR("Failed to allocate drawable info memory\n");
- return DRM_ERR(ENOMEM);
- }
- }
+ info = &draw_info->info;
switch (update.type) {
case DRM_DRAWABLE_CLIPRECTS:
@@ -284,12 +160,11 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
info->rects = rects;
info->num_rects = update.num;
- dev->drw_info[id] = info;
spin_unlock_irqrestore(&dev->drw_lock, irqflags);
DRM_DEBUG("Updated %d cliprects for drawable %d\n",
- info->num_rects, id);
+ info->num_rects, update.handle);
break;
default:
DRM_ERROR("Invalid update type %d\n", update.type);
@@ -299,11 +174,9 @@ int drm_update_drawable_info(DRM_IOCTL_ARGS) {
return 0;
error:
- if (!dev->drw_info[id])
- drm_free(info, sizeof(*info), DRM_MEM_BUFS);
- else if (rects != dev->drw_info[id]->rects)
- drm_free(rects, update.num *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+ if (rects != info->rects)
+ drm_free(rects, update.num * sizeof(drm_clip_rect_t),
+ DRM_MEM_BUFS);
return err;
}
@@ -312,19 +185,28 @@ error:
* Caller must hold the drawable spinlock!
*/
drm_drawable_info_t *drm_get_drawable_info(drm_device_t *dev, drm_drawable_t id) {
- u32 *bitfield = dev->drw_bitfield;
- unsigned int idx, shift;
-
- id--;
- idx = id / (8 * sizeof(*bitfield));
- shift = id % (8 * sizeof(*bitfield));
-
- if (idx < 0 || idx >= dev->drw_bitfield_length ||
- !(bitfield[idx] & (1 << shift))) {
+ struct drm_drawable_list *draw_info;
+ draw_info = idr_find(&dev->drw_idr, id);
+ if (!draw_info) {
DRM_DEBUG("No such drawable %d\n", id);
return NULL;
}
- return dev->drw_info[id];
+ return &draw_info->info;
}
EXPORT_SYMBOL(drm_get_drawable_info);
+
+static int drm_drawable_free(int idr, void *p, void *data)
+{
+ struct drm_drawable_list *drw_entry = p;
+ drm_free(drw_entry->info.rects, drw_entry->info.num_rects *
+ sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
+ drm_free(drw_entry, sizeof(struct drm_drawable_list), DRM_MEM_BUFS);
+ return 0;
+}
+
+void drm_drawable_free_all(drm_device_t *dev)
+{
+ idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
+ idr_remove_all(&dev->drw_idr);
+}
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 76fb90c9..0b4f8095 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -151,8 +151,8 @@ static drm_ioctl_desc_t drm_ioctls[] = {
int drm_lastclose(drm_device_t * dev)
{
drm_magic_entry_t *pt, *next;
- drm_map_list_t *r_list, *r_list_tmp;
- drm_vma_entry_t *vma, *vma_next;
+ drm_map_list_t *r_list, *list_t;
+ drm_vma_entry_t *vma, *vma_temp;
int i;
DRM_DEBUG("\n");
@@ -175,18 +175,9 @@ int drm_lastclose(drm_device_t * dev)
drm_irq_uninstall(dev);
/* Free drawable information memory */
- for (i = 0; i < dev->drw_bitfield_length / sizeof(*dev->drw_bitfield);
- i++) {
- drm_drawable_info_t *info = drm_get_drawable_info(dev, i);
-
- if (info) {
- drm_free(info->rects, info->num_rects *
- sizeof(drm_clip_rect_t), DRM_MEM_BUFS);
- drm_free(info, sizeof(*info), DRM_MEM_BUFS);
- }
- }
-
mutex_lock(&dev->struct_mutex);
+
+ drm_drawable_free_all(dev);
del_timer(&dev->timer);
if (dev->unique) {
@@ -207,19 +198,17 @@ int drm_lastclose(drm_device_t * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp) {
- drm_agp_mem_t *entry;
- drm_agp_mem_t *nexte;
+ drm_agp_mem_t *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
intact until drv_cleanup is called. */
- for (entry = dev->agp->memory; entry; entry = nexte) {
- nexte = entry->next;
+ list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
if (entry->bound)
drm_unbind_agp(entry->memory);
drm_free_agp(entry->memory, entry->pages);
drm_free(entry, sizeof(*entry), DRM_MEM_AGPLISTS);
}
- dev->agp->memory = NULL;
+ INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
drm_agp_release(dev);
@@ -233,19 +222,14 @@ int drm_lastclose(drm_device_t * dev)
}
/* Clear vma list (only built for debugging) */
- if (dev->vmalist) {
- for (vma = dev->vmalist; vma; vma = vma_next) {
- vma_next = vma->next;
- drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
- }
- dev->vmalist = NULL;
+ list_for_each_entry_safe(vma, vma_temp, &dev->vmalist, head) {
+ list_del(&vma->head);
+ drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
-
- if (dev->maplist) {
- list_for_each_entry_safe(r_list, r_list_tmp, &dev->maplist->head, head) {
- if (!(r_list->map->flags & _DRM_DRIVER))
- drm_rmmap_locked(dev, r_list->map);
- }
+
+ list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@@ -379,6 +363,13 @@ static void drm_cleanup(drm_device_t * dev)
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_mm_takedown(&dev->offset_manager);
+ drm_ht_remove(&dev->object_hash);
+
+ if (!drm_fb_loaded)
+ pci_disable_device(dev->pdev);
+
drm_ctxbitmap_cleanup(dev);
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
@@ -398,16 +389,6 @@ static void drm_cleanup(drm_device_t * dev)
dev->agp = NULL;
}
-
- // drm_bo_driver_finish(dev);
- if (dev->maplist) {
- drm_free(dev->maplist, sizeof(*dev->maplist), DRM_MEM_MAPS);
- dev->maplist = NULL;
- drm_ht_remove(&dev->map_hash);
- drm_mm_takedown(&dev->offset_manager);
- drm_ht_remove(&dev->object_hash);
- }
-
if (!drm_fb_loaded)
pci_disable_device(dev->pdev);
@@ -604,7 +585,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
goto err_i1;
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
&& (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls))
- ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
+ ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE];
else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE))
ioctl = &drm_ioctls[nr];
else
@@ -633,39 +614,11 @@ err_i1:
}
EXPORT_SYMBOL(drm_ioctl);
-int drm_wait_on(drm_device_t *dev, wait_queue_head_t *queue, int timeout,
- int (*fn)(drm_device_t *dev, void *priv), void *priv)
-{
- DECLARE_WAITQUEUE(entry, current);
- unsigned long end = jiffies + (timeout);
- int ret = 0;
- add_wait_queue(queue, &entry);
-
- for (;;) {
- __set_current_state(TASK_INTERRUPTIBLE);
- if ((*fn)(dev, priv))
- break;
- if (time_after_eq(jiffies, end)) {
- ret = -EBUSY;
- break;
- }
- schedule_timeout((HZ/100 > 1) ? HZ/100 : 1);
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- __set_current_state(TASK_RUNNING);
- remove_wait_queue(queue, &entry);
- return ret;
-}
-EXPORT_SYMBOL(drm_wait_on);
-
drm_local_map_t *drm_getsarea(struct drm_device *dev)
{
drm_map_list_t *entry;
- list_for_each_entry(entry, &dev->maplist->head, head) {
+ list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && entry->map->type == _DRM_SHM &&
(entry->map->flags & _DRM_CONTAINS_LOCK)) {
return entry->map;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index ce161dc3..5215feb6 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -124,52 +124,76 @@ static void drm_fence_unring(drm_device_t * dev, struct list_head *ring)
write_unlock_irqrestore(&fm->lock, flags);
}
-void drm_fence_usage_deref_locked(drm_device_t * dev,
- drm_fence_object_t * fence)
+void drm_fence_usage_deref_locked(drm_fence_object_t ** fence)
{
+ struct drm_fence_object *tmp_fence = *fence;
+ struct drm_device *dev = tmp_fence->dev;
drm_fence_manager_t *fm = &dev->fm;
- if (atomic_dec_and_test(&fence->usage)) {
- drm_fence_unring(dev, &fence->ring);
+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
+ *fence = NULL;
+ if (atomic_dec_and_test(&tmp_fence->usage)) {
+ drm_fence_unring(dev, &tmp_fence->ring);
DRM_DEBUG("Destroyed a fence object 0x%08lx\n",
- fence->base.hash.key);
+ tmp_fence->base.hash.key);
atomic_dec(&fm->count);
- drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+ BUG_ON(!list_empty(&tmp_fence->base.list));
+ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
}
-void drm_fence_usage_deref_unlocked(drm_device_t * dev,
- drm_fence_object_t * fence)
+void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence)
{
+ struct drm_fence_object *tmp_fence = *fence;
+ struct drm_device *dev = tmp_fence->dev;
drm_fence_manager_t *fm = &dev->fm;
- if (atomic_dec_and_test(&fence->usage)) {
+ *fence = NULL;
+ if (atomic_dec_and_test(&tmp_fence->usage)) {
mutex_lock(&dev->struct_mutex);
- if (atomic_read(&fence->usage) == 0) {
- drm_fence_unring(dev, &fence->ring);
+ if (atomic_read(&tmp_fence->usage) == 0) {
+ drm_fence_unring(dev, &tmp_fence->ring);
atomic_dec(&fm->count);
- drm_ctl_free(fence, sizeof(*fence), DRM_MEM_FENCE);
+ BUG_ON(!list_empty(&tmp_fence->base.list));
+ drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
}
mutex_unlock(&dev->struct_mutex);
}
}
-static void drm_fence_object_destroy(drm_file_t * priv,
- drm_user_object_t * base)
+struct drm_fence_object
+*drm_fence_reference_locked(struct drm_fence_object *src)
+{
+ DRM_ASSERT_LOCKED(&src->dev->struct_mutex);
+
+ atomic_inc(&src->usage);
+ return src;
+}
+
+void drm_fence_reference_unlocked(struct drm_fence_object **dst,
+ struct drm_fence_object *src)
+{
+ mutex_lock(&src->dev->struct_mutex);
+ *dst = src;
+ atomic_inc(&src->usage);
+ mutex_unlock(&src->dev->struct_mutex);
+}
+
+
+static void drm_fence_object_destroy(drm_file_t *priv, drm_user_object_t * base)
{
- drm_device_t *dev = priv->head->dev;
drm_fence_object_t *fence =
drm_user_object_entry(base, drm_fence_object_t, base);
- drm_fence_usage_deref_locked(dev, fence);
+ drm_fence_usage_deref_locked(&fence);
}
-static int fence_signaled(drm_device_t * dev,
- drm_fence_object_t * fence,
- uint32_t mask, int poke_flush)
+int drm_fence_object_signaled(drm_fence_object_t * fence,
+ uint32_t mask, int poke_flush)
{
unsigned long flags;
int signaled;
+ struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
@@ -200,16 +224,10 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc,
}
}
-int drm_fence_object_signaled(drm_fence_object_t * fence,
- uint32_t type)
-{
- return ((fence->signaled & type) == type);
-}
-
-int drm_fence_object_flush(drm_device_t * dev,
- drm_fence_object_t * fence,
+int drm_fence_object_flush(drm_fence_object_t * fence,
uint32_t type)
{
+ struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
drm_fence_driver_t *driver = dev->driver->fence_driver;
@@ -272,24 +290,23 @@ void drm_fence_flush_old(drm_device_t * dev, uint32_t class, uint32_t sequence)
mutex_unlock(&dev->struct_mutex);
return;
}
- fence = list_entry(fc->ring.next, drm_fence_object_t, ring);
- atomic_inc(&fence->usage);
+ fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring));
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
read_unlock_irqrestore(&fm->lock, flags);
if (diff < driver->wrap_diff) {
- drm_fence_object_flush(dev, fence, fence->type);
+ drm_fence_object_flush(fence, fence->type);
}
- drm_fence_usage_deref_unlocked(dev, fence);
+ drm_fence_usage_deref_unlocked(&fence);
}
EXPORT_SYMBOL(drm_fence_flush_old);
-static int drm_fence_lazy_wait(drm_device_t *dev,
- drm_fence_object_t *fence,
+static int drm_fence_lazy_wait(drm_fence_object_t *fence,
int ignore_signals,
uint32_t mask)
{
+ struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
int signaled;
@@ -298,13 +315,13 @@ static int drm_fence_lazy_wait(drm_device_t *dev,
do {
DRM_WAIT_ON(ret, fc->fence_queue, 3 * DRM_HZ,
- (signaled = fence_signaled(dev, fence, mask, 1)));
+ (signaled = drm_fence_object_signaled(fence, mask, 1)));
if (signaled)
return 0;
if (time_after_eq(jiffies, _end))
break;
} while (ret == -EINTR && ignore_signals);
- if (fence_signaled(dev, fence, mask, 0))
+ if (drm_fence_object_signaled(fence, mask, 0))
return 0;
if (time_after_eq(jiffies, _end))
ret = -EBUSY;
@@ -319,10 +336,10 @@ static int drm_fence_lazy_wait(drm_device_t *dev,
return 0;
}
-int drm_fence_object_wait(drm_device_t * dev,
- drm_fence_object_t * fence,
+int drm_fence_object_wait(drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask)
{
+ struct drm_device *dev = fence->dev;
drm_fence_driver_t *driver = dev->driver->fence_driver;
int ret = 0;
unsigned long _end;
@@ -334,16 +351,16 @@ int drm_fence_object_wait(drm_device_t * dev,
return -EINVAL;
}
- if (fence_signaled(dev, fence, mask, 0))
+ if (drm_fence_object_signaled(fence, mask, 0))
return 0;
_end = jiffies + 3 * DRM_HZ;
- drm_fence_object_flush(dev, fence, mask);
+ drm_fence_object_flush(fence, mask);
if (lazy && driver->lazy_capable) {
- ret = drm_fence_lazy_wait(dev, fence, ignore_signals, mask);
+ ret = drm_fence_lazy_wait(fence, ignore_signals, mask);
if (ret)
return ret;
@@ -351,7 +368,7 @@ int drm_fence_object_wait(drm_device_t * dev,
if (driver->has_irq(dev, fence->class,
DRM_FENCE_TYPE_EXE)) {
- ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
+ ret = drm_fence_lazy_wait(fence, ignore_signals,
DRM_FENCE_TYPE_EXE);
if (ret)
return ret;
@@ -359,13 +376,13 @@ int drm_fence_object_wait(drm_device_t * dev,
if (driver->has_irq(dev, fence->class,
mask & ~DRM_FENCE_TYPE_EXE)) {
- ret = drm_fence_lazy_wait(dev, fence, ignore_signals,
+ ret = drm_fence_lazy_wait(fence, ignore_signals,
mask);
if (ret)
return ret;
}
}
- if (drm_fence_object_signaled(fence, mask))
+ if (drm_fence_object_signaled(fence, mask, 0))
return 0;
/*
@@ -377,7 +394,7 @@ int drm_fence_object_wait(drm_device_t * dev,
#endif
do {
schedule();
- signaled = fence_signaled(dev, fence, mask, 1);
+ signaled = drm_fence_object_signaled(fence, mask, 1);
} while (!signaled && !time_after_eq(jiffies, _end));
if (!signaled)
@@ -386,9 +403,10 @@ int drm_fence_object_wait(drm_device_t * dev,
return 0;
}
-int drm_fence_object_emit(drm_device_t * dev, drm_fence_object_t * fence,
+int drm_fence_object_emit(drm_fence_object_t * fence,
uint32_t fence_flags, uint32_t class, uint32_t type)
{
+ struct drm_device *dev = fence->dev;
drm_fence_manager_t *fm = &dev->fm;
drm_fence_driver_t *driver = dev->driver->fence_driver;
drm_fence_class_manager_t *fc = &fm->class[fence->class];
@@ -432,15 +450,22 @@ static int drm_fence_object_init(drm_device_t * dev, uint32_t class,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
+
+ /*
+ * Avoid hitting BUG() for kernel-only fence objects.
+ */
+
+ INIT_LIST_HEAD(&fence->base.list);
fence->class = class;
fence->type = type;
fence->flush_mask = 0;
fence->submitted_flush = 0;
fence->signaled = 0;
fence->sequence = 0;
+ fence->dev = dev;
write_unlock_irqrestore(&fm->lock, flags);
if (fence_flags & DRM_FENCE_FLAG_EMIT) {
- ret = drm_fence_object_emit(dev, fence, fence_flags,
+ ret = drm_fence_object_emit(fence, fence_flags,
fence->class, type);
}
return ret;
@@ -454,15 +479,16 @@ int drm_fence_add_user_object(drm_file_t * priv, drm_fence_object_t * fence,
mutex_lock(&dev->struct_mutex);
ret = drm_add_user_object(priv, &fence->base, shareable);
- mutex_unlock(&dev->struct_mutex);
if (ret)
- return ret;
+ goto out;
+ atomic_inc(&fence->usage);
fence->base.type = drm_fence_type;
fence->base.remove = &drm_fence_object_destroy;
DRM_DEBUG("Fence 0x%08lx created\n", fence->base.hash.key);
- return 0;
+out:
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
-
EXPORT_SYMBOL(drm_fence_add_user_object);
int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
@@ -472,12 +498,12 @@ int drm_fence_object_create(drm_device_t * dev, uint32_t class, uint32_t type,
int ret;
drm_fence_manager_t *fm = &dev->fm;
- fence = drm_ctl_alloc(sizeof(*fence), DRM_MEM_FENCE);
+ fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
if (!fence)
return -ENOMEM;
ret = drm_fence_object_init(dev, class, type, flags, fence);
if (ret) {
- drm_fence_usage_deref_unlocked(dev, fence);
+ drm_fence_usage_deref_unlocked(&fence);
return ret;
}
*c_fence = fence;
@@ -534,8 +560,7 @@ drm_fence_object_t *drm_lookup_fence_object(drm_file_t * priv, uint32_t handle)
mutex_unlock(&dev->struct_mutex);
return NULL;
}
- fence = drm_user_object_entry(uo, drm_fence_object_t, base);
- atomic_inc(&fence->usage);
+ fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base));
mutex_unlock(&dev->struct_mutex);
return fence;
}
@@ -569,16 +594,11 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
arg.flags &
DRM_FENCE_FLAG_SHAREABLE);
if (ret) {
- drm_fence_usage_deref_unlocked(dev, fence);
+ drm_fence_usage_deref_unlocked(&fence);
return ret;
}
-
- /*
- * usage > 0. No need to lock dev->struct_mutex;
- */
-
- atomic_inc(&fence->usage);
arg.handle = fence->base.hash.key;
+
break;
case drm_fence_destroy:
mutex_lock(&dev->struct_mutex);
@@ -609,14 +629,14 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
- ret = drm_fence_object_flush(dev, fence, arg.type);
+ ret = drm_fence_object_flush(fence, arg.type);
break;
case drm_fence_wait:
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
ret =
- drm_fence_object_wait(dev, fence,
+ drm_fence_object_wait(fence,
arg.flags & DRM_FENCE_FLAG_WAIT_LAZY,
0, arg.type);
break;
@@ -625,7 +645,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
fence = drm_lookup_fence_object(priv, arg.handle);
if (!fence)
return -EINVAL;
- ret = drm_fence_object_emit(dev, fence, arg.flags, arg.class,
+ ret = drm_fence_object_emit(fence, arg.flags, arg.class,
arg.type);
break;
case drm_fence_buffers:
@@ -643,7 +663,6 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
DRM_FENCE_FLAG_SHAREABLE);
if (ret)
return ret;
- atomic_inc(&fence->usage);
arg.handle = fence->base.hash.key;
break;
default:
@@ -654,7 +673,7 @@ int drm_fence_ioctl(DRM_IOCTL_ARGS)
arg.type = fence->type;
arg.signaled = fence->signaled;
read_unlock_irqrestore(&fm->lock, flags);
- drm_fence_usage_deref_unlocked(dev, fence);
+ drm_fence_usage_deref_unlocked(&fence);
DRM_COPY_TO_USER_IOCTL((void __user *)data, arg, sizeof(arg));
return ret;
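
Editor's note: the drm_fence.c hunks above change drm_fence_usage_deref_locked()/_unlocked() to take a pointer-to-pointer and clear the caller's reference, and introduce drm_fence_reference_locked()/_unlocked() for taking references. The following is a minimal userspace sketch of that pattern, not part of the patch; the names obj, obj_get and obj_put are hypothetical.

/* --- illustrative sketch (not part of the patch) --- */
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int usage;
};

static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->usage, 1);
	return o;
}

/* Takes a pointer-to-pointer so the caller's reference is always cleared;
 * a use-after-put then faults on NULL instead of touching freed memory. */
static void obj_put(struct obj **po)
{
	struct obj *o = *po;

	*po = NULL;
	if (atomic_fetch_sub(&o->usage, 1) == 1)	/* dropped the last reference */
		free(o);
}

int main(void)
{
	struct obj *a = calloc(1, sizeof(*a));
	struct obj *b;

	if (!a)
		return 1;
	atomic_init(&a->usage, 1);
	b = obj_get(a);		/* two references now */
	obj_put(&b);		/* b is NULL afterwards */
	obj_put(&a);		/* last reference: object freed, a is NULL */
	return 0;
}

Clearing the caller's pointer at put time appears to be the point of the signature change: stale references show up immediately rather than as silent corruption.
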
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index e4748978..9116685c 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -79,14 +79,7 @@ static int drm_setup(drm_device_t * dev)
drm_ht_create(&dev->magiclist, DRM_MAGIC_HASH_ORDER);
INIT_LIST_HEAD(&dev->magicfree);
- dev->ctxlist = drm_alloc(sizeof(*dev->ctxlist), DRM_MEM_CTXLIST);
- if (dev->ctxlist == NULL)
- return -ENOMEM;
- memset(dev->ctxlist, 0, sizeof(*dev->ctxlist));
- INIT_LIST_HEAD(&dev->ctxlist->head);
-
- dev->vmalist = NULL;
- // dev->sigdata.lock = NULL;
+ dev->sigdata.lock = NULL;
init_waitqueue_head(&dev->lock.lock_queue);
dev->queue_count = 0;
dev->queue_reserved = 0;
@@ -268,6 +261,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
priv->authenticated = capable(CAP_SYS_ADMIN);
priv->lock_count = 0;
+ INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->user_objects);
INIT_LIST_HEAD(&priv->refd_objects);
INIT_LIST_HEAD(&priv->fbs);
@@ -292,19 +286,10 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
}
mutex_lock(&dev->struct_mutex);
- if (!dev->file_last) {
- priv->next = NULL;
- priv->prev = NULL;
- dev->file_first = priv;
- dev->file_last = priv;
- /* first opener automatically becomes master */
+ if (list_empty(&dev->filelist))
priv->master = 1;
- } else {
- priv->next = NULL;
- priv->prev = dev->file_last;
- dev->file_last->next = priv;
- dev->file_last = priv;
- }
+
+ list_add(&priv->lhead, &dev->filelist);
mutex_unlock(&dev->struct_mutex);
#ifdef __alpha__
@@ -481,10 +466,10 @@ int drm_release(struct inode *inode, struct file *filp)
mutex_lock(&dev->ctxlist_mutex);
- if (dev->ctxlist && (!list_empty(&dev->ctxlist->head))) {
+ if (!list_empty(&dev->ctxlist)) {
drm_ctx_list_t *pos, *n;
- list_for_each_entry_safe(pos, n, &dev->ctxlist->head, head) {
+ list_for_each_entry_safe(pos, n, &dev->ctxlist, head) {
if (pos->tag == priv &&
pos->handle != DRM_KERNEL_CONTEXT) {
if (dev->driver->context_dtor)
@@ -505,22 +490,12 @@ int drm_release(struct inode *inode, struct file *filp)
drm_fb_release(filp);
drm_object_release(filp);
if (priv->remove_auth_on_close == 1) {
- drm_file_t *temp = dev->file_first;
- while (temp) {
+ drm_file_t *temp;
+
+ list_for_each_entry(temp, &dev->filelist, lhead)
temp->authenticated = 0;
- temp = temp->next;
- }
- }
- if (priv->prev) {
- priv->prev->next = priv->next;
- } else {
- dev->file_first = priv->next;
- }
- if (priv->next) {
- priv->next->prev = priv->prev;
- } else {
- dev->file_last = priv->prev;
}
+ list_del(&priv->lhead);
mutex_unlock(&dev->struct_mutex);
if (dev->driver->postclose)
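
Editor's note: the drm_fops.c hunks above replace the open-coded file_first/file_last chain with a struct list_head and the stock list helpers. A kernel-context sketch of that conversion follows, not part of the patch and built against <linux/list.h>; the names are hypothetical.

/* --- illustrative kernel-context sketch (not part of the patch) --- */
#include <linux/list.h>

struct file_entry {
	struct list_head lhead;		/* links this entry into the device file list */
	int authenticated;
};

static void file_attach(struct list_head *filelist, struct file_entry *f)
{
	INIT_LIST_HEAD(&f->lhead);
	/* "first opener becomes master" reduces to a list_empty() check before this */
	list_add(&f->lhead, filelist);
}

static void file_drop_auth(struct list_head *filelist)
{
	struct file_entry *f;

	list_for_each_entry(f, filelist, lhead)
		f->authenticated = 0;
}

static void file_detach(struct file_entry *f)
{
	list_del(&f->lhead);
}
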
diff --git a/linux-core/drm_ioc32.c b/linux-core/drm_ioc32.c
index c112e81f..bbab3ea2 100644
--- a/linux-core/drm_ioc32.c
+++ b/linux-core/drm_ioc32.c
@@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm_core.h"
diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c
index f790a4b3..97df972f 100644
--- a/linux-core/drm_ioctl.c
+++ b/linux-core/drm_ioctl.c
@@ -199,7 +199,7 @@ int drm_getmap(struct inode *inode, struct file *filp,
}
i = 0;
- list_for_each(list, &dev->maplist->head) {
+ list_for_each(list, &dev->maplist) {
if (i == idx) {
r_list = list_entry(list, drm_map_list_t, head);
break;
@@ -252,12 +252,18 @@ int drm_getclient(struct inode *inode, struct file *filp,
return -EFAULT;
idx = client.idx;
mutex_lock(&dev->struct_mutex);
- for (i = 0, pt = dev->file_first; i < idx && pt; i++, pt = pt->next) ;
-
- if (!pt) {
+
+ if (list_empty(&dev->filelist)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
+
+ i = 0;
+ list_for_each_entry(pt, &dev->filelist, lhead) {
+ if (i++ >= idx)
+ break;
+ }
+
client.auth = pt->authenticated;
client.pid = pt->pid;
client.uid = pt->uid;
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index b0f6e3e1..88716712 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -119,8 +119,8 @@ static int drm_irq_install(drm_device_t * dev)
spin_lock_init(&dev->vbl_lock);
- INIT_LIST_HEAD(&dev->vbl_sigs.head);
- INIT_LIST_HEAD(&dev->vbl_sigs2.head);
+ INIT_LIST_HEAD(&dev->vbl_sigs);
+ INIT_LIST_HEAD(&dev->vbl_sigs2);
dev->vbl_pending = 0;
}
@@ -290,7 +290,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
- drm_vbl_sig_t *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
+ struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
? &dev->vbl_sigs2 : &dev->vbl_sigs;
drm_vbl_sig_t *vbl_sig;
@@ -300,7 +300,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
* for the same vblank sequence number; nothing to be done in
* that case
*/
- list_for_each_entry(vbl_sig, &vbl_sigs->head, head) {
+ list_for_each_entry(vbl_sig, vbl_sigs, head) {
if (vbl_sig->sequence == vblwait.request.sequence
&& vbl_sig->info.si_signo == vblwait.request.signal
&& vbl_sig->task == current) {
@@ -334,7 +334,7 @@ int drm_wait_vblank(DRM_IOCTL_ARGS)
spin_lock_irqsave(&dev->vbl_lock, irqflags);
- list_add_tail((struct list_head *)vbl_sig, &vbl_sigs->head);
+ list_add_tail(&vbl_sig->head, vbl_sigs);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -377,20 +377,18 @@ void drm_vbl_send_signals(drm_device_t * dev)
spin_lock_irqsave(&dev->vbl_lock, flags);
for (i = 0; i < 2; i++) {
- struct list_head *list, *tmp;
- drm_vbl_sig_t *vbl_sig;
- drm_vbl_sig_t *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+ drm_vbl_sig_t *vbl_sig, *tmp;
+ struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
&dev->vbl_received);
- list_for_each_safe(list, tmp, &vbl_sigs->head) {
- vbl_sig = list_entry(list, drm_vbl_sig_t, head);
+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
vbl_sig->info.si_code = vbl_seq;
send_sig_info(vbl_sig->info.si_signo,
&vbl_sig->info, vbl_sig->task);
- list_del(list);
+ list_del(&vbl_sig->head);
drm_free(vbl_sig, sizeof(*vbl_sig),
DRM_MEM_DRIVER);
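
Editor's note: the drm_irq.c hunk above switches the vblank-signal walk to list_for_each_entry_safe(), which caches the next node so the current entry can be unlinked and freed mid-iteration. A kernel-context sketch of that idiom, not part of the patch; names and the sequence test threshold mirror the hunk but are otherwise hypothetical.

/* --- illustrative kernel-context sketch (not part of the patch) --- */
#include <linux/list.h>
#include <linux/slab.h>

struct pending_sig {
	struct list_head head;
	unsigned int sequence;
};

static void reap_signals(struct list_head *sigs, unsigned int current_seq)
{
	struct pending_sig *sig, *tmp;

	list_for_each_entry_safe(sig, tmp, sigs, head) {
		if ((current_seq - sig->sequence) <= (1 << 23)) {
			list_del(&sig->head);	/* safe: iterator already holds the next node */
			kfree(sig);
		}
	}
}
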
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index 759e1f15..b1423c12 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -228,7 +228,7 @@ static void *agp_remap(unsigned long offset, unsigned long size,
offset -= dev->hose->mem_space->start;
#endif
- for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
+ list_for_each_entry(agpmem, &dev->agp->memory, head)
if (agpmem->bound <= offset
&& (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
(offset + size))
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index 03906034..567a7d2b 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -36,6 +36,8 @@ int drm_add_user_object(drm_file_t * priv, drm_user_object_t * item,
drm_device_t *dev = priv->head->dev;
int ret;
+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
atomic_set(&item->refcount, 1);
item->shareable = shareable;
item->owner = priv;
@@ -56,6 +58,8 @@ drm_user_object_t *drm_lookup_user_object(drm_file_t * priv, uint32_t key)
int ret;
drm_user_object_t *item;
+ DRM_ASSERT_LOCKED(&dev->struct_mutex);
+
ret = drm_ht_find_item(&dev->object_hash, key, &hash);
if (ret) {
return NULL;
@@ -88,6 +92,8 @@ static void drm_deref_user_object(drm_file_t * priv, drm_user_object_t * item)
int drm_remove_user_object(drm_file_t * priv, drm_user_object_t * item)
{
+ DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
+
if (item->owner != priv) {
DRM_ERROR("Cannot destroy object not owned by you.\n");
return -EINVAL;
@@ -125,6 +131,7 @@ int drm_add_ref_object(drm_file_t * priv, drm_user_object_t * referenced_object,
drm_ref_object_t *item;
drm_open_hash_t *ht = &priv->refd_object_hash[ref_action];
+ DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
if (!referenced_object->shareable && priv != referenced_object->owner) {
DRM_ERROR("Not allowed to reference this object\n");
return -EINVAL;
@@ -181,6 +188,7 @@ drm_ref_object_t *drm_lookup_ref_object(drm_file_t * priv,
drm_hash_item_t *hash;
int ret;
+ DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
ret = drm_ht_find_item(&priv->refd_object_hash[ref_action],
(unsigned long)referenced_object, &hash);
if (ret)
@@ -213,6 +221,7 @@ void drm_remove_ref_object(drm_file_t * priv, drm_ref_object_t * item)
drm_open_hash_t *ht = &priv->refd_object_hash[item->unref_action];
drm_ref_t unref_action;
+ DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
unref_action = item->unref_action;
if (atomic_dec_and_test(&item->refcount)) {
ret = drm_ht_remove_item(ht, &item->hash);
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index b6754453..e0716aa4 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -141,6 +141,7 @@ extern int drm_user_object_unref(drm_file_t * priv, uint32_t user_token,
typedef struct drm_fence_object {
drm_user_object_t base;
+ struct drm_device *dev;
atomic_t usage;
/*
@@ -196,15 +197,15 @@ extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class,
uint32_t sequence);
-extern int drm_fence_object_flush(struct drm_device *dev,
- drm_fence_object_t * fence, uint32_t type);
-extern int drm_fence_object_signaled(drm_fence_object_t * fence, uint32_t type);
-extern void drm_fence_usage_deref_locked(struct drm_device *dev,
- drm_fence_object_t * fence);
-extern void drm_fence_usage_deref_unlocked(struct drm_device *dev,
- drm_fence_object_t * fence);
-extern int drm_fence_object_wait(struct drm_device *dev,
- drm_fence_object_t * fence,
+extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type);
+extern int drm_fence_object_signaled(drm_fence_object_t * fence,
+ uint32_t type, int flush);
+extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence);
+extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence);
+extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
+extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
+ struct drm_fence_object *src);
+extern int drm_fence_object_wait(drm_fence_object_t * fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t class,
@@ -445,7 +446,7 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
-extern void drm_bo_usage_deref_locked(drm_buffer_object_t * bo);
+extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo);
extern int drm_fence_buffer_objects(drm_file_t * priv,
struct list_head *list,
uint32_t fence_flags,
@@ -482,5 +483,12 @@ extern int drm_mem_reg_ioremap(struct drm_device *dev, drm_bo_mem_reg_t * mem,
void **virtual);
extern void drm_mem_reg_iounmap(struct drm_device *dev, drm_bo_mem_reg_t * mem,
void *virtual);
+#ifdef CONFIG_DEBUG_MUTEXES
+#define DRM_ASSERT_LOCKED(_mutex) \
+ BUG_ON(!mutex_is_locked(_mutex) || \
+ ((_mutex)->owner != current_thread_info()))
+#else
+#define DRM_ASSERT_LOCKED(_mutex)
+#endif
#endif
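
Editor's note: the new DRM_ASSERT_LOCKED macro above documents the locking rule of a "_locked" helper and, on CONFIG_DEBUG_MUTEXES kernels, enforces it with BUG_ON; otherwise it compiles away. A sketch of typical usage, not part of the patch, assuming the macro definition above is in scope; the structure and function names are hypothetical.

/* --- illustrative kernel-context sketch (not part of the patch) --- */
#include <linux/mutex.h>

struct dev_state {
	struct mutex struct_mutex;
	int counter;
};

/* Callers must hold struct_mutex; a missed lock becomes an immediate BUG()
 * on debug kernels and costs nothing on production builds. */
static void bump_counter_locked(struct dev_state *dev)
{
	DRM_ASSERT_LOCKED(&dev->struct_mutex);
	dev->counter++;
}
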
diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c
index 40a65f3e..76252204 100644
--- a/linux-core/drm_pci.c
+++ b/linux-core/drm_pci.c
@@ -51,10 +51,8 @@ drm_dma_handle_t *drm_pci_alloc(drm_device_t * dev, size_t size, size_t align,
dma_addr_t maxaddr)
{
drm_dma_handle_t *dmah;
-#if 1
unsigned long addr;
size_t sz;
-#endif
#ifdef DRM_DEBUG_MEMORY
int area = DRM_MEM_DMA;
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index af62c62a..e93a0406 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -211,7 +211,6 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int len = 0;
drm_map_t *map;
drm_map_list_t *r_list;
- struct list_head *list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP,
@@ -231,9 +230,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
- if (dev->maplist != NULL)
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
@@ -242,10 +239,10 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
+ i,
+ map->offset,
+ map->size, type, map->flags,
+ (unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
@@ -253,7 +250,7 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
- }
+ }
if (len > request + offset)
return request;
@@ -535,7 +532,7 @@ static int drm__clients_info(char *buf, char **start, off_t offset,
*eof = 0;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
- for (priv = dev->file_first; priv; priv = priv->next) {
+ list_for_each_entry(priv, &dev->filelist, lhead) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor,
@@ -588,7 +585,7 @@ static int drm__vma_info(char *buf, char **start, off_t offset, int request,
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%08lx\n",
atomic_read(&dev->vma_count),
high_memory, virt_to_phys(high_memory));
- for (pt = dev->vmalist; pt; pt = pt->next) {
+ list_for_each_entry(pt, &dev->vmalist, head) {
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 01ffe679..0cc8a105 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -60,6 +60,11 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
{
int retcode;
+ INIT_LIST_HEAD(&dev->filelist);
+ INIT_LIST_HEAD(&dev->ctxlist);
+ INIT_LIST_HEAD(&dev->vmalist);
+ INIT_LIST_HEAD(&dev->maplist);
+
spin_lock_init(&dev->count_lock);
spin_lock_init(&dev->drw_lock);
spin_lock_init(&dev->tasklet_lock);
@@ -70,6 +75,8 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
mutex_init(&dev->bm.init_mutex);
mutex_init(&dev->bm.evict_mutex);
+ idr_init(&dev->drw_idr);
+
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
@@ -94,15 +101,6 @@ static int drm_fill_in_dev(drm_device_t * dev, struct pci_dev *pdev,
return -ENOMEM;
}
- dev->maplist = drm_calloc(1, sizeof(*dev->maplist), DRM_MEM_MAPS);
- if (dev->maplist == NULL) {
- drm_ht_remove(&dev->object_hash);
- drm_ht_remove(&dev->map_hash);
- drm_mm_takedown(&dev->offset_manager);
- return -ENOMEM;
- }
- INIT_LIST_HEAD(&dev->maplist->head);
-
/* the DRM has 6 counters */
dev->counters = 6;
dev->types[0] = _DRM_STAT_LOCK;
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index b8871539..72d63c10 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -122,7 +122,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
/*
* It's AGP memory - find the real physical page to map
*/
- for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
+ list_for_each_entry(agpmem, &dev->agp->memory, head) {
if (agpmem->bound <= baddr &&
agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
break;
@@ -205,10 +205,9 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *pt, *prev, *next;
+ drm_vma_entry_t *pt, *temp;
drm_map_t *map;
drm_map_list_t *r_list;
- struct list_head *list;
int found_maps = 0;
DRM_DEBUG("0x%08lx,0x%08lx\n",
@@ -218,19 +217,12 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
map = vma->vm_private_data;
mutex_lock(&dev->struct_mutex);
- for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
- next = pt->next;
+ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma->vm_private_data == map)
found_maps++;
if (pt->vma == vma) {
- if (prev) {
- prev->next = pt->next;
- } else {
- dev->vmalist = pt->next;
- }
+ list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
- } else {
- prev = pt;
}
}
/* We were the only map that was found */
@@ -239,9 +231,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
* we delete this mappings information.
*/
found_maps = 0;
- list = &dev->maplist->head;
- list_for_each(list, &dev->maplist->head) {
- r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map == map)
found_maps++;
}
@@ -439,9 +429,8 @@ static void drm_vm_open_locked(struct vm_area_struct *vma)
vma_entry = drm_ctl_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
if (vma_entry) {
vma_entry->vma = vma;
- vma_entry->next = dev->vmalist;
vma_entry->pid = current->pid;
- dev->vmalist = vma_entry;
+ list_add(&vma_entry->head, &dev->vmalist);
}
}
@@ -467,20 +456,16 @@ static void drm_vm_close(struct vm_area_struct *vma)
{
drm_file_t *priv = vma->vm_file->private_data;
drm_device_t *dev = priv->head->dev;
- drm_vma_entry_t *pt, *prev;
+ drm_vma_entry_t *pt, *temp;
DRM_DEBUG("0x%08lx,0x%08lx\n",
vma->vm_start, vma->vm_end - vma->vm_start);
atomic_dec(&dev->vma_count);
mutex_lock(&dev->struct_mutex);
- for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
+ list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
if (pt->vma == vma) {
- if (prev) {
- prev->next = pt->next;
- } else {
- dev->vmalist = pt->next;
- }
+ list_del(&pt->head);
drm_ctl_free(pt, sizeof(*pt), DRM_MEM_VMAS);
break;
}
@@ -855,7 +840,8 @@ static void drm_bo_vm_close(struct vm_area_struct *vma)
#ifdef DRM_ODD_MM_COMPAT
drm_bo_delete_vma(bo, vma);
#endif
- drm_bo_usage_deref_locked(bo);
+ drm_bo_usage_deref_locked((struct drm_buffer_object **)
+ &vma->vm_private_data);
mutex_unlock(&dev->struct_mutex);
}
return;
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 3f2dad38..49379434 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -346,12 +346,10 @@ static int i810_dma_initialize(drm_device_t * dev,
drm_i810_private_t * dev_priv,
drm_i810_init_t * init)
{
- struct list_head *list;
-
+ drm_map_list_t *r_list;
memset(dev_priv, 0, sizeof(drm_i810_private_t));
- list_for_each(list, &dev->maplist->head) {
- drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
+ list_for_each_entry(r_list, &dev->maplist, head) {
if (r_list->map &&
r_list->map->type == _DRM_SHM &&
r_list->map->flags & _DRM_CONTAINS_LOCK) {
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index d58ca589..e612a46c 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -42,9 +42,9 @@ static struct pci_device_id pciidlist[] = {
#ifdef I915_HAVE_FENCE
static drm_fence_driver_t i915_fence_driver = {
.num_classes = 1,
- .wrap_diff = (1 << 30),
- .flush_diff = (1 << 29),
- .sequence_mask = 0xffffffffU,
+ .wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
+ .flush_diff = (1U << (BREADCRUMB_BITS - 2)),
+ .sequence_mask = BREADCRUMB_MASK,
.lazy_capable = 1,
.emit = i915_fence_emit_sequence,
.poke_flush = i915_poke_flush,
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index 88daa57c..00873485 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -61,7 +61,7 @@ static void i915_perform_flush(drm_device_t * dev)
* First update fences with the current breadcrumb.
*/
- diff = sequence - fc->last_exe_flush;
+ diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
if (diff < driver->wrap_diff && diff != 0) {
drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
}
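
Editor's note: the i915_fence.c hunk above masks the breadcrumb delta with BREADCRUMB_MASK before comparing it against wrap_diff, so the forward distance is computed correctly across counter wrap-around. A standalone sketch of that modular comparison follows, not part of the patch; BITS, MASK and the sample values are made up, not the driver's actual BREADCRUMB_BITS.

/* --- illustrative sketch (not part of the patch) --- */
#include <stdint.h>
#include <stdio.h>

#define BITS       16u
#define MASK       ((1u << BITS) - 1u)
#define WRAP_DIFF  (1u << (BITS - 1))	/* half the sequence space */

/* Returns nonzero when new_seq is ahead of old_seq, even across wrap. */
static int is_ahead(uint32_t new_seq, uint32_t old_seq)
{
	uint32_t diff = (new_seq - old_seq) & MASK;

	return diff != 0 && diff < WRAP_DIFF;
}

int main(void)
{
	/* 0x0005 came after 0xfffe despite the numeric wrap. */
	printf("%d\n", is_ahead(0x0005, 0xfffe));	/* prints 1 */
	printf("%d\n", is_ahead(0xfffe, 0x0005));	/* prints 0 */
	return 0;
}
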
diff --git a/linux-core/i915_ioc32.c b/linux-core/i915_ioc32.c
index 396f5c21..c1e776b7 100644
--- a/linux-core/i915_ioc32.c
+++ b/linux-core/i915_ioc32.c
@@ -30,7 +30,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/linux-core/mga_ioc32.c b/linux-core/mga_ioc32.c
index 5775cd63..75f2a231 100644
--- a/linux-core/mga_ioc32.c
+++ b/linux-core/mga_ioc32.c
@@ -32,7 +32,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/linux-core/nouveau_ioc32.c b/linux-core/nouveau_ioc32.c
index a752a581..f55ae7a2 100644
--- a/linux-core/nouveau_ioc32.c
+++ b/linux-core/nouveau_ioc32.c
@@ -32,7 +32,6 @@
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/linux-core/nouveau_notifier.c b/linux-core/nouveau_notifier.c
new file mode 120000
index 00000000..285469c5
--- /dev/null
+++ b/linux-core/nouveau_notifier.c
@@ -0,0 +1 @@
+../shared-core/nouveau_notifier.c
\ No newline at end of file
+../shared-core/nouveau_notifier.c
\ No newline at end of file
diff --git a/linux-core/nv04_fifo.c b/linux-core/nv04_fifo.c
new file mode 120000
index 00000000..d10beb19
--- /dev/null
+++ b/linux-core/nv04_fifo.c
@@ -0,0 +1 @@
+../shared-core/nv04_fifo.c
\ No newline at end of file
diff --git a/linux-core/nv10_fifo.c b/linux-core/nv10_fifo.c
new file mode 120000
index 00000000..8630ad04
--- /dev/null
+++ b/linux-core/nv10_fifo.c
@@ -0,0 +1 @@
+../shared-core/nv10_fifo.c
\ No newline at end of file
diff --git a/linux-core/nv40_fifo.c b/linux-core/nv40_fifo.c
new file mode 120000
index 00000000..cc71e7a4
--- /dev/null
+++ b/linux-core/nv40_fifo.c
@@ -0,0 +1 @@
+../shared-core/nv40_fifo.c
\ No newline at end of file
diff --git a/linux-core/nv50_fifo.c b/linux-core/nv50_fifo.c
new file mode 120000
index 00000000..4c9990a9
--- /dev/null
+++ b/linux-core/nv50_fifo.c
@@ -0,0 +1 @@
+../shared-core/nv50_fifo.c
\ No newline at end of file
diff --git a/linux-core/nv50_graph.c b/linux-core/nv50_graph.c
new file mode 120000
index 00000000..03f69e68
--- /dev/null
+++ b/linux-core/nv50_graph.c
@@ -0,0 +1 @@
+../shared-core/nv50_graph.c
\ No newline at end of file
diff --git a/linux-core/nv50_mc.c b/linux-core/nv50_mc.c
new file mode 120000
index 00000000..f4bb369e
--- /dev/null
+++ b/linux-core/nv50_mc.c
@@ -0,0 +1 @@
+../shared-core/nv50_mc.c
\ No newline at end of file
diff --git a/linux-core/r128_ioc32.c b/linux-core/r128_ioc32.c
index 73630540..6b757576 100644
--- a/linux-core/r128_ioc32.c
+++ b/linux-core/r128_ioc32.c
@@ -30,7 +30,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index 43b9aca0..327a6a97 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -61,7 +61,7 @@ static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL,
+ DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
@@ -71,6 +71,7 @@ static struct drm_driver driver = {
.lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload,
.vblank_wait = radeon_driver_vblank_wait,
+ .vblank_wait2 = radeon_driver_vblank_wait2,
.dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/linux-core/radeon_ioc32.c b/linux-core/radeon_ioc32.c
index b980bedb..bc8aa35a 100644
--- a/linux-core/radeon_ioc32.c
+++ b/linux-core/radeon_ioc32.c
@@ -28,7 +28,6 @@
* IN THE SOFTWARE.
*/
#include <linux/compat.h>
-#include <linux/ioctl32.h>
#include "drmP.h"
#include "drm.h"
@@ -350,6 +349,36 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long) request);
}
+/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
+#if defined (CONFIG_X86_64) || defined(CONFIG_IA64)
+typedef struct drm_radeon_setparam32 {
+ int param;
+ u64 value;
+} __attribute__((packed)) drm_radeon_setparam32_t;
+
+static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_radeon_setparam32_t req32;
+ drm_radeon_setparam_t __user *request;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.param, &request->param)
+ || __put_user((void __user *)(unsigned long)req32.value,
+ &request->value))
+ return -EFAULT;
+
+ return drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
+}
+#else
+#define compat_radeon_cp_setparam NULL
+#endif /* X86_64 || IA64 */
+
drm_ioctl_compat_t *radeon_compat_ioctls[] = {
[DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
[DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
@@ -358,6 +387,7 @@ drm_ioctl_compat_t *radeon_compat_ioctls[] = {
[DRM_RADEON_VERTEX2] = compat_radeon_cp_vertex2,
[DRM_RADEON_CMDBUF] = compat_radeon_cp_cmdbuf,
[DRM_RADEON_GETPARAM] = compat_radeon_cp_getparam,
+ [DRM_RADEON_SETPARAM] = compat_radeon_cp_setparam,
[DRM_RADEON_ALLOC] = compat_radeon_mem_alloc,
[DRM_RADEON_IRQ_EMIT] = compat_radeon_irq_emit,
};
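
Editor's note: the new compat_radeon_cp_setparam wrapper above exists because a struct containing an int followed by a u64 has different layouts in 32-bit and 64-bit code on x86-64/IA64, so the handler must re-marshal the argument. A userspace sketch of the layout difference, not part of the patch; the struct names are hypothetical and the printed numbers depend on the target ABI.

/* --- illustrative userspace sketch (not part of the patch) --- */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct setparam_native {
	int      param;
	uint64_t value;		/* 8-byte aligned in 64-bit code: offset 8, size 16 */
};

struct setparam_compat {
	int      param;
	uint64_t value;
} __attribute__((packed));	/* mirrors the i386 layout: offset 4, size 12 */

int main(void)
{
	printf("native: size %zu, value at offset %zu\n",
	       sizeof(struct setparam_native),
	       offsetof(struct setparam_native, value));
	printf("compat: size %zu, value at offset %zu\n",
	       sizeof(struct setparam_compat),
	       offsetof(struct setparam_compat, value));
	return 0;
}

Without the wrapper, a 32-bit client would place value at offset 4 while the native ioctl handler reads it at offset 8, which is why the request is rebuilt via compat_alloc_user_space() before being forwarded.
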
diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c
index 5efbada4..21c1f2d7 100644
--- a/linux-core/sis_mm.c
+++ b/linux-core/sis_mm.c
@@ -233,7 +233,7 @@ static drm_local_map_t *sis_reg_init(drm_device_t *dev)
drm_map_list_t *entry;
drm_local_map_t *map;
- list_for_each_entry(entry, &dev->maplist->head, head) {
+ list_for_each_entry(entry, &dev->maplist, head) {
map = entry->map;
if (!map)
continue;