Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile           |   4
-rw-r--r--  linux-core/Makefile.kernel    |   4
-rw-r--r--  linux-core/drmP.h             |   1
-rw-r--r--  linux-core/drm_agpsupport.c   |  15
-rw-r--r--  linux-core/drm_bo.c           | 327
-rw-r--r--  linux-core/drm_bo_move.c      | 196
-rw-r--r--  linux-core/drm_compat.h       |   5
-rw-r--r--  linux-core/drm_drv.c          |  12
-rw-r--r--  linux-core/drm_fence.c        | 237
-rw-r--r--  linux-core/drm_fops.c         |  13
-rw-r--r--  linux-core/drm_object.c       |  29
-rw-r--r--  linux-core/drm_objects.h      | 180
-rw-r--r--  linux-core/drm_ttm.c          |  12
-rw-r--r--  linux-core/i915_buffer.c      |  11
-rw-r--r--  linux-core/i915_fence.c       |   9
-rw-r--r--  linux-core/nouveau_drv.c      |  11
-rw-r--r--  linux-core/nouveau_sgdma.c    |  16
l---------  linux-core/nouveau_swmthd.c   |   1
l---------  linux-core/nouveau_swmthd.h   |   1
l---------  linux-core/nv30_graph.c       |   1
-rw-r--r--  linux-core/via_buffer.c       |   3
-rw-r--r--  linux-core/via_fence.c        |   7
-rw-r--r--  linux-core/xgi_cmdlist.c      |  32
-rw-r--r--  linux-core/xgi_drv.c          |   6
-rw-r--r--  linux-core/xgi_drv.h          |   4
-rw-r--r--  linux-core/xgi_fence.c        |   8
-rw-r--r--  linux-core/xgi_misc.c         |  24
27 files changed, 729 insertions, 440 deletions
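
The headline interface change in this commit: TTM backend bind() hooks now receive the whole placement description as a struct drm_bo_mem_reg instead of a raw (offset, cached) pair. Below is a minimal sketch of a driver backend bind hook against the reworked interface; the foo_* names are hypothetical, and the flag/field accesses mirror drm_agp_bind_ttm() in the diff that follows.

static int foo_bind_ttm(struct drm_ttm_backend *backend,
			struct drm_bo_mem_reg *bo_mem)
{
	/* Caching policy now travels with the placement flags. */
	int cached = (bo_mem->flags & DRM_BO_FLAG_CACHED) != 0;
	/* The aperture page offset comes from the memory-manager node. */
	unsigned long page_offset = bo_mem->mm_node->start;

	/* Program the GART/aperture; hw-specific and hypothetical. */
	return foo_program_gart(backend, page_offset, cached);
}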
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 1cdf3b30..6eb5bf5c 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -163,7 +163,7 @@ endif
 all: modules
 
 modules: includes
-	make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
+	+make -C $(LINUXDIR) $(GETCONFIG) SUBDIRS=`pwd` DRMSRCDIR=`pwd` modules
 
 ifeq ($(HEADERFROMBOOT),1)
@@ -269,7 +269,7 @@ PAGE_AGP := $(shell cat $(LINUXDIR)/include/asm/agp.h 2>/dev/null | \
 ifneq ($(PAGE_AGP),0)
 EXTRA_CFLAGS += -DHAVE_PAGE_AGP
 endif
-EXTRA_CFLAGS += -g -O0
+EXTRA_CFLAGS += -g
 
 # Start with all modules turned off.
 CONFIG_DRM_GAMMA := n
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index a7ab1f1d..6cbe3a27 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -23,13 +23,13 @@ i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
 		i915_buffer.o intel_display.o intel_crt.o intel_lvds.o \
 		intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o
 nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
-		nouveau_object.o nouveau_irq.o nouveau_notifier.o \
+		nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
 		nouveau_sgdma.o nouveau_dma.o \
 		nv04_timer.o \
 		nv04_mc.o nv40_mc.o nv50_mc.o \
 		nv04_fb.o nv10_fb.o nv40_fb.o \
 		nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
-		nv04_graph.o nv10_graph.o nv20_graph.o nv30_graph.o \
+		nv04_graph.o nv10_graph.o nv20_graph.o \
 		nv40_graph.o nv50_graph.o \
 		nv04_instmem.o nv50_instmem.o
 radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 64f9a63f..0237f593 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -430,7 +430,6 @@ struct drm_file {
 	 */
 
 	struct list_head refd_objects;
-	struct list_head user_objects;
 
 	struct drm_open_hash refd_object_hash[_DRM_NO_REF_TYPES];
 	struct file *filp;
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 4618823c..b68efc64 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -535,23 +535,23 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p
 }
 
 static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
-			    unsigned long offset,
-			    int cached)
+			    struct drm_bo_mem_reg *bo_mem)
 {
-	struct drm_agp_ttm_backend *agp_be =
+	struct drm_agp_ttm_backend *agp_be =
 		container_of(backend, struct drm_agp_ttm_backend, backend);
 	DRM_AGP_MEM *mem = agp_be->mem;
 	int ret;
 
 	DRM_DEBUG("drm_agp_bind_ttm\n");
 	mem->is_flushed = TRUE;
-	mem->type = (cached) ? AGP_USER_CACHED_MEMORY :
+	mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY :
 		AGP_USER_MEMORY;
-	ret = drm_agp_bind_memory(mem, offset);
+	ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
 	if (ret) {
 		DRM_ERROR("AGP Bind memory failed\n");
 	}
-	DRM_FLAG_MASKED(backend->flags, (cached) ? DRM_BE_FLAG_BOUND_CACHED : 0,
+	DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
+			DRM_BE_FLAG_BOUND_CACHED : 0,
 			DRM_BE_FLAG_BOUND_CACHED);
 	return ret;
 }
@@ -643,7 +643,8 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
 	agp_be->bridge = dev->agp->bridge;
 	agp_be->populated = FALSE;
 	agp_be->backend.func = &agp_ttm_backend;
-	agp_be->backend.mem_type = DRM_BO_MEM_TT;
+	// agp_be->backend.mem_type = DRM_BO_MEM_TT;
+	agp_be->backend.dev = dev;
 
 	return &agp_be->backend;
 }
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index ea93ed16..217a33b1 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -142,17 +142,12 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 
 	switch (bo->type) {
 	case drm_bo_type_dc:
-		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
-		if (!bo->ttm)
-			ret = -ENOMEM;
-		break;
 	case drm_bo_type_kernel:
-		bo->ttm = drm_ttm_init(dev, bo->mem.num_pages << PAGE_SHIFT);
+		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
 		if (!bo->ttm)
 			ret = -ENOMEM;
 		break;
 	case drm_bo_type_user:
-	case drm_bo_type_fake:
 		break;
 	default:
 		DRM_ERROR("Illegal buffer object type\n");
@@ -175,7 +170,8 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 	struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
 	int ret = 0;
 
-	if (old_is_pci || new_is_pci)
+	if (old_is_pci || new_is_pci ||
+	    ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
 		ret = drm_bo_vm_pre_move(bo, old_is_pci);
 	if (ret)
 		return ret;
@@ -190,9 +186,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 			goto out_err;
 
 		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-			ret = drm_bind_ttm(bo->ttm, new_man->flags &
-					   DRM_BO_FLAG_CACHED,
-					   mem->mm_node->start);
+			ret = drm_bind_ttm(bo->ttm, mem);
 			if (ret)
 				goto out_err;
 		}
@@ -242,7 +236,9 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 			_DRM_BO_FLAG_EVICTED);
 
 	if (bo->mem.mm_node)
-		bo->offset = bo->mem.mm_node->start << PAGE_SHIFT;
+		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+			bm->man[bo->mem.mem_type].gpu_offset;
+
 
 	return 0;
 
@@ -290,6 +286,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_bo_wait);
 
 static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
 {
@@ -531,38 +528,76 @@ void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
 
+void drm_putback_buffer_objects(struct drm_device *dev)
+{
+	struct drm_buffer_manager *bm = &dev->bm;
+	struct list_head *list = &bm->unfenced;
+	struct drm_buffer_object *entry, *next;
+
+	mutex_lock(&dev->struct_mutex);
+	list_for_each_entry_safe(entry, next, list, lru) {
+		atomic_inc(&entry->usage);
+		mutex_unlock(&dev->struct_mutex);
+
+		mutex_lock(&entry->mutex);
+		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
+		mutex_lock(&dev->struct_mutex);
+
+		list_del_init(&entry->lru);
+		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
+		DRM_WAKEUP(&entry->event_queue);
+
+		/*
+		 * FIXME: Might want to put back on head of list
+		 * instead of tail here.
+		 */
+
+		drm_bo_add_to_lru(entry);
+		mutex_unlock(&entry->mutex);
+		drm_bo_usage_deref_locked(&entry);
+	}
+	mutex_unlock(&dev->struct_mutex);
+}
+EXPORT_SYMBOL(drm_putback_buffer_objects);
+
+
 /*
  * Note. The caller has to register (if applicable)
  * and deregister fence object usage.
 */
 
-int drm_fence_buffer_objects(struct drm_file * file_priv,
+int drm_fence_buffer_objects(struct drm_device *dev,
			     struct list_head *list,
			     uint32_t fence_flags,
			     struct drm_fence_object * fence,
			     struct drm_fence_object ** used_fence)
 {
-	struct drm_device *dev = file_priv->head->dev;
 	struct drm_buffer_manager *bm = &dev->bm;
-
 	struct drm_buffer_object *entry;
 	uint32_t fence_type = 0;
+	uint32_t fence_class = ~0;
 	int count = 0;
 	int ret = 0;
 	struct list_head *l;
-	LIST_HEAD(f_list);
 
 	mutex_lock(&dev->struct_mutex);
 
 	if (!list)
 		list = &bm->unfenced;
 
+	if (fence)
+		fence_class = fence->fence_class;
+
 	list_for_each_entry(entry, list, lru) {
 		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
-		fence_type |= entry->fence_type;
-		if (entry->fence_class != 0) {
-			DRM_ERROR("Fence class %d is not implemented yet.\n",
-				  entry->fence_class);
+		fence_type |= entry->new_fence_type;
+		if (fence_class == ~0)
+			fence_class = entry->new_fence_class;
+		else if (entry->new_fence_class != fence_class) {
+			DRM_ERROR("Unmatching fence classes on unfenced list: "
				  "%d and %d.\n",
				  fence_class,
				  entry->new_fence_class);
 			ret = -EINVAL;
 			goto out;
 		}
@@ -574,16 +609,9 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		goto out;
 	}
 
-	/*
-	 * Transfer to a local list before we release the dev->struct_mutex;
-	 * This is so we don't get any new unfenced objects while fencing
-	 * the ones we already have..
-	 */
-
-	list_splice_init(list, &f_list);
-
 	if (fence) {
-		if ((fence_type & fence->type) != fence_type) {
+		if ((fence_type & fence->type) != fence_type ||
+		    (fence->fence_class != fence_class)) {
 			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
 			ret = -EINVAL;
@@ -591,7 +619,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		}
 	} else {
 		mutex_unlock(&dev->struct_mutex);
-		ret = drm_fence_object_create(dev, 0, fence_type,
+		ret = drm_fence_object_create(dev, fence_class, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
 		mutex_lock(&dev->struct_mutex);
@@ -600,8 +628,8 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 	}
 
 	count = 0;
-	l = f_list.next;
-	while (l != &f_list) {
+	l = list->next;
+	while (l != list) {
 		prefetch(l->next);
 		entry = list_entry(l, struct drm_buffer_object, lru);
 		atomic_inc(&entry->usage);
@@ -609,11 +637,14 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		mutex_lock(&entry->mutex);
 		mutex_lock(&dev->struct_mutex);
 		list_del_init(l);
-		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED &&
+		    entry->fence_class == fence_class) {
 			count++;
 			if (entry->fence)
 				drm_fence_usage_deref_locked(&entry->fence);
 			entry->fence = drm_fence_reference_locked(fence);
+			entry->fence_class = entry->new_fence_class;
+			entry->fence_type = entry->new_fence_type;
 			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
 			DRM_WAKEUP(&entry->event_queue);
@@ -621,7 +652,7 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 		}
 		mutex_unlock(&entry->mutex);
 		drm_bo_usage_deref_locked(&entry);
-		l = f_list.next;
+		l = list->next;
 	}
 	DRM_DEBUG("Fenced %d buffers\n", count);
out:
@@ -629,7 +660,6 @@ int drm_fence_buffer_objects(struct drm_file * file_priv,
 	*used_fence = fence;
 	return ret;
 }
-
 EXPORT_SYMBOL(drm_fence_buffer_objects);
 
 /*
@@ -663,12 +693,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 
-	if (bo->type == drm_bo_type_fake) {
-		bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-		bo->mem.mm_node = NULL;
-		goto out1;
-	}
-
 	evict_mem = bo->mem;
 	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
 	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
@@ -688,7 +712,6 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
 		goto out;
 	}
 
-      out1:
 	mutex_lock(&dev->struct_mutex);
 	if (evict_mem.mm_node) {
 		if (evict_mem.mm_node != bo->pinned_node)
@@ -944,6 +967,7 @@ struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
 	atomic_inc(&bo->usage);
 	return bo;
 }
+EXPORT_SYMBOL(drm_lookup_buffer_object);
 
 /*
  * Call bo->mutex locked.
@@ -1079,9 +1103,12 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
 static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
				struct drm_bo_info_rep *rep)
 {
+	if (!rep)
+		return;
+
 	rep->handle = bo->base.hash.key;
 	rep->flags = bo->mem.flags;
-	rep->size = bo->mem.num_pages * PAGE_SIZE;
+	rep->size = bo->num_pages * PAGE_SIZE;
 	rep->offset = bo->offset;
 	rep->arg_handle = bo->map_list.user_token;
 	rep->mask = bo->mem.mask;
@@ -1260,7 +1287,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
 	if (ret)
 		return ret;
 
-	mem.num_pages = bo->mem.num_pages;
+	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.mask = new_mem_flags;
 	mem.page_alignment = bo->mem.page_alignment;
@@ -1308,7 +1335,7 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
 	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
 		return 0;
 	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-	    (!(mem->mask & DRM_BO_FLAG_CACHED) ||
+	    (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
 		return 0;
 	}
@@ -1319,44 +1346,6 @@ static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
 	return 1;
 }
 
-static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem)
-{
-	struct drm_buffer_manager *bm = &dev->bm;
-	struct drm_mem_type_manager *man;
-	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
-	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
-	uint32_t i;
-	int type_ok = 0;
-	uint32_t mem_type = 0;
-	uint32_t cur_flags;
-
-	if (drm_bo_mem_compat(mem))
-		return 0;
-
-	BUG_ON(mem->mm_node);
-
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
-		man = &bm->man[mem_type];
-		type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
-					       &cur_flags);
-		if (type_ok)
-			break;
-	}
-
-	if (type_ok) {
-		mem->mm_node = NULL;
-		mem->mem_type = mem_type;
-		mem->flags = cur_flags;
-		DRM_FLAG_MASKED(mem->flags, mem->mask, ~DRM_BO_MASK_MEMTYPE);
-		return 0;
-	}
-
-	DRM_ERROR("Illegal fake buffer flags 0x%016llx\n",
-		  (unsigned long long) mem->mask);
-	return -EINVAL;
-}
-
 /*
  * bo locked.
 */
@@ -1375,7 +1364,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
		  (unsigned long long) bo->mem.mask,
		  (unsigned long long) bo->mem.flags);
 
-	ret = driver->fence_type(bo, &ftype);
+	ret = driver->fence_type(bo, &fence_class, &ftype);
 
 	if (ret) {
 		DRM_ERROR("Driver did not support given buffer permissions\n");
@@ -1404,17 +1393,14 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 		return ret;
 	}
 
-
-	bo->fence_class = fence_class;
-	bo->fence_type = ftype;
+
+	bo->new_fence_class = fence_class;
+	bo->new_fence_type = ftype;
+
 	ret = drm_bo_wait_unmapped(bo, no_wait);
-	if (ret)
+	if (ret) {
+		DRM_ERROR("Timed out waiting for buffer unmap.\n");
 		return ret;
-
-	if (bo->type == drm_bo_type_fake) {
-		ret = drm_bo_check_fake(dev, &bo->mem);
-		if (ret)
-			return ret;
 	}
 
 	/*
@@ -1465,23 +1451,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
 	return 0;
 }
 
-static int drm_bo_handle_validate(struct drm_file *file_priv,
-				  uint32_t handle,
-				  uint32_t fence_class,
-				  uint64_t flags, uint64_t mask, uint32_t hint,
-				  struct drm_bo_info_rep *rep)
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+		       uint64_t flags, uint64_t mask, uint32_t hint,
+		       uint32_t fence_class,
+		       int no_wait,
+		       struct drm_bo_info_rep *rep)
 {
-	struct drm_device *dev = file_priv->head->dev;
-	struct drm_buffer_object *bo;
 	int ret;
-	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-
-	mutex_lock(&dev->struct_mutex);
-	bo = drm_lookup_buffer_object(file_priv, handle, 1);
-	mutex_unlock(&dev->struct_mutex);
-	if (!bo) {
-		return -EINVAL;
-	}
 
 	mutex_lock(&bo->mutex);
 	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1489,24 +1465,56 @@ static int drm_bo_handle_validate(struct drm_file *file_priv,
 	if (ret)
 		goto out;
 
+	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
 	ret = drm_bo_new_mask(bo, flags, hint);
 	if (ret)
 		goto out;
 
-	ret =
-	    drm_buffer_object_validate(bo, fence_class,
-				       !(hint & DRM_BO_HINT_DONT_FENCE),
-				       no_wait);
-	drm_bo_fill_rep_arg(bo, rep);
-
-      out:
+	ret = drm_buffer_object_validate(bo,
+					 fence_class,
+					 !(hint & DRM_BO_HINT_DONT_FENCE),
+					 no_wait);
+out:
+	if (rep)
+		drm_bo_fill_rep_arg(bo, rep);
 	mutex_unlock(&bo->mutex);
+	return ret;
+}
+EXPORT_SYMBOL(drm_bo_do_validate);
+
+
+int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
+			   uint32_t fence_class,
+			   uint64_t flags, uint64_t mask, uint32_t hint,
+			   struct drm_bo_info_rep * rep,
+			   struct drm_buffer_object **bo_rep)
+{
+	struct drm_device *dev = file_priv->head->dev;
+	struct drm_buffer_object *bo;
+	int ret;
+	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+
+	mutex_lock(&dev->struct_mutex);
+	bo = drm_lookup_buffer_object(file_priv, handle, 1);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (!bo) {
+		return -EINVAL;
+	}
+
+	ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
+				 no_wait, rep);
+
+	if (!ret && bo_rep)
+		*bo_rep = bo;
+	else
+		drm_bo_usage_deref_unlocked(&bo);
 
-	drm_bo_usage_deref_unlocked(&bo);
 	return ret;
 }
+EXPORT_SYMBOL(drm_bo_handle_validate);
 
 /**
  * Fills out the generic buffer object ioctl reply with the information for
@@ -1582,7 +1590,7 @@ int drm_buffer_object_create(struct drm_device *dev,
 	int ret = 0;
 	unsigned long num_pages;
 
-	if ((buffer_start & ~PAGE_MASK) && (type != drm_bo_type_fake)) {
+	if (buffer_start & ~PAGE_MASK) {
 		DRM_ERROR("Invalid buffer object start.\n");
 		return -EINVAL;
 	}
@@ -1611,17 +1619,16 @@ int drm_buffer_object_create(struct drm_device *dev,
 	INIT_LIST_HEAD(&bo->vma_list);
 #endif
 	bo->dev = dev;
-	bo->type = type;
+	if (buffer_start != 0)
+		bo->type = drm_bo_type_user;
+	else
+		bo->type = type;
+	bo->num_pages = num_pages;
 	bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-	bo->mem.num_pages = num_pages;
+	bo->mem.num_pages = bo->num_pages;
 	bo->mem.mm_node = NULL;
 	bo->mem.page_alignment = page_alignment;
-	if (bo->type == drm_bo_type_fake) {
-		bo->offset = buffer_start;
-		bo->buffer_start = 0;
-	} else {
-		bo->buffer_start = buffer_start;
-	}
+	bo->buffer_start = buffer_start;
 	bo->priv_flags = 0;
 	bo->mem.flags = 0ULL;
 	bo->mem.mask = 0ULL;
@@ -1640,18 +1647,12 @@ int drm_buffer_object_create(struct drm_device *dev,
 	}
 	bo->fence_class = 0;
-	ret = driver->fence_type(bo, &bo->fence_type);
+	ret = driver->fence_type(bo, &bo->fence_class, &bo->fence_type);
 	if (ret) {
 		DRM_ERROR("Driver did not support given buffer permissions\n");
 		goto out_err;
 	}
 
-	if (bo->type == drm_bo_type_fake) {
-		ret = drm_bo_check_fake(dev, &bo->mem);
-		if (ret)
-			goto out_err;
-	}
-
 	ret = drm_bo_add_ttm(bo);
 	if (ret)
 		goto out_err;
@@ -1672,8 +1673,8 @@ int drm_buffer_object_create(struct drm_device *dev,
 }
 EXPORT_SYMBOL(drm_buffer_object_create);
 
-int drm_bo_add_user_object(struct drm_file *file_priv,
-			   struct drm_buffer_object *bo, int shareable)
+static int drm_bo_add_user_object(struct drm_file *file_priv,
+				  struct drm_buffer_object *bo, int shareable)
 {
 	struct drm_device *dev = file_priv->head->dev;
 	int ret;
@@ -1692,7 +1693,6 @@ int drm_bo_add_user_object(struct drm_file *file_priv,
 	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(drm_bo_add_user_object);
 
 static int drm_bo_lock_test(struct drm_device * dev, struct drm_file *file_priv)
 {
@@ -1742,7 +1742,7 @@ int drm_bo_op_ioctl(struct drm_device *dev, void *data, struct drm_file *file_pr
						  req->bo_req.flags,
						  req->bo_req.mask,
						  req->bo_req.hint,
-						  &rep);
+						  &rep, NULL);
 			break;
 		case drm_bo_fence:
 			ret = -EINVAL;
@@ -1784,18 +1784,16 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
 	struct drm_buffer_object *entry;
 	int ret = 0;
 
-	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align, %d type\n",
-		  (int)(req->size / 1024), req->page_alignment * 4, req->type);
+	DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
+		  (int)(req->size / 1024), req->page_alignment * 4);
 
 	if (!dev->bm.initialized) {
 		DRM_ERROR("Buffer object manager is not initialized.\n");
 		return -EINVAL;
 	}
-	if (req->type == drm_bo_type_fake)
-		LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	ret = drm_buffer_object_create(file_priv->head->dev,
-				       req->size, req->type, req->mask,
+				       req->size, drm_bo_type_dc, req->mask,
				       req->hint, req->page_alignment,
				       req->buffer_start, &entry);
 	if (ret)
@@ -1816,32 +1814,6 @@ out:
 	return ret;
 }
 
-
-int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_bo_handle_arg *arg = data;
-	struct drm_user_object *uo;
-	int ret = 0;
-
-	DRM_DEBUG("drm_bo_destroy_ioctl: buffer %d\n", arg->handle);
-
-	if (!dev->bm.initialized) {
-		DRM_ERROR("Buffer object manager is not initialized.\n");
-		return -EINVAL;
-	}
-
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(file_priv, arg->handle);
-	if (!uo || (uo->type != drm_buffer_type) || uo->owner != file_priv) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-	ret = drm_remove_user_object(file_priv, uo);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	struct drm_bo_map_wait_idle_arg *arg = data;
@@ -2093,9 +2065,30 @@ static void drm_bo_clean_unfenced(struct drm_device *dev)
 	struct drm_buffer_manager *bm = &dev->bm;
 	struct list_head *head, *list;
 	struct drm_buffer_object *entry;
+	struct drm_fence_object *fence;
 
 	head = &bm->unfenced;
 
+	if (list_empty(head))
+		return;
+
+	DRM_ERROR("Clean unfenced\n");
+
+	if (drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence)) {
+
+		/*
+		 * Fixme: Should really wait here.
+		 */
+	}
+
+	if (fence)
+		drm_fence_usage_deref_locked(&fence);
+
+	if (list_empty(head))
+		return;
+
+	DRM_ERROR("Really clean unfenced\n");
+
 	list = head->next;
 	while(list != head) {
 		prefetch(list->next);
@@ -2255,7 +2248,7 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
 
 	if (!man->has_type) {
 		DRM_ERROR("Trying to take down uninitialized "
-			  "memory manager type\n");
+			  "memory manager type %u\n", mem_type);
 		return ret;
 	}
 	man->use_type = 0;
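
With drm_bo.c reworked as above, drm_bo_handle_validate() is just the user-handle wrapper and the core logic lives in the exported drm_bo_do_validate(). A minimal sketch (not part of the patch) of an in-kernel caller that already holds a buffer object; the flag and mask values are illustrative only:

static int foo_pin_in_tt(struct drm_buffer_object *bo, uint32_t fence_class)
{
	/* Ask for TT placement; only the memory bits of the mask change. */
	return drm_bo_do_validate(bo,
				  DRM_BO_FLAG_MEM_TT,	/* flags */
				  DRM_BO_MASK_MEM,	/* mask */
				  0,			/* hint */
				  fence_class,
				  1,			/* no_wait */
				  NULL);		/* no drm_bo_info_rep needed */
}

User-handle callers go through drm_bo_handle_validate(), which can now also hand the looked-up object back through the new bo_rep argument.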
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 21ab4bbb..d6c58970 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -71,9 +71,7 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
 		save_flags = old_mem->flags;
 	}
 	if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
-		ret = drm_bind_ttm(ttm,
-				   new_mem->flags & DRM_BO_FLAG_CACHED,
-				   new_mem->mm_node->start);
+		ret = drm_bind_ttm(ttm, new_mem);
 		if (ret)
 			return ret;
 	}
@@ -345,6 +343,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 		ret = drm_fence_object_create(dev, fence_class, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &bo->fence);
+		bo->fence_type = fence_type;
 		if (ret)
 			return ret;
 
@@ -411,3 +410,194 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 }
 EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
 
+
+int drm_bo_same_page(unsigned long offset,
+		     unsigned long offset2)
+{
+	return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
+}
+EXPORT_SYMBOL(drm_bo_same_page);
+
+unsigned long drm_bo_offset_end(unsigned long offset,
+				unsigned long end)
+{
+
+	offset = (offset + PAGE_SIZE) & PAGE_MASK;
+	return (end < offset) ? end : offset;
+}
+EXPORT_SYMBOL(drm_bo_offset_end);
+
+
+static pgprot_t drm_kernel_io_prot(uint32_t map_type)
+{
+	pgprot_t tmp = PAGE_KERNEL;
+
+#if defined(__i386__) || defined(__x86_64__)
+#ifdef USE_PAT_WC
+#warning using pat
+	if (drm_use_pat() && map_type == _DRM_TTM) {
+		pgprot_val(tmp) |= _PAGE_PAT;
+		return tmp;
+	}
+#endif
+	if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
+		pgprot_val(tmp) |= _PAGE_PCD;
+		pgprot_val(tmp) &= ~_PAGE_PWT;
+	}
+#elif defined(__powerpc__)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
+	if (map_type == _DRM_REGISTERS)
+		pgprot_val(tmp) |= _PAGE_GUARDED;
+#endif
+#if defined(__ia64__)
+	if (map_type == _DRM_TTM)
+		tmp = pgprot_writecombine(tmp);
+	else
+		tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
+			  unsigned long bus_offset, unsigned long bus_size,
+			  struct drm_bo_kmap_obj *map)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_mem_reg *mem = &bo->mem;
+	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+
+	if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+		map->bo_kmap_type = bo_map_premapped;
+		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+	} else {
+		map->bo_kmap_type = bo_map_iomap;
+		map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
+			   unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+	struct drm_device *dev = bo->dev;
+	struct drm_bo_mem_reg *mem = &bo->mem;
+	struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
+	pgprot_t prot;
+	struct drm_ttm *ttm = bo->ttm;
+	struct page *d;
+	int i;
+
+	BUG_ON(!ttm);
+
+	if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {
+
+		/*
+		 * We're mapping a single page, and the desired
+		 * page protection is consistent with the bo.
+		 */
+
+		map->bo_kmap_type = bo_map_kmap;
+		map->page = drm_ttm_get_page(ttm, start_page);
+		map->virtual = kmap(map->page);
+	} else {
+		/*
+		 * Populate the part we're mapping;
+		 */
+
+		for (i = start_page; i < start_page + num_pages; ++i) {
+			d = drm_ttm_get_page(ttm, i);
+			if (!d)
+				return -ENOMEM;
+		}
+
+		/*
+		 * We need to use vmap to get the desired page protection
+		 * or to make the buffer object look contigous.
+		 */
+
+		prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
+			PAGE_KERNEL :
+			drm_kernel_io_prot(man->drm_bus_maptype);
+		map->bo_kmap_type = bo_map_vmap;
+		map->virtual = vmap(ttm->pages + start_page,
+				    num_pages, 0, prot);
+	}
+	return (!map->virtual) ? -ENOMEM : 0;
+}
+
+/*
+ * This function is to be used for kernel mapping of buffer objects.
+ * It chooses the appropriate mapping method depending on the memory type
+ * and caching policy the buffer currently has.
+ * Mapping multiple pages or buffers that live in io memory is a bit slow and
+ * consumes vmalloc space. Be restrictive with such mappings.
+ * Mapping single pages usually returns the logical kernel address, (which is fast)
+ * BUG may use slower temporary mappings for high memory pages or
+ * uncached / write-combined pages.
+ *
+ * The function fills in a drm_bo_kmap_obj which can be used to return the
+ * kernel virtual address of the buffer.
+ *
+ * Code servicing a non-priviliged user request is only allowed to map one
+ * page at a time. We might need to implement a better scheme to stop such
+ * processes from consuming all vmalloc space.
+ */
+
+int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
+		unsigned long num_pages, struct drm_bo_kmap_obj *map)
+{
+	int ret;
+	unsigned long bus_base;
+	unsigned long bus_offset;
+	unsigned long bus_size;
+
+	map->virtual = NULL;
+
+	if (num_pages > bo->num_pages)
+		return -EINVAL;
+	if (start_page > bo->num_pages)
+		return -EINVAL;
+#if 0
+	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+		return -EPERM;
+#endif
+	ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+				&bus_offset, &bus_size);
+
+	if (ret)
+		return ret;
+
+	if (bus_size == 0) {
+		return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
+	} else {
+		bus_offset += start_page << PAGE_SHIFT;
+		bus_size = num_pages << PAGE_SHIFT;
+		return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+	}
+}
+EXPORT_SYMBOL(drm_bo_kmap);
+
+void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
+{
+	if (!map->virtual)
+		return;
+
+	switch(map->bo_kmap_type) {
+	case bo_map_iomap:
+		iounmap(map->virtual);
+		break;
+	case bo_map_vmap:
+		vunmap(map->virtual);
+		break;
+	case bo_map_kmap:
+		kunmap(map->page);
+		break;
+	case bo_map_premapped:
+		break;
+	default:
+		BUG();
+	}
+	map->virtual = NULL;
+	map->page = NULL;
+}
+EXPORT_SYMBOL(drm_bo_kunmap);
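
The comment block above spells out how drm_bo_kmap() picks its mapping method. A short usage sketch (illustrative, not from the patch): map a single page, check whether the mapping is io memory, and unmap again.

static int foo_clear_page(struct drm_buffer_object *bo, unsigned long page)
{
	struct drm_bo_kmap_obj kmap;
	void *virtual;
	int is_iomem;
	int ret;

	ret = drm_bo_kmap(bo, page, 1, &kmap);
	if (ret)
		return ret;

	virtual = drm_bmo_virtual(&kmap, &is_iomem);
	if (!is_iomem)
		memset(virtual, 0, PAGE_SIZE);
	/* an io mapping would want memset_io() instead */

	drm_bo_kunmap(&kmap);
	return 0;
}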
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index e906a539..0895e5e5 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -200,7 +200,10 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
 extern pgprot_t vm_get_page_prot(unsigned long vm_flags);
 
 #ifndef GFP_DMA32
-#define GFP_DMA32 0
+#define GFP_DMA32 GFP_KERNEL
+#endif
+#ifndef __GFP_DMA32
+#define __GFP_DMA32 GFP_KERNEL
 #endif
 
 #if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index a313c26c..0d1a6d19 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -142,7 +142,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl, DRM_AUTH),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_DESTROY, drm_fence_destroy_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_REFERENCE, drm_fence_reference_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_UNREFERENCE, drm_fence_unreference_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_SIGNALED, drm_fence_signaled_ioctl, DRM_AUTH),
@@ -152,7 +151,6 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_FENCE_BUFFERS, drm_fence_buffers_ioctl, DRM_AUTH),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_BO_CREATE, drm_bo_create_ioctl, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_IOCTL_BO_DESTROY, drm_bo_destroy_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_BO_MAP, drm_bo_map_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_BO_UNMAP, drm_bo_unmap_ioctl, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_BO_REFERENCE, drm_bo_reference_ioctl, DRM_AUTH),
@@ -332,6 +330,11 @@ int drm_init(struct drm_driver *driver,
 		while ((pdev = pci_get_subsys(pid->vendor, pid->device,
					      pid->subvendor, pid->subdevice,
					      pdev))) {
+			/* Are there device class requirements? */
+			if ((pid->class != 0)
+			    && ((pdev->class & pid->class_mask) != pid->class)) {
+				continue;
+			}
 			/* is there already a driver loaded, or (short circuit saves work) */
 			/* does something like VesaFB have control of the memory region? */
 			if (pci_dev_driver(pdev)
@@ -358,6 +361,11 @@ int drm_init(struct drm_driver *driver,
			       pci_get_subsys(pid->vendor, pid->device,
					      pid->subvendor, pid->subdevice,
					      pdev))) {
+				/* Are there device class requirements? */
+				if ((pid->class != 0)
+				    && ((pdev->class & pid->class_mask) != pid->class)) {
+					continue;
+				}
				/* stealth mode requires a manual probe */
				pci_dev_get(pdev);
				if ((rc = drm_get_dev(pdev, &pciidlist[i], driver))) {
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index 2f16f7ef..e696b42d 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -34,14 +34,14 @@
  * Typically called by the IRQ handler.
  */
 
-void drm_fence_handler(struct drm_device * dev, uint32_t class,
-		       uint32_t sequence, uint32_t type)
+void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
+		       uint32_t sequence, uint32_t type, uint32_t error)
 {
 	int wake = 0;
 	uint32_t diff;
 	uint32_t relevant;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	struct list_head *head;
 	struct drm_fence_object *fence, *next;
@@ -49,6 +49,7 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
 	int is_exe = (type & DRM_FENCE_TYPE_EXE);
 	int ge_last_exe;
 
+
 	diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
 
 	if (fc->pending_exe_flush && is_exe && diff < driver->wrap_diff)
@@ -57,9 +58,6 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
 	diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
 	ge_last_exe = diff < driver->wrap_diff;
 
-	if (ge_last_exe)
-		fc->pending_flush &= ~type;
-
 	if (is_exe && ge_last_exe) {
 		fc->last_exe_flush = sequence;
 	}
@@ -75,36 +73,68 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class,
 		}
 	}
 
+	fc->pending_flush &= ~type;
 	head = (found) ? &fence->ring : &fc->ring;
 
 	list_for_each_entry_safe_reverse(fence, next, head, ring) {
 		if (&fence->ring == &fc->ring)
 			break;
 
-		type |= fence->native_type;
+		if (error) {
+			fence->error = error;
+			fence->signaled = fence->type;
+			fence->submitted_flush = fence->type;
+			fence->flush_mask = fence->type;
+			list_del_init(&fence->ring);
+			wake = 1;
+			break;
+		}
+
+		if (is_exe)
+			type |= fence->native_type;
+
 		relevant = type & fence->type;
 
 		if ((fence->signaled | relevant) != fence->signaled) {
 			fence->signaled |= relevant;
+			fence->flush_mask |= relevant;
+			fence->submitted_flush |= relevant;
 			DRM_DEBUG("Fence 0x%08lx signaled 0x%08x\n",
				  fence->base.hash.key, fence->signaled);
-			fence->submitted_flush |= relevant;
 			wake = 1;
 		}
 
 		relevant = fence->flush_mask &
-		    ~(fence->signaled | fence->submitted_flush);
+			~(fence->submitted_flush | fence->signaled);
 
-		if (relevant) {
-			fc->pending_flush |= relevant;
-			fence->submitted_flush = fence->flush_mask;
-		}
+		fc->pending_flush |= relevant;
+		fence->submitted_flush |= relevant;
 
 		if (!(fence->type & ~fence->signaled)) {
 			DRM_DEBUG("Fence completely signaled 0x%08lx\n",
				  fence->base.hash.key);
			list_del_init(&fence->ring);
 		}
+
+	}
+
+	/*
+	 * Reinstate lost flush flags.
+	 */
+
+	if ((fc->pending_flush & type) != type) {
+		head = head->prev;
+		list_for_each_entry(fence, head, ring) {
+			if (&fence->ring == &fc->ring)
+				break;
+			diff = (fc->last_exe_flush - fence->sequence) &
+				driver->sequence_mask;
+			if (diff > driver->wrap_diff)
+				break;
+
+			relevant = fence->submitted_flush & ~fence->signaled;
+			fc->pending_flush |= relevant;
+		}
 	}
 
 	if (wake) {
@@ -141,6 +171,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
 		drm_ctl_free(tmp_fence, sizeof(*tmp_fence), DRM_MEM_FENCE);
 	}
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_locked);
 
 void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
 {
@@ -160,6 +191,7 @@ void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
 		mutex_unlock(&dev->struct_mutex);
 	}
 }
+EXPORT_SYMBOL(drm_fence_usage_deref_unlocked);
 
 struct drm_fence_object
 *drm_fence_reference_locked(struct drm_fence_object *src)
@@ -178,7 +210,7 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
 		atomic_inc(&src->usage);
 		mutex_unlock(&src->dev->struct_mutex);
 	}
 }
-
+EXPORT_SYMBOL(drm_fence_reference_unlocked);
 
 static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
 {
@@ -198,7 +230,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 
 	if (poke_flush)
-		driver->poke_flush(dev, fence->class);
+		driver->poke_flush(dev, fence->fence_class);
 	read_lock_irqsave(&fm->lock, flags);
 	signaled =
	    (fence->type & mask & fence->signaled) == (fence->type & mask);
@@ -206,6 +238,7 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
 
 	return signaled;
 }
+EXPORT_SYMBOL(drm_fence_object_signaled);
 
 static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
				struct drm_fence_driver * driver, uint32_t sequence)
@@ -229,7 +262,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[fence->class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	unsigned long flags;
 
@@ -241,7 +274,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 
 	write_lock_irqsave(&fm->lock, flags);
 	fence->flush_mask |= type;
-	if (fence->submitted_flush == fence->signaled) {
+	if ((fence->submitted_flush & fence->signaled)
+	    == fence->submitted_flush) {
 		if ((fence->type & DRM_FENCE_TYPE_EXE) &&
		    !(fence->submitted_flush & DRM_FENCE_TYPE_EXE)) {
			drm_fence_flush_exe(fc, driver, fence->sequence);
@@ -253,7 +287,7 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 		}
 	}
 	write_unlock_irqrestore(&fm->lock, flags);
-	driver->poke_flush(dev, fence->class);
+	driver->poke_flush(dev, fence->fence_class);
 	return 0;
 }
 
@@ -262,10 +296,10 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
 * wrapped around and reused.
 */
 
-void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence)
+void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence)
 {
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	uint32_t old_sequence;
 	unsigned long flags;
@@ -308,7 +342,7 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[fence->class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
 	int signaled;
 	unsigned long _end = jiffies + 3*DRM_HZ;
 	int ret = 0;
@@ -329,7 +363,15 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
 		if (ret == -EBUSY) {
 			DRM_ERROR("Fence timeout. "
				  "GPU lockup or fence driver was "
-				  "taken down.\n");
+				  "taken down. %d 0x%08x 0x%02x 0x%02x 0x%02x\n",
+				  fence->fence_class,
+				  fence->sequence,
+				  fence->type,
+				  mask,
+				  fence->signaled);
+			DRM_ERROR("Pending exe flush %d 0x%08x\n",
+				  fc->pending_exe_flush,
+				  fc->exe_flush_sequence);
 		}
 		return ((ret == -EINTR) ? -EAGAIN : ret);
 	}
@@ -348,6 +390,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 	if (mask & ~fence->type) {
 		DRM_ERROR("Wait trying to extend fence type"
			  " 0x%08x 0x%08x\n", mask, fence->type);
+		BUG();
 		return -EINVAL;
 	}
 
@@ -366,7 +409,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 
 	} else {
 
-		if (driver->has_irq(dev, fence->class,
+		if (driver->has_irq(dev, fence->fence_class,
				    DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(fence, ignore_signals,
						  DRM_FENCE_TYPE_EXE);
@@ -374,7 +417,7 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
				return ret;
 		}
 
-		if (driver->has_irq(dev, fence->class,
+		if (driver->has_irq(dev, fence->fence_class,
				    mask & ~DRM_FENCE_TYPE_EXE)) {
			ret = drm_fence_lazy_wait(fence, ignore_signals,
						  mask);
@@ -402,26 +445,28 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_wait);
+
 
 int drm_fence_object_emit(struct drm_fence_object * fence,
-			  uint32_t fence_flags, uint32_t class, uint32_t type)
+			  uint32_t fence_flags, uint32_t fence_class, uint32_t type)
 {
 	struct drm_device *dev = fence->dev;
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
-	struct drm_fence_class_manager *fc = &fm->class[fence->class];
+	struct drm_fence_class_manager *fc = &fm->fence_class[fence->fence_class];
 	unsigned long flags;
 	uint32_t sequence;
 	uint32_t native_type;
 	int ret;
 
 	drm_fence_unring(dev, &fence->ring);
-	ret = driver->emit(dev, class, fence_flags, &sequence, &native_type);
+	ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
 	if (ret)
 		return ret;
 
 	write_lock_irqsave(&fm->lock, flags);
-	fence->class = class;
+	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->flush_mask = 0x00;
 	fence->submitted_flush = 0x00;
@@ -434,8 +479,9 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
 	write_unlock_irqrestore(&fm->lock, flags);
 	return 0;
 }
+EXPORT_SYMBOL(drm_fence_object_emit);
 
-static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
+static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
				 uint32_t type,
				 uint32_t fence_flags,
				 struct drm_fence_object * fence)
@@ -456,7 +502,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class,
 	 */
 
 	INIT_LIST_HEAD(&fence->base.list);
-	fence->class = class;
+	fence->fence_class = fence_class;
 	fence->type = type;
 	fence->flush_mask = 0;
 	fence->submitted_flush = 0;
@@ -466,7 +512,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
 	write_unlock_irqrestore(&fm->lock, flags);
 	if (fence_flags & DRM_FENCE_FLAG_EMIT) {
 		ret = drm_fence_object_emit(fence, fence_flags,
-					    fence->class, type);
+					    fence->fence_class, type);
 	}
 	return ret;
 }
@@ -491,7 +537,7 @@ out:
 }
 EXPORT_SYMBOL(drm_fence_add_user_object);
 
-int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type,
+int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
			    unsigned flags, struct drm_fence_object ** c_fence)
 {
 	struct drm_fence_object *fence;
@@ -501,7 +547,7 @@ int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t ty
 	fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE);
 	if (!fence)
 		return -ENOMEM;
-	ret = drm_fence_object_init(dev, class, type, flags, fence);
+	ret = drm_fence_object_init(dev, fence_class, type, flags, fence);
 	if (ret) {
 		drm_fence_usage_deref_unlocked(&fence);
 		return ret;
@@ -517,7 +563,7 @@ EXPORT_SYMBOL(drm_fence_object_create);
 void drm_fence_manager_init(struct drm_device * dev)
 {
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *class;
+	struct drm_fence_class_manager *fence_class;
 	struct drm_fence_driver *fed = dev->driver->fence_driver;
 	int i;
 	unsigned long flags;
@@ -533,11 +579,11 @@ void drm_fence_manager_init(struct drm_device * dev)
 	BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
 
 	for (i=0; i<fm->num_classes; ++i) {
-		class = &fm->class[i];
+		fence_class = &fm->fence_class[i];
 
-		INIT_LIST_HEAD(&class->ring);
-		class->pending_flush = 0;
-		DRM_INIT_WAITQUEUE(&class->fence_queue);
+		INIT_LIST_HEAD(&fence_class->ring);
+		fence_class->pending_flush = 0;
+		DRM_INIT_WAITQUEUE(&fence_class->fence_queue);
 	}
 
 	atomic_set(&fm->count, 0);
@@ -545,6 +591,24 @@ void drm_fence_manager_init(struct drm_device * dev)
 	write_unlock_irqrestore(&fm->lock, flags);
 }
 
+void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
+{
+	struct drm_device *dev = fence->dev;
+	struct drm_fence_manager *fm = &dev->fm;
+	unsigned long irq_flags;
+
+	read_lock_irqsave(&fm->lock, irq_flags);
+	arg->handle = fence->base.hash.key;
+	arg->fence_class = fence->fence_class;
+	arg->type = fence->type;
+	arg->signaled = fence->signaled;
+	arg->error = fence->error;
+	arg->sequence = fence->sequence;
+	read_unlock_irqrestore(&fm->lock, irq_flags);
+}
+EXPORT_SYMBOL(drm_fence_fill_arg);
+
+
 void drm_fence_manager_takedown(struct drm_device * dev)
 {
 }
@@ -572,7 +636,6 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -582,7 +645,7 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 
 	if (arg->flags & DRM_FENCE_FLAG_EMIT)
		LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_object_create(dev, arg->class,
+	ret = drm_fence_object_create(dev, arg->fence_class,
				      arg->type, arg->flags, &fence);
 	if (ret)
		return ret;
@@ -597,44 +660,16 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
 
 	/*
	 * usage > 0.
	 * No need to lock dev->struct_mutex;
	 */
-
-	arg->handle = fence->base.hash.key;
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
-	drm_fence_usage_deref_unlocked(&fence);
-
-	return ret;
-}
 
+	arg->handle = fence->base.hash.key;
 
-int drm_fence_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	int ret;
-	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_arg *arg = data;
-	struct drm_user_object *uo;
-	ret = 0;
-
-	if (!fm->initialized) {
-		DRM_ERROR("The DRM driver does not support fencing.\n");
-		return -EINVAL;
-	}
+	drm_fence_fill_arg(fence, arg);
+	drm_fence_usage_deref_unlocked(&fence);
 
-	mutex_lock(&dev->struct_mutex);
-	uo = drm_lookup_user_object(file_priv, arg->handle);
-	if (!uo || (uo->type != drm_fence_type) || uo->owner != file_priv) {
-		mutex_unlock(&dev->struct_mutex);
-		return -EINVAL;
-	}
-	ret = drm_remove_user_object(file_priv, uo);
-	mutex_unlock(&dev->struct_mutex);
 	return ret;
 }
 
-
 int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
 	int ret;
@@ -642,7 +677,6 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
 	struct drm_user_object *uo;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -654,12 +688,7 @@ int drm_fence_reference_ioctl(struct drm_device *dev, void *data, struct drm_fil
 	if (ret)
		return ret;
 	fence = drm_lookup_fence_object(file_priv, arg->handle);
-
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -687,7 +716,6 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -699,11 +727,7 @@ int drm_fence_signaled_ioctl(struct drm_device *dev, void *data, struct drm_file
 	if (!fence)
		return -EINVAL;
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -715,7 +739,6 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -728,11 +751,7 @@ int drm_fence_flush_ioctl(struct drm_device *dev, void *data, struct drm_file *f
		return -EINVAL;
 	ret = drm_fence_object_flush(fence, arg->type);
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -745,7 +764,6 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -760,11 +778,7 @@ int drm_fence_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
			      arg->flags & DRM_FENCE_FLAG_WAIT_LAZY,
			      0, arg->type);
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -777,7 +791,6 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -789,14 +802,10 @@ int drm_fence_emit_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
 	fence = drm_lookup_fence_object(file_priv, arg->handle);
 	if (!fence)
		return -EINVAL;
-	ret = drm_fence_object_emit(fence, arg->flags, arg->class,
+	ret = drm_fence_object_emit(fence, arg->flags, arg->fence_class,
				    arg->type);
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
@@ -808,7 +817,6 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file
 	struct drm_fence_manager *fm = &dev->fm;
 	struct drm_fence_arg *arg = data;
 	struct drm_fence_object *fence;
-	unsigned long flags;
 	ret = 0;
 
 	if (!fm->initialized) {
@@ -821,23 +829,22 @@ int drm_fence_buffers_ioctl(struct drm_device *dev, void *data, struct drm_file
		return -EINVAL;
 	}
 	LOCK_TEST_WITH_RETURN(dev, file_priv);
-	ret = drm_fence_buffer_objects(file_priv, NULL, arg->flags,
+	ret = drm_fence_buffer_objects(dev, NULL, arg->flags,
				       NULL, &fence);
 	if (ret)
		return ret;
-	ret = drm_fence_add_user_object(file_priv, fence,
-					arg->flags &
-					DRM_FENCE_FLAG_SHAREABLE);
-	if (ret)
-		return ret;
+
+	if (!(arg->flags & DRM_FENCE_FLAG_NO_USER)) {
+		ret = drm_fence_add_user_object(file_priv, fence,
+						arg->flags &
+						DRM_FENCE_FLAG_SHAREABLE);
+		if (ret)
+			return ret;
+	}
 
 	arg->handle = fence->base.hash.key;
 
-	read_lock_irqsave(&fm->lock, flags);
-	arg->class = fence->class;
-	arg->type = fence->type;
-	arg->signaled = fence->signaled;
-	read_unlock_irqrestore(&fm->lock, flags);
+	drm_fence_fill_arg(fence, arg);
 	drm_fence_usage_deref_unlocked(&fence);
 
 	return ret;
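
drm_fence_handler() now takes the fence class explicitly plus an error argument that, when nonzero, signals the whole fence and records fence->error. Below is a sketch of a driver breadcrumb IRQ against the extended signature; foo_read_breadcrumb() is hypothetical, and the locking mirrors the in-tree fence drivers, which call the handler with the fence-manager lock write-held.

static void foo_fence_irq(struct drm_device *dev)
{
	struct drm_fence_manager *fm = &dev->fm;
	uint32_t sequence;
	unsigned long flags;

	write_lock_irqsave(&fm->lock, flags);
	sequence = foo_read_breadcrumb(dev);	/* hw-specific, hypothetical */
	/* Normal completion: error == 0; pass an error code on GPU lockup. */
	drm_fence_handler(dev, 0 /* fence_class */, sequence,
			  DRM_FENCE_TYPE_EXE, 0 /* error */);
	write_unlock_irqrestore(&fm->lock, flags);
}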
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 643205c8..a6222613 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -263,7 +263,6 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	priv->lock_count = 0;
 
 	INIT_LIST_HEAD(&priv->lhead);
-	INIT_LIST_HEAD(&priv->user_objects);
 	INIT_LIST_HEAD(&priv->refd_objects);
 	INIT_LIST_HEAD(&priv->fbs);
 
@@ -339,7 +338,6 @@ static void drm_object_release(struct file *filp) {
 	struct drm_file *priv = filp->private_data;
 	struct list_head *head;
-	struct drm_user_object *user_object;
 	struct drm_ref_object *ref_object;
 	int i;
 
@@ -358,17 +356,6 @@ static void drm_object_release(struct file *filp) {
 		head = &priv->refd_objects;
 	}
 
-	/*
-	 * Free leftover user objects created by me.
-	 */
-
-	head = &priv->user_objects;
-	while (head->next != head) {
-		user_object = list_entry(head->next, struct drm_user_object, list);
-		drm_remove_user_object(priv, user_object);
-		head = &priv->user_objects;
-	}
-
 	for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
 		drm_ht_remove(&priv->refd_object_hash[i]);
 	}
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index 3d866333..a6d6c0d7 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -38,7 +38,8 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
 
 	DRM_ASSERT_LOCKED(&dev->struct_mutex);
 
-	atomic_set(&item->refcount, 1);
+	/* The refcount will be bumped to 1 when we add the ref object below. */
+	atomic_set(&item->refcount, 0);
 	item->shareable = shareable;
 	item->owner = priv;
 
@@ -47,9 +48,13 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
 	if (ret)
		return ret;
 
-	list_add_tail(&item->list, &priv->user_objects);
-	return 0;
+	ret = drm_add_ref_object(priv, item, _DRM_REF_USE);
+	if (ret)
+		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
+
+	return ret;
 }
+EXPORT_SYMBOL(drm_add_user_object);
 
 struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
 {
@@ -76,6 +81,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
 	}
 	return item;
 }
+EXPORT_SYMBOL(drm_lookup_user_object);
 
 static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
 {
@@ -85,26 +91,10 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object
 	if (atomic_dec_and_test(&item->refcount)) {
 		ret = drm_ht_remove_item(&dev->object_hash, &item->hash);
 		BUG_ON(ret);
-		list_del_init(&item->list);
 		item->remove(priv, item);
 	}
 }
 
-int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item)
-{
-	DRM_ASSERT_LOCKED(&priv->head->dev->struct_mutex);
-
-	if (item->owner != priv) {
-		DRM_ERROR("Cannot destroy object not owned by you.\n");
-		return -EINVAL;
-	}
-	item->owner = 0;
-	item->shareable = 0;
-	list_del_init(&item->list);
-	drm_deref_user_object(priv, item);
-	return 0;
-}
-
 static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro,
				 enum drm_ref_type action)
 {
@@ -196,6 +186,7 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
 
 	return drm_hash_entry(hash, struct drm_ref_object, hash);
 }
+EXPORT_SYMBOL(drm_lookup_ref_object);
 
 static void drm_remove_other_references(struct drm_file * priv,
					struct drm_user_object * ro)
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 2ddbe74b..9c9826e0 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -32,6 +32,7 @@
 #define _DRM_OBJECTS_H
 
 struct drm_device;
+struct drm_bo_mem_reg;
 
 /***************************************************
 * User space objects. (drm_object.c)
@@ -42,10 +43,14 @@ struct drm_device;
 enum drm_object_type {
	drm_fence_type,
	drm_buffer_type,
-	drm_ttm_type
	/*
	 * Add other user space object types here.
	 */
+	drm_driver_type0 = 256,
+	drm_driver_type1,
+	drm_driver_type2,
+	drm_driver_type3,
+	drm_driver_type4
 };
 
 /*
@@ -98,15 +103,6 @@ extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv,
						      uint32_t key);
 
 /*
- * Must be called with the struct_mutex held.
- * If "item" has been obtained by a call to drm_lookup_user_object. You may not
- * release the struct_mutex before calling drm_remove_ref_object.
- * This function may temporarily release the struct_mutex.
- */ - -extern int drm_remove_user_object(struct drm_file * priv, struct drm_user_object * item); - -/* * Must be called with the struct_mutex held. May temporarily release it. */ @@ -149,13 +145,14 @@ struct drm_fence_object { */ struct list_head ring; - int class; + int fence_class; uint32_t native_type; uint32_t type; uint32_t signaled; uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; + uint32_t error; }; #define _DRM_FENCE_CLASSES 8 @@ -173,7 +170,7 @@ struct drm_fence_class_manager { struct drm_fence_manager { int initialized; rwlock_t lock; - struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; }; @@ -184,18 +181,18 @@ struct drm_fence_driver { uint32_t flush_diff; uint32_t sequence_mask; int lazy_capable; - int (*has_irq) (struct drm_device * dev, uint32_t class, + int (*has_irq) (struct drm_device * dev, uint32_t fence_class, uint32_t flags); - int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, + int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); - void (*poke_flush) (struct drm_device * dev, uint32_t class); + void (*poke_flush) (struct drm_device * dev, uint32_t fence_class); }; -extern void drm_fence_handler(struct drm_device *dev, uint32_t class, - uint32_t sequence, uint32_t type); +extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class, + uint32_t sequence, uint32_t type, uint32_t error); extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); -extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, +extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class, uint32_t sequence); extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); extern int drm_fence_object_signaled(struct drm_fence_object * fence, @@ -208,8 +205,14 @@ extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, - uint32_t fence_flags, uint32_t class, + uint32_t fence_flags, uint32_t fence_class, struct drm_fence_object ** c_fence); +extern int drm_fence_object_emit(struct drm_fence_object * fence, + uint32_t fence_flags, uint32_t class, + uint32_t type); +extern void drm_fence_fill_arg(struct drm_fence_object *fence, + struct drm_fence_arg *arg); + extern int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable); @@ -258,23 +261,22 @@ struct drm_ttm_backend_func { unsigned long num_pages, struct page ** pages); void (*clear) (struct drm_ttm_backend * backend); int (*bind) (struct drm_ttm_backend * backend, - unsigned long offset, int cached); + struct drm_bo_mem_reg * bo_mem); int (*unbind) (struct drm_ttm_backend * backend); void (*destroy) (struct drm_ttm_backend * backend); }; -struct drm_ttm_backend { - uint32_t flags; - int mem_type; - struct drm_ttm_backend_func *func; -}; +typedef struct drm_ttm_backend { + struct drm_device *dev; + uint32_t flags; + struct drm_ttm_backend_func *func; +} drm_ttm_backend_t; struct drm_ttm { struct page **pages; uint32_t page_flags; unsigned long num_pages; - unsigned long aper_offset; atomic_t vma_count; struct drm_device *dev; int destroy; @@ -290,11 +292,13 @@ struct 
@@ -290,11 +292,13 @@ struct drm_ttm {
 };
 
 extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset);
+extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem);
 extern void drm_ttm_unbind(struct drm_ttm * ttm);
 extern void drm_ttm_evict(struct drm_ttm * ttm);
 extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
 extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
+extern void drm_ttm_cache_flush(void);
+extern int drm_ttm_populate(struct drm_ttm * ttm);
 
 /*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -333,6 +337,14 @@ struct drm_bo_mem_reg {
	uint32_t mem_type;
	uint64_t flags;
	uint64_t mask;
+	uint32_t desired_tile_stride;
+	uint32_t hw_tile_stride;
+};
+
+enum drm_bo_type {
+	drm_bo_type_dc,
+	drm_bo_type_user,
+	drm_bo_type_kernel, /* for initial kernel allocations */
 };
 
 struct drm_buffer_object {
@@ -356,10 +368,13 @@ struct drm_buffer_object {
 
	uint32_t fence_type;
	uint32_t fence_class;
+	uint32_t new_fence_type;
+	uint32_t new_fence_class;
	struct drm_fence_object *fence;
	uint32_t priv_flags;
	wait_queue_head_t event_queue;
	struct mutex mutex;
+	unsigned long num_pages;
 
	/* For pinned buffers */
	int pinned;
@@ -368,7 +383,6 @@ struct drm_buffer_object {
	struct list_head pinned_lru;
 
	/* For vm */
-
	struct drm_ttm *ttm;
	struct drm_map_list map_list;
	uint32_t memory_type;
@@ -395,6 +409,7 @@ struct drm_mem_type_manager {
	struct list_head pinned;
	uint32_t flags;
	uint32_t drm_bus_maptype;
+	unsigned long gpu_offset;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;
@@ -434,7 +449,8 @@ struct drm_bo_driver {
	uint32_t num_mem_busy_prio;
	struct drm_ttm_backend *(*create_ttm_backend_entry)
	 (struct drm_device * dev);
-	int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type);
+	int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
+			   uint32_t * type);
	int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
	int (*init_mem_type) (struct drm_device * dev, uint32_t type,
			      struct drm_mem_type_manager * man);
@@ -451,6 +467,7 @@ extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_f
 extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
+extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
 extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
@@ -471,34 +488,44 @@ extern int drm_bo_pci_offset(struct drm_device *dev,
 extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
 
 extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
-extern int drm_fence_buffer_objects(struct drm_file * priv,
+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo);
+extern void drm_putback_buffer_objects(struct drm_device *dev);
+extern int drm_fence_buffer_objects(struct drm_device * dev,
				    struct list_head *list,
				    uint32_t fence_flags,
				    struct drm_fence_object * fence,
				    struct drm_fence_object ** used_fence);
 extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
drm_buffer_object * bo); +extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, + enum drm_bo_type type, uint64_t mask, + uint32_t hint, uint32_t page_alignment, + unsigned long buffer_start, + struct drm_buffer_object **bo); extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait); extern int drm_bo_mem_space(struct drm_buffer_object * bo, struct drm_bo_mem_reg * mem, int no_wait); extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced); -extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size, - enum drm_bo_type type, uint64_t mask, - uint32_t hint, uint32_t page_alignment, - unsigned long buffer_start, - struct drm_buffer_object **bo); -extern int drm_bo_init_mm(struct drm_device *dev, unsigned type, +extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type); +extern int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size); -extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type); -extern int drm_bo_add_user_object(struct drm_file *file_priv, - struct drm_buffer_object *bo, int sharable); -extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo); -extern int drm_bo_set_pin(struct drm_device *dev, - struct drm_buffer_object *bo, int pin); +extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle, + uint32_t fence_class, uint64_t flags, + uint64_t mask, uint32_t hint, + struct drm_bo_info_rep * rep, + struct drm_buffer_object **bo_rep); +extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv, + uint32_t handle, + int check_owner); +extern int drm_bo_do_validate(struct drm_buffer_object *bo, + uint64_t flags, uint64_t mask, uint32_t hint, + uint32_t fence_class, + int no_wait, + struct drm_bo_info_rep *rep); /* - * Buffer object memory move helpers. + * Buffer object memory move- and map helpers. 
* drm_bo_move.c */ @@ -514,11 +541,69 @@ extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, uint32_t fence_type, uint32_t fence_flags, struct drm_bo_mem_reg * new_mem); +extern int drm_bo_same_page(unsigned long offset, unsigned long offset2); +extern unsigned long drm_bo_offset_end(unsigned long offset, + unsigned long end); + +struct drm_bo_kmap_obj { + void *virtual; + struct page *page; + enum { + bo_map_iomap, + bo_map_vmap, + bo_map_kmap, + bo_map_premapped, + } bo_kmap_type; +}; + +static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem) +{ + *is_iomem = (map->bo_kmap_type == bo_map_iomap || + map->bo_kmap_type == bo_map_premapped); + return map->virtual; +} +extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map); +extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page, + unsigned long num_pages, struct drm_bo_kmap_obj *map); + -extern int drm_mem_reg_ioremap(struct drm_device *dev, - struct drm_bo_mem_reg *mem, void **virtual); -extern void drm_mem_reg_iounmap(struct drm_device *dev, - struct drm_bo_mem_reg *mem, void *virtual); +/* + * drm_regman.c + */ + +struct drm_reg { + struct list_head head; + struct drm_fence_object *fence; + uint32_t fence_type; + uint32_t new_fence_type; +}; + +struct drm_reg_manager { + struct list_head free; + struct list_head lru; + struct list_head unfenced; + + int (*reg_reusable)(const struct drm_reg *reg, const void *data); + void (*reg_destroy)(struct drm_reg *reg); +}; + +extern int drm_regs_alloc(struct drm_reg_manager *manager, + const void *data, + uint32_t fence_class, + uint32_t fence_type, + int interruptible, + int no_wait, + struct drm_reg **reg); + +extern void drm_regs_fence(struct drm_reg_manager *regs, + struct drm_fence_object *fence); + +extern void drm_regs_free(struct drm_reg_manager *manager); +extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg); +extern void drm_regs_init(struct drm_reg_manager *manager, + int (*reg_reusable)(const struct drm_reg *, + const void *), + void (*reg_destroy)(struct drm_reg *)); extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg * mem, void **virtual); @@ -531,5 +616,4 @@ extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg * #else #define DRM_ASSERT_LOCKED(_mutex) #endif - #endif diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c index 60c64cba..33bbe1d4 100644 --- a/linux-core/drm_ttm.c +++ b/linux-core/drm_ttm.c @@ -35,11 +35,12 @@ static void drm_ttm_ipi_handler(void *null) flush_agp_cache(); } -static void drm_ttm_cache_flush(void) +void drm_ttm_cache_flush(void) { if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0) DRM_ERROR("Timed out waiting for drm cache flush.\n"); } +EXPORT_SYMBOL(drm_ttm_cache_flush); /* * Use kmalloc if possible. Otherwise fall back to vmalloc. 
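
The drm_bo_kmap_obj interface added to drm_objects.h above replaces ad-hoc drm_mem_reg_ioremap() calls for short-lived CPU access to a buffer object. A minimal usage sketch, not part of this patch (the helper name and error handling are illustrative only; drm_bo_kmap/drm_bmo_virtual/drm_bo_kunmap are the declarations shown above):

/* Illustrative helper (not in this patch): copy `size` bytes into the
 * start of a buffer object through the new kmap interface. */
static int example_bo_write(struct drm_buffer_object *bo,
			    const void *src, unsigned long size)
{
	struct drm_bo_kmap_obj map;
	void *virtual;
	int is_iomem;
	int ret;

	/* Map the leading pages; drm_bo_kmap() picks ioremap/vmap/kmap
	 * internally depending on where the object currently lives. */
	ret = drm_bo_kmap(bo, 0, (size + PAGE_SIZE - 1) >> PAGE_SHIFT, &map);
	if (ret)
		return ret;

	/* drm_bmo_virtual() reports whether the mapping is I/O memory
	 * (iomap/premapped), which dictates the copy primitive. */
	virtual = drm_bmo_virtual(&map, &is_iomem);
	if (is_iomem)
		memcpy_toio((void __iomem *)virtual, src, size);
	else
		memcpy(virtual, src, size);

	drm_bo_kunmap(&map);
	return 0;
}
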
@@ -207,7 +208,7 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
 	return p;
 }
 
-static int drm_ttm_populate(struct drm_ttm * ttm)
+int drm_ttm_populate(struct drm_ttm * ttm)
 {
 	struct page *page;
 	unsigned long i;
@@ -308,7 +309,7 @@ void drm_ttm_unbind(struct drm_ttm * ttm)
 	drm_ttm_fixup_caching(ttm);
 }
 
-int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
+int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
 {
 	int ret = 0;
@@ -325,17 +326,16 @@ int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
 	if (ret)
 		return ret;
 
-	if (ttm->state == ttm_unbound && !cached) {
+	if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
 		drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
 	}
 
-	if ((ret = be->func->bind(be, aper_offset, cached))) {
+	if ((ret = be->func->bind(be, bo_mem))) {
 		ttm->state = ttm_evicted;
 		DRM_ERROR("Couldn't bind backend.\n");
 		return ret;
 	}
 
-	ttm->aper_offset = aper_offset;
 	ttm->state = ttm_bound;
 
 	return 0;
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 69a1aab7..682a899a 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -38,9 +38,11 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
 	return drm_agp_init_ttm(dev);
 }
 
-int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int i915_fence_types(struct drm_buffer_object *bo,
+		     uint32_t * fclass,
+		     uint32_t * type)
 {
-	if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+	if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
 		*type = 3;
 	else
 		*type = 1;
@@ -71,6 +73,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_CACHED;
 		man->drm_bus_maptype = 0;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_TT:
 		if (!(drm_core_has_AGP(dev) && dev->agp)) {
@@ -84,6 +87,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
 		man->drm_bus_maptype = _DRM_AGP;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_VRAM:
 		if (!(drm_core_has_AGP(dev) && dev->agp)) {
@@ -97,6 +101,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
 		    _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_NEEDS_IOREMAP;
 		man->drm_bus_maptype = _DRM_AGP;
+		man->gpu_offset = 0;
 		break;
 	case DRM_BO_MEM_PRIV0:	/* for OS preallocated space */
 		DRM_ERROR("PRIV0 not used yet.\n");
@@ -199,7 +204,7 @@ static int i915_move_flip(struct drm_buffer_object * bo,
 	if (ret)
 		return ret;
 
-	ret = drm_bind_ttm(bo->ttm, 1, tmp_mem.mm_node->start);
+	ret = drm_bind_ttm(bo->ttm, &tmp_mem);
 	if (ret)
 		goto out_cleanup;
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index abea8ee1..330c870e 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -42,7 +42,7 @@ static void i915_perform_flush(struct drm_device * dev)
 {
 	struct drm_i915_private *dev_priv = (struct drm_i915_private *) dev->dev_private;
 	struct drm_fence_manager *fm = &dev->fm;
-	struct drm_fence_class_manager *fc = &fm->class[0];
+	struct drm_fence_class_manager *fc = &fm->fence_class[0];
 	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	uint32_t flush_flags = 0;
 	uint32_t flush_sequence = 0;
@@ -63,7 +63,8 @@ static void i915_perform_flush(struct drm_device * dev)
 
 		diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
 		if (diff < driver->wrap_diff && diff != 0) {
-			drm_fence_handler(dev, 0, sequence, DRM_FENCE_TYPE_EXE);
+			drm_fence_handler(dev, 0, sequence,
+					  DRM_FENCE_TYPE_EXE, 0);
 		}
 
 		if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
@@ -82,7 +83,7 @@ static void i915_perform_flush(struct drm_device * dev)
 			flush_flags = dev_priv->flush_flags;
 			flush_sequence = dev_priv->flush_sequence;
 			dev_priv->flush_pending = 0;
-			drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+			drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
 		}
 	}
 
@@ -103,7 +104,7 @@ static void i915_perform_flush(struct drm_device * dev)
 		flush_flags = dev_priv->flush_flags;
 		flush_sequence = dev_priv->flush_sequence;
 		dev_priv->flush_pending = 0;
-		drm_fence_handler(dev, 0, flush_sequence, flush_flags);
+		drm_fence_handler(dev, 0, flush_sequence, flush_flags, 0);
 	}
 }
 
diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c
index 6c73b0d3..01de67de 100644
--- a/linux-core/nouveau_drv.c
+++ b/linux-core/nouveau_drv.c
@@ -29,7 +29,16 @@
 #include "drm_pciids.h"
 
 static struct pci_device_id pciidlist[] = {
-	nouveau_PCI_IDS
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask = 0xff << 16,
+	},
+	{
+		PCI_DEVICE(PCI_VENDOR_ID_NVIDIA_SGS, PCI_ANY_ID),
+		.class = PCI_BASE_CLASS_DISPLAY << 16,
+		.class_mask = 0xff << 16,
+	}
 };
 
 extern struct drm_ioctl_desc nouveau_ioctls[];
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index 97d5330b..b86c5d7c 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -80,16 +80,16 @@ nouveau_sgdma_clear(struct drm_ttm_backend *be)
 }
 
 static int
-nouveau_sgdma_bind(struct drm_ttm_backend *be, unsigned long pg_start,
-		   int cached)
+nouveau_sgdma_bind(struct drm_ttm_backend *be, struct drm_bo_mem_reg *mem)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	uint64_t offset = (pg_start << PAGE_SHIFT);
+	uint64_t offset = (mem->mm_node->start << PAGE_SHIFT);
 	uint32_t i;
 
-	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", pg_start, offset, cached);
+	DRM_DEBUG("pg=0x%lx (0x%llx), cached=%d\n", mem->mm_node->start,
+		  offset, (mem->flags & DRM_BO_FLAG_CACHED) == 1);
 
 	if (offset & NV_CTXDMA_PAGE_MASK)
 		return -EINVAL;
@@ -188,7 +188,6 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
 
 	nvbe->dev = dev;
 	nvbe->backend.func = &nouveau_sgdma_backend;
-	nvbe->backend.mem_type = DRM_BO_MEM_TT;
 
 	return &nvbe->backend;
 }
@@ -278,6 +277,8 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_ttm_backend *be;
 	struct drm_scatter_gather sgreq;
+	struct drm_mm_node mm_node;
+	struct drm_bo_mem_reg mem;
 	int ret;
 
 	dev_priv->gart_info.sg_be = nouveau_sgdma_init_ttm(dev);
@@ -303,7 +304,10 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
 		return ret;
 	}
 
-	if ((ret = be->func->bind(be, 0, 0))) {
+	mm_node.start = 0;
+	mem.mm_node = &mm_node;
+
+	if ((ret = be->func->bind(be, &mem))) {
 		DRM_ERROR("failed bind: %d\n", ret);
 		return ret;
 	}
diff --git a/linux-core/nouveau_swmthd.c b/linux-core/nouveau_swmthd.c
new file mode 120000
index 00000000..c5390801
--- /dev/null
+++ b/linux-core/nouveau_swmthd.c
@@ -0,0 +1 @@
+../shared-core/nouveau_swmthd.c
\ No newline at end of file
diff --git a/linux-core/nouveau_swmthd.h b/linux-core/nouveau_swmthd.h
new file mode 120000
index 00000000..33425dcd
--- /dev/null
+++ b/linux-core/nouveau_swmthd.h
@@ -0,0 +1 @@
+../shared-core/nouveau_swmthd.h
\ No newline at end of file
diff --git a/linux-core/nv30_graph.c b/linux-core/nv30_graph.c
deleted file mode 120000
index 25568ecb..00000000
--- a/linux-core/nv30_graph.c
+++ /dev/null
@@ -1 +0,0 @@
-../shared-core/nv30_graph.c
\ No newline at end of file
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index eb5ea826..a6c59832 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -37,7 +37,8 @@ struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
 	return drm_agp_init_ttm(dev);
 }
 
-int via_fence_types(struct drm_buffer_object *bo, uint32_t * type)
+int via_fence_types(struct drm_buffer_object *bo, uint32_t * fclass,
+		    uint32_t * type)
 {
 	*type = 3;
 	return 0;
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
index a6d4ece9..9af1bf3b 100644
--- a/linux-core/via_fence.c
+++ b/linux-core/via_fence.c
@@ -42,7 +42,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	struct drm_fence_class_manager *fc = &dev->fm.class[class];
+	struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
 	uint32_t pending_flush_types = 0;
 	uint32_t signaled_flush_types = 0;
 	uint32_t status;
@@ -98,7 +98,8 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
 			drm_idlelock_release(&dev->lock);
 			dev_priv->have_idlelock = 0;
 		}
-		drm_fence_handler(dev, 0, dev_priv->emit_0_sequence, signaled_flush_types);
+		drm_fence_handler(dev, 0, dev_priv->emit_0_sequence,
+				  signaled_flush_types, 0);
 	}
 }
 
@@ -204,7 +205,7 @@ void via_fence_timer(unsigned long data)
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	struct drm_fence_manager *fm = &dev->fm;
 	uint32_t pending_flush;
-	struct drm_fence_class_manager *fc = &dev->fm.class[0];
+	struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
 
 	if (!dev_priv)
 		return;
diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c
index 261f4e13..d7b23c89 100644
--- a/linux-core/xgi_cmdlist.c
+++ b/linux-core/xgi_cmdlist.c
@@ -138,11 +138,11 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
 			xgi_emit_flush(info, FALSE);
 		}
 
-		info->cmdring.last_ptr[1] = begin[1];
-		info->cmdring.last_ptr[2] = begin[2];
-		info->cmdring.last_ptr[3] = begin[3];
+		info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
+		info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]);
+		info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]);
 		DRM_WRITEMEMORYBARRIER();
-		info->cmdring.last_ptr[0] = begin[0];
+		info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]);
 
 		triggerHWCommandList(info);
 	}
@@ -258,6 +258,8 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
 	const unsigned int flush_size = sizeof(flush_command);
 	u32 *batch_addr;
 	u32 hw_addr;
+	unsigned int i;
+
 
 	/* check buf is large enough to contain a new flush batch */
 	if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
@@ -269,18 +271,20 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
 	batch_addr = info->cmdring.ptr
 		+ (info->cmdring.ring_offset / 4);
 
-	(void) memcpy(batch_addr, flush_command, flush_size);
+	for (i = 0; i < (flush_size / 4); i++) {
+		batch_addr[i] = cpu_to_le32(flush_command[i]);
+	}
 
 	if (stop) {
-		*batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK;
+		*batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK);
 	}
 
-	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4);
-	info->cmdring.last_ptr[2] = hw_addr >> 4;
+	info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4));
+	info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4);
 	info->cmdring.last_ptr[3] = 0;
 	DRM_WRITEMEMORYBARRIER();
-	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
-		| (BEGIN_VALID_MASK);
+	info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
+		| (BEGIN_VALID_MASK));
 
 	triggerHWCommandList(info);
 
@@ -299,13 +303,13 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
  */
 void xgi_emit_nop(struct xgi_info * info)
 {
-	info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK
-		| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence);
+	info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK
+		| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence));
 	info->cmdring.last_ptr[2] = 0;
 	info->cmdring.last_ptr[3] = 0;
 	DRM_WRITEMEMORYBARRIER();
-	info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
-		| (BEGIN_VALID_MASK);
+	info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
+		| (BEGIN_VALID_MASK));
 
 	triggerHWCommandList(info);
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
index bc6873a9..4e66197e 100644
--- a/linux-core/xgi_drv.c
+++ b/linux-core/xgi_drv.c
@@ -351,9 +351,9 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	struct xgi_info *info = dev->dev_private;
-	const u32 irq_bits = DRM_READ32(info->mmio_map,
+	const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
 					(0x2800
-					 + M2REG_AUTO_LINK_STATUS_ADDRESS))
+					 + M2REG_AUTO_LINK_STATUS_ADDRESS)))
 		& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
 		   | M2REG_ACTIVE_INTERRUPT_0_MASK
 		   | M2REG_ACTIVE_INTERRUPT_2_MASK
@@ -363,7 +363,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
 	if (irq_bits != 0) {
 		DRM_WRITE32(info->mmio_map,
 			    0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
-			    M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits);
+			    cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
 		xgi_fence_handler(dev);
 		return IRQ_HANDLED;
 	} else {
diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h
index a68dc03b..d9a94f5f 100644
--- a/linux-core/xgi_drv.h
+++ b/linux-core/xgi_drv.h
@@ -35,11 +35,11 @@
 
 #define DRIVER_NAME		"xgi"
 #define DRIVER_DESC		"XGI XP5 / XP10 / XG47"
-#define DRIVER_DATE		"20070918"
+#define DRIVER_DATE		"20071003"
 
 #define DRIVER_MAJOR		1
 #define DRIVER_MINOR		1
-#define DRIVER_PATCHLEVEL	0
+#define DRIVER_PATCHLEVEL	3
 
 #include "xgi_cmdlist.h"
 #include "xgi_drm.h"
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
index adedf300..526bc5db 100644
--- a/linux-core/xgi_fence.c
+++ b/linux-core/xgi_fence.c
@@ -33,7 +33,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
 {
 	struct xgi_info * info = dev->dev_private;
-	struct drm_fence_class_manager * fc = &dev->fm.class[class];
+	struct drm_fence_class_manager * fc = &dev->fm.fence_class[class];
 	uint32_t pending_flush_types = 0;
 	uint32_t signaled_flush_types = 0;
 
@@ -48,8 +48,8 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
 
 	if (pending_flush_types) {
 		if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
-			const u32 begin_id = DRM_READ32(info->mmio_map,
-							0x2820)
+			const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map,
+							0x2820))
 				& BEGIN_BEGIN_IDENTIFICATION_MASK;
 
 			if (begin_id != info->complete_sequence) {
@@ -60,7 +60,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
 
 		if (signaled_flush_types) {
 			drm_fence_handler(dev, 0, info->complete_sequence,
-					  signaled_flush_types);
+					  signaled_flush_types, 0);
 		}
 	}
 
diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c
index 50a721c0..4a4a9844 100644
--- a/linux-core/xgi_misc.c
+++ b/linux-core/xgi_misc.c
@@ -38,12 +38,12 @@ static unsigned int s_invalid_begin = 0;
 
 static bool xgi_validate_signal(struct drm_map * map)
 {
-	if (DRM_READ32(map, 0x2800) & 0x001c0000) {
+	if (le32_to_cpu(DRM_READ32(map, 0x2800) & 0x001c0000)) {
 		u16 check;
 
 		/* Check Read back status */
 		DRM_WRITE8(map, 0x235c, 0x80);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 
 		if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
 			return FALSE;
@@ -51,28 +51,28 @@ static bool xgi_validate_signal(struct drm_map * map)
 
 		/* Check RO channel */
 		DRM_WRITE8(map, 0x235c, 0x83);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
 			return FALSE;
 		}
 
 		/* Check RW channel */
 		DRM_WRITE8(map, 0x235c, 0x88);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
 			return FALSE;
 		}
 
 		/* Check RO channel outstanding */
 		DRM_WRITE8(map, 0x235c, 0x8f);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if (0 != (check & 0x3ff)) {
 			return FALSE;
 		}
 
 		/* Check RW channel outstanding */
 		DRM_WRITE8(map, 0x235c, 0x90);
-		check = DRM_READ16(map, 0x2360);
+		check = le16_to_cpu(DRM_READ16(map, 0x2360));
 		if (0 != (check & 0x3ff)) {
 			return FALSE;
 		}
@@ -89,7 +89,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
 	int time_out = 0xffff;
 
 	DRM_WRITE8(map, 0xb057, 8);
-	while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) {
+	while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
 		while (0 != ((--time_out) & 0xfff))
 			/* empty */ ;
 
@@ -100,7 +100,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
 			u8 old_36;
 
 			DRM_INFO("Can not reset back 0x%x!\n",
-				 DRM_READ32(map, 0x2800));
+				 le32_to_cpu(DRM_READ32(map, 0x2800)));
 
 			DRM_WRITE8(map, 0xb057, 0);
 
@@ -137,7 +137,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
 
 bool xgi_ge_irq_handler(struct xgi_info * info)
 {
-	const u32 int_status = DRM_READ32(info->mmio_map, 0x2810);
+	const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
 	bool is_support_auto_reset = FALSE;
 
 	/* Check GE on/off */
@@ -146,7 +146,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
 
 		/* We got GE stall interrupt. */
 		DRM_WRITE32(info->mmio_map, 0x2810,
-			    int_status | 0x04000000);
+			    cpu_to_le32(int_status | 0x04000000));
 
 		if (is_support_auto_reset) {
 			static cycles_t last_tick;
@@ -176,7 +176,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
 	} else if (0 != (0x1 & int_status)) {
 		s_invalid_begin++;
 		DRM_WRITE32(info->mmio_map, 0x2810,
-			    cpu_to_le32((int_status & ~0x01) | 0x04000000));
+			    cpu_to_le32((int_status & ~0x01) | 0x04000000));
 	}
 
 	return TRUE;
@@ -326,7 +326,7 @@ void xgi_waitfor_pci_idle(struct xgi_info * info)
 	unsigned int same_count = 0;
 
 	while (idleCount < 5) {
-		const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
+		const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
 			& IDLE_MASK;
 
 		if (status == old_status) {
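
One pattern runs through all of the xgi changes above: the chip's registers and command ring hold little-endian data, so every multi-byte MMIO access is now wrapped in an explicit byte-order conversion. The conversions compile to no-ops on little-endian CPUs but make the driver correct on big-endian hosts. A minimal sketch of the read-modify-write idiom (hypothetical helper, not from this patch; DRM_READ32/DRM_WRITE32 are the access macros used throughout the diff):

/* Hypothetical illustration of the endianness idiom used above:
 * the register holds little-endian data, the CPU may not. */
static void example_rmw_register(struct drm_map *mmio, unsigned long reg,
				 u32 set_bits)
{
	/* DRM_READ32() returns the raw little-endian register value;
	 * convert it to CPU byte order before operating on it. */
	u32 val = le32_to_cpu(DRM_READ32(mmio, reg));

	val |= set_bits;

	/* Convert back to little-endian before the value hits the bus. */
	DRM_WRITE32(mmio, reg, cpu_to_le32(val));
}
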