Diffstat (limited to 'linux-core')
39 files changed, 2018 insertions, 1257 deletions
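The bulk of this patch mechanically renames the drm_memrange allocator back to drm_mm. Every call site in the hunks below follows the same two-step idiom the renamed API keeps: search for a free hole, then carve a block out of it. A minimal sketch of that idiom, assuming the caller already holds dev->struct_mutex as the drm_bo.c call sites do (the helper name is invented for illustration):

static struct drm_mm_node *example_alloc(struct drm_mm *mm,
					 unsigned long num_pages,
					 unsigned alignment)
{
	struct drm_mm_node *node;

	/* Find a free hole big enough; best_match=1 asks for the
	 * smallest hole that still fits. */
	node = drm_mm_search_free(mm, num_pages, alignment, 1);
	if (!node)
		return NULL;	/* no hole; caller may evict and retry */

	/* Carve the allocation out of the hole; this may split the
	 * hole and returns the allocated sub-block. */
	return drm_mm_get_block(node, num_pages, alignment);
}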
diff --git a/linux-core/Makefile b/linux-core/Makefile index 1790bdb0..aa212cf9 100644 --- a/linux-core/Makefile +++ b/linux-core/Makefile @@ -271,6 +271,10 @@ ifneq ($(PAGE_AGP),0) EXTRA_CFLAGS += -DHAVE_PAGE_AGP endif +ifeq ($(OS_HAS_GEM), 1) +EXTRA_CFLAGS += -DOS_HAS_GEM=1 +endif + # Start with all modules turned off. CONFIG_DRM_GAMMA := n CONFIG_DRM_TDFX := n @@ -337,8 +341,10 @@ ifneq (,$(findstring i810,$(DRM_MODULES))) CONFIG_DRM_I810 := m endif ifneq (,$(findstring i915,$(DRM_MODULES))) +ifeq ($(OS_HAS_GEM), 1) CONFIG_DRM_I915 := m endif +endif GIT_REVISION := $(shell cd "$(DRMSRCDIR)" && git-describe --abbrev=17) ifneq ($(GIT_REVISION),) diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel index 036461db..b370e015 100644 --- a/linux-core/Makefile.kernel +++ b/linux-core/Makefile.kernel @@ -12,7 +12,7 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \ drm_memory_debug.o ati_pcigart.o drm_sman.o \ - drm_hashtab.o drm_memrange.o drm_compat.o \ + drm_hashtab.o drm_mm.o drm_compat.o \ drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o \ drm_crtc.o drm_edid.o drm_modes.o drm_crtc_helper.o \ drm_regman.o drm_vm_nopage_compat.o drm_gem.o @@ -20,10 +20,12 @@ tdfx-objs := tdfx_drv.o r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o i810-objs := i810_drv.o i810_dma.o -i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_gem.o \ +i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ + i915_compat.o i915_suspend.o i915_opregion.o \ + i915_gem.o i915_gem_debug.o i915_gem_proc.o i915_gem_tiling.o \ intel_display.o intel_crt.o intel_lvds.o intel_bios.o \ intel_sdvo.o intel_modes.o intel_i2c.o i915_init.o intel_fb.o \ - intel_tv.o i915_compat.o intel_dvo.o dvo_ch7xxx.o \ + intel_tv.o intel_dvo.o dvo_ch7xxx.o \ dvo_ch7017.o dvo_ivch.o dvo_tfp410.o dvo_sil164.o nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \ nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \ diff --git a/linux-core/drmP.h b/linux-core/drmP.h index 848fa885..1b71b45d 100644 --- a/linux-core/drmP.h +++ b/linux-core/drmP.h @@ -461,11 +461,6 @@ struct drm_lock_data { uint32_t kernel_waiters; uint32_t user_waiters; int idle_has_lock; - /** - * Boolean signaling that the lock is held on behalf of the - * file_priv client by the kernel in an ioctl handler. 
- */ - int kernel_held; }; /** @@ -541,17 +536,17 @@ struct drm_sigdata { * Generic memory manager structs */ -struct drm_memrange_node { +struct drm_mm_node { struct list_head fl_entry; struct list_head ml_entry; int free; unsigned long start; unsigned long size; - struct drm_memrange *mm; + struct drm_mm *mm; void *private; }; -struct drm_memrange { +struct drm_mm { struct list_head fl_entry; struct list_head ml_entry; }; @@ -567,7 +562,7 @@ struct drm_map_list { uint64_t user_token; struct drm_master *master; /** if this map is associated with a specific master */ - struct drm_memrange_node *file_offset_node; + struct drm_mm_node *file_offset_node; }; typedef struct drm_map drm_local_map_t; @@ -888,7 +883,7 @@ struct drm_device { struct list_head maplist; /**< Linked list of regions */ int map_count; /**< Number of mappable regions */ struct drm_open_hash map_hash; /**< User token hash table for maps */ - struct drm_memrange offset_manager; /**< User token manager */ + struct drm_mm offset_manager; /**< User token manager */ struct address_space *dev_mapping; /**< For unmap_mapping_range() */ struct page *ttm_dummy_page; @@ -944,6 +939,14 @@ struct drm_device { /** \name VBLANK IRQ support */ /*@{ */ + /* + * At load time, disabling the vblank interrupt won't be allowed since + * old clients may not call the modeset ioctl and therefore misbehave. + * Once the modeset ioctl *has* been called though, we can safely + * disable them when unused. + */ + int vblank_disable_allowed; + wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ spinlock_t vbl_lock; @@ -952,13 +955,12 @@ struct drm_device { atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */ u32 *last_vblank; /* protected by dev->vbl_lock, used */ /* for wraparound handling */ - u32 *vblank_offset; /* used to track how many vblanks */ int *vblank_enabled; /* so we don't call enable more than once per disable */ - u32 *vblank_premodeset; /* were lost during modeset */ + int *vblank_inmodeset; /* Display driver is setting mode */ struct timer_list vblank_disable_timer; - unsigned long max_vblank_count; /**< size of vblank counter register */ + u32 max_vblank_count; /**< size of vblank counter register */ spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ void (*locked_tasklet_func)(struct drm_device *dev); @@ -1296,7 +1298,6 @@ extern int drm_wait_hotplug(struct drm_device *dev, void *data, struct drm_file extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq); extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); extern u32 drm_vblank_count(struct drm_device *dev, int crtc); -extern void drm_update_vblank_count(struct drm_device *dev, int crtc); extern void drm_handle_vblank(struct drm_device *dev, int crtc); extern void drm_handle_hotplug(struct drm_device *dev); extern int drm_vblank_get(struct drm_device *dev, int crtc); @@ -1399,26 +1400,22 @@ extern int drm_sysfs_connector_add(struct drm_connector *connector); extern void drm_sysfs_connector_remove(struct drm_connector *connector); /* - * Basic memory manager support (drm_memrange.c) + * Basic memory manager support (drm_mm.c) */ -extern struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent, - unsigned long size, - unsigned alignment); -extern void drm_memrange_put_block(struct drm_memrange_node *cur); -extern struct drm_memrange_node 
*drm_memrange_search_free(const struct drm_memrange *mm, - unsigned long size, - unsigned alignment, int best_match); -extern int drm_memrange_init(struct drm_memrange *mm, - unsigned long start, unsigned long size); -extern void drm_memrange_takedown(struct drm_memrange *mm); -extern int drm_memrange_clean(struct drm_memrange *mm); -extern unsigned long drm_memrange_tail_space(struct drm_memrange *mm); -extern int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, - unsigned long size); -extern int drm_memrange_add_space_to_tail(struct drm_memrange *mm, - unsigned long size); -static inline struct drm_memrange *drm_get_mm(struct drm_memrange_node *block) +extern struct drm_mm_node * drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, + unsigned alignment); +extern void drm_mm_put_block(struct drm_mm_node *cur); +extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size, + unsigned alignment, int best_match); +extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size); +extern void drm_mm_takedown(struct drm_mm *mm); +extern int drm_mm_clean(struct drm_mm *mm); +extern unsigned long drm_mm_tail_space(struct drm_mm *mm); +extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); +extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); + +static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block) { return block->mm; } @@ -1488,6 +1485,7 @@ void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); +extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev); extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev, diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index a1c1be89..09b3fa39 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -417,14 +417,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all) if (!bo->fence) { list_del_init(&bo->lru); if (bo->mem.mm_node) { - drm_memrange_put_block(bo->mem.mm_node); + drm_mm_put_block(bo->mem.mm_node); if (bo->pinned_node == bo->mem.mm_node) bo->pinned_node = NULL; bo->mem.mm_node = NULL; } list_del_init(&bo->pinned_lru); if (bo->pinned_node) { - drm_memrange_put_block(bo->pinned_node); + drm_mm_put_block(bo->pinned_node); bo->pinned_node = NULL; } list_del_init(&bo->ddestroy); @@ -778,7 +778,7 @@ out: mutex_lock(&dev->struct_mutex); if (evict_mem.mm_node) { if (evict_mem.mm_node != bo->pinned_node) - drm_memrange_put_block(evict_mem.mm_node); + drm_mm_put_block(evict_mem.mm_node); evict_mem.mm_node = NULL; } drm_bo_add_to_lru(bo); @@ -797,7 +797,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev, struct drm_bo_mem_reg *mem, uint32_t mem_type, int no_wait) { - struct drm_memrange_node *node; + struct drm_mm_node *node; struct drm_buffer_manager *bm = &dev->bm; struct drm_buffer_object *entry; struct drm_mem_type_manager *man = &bm->man[mem_type]; @@ -807,7 +807,7 @@ static int drm_bo_mem_force_space(struct drm_device *dev, mutex_lock(&dev->struct_mutex); do { - node = drm_memrange_search_free(&man->manager, num_pages, + node = drm_mm_search_free(&man->manager, num_pages, mem->page_alignment, 1); if (node) break; @@ -833,7 +833,7 @@ static int drm_bo_mem_force_space(struct drm_device 
*dev, return -ENOMEM; } - node = drm_memrange_get_block(node, num_pages, mem->page_alignment); + node = drm_mm_get_block(node, num_pages, mem->page_alignment); if (unlikely(!node)) { mutex_unlock(&dev->struct_mutex); return -ENOMEM; @@ -911,7 +911,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo, int type_found = 0; int type_ok = 0; int has_eagain = 0; - struct drm_memrange_node *node = NULL; + struct drm_mm_node *node = NULL; int ret; mem->mm_node = NULL; @@ -939,10 +939,10 @@ int drm_bo_mem_space(struct drm_buffer_object *bo, mutex_lock(&dev->struct_mutex); if (man->has_type && man->use_type) { type_found = 1; - node = drm_memrange_search_free(&man->manager, mem->num_pages, + node = drm_mm_search_free(&man->manager, mem->num_pages, mem->page_alignment, 1); if (node) - node = drm_memrange_get_block(node, mem->num_pages, + node = drm_mm_get_block(node, mem->num_pages, mem->page_alignment); } mutex_unlock(&dev->struct_mutex); @@ -1156,7 +1156,7 @@ out_unlock: if (ret || !move_unfenced) { if (mem.mm_node) { if (mem.mm_node != bo->pinned_node) - drm_memrange_put_block(mem.mm_node); + drm_mm_put_block(mem.mm_node); mem.mm_node = NULL; } drm_bo_add_to_lru(bo); @@ -1248,7 +1248,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo, if (bo->pinned_node != bo->mem.mm_node) { if (bo->pinned_node != NULL) - drm_memrange_put_block(bo->pinned_node); + drm_mm_put_block(bo->pinned_node); bo->pinned_node = bo->mem.mm_node; } @@ -1259,7 +1259,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo, mutex_lock(&dev->struct_mutex); if (bo->pinned_node != bo->mem.mm_node) - drm_memrange_put_block(bo->pinned_node); + drm_mm_put_block(bo->pinned_node); list_del_init(&bo->pinned_lru); bo->pinned_node = NULL; @@ -1554,7 +1554,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo, if (bo->pinned_node == bo->mem.mm_node) bo->pinned_node = NULL; if (bo->pinned_node != NULL) { - drm_memrange_put_block(bo->pinned_node); + drm_mm_put_block(bo->pinned_node); bo->pinned_node = NULL; } mutex_unlock(&dev->struct_mutex); @@ -1695,8 +1695,8 @@ int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean) drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0); drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1); - if (drm_memrange_clean(&man->manager)) { - drm_memrange_takedown(&man->manager); + if (drm_mm_clean(&man->manager)) { + drm_mm_takedown(&man->manager); } else { ret = -EBUSY; } @@ -1767,7 +1767,7 @@ int drm_bo_init_mm(struct drm_device *dev, unsigned type, DRM_ERROR("Zero size memory manager type %d\n", type); return ret; } - ret = drm_memrange_init(&man->manager, p_offset, p_size); + ret = drm_mm_init(&man->manager, p_offset, p_size); if (ret) return ret; } @@ -2008,7 +2008,7 @@ void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo) list->user_token = 0; } if (list->file_offset_node) { - drm_memrange_put_block(list->file_offset_node); + drm_mm_put_block(list->file_offset_node); list->file_offset_node = NULL; } @@ -2052,7 +2052,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) atomic_inc(&bo->usage); map->handle = (void *)bo; - list->file_offset_node = drm_memrange_search_free(&dev->offset_manager, + list->file_offset_node = drm_mm_search_free(&dev->offset_manager, bo->mem.num_pages, 0, 0); if (unlikely(!list->file_offset_node)) { @@ -2060,7 +2060,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo) return -ENOMEM; } - list->file_offset_node = drm_memrange_get_block(list->file_offset_node, + 
list->file_offset_node = drm_mm_get_block(list->file_offset_node, bo->mem.num_pages, 0); if (unlikely(!list->file_offset_node)) { diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 9147a475..5c290af2 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -41,7 +41,7 @@ static void drm_bo_free_old_node(struct drm_buffer_object *bo) if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); - drm_memrange_put_block(old_mem->mm_node); + drm_mm_put_block(old_mem->mm_node); mutex_unlock(&bo->dev->struct_mutex); } old_mem->mm_node = NULL; diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h index f1efc1fe..88c5bb1d 100644 --- a/linux-core/drm_compat.h +++ b/linux-core/drm_compat.h @@ -391,10 +391,22 @@ extern struct page *drm_vm_sg_nopage(struct vm_area_struct *vma, #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,26) #define drm_on_each_cpu(handler, data, wait) \ - on_each_cpu(handler, data, wait) + on_each_cpu(handler, data, wait) #else #define drm_on_each_cpu(handler, data, wait) \ - on_each_cpu(handler, data, wait, 1) + on_each_cpu(handler, data, wait, 1) +#endif + +#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) +#define drm_core_ioremap_wc drm_core_ioremap +#endif + +#ifndef OS_HAS_GEM +#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) +#define OS_HAS_GEM 1 +#else +#define OS_HAS_GEM 0 +#endif #endif #endif diff --git a/linux-core/drm_crtc.c b/linux-core/drm_crtc.c index 8375bf9a..aa6749d6 100644 --- a/linux-core/drm_crtc.c +++ b/linux-core/drm_crtc.c @@ -285,7 +285,9 @@ struct drm_crtc *drm_crtc_from_fb(struct drm_device *dev, struct drm_framebuffer *drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { - drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB); + if(drm_mode_object_get(dev, &fb->base, DRM_MODE_OBJECT_FB)) + return NULL; + fb->dev = dev; fb->funcs = funcs; dev->mode_config.num_fb++; diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c index 7c43fd00..11044bff 100644 --- a/linux-core/drm_drv.c +++ b/linux-core/drm_drv.c @@ -146,9 +146,11 @@ static struct drm_ioctl_desc drm_ioctls[] = { DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER), DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER), +#if OS_HAS_GEM DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), +#endif }; #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) @@ -393,7 +395,7 @@ static void drm_cleanup(struct drm_device * dev) drm_ctxbitmap_cleanup(dev); drm_ht_remove(&dev->map_hash); - drm_memrange_takedown(&dev->offset_manager); + drm_mm_takedown(&dev->offset_manager); if (drm_core_check_feature(dev, DRIVER_MODESET)) drm_put_minor(&dev->control); @@ -600,9 +602,10 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END) && (nr < DRM_COMMAND_BASE + dev->driver->num_ioctls)) ioctl = &dev->driver->ioctls[nr - DRM_COMMAND_BASE]; - else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) + else if ((nr >= DRM_COMMAND_END) || (nr < DRM_COMMAND_BASE)) { ioctl = &drm_ioctls[nr]; - else { + cmd = ioctl->cmd; + } else { retcode = -EINVAL; goto err_i1; } @@ -619,6 +622,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) goto err_i1; } #endif + func = ioctl->func; /* is 
there a local override? */ if ((nr == DRM_IOCTL_NR(DRM_IOCTL_DMA)) && dev->driver->dma_ioctl) @@ -644,7 +648,7 @@ long drm_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) retcode = func(dev, kdata, file_priv); } - if ((retcode == 0) && (cmd & IOC_OUT)) { + if (cmd & IOC_OUT) { if (copy_to_user((void __user *)arg, kdata, _IOC_SIZE(cmd)) != 0) retcode = -EFAULT; diff --git a/linux-core/drm_gem.c b/linux-core/drm_gem.c index 05320376..0cbf9cab 100644 --- a/linux-core/drm_gem.c +++ b/linux-core/drm_gem.c @@ -25,6 +25,12 @@ * */ +#include <linux/version.h> + +#include "drmP.h" + +#if OS_HAS_GEM + #include <linux/types.h> #include <linux/slab.h> #include <linux/mm.h> @@ -34,7 +40,6 @@ #include <linux/module.h> #include <linux/mman.h> #include <linux/pagemap.h> -#include "drmP.h" /** @file drm_gem.c * @@ -417,3 +422,22 @@ drm_gem_object_handle_free(struct kref *kref) } EXPORT_SYMBOL(drm_gem_object_handle_free); +#else + +int drm_gem_init(struct drm_device *dev) +{ + return 0; +} + +void drm_gem_open(struct drm_device *dev, struct drm_file *file_private) +{ + +} + +void +drm_gem_release(struct drm_device *dev, struct drm_file *file_private) +{ + +} + +#endif diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c index 0dfbe57a..800768ae 100644 --- a/linux-core/drm_irq.c +++ b/linux-core/drm_irq.c @@ -77,10 +77,16 @@ static void vblank_disable_fn(unsigned long arg) unsigned long irqflags; int i; + if (!dev->vblank_disable_allowed) + return; + for (i = 0; i < dev->num_crtcs; i++) { spin_lock_irqsave(&dev->vbl_lock, irqflags); if (atomic_read(&dev->vblank_refcount[i]) == 0 && dev->vblank_enabled[i]) { + DRM_DEBUG("disabling vblank on crtc %d\n", i); + dev->last_vblank[i] = + dev->driver->get_vblank_counter(dev, i); dev->driver->disable_vblank(dev, i); dev->vblank_enabled[i] = 0; } @@ -118,13 +124,9 @@ static void drm_vblank_cleanup(struct drm_device *dev) drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, DRM_MEM_DRIVER); - if (dev->vblank_premodeset) - drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) * - dev->num_crtcs, DRM_MEM_DRIVER); - - if (dev->vblank_offset) - drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * dev->num_crtcs, - DRM_MEM_DRIVER); + if (dev->vblank_inmodeset) + drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * + dev->num_crtcs, DRM_MEM_DRIVER); } int drm_vblank_init(struct drm_device *dev, int num_crtcs) @@ -167,13 +169,9 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) if (!dev->last_vblank) goto err; - dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32), - DRM_MEM_DRIVER); - if (!dev->vblank_premodeset) - goto err; - - dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); - if (!dev->vblank_offset) + dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), + DRM_MEM_DRIVER); + if (!dev->vblank_inmodeset) goto err; /* Zero per-crtc vblank stuff */ @@ -184,6 +182,8 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs) atomic_set(&dev->vblank_refcount[i], 0); } + dev->vblank_disable_allowed = 0; + return 0; err: @@ -426,8 +426,7 @@ int drm_control(struct drm_device *dev, void *data, */ u32 drm_vblank_count(struct drm_device *dev, int crtc) { - return atomic_read(&dev->_vblank_count[crtc]) + - dev->vblank_offset[crtc]; + return atomic_read(&dev->_vblank_count[crtc]); } EXPORT_SYMBOL(drm_vblank_count); @@ -440,10 +439,15 @@ EXPORT_SYMBOL(drm_vblank_count); * (specified by @crtc). 
Deal with wraparound, if it occurred, and * update the last read value so we can deal with wraparound on the next * call if necessary. + * + * Only necessary when going from off->on, to account for frames we + * didn't get an interrupt for. + * + * Note: caller must hold dev->vbl_lock since this reads & writes + * device vblank fields. */ -void drm_update_vblank_count(struct drm_device *dev, int crtc) +static void drm_update_vblank_count(struct drm_device *dev, int crtc) { - unsigned long irqflags; u32 cur_vblank, diff; /* @@ -454,20 +458,19 @@ void drm_update_vblank_count(struct drm_device *dev, int crtc) * a long time. */ cur_vblank = dev->driver->get_vblank_counter(dev, crtc); - spin_lock_irqsave(&dev->vbl_lock, irqflags); + diff = cur_vblank - dev->last_vblank[crtc]; if (cur_vblank < dev->last_vblank[crtc]) { - diff = dev->max_vblank_count - - dev->last_vblank[crtc]; - diff += cur_vblank; - } else { - diff = cur_vblank - dev->last_vblank[crtc]; + diff += dev->max_vblank_count; + + DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", + crtc, dev->last_vblank[crtc], cur_vblank, diff); } - dev->last_vblank[crtc] = cur_vblank; - spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + + DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", + crtc, diff); atomic_add(diff, &dev->_vblank_count[crtc]); } -EXPORT_SYMBOL(drm_update_vblank_count); /** * drm_vblank_get - get a reference count on vblank events @@ -475,9 +478,7 @@ EXPORT_SYMBOL(drm_update_vblank_count); * @crtc: which CRTC to own * * Acquire a reference count on vblank events to avoid having them disabled - * while in use. Note callers will probably want to update the master counter - * using drm_update_vblank_count() above before calling this routine so that - * wakeups occur on the right vblank event. + * while in use. * * RETURNS * Zero on success, nonzero on failure. @@ -487,15 +488,17 @@ int drm_vblank_get(struct drm_device *dev, int crtc) unsigned long irqflags; int ret = 0; - spin_lock_irqsave(&dev->vbl_lock, irqflags); + spin_lock_irqsave(&dev->vbl_lock, irqflags); /* Going from 0->1 means we have to enable interrupts again */ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && !dev->vblank_enabled[crtc]) { ret = dev->driver->enable_vblank(dev, crtc); if (ret) atomic_dec(&dev->vblank_refcount[crtc]); - else + else { dev->vblank_enabled[crtc] = 1; + drm_update_vblank_count(dev, crtc); + } } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); @@ -525,13 +528,21 @@ EXPORT_SYMBOL(drm_vblank_put); * * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET * ioctls around modesetting so that any lost vblank events are accounted for. + * + * Generally the counter will reset across mode sets. If interrupts are + * enabled around this call, we don't have to do anything since the counter + * will have already been incremented. 
*/ int drm_modeset_ctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_modeset_ctl *modeset = data; + unsigned long irqflags; int crtc, ret = 0; - u32 new; + + /* If drm_vblank_init() hasn't been called yet, just no-op */ + if (!dev->num_crtcs) + goto out; crtc = modeset->crtc; if (crtc >= dev->num_crtcs) { @@ -539,14 +550,28 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, goto out; } + /* + * To avoid all the problems that might happen if interrupts + * were enabled/disabled around or between these calls, we just + * have the kernel take a reference on the CRTC (just once though + * to avoid corrupting the count if multiple, mismatch calls occur), + * so that interrupts remain enabled in the interim. + */ switch (modeset->cmd) { case _DRM_PRE_MODESET: - dev->vblank_premodeset[crtc] = - dev->driver->get_vblank_counter(dev, crtc); + if (!dev->vblank_inmodeset[crtc]) { + dev->vblank_inmodeset[crtc] = 1; + drm_vblank_get(dev, crtc); + } break; case _DRM_POST_MODESET: - new = dev->driver->get_vblank_counter(dev, crtc); - dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new; + if (dev->vblank_inmodeset[crtc]) { + spin_lock_irqsave(&dev->vbl_lock, irqflags); + dev->vblank_disable_allowed = 1; + dev->vblank_inmodeset[crtc] = 0; + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); + drm_vblank_put(dev, crtc); + } break; default: ret = -EINVAL; @@ -580,7 +605,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) { union drm_wait_vblank *vblwait = data; - struct timeval now; int ret = 0; unsigned int flags, seq, crtc; @@ -601,7 +625,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data, if (crtc >= dev->num_crtcs) return -EINVAL; - drm_update_vblank_count(dev, crtc); + ret = drm_vblank_get(dev, crtc); + if (ret) + return ret; seq = drm_vblank_count(dev, crtc); switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { @@ -611,7 +637,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, case _DRM_VBLANK_ABSOLUTE: break; default: - return -EINVAL; + ret = -EINVAL; + goto done; } if ((flags & _DRM_VBLANK_NEXTONMISS) && @@ -644,15 +671,18 @@ int drm_wait_vblank(struct drm_device *dev, void *data, if (atomic_read(&dev->vbl_signal_pending) >= 100) { spin_unlock_irqrestore(&dev->vbl_lock, irqflags); - return -EBUSY; + ret = -EBUSY; + goto done; } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER); - if (!vbl_sig) - return -ENOMEM; + if (!vbl_sig) { + ret = -ENOMEM; + goto done; + } ret = drm_vblank_get(dev, crtc); if (ret) { @@ -675,23 +705,23 @@ int drm_wait_vblank(struct drm_device *dev, void *data, vblwait->reply.sequence = seq; } else { - unsigned long cur_vblank; - - ret = drm_vblank_get(dev, crtc); - if (ret) - return ret; DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, - (((cur_vblank = drm_vblank_count(dev, crtc)) + ((drm_vblank_count(dev, crtc) - vblwait->request.sequence) <= (1 << 23))); - drm_vblank_put(dev, crtc); - do_gettimeofday(&now); - vblwait->reply.tval_sec = now.tv_sec; - vblwait->reply.tval_usec = now.tv_usec; - vblwait->reply.sequence = cur_vblank; + if (ret != -EINTR) { + struct timeval now; + + do_gettimeofday(&now); + + vblwait->reply.tval_sec = now.tv_sec; + vblwait->reply.tval_usec = now.tv_usec; + vblwait->reply.sequence = drm_vblank_count(dev, crtc); + } } - done: +done: + drm_vblank_put(dev, crtc); return ret; } @@ -745,7 +775,7 @@ static void drm_vbl_send_signals(struct drm_device * dev, int crtc) */ void 
drm_handle_vblank(struct drm_device *dev, int crtc) { - drm_update_vblank_count(dev, crtc); + atomic_inc(&dev->_vblank_count[crtc]); DRM_WAKEUP(&dev->vbl_queue[crtc]); drm_vbl_send_signals(dev, crtc); } diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c index a663e965..fe648933 100644 --- a/linux-core/drm_memory.c +++ b/linux-core/drm_memory.c @@ -351,6 +351,15 @@ void drm_core_ioremap(struct drm_map *map, struct drm_device *dev) } EXPORT_SYMBOL_GPL(drm_core_ioremap); + +#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) +void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev) +{ + map->handle = ioremap_wc(map->offset, map->size); +} +EXPORT_SYMBOL_GPL(drm_core_ioremap_wc); +#endif + void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev) { if (!map->handle || !map->size) diff --git a/linux-core/drm_memrange.c b/linux-core/drm_mm.c index 5921eff8..8d7390d4 100644 --- a/linux-core/drm_memrange.c +++ b/linux-core/drm_mm.c @@ -44,26 +44,26 @@ #include "drmP.h" #include <linux/slab.h> -unsigned long drm_memrange_tail_space(struct drm_memrange *mm) +unsigned long drm_mm_tail_space(struct drm_mm *mm) { struct list_head *tail_node; - struct drm_memrange_node *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_memrange_node, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return 0; return entry->size; } -int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long size) +int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - struct drm_memrange_node *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_memrange_node, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) return -ENOMEM; @@ -75,13 +75,13 @@ int drm_memrange_remove_space_from_tail(struct drm_memrange *mm, unsigned long s } -static int drm_memrange_create_tail_node(struct drm_memrange *mm, +static int drm_mm_create_tail_node(struct drm_mm *mm, unsigned long start, unsigned long size) { - struct drm_memrange_node *child; + struct drm_mm_node *child; - child = (struct drm_memrange_node *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return -ENOMEM; @@ -98,26 +98,26 @@ static int drm_memrange_create_tail_node(struct drm_memrange *mm, } -int drm_memrange_add_space_to_tail(struct drm_memrange *mm, unsigned long size) +int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size) { struct list_head *tail_node; - struct drm_memrange_node *entry; + struct drm_mm_node *entry; tail_node = mm->ml_entry.prev; - entry = list_entry(tail_node, struct drm_memrange_node, ml_entry); + entry = list_entry(tail_node, struct drm_mm_node, ml_entry); if (!entry->free) { - return drm_memrange_create_tail_node(mm, entry->start + entry->size, size); + return drm_mm_create_tail_node(mm, entry->start + entry->size, size); } entry->size += size; return 0; } -static struct drm_memrange_node *drm_memrange_split_at_start(struct drm_memrange_node *parent, +static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent, unsigned long size) { - struct drm_memrange_node *child; + struct drm_mm_node *child; - child = (struct drm_memrange_node *) + child = (struct drm_mm_node *) drm_ctl_alloc(sizeof(*child), DRM_MEM_MM); if (!child) return NULL; @@ -137,19 +137,19 @@ static struct drm_memrange_node 
*drm_memrange_split_at_start(struct drm_memrange return child; } -struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * parent, +struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, unsigned long size, unsigned alignment) { - struct drm_memrange_node *align_splitoff = NULL; - struct drm_memrange_node *child; + struct drm_mm_node *align_splitoff = NULL; + struct drm_mm_node *child; unsigned tmp = 0; if (alignment) tmp = parent->start % alignment; if (tmp) { - align_splitoff = drm_memrange_split_at_start(parent, alignment - tmp); + align_splitoff = drm_mm_split_at_start(parent, alignment - tmp); if (!align_splitoff) return NULL; } @@ -159,41 +159,41 @@ struct drm_memrange_node *drm_memrange_get_block(struct drm_memrange_node * pare parent->free = 0; return parent; } else { - child = drm_memrange_split_at_start(parent, size); + child = drm_mm_split_at_start(parent, size); } if (align_splitoff) - drm_memrange_put_block(align_splitoff); + drm_mm_put_block(align_splitoff); return child; } -EXPORT_SYMBOL(drm_memrange_get_block); +EXPORT_SYMBOL(drm_mm_get_block); /* * Put a block. Merge with the previous and / or next block if they are free. * Otherwise add to the free stack. */ -void drm_memrange_put_block(struct drm_memrange_node * cur) +void drm_mm_put_block(struct drm_mm_node * cur) { - struct drm_memrange *mm = cur->mm; + struct drm_mm *mm = cur->mm; struct list_head *cur_head = &cur->ml_entry; struct list_head *root_head = &mm->ml_entry; - struct drm_memrange_node *prev_node = NULL; - struct drm_memrange_node *next_node; + struct drm_mm_node *prev_node = NULL; + struct drm_mm_node *next_node; int merged = 0; if (cur_head->prev != root_head) { - prev_node = list_entry(cur_head->prev, struct drm_memrange_node, ml_entry); + prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry); if (prev_node->free) { prev_node->size += cur->size; merged = 1; } } if (cur_head->next != root_head) { - next_node = list_entry(cur_head->next, struct drm_memrange_node, ml_entry); + next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry); if (next_node->free) { if (merged) { prev_node->size += next_node->size; @@ -216,16 +216,16 @@ void drm_memrange_put_block(struct drm_memrange_node * cur) drm_ctl_free(cur, sizeof(*cur), DRM_MEM_MM); } } -EXPORT_SYMBOL(drm_memrange_put_block); +EXPORT_SYMBOL(drm_mm_put_block); -struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * mm, +struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, unsigned long size, unsigned alignment, int best_match) { struct list_head *list; const struct list_head *free_stack = &mm->fl_entry; - struct drm_memrange_node *entry; - struct drm_memrange_node *best; + struct drm_mm_node *entry; + struct drm_mm_node *best; unsigned long best_size; unsigned wasted; @@ -233,7 +233,7 @@ struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * m best_size = ~0UL; list_for_each(list, free_stack) { - entry = list_entry(list, struct drm_memrange_node, fl_entry); + entry = list_entry(list, struct drm_mm_node, fl_entry); wasted = 0; if (entry->size < size) @@ -258,31 +258,31 @@ struct drm_memrange_node *drm_memrange_search_free(const struct drm_memrange * m return best; } -EXPORT_SYMBOL(drm_memrange_search_free); +EXPORT_SYMBOL(drm_mm_search_free); -int drm_memrange_clean(struct drm_memrange * mm) +int drm_mm_clean(struct drm_mm * mm) { struct list_head *head = &mm->ml_entry; return (head->next->next == head); } -int drm_memrange_init(struct drm_memrange 
* mm, unsigned long start, unsigned long size) +int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) { INIT_LIST_HEAD(&mm->ml_entry); INIT_LIST_HEAD(&mm->fl_entry); - return drm_memrange_create_tail_node(mm, start, size); + return drm_mm_create_tail_node(mm, start, size); } -EXPORT_SYMBOL(drm_memrange_init); +EXPORT_SYMBOL(drm_mm_init); -void drm_memrange_takedown(struct drm_memrange * mm) +void drm_mm_takedown(struct drm_mm * mm) { struct list_head *bnode = mm->fl_entry.next; - struct drm_memrange_node *entry; + struct drm_mm_node *entry; - entry = list_entry(bnode, struct drm_memrange_node, fl_entry); + entry = list_entry(bnode, struct drm_mm_node, fl_entry); if (entry->ml_entry.next != &mm->ml_entry || entry->fl_entry.next != &mm->fl_entry) { @@ -294,4 +294,4 @@ void drm_memrange_takedown(struct drm_memrange * mm) list_del(&entry->ml_entry); drm_ctl_free(entry, sizeof(*entry), DRM_MEM_MM); } -EXPORT_SYMBOL(drm_memrange_takedown); +EXPORT_SYMBOL(drm_mm_takedown); diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index a8aa84db..925b4d67 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -508,7 +508,7 @@ extern int drm_ttm_destroy(struct drm_ttm *ttm); */ struct drm_bo_mem_reg { - struct drm_memrange_node *mm_node; + struct drm_mm_node *mm_node; unsigned long size; unsigned long num_pages; uint32_t page_alignment; @@ -588,7 +588,7 @@ struct drm_buffer_object { unsigned long num_pages; /* For pinned buffers */ - struct drm_memrange_node *pinned_node; + struct drm_mm_node *pinned_node; uint32_t pinned_mem_type; struct list_head pinned_lru; @@ -623,7 +623,7 @@ struct drm_mem_type_manager { int has_type; int use_type; int kern_init_type; - struct drm_memrange manager; + struct drm_mm manager; struct list_head lru; struct list_head pinned; uint32_t flags; diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c index 127a7987..e66edfb3 100644 --- a/linux-core/drm_proc.c +++ b/linux-core/drm_proc.c @@ -598,20 +598,20 @@ struct drm_gem_name_info_data { int eof; }; -static int drm_gem_one_name_info (int id, void *ptr, void *data) +static int drm_gem_one_name_info(int id, void *ptr, void *data) { struct drm_gem_object *obj = ptr; struct drm_gem_name_info_data *nid = data; - DRM_INFO ("name %d size %d\n", obj->name, obj->size); + DRM_INFO("name %d size %d\n", obj->name, obj->size); if (nid->eof) return 0; - - nid->len += sprintf (&nid->buf[nid->len], - "%6d%9d%8d%9d\n", - obj->name, obj->size, - atomic_read(&obj->handlecount.refcount), - atomic_read(&obj->refcount.refcount)); + + nid->len += sprintf(&nid->buf[nid->len], + "%6d%9d%8d%9d\n", + obj->name, obj->size, + atomic_read(&obj->handlecount.refcount), + atomic_read(&obj->refcount.refcount)); if (nid->len > DRM_PROC_LIMIT) { nid->eof = 1; return 0; @@ -622,20 +622,20 @@ static int drm_gem_one_name_info (int id, void *ptr, void *data) static int drm_gem_name_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - struct drm_minor *minor = (struct drm_minor *) data; + struct drm_minor *minor = (struct drm_minor *) data; struct drm_device *dev = minor->dev; struct drm_gem_name_info_data nid; - + if (offset > DRM_PROC_LIMIT) { *eof = 1; return 0; } - nid.len = sprintf (buf, " name size handles refcount\n"); + nid.len = sprintf(buf, " name size handles refcount\n"); nid.buf = buf; nid.eof = 0; - idr_for_each (&dev->object_name_idr, drm_gem_one_name_info, &nid); - + idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); + *start = &buf[offset]; *eof = 
0; if (nid.len > request + offset) @@ -647,10 +647,10 @@ static int drm_gem_name_info(char *buf, char **start, off_t offset, static int drm_gem_object_info(char *buf, char **start, off_t offset, int request, int *eof, void *data) { - struct drm_minor *minor = (struct drm_minor *) data; + struct drm_minor *minor = (struct drm_minor *) data; struct drm_device *dev = minor->dev; int len = 0; - + if (offset > DRM_PROC_LIMIT) { *eof = 1; return 0; @@ -658,11 +658,11 @@ static int drm_gem_object_info(char *buf, char **start, off_t offset, *start = &buf[offset]; *eof = 0; - DRM_PROC_PRINT("%d objects\n", atomic_read (&dev->object_count)); - DRM_PROC_PRINT("%d object bytes\n", atomic_read (&dev->object_memory)); - DRM_PROC_PRINT("%d pinned\n", atomic_read (&dev->pin_count)); - DRM_PROC_PRINT("%d pin bytes\n", atomic_read (&dev->pin_memory)); - DRM_PROC_PRINT("%d gtt bytes\n", atomic_read (&dev->gtt_memory)); + DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); + DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); + DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); + DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); + DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); if (len > request + offset) return request; diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c index 7c16f685..8421a939 100644 --- a/linux-core/drm_sman.c +++ b/linux-core/drm_sman.c @@ -88,34 +88,34 @@ EXPORT_SYMBOL(drm_sman_init); static void *drm_sman_mm_allocate(void *private, unsigned long size, unsigned alignment) { - struct drm_memrange *mm = (struct drm_memrange *) private; - struct drm_memrange_node *tmp; + struct drm_mm *mm = (struct drm_mm *) private; + struct drm_mm_node *tmp; - tmp = drm_memrange_search_free(mm, size, alignment, 1); + tmp = drm_mm_search_free(mm, size, alignment, 1); if (!tmp) { return NULL; } - tmp = drm_memrange_get_block(tmp, size, alignment); + tmp = drm_mm_get_block(tmp, size, alignment); return tmp; } static void drm_sman_mm_free(void *private, void *ref) { - struct drm_memrange_node *node = (struct drm_memrange_node *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; - drm_memrange_put_block(node); + drm_mm_put_block(node); } static void drm_sman_mm_destroy(void *private) { - struct drm_memrange *mm = (struct drm_memrange *) private; - drm_memrange_takedown(mm); + struct drm_mm *mm = (struct drm_mm *) private; + drm_mm_takedown(mm); drm_free(mm, sizeof(*mm), DRM_MEM_MM); } static unsigned long drm_sman_mm_offset(void *private, void *ref) { - struct drm_memrange_node *node = (struct drm_memrange_node *) ref; + struct drm_mm_node *node = (struct drm_mm_node *) ref; return node->start; } @@ -124,7 +124,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager, unsigned long start, unsigned long size) { struct drm_sman_mm *sman_mm; - struct drm_memrange *mm; + struct drm_mm *mm; int ret; BUG_ON(manager >= sman->num_managers); @@ -135,7 +135,7 @@ drm_sman_set_range(struct drm_sman * sman, unsigned int manager, return -ENOMEM; } sman_mm->private = mm; - ret = drm_memrange_init(mm, start, size); + ret = drm_mm_init(mm, start, size); if (ret) { drm_free(mm, sizeof(*mm), DRM_MEM_MM); diff --git a/linux-core/drm_sman.h b/linux-core/drm_sman.h index 0299776c..39a39fef 100644 --- a/linux-core/drm_sman.h +++ b/linux-core/drm_sman.h @@ -45,7 +45,7 @@ /* * A class that is an abstration of a simple memory allocator. 
* The sman implementation provides a default such allocator - * using the drm_memrange.c implementation. But the user can replace it. + * using the drm_mm.c implementation. But the user can replace it. * See the SiS implementation, which may use the SiS FB kernel module * for memory management. */ @@ -116,7 +116,7 @@ extern int drm_sman_init(struct drm_sman * sman, unsigned int num_managers, unsigned int user_order, unsigned int owner_order); /* - * Initialize a drm_memrange.c allocator. Should be called only once for each + * Initialize a drm_mm.c allocator. Should be called only once for each * manager unless a customized allogator is used. */ diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c index 4654dca1..1257b0d4 100644 --- a/linux-core/drm_stub.c +++ b/linux-core/drm_stub.c @@ -219,9 +219,8 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) return -ENOMEM; - - if (drm_memrange_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, - DRM_FILE_PAGE_OFFSET_SIZE)) { + if (drm_mm_init(&dev->offset_manager, DRM_FILE_PAGE_OFFSET_START, + DRM_FILE_PAGE_OFFSET_SIZE)) { drm_ht_remove(&dev->map_hash); return -ENOMEM; } diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c index 5c384a60..3b217342 100644 --- a/linux-core/drm_sysfs.c +++ b/linux-core/drm_sysfs.c @@ -450,6 +450,7 @@ void drm_sysfs_hotplug_event(struct drm_device *dev) kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp); } +EXPORT_SYMBOL(drm_sysfs_hotplug_event); static struct device_attribute dri_attrs[] = { __ATTR(dri_library_name, S_IRUGO, show_dri, NULL), diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c index 9a215161..0aabf943 100644 --- a/linux-core/drm_vm.c +++ b/linux-core/drm_vm.c @@ -698,7 +698,7 @@ EXPORT_SYMBOL(drm_mmap); * protected by the bo->mutex lock. */ -#ifdef DRM_FULL_MM_COMPAT +#if defined(DRM_FULL_MM_COMPAT) && !defined(DRM_NO_FAULT) static int drm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { @@ -715,6 +715,7 @@ static int drm_bo_vm_fault(struct vm_area_struct *vma, unsigned long ret = VM_FAULT_NOPAGE; dev = bo->dev; + err = mutex_lock_interruptible(&bo->mutex); if (err) { return VM_FAULT_NOPAGE; diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c index 33a33e61..2e67eb71 100644 --- a/linux-core/i915_drv.c +++ b/linux-core/i915_drv.c @@ -73,206 +73,9 @@ static struct drm_bo_driver i915_bo_driver = { }; #endif /* ttm */ -static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (pipe == PIPE_A) - return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); - else - return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); -} - -static void i915_save_palette(struct drm_device *dev, enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); - u32 *array; - int i; - - if (!i915_pipe_enabled(dev, pipe)) - return; - - if (pipe == PIPE_A) - array = dev_priv->save_palette_a; - else - array = dev_priv->save_palette_b; - - for(i = 0; i < 256; i++) - array[i] = I915_READ(reg + (i << 2)); -} - -static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - unsigned long reg = (pipe == PIPE_A ? 
PALETTE_A : PALETTE_B); - u32 *array; - int i; - - if (!i915_pipe_enabled(dev, pipe)) - return; - - if (pipe == PIPE_A) - array = dev_priv->save_palette_a; - else - array = dev_priv->save_palette_b; - - for(i = 0; i < 256; i++) - I915_WRITE(reg + (i << 2), array[i]); -} - -static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg) -{ - outb(reg, index_port); - return inb(data_port); -} - -static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable) -{ - inb(st01); - outb(palette_enable | reg, VGA_AR_INDEX); - return inb(VGA_AR_DATA_READ); -} - -static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable) -{ - inb(st01); - outb(palette_enable | reg, VGA_AR_INDEX); - outb(val, VGA_AR_DATA_WRITE); -} - -static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val) -{ - outb(reg, index_port); - outb(val, data_port); -} - -static void i915_save_vga(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - u16 cr_index, cr_data, st01; - - /* VGA color palette registers */ - dev_priv->saveDACMASK = inb(VGA_DACMASK); - /* DACCRX automatically increments during read */ - outb(0, VGA_DACRX); - /* Read 3 bytes of color data from each index */ - for (i = 0; i < 256 * 3; i++) - dev_priv->saveDACDATA[i] = inb(VGA_DACDATA); - - /* MSR bits */ - dev_priv->saveMSR = inb(VGA_MSR_READ); - if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { - cr_index = VGA_CR_INDEX_CGA; - cr_data = VGA_CR_DATA_CGA; - st01 = VGA_ST01_CGA; - } else { - cr_index = VGA_CR_INDEX_MDA; - cr_data = VGA_CR_DATA_MDA; - st01 = VGA_ST01_MDA; - } - - /* CRT controller regs */ - i915_write_indexed(cr_index, cr_data, 0x11, - i915_read_indexed(cr_index, cr_data, 0x11) & - (~0x80)); - for (i = 0; i <= 0x24; i++) - dev_priv->saveCR[i] = - i915_read_indexed(cr_index, cr_data, i); - /* Make sure we don't turn off CR group 0 writes */ - dev_priv->saveCR[0x11] &= ~0x80; - - /* Attribute controller registers */ - inb(st01); - dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX); - for (i = 0; i <= 0x14; i++) - dev_priv->saveAR[i] = i915_read_ar(st01, i, 0); - inb(st01); - outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX); - inb(st01); - - /* Graphics controller registers */ - for (i = 0; i < 9; i++) - dev_priv->saveGR[i] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i); - - dev_priv->saveGR[0x10] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10); - dev_priv->saveGR[0x11] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11); - dev_priv->saveGR[0x18] = - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18); - - /* Sequencer registers */ - for (i = 0; i < 8; i++) - dev_priv->saveSR[i] = - i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i); -} - -static void i915_restore_vga(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - u16 cr_index, cr_data, st01; - - /* MSR bits */ - outb(dev_priv->saveMSR, VGA_MSR_WRITE); - if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { - cr_index = VGA_CR_INDEX_CGA; - cr_data = VGA_CR_DATA_CGA; - st01 = VGA_ST01_CGA; - } else { - cr_index = VGA_CR_INDEX_MDA; - cr_data = VGA_CR_DATA_MDA; - st01 = VGA_ST01_MDA; - } - - /* Sequencer registers, don't write SR07 */ - for (i = 0; i < 7; i++) - i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i, - dev_priv->saveSR[i]); - - /* CRT controller regs */ - /* Enable CR group 0 writes */ - i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); - for (i = 0; i <= 0x24; i++) - i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]); - - /* Graphics controller regs */ - for (i = 0; i < 
9; i++) - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i, - dev_priv->saveGR[i]); - - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10, - dev_priv->saveGR[0x10]); - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11, - dev_priv->saveGR[0x11]); - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18, - dev_priv->saveGR[0x18]); - - /* Attribute controller registers */ - inb(st01); /* switch back to index mode */ - for (i = 0; i <= 0x14; i++) - i915_write_ar(st01, i, dev_priv->saveAR[i], 0); - inb(st01); /* switch back to index mode */ - outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX); - inb(st01); - - /* VGA color palette registers */ - outb(dev_priv->saveDACMASK, VGA_DACMASK); - /* DACCRX automatically increments during read */ - outb(0, VGA_DACWX); - /* Read 3 bytes of color data from each index */ - for (i = 0; i < 256 * 3; i++) - outb(dev_priv->saveDACDATA[i], VGA_DACDATA); - -} - static int i915_suspend(struct drm_device *dev, pm_message_t state) { struct drm_i915_private *dev_priv = dev->dev_private; - int i; if (!dev || !dev_priv) { printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); @@ -284,122 +87,12 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) return 0; pci_save_state(dev->pdev); - pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); - - /* Display arbitration control */ - dev_priv->saveDSPARB = I915_READ(DSPARB); - - /* Pipe & plane A info */ - dev_priv->savePIPEACONF = I915_READ(PIPEACONF); - dev_priv->savePIPEASRC = I915_READ(PIPEASRC); - dev_priv->saveFPA0 = I915_READ(FPA0); - dev_priv->saveFPA1 = I915_READ(FPA1); - dev_priv->saveDPLL_A = I915_READ(DPLL_A); - if (IS_I965G(dev)) - dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); - dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); - dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); - dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); - dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); - dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); - dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); - dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); - - dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); - dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); - dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); - dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); - dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); - if (IS_I965G(dev)) { - dev_priv->saveDSPASURF = I915_READ(DSPASURF); - dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); - } - i915_save_palette(dev, PIPE_A); - dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); - - /* Pipe & plane B info */ - dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); - dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); - dev_priv->saveFPB0 = I915_READ(FPB0); - dev_priv->saveFPB1 = I915_READ(FPB1); - dev_priv->saveDPLL_B = I915_READ(DPLL_B); - if (IS_I965G(dev)) - dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); - dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); - dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); - dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); - dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); - dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); - dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); - dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); - - dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); - dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); - dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); - dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); - dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); - if (IS_I965GM(dev) || IS_IGD_GM(dev)) { - dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); - dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); - } - 
i915_save_palette(dev, PIPE_B); - dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); - - /* CRT state */ - dev_priv->saveADPA = I915_READ(ADPA); - - /* LVDS state */ - dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); - dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); - dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); - if (IS_I965G(dev)) - dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); - if (IS_MOBILE(dev) && !IS_I830(dev)) - dev_priv->saveLVDS = I915_READ(LVDS); - if (!IS_I830(dev) && !IS_845G(dev)) - dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); - dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); - dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); - dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); - - /* FIXME: save TV & SDVO state */ - - /* FBC state */ - dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); - dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); - dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); - dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); - - /* Interrupt state */ - dev_priv->saveIIR = I915_READ(IIR); - dev_priv->saveIER = I915_READ(IER); - dev_priv->saveIMR = I915_READ(IMR); - - /* VGA state */ - dev_priv->saveVGA0 = I915_READ(VGA0); - dev_priv->saveVGA1 = I915_READ(VGA1); - dev_priv->saveVGA_PD = I915_READ(VGA_PD); - dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); - - /* Clock gating state */ - dev_priv->saveD_STATE = I915_READ(D_STATE); - dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS); - - /* Cache mode state */ - dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); - - /* Memory Arbitration state */ - dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); - - /* Scratch space */ - for (i = 0; i < 16; i++) { - dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); - dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); - } - for (i = 0; i < 3; i++) - dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); - i915_save_vga(dev); + i915_save_state(dev); + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) + intel_opregion_free(dev); +#endif if (state.event == PM_EVENT_SUSPEND) { /* Shut down the device */ @@ -412,158 +105,17 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state) static int i915_resume(struct drm_device *dev) { - struct drm_i915_private *dev_priv = dev->dev_private; - int i; - pci_set_power_state(dev->pdev, PCI_D0); pci_restore_state(dev->pdev); if (pci_enable_device(dev->pdev)) return -1; + pci_set_master(dev->pdev); - DRM_INFO("resuming i915\n"); - - pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); - - I915_WRITE(DSPARB, dev_priv->saveDSPARB); - - /* Pipe & plane A info */ - /* Prime the clock */ - if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & - ~DPLL_VCO_ENABLE); - udelay(150); - } - I915_WRITE(FPA0, dev_priv->saveFPA0); - I915_WRITE(FPA1, dev_priv->saveFPA1); - /* Actually enable it */ - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); - udelay(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); - udelay(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); - I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); - I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); - I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); - I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); - I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); - I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); - - /* Restore plane info */ - I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); - I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); - I915_WRITE(PIPEASRC, 
dev_priv->savePIPEASRC); - I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); - I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); - I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); - } - - I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); - - i915_restore_palette(dev, PIPE_A); - /* Enable the plane */ - I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); - I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); - - /* Pipe & plane B info */ - if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { - DRM_INFO("restoring DPLL_B: 0x%08x\n", dev_priv->saveDPLL_B); - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & - ~DPLL_VCO_ENABLE); - udelay(150); - } - I915_WRITE(FPB0, dev_priv->saveFPB0); - I915_WRITE(FPB1, dev_priv->saveFPB1); - /* Actually enable it */ - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); - DRM_INFO("restoring DPLL_B: 0x%08x\n", dev_priv->saveDPLL_B); - udelay(150); - if (IS_I965G(dev)) - I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); - udelay(150); - - /* Restore mode */ - I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); - I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); - I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); - I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); - I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); - I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); - I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); - - /* Restore plane info */ - I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); - I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); - I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); - I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); - I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); - if (IS_I965G(dev)) { - I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); - I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); - } - - I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); - - i915_restore_palette(dev, PIPE_B); - /* Enable the plane */ - I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); - I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); - - /* CRT state */ - I915_WRITE(ADPA, dev_priv->saveADPA); - - /* LVDS state */ - if (IS_I965G(dev)) - I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); - if (IS_MOBILE(dev) && !IS_I830(dev)) - I915_WRITE(LVDS, dev_priv->saveLVDS); - if (!IS_I830(dev) && !IS_845G(dev)) - I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); - - I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); - I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); - I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); - I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); - I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); - I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); - - /* FIXME: restore TV & SDVO state */ - - /* FBC info */ - I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); - I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); - I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); - I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); - - /* VGA state */ - I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); - I915_WRITE(VGA0, dev_priv->saveVGA0); - I915_WRITE(VGA1, dev_priv->saveVGA1); - I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); - udelay(150); - - /* Clock gating state */ - I915_WRITE (D_STATE, dev_priv->saveD_STATE); - I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS); - - /* Cache mode state */ - I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); - - /* Memory arbitration state */ - I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); - - for (i = 0; i < 16; i++) { - I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); - 
I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); - } - for (i = 0; i < 3; i++) - I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); + i915_restore_state(dev); - i915_restore_vga(dev); +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) + intel_opregion_init(dev); +#endif return 0; } diff --git a/linux-core/i915_gem.c b/linux-core/i915_gem.c index 63f4b91d..64ffa356 100644 --- a/linux-core/i915_gem.c +++ b/linux-core/i915_gem.c @@ -30,33 +30,26 @@ #include "drm_compat.h" #include "i915_drm.h" #include "i915_drv.h" +#include <linux/swap.h> -#define WATCH_COHERENCY 0 -#define WATCH_BUF 0 -#define WATCH_EXEC 0 -#define WATCH_LRU 0 -#define WATCH_RELOC 0 -#define WATCH_INACTIVE 0 -#define WATCH_PWRITE 0 - -#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE -static void -i915_gem_dump_object(struct drm_gem_object *obj, int len, - const char *where, uint32_t mark); -#endif - static int i915_gem_object_set_domain(struct drm_gem_object *obj, uint32_t read_domains, uint32_t write_domain); +static int +i915_gem_object_set_domain_range(struct drm_gem_object *obj, + uint64_t offset, + uint64_t size, + uint32_t read_domains, + uint32_t write_domain); int i915_gem_set_domain(struct drm_gem_object *obj, struct drm_file *file_priv, uint32_t read_domains, uint32_t write_domain); - -static void -i915_gem_clflush_object(struct drm_gem_object *obj); +static int i915_gem_object_get_page_list(struct drm_gem_object *obj); +static void i915_gem_object_free_page_list(struct drm_gem_object *obj); +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); int i915_gem_do_init(struct drm_device *dev, unsigned long start, unsigned long end) @@ -69,8 +62,8 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start, return -EINVAL; } - drm_memrange_init(&dev_priv->mm.gtt_space, start, - end - start); + drm_mm_init(&dev_priv->mm.gtt_space, start, + end - start); dev->gtt_total = (uint32_t) (end - start); @@ -134,22 +127,35 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, { struct drm_i915_gem_pread *args = data; struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; ssize_t read; loff_t offset; int ret; obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) + return -EBADF; + obj_priv = obj->driver_private; + + /* Bounds check source. + * + * XXX: This could use review for overflow issues... + */ + if (args->offset > obj->size || args->size > obj->size || + args->offset + args->size > obj->size) { + drm_gem_object_unreference(obj); return -EINVAL; + } mutex_lock(&dev->struct_mutex); - ret = i915_gem_set_domain(obj, file_priv, - I915_GEM_DOMAIN_CPU, 0); - if (ret) { + + ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, + I915_GEM_DOMAIN_CPU, 0); + if (ret != 0) { drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); - return ret; } + offset = args->offset; read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, @@ -171,18 +177,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data, #include "drm_compat.h" -/** - * Writes data to the object referenced by handle. - * - * On error, the contents of the buffer that were to be modified are undefined. 
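The bounds checks added to the pread and pwrite paths above and below carry an XXX about overflow. For the record, a minimal overflow-proof formulation, written here as a hypothetical helper rather than anything in this patch, avoids the offset + size addition that could wrap:

static int
gem_range_ok(struct drm_gem_object *obj, uint64_t offset, uint64_t size)
{
	/* The first test guarantees offset <= obj->size, so the
	 * subtraction below cannot underflow and nothing can wrap.
	 */
	if (offset > obj->size || size > obj->size - offset)
		return -EINVAL;
	return 0;
}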
- */ -int -i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +static int +i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) { - struct drm_i915_gem_pwrite *args = data; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; + struct drm_i915_gem_object *obj_priv = obj->driver_private; ssize_t remain; loff_t offset; char __user *user_data; @@ -192,18 +192,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, unsigned long pfn; unsigned long unwritten; - obj = drm_gem_object_lookup(dev, file_priv, args->handle); - if (obj == NULL) - return -EINVAL; - - /** Bounds check destination. - * - * XXX: This could use review for overflow issues... - */ - if (args->offset > obj->size || args->size > obj->size || - args->offset + args->size > obj->size) - return -EFAULT; - user_data = (char __user *) (uintptr_t) args->data_ptr; remain = args->size; if (!access_ok(VERIFY_READ, user_data, remain)) @@ -213,7 +201,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, mutex_lock(&dev->struct_mutex); ret = i915_gem_object_pin(obj, 0); if (ret) { - drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return ret; } @@ -221,14 +208,13 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); if (ret) goto fail; - + obj_priv = obj->driver_private; offset = obj_priv->gtt_offset + args->offset; obj_priv->dirty = 1; - + while (remain > 0) { - - /** Operation in this page + /* Operation in this page * * i = page number * o = offset within page @@ -241,7 +227,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, l = PAGE_SIZE - o; pfn = (dev->agp->base >> PAGE_SHIFT) + i; - + #ifdef DRM_KMAP_ATOMIC_PROT_PFN /* kmap_atomic can't map IO pages on non-HIGHMEM kernels */ @@ -251,7 +237,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", i, o, l, pfn, vaddr); #endif - unwritten = __copy_from_user_inatomic_nocache(vaddr + o, user_data, l); + unwritten = __copy_from_user_inatomic_nocache(vaddr + o, + user_data, l); kunmap_atomic(vaddr, KM_USER0); if (unwritten) @@ -259,7 +246,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, { vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); #if WATCH_PWRITE - DRM_INFO("pwrite slow i %d o %d l %d pfn %ld vaddr %p\n", + DRM_INFO("pwrite slow i %d o %d l %d " + "pfn %ld vaddr %p\n", i, o, l, pfn, vaddr); #endif if (vaddr == NULL) { @@ -288,14 +276,96 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, #endif fail: - i915_gem_object_unpin (obj); - drm_gem_object_unreference(obj); + i915_gem_object_unpin(obj); mutex_unlock(&dev->struct_mutex); + return ret; +} + +int +i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct drm_file *file_priv) +{ + int ret; + loff_t offset; + ssize_t written; + + mutex_lock(&dev->struct_mutex); + + ret = i915_gem_set_domain(obj, file_priv, + I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); + if (ret) { + mutex_unlock(&dev->struct_mutex); + return ret; + } + + offset = args->offset; + + written = vfs_write(obj->filp, + (char __user *)(uintptr_t) args->data_ptr, + args->size, &offset); + if (written != args->size) { + mutex_unlock(&dev->struct_mutex); + if (written < 0) + return written; + else + return -EINVAL; + } + + mutex_unlock(&dev->struct_mutex); + + return 0; +} + +/** + * Writes data to the 
object referenced by handle. + * + * On error, the contents of the buffer that were to be modified are undefined. + */ +int +i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_pwrite *args = data; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret = 0; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EBADF; + obj_priv = obj->driver_private; + + /* Bounds check destination. + * + * XXX: This could use review for overflow issues... + */ + if (args->offset > obj->size || args->size > obj->size || + args->offset + args->size > obj->size) { + drm_gem_object_unreference(obj); + return -EINVAL; + } + + /* We can only do the GTT pwrite on untiled buffers, as otherwise + * it would end up going through the fenced access, and we'll get + * different detiling behavior between reading and writing. + * pread/pwrite currently are reading and writing from the CPU + * perspective, requiring manual detiling by the client. + */ + if (obj_priv->tiling_mode == I915_TILING_NONE && + dev->gtt_total != 0) + ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); + else + ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); + #if WATCH_PWRITE if (ret) DRM_INFO("pwrite failed %d\n", ret); #endif + + drm_gem_object_unreference(obj); + return ret; } @@ -315,9 +385,13 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) - return -EINVAL; + return -EBADF; mutex_lock(&dev->struct_mutex); +#if WATCH_BUF + DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", + obj, obj->size, args->read_domains, args->write_domain); +#endif ret = i915_gem_set_domain(obj, file_priv, args->read_domains, args->write_domain); drm_gem_object_unreference(obj); @@ -344,21 +418,20 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) { mutex_unlock(&dev->struct_mutex); - return -EINVAL; + return -EBADF; } #if WATCH_BUF - DRM_INFO("%s: sw_finish %d (%p)\n", - __func__, args->handle, obj); + DRM_INFO("%s: sw_finish %d (%p %d)\n", + __func__, args->handle, obj, obj->size); #endif - obj_priv = obj->driver_private; - - /** Pinned buffers may be scanout, so flush the cache - */ - if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { - i915_gem_clflush_object(obj); - drm_agp_chipset_flush(dev); - } + obj_priv = obj->driver_private; + + /* Pinned buffers may be scanout, so flush the cache */ + if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { + i915_gem_clflush_object(obj); + drm_agp_chipset_flush(dev); + } drm_gem_object_unreference(obj); mutex_unlock(&dev->struct_mutex); return ret; @@ -385,7 +458,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data, obj = drm_gem_object_lookup(dev, file_priv, args->handle); if (obj == NULL) - return -EINVAL; + return -EBADF; offset = args->offset; @@ -448,25 +521,6 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj) &dev_priv->mm.active_list); } -#if WATCH_INACTIVE -static void -i915_verify_inactive(struct drm_device *dev, char *file, int line) -{ - drm_i915_private_t *dev_priv = dev->dev_private; - struct drm_gem_object *obj; - struct drm_i915_gem_object *obj_priv; - - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - obj = obj_priv->obj; - if (obj_priv->pin_count || obj_priv->active || (obj->write_domain & 
~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) - DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", - obj, - obj_priv->pin_count, obj_priv->active, obj->write_domain, file, line); - } -} -#else -#define i915_verify_inactive(dev,file,line) -#endif static void i915_gem_object_move_to_inactive(struct drm_gem_object *obj) @@ -522,7 +576,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains) OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); OUT_RING(seqno); - OUT_RING(GFX_OP_USER_INTERRUPT); + OUT_RING(MI_USER_INTERRUPT); ADVANCE_LP_RING(); DRM_DEBUG("%d\n", seqno); @@ -534,7 +588,7 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains) list_add_tail(&request->list, &dev_priv->mm.request_list); if (was_empty) - schedule_delayed_work (&dev_priv->mm.retire_work, HZ); + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); return seqno; } @@ -544,7 +598,6 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains) * Ensures that all commands in the ring are finished * before signalling the CPU */ - uint32_t i915_retire_commands(struct drm_device *dev) { @@ -615,6 +668,12 @@ i915_gem_retire_request(struct drm_device *dev, __func__, request->seqno, obj); #endif + /* If this request flushes the write domain, + * clear the write domain from the object now + */ + if (request->flush_domains & obj->write_domain) + obj->write_domain = 0; + if (obj->write_domain != 0) { list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); @@ -661,7 +720,8 @@ i915_gem_retire_requests(struct drm_device *dev) list); retiring_seqno = request->seqno; - if (i915_seqno_passed(seqno, retiring_seqno) || dev_priv->mm.wedged) { + if (i915_seqno_passed(seqno, retiring_seqno) || + dev_priv->mm.wedged) { i915_gem_retire_request(dev, request); list_del(&request->list); @@ -684,7 +744,7 @@ i915_gem_retire_work_handler(struct work_struct *work) mutex_lock(&dev->struct_mutex); i915_gem_retire_requests(dev); if (!list_empty(&dev_priv->mm.request_list)) - schedule_delayed_work (&dev_priv->mm.retire_work, HZ); + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); mutex_unlock(&dev->struct_mutex); } @@ -705,14 +765,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno) i915_user_irq_on(dev); ret = wait_event_interruptible(dev_priv->irq_queue, i915_seqno_passed(i915_get_gem_seqno(dev), - seqno) || dev_priv->mm.wedged); + seqno) || + dev_priv->mm.wedged); i915_user_irq_off(dev); dev_priv->mm.waiting_gem_seqno = 0; } if (dev_priv->mm.wedged) ret = -EIO; - if (ret) + if (ret && ret != -ERESTARTSYS) DRM_ERROR("%s returns %d (awaiting %d at %d)\n", __func__, ret, seqno, i915_get_gem_seqno(dev)); @@ -744,7 +805,8 @@ i915_gem_flush(struct drm_device *dev, if (flush_domains & I915_GEM_DOMAIN_CPU) drm_agp_chipset_flush(dev); - if ((invalidate_domains|flush_domains) & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { + if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | + I915_GEM_DOMAIN_GTT)) { /* * read/write caches: * @@ -808,18 +870,18 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) struct drm_device *dev = obj->dev; struct drm_i915_gem_object *obj_priv = obj->driver_private; int ret; + uint32_t write_domain; /* If there are writes queued to the buffer, flush and * create a new seqno to wait for. 
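i915_gem_object_wait_rendering() below implements the flush-then-wait idiom this file leans on everywhere: flush the GPU write domain, emit a request whose seqno marks that flush, then sleep until the seqno retires. A condensed sketch of the same idiom, using only the helpers defined above (illustrative, not a replacement for the real function):

static int
wait_for_gpu_writes(struct drm_device *dev, struct drm_gem_object *obj)
{
	uint32_t gpu_write_domain, seqno;

	/* CPU and GTT writes need no GPU flush. */
	gpu_write_domain = obj->write_domain &
			   ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT);
	if (gpu_write_domain == 0)
		return 0;

	i915_gem_flush(dev, 0, gpu_write_domain);
	seqno = i915_add_request(dev, gpu_write_domain);
	return i915_wait_request(dev, seqno);
}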
*/ - if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { - uint32_t write_domain = obj->write_domain; + write_domain = obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT); + if (write_domain) { #if WATCH_BUF DRM_INFO("%s: flushing object %p from write domain %08x\n", __func__, obj, write_domain); #endif i915_gem_flush(dev, 0, write_domain); - obj->write_domain = 0; i915_gem_object_move_to_active(obj); obj_priv->last_rendering_seqno = i915_add_request(dev, @@ -829,6 +891,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj) DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); #endif } + /* If there is rendering queued on the buffer being evicted, wait for * it. */ @@ -871,7 +934,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj) */ ret = i915_gem_object_wait_rendering(obj); if (ret) { - DRM_ERROR ("wait_rendering failed: %d\n", ret); + DRM_ERROR("wait_rendering failed: %d\n", ret); return ret; } @@ -901,8 +964,8 @@ i915_gem_object_unbind(struct drm_gem_object *obj) if (obj_priv->gtt_space) { atomic_dec(&dev->gtt_count); atomic_sub(obj->size, &dev->gtt_memory); - - drm_memrange_put_block(obj_priv->gtt_space); + + drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; } @@ -913,83 +976,6 @@ i915_gem_object_unbind(struct drm_gem_object *obj) return 0; } -#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE -static void -i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, - uint32_t bias, uint32_t mark) -{ - uint32_t *mem = kmap_atomic(page, KM_USER0); - int i; - for (i = start; i < end; i += 4) - DRM_INFO("%08x: %08x%s\n", - (int) (bias + i), mem[i / 4], - (bias + i == mark) ? " ********" : ""); - kunmap_atomic(mem, KM_USER0); - /* give syslog time to catch up */ - msleep(1); -} - -static void -i915_gem_dump_object(struct drm_gem_object *obj, int len, - const char *where, uint32_t mark) -{ - struct drm_i915_gem_object *obj_priv = obj->driver_private; - int page; - - DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); - for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { - int page_len, chunk, chunk_len; - - page_len = len - page * PAGE_SIZE; - if (page_len > PAGE_SIZE) - page_len = PAGE_SIZE; - - for (chunk = 0; chunk < page_len; chunk += 128) { - chunk_len = page_len - chunk; - if (chunk_len > 128) - chunk_len = 128; - i915_gem_dump_page(obj_priv->page_list[page], - chunk, chunk + chunk_len, - obj_priv->gtt_offset + - page * PAGE_SIZE, - mark); - } - } -} -#endif - -#if WATCH_LRU -static void -i915_dump_lru(struct drm_device *dev, const char *where) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - - DRM_INFO("active list %s {\n", where); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - DRM_INFO("}\n"); - DRM_INFO("flushing list %s {\n", where); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - DRM_INFO("}\n"); - DRM_INFO("inactive %s {\n", where); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { - DRM_INFO(" %p: %08x\n", obj_priv, - obj_priv->last_rendering_seqno); - } - DRM_INFO("}\n"); -} -#endif - static int i915_gem_evict_something(struct drm_device *dev) { @@ -1063,7 +1049,8 @@ i915_gem_evict_something(struct drm_device *dev) continue; } - DRM_ERROR("inactive empty %d request empty %d flushing empty %d\n", + 
DRM_ERROR("inactive empty %d request empty %d " + "flushing empty %d\n", list_empty(&dev_priv->mm.inactive_list), list_empty(&dev_priv->mm.request_list), list_empty(&dev_priv->mm.flushing_list)); @@ -1084,7 +1071,7 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) struct inode *inode; struct page *page; int ret; - + if (obj_priv->page_list) return 0; @@ -1103,20 +1090,12 @@ i915_gem_object_get_page_list(struct drm_gem_object *obj) inode = obj->filp->f_path.dentry->d_inode; mapping = inode->i_mapping; for (i = 0; i < page_count; i++) { - page = find_get_page(mapping, i); - if (page == NULL || !PageUptodate(page)) { - if (page) { - page_cache_release(page); - page = NULL; - } - ret = shmem_getpage(inode, i, &page, SGP_DIRTY, NULL); - - if (ret) { - DRM_ERROR("shmem_getpage failed: %d\n", ret); - i915_gem_object_free_page_list(obj); - return ret; - } - unlock_page(page); + page = read_mapping_page(mapping, i, NULL); + if (IS_ERR(page)) { + ret = PTR_ERR(page); + DRM_ERROR("read_mapping_page failed: %d\n", ret); + i915_gem_object_free_page_list(obj); + return ret; } obj_priv->page_list[i] = page; } @@ -1132,7 +1111,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) struct drm_device *dev = obj->dev; struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_object *obj_priv = obj->driver_private; - struct drm_memrange_node *free_space; + struct drm_mm_node *free_space; int page_count, ret; if (alignment == 0) @@ -1143,13 +1122,11 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) } search_free: - free_space = drm_memrange_search_free(&dev_priv->mm.gtt_space, - obj->size, - alignment, 0); + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, + obj->size, alignment, 0); if (free_space != NULL) { - obj_priv->gtt_space = - drm_memrange_get_block(free_space, obj->size, - alignment); + obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, + alignment); if (obj_priv->gtt_space != NULL) { obj_priv->gtt_space->private = obj; obj_priv->gtt_offset = obj_priv->gtt_space->start; @@ -1183,7 +1160,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) #endif ret = i915_gem_object_get_page_list(obj); if (ret) { - drm_memrange_put_block(obj_priv->gtt_space); + drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; return ret; } @@ -1198,7 +1175,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) obj_priv->gtt_offset); if (obj_priv->agp_mem == NULL) { i915_gem_object_free_page_list(obj); - drm_memrange_put_block(obj_priv->gtt_space); + drm_mm_put_block(obj_priv->gtt_space); obj_priv->gtt_space = NULL; return -ENOMEM; } @@ -1215,7 +1192,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) return 0; } -static void +void i915_gem_clflush_object(struct drm_gem_object *obj) { struct drm_i915_gem_object *obj_priv = obj->driver_private; @@ -1354,8 +1331,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, #if WATCH_BUF DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", - __func__, obj, - obj->read_domains, read_domains, + __func__, obj, + obj->read_domains, read_domains, obj->write_domain, write_domain); #endif /* @@ -1393,7 +1370,8 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, * flushed before the cpu cache is invalidated */ if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && - (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))) { + (flush_domains & ~(I915_GEM_DOMAIN_CPU | + 
I915_GEM_DOMAIN_GTT))) { ret = i915_gem_object_wait_rendering(obj); if (ret) return ret; @@ -1403,7 +1381,17 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, if ((write_domain | flush_domains) != 0) obj->write_domain = write_domain; + + /* If we're invalidating the CPU domain, clear the per-page CPU + * domain list as well. + */ + if (obj_priv->page_cpu_valid != NULL && + (obj->read_domains & I915_GEM_DOMAIN_CPU) && + ((read_domains & I915_GEM_DOMAIN_CPU) == 0)) { + memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); + } obj->read_domains = read_domains; + dev->invalidate_domains |= invalidate_domains; dev->flush_domains |= flush_domains; #if WATCH_BUF @@ -1416,6 +1404,57 @@ i915_gem_object_set_domain(struct drm_gem_object *obj, } /** + * Set the read/write domain on a range of the object. + * + * Currently only implemented for CPU reads, otherwise drops to normal + * i915_gem_object_set_domain(). + */ +static int +i915_gem_object_set_domain_range(struct drm_gem_object *obj, + uint64_t offset, + uint64_t size, + uint32_t read_domains, + uint32_t write_domain) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int ret, i; + + if (obj->read_domains & I915_GEM_DOMAIN_CPU) + return 0; + + if (read_domains != I915_GEM_DOMAIN_CPU || + write_domain != 0) + return i915_gem_object_set_domain(obj, + read_domains, write_domain); + + /* Wait on any GPU rendering to the object to be flushed. */ + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { + ret = i915_gem_object_wait_rendering(obj); + if (ret) + return ret; + } + + if (obj_priv->page_cpu_valid == NULL) { + obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, + DRM_MEM_DRIVER); + } + + /* Flush the cache on any pages that are still invalid from the CPU's + * perspective. + */ + for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { + if (obj_priv->page_cpu_valid[i]) + continue; + + drm_ttm_cache_flush(obj_priv->page_list + i, 1); + + obj_priv->page_cpu_valid[i] = 1; + } + + return 0; +} + +/** + * Once all of the objects have been set in the proper domain, + * perform the necessary flush and invalidate operations.
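The page-range arithmetic in i915_gem_object_set_domain_range() above is subtle enough to restate standalone: a byte range [offset, offset + size) touches pages offset / PAGE_SIZE through (offset + size - 1) / PAGE_SIZE inclusive, hence the <= loop bound. A hypothetical helper making that explicit:

static void
mark_pages_cpu_valid(uint8_t *page_cpu_valid, uint64_t offset, uint64_t size)
{
	uint64_t first = offset / PAGE_SIZE;		 /* first page touched */
	uint64_t last = (offset + size - 1) / PAGE_SIZE; /* last page, inclusive */
	uint64_t i;

	for (i = first; i <= last; i++)
		page_cpu_valid[i] = 1;
}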
* @@ -1447,76 +1486,6 @@ i915_gem_dev_set_domain(struct drm_device *dev) return flush_domains; } -#if WATCH_COHERENCY -static void -i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) -{ - struct drm_device *dev = obj->dev; - struct drm_i915_gem_object *obj_priv = obj->driver_private; - int page; - uint32_t *gtt_mapping; - uint32_t *backing_map = NULL; - int bad_count = 0; - - DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", - __func__, obj, obj_priv->gtt_offset, handle, - obj->size / 1024); - - gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, - obj->size); - if (gtt_mapping == NULL) { - DRM_ERROR("failed to map GTT space\n"); - return; - } - - for (page = 0; page < obj->size / PAGE_SIZE; page++) { - int i; - - backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); - - if (backing_map == NULL) { - DRM_ERROR("failed to map backing page\n"); - goto out; - } - - for (i = 0; i < PAGE_SIZE / 4; i++) { - uint32_t cpuval = backing_map[i]; - uint32_t gttval = readl(gtt_mapping + - page * 1024 + i); - - if (cpuval != gttval) { - DRM_INFO("incoherent CPU vs GPU at 0x%08x: " - "0x%08x vs 0x%08x\n", - (int)(obj_priv->gtt_offset + - page * PAGE_SIZE + i * 4), - cpuval, gttval); - if (bad_count++ >= 8) { - DRM_INFO("...\n"); - goto out; - } - } - } - kunmap_atomic(backing_map, KM_USER0); - backing_map = NULL; - } - - out: - if (backing_map != NULL) - kunmap_atomic(backing_map, KM_USER0); - iounmap(gtt_mapping); - - /* give syslog time to catch up */ - msleep(1); - - /* Directly flush the object, since we just loaded values with the CPU - * from thebacking pages and we don't want to disturb the cache - * management that we're trying to observe. - */ - - i915_gem_clflush_object(obj); -} -#endif - /** * Pin an object to the GTT and evaluate the relocations landing in it. 
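Pinning an object and evaluating the relocations landing in it, as the comment above puts it, comes down to patching 32-bit address slots once the target's GTT offset is known. A sketch of one relocation, assuming the offset and delta fields of drm_i915_gem_relocation_entry from i915_drm.h and a CPU mapping of the buffer at vaddr (the real code path below writes through the GTT instead):

static void
apply_one_reloc(char *vaddr,
		struct drm_i915_gem_relocation_entry *reloc,
		uint32_t target_gtt_offset)
{
	/* reloc->offset: where in this buffer the address slot lives.
	 * reloc->delta: byte offset within the target being pointed at.
	 */
	uint32_t *slot = (uint32_t *)(vaddr + reloc->offset);

	*slot = target_gtt_offset + reloc->delta;
}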
*/ @@ -1561,7 +1530,7 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, reloc.target_handle); if (target_obj == NULL) { i915_gem_object_unpin(obj); - return -EINVAL; + return -EBADF; } target_obj_priv = target_obj->driver_private; @@ -1784,7 +1753,8 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) mutex_lock(&dev->struct_mutex); seqno = i915_file_priv->mm.last_gem_throttle_seqno; - i915_file_priv->mm.last_gem_throttle_seqno = i915_file_priv->mm.last_gem_seqno; + i915_file_priv->mm.last_gem_throttle_seqno = + i915_file_priv->mm.last_gem_seqno; if (seqno) ret = i915_wait_request(dev, seqno); mutex_unlock(&dev->struct_mutex); @@ -1841,7 +1811,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, mutex_unlock(&dev->struct_mutex); return -EIO; } - + if (dev_priv->mm.suspended) { DRM_ERROR("Execbuf while VT-switched.\n"); mutex_unlock(&dev->struct_mutex); @@ -1862,7 +1832,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data, if (object_list[i] == NULL) { DRM_ERROR("Invalid object handle %d at index %d\n", exec_list[i].handle, i); - ret = -EINVAL; + ret = -EBADF; goto err; } @@ -2022,7 +1992,9 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) if (obj_priv->pin_count == 1) { atomic_inc(&dev->pin_count); atomic_add(obj->size, &dev->pin_memory); - if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0 && + if (!obj_priv->active && + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | + I915_GEM_DOMAIN_GTT)) == 0 && !list_empty(&obj_priv->list)) list_del_init(&obj_priv->list); } @@ -2048,7 +2020,9 @@ i915_gem_object_unpin(struct drm_gem_object *obj) * the inactive list */ if (obj_priv->pin_count == 0) { - if (!obj_priv->active && (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) == 0) + if (!obj_priv->active && + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | + I915_GEM_DOMAIN_GTT)) == 0) list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); atomic_dec(&dev->pin_count); @@ -2073,7 +2047,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", args->handle); mutex_unlock(&dev->struct_mutex); - return -EINVAL; + return -EBADF; } obj_priv = obj->driver_private; @@ -2084,7 +2058,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data, return ret; } - /** XXX - flush the CPU caches for pinned objects + /* XXX - flush the CPU caches for pinned objects * as the X server doesn't manage domains yet */ if (obj->write_domain & I915_GEM_DOMAIN_CPU) { @@ -2113,7 +2087,7 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data, DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", args->handle); mutex_unlock(&dev->struct_mutex); - return -EINVAL; + return -EBADF; } i915_gem_object_unpin(obj); @@ -2137,7 +2111,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data, DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", args->handle); mutex_unlock(&dev->struct_mutex); - return -EINVAL; + return -EBADF; } obj_priv = obj->driver_private; @@ -2187,6 +2161,7 @@ void i915_gem_free_object(struct drm_gem_object *obj) i915_gem_object_unbind(obj); + drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); } @@ -2206,7 +2181,7 @@ i915_gem_set_domain(struct drm_gem_object *obj, if (ret) return ret; flush_domains = i915_gem_dev_set_domain(obj->dev); - + if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) (void) i915_add_request(dev, flush_domains); @@ -2267,7 +2242,8 @@ 
i915_gem_idle(struct drm_device *dev) */ i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); - seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); + seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | + I915_GEM_DOMAIN_GTT)); if (seqno == 0) { mutex_unlock(&dev->struct_mutex); @@ -2317,6 +2293,56 @@ i915_gem_idle(struct drm_device *dev) return 0; } +static int +i915_gem_init_hws(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + int ret; + + /* If we need a physical address for the status page, it's already + * initialized at driver load time. + */ + if (!I915_NEED_GFX_HWS(dev)) + return 0; + + obj = drm_gem_object_alloc(dev, 4096); + if (obj == NULL) { + DRM_ERROR("Failed to allocate status page\n"); + return -ENOMEM; + } + obj_priv = obj->driver_private; + + ret = i915_gem_object_pin(obj, 4096); + if (ret != 0) { + drm_gem_object_unreference(obj); + return ret; + } + + dev_priv->status_gfx_addr = obj_priv->gtt_offset; + dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset; + dev_priv->hws_map.size = 4096; + dev_priv->hws_map.type = 0; + dev_priv->hws_map.flags = 0; + dev_priv->hws_map.mtrr = 0; + + drm_core_ioremap(&dev_priv->hws_map, dev); + if (dev_priv->hws_map.handle == NULL) { + DRM_ERROR("Failed to map status page.\n"); + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + drm_gem_object_unreference(obj); + return -EINVAL; + } + dev_priv->hws_obj = obj; + dev_priv->hw_status_page = dev_priv->hws_map.handle; + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); + DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); + + return 0; +} + int i915_gem_init_ringbuffer(struct drm_device *dev) { @@ -2325,6 +2351,10 @@ i915_gem_init_ringbuffer(struct drm_device *dev) struct drm_i915_gem_object *obj_priv; int ret; + ret = i915_gem_init_hws(dev); + if (ret != 0) + return ret; + obj = drm_gem_object_alloc(dev, 128 * 1024); if (obj == NULL) { DRM_ERROR("Failed to allocate ringbuffer\n"); @@ -2360,15 +2390,16 @@ i915_gem_init_ringbuffer(struct drm_device *dev) /* Stop the ring if it's running. */ I915_WRITE(PRB0_CTL, 0); - I915_WRITE(PRB0_HEAD, 0); - I915_WRITE(PRB0_TAIL, 0); + I915_WRITE(PRB0_HEAD, 0); + I915_WRITE(PRB0_TAIL, 0); I915_WRITE(PRB0_START, 0); /* Initialize the ring. */ I915_WRITE(PRB0_START, obj_priv->gtt_offset); - I915_WRITE(PRB0_CTL, (((obj->size - 4096) & RING_NR_PAGES) | - RING_NO_REPORT | - RING_VALID)); + I915_WRITE(PRB0_CTL, + ((obj->size - 4096) & RING_NR_PAGES) | + RING_NO_REPORT | + RING_VALID); /* Update our cache of the ring state */ i915_kernel_lost_context(dev); @@ -2388,8 +2419,18 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev) i915_gem_object_unpin(dev_priv->ring.ring_obj); drm_gem_object_unreference(dev_priv->ring.ring_obj); - + dev_priv->ring.ring_obj = NULL; memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); + + if (dev_priv->hws_obj != NULL) { + i915_gem_object_unpin(dev_priv->hws_obj); + drm_gem_object_unreference(dev_priv->hws_obj); + dev_priv->hws_obj = NULL; + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); + + /* Write high address into HWS_PGA when disabling. 
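The status page set up in i915_gem_init_hws() above is what keeps request retirement cheap: i915_add_request() has the ring write each new seqno to slot I915_GEM_HWS_INDEX via MI_STORE_DWORD_INDEX, so checking for completion is a plain memory read instead of an uncached register read. A sketch of that read, assuming hw_status_page is the CPU mapping established above:

static uint32_t
read_hw_seqno(struct drm_i915_private *dev_priv)
{
	volatile uint32_t *hws = dev_priv->hw_status_page;

	return hws[I915_GEM_HWS_INDEX];
}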
*/ + I915_WRITE(HWS_PGA, 0x1ffff000); + } } int @@ -2399,8 +2440,11 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, struct drm_i915_private *dev_priv = dev->dev_private; int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + if (dev_priv->mm.wedged) { - DRM_ERROR("Renabling wedged hardware, good luck\n"); + DRM_ERROR("Reenabling wedged hardware, good luck\n"); dev_priv->mm.wedged = 0; } @@ -2424,6 +2468,9 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, { int ret; + if (drm_core_check_feature(dev, DRIVER_MODESET)) + return 0; + mutex_lock(&dev->struct_mutex); ret = i915_gem_idle(dev); if (ret == 0) @@ -2433,263 +2480,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, return 0; } -static int i915_gem_active_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Active:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.active_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", - obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_flushing_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Flushing:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_inactive_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_object *obj_priv; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Inactive:\n"); - list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, - list) - { - struct drm_gem_object *obj = obj_priv->obj; - if (obj->name) { - DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", - obj, obj->name, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } else { - DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, - obj->read_domains, obj->write_domain, - obj_priv->last_rendering_seqno); - } - } - if (len > request + offset) - 
return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_request_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - struct drm_i915_gem_request *gem_request; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Request:\n"); - list_for_each_entry(gem_request, &dev_priv->mm.request_list, - list) - { - DRM_PROC_PRINT (" %d @ %d %08x\n", - gem_request->seqno, - (int) (jiffies - gem_request->emitted_jiffies), - gem_request->flush_domains); - } - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static int i915_gem_seqno_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); - DRM_PROC_PRINT("Waiter sequence: %d\n", dev_priv->mm.waiting_gem_seqno); - DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - - -static int i915_interrupt_info(char *buf, char **start, off_t offset, - int request, int *eof, void *data) -{ - struct drm_minor *minor = (struct drm_minor *) data; - struct drm_device *dev = minor->dev; - struct drm_i915_private *dev_priv = dev->dev_private; - int len = 0; - - if (offset > DRM_PROC_LIMIT) { - *eof = 1; - return 0; - } - - *start = &buf[offset]; - *eof = 0; - DRM_PROC_PRINT("Interrupt enable: %08x\n", - I915_READ(IER)); - DRM_PROC_PRINT("Interrupt identity: %08x\n", - I915_READ(IIR)); - DRM_PROC_PRINT("Interrupt mask: %08x\n", - I915_READ(IMR)); - DRM_PROC_PRINT("Pipe A stat: %08x\n", - I915_READ(PIPEASTAT)); - DRM_PROC_PRINT("Pipe B stat: %08x\n", - I915_READ(PIPEBSTAT)); - DRM_PROC_PRINT("Interrupts received: %d\n", - atomic_read(&dev_priv->irq_received)); - DRM_PROC_PRINT("Current sequence: %d\n", - i915_get_gem_seqno(dev)); - DRM_PROC_PRINT("Waiter sequence: %d\n", - dev_priv->mm.waiting_gem_seqno); - DRM_PROC_PRINT("IRQ sequence: %d\n", - dev_priv->mm.irq_gem_seqno); - if (len > request + offset) - return request; - *eof = 1; - return len - offset; -} - -static struct drm_proc_list { - const char *name; /**< file name */ - int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ -} i915_gem_proc_list[] = { - {"i915_gem_active", i915_gem_active_info}, - {"i915_gem_flushing", i915_gem_flushing_info}, - {"i915_gem_inactive", i915_gem_inactive_info}, - {"i915_gem_request", i915_gem_request_info}, - {"i915_gem_seqno", i915_gem_seqno_info}, - {"i915_gem_interrupt", i915_interrupt_info}, -}; - -#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) - -int i915_gem_proc_init(struct drm_minor *minor) -{ - struct proc_dir_entry *ent; - int i, j; - - for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { - ent = create_proc_entry(i915_gem_proc_list[i].name, - S_IFREG | S_IRUGO, minor->dev_root); - if (!ent) { - DRM_ERROR("Cannot create /proc/dri/.../%s\n", - i915_gem_proc_list[i].name); - for (j = 0; j < i; j++) - remove_proc_entry(i915_gem_proc_list[i].name, - minor->dev_root); - return -1; 
- } - ent->read_proc = i915_gem_proc_list[i].f; - ent->data = minor; - } - return 0; -} - -void i915_gem_proc_cleanup(struct drm_minor *minor) -{ - int i; - - if (!minor->dev_root) - return; - - for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) - remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); -} - void i915_gem_lastclose(struct drm_device *dev) { @@ -2702,9 +2492,24 @@ i915_gem_lastclose(struct drm_device *dev) ret = i915_gem_idle(dev); if (ret) DRM_ERROR("failed to idle hardware: %d\n", ret); - + i915_gem_cleanup_ringbuffer(dev); } - + mutex_unlock(&dev->struct_mutex); } + +void i915_gem_load(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + + INIT_LIST_HEAD(&dev_priv->mm.active_list); + INIT_LIST_HEAD(&dev_priv->mm.flushing_list); + INIT_LIST_HEAD(&dev_priv->mm.inactive_list); + INIT_LIST_HEAD(&dev_priv->mm.request_list); + INIT_DELAYED_WORK(&dev_priv->mm.retire_work, + i915_gem_retire_work_handler); + dev_priv->mm.next_gem_seqno = 1; + + i915_gem_detect_bit_6_swizzle(dev); +} diff --git a/linux-core/i915_gem_debug.c b/linux-core/i915_gem_debug.c new file mode 100644 index 00000000..6d50e5bb --- /dev/null +++ b/linux-core/i915_gem_debug.c @@ -0,0 +1,202 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Keith Packard <keithp@keithp.com> + * + */ + +#include "drmP.h" +#include "drm.h" +#include "drm_compat.h" +#include "i915_drm.h" +#include "i915_drv.h" + +#if WATCH_INACTIVE +void +i915_verify_inactive(struct drm_device *dev, char *file, int line) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + obj = obj_priv->obj; + if (obj_priv->pin_count || obj_priv->active || + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | + I915_GEM_DOMAIN_GTT))) + DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", + obj, + obj_priv->pin_count, obj_priv->active, + obj->write_domain, file, line); + } +} +#endif /* WATCH_INACTIVE */ + + +#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE +static void +i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, + uint32_t bias, uint32_t mark) +{ + uint32_t *mem = kmap_atomic(page, KM_USER0); + int i; + for (i = start; i < end; i += 4) + DRM_INFO("%08x: %08x%s\n", + (int) (bias + i), mem[i / 4], + (bias + i == mark) ? 
" ********" : ""); + kunmap_atomic(mem, KM_USER0); + /* give syslog time to catch up */ + msleep(1); +} + +void +i915_gem_dump_object(struct drm_gem_object *obj, int len, + const char *where, uint32_t mark) +{ + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page; + + DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); + for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { + int page_len, chunk, chunk_len; + + page_len = len - page * PAGE_SIZE; + if (page_len > PAGE_SIZE) + page_len = PAGE_SIZE; + + for (chunk = 0; chunk < page_len; chunk += 128) { + chunk_len = page_len - chunk; + if (chunk_len > 128) + chunk_len = 128; + i915_gem_dump_page(obj_priv->page_list[page], + chunk, chunk + chunk_len, + obj_priv->gtt_offset + + page * PAGE_SIZE, + mark); + } + } +} +#endif + +#if WATCH_LRU +void +i915_dump_lru(struct drm_device *dev, const char *where) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + + DRM_INFO("active list %s {\n", where); + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, + list) + { + DRM_INFO(" %p: %08x\n", obj_priv, + obj_priv->last_rendering_seqno); + } + DRM_INFO("}\n"); + DRM_INFO("flushing list %s {\n", where); + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, + list) + { + DRM_INFO(" %p: %08x\n", obj_priv, + obj_priv->last_rendering_seqno); + } + DRM_INFO("}\n"); + DRM_INFO("inactive %s {\n", where); + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { + DRM_INFO(" %p: %08x\n", obj_priv, + obj_priv->last_rendering_seqno); + } + DRM_INFO("}\n"); +} +#endif + + +#if WATCH_COHERENCY +void +i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) +{ + struct drm_device *dev = obj->dev; + struct drm_i915_gem_object *obj_priv = obj->driver_private; + int page; + uint32_t *gtt_mapping; + uint32_t *backing_map = NULL; + int bad_count = 0; + + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", + __func__, obj, obj_priv->gtt_offset, handle, + obj->size / 1024); + + gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, + obj->size); + if (gtt_mapping == NULL) { + DRM_ERROR("failed to map GTT space\n"); + return; + } + + for (page = 0; page < obj->size / PAGE_SIZE; page++) { + int i; + + backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); + + if (backing_map == NULL) { + DRM_ERROR("failed to map backing page\n"); + goto out; + } + + for (i = 0; i < PAGE_SIZE / 4; i++) { + uint32_t cpuval = backing_map[i]; + uint32_t gttval = readl(gtt_mapping + + page * 1024 + i); + + if (cpuval != gttval) { + DRM_INFO("incoherent CPU vs GPU at 0x%08x: " + "0x%08x vs 0x%08x\n", + (int)(obj_priv->gtt_offset + + page * PAGE_SIZE + i * 4), + cpuval, gttval); + if (bad_count++ >= 8) { + DRM_INFO("...\n"); + goto out; + } + } + } + kunmap_atomic(backing_map, KM_USER0); + backing_map = NULL; + } + + out: + if (backing_map != NULL) + kunmap_atomic(backing_map, KM_USER0); + iounmap(gtt_mapping); + + /* give syslog time to catch up */ + msleep(1); + + /* Directly flush the object, since we just loaded values with the CPU + * from the backing pages and we don't want to disturb the cache + * management that we're trying to observe. 
+ */ + + i915_gem_clflush_object(obj); +} +#endif diff --git a/linux-core/i915_gem_proc.c b/linux-core/i915_gem_proc.c new file mode 100644 index 00000000..2704f922 --- /dev/null +++ b/linux-core/i915_gem_proc.c @@ -0,0 +1,293 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * Keith Packard <keithp@keithp.com> + * + */ + +#include "drmP.h" +#include "drm.h" +#include "drm_compat.h" +#include "i915_drm.h" +#include "i915_drv.h" + +static int i915_gem_active_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Active:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + DRM_PROC_PRINT(" %p: %08x %08x %d\n", + obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int i915_gem_flushing_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Flushing:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int 
i915_gem_inactive_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_object *obj_priv; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Inactive:\n"); + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, + list) + { + struct drm_gem_object *obj = obj_priv->obj; + if (obj->name) { + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", + obj, obj->name, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } else { + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, + obj->read_domains, obj->write_domain, + obj_priv->last_rendering_seqno); + } + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int i915_gem_request_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_i915_gem_request *gem_request; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Request:\n"); + list_for_each_entry(gem_request, &dev_priv->mm.request_list, + list) + { + DRM_PROC_PRINT(" %d @ %d %08x\n", + gem_request->seqno, + (int) (jiffies - gem_request->emitted_jiffies), + gem_request->flush_domains); + } + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static int i915_gem_seqno_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); + DRM_PROC_PRINT("Waiter sequence: %d\n", + dev_priv->mm.waiting_gem_seqno); + DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + + +static int i915_interrupt_info(char *buf, char **start, off_t offset, + int request, int *eof, void *data) +{ + struct drm_minor *minor = (struct drm_minor *) data; + struct drm_device *dev = minor->dev; + struct drm_i915_private *dev_priv = dev->dev_private; + int len = 0; + + if (offset > DRM_PROC_LIMIT) { + *eof = 1; + return 0; + } + + *start = &buf[offset]; + *eof = 0; + DRM_PROC_PRINT("Interrupt enable: %08x\n", + I915_READ(IER)); + DRM_PROC_PRINT("Interrupt identity: %08x\n", + I915_READ(IIR)); + DRM_PROC_PRINT("Interrupt mask: %08x\n", + I915_READ(IMR)); + DRM_PROC_PRINT("Pipe A stat: %08x\n", + I915_READ(PIPEASTAT)); + DRM_PROC_PRINT("Pipe B stat: %08x\n", + I915_READ(PIPEBSTAT)); + DRM_PROC_PRINT("Interrupts received: %d\n", + atomic_read(&dev_priv->irq_received)); + DRM_PROC_PRINT("Current sequence: %d\n", + i915_get_gem_seqno(dev)); + DRM_PROC_PRINT("Waiter sequence: %d\n", + dev_priv->mm.waiting_gem_seqno); + DRM_PROC_PRINT("IRQ sequence: %d\n", + dev_priv->mm.irq_gem_seqno); + if (len > request + offset) + return request; + *eof = 1; + return len - offset; +} + +static struct drm_proc_list { + /** file name */ + const char *name; + 
/** proc callback*/ + int (*f) (char *, char **, off_t, int, int *, void *); +} i915_gem_proc_list[] = { + {"i915_gem_active", i915_gem_active_info}, + {"i915_gem_flushing", i915_gem_flushing_info}, + {"i915_gem_inactive", i915_gem_inactive_info}, + {"i915_gem_request", i915_gem_request_info}, + {"i915_gem_seqno", i915_gem_seqno_info}, + {"i915_gem_interrupt", i915_interrupt_info}, +}; + +#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) + +int i915_gem_proc_init(struct drm_minor *minor) +{ + struct proc_dir_entry *ent; + int i, j; + + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { + ent = create_proc_entry(i915_gem_proc_list[i].name, + S_IFREG | S_IRUGO, minor->dev_root); + if (!ent) { + DRM_ERROR("Cannot create /proc/dri/.../%s\n", + i915_gem_proc_list[i].name); + for (j = 0; j < i; j++) + remove_proc_entry(i915_gem_proc_list[i].name, + minor->dev_root); + return -1; + } + ent->read_proc = i915_gem_proc_list[i].f; + ent->data = minor; + } + return 0; +} + +void i915_gem_proc_cleanup(struct drm_minor *minor) +{ + int i; + + if (!minor->dev_root) + return; + + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) + remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); +} diff --git a/linux-core/i915_gem_tiling.c b/linux-core/i915_gem_tiling.c new file mode 100644 index 00000000..3ac1f353 --- /dev/null +++ b/linux-core/i915_gem_tiling.c @@ -0,0 +1,310 @@ +/* + * Copyright © 2008 Intel Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS + * IN THE SOFTWARE. + * + * Authors: + * Eric Anholt <eric@anholt.net> + * + */ + +#include "drmP.h" +#include "drm.h" +#include "i915_drm.h" +#include "i915_drv.h" + +/** @file i915_gem_tiling.c + * + * Support for managing tiling state of buffer objects. + * + * The idea behind tiling is to increase cache hit rates by rearranging + * pixel data so that a group of pixel accesses are in the same cacheline. + * Performance improvement from doing this on the back/depth buffer are on + * the order of 30%. + * + * Intel architectures make this somewhat more complicated, though, by + * adjustments made to addressing of data when the memory is in interleaved + * mode (matched pairs of DIMMS) to improve memory bandwidth. + * For interleaved memory, the CPU sends every sequential 64 bytes + * to an alternate memory channel so it can get the bandwidth from both. + * + * The GPU also rearranges its accesses for increased bandwidth to interleaved + * memory, and it matches what the CPU does for non-tiled. 
However, when tiled + * it does it a little differently, since one walks addresses not just in the + * X direction but also Y. So, along with alternating channels when bit + * 6 of the address flips, it also alternates when other bits flip -- Bits 9 + * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) + * are common to both the 915 and 965-class hardware. + * + * The CPU also sometimes XORs in higher bits as well, to improve + * bandwidth doing strided access like we do so frequently in graphics. This + * is called "Channel XOR Randomization" in the MCH documentation. The result + * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address + * decode. + * + * All of this bit 6 XORing has an effect on our memory management, + * as we need to make sure that the 3d driver can correctly address object + * contents. + * + * If we don't have interleaved memory, all tiling is safe and no swizzling is + * required. + * + * When bit 17 is XORed in, we simply refuse to tile at all. Bit + * 17 is not just a page offset, so as we page an object out and back in, + * individual pages in it will have different bit 17 addresses, resulting in + * each 64 bytes being swapped with its neighbor! + * + * Otherwise, if interleaved, we have to tell the 3d driver what address + * swizzling it needs to do, since it's writing with the CPU to the pages + * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the + * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling + * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order + * to match what the GPU expects. + */ + +/** + * Detects bit 6 swizzling of address lookup between IGD access and CPU + * access through main memory. + */ +void +i915_gem_detect_bit_6_swizzle(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct pci_dev *bridge; + uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + int mchbar_offset; + char __iomem *mchbar; + int ret; + + bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); + if (bridge == NULL) { + DRM_ERROR("Couldn't get bridge device\n"); + return; + } + + ret = pci_enable_device(bridge); + if (ret != 0) { + DRM_ERROR("pci_enable_device failed: %d\n", ret); + return; + } + + if (IS_I965G(dev)) + mchbar_offset = 0x48; + else + mchbar_offset = 0x44; + + /* Use resource 2 for our BAR that's stashed in a nonstandard location, + * since the bridge would only ever use standard BARs 0-1 (though it + * doesn't anyway) + */ + ret = pci_read_base(bridge, pci_bar_mem64, &bridge->resource[2], + mchbar_offset); + if (ret != 0) { + DRM_ERROR("pci_read_base failed: %d\n", ret); + return; + } + + mchbar = ioremap(pci_resource_start(bridge, 2), + pci_resource_len(bridge, 2)); + if (mchbar == NULL) { + DRM_ERROR("Couldn't map MCHBAR to determine tile swizzling\n"); + return; + } + + if (IS_I965G(dev) && !IS_I965GM(dev)) { + uint32_t chdecmisc; + + /* On the 965, channel interleave appears to be determined by + * the flex bit. If flex is set, then the ranks (sides of a + * DIMM) of memory will be "stacked" (physical addresses walk + * through one rank then move on to the next, flipping channels + * or not depending on rank configuration). The GPU in this + * case does exactly the same addressing as the CPU. + * + * Unlike the 945, channel randomization does not + * appear to be available.
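To make the comment above concrete: with I915_BIT_6_SWIZZLE_9_10, the CPU has to XOR bits 9 and 10 of a byte address into bit 6 to walk tiled memory in the same order the GPU does. A hedged sketch of that address transform (an illustrative helper, not part of the patch):

static uint64_t
swizzle_addr_9_10(uint64_t addr)
{
	/* Bit 6 of the swizzled address is bit6 ^ bit9 ^ bit10. */
	uint64_t bit6 = ((addr >> 9) ^ (addr >> 10)) & 1;

	return addr ^ (bit6 << 6);
}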
+ * + * XXX: While the G965 doesn't appear to do any interleaving + * when the DIMMs are not exactly matched, the G4x chipsets + * might do so for "L-shaped" configurations, and will need to be + * detected. + * + * L-shaped configuration: + * + * +-----+ + * | | + * |DIMM2| <-- non-interleaved + * +-----+ + * +-----+ +-----+ + * | | | | + * |DIMM0| |DIMM1| <-- interleaved area + * +-----+ +-----+ + */ + chdecmisc = readb(mchbar + CHDECMISC); + + if (chdecmisc == 0xff) { + DRM_ERROR("Couldn't read from MCHBAR. " + "Disabling tiling.\n"); + } else if (chdecmisc & CHDECMISC_FLEXMEMORY) { + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } else { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } + } else if (IS_I9XX(dev)) { + uint32_t dcc; + + /* On 915-945 and GM965, channel interleave by the CPU is + * determined by DCC. The CPU will alternate based on bit 6 + * in interleaved mode, and the GPU will then also alternate + * on bit 6, 9, and 10 for X, but the CPU may also optionally + * alternate based on bit 17 (XOR not disabled and XOR + * bit == 17). + */ + dcc = readl(mchbar + DCC); + switch (dcc & DCC_ADDRESSING_MODE_MASK) { + case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + break; + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: + if (IS_I915G(dev) || IS_I915GM(dev) || + dcc & DCC_CHANNEL_XOR_DISABLE) { + swizzle_x = I915_BIT_6_SWIZZLE_9_10; + swizzle_y = I915_BIT_6_SWIZZLE_9; + } else if (IS_I965GM(dev)) { + /* GM965 only does bit 11-based channel + * randomization + */ + swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; + swizzle_y = I915_BIT_6_SWIZZLE_9_11; + } else { + /* Bit 17 or perhaps other swizzling */ + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + } + break; + } + if (dcc == 0xffffffff) { + DRM_ERROR("Couldn't read from MCHBAR. " + "Disabling tiling.\n"); + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; + } + } else { + /* As far as we know, the 865 doesn't have these bit 6 + * swizzling issues. + */ + swizzle_x = I915_BIT_6_SWIZZLE_NONE; + swizzle_y = I915_BIT_6_SWIZZLE_NONE; + } + + iounmap(mchbar); + + dev_priv->mm.bit_6_swizzle_x = swizzle_x; + dev_priv->mm.bit_6_swizzle_y = swizzle_y; +}
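/*
 * Illustrative sketch, not part of this patch: how userspace might drive the
 * set_tiling ioctl below through libdrm.  Assumes libdrm's
 * drmCommandWriteRead() and the DRM_I915_GEM_SET_TILING command index; the
 * kernel writes the swizzle mode it chose back into the same struct so the
 * 3d driver knows how to address the object.
 */
#include <string.h>
#include <stdint.h>
#include "xf86drm.h"
#include "i915_drm.h"

static int set_tiling_example(int fd, uint32_t handle)
{
	struct drm_i915_gem_set_tiling args;

	memset(&args, 0, sizeof(args));
	args.handle = handle;
	args.tiling_mode = I915_TILING_X;

	if (drmCommandWriteRead(fd, DRM_I915_GEM_SET_TILING,
				&args, sizeof(args)) != 0)
		return -1;

	/* args.swizzle_mode now says which bits to XOR into bit 6. */
	return args.swizzle_mode;
}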
+ +/** + * Sets the tiling mode of an object, returning the required swizzling of + * bit 6 of addresses in the object. + */ +int +i915_gem_set_tiling(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_set_tiling *args = data; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; + obj_priv = obj->driver_private; + + mutex_lock(&dev->struct_mutex); + + if (args->tiling_mode == I915_TILING_NONE) { + obj_priv->tiling_mode = I915_TILING_NONE; + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + } else { + if (args->tiling_mode == I915_TILING_X) + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; + else + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + /* If we can't handle the swizzling, make it untiled. */ + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { + args->tiling_mode = I915_TILING_NONE; + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + } + } + obj_priv->tiling_mode = args->tiling_mode; + + mutex_unlock(&dev->struct_mutex); + + drm_gem_object_unreference(obj); + + return 0; +} + +/** + * Returns the current tiling mode and required bit 6 swizzling for the object. + */ +int +i915_gem_get_tiling(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct drm_i915_gem_get_tiling *args = data; + struct drm_i915_private *dev_priv = dev->dev_private; + struct drm_gem_object *obj; + struct drm_i915_gem_object *obj_priv; + + obj = drm_gem_object_lookup(dev, file_priv, args->handle); + if (obj == NULL) + return -EINVAL; + obj_priv = obj->driver_private; + + mutex_lock(&dev->struct_mutex); + + args->tiling_mode = obj_priv->tiling_mode; + switch (obj_priv->tiling_mode) { + case I915_TILING_X: + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; + break; + case I915_TILING_Y: + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + break; + case I915_TILING_NONE: + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; + break; + default: + DRM_ERROR("unknown tiling mode\n"); + } + + mutex_unlock(&dev->struct_mutex); + + drm_gem_object_unreference(obj); + + return 0; +} diff --git a/linux-core/i915_opregion.c b/linux-core/i915_opregion.c new file mode 100644 index 00000000..1fa599ea --- /dev/null +++ b/linux-core/i915_opregion.c @@ -0,0 +1,389 @@ +/* + * + * Copyright 2008 Intel Corporation <hong.liu@intel.com> + * Copyright 2008 Red Hat <mjg@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + */ + +#include <linux/acpi.h> + +#include "drmP.h" +#include "i915_drm.h" +#include "i915_drv.h" + +#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,25) +#define PCI_ASLE 0xe4 +#define PCI_ASLS 0xfc + +#define OPREGION_SZ (8*1024) +#define OPREGION_HEADER_OFFSET 0 +#define OPREGION_ACPI_OFFSET 0x100 +#define OPREGION_SWSCI_OFFSET 0x200 +#define OPREGION_ASLE_OFFSET 0x300 +#define OPREGION_VBT_OFFSET 0x1000 + +#define OPREGION_SIGNATURE "IntelGraphicsMem" +#define MBOX_ACPI (1<<0) +#define MBOX_SWSCI (1<<1) +#define MBOX_ASLE (1<<2) + +/* _DOD id definitions */ +#define OUTPUT_CONNECTOR_MSK 0xf000 +#define OUTPUT_CONNECTOR_OFFSET 12 + +#define OUTPUT_PORT_MSK 0x00f0 +#define OUTPUT_PORT_OFFSET 4 + #define OUTPUT_PORT_ANALOG 0 + #define OUTPUT_PORT_LVDS 1 + #define OUTPUT_PORT_SDVOB 2 + #define OUTPUT_PORT_SDVOC 3 + #define OUTPUT_PORT_TV 4 + +#define OUTPUT_DISPLAY_MSK 0x0f00 +#define OUTPUT_DISPLAY_OFFSET 8 + #define OUTPUT_DISPLAY_OTHER 0 + #define OUTPUT_DISPLAY_VGA 1 + #define OUTPUT_DISPLAY_TV 2 + #define OUTPUT_DISPLAY_DIGI 3 + #define OUTPUT_DISPLAY_FLAT_PANEL 4 + +/* predefined id for integrated LVDS and VGA connector */ +#define OUTPUT_INT_LVDS 0x00000110 +#define OUTPUT_INT_VGA 0x80000100 + +struct opregion_header { + u8 signature[16]; + u32 size; + u32 opregion_ver; + u8 bios_ver[32]; + u8 vbios_ver[16]; + u8 driver_ver[16]; + u32 mboxes; + u8 reserved[164]; +} __attribute__((packed)); + +/* OpRegion mailbox #1: public ACPI methods */ +struct opregion_acpi { + u32 drdy; /* driver readiness */ + u32 csts; /* notification status */ + u32 cevt; /* current event */ + u8 rsvd1[20]; + u32 didl[8]; /* supported display devices ID list */ + u32 cpdl[8]; /* currently presented display list */ + u32 cadl[8]; /* currently active display list */ + u32 nadl[8]; /* next active devices list */ + u32 aslp; /* ASL sleep time-out */ + u32 tidx; /* toggle table index */ + u32 chpd; /* current hotplug enable indicator */ + u32 clid; /* current lid state */ + u32 cdck; /* current docking state */ + u32 sxsw; /* Sx state resume */ + u32 evts; /* ASL supported events */ + u32 cnot; /* current OS notification */ + u32 nrdy; /* driver status */ + u8 rsvd2[60]; +} __attribute__((packed)); + +/* OpRegion mailbox #2: SWSCI */ +struct opregion_swsci { + u32 scic; /* SWSCI command|status|data */ + u32 parm; /* command parameters */ + u32 dslp; /* driver sleep time-out */ + u8 rsvd[244]; +} __attribute__((packed)); + +/* OpRegion mailbox #3: ASLE */ +struct opregion_asle { + u32 ardy; /* driver readiness */ + u32 aslc; /* ASLE interrupt command */ + u32 tche; /* technology enabled indicator */ + u32 alsi; /* current ALS illuminance reading */ + u32 bclp; /* backlight brightness to set */ + u32 pfit; /* panel fitting state */ + u32 cblv; /* current brightness level */ + u16 bclm[20]; /* backlight level duty cycle mapping table */ + u32 cpfm; /* current panel fitting mode */ + u32 epfm; /* enabled panel fitting modes */ + u8 plut[74]; /* panel LUT and identifier */ + u32 pfmb; /* PWM freq and min brightness */ + u8 rsvd[102]; +} __attribute__((packed)); + +/* ASLE irq request bits */ +#define ASLE_SET_ALS_ILLUM (1 << 0) +#define ASLE_SET_BACKLIGHT (1 << 1) +#define ASLE_SET_PFIT (1 << 2) +#define ASLE_SET_PWM_FREQ (1 << 3) +#define ASLE_REQ_MSK 0xf + +/* response bits of ASLE irq request */ +#define ASLE_ALS_ILLUM_FAIL (2<<10) +#define ASLE_BACKLIGHT_FAIL (2<<12) +#define ASLE_PFIT_FAIL (2<<14) +#define ASLE_PWM_FREQ_FAIL (2<<16) + +/* ASLE backlight brightness to set */ +#define ASLE_BCLP_VALID (1<<31)
+#define ASLE_BCLP_MSK (~(1<<31)) + +/* ASLE panel fitting request */ +#define ASLE_PFIT_VALID (1<<31) +#define ASLE_PFIT_CENTER (1<<0) +#define ASLE_PFIT_STRETCH_TEXT (1<<1) +#define ASLE_PFIT_STRETCH_GFX (1<<2) + +/* PWM frequency and minimum brightness */ +#define ASLE_PFMB_BRIGHTNESS_MASK (0xff) +#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) +#define ASLE_PFMB_PWM_MASK (0x7ffffe00) +#define ASLE_PFMB_PWM_VALID (1<<31) + +#define ASLE_CBLV_VALID (1<<31) + +static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct opregion_asle *asle = dev_priv->opregion.asle; + u32 blc_pwm_ctl; + + if (!(bclp & ASLE_BCLP_VALID)) + return ASLE_BACKLIGHT_FAIL; + + bclp &= ASLE_BCLP_MSK; + /* bclp is unsigned, so only the upper bound needs checking */ + if (bclp > 255) + return ASLE_BACKLIGHT_FAIL; + + blc_pwm_ctl = I915_READ(BLC_PWM_CTL); + blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; + /* bclp * 0x101 widens the 8-bit brightness into the 16-bit duty cycle */ + I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101) - 1)); + asle->cblv = (bclp * 0x64) / 0xff | ASLE_CBLV_VALID; + + return 0; +} + +static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) +{ + return 0; +} + +static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + if (pfmb & ASLE_PFMB_PWM_VALID) { + u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); + u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; + blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; + pwm = pwm >> 9; + /* FIXME: what do we do with the PWM? */ + } + return 0; +} + +static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) +{ + if (!(pfit & ASLE_PFIT_VALID)) + return ASLE_PFIT_FAIL; + return 0; +} + +void opregion_asle_intr(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct opregion_asle *asle = dev_priv->opregion.asle; + u32 asle_stat = 0; + u32 asle_req; + + if (!asle) + return; + + asle_req = asle->aslc & ASLE_REQ_MSK; + + if (!asle_req) { + DRM_DEBUG("non asle set request??\n"); + return; + } + + if (asle_req & ASLE_SET_ALS_ILLUM) + asle_stat |= asle_set_als_illum(dev, asle->alsi); + + if (asle_req & ASLE_SET_BACKLIGHT) + asle_stat |= asle_set_backlight(dev, asle->bclp); + + if (asle_req & ASLE_SET_PFIT) + asle_stat |= asle_set_pfit(dev, asle->pfit); + + if (asle_req & ASLE_SET_PWM_FREQ) + asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); + + asle->aslc = asle_stat; +} + +#define ASLE_ALS_EN (1<<0) +#define ASLE_BLC_EN (1<<1) +#define ASLE_PFIT_EN (1<<2) +#define ASLE_PFMB_EN (1<<3) + +void opregion_enable_asle(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct opregion_asle *asle = dev_priv->opregion.asle; + + if (asle) { + if (IS_MOBILE(dev)) { + u32 pipeb_stats = I915_READ(PIPEBSTAT); + /* Some hardware uses the legacy backlight controller + to signal interrupts, so we need to set up pipe B + to generate an IRQ on writes */ + pipeb_stats |= I915_LEGACY_BLC_EVENT_ENABLE; + I915_WRITE(PIPEBSTAT, pipeb_stats); + + dev_priv->irq_mask_reg &= + ~I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; + } + + dev_priv->irq_mask_reg &= ~I915_ASLE_INTERRUPT; + + asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | + ASLE_PFMB_EN; + asle->ardy = 1; + } +} + +#define ACPI_EV_DISPLAY_SWITCH (1<<0) +#define ACPI_EV_LID (1<<1) +#define ACPI_EV_DOCK (1<<2) + +static struct intel_opregion *system_opregion; + +int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, + void *data) +{ + /* The only video events relevant to opregion are 0x80. These indicate + either a docking event, lid switch or display switch request.
In + Linux, these are handled by the dock, button and video drivers. + We might want to fix the video driver to be opregion-aware in + the future, but right now we just indicate to the firmware that the + request has been handled */ + + struct opregion_acpi *acpi; + + if (!system_opregion) + return NOTIFY_DONE; + + acpi = system_opregion->acpi; + acpi->csts = 0; + + return NOTIFY_OK; +} + +static struct notifier_block intel_opregion_notifier = { + .notifier_call = intel_opregion_video_event, +}; + +int intel_opregion_init(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + void *base; + u32 asls, mboxes; + int err = 0; + + pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); + DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); + if (asls == 0) { + DRM_DEBUG("ACPI OpRegion not supported!\n"); + return -ENOTSUPP; + } + + base = ioremap(asls, OPREGION_SZ); + if (!base) + return -ENOMEM; + + opregion->header = base; + if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { + DRM_DEBUG("opregion signature mismatch\n"); + err = -EINVAL; + goto err_out; + } + + mboxes = opregion->header->mboxes; + if (mboxes & MBOX_ACPI) { + DRM_DEBUG("Public ACPI methods supported\n"); + opregion->acpi = base + OPREGION_ACPI_OFFSET; + } else { + DRM_DEBUG("Public ACPI methods not supported\n"); + err = -ENOTSUPP; + goto err_out; + } + opregion->enabled = 1; + + if (mboxes & MBOX_SWSCI) { + DRM_DEBUG("SWSCI supported\n"); + opregion->swsci = base + OPREGION_SWSCI_OFFSET; + } + if (mboxes & MBOX_ASLE) { + DRM_DEBUG("ASLE supported\n"); + opregion->asle = base + OPREGION_ASLE_OFFSET; + } + + /* Notify BIOS we are ready to handle ACPI video ext notifs. + * Right now, all the events are handled by the ACPI video module. + * We don't actually need to do anything with them. */ + opregion->acpi->csts = 0; + opregion->acpi->drdy = 1; + + system_opregion = opregion; + register_acpi_notifier(&intel_opregion_notifier); + + return 0; + +err_out: + iounmap(opregion->header); + opregion->header = NULL; + return err; +} + +void intel_opregion_free(struct drm_device *dev) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + struct intel_opregion *opregion = &dev_priv->opregion; + + if (!opregion->enabled) + return; + + opregion->acpi->drdy = 0; + + system_opregion = NULL; + unregister_acpi_notifier(&intel_opregion_notifier); + + /* just clear all opregion memory pointers now */ + iounmap(opregion->header); + opregion->header = NULL; + opregion->acpi = NULL; + opregion->swsci = NULL; + opregion->asle = NULL; + + opregion->enabled = 0; +} +#endif diff --git a/linux-core/i915_suspend.c b/linux-core/i915_suspend.c new file mode 120000 index 00000000..b55754c5 --- /dev/null +++ b/linux-core/i915_suspend.c @@ -0,0 +1 @@ +../shared-core/i915_suspend.c \ No newline at end of file
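/*
 * Illustrative sketch, not part of this patch: each OpRegion mailbox defined
 * in i915_opregion.c above is a fixed 0x100-byte firmware structure, so the
 * packed struct layouts can be pinned down at compile time with the kernel's
 * BUILD_BUG_ON() from <linux/kernel.h>.  The helper name is hypothetical.
 */
static inline void intel_opregion_layout_checks(void)
{
	BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
	BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
	BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
}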
diff --git a/linux-core/intel_display.c b/linux-core/intel_display.c index 0236bbc9..fbe06f7c 100644 --- a/linux-core/intel_display.c +++ b/linux-core/intel_display.c @@ -1506,7 +1506,9 @@ struct drm_framebuffer *intel_user_framebuffer_create(struct drm_device *dev, if (!intel_fb) return NULL; - drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); + if (!drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs)) + return NULL; + drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); if (filp) { diff --git a/linux-core/nouveau_bo.c b/linux-core/nouveau_bo.c index 86347e03..ab3b23a4 100644 --- a/linux-core/nouveau_bo.c +++ b/linux-core/nouveau_bo.c @@ -229,7 +229,7 @@ out_cleanup: if (tmp_mem.mm_node) { mutex_lock(&dev->struct_mutex); if (tmp_mem.mm_node != bo->pinned_node) - drm_memrange_put_block(tmp_mem.mm_node); + drm_mm_put_block(tmp_mem.mm_node); tmp_mem.mm_node = NULL; mutex_unlock(&dev->struct_mutex); } diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c index 81704ea1..cc4d5a92 100644 --- a/linux-core/nouveau_sgdma.c +++ b/linux-core/nouveau_sgdma.c @@ -280,7 +280,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev) struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_ttm_backend *be; struct drm_scatter_gather sgreq; - struct drm_memrange_node mm_node; + struct drm_mm_node mm_node; struct drm_bo_mem_reg mem; int ret; diff --git a/linux-core/nv50_display.c b/linux-core/nv50_display.c index eeaa0e68..6665a32f 100644 --- a/linux-core/nv50_display.c +++ b/linux-core/nv50_display.c @@ -125,6 +125,9 @@ static int nv50_display_init(struct nv50_display *display) /* enable clock change interrupts. */ NV_WRITE(NV50_PDISPLAY_SUPERVISOR_INTR, NV_READ(NV50_PDISPLAY_SUPERVISOR_INTR) | 0x70); + /* enable hotplug interrupts */ + NV_WRITE(NV50_PCONNECTOR_HOTPLUG_INTR, 0x7FFF7FFF); + display->init_done = true; return 0; @@ -171,6 +174,9 @@ static int nv50_display_disable(struct nv50_display *display) /* disable clock change interrupts. */ NV_WRITE(NV50_PDISPLAY_SUPERVISOR_INTR, NV_READ(NV50_PDISPLAY_SUPERVISOR_INTR) & ~0x70); + /* disable hotplug interrupts */ + NV_WRITE(NV50_PCONNECTOR_HOTPLUG_INTR, 0); + display->init_done = false; return 0; diff --git a/linux-core/nv50_kms_wrapper.c b/linux-core/nv50_kms_wrapper.c index 8ae72f48..1320e5ed 100644 --- a/linux-core/nv50_kms_wrapper.c +++ b/linux-core/nv50_kms_wrapper.c @@ -970,6 +970,9 @@ static enum drm_connector_status nv50_kms_connector_detect(struct drm_connector /* notify fb of changes */ dev->mode_config.funcs->fb_changed(dev); + + /* send a hotplug event when appropriate.
*/ + drm_sysfs_hotplug_event(dev); } return drm_connector->status; diff --git a/linux-core/radeon_buffer.c b/linux-core/radeon_buffer.c index 5fdd9c35..900d450a 100644 --- a/linux-core/radeon_buffer.c +++ b/linux-core/radeon_buffer.c @@ -220,7 +220,7 @@ out_cleanup: if (tmp_mem.mm_node) { mutex_lock(&dev->struct_mutex); if (tmp_mem.mm_node != bo->pinned_node) - drm_memrange_put_block(tmp_mem.mm_node); + drm_mm_put_block(tmp_mem.mm_node); tmp_mem.mm_node = NULL; mutex_unlock(&dev->struct_mutex); } diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c index 5f51c813..7676ca4c 100644 --- a/linux-core/radeon_drv.c +++ b/linux-core/radeon_drv.c @@ -59,6 +59,28 @@ static int dri_library_name(struct drm_device * dev, char * buf) "r300")); } +static int radeon_suspend(struct drm_device *dev, pm_message_t state) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + /* Disable *all* interrupts */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) + RADEON_WRITE(R500_DxMODE_INT_MASK, 0); + RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); + return 0; +} + +static int radeon_resume(struct drm_device *dev) +{ + drm_radeon_private_t *dev_priv = dev->dev_private; + + /* Restore interrupt registers */ + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) + RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); + return 0; +} + static struct pci_device_id pciidlist[] = { radeon_PCI_IDS }; @@ -94,6 +116,8 @@ static struct drm_driver driver = { .postclose = radeon_driver_postclose, .lastclose = radeon_driver_lastclose, .unload = radeon_driver_unload, + .suspend = radeon_suspend, + .resume = radeon_resume, .get_vblank_counter = radeon_get_vblank_counter, .enable_vblank = radeon_enable_vblank, .disable_vblank = radeon_disable_vblank, diff --git a/linux-core/radeon_reg.h b/linux-core/radeon_reg.h index 59108979..b4f0ac08 100644 --- a/linux-core/radeon_reg.h +++ b/linux-core/radeon_reg.h @@ -3991,7 +3991,6 @@ # define R300_TILE_SIZE_32 (2 << 4) # define R300_SUBPIXEL_1_12 (0 << 16) # define R300_SUBPIXEL_1_16 (1 << 16) -#define R300_GB_SELECT 0x401c #define R300_GB_ENABLE 0x4008 #define R300_GB_AA_CONFIG 0x4020 #define R400_GB_PIPE_SELECT 0x402c @@ -4069,17 +4068,6 @@ # define R300_GL_CLIP_SPACE_DEF (0 << 22) # define R300_DX_CLIP_SPACE_DEF (1 << 22) # define R500_TCL_STATE_OPTIMIZATION (1 << 23) -#define R300_VAP_VTE_CNTL 0x20B0 -# define R300_VPORT_X_SCALE_ENA (1 << 0) -# define R300_VPORT_X_OFFSET_ENA (1 << 1) -# define R300_VPORT_Y_SCALE_ENA (1 << 2) -# define R300_VPORT_Y_OFFSET_ENA (1 << 3) -# define R300_VPORT_Z_SCALE_ENA (1 << 4) -# define R300_VPORT_Z_OFFSET_ENA (1 << 5) -# define R300_VTX_XY_FMT (1 << 8) -# define R300_VTX_Z_FMT (1 << 9) -# define R300_VTX_W0_FMT (1 << 10) -#define R300_VAP_VTX_STATE_CNTL 0x2180 #define R300_VAP_PSC_SGN_NORM_CNTL 0x21DC #define R300_VAP_PROG_STREAM_CNTL_0 0x2150 # define R300_DATA_TYPE_0_SHIFT 0 @@ -4469,7 +4457,7 @@ # define R300_ENDIAN_SWAP_HALF_DWORD (3 << 0) # define R300_MACRO_TILE (1 << 2) -#define R300_TX_BORDER_COLOR_0 0x45c0 +#define R300_TX_BORDER_COLOR_0 0x45c0 #define R300_TX_ENABLE 0x4104 # define R300_TEX_0_ENABLE (1 << 0) @@ -4757,8 +4745,8 @@ # define R300_READ_ENABLE (1 << 2) #define R300_RB3D_ABLENDCNTL 0x4e08 #define R300_RB3D_DSTCACHE_CTLSTAT 0x4e4c -#define R300_RB3D_COLOROFFSET0 0x4e28 -#define R300_RB3D_COLORPITCH0 0x4e38 +#define R300_RB3D_COLOROFFSET0 0x4e28 +#define R300_RB3D_COLORPITCH0 0x4e38 # define R300_COLORTILE (1 << 16)
# define R300_COLORENDIAN_WORD (1 << 19) # define R300_COLORENDIAN_DWORD (2 << 19) @@ -4774,7 +4762,7 @@ # define R300_COLORFORMAT_UV88 (13 << 21) # define R300_COLORFORMAT_ARGB4444 (15 << 21) -#define R300_RB3D_AARESOLVE_CTL 0x4e88 +#define R300_RB3D_AARESOLVE_CTL 0x4e88 #define R300_RB3D_COLOR_CHANNEL_MASK 0x4e0c # define R300_BLUE_MASK_EN (1 << 0) # define R300_GREEN_MASK_EN (1 << 1) diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index b31ac2d4..423208b0 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -148,7 +148,9 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data, } info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr); +#ifdef XGI_HAVE_FENCE drm_fence_flush_old(info->dev, 0, info->next_sequence); +#endif /* XGI_HAVE_FENCE */ return 0; } diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index f8ed7de8..bfe9acdf 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -37,7 +37,9 @@ static struct pci_device_id pciidlist[] = { xgi_PCI_IDS }; +#ifdef XGI_HAVE_FENCE extern struct drm_fence_driver xgi_fence_driver; +#endif /* XGI_HAVE_FENCE */ int xgi_bootstrap(struct drm_device *, void *, struct drm_file *); @@ -47,6 +49,8 @@ static struct drm_ioctl_desc xgi_ioctls[] = { DRM_IOCTL_DEF(DRM_XGI_FREE, xgi_free_ioctl, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_SUBMIT_CMDLIST, xgi_submit_cmdlist, DRM_AUTH), DRM_IOCTL_DEF(DRM_XGI_STATE_CHANGE, xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER), + DRM_IOCTL_DEF(DRM_XGI_SET_FENCE, xgi_set_fence_ioctl, DRM_AUTH), + DRM_IOCTL_DEF(DRM_XGI_WAIT_FENCE, xgi_wait_fence_ioctl, DRM_AUTH), }; static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls); @@ -58,6 +62,7 @@ static void xgi_driver_lastclose(struct drm_device * dev); static void xgi_reclaim_buffers_locked(struct drm_device * dev, struct drm_file * filp); static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS); +static int xgi_kern_isr_postinstall(struct drm_device * dev); static struct drm_driver driver = { @@ -70,7 +75,7 @@ static struct drm_driver driver = { .lastclose = xgi_driver_lastclose, .dma_quiescent = NULL, .irq_preinstall = NULL, - .irq_postinstall = NULL, + .irq_postinstall = xgi_kern_isr_postinstall, .irq_uninstall = NULL, .irq_handler = xgi_kern_isr, .reclaim_buffers = drm_core_reclaim_buffers, @@ -100,7 +105,9 @@ static struct drm_driver driver = { .remove = __devexit_p(drm_cleanup_pci), }, +#ifdef XGI_HAVE_FENCE .fence_driver = &xgi_fence_driver, +#endif /* XGI_HAVE_FENCE */ .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -355,7 +362,10 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) DRM_WRITE32(info->mmio_map, 0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS, cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits)); +#ifdef XGI_HAVE_FENCE xgi_fence_handler(dev); +#endif /* XGI_HAVE_FENCE */ + DRM_WAKEUP(&info->fence_queue); return IRQ_HANDLED; } else { return IRQ_NONE; @@ -363,6 +373,15 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS) } +int xgi_kern_isr_postinstall(struct drm_device * dev) +{ + struct xgi_info *info = dev->dev_private; + + DRM_INIT_WAITQUEUE(&info->fence_queue); + return 0; +} + + int xgi_driver_load(struct drm_device *dev, unsigned long flags) { struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER); diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 9408073e..5ad3ddb3 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -35,11 +35,11 @@ #define DRIVER_NAME "xgi" #define DRIVER_DESC "XGI XP5 / XP10 / XG47" -#define DRIVER_DATE "20071003" +#define DRIVER_DATE "20080612" #define DRIVER_MAJOR 1
-#define DRIVER_MINOR 1 -#define DRIVER_PATCHLEVEL 3 +#define DRIVER_MINOR 2 +#define DRIVER_PATCHLEVEL 0 #include "xgi_cmdlist.h" #include "xgi_drm.h" @@ -74,6 +74,7 @@ struct xgi_info { struct xgi_cmdring_info cmdring; DRM_SPINTYPE fence_lock; + wait_queue_head_t fence_queue; unsigned complete_sequence; unsigned next_sequence; }; @@ -86,7 +87,7 @@ extern int xgi_fb_heap_init(struct xgi_info * info); extern int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc, struct drm_file * filp); -extern int xgi_free(struct xgi_info * info, unsigned long index, +extern int xgi_free(struct xgi_info * info, unsigned int index, struct drm_file * filp); extern int xgi_pcie_heap_init(struct xgi_info * info); @@ -98,12 +99,24 @@ extern void xgi_disable_mmio(struct xgi_info * info); extern void xgi_enable_ge(struct xgi_info * info); extern void xgi_disable_ge(struct xgi_info * info); +/* TTM-style fences. + */ +#ifdef XGI_HAVE_FENCE extern void xgi_poke_flush(struct drm_device * dev, uint32_t class); extern int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type); extern void xgi_fence_handler(struct drm_device * dev); extern int xgi_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags); +#endif /* XGI_HAVE_FENCE */ + + +/* Non-TTM-style fences. + */ +extern int xgi_set_fence_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); +extern int xgi_wait_fence_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp); extern int xgi_alloc_ioctl(struct drm_device * dev, void * data, struct drm_file * filp); diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 3f50fe8f..e793bd3f 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -93,7 +93,7 @@ int xgi_alloc_ioctl(struct drm_device * dev, void * data, } -int xgi_free(struct xgi_info * info, unsigned long index, +int xgi_free(struct xgi_info * info, unsigned int index, struct drm_file * filp) { int err; @@ -111,7 +111,7 @@ int xgi_free_ioctl(struct drm_device * dev, void * data, { struct xgi_info *info = dev->dev_private; - return xgi_free(info, *(unsigned long *) data, filp); + return xgi_free(info, *(unsigned int *) data, filp); } diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c index 63ed29ee..38cf9e4f 100644 --- a/linux-core/xgi_fence.c +++ b/linux-core/xgi_fence.c @@ -30,6 +30,76 @@ #include "xgi_misc.h" #include "xgi_cmdlist.h" +static int xgi_low_level_fence_emit(struct drm_device *dev, u32 *sequence) +{ + struct xgi_info *const info = dev->dev_private; + + if (info == NULL) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + DRM_SPINLOCK(&info->fence_lock); + info->next_sequence++; + if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) { + info->next_sequence = 1; + } + + *sequence = (u32) info->next_sequence; + DRM_SPINUNLOCK(&info->fence_lock); + + + xgi_emit_irq(info); + return 0; +} + +#define GET_BEGIN_ID(i) (le32_to_cpu(DRM_READ32((i)->mmio_map, 0x2820)) \ + & BEGIN_BEGIN_IDENTIFICATION_MASK) + +static int xgi_low_level_fence_wait(struct drm_device *dev, unsigned *sequence) +{ + struct xgi_info *const info = dev->dev_private; + unsigned int cur_fence; + int ret = 0; + + if (info == NULL) { + DRM_ERROR("called with no initialization\n"); + return -EINVAL; + } + + /* Assume that the user has missed the current sequence number + * by about a day, rather than that she wants to wait for years + * using fences.
+ */ + DRM_WAIT_ON(ret, info->fence_queue, 3 * DRM_HZ, + ((((cur_fence = GET_BEGIN_ID(info)) + - *sequence) & BEGIN_BEGIN_IDENTIFICATION_MASK) + <= (1 << 18))); + + info->complete_sequence = cur_fence; + *sequence = cur_fence; + + return ret; +} + + +int xgi_set_fence_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) +{ + (void) filp; + return xgi_low_level_fence_emit(dev, (u32 *) data); +} + + +int xgi_wait_fence_ioctl(struct drm_device * dev, void * data, + struct drm_file * filp) +{ + (void) filp; + return xgi_low_level_fence_wait(dev, (u32 *) data); +} + + +#ifdef XGI_HAVE_FENCE static void xgi_fence_poll(struct drm_device * dev, uint32_t class, uint32_t waiting_types) { @@ -68,25 +138,18 @@ int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * sequence, uint32_t * native_type) { - struct xgi_info * info = dev->dev_private; + int err; - if ((info == NULL) || (class != 0)) - return -EINVAL; - - - DRM_SPINLOCK(&info->fence_lock); - info->next_sequence++; - if (info->next_sequence > BEGIN_BEGIN_IDENTIFICATION_MASK) { - info->next_sequence = 1; - } - DRM_SPINUNLOCK(&info->fence_lock); + (void) flags; + if (class != 0) + return -EINVAL; - xgi_emit_irq(info); + err = xgi_low_level_fence_emit(dev, sequence); + if (err) + return err; - *sequence = (uint32_t) info->next_sequence; *native_type = DRM_FENCE_TYPE_EXE; - return 0; } @@ -120,3 +183,4 @@ struct drm_fence_driver xgi_fence_driver = { .wait = NULL }; +#endif /* XGI_HAVE_FENCE */
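/*
 * Illustrative sketch, not part of this patch: the DRM_WAIT_ON condition in
 * xgi_low_level_fence_wait above is a wraparound-safe sequence comparison.
 * Since hardware IDs wrap at BEGIN_BEGIN_IDENTIFICATION_MASK, "has this
 * fence passed?" is decided by masking the difference and testing it against
 * a window, never by comparing raw values.  The helper name is hypothetical.
 */
static int xgi_fence_passed(u32 current_id, u32 wanted, u32 mask)
{
	/* A small masked difference means wanted is at or behind current_id. */
	return ((current_id - wanted) & mask) <= (1 << 18);
}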