Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/ati_pcigart.c    |    6
-rw-r--r--  linux-core/drmP.h           |   91
-rw-r--r--  linux-core/drm_agpsupport.c |   16
-rw-r--r--  linux-core/drm_bo.c         |  270
-rw-r--r--  linux-core/drm_bo_move.c    |   18
-rw-r--r--  linux-core/drm_compat.c     |    2
-rw-r--r--  linux-core/drm_drv.c        |    5
-rw-r--r--  linux-core/drm_irq.c        |  354
-rw-r--r--  linux-core/drm_object.c     |    2
-rw-r--r--  linux-core/drm_objects.h    |  127
-rw-r--r--  linux-core/drm_scatter.c    |   10
-rw-r--r--  linux-core/drm_ttm.c        |  139
-rw-r--r--  linux-core/drm_vm.c         |    8
-rw-r--r--  linux-core/i810_dma.c       |   22
-rw-r--r--  linux-core/i810_drv.h       |    2
-rw-r--r--  linux-core/i915_buffer.c    |   26
-rw-r--r--  linux-core/i915_compat.c    |   20
-rw-r--r--  linux-core/i915_drv.c       |   14
-rw-r--r--  linux-core/i915_ioc32.c     |   63
-rw-r--r--  linux-core/mach64_drv.c     |    6
-rw-r--r--  linux-core/mga_drv.c        |    7
-rw-r--r--  linux-core/nouveau_buffer.c |   15
-rw-r--r--  linux-core/nouveau_sgdma.c  |   33
-rw-r--r--  linux-core/r128_drv.c       |    7
-rw-r--r--  linux-core/radeon_buffer.c  |    6
-rw-r--r--  linux-core/radeon_drv.c     |   10
-rw-r--r--  linux-core/radeon_ms_drv.c  |    5
-rw-r--r--  linux-core/sis_mm.c         |    4
-rw-r--r--  linux-core/via_buffer.c     |    2
-rw-r--r--  linux-core/via_mm.c         |    4
30 files changed, 989 insertions(+), 305 deletions(-)
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index 5fafbb93..c669067b 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -80,7 +80,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
struct page *page;
int i;
- DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
+ DRM_DEBUG("%d order\n", order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
order);
@@ -97,7 +97,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
SetPageReserved(page);
}
- DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
+ DRM_DEBUG("returning 0x%08lx\n", address);
return (void *)address;
}
@@ -106,7 +106,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)
struct page *page;
int i;
int num_pages = 1 << order;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
page = virt_to_page((unsigned long)address);
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 2f0791c0..1c815c5f 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -104,10 +104,8 @@ struct drm_file;
#define DRIVER_HAVE_DMA 0x20
#define DRIVER_HAVE_IRQ 0x40
#define DRIVER_IRQ_SHARED 0x80
-#define DRIVER_IRQ_VBL 0x100
-#define DRIVER_DMA_QUEUE 0x200
-#define DRIVER_FB_DMA 0x400
-#define DRIVER_IRQ_VBL2 0x800
+#define DRIVER_DMA_QUEUE 0x100
+#define DRIVER_FB_DMA 0x200
/*@}*/
@@ -632,9 +630,51 @@ struct drm_driver {
int (*context_dtor) (struct drm_device *dev, int context);
int (*kernel_context_switch) (struct drm_device *dev, int old,
int new);
- void (*kernel_context_switch_unlock) (struct drm_device *dev);
- int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence);
- int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence);
+ void (*kernel_context_switch_unlock) (struct drm_device * dev);
+ /**
+ * get_vblank_counter - get raw hardware vblank counter
+ * @dev: DRM device
+ * @crtc: counter to fetch
+ *
+ * Driver callback for fetching a raw hardware vblank counter
+ * for @crtc. If a device doesn't have a hardware counter, the
+ * driver can simply return the value of drm_vblank_count and
+ * make the enable_vblank() and disable_vblank() hooks into no-ops,
+ * leaving interrupts enabled at all times.
+ *
+ * Wraparound handling and loss of events due to modesetting are dealt
+ * with in the DRM core code.
+ *
+ * RETURNS
+ * Raw vblank counter value.
+ */
+ u32 (*get_vblank_counter) (struct drm_device *dev, int crtc);
+
+ /**
+ * enable_vblank - enable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to enable
+ *
+ * Enable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ *
+ * RETURNS
+ * Zero on success, appropriate errno if the given @crtc's vblank
+ * interrupt cannot be enabled.
+ */
+ int (*enable_vblank) (struct drm_device *dev, int crtc);
+
+ /**
+ * disable_vblank - disable vblank interrupt events
+ * @dev: DRM device
+ * @crtc: which irq to disable
+ *
+ * Disable vblank interrupts for @crtc. If the device doesn't have
+ * a hardware vblank counter, this routine should be a no-op, since
+ * interrupts will have to stay on to keep the count accurate.
+ */
+ void (*disable_vblank) (struct drm_device *dev, int crtc);
int (*dri_library_name) (struct drm_device *dev, char * buf);
/**
@@ -653,7 +693,7 @@ struct drm_driver {
/* these have to be filled in */
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
void (*irq_preinstall) (struct drm_device *dev);
- void (*irq_postinstall) (struct drm_device *dev);
+ int (*irq_postinstall) (struct drm_device *dev);
void (*irq_uninstall) (struct drm_device *dev);
void (*reclaim_buffers) (struct drm_device *dev,
struct drm_file *file_priv);
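To make the new driver contract concrete, a device with a free-running frame counter might wire up the three hooks roughly as follows. This is a sketch only; the MYDEV_* accessors and registers are invented for illustration and are not part of this patch.

static u32 mydev_get_vblank_counter(struct drm_device *dev, int crtc)
{
	/* Read the free-running hardware frame counter for this pipe */
	return MYDEV_READ(dev, MYDEV_PIPE_FRAME_COUNT(crtc));
}

static int mydev_enable_vblank(struct drm_device *dev, int crtc)
{
	/* Unmask the vblank interrupt for this pipe */
	MYDEV_WRITE(dev, MYDEV_PIPE_IRQ_MASK(crtc),
		    MYDEV_READ(dev, MYDEV_PIPE_IRQ_MASK(crtc)) & ~MYDEV_VBLANK);
	return 0;
}

static void mydev_disable_vblank(struct drm_device *dev, int crtc)
{
	/* Mask the vblank interrupt; the cooked count stays accurate
	 * because drm_update_vblank_count() recovers missed events from
	 * the hardware counter. */
	MYDEV_WRITE(dev, MYDEV_PIPE_IRQ_MASK(crtc),
		    MYDEV_READ(dev, MYDEV_PIPE_IRQ_MASK(crtc)) | MYDEV_VBLANK);
}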
@@ -799,13 +839,19 @@ struct drm_device {
/** \name VBLANK IRQ support */
/*@{ */
- wait_queue_head_t vbl_queue; /**< VBLANK wait queue */
- atomic_t vbl_received;
- atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */
+ wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */
+ atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */
spinlock_t vbl_lock;
- struct list_head vbl_sigs; /**< signal list to send on VBLANK */
- struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */
- unsigned int vbl_pending;
+ struct list_head *vbl_sigs; /**< signal list to send on VBLANK */
+ atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/
+ atomic_t *vblank_refcount; /* number of users of vblank interrupts per crtc */
+ u32 *last_vblank; /* protected by dev->vbl_lock, used */
+ /* for wraparound handling */
+ u32 *vblank_offset; /* used to track how many vblanks */
+ u32 *vblank_premodeset; /* were lost during modeset */
+ struct timer_list vblank_disable_timer;
+
+ unsigned long max_vblank_count; /**< size of vblank counter register */
spinlock_t tasklet_lock; /**< For drm_locked_tasklet */
void (*locked_tasklet_func)(struct drm_device *dev);
@@ -825,6 +871,7 @@ struct drm_device {
#ifdef __alpha__
struct pci_controller *hose;
#endif
+ int num_crtcs; /**< Number of CRTCs on this device */
struct drm_sg_mem *sg; /**< Scatter gather memory */
void *dev_private; /**< device private data */
struct drm_sigdata sigdata; /**< For block_all_signals */
@@ -1113,11 +1160,19 @@ extern void drm_driver_irq_preinstall(struct drm_device *dev);
extern void drm_driver_irq_postinstall(struct drm_device *dev);
extern void drm_driver_irq_uninstall(struct drm_device *dev);
-extern int drm_wait_vblank(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq);
-extern void drm_vbl_send_signals(struct drm_device *dev);
+extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
+extern int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *filp);
+extern int drm_vblank_wait(struct drm_device * dev, unsigned int *vbl_seq);
extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*));
+extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_update_vblank_count(struct drm_device *dev, int crtc);
+extern void drm_handle_vblank(struct drm_device *dev, int crtc);
+extern int drm_vblank_get(struct drm_device *dev, int crtc);
+extern void drm_vblank_put(struct drm_device *dev, int crtc);
+
+ /* Modesetting support */
+extern int drm_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
/* AGP/GART support (drm_agpsupport.h) */
extern struct drm_agp_head *drm_agp_init(struct drm_device *dev);
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 0b321afd..adcb93a5 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -505,12 +505,14 @@ static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
static int drm_agp_populate(struct drm_ttm_backend *backend,
- unsigned long num_pages, struct page **pages)
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
{
struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
+ int dummy_page_count = 0;
if (drm_alloc_memctl(num_pages * sizeof(void *)))
return -1;
@@ -528,8 +530,16 @@ static int drm_agp_populate(struct drm_ttm_backend *backend,
DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
mem->page_count = 0;
- for (cur_page = pages; cur_page < last_page; ++cur_page)
- mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
+ struct page *page = *cur_page;
+ if (!page) {
+ page = dummy_read_page;
+ ++dummy_page_count;
+ }
+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
+ }
+ if (dummy_page_count)
+ DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
agp_be->mem = mem;
return 0;
}
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 55f5d08a..6b3d4e14 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -80,7 +80,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object *bo)
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
- if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+ if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|| bo->mem.mem_type != bo->pinned_mem_type) {
man = &bo->dev->bm.man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru);
@@ -137,27 +137,32 @@ static int drm_bo_add_ttm(struct drm_buffer_object *bo)
{
struct drm_device *dev = bo->dev;
int ret = 0;
+ uint32_t page_flags = 0;
DRM_ASSERT_LOCKED(&bo->mutex);
bo->ttm = NULL;
+ if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
+ page_flags |= DRM_TTM_PAGE_WRITE;
+
switch (bo->type) {
- case drm_bo_type_dc:
+ case drm_bo_type_device:
case drm_bo_type_kernel:
- bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
+ page_flags, dev->bm.dummy_read_page);
if (!bo->ttm)
ret = -ENOMEM;
break;
case drm_bo_type_user:
- bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
+ page_flags | DRM_TTM_PAGE_USER,
+ dev->bm.dummy_read_page);
if (!bo->ttm)
ret = -ENOMEM;
ret = drm_ttm_set_user(bo->ttm, current,
- bo->mem.mask & DRM_BO_FLAG_WRITE,
bo->buffer_start,
- bo->num_pages,
- dev->bm.dummy_read_page);
+ bo->num_pages);
if (ret)
return ret;
@@ -199,7 +204,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
goto out_err;
if (mem->mem_type != DRM_BO_MEM_LOCAL) {
- ret = drm_bind_ttm(bo->ttm, mem);
+ ret = drm_ttm_bind(bo->ttm, mem);
if (ret)
goto out_err;
}
@@ -209,11 +214,11 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
*old_mem = *mem;
mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
@@ -262,7 +267,7 @@ out_err:
new_man = &bm->man[bo->mem.mem_type];
if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
drm_ttm_unbind(bo->ttm);
- drm_destroy_ttm(bo->ttm);
+ drm_ttm_destroy(bo->ttm);
bo->ttm = NULL;
}
@@ -419,7 +424,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
if (bo->ttm) {
drm_ttm_unbind(bo->ttm);
- drm_destroy_ttm(bo->ttm);
+ drm_ttm_destroy(bo->ttm);
bo->ttm = NULL;
}
@@ -702,7 +707,7 @@ static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
evict_mem.mm_node = NULL;
evict_mem = bo->mem;
- evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+ evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
if (ret) {
@@ -866,7 +871,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
type_ok = drm_bo_mt_compatible(man,
bo->type == drm_bo_type_user,
- mem_type, mem->mask,
+ mem_type, mem->proposed_flags,
&cur_flags);
if (!type_ok)
@@ -918,7 +923,7 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
if (!drm_bo_mt_compatible(man,
bo->type == drm_bo_type_user,
mem_type,
- mem->mask,
+ mem->proposed_flags,
&cur_flags))
continue;
@@ -938,11 +943,25 @@ int drm_bo_mem_space(struct drm_buffer_object *bo,
}
EXPORT_SYMBOL(drm_bo_mem_space);
-static int drm_bo_new_mask(struct drm_buffer_object *bo,
- uint64_t new_flags, uint64_t used_mask)
+/*
+ * drm_bo_propose_flags:
+ *
+ * @bo: the buffer object getting new flags
+ *
+ * @new_flags: the new set of proposed flag bits
+ *
+ * @new_mask: the mask of bits changed in new_flags
+ *
+ * Modify the proposed_flag bits in @bo
+ */
+static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
+ uint64_t new_flags, uint64_t new_mask)
{
- uint32_t new_props;
+ uint32_t new_access;
+ /* Copy unchanging bits from existing proposed_flags */
+ DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
+
if (bo->type == drm_bo_type_user &&
((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
(DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
@@ -950,7 +969,7 @@ static int drm_bo_new_mask(struct drm_buffer_object *bo,
return -EINVAL;
}
- if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
return -EPERM;
}
@@ -960,15 +979,15 @@ static int drm_bo_new_mask(struct drm_buffer_object *bo,
return -EPERM;
}
- new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_READ);
+ new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_READ);
- if (!new_props) {
+ if (new_access == 0) {
DRM_ERROR("Invalid buffer object rwx properties\n");
return -EINVAL;
}
- bo->mem.mask = new_flags;
+ bo->mem.proposed_flags = new_flags;
return 0;
}
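The effect of the masking step is easiest to see with concrete values; the bit values below are hypothetical, not taken from the patch:

	uint64_t new_flags = DRM_BO_FLAG_WRITE;	/* requested change */
	uint64_t new_mask = DRM_BO_FLAG_WRITE | DRM_BO_FLAG_READ;
	/* assume bo->mem.proposed_flags ==
	 *	DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ */

	DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
	/* new_flags == DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_WRITE:
	 * MEM_LOCAL lies outside new_mask and is copied from the old
	 * proposed_flags; READ and WRITE lie inside it and take the
	 * caller's values. */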
@@ -1103,8 +1122,8 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
ret = 0;
mutex_unlock(&bo->mutex);
- DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
- !drm_bo_check_unfenced(bo));
+ DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
+ !drm_bo_check_unfenced(bo));
mutex_lock(&bo->mutex);
if (ret == -EINTR)
return -EAGAIN;
@@ -1135,12 +1154,17 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
rep->size = bo->num_pages * PAGE_SIZE;
rep->offset = bo->offset;
- if (bo->type == drm_bo_type_dc)
+ /*
+ * drm_bo_type_device buffers have user-visible
+ * handles which can be used to share across
+ * processes. Hand that back to the application
+ */
+ if (bo->type == drm_bo_type_device)
rep->arg_handle = bo->map_list.user_token;
else
rep->arg_handle = 0;
- rep->mask = bo->mem.mask;
+ rep->proposed_flags = bo->mem.proposed_flags;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_type;
rep->rep_flags = 0;
@@ -1286,7 +1310,7 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
/*
* bo->mutex locked.
- * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
*/
int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
@@ -1312,7 +1336,7 @@ int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
- mem.mask = new_mem_flags;
+ mem.proposed_flags = new_mem_flags;
mem.page_alignment = bo->mem.page_alignment;
mutex_lock(&bm->evict_mutex);
@@ -1355,24 +1379,41 @@ out_unlock:
static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
{
- uint32_t flag_diff = (mem->mask ^ mem->flags);
+ uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
- if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+ if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
return 0;
if ((flag_diff & DRM_BO_FLAG_CACHED) &&
- (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
- (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
+ (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
+ (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
return 0;
if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
- ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
- (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+ ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
+ (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
return 0;
return 1;
}
-/*
- * bo locked.
+/**
+ * drm_buffer_object_validate:
+ *
+ * @bo: the buffer object to modify
+ *
+ * @fence_class: the new fence class covering this buffer
+ *
+ * @move_unfenced: a boolean indicating whether switching the
+ * memory space of this buffer should cause the buffer to
+ * be placed on the unfenced list.
+ *
+ * @no_wait: whether this function should return -EBUSY instead
+ * of waiting.
+ *
+ * Change buffer access parameters. This can involve moving
+ * the buffer to the correct memory type, pinning the buffer
+ * or changing the class/type of fence covering this buffer
+ *
+ * Must be called with bo locked.
*/
static int drm_buffer_object_validate(struct drm_buffer_object *bo,
@@ -1385,8 +1426,8 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
uint32_t ftype;
int ret;
- DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
- (unsigned long long) bo->mem.mask,
+ DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+ (unsigned long long) bo->mem.proposed_flags,
(unsigned long long) bo->mem.flags);
ret = driver->fence_type(bo, &fence_class, &ftype);
@@ -1427,7 +1468,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
*/
if (!drm_bo_mem_compat(&bo->mem)) {
- ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+ ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
move_unfenced);
if (ret) {
if (ret != -EAGAIN)
@@ -1440,7 +1481,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
* Pinned buffers.
*/
- if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
bo->pinned_mem_type = bo->mem.mem_type;
mutex_lock(&dev->struct_mutex);
list_del_init(&bo->pinned_lru);
@@ -1476,7 +1517,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
if (ret)
return ret;
}
- DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
+ /*
+ * Validation has succeeded, move the access and other
+ * non-mapping-related flag bits from the proposed flags to
+ * the active flags
+ */
+
+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
/*
* Finally, adjust lru to be sure.
@@ -1501,13 +1548,38 @@ static int drm_buffer_object_validate(struct drm_buffer_object *bo,
return 0;
}
+/**
+ * drm_bo_do_validate:
+ *
+ * @bo: the buffer object
+ *
+ * @flags: access rights, mapping parameters and cacheability. See
+ * the DRM_BO_FLAG_* values in drm.h
+ *
+ * @mask: Which flag values to change; this allows callers to modify
+ * things without knowing the current state of other flags.
+ *
+ * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+ * values in drm.h.
+ *
+ * @fence_class: a driver-specific way of doing fences. Presumably,
+ * this would be used if the driver had more than one submission and
+ * fencing mechanism. At this point, there isn't any use of this
+ * from the user mode code.
+ *
+ * @rep: To be stuffed with the reply from validation
+ *
+ * 'validate' a buffer object. This changes where the buffer is
+ * located, along with changing access modes.
+ */
+
int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
- int no_wait,
struct drm_bo_info_rep *rep)
{
int ret;
+ int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
mutex_lock(&bo->mutex);
ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1515,9 +1587,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
if (ret)
goto out;
-
- DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
- ret = drm_bo_new_mask(bo, flags, mask);
+ ret = drm_bo_modify_proposed_flags (bo, flags, mask);
if (ret)
goto out;
@@ -1534,11 +1604,42 @@ out:
}
EXPORT_SYMBOL(drm_bo_do_validate);
+/**
+ * drm_bo_handle_validate
+ *
+ * @file_priv: the drm file private, used to get a handle to the user context
+ *
+ * @handle: the buffer object handle
+ *
+ * @flags: access rights, mapping parameters and cacheability. See
+ * the DRM_BO_FLAG_* values in drm.h
+ *
+ * @mask: Which flag values to change; this allows callers to modify
+ * things without knowing the current state of other flags.
+ *
+ * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+ * values in drm.h.
+ *
+ * @fence_class: a driver-specific way of doing fences. Presumably,
+ * this would be used if the driver had more than one submission and
+ * fencing mechanism. At this point, there isn't any use of this
+ * from the user mode code.
+ *
+ * @use_old_fence_class: don't change fence class, pull it from the buffer object
+ *
+ * @rep: To be stuffed with the reply from validation
+ *
+ * @bp_rep: To be stuffed with the buffer object pointer
+ *
+ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
+ * Some permissions checking is done on the parameters, otherwise this
+ * is a thin wrapper.
+ */
int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
- uint32_t fence_class,
uint64_t flags, uint64_t mask,
uint32_t hint,
+ uint32_t fence_class,
int use_old_fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep)
@@ -1546,7 +1647,6 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
struct drm_device *dev = file_priv->minor->dev;
struct drm_buffer_object *bo;
int ret;
- int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, handle, 1);
@@ -1566,8 +1666,7 @@ int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
- ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
- no_wait, rep);
+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
if (!ret && bo_rep)
*bo_rep = bo;
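A hypothetical in-kernel caller of the reordered interface, moving a buffer into VRAM without blocking (flags, mask and hint now precede fence_class):

	struct drm_bo_info_rep rep;
	int ret;

	ret = drm_bo_handle_validate(file_priv, handle,
				     DRM_BO_FLAG_MEM_VRAM,	/* flags */
				     DRM_BO_MASK_MEM,		/* mask */
				     DRM_BO_HINT_DONT_BLOCK,	/* hint */
				     0,	/* fence_class, unused here */
				     1,	/* use_old_fence_class */
				     &rep, NULL);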
@@ -1635,7 +1734,7 @@ out:
int drm_buffer_object_create(struct drm_device *dev,
unsigned long size,
enum drm_bo_type type,
- uint64_t mask,
+ uint64_t flags,
uint32_t hint,
uint32_t page_alignment,
unsigned long buffer_start,
@@ -1649,7 +1748,7 @@ int drm_buffer_object_create(struct drm_device *dev,
size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
- DRM_ERROR("Illegal buffer object size %d.\n", size);
+ DRM_ERROR("Illegal buffer object size %ld.\n", size);
return -EINVAL;
}
@@ -1680,16 +1779,23 @@ int drm_buffer_object_create(struct drm_device *dev,
bo->mem.page_alignment = page_alignment;
bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
- bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_MAPPABLE;
- bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_MAPPABLE;
+ bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+ DRM_BO_FLAG_MAPPABLE);
+ bo->mem.proposed_flags = 0;
atomic_inc(&bm->count);
- ret = drm_bo_new_mask(bo, mask, mask);
+ /*
+ * Use drm_bo_modify_proposed_flags to error-check the proposed flags
+ */
+ ret = drm_bo_modify_proposed_flags (bo, flags, flags);
if (ret)
goto out_err;
- if (bo->type == drm_bo_type_dc) {
+ /*
+ * For drm_bo_type_device buffers, allocate
+ * address space from the device so that applications
+ * can mmap the buffer from there
+ */
+ if (bo->type == drm_bo_type_device) {
mutex_lock(&dev->struct_mutex);
ret = drm_bo_setup_vm_locked(bo);
mutex_unlock(&dev->struct_mutex);
@@ -1752,20 +1858,28 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
return -EINVAL;
}
- bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
+ /*
+ * If the buffer creation request comes in with a starting address, it
+ * points at the desired user pages to map. Otherwise, create
+ * a drm_bo_type_device buffer, which uses pages allocated from the kernel
+ */
+ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
+ /*
+ * User buffers cannot be shared
+ */
if (bo_type == drm_bo_type_user)
- req->mask &= ~DRM_BO_FLAG_SHAREABLE;
+ req->flags &= ~DRM_BO_FLAG_SHAREABLE;
ret = drm_buffer_object_create(file_priv->minor->dev,
- req->size, bo_type, req->mask,
+ req->size, bo_type, req->flags,
req->hint, req->page_alignment,
req->buffer_start, &entry);
if (ret)
goto out;
ret = drm_bo_add_user_object(file_priv, entry,
- req->mask & DRM_BO_FLAG_SHAREABLE);
+ req->flags & DRM_BO_FLAG_SHAREABLE);
if (ret) {
drm_bo_usage_deref_unlocked(&entry);
goto out;
@@ -1796,11 +1910,17 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,
if (ret)
return ret;
- ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
+ /*
+ * validate the buffer. note that 'fence_class' will be unused
+ * as we pass use_old_fence_class=1 here. Note also that
+ * the libdrm API doesn't pass fence_class to the kernel,
+ * so it's a good thing it isn't used here.
+ */
+ ret = drm_bo_handle_validate(file_priv, req->handle,
req->flags,
req->mask,
req->hint | DRM_BO_HINT_DONT_FENCE,
- 1,
+ req->fence_class, 1,
rep, NULL);
(void) drm_bo_read_unlock(&dev->bm.bm_lock);
@@ -1951,7 +2071,7 @@ static int drm_bo_leave_list(struct drm_buffer_object *bo,
DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
"cleanup. Removing flag and evicting.\n");
bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
- bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+ bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
}
if (bo->mem.mem_type == mem_type)
@@ -2502,6 +2622,14 @@ void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
+/**
+ * drm_bo_takedown_vm_locked:
+ *
+ * @bo: the buffer object to remove any drm device mapping
+ *
+ * Remove any associated vm mapping on the drm device node that
+ * would have been created for a drm_bo_type_device buffer
+ */
static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
{
struct drm_map_list *list;
@@ -2509,7 +2637,7 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
struct drm_device *dev = bo->dev;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
- if (bo->type != drm_bo_type_dc)
+ if (bo->type != drm_bo_type_device)
return;
list = &bo->map_list;
@@ -2532,6 +2660,16 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
drm_bo_usage_deref_locked(&bo);
}
+/**
+ * drm_bo_setup_vm_locked:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to drm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
{
struct drm_map_list *list = &bo->map_list;
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 97946b3c..db433d63 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -54,7 +54,7 @@ int drm_bo_move_ttm(struct drm_buffer_object *bo,
struct drm_ttm *ttm = bo->ttm;
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
int ret;
if (old_mem->mem_type == DRM_BO_MEM_TT) {
@@ -71,14 +71,14 @@ int drm_bo_move_ttm(struct drm_buffer_object *bo,
save_flags = old_mem->flags;
}
if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
- ret = drm_bind_ttm(ttm, new_mem);
+ ret = drm_ttm_bind(ttm, new_mem);
if (ret)
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
@@ -211,7 +211,7 @@ int drm_bo_move_memcpy(struct drm_buffer_object *bo,
void *new_iomap;
int ret;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
unsigned long i;
unsigned long page;
unsigned long add = 0;
@@ -256,12 +256,12 @@ out2:
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
drm_ttm_unbind(ttm);
- drm_destroy_ttm(ttm);
+ drm_ttm_destroy(ttm);
bo->ttm = NULL;
}
@@ -331,7 +331,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
struct drm_bo_mem_reg *old_mem = &bo->mem;
int ret;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
struct drm_buffer_object *old_obj;
if (bo->fence)
@@ -366,7 +366,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
drm_ttm_unbind(bo->ttm);
- drm_destroy_ttm(bo->ttm);
+ drm_ttm_destroy(bo->ttm);
bo->ttm = NULL;
}
} else {
@@ -400,7 +400,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index 67c8c998..a745a7d9 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -232,7 +232,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
unsigned long _end = jiffies + 3*DRM_HZ;
- uint32_t new_mask = bo->mem.mask |
+ uint32_t new_mask = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 4ecea67f..ab047ca3 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -116,7 +116,11 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETOUTPUT, drm_mode_getoutput, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
@@ -131,6 +135,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_ROOT_ONLY|DRM_CONTROL_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_ROOT_ONLY | DRM_CONTROL_ALLOW),
+
DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index 314c2329..367d2dd8 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -71,18 +71,100 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return 0;
}
+static void vblank_disable_fn(unsigned long arg)
+{
+ struct drm_device *dev = (struct drm_device *)arg;
+ int i;
+
+ for (i = 0; i < dev->num_crtcs; i++)
+ if (atomic_read(&dev->vblank_refcount[i]) == 0)
+ dev->driver->disable_vblank(dev, i);
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+ int i, ret = -ENOMEM;
+
+ setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+ (unsigned long)dev);
+ spin_lock_init(&dev->vbl_lock);
+ atomic_set(&dev->vbl_signal_pending, 0);
+ dev->num_crtcs = num_crtcs;
+
+ dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vbl_queue)
+ goto err;
+
+ dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vbl_sigs)
+ goto err;
+
+ dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->_vblank_count)
+ goto err;
+
+ dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_refcount)
+ goto err;
+
+ dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+ if (!dev->last_vblank)
+ goto err;
+
+ dev->vblank_premodeset = drm_calloc(num_crtcs, sizeof(u32),
+ DRM_MEM_DRIVER);
+ if (!dev->vblank_premodeset)
+ goto err;
+
+ dev->vblank_offset = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+ if (!dev->vblank_offset)
+ goto err;
+
+ /* Zero per-crtc vblank stuff */
+ for (i = 0; i < num_crtcs; i++) {
+ init_waitqueue_head(&dev->vbl_queue[i]);
+ INIT_LIST_HEAD(&dev->vbl_sigs[i]);
+ atomic_set(&dev->_vblank_count[i], 0);
+ atomic_set(&dev->vblank_refcount[i], 0);
+ }
+
+ return 0;
+
+err:
+ drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+ num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * num_crtcs,
+ DRM_MEM_DRIVER);
+ drm_free(dev->vblank_premodeset, sizeof(*dev->vblank_premodeset) *
+ num_crtcs, DRM_MEM_DRIVER);
+ drm_free(dev->vblank_offset, sizeof(*dev->vblank_offset) * num_crtcs,
+ DRM_MEM_DRIVER);
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
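Since irq_postinstall now returns int, a driver can allocate its per-CRTC vblank state there and let failures propagate out of drm_irq_install(). A sketch, assuming an imaginary two-pipe device:

static int mydev_irq_postinstall(struct drm_device *dev)
{
	int ret;

	ret = drm_vblank_init(dev, 2);	/* two CRTCs on this device */
	if (ret)
		return ret;

	/* ... unmask device interrupts here ... */
	return 0;
}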
/**
* Install IRQ handler.
*
* \param dev DRM device.
*
- * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
+ * Initializes the IRQ related data. Installs the handler, calling the driver
* \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
* before and after the installation.
*/
int drm_irq_install(struct drm_device * dev)
{
- int ret;
+ int ret = 0;
unsigned long sh_flags = 0;
if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
@@ -106,18 +188,7 @@ int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
-
- if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
- init_waitqueue_head(&dev->vbl_queue);
-
- spin_lock_init(&dev->vbl_lock);
-
- INIT_LIST_HEAD(&dev->vbl_sigs);
- INIT_LIST_HEAD(&dev->vbl_sigs2);
-
- dev->vbl_pending = 0;
- }
+ DRM_DEBUG("irq=%d\n", dev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -136,9 +207,14 @@ int drm_irq_install(struct drm_device * dev)
}
/* After installing handler */
- dev->driver->irq_postinstall(dev);
+ ret = dev->driver->irq_postinstall(dev);
+ if (ret < 0) {
+ mutex_lock(&dev->struct_mutex);
+ dev->irq_enabled = 0;
+ mutex_unlock(&dev->struct_mutex);
+ }
- return 0;
+ return ret;
}
EXPORT_SYMBOL(drm_irq_install);
@@ -164,7 +240,7 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
dev->driver->irq_uninstall(dev);
@@ -213,6 +289,143 @@ int drm_control(struct drm_device *dev, void *data,
}
/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+ return atomic_read(&dev->_vblank_count[crtc]) +
+ dev->vblank_offset[crtc];
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc). Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ */
+void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+ unsigned long irqflags;
+ u32 cur_vblank, diff;
+
+ /*
+ * Interrupts were disabled prior to this call, so deal with counter
+ * wrap if needed.
+ * NOTE! It's possible we lost a full dev->max_vblank_count events
+ * here if the register is small or we had vblank interrupts off for
+ * a long time.
+ */
+ cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+ spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ if (cur_vblank < dev->last_vblank[crtc]) {
+ diff = dev->max_vblank_count -
+ dev->last_vblank[crtc];
+ diff += cur_vblank;
+ } else {
+ diff = cur_vblank - dev->last_vblank[crtc];
+ }
+ dev->last_vblank[crtc] = cur_vblank;
+ spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+ atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_update_vblank_count);
+
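A worked example of the wrap handling, with hypothetical numbers and assuming the driver sets max_vblank_count to the counter's modulus:

/*
 * Assume a 24-bit hardware counter that wraps to 0 after 0xffffff,
 * so max_vblank_count == 0x1000000.  With last_vblank == 0xfffff0
 * and a new register reading of 0x10:
 *
 *	cur_vblank (0x10) < last_vblank (0xfffff0), so
 *	diff  = 0x1000000 - 0xfffff0	-> 0x10 events before the wrap
 *	diff += 0x10			-> 0x20 events total
 *
 * and _vblank_count advances by 0x20, as if no wrap had happened.
 */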
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use. Note callers will probably want to update the master counter
+ * using drm_update_vblank_count() above before calling this routine so that
+ * wakeups occur on the right vblank event.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+ int ret = 0;
+
+ /* Going from 0->1 means we have to enable interrupts again */
+ if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+ ret = dev->driver->enable_vblank(dev, crtc);
+ if (ret)
+ atomic_dec(&dev->vblank_refcount[crtc]);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+ /* Last user schedules interrupt disable */
+ if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+ mod_timer(&dev->vblank_disable_timer,
+ round_jiffies_relative(DRM_HZ));
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
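Typical use of the reference count, mirroring the wait path below (a sketch; target_seq is a hypothetical sequence number). Holding a reference across the wait keeps the disable timer from turning the interrupt off underneath the waiter:

	ret = drm_vblank_get(dev, crtc);
	if (ret)
		return ret;
	DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
		    (drm_vblank_count(dev, crtc) - target_seq) <= (1 << 23));
	drm_vblank_put(dev, crtc);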
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_modeset_ctl *modeset = data;
+ int crtc, ret = 0;
+ u32 new;
+
+ crtc = modeset->arg;
+ if (crtc >= dev->num_crtcs) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ switch (modeset->cmd) {
+ case _DRM_PRE_MODESET:
+ dev->vblank_premodeset[crtc] =
+ dev->driver->get_vblank_counter(dev, crtc);
+ break;
+ case _DRM_POST_MODESET:
+ new = dev->driver->get_vblank_counter(dev, crtc);
+ dev->vblank_offset[crtc] = dev->vblank_premodeset[crtc] - new;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+out:
+ return ret;
+}
+
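On the userspace side, an X server or similar would bracket each mode switch with the new ioctl (a sketch; fd is an open DRM device node):

	struct drm_modeset_ctl ctl = {
		.arg = 0,			/* crtc */
		.cmd = _DRM_PRE_MODESET,
	};

	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);
	/* ... program the new mode; vblank irqs may be off meanwhile ... */
	ctl.cmd = _DRM_POST_MODESET;
	ioctl(fd, DRM_IOCTL_MODESET_CTL, &ctl);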
+/**
* Wait for VBLANK.
*
* \param inode device inode.
@@ -231,12 +444,13 @@ int drm_control(struct drm_device *dev, void *data,
*
* If a signal is not requested, then calls vblank_wait().
*/
-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_wait_vblank(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
union drm_wait_vblank *vblwait = data;
struct timeval now;
int ret = 0;
- unsigned int flags, seq;
+ unsigned int flags, seq, crtc;
if ((!dev->irq) || (!dev->irq_enabled))
return -EINVAL;
@@ -250,13 +464,13 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+ crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
- if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
- DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+ if (crtc >= dev->num_crtcs)
return -EINVAL;
- seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
- : &dev->vbl_received);
+ drm_update_vblank_count(dev, crtc);
+ seq = drm_vblank_count(dev, crtc);
switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
case _DRM_VBLANK_RELATIVE:
@@ -275,8 +489,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
if (flags & _DRM_VBLANK_SIGNAL) {
unsigned long irqflags;
- struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
- ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+ struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
struct drm_vbl_sig *vbl_sig;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -297,22 +510,26 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
}
}
- if (dev->vbl_pending >= 100) {
+ if (atomic_read(&dev->vbl_signal_pending) >= 100) {
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
return -EBUSY;
}
- dev->vbl_pending++;
-
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
- if (!
- (vbl_sig =
- drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
+ vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
+ DRM_MEM_DRIVER);
+ if (!vbl_sig)
return -ENOMEM;
+
+ ret = drm_vblank_get(dev, crtc);
+ if (ret) {
+ drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
+ DRM_MEM_DRIVER);
+ return ret;
}
- memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+ atomic_inc(&dev->vbl_signal_pending);
vbl_sig->sequence = vblwait->request.sequence;
vbl_sig->info.si_signo = vblwait->request.signal;
@@ -326,17 +543,20 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
vblwait->reply.sequence = seq;
} else {
- if (flags & _DRM_VBLANK_SECONDARY) {
- if (dev->driver->vblank_wait2)
- ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
- } else if (dev->driver->vblank_wait)
- ret =
- dev->driver->vblank_wait(dev,
- &vblwait->request.sequence);
-
+ unsigned long cur_vblank;
+
+ ret = drm_vblank_get(dev, crtc);
+ if (ret)
+ return ret;
+ DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+ (((cur_vblank = drm_vblank_count(dev, crtc))
+ - vblwait->request.sequence) <= (1 << 23)));
+ drm_vblank_put(dev, crtc);
do_gettimeofday(&now);
+
vblwait->reply.tval_sec = now.tv_sec;
vblwait->reply.tval_usec = now.tv_usec;
+ vblwait->reply.sequence = cur_vblank;
}
done:
@@ -347,43 +567,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
* Send the VBLANK signals.
*
* \param dev DRM device.
+ * \param crtc CRTC where the vblank event occurred
*
* Sends a signal for each task in drm_device::vbl_sigs and empties the list.
*/
-void drm_vbl_send_signals(struct drm_device * dev)
+static void drm_vbl_send_signals(struct drm_device * dev, int crtc)
{
+ struct drm_vbl_sig *vbl_sig, *tmp;
+ struct list_head *vbl_sigs;
+ unsigned int vbl_seq;
unsigned long flags;
- int i;
spin_lock_irqsave(&dev->vbl_lock, flags);
- for (i = 0; i < 2; i++) {
- struct drm_vbl_sig *vbl_sig, *tmp;
- struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
- unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
- &dev->vbl_received);
-
- list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
- if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
- vbl_sig->info.si_code = vbl_seq;
- send_sig_info(vbl_sig->info.si_signo,
- &vbl_sig->info, vbl_sig->task);
+ vbl_sigs = &dev->vbl_sigs[crtc];
+ vbl_seq = drm_vblank_count(dev, crtc);
- list_del(&vbl_sig->head);
+ list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+ if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+ vbl_sig->info.si_code = vbl_seq;
+ send_sig_info(vbl_sig->info.si_signo,
+ &vbl_sig->info, vbl_sig->task);
- drm_free(vbl_sig, sizeof(*vbl_sig),
- DRM_MEM_DRIVER);
+ list_del(&vbl_sig->head);
- dev->vbl_pending--;
- }
- }
+ drm_free(vbl_sig, sizeof(*vbl_sig),
+ DRM_MEM_DRIVER);
+ atomic_dec(&dev->vbl_signal_pending);
+ drm_vblank_put(dev, crtc);
+ }
}
spin_unlock_irqrestore(&dev->vbl_lock, flags);
}
-EXPORT_SYMBOL(drm_vbl_send_signals);
+
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+ drm_update_vblank_count(dev, crtc);
+ DRM_WAKEUP(&dev->vbl_queue[crtc]);
+ drm_vbl_send_signals(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
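A driver's interrupt handler then reduces to acking the hardware and calling the helper per CRTC. Sketch only; the mydev_* names and status bits are invented:

static irqreturn_t mydev_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	u32 status = MYDEV_READ(dev, MYDEV_IRQ_STATUS);

	if (status & MYDEV_PIPE_A_VBLANK)
		drm_handle_vblank(dev, 0);
	if (status & MYDEV_PIPE_B_VBLANK)
		drm_handle_vblank(dev, 1);

	MYDEV_WRITE(dev, MYDEV_IRQ_STATUS, status);	/* ack */
	return IRQ_HANDLED;
}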
/**
* Tasklet wrapper function.
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index 16ccdbcb..2994b716 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -44,7 +44,7 @@ int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
item->owner = priv;
ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
- (unsigned long)item, 32, 0, 0);
+ (unsigned long)item, 31, 0, 0);
if (ret)
return ret;
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index 394a2721..53fad3af 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -262,7 +262,8 @@ struct drm_ttm_backend;
struct drm_ttm_backend_func {
int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
int (*populate) (struct drm_ttm_backend *backend,
- unsigned long num_pages, struct page **pages);
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page);
void (*clear) (struct drm_ttm_backend *backend);
int (*bind) (struct drm_ttm_backend *backend,
struct drm_bo_mem_reg *bo_mem);
@@ -296,8 +297,10 @@ struct drm_ttm {
};
-extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
+extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
+extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm *ttm);
extern void drm_ttm_evict(struct drm_ttm *ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
@@ -306,10 +309,8 @@ extern void drm_ttm_cache_flush(void);
extern int drm_ttm_populate(struct drm_ttm *ttm);
extern int drm_ttm_set_user(struct drm_ttm *ttm,
struct task_struct *tsk,
- int write,
unsigned long start,
- unsigned long num_pages,
- struct page *dummy_read_page);
+ unsigned long num_pages);
/*
* Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
@@ -317,7 +318,7 @@ extern int drm_ttm_set_user(struct drm_ttm *ttm,
* Otherwise it is called when the last vma exits.
*/
-extern int drm_destroy_ttm(struct drm_ttm *ttm);
+extern int drm_ttm_destroy(struct drm_ttm *ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
@@ -330,14 +331,47 @@ extern int drm_destroy_ttm(struct drm_ttm *ttm);
* Page flags.
*/
+/*
+ * This ttm should not be cached by the CPU
+ */
#define DRM_TTM_PAGE_UNCACHED (1 << 0)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
#define DRM_TTM_PAGE_USED (1 << 1)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
#define DRM_TTM_PAGE_BOUND (1 << 2)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
#define DRM_TTM_PAGE_PRESENT (1 << 3)
+/*
+ * The array of page pointers was allocated with vmalloc
+ * instead of drm_calloc.
+ */
#define DRM_TTM_PAGE_VMALLOC (1 << 4)
+/*
+ * This ttm is mapped from user space
+ */
#define DRM_TTM_PAGE_USER (1 << 5)
-#define DRM_TTM_PAGE_USER_WRITE (1 << 6)
+/*
+ * This ttm will be written to by the GPU
+ */
+#define DRM_TTM_PAGE_WRITE (1 << 6)
+/*
+ * This ttm was mapped to the GPU, and so the contents may have
+ * been modified
+ */
#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was.
+ */
#define DRM_TTM_PAGE_USER_DMA (1 << 8)
/***************************************************
@@ -350,16 +384,50 @@ struct drm_bo_mem_reg {
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
+ /*
+ * Current buffer status flags, indicating
+ * where the buffer is located and which
+ * access modes are in effect
+ */
uint64_t flags;
- uint64_t mask;
+ /**
+ * These are the flags proposed for
+ * a validate operation. If the
+ * validate succeeds, they'll get moved
+ * into the flags field
+ */
+ uint64_t proposed_flags;
+
uint32_t desired_tile_stride;
uint32_t hw_tile_stride;
};
enum drm_bo_type {
- drm_bo_type_dc,
+ /*
+ * drm_bo_type_device are 'normal' drm allocations,
+ * pages are allocated from within the kernel automatically
+ * and the objects can be mmap'd from the drm device. Each
+ * drm_bo_type_device object has a unique name which can be
+ * used by other processes to share access to the underlying
+ * buffer.
+ */
+ drm_bo_type_device,
+ /*
+ * drm_bo_type_user are buffers of pages that already exist
+ * in the process address space. They are more limited than
+ * drm_bo_type_device buffers in that they must always
+ * remain cached (as we assume the user pages are mapped cached),
+ * and they are not sharable to other processes through DRM
+ * (although, regular shared memory should still work fine).
+ */
drm_bo_type_user,
- drm_bo_type_kernel, /* for initial kernel allocations */
+ /*
+ * drm_bo_type_kernel are buffers that exist solely for use
+ * within the kernel. The pages cannot be mapped into the
+ * process. One obvious use would be for the ring
+ * buffer where user access would not (ideally) be required.
+ */
+ drm_bo_type_kernel,
};
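For example, the two non-user types would be created along these lines (a sketch; the sizes and flag combinations are illustrative, not from the patch):

	struct drm_buffer_object *ring, *scanout;

	/* kernel-only ring buffer, never mmap'd by a process */
	drm_buffer_object_create(dev, 64 * 1024, drm_bo_type_kernel,
				 DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_READ |
				 DRM_BO_FLAG_WRITE | DRM_BO_FLAG_NO_EVICT,
				 0, 0, 0, &ring);
	/* shareable, mmap-able buffer using the renamed type */
	drm_buffer_object_create(dev, size, drm_bo_type_device,
				 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ |
				 DRM_BO_FLAG_WRITE | DRM_BO_FLAG_SHAREABLE,
				 0, 0, 0, &scanout);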
struct drm_buffer_object {
@@ -476,9 +544,36 @@ struct drm_bo_driver {
int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
int (*init_mem_type) (struct drm_device *dev, uint32_t type,
struct drm_mem_type_manager *man);
- uint32_t(*evict_mask) (struct drm_buffer_object *bo);
+ /*
+ * evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+ uint64_t(*evict_flags) (struct drm_buffer_object *bo);
+ /*
+ * move:
+ *
+ * @bo: the buffer to move
+ *
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ *
+ * @no_wait: whether this should give up and return -EBUSY
+ * if this move would require sleeping
+ *
+ * @new_mem: the new memory region receiving the buffer
+ *
+ * Move a buffer between two memory regions.
+ */
int (*move) (struct drm_buffer_object *bo,
int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
+ /*
+ * ttm_cache_flush
+ */
void (*ttm_cache_flush)(struct drm_ttm *ttm);
};
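A driver's evict_flags hook might implement a policy like the following (a sketch matching the contract above; the policy itself is hypothetical):

static uint64_t mydev_evict_flags(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_VRAM:
		/* contents must survive eviction; go through the TT */
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	default:
		return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
	}
}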
@@ -519,7 +614,7 @@ extern int drm_fence_buffer_objects(struct drm_device *dev,
struct drm_fence_object **used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
- enum drm_bo_type type, uint64_t mask,
+ enum drm_bo_type type, uint64_t flags,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
@@ -534,9 +629,8 @@ extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size);
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
- uint32_t fence_class, uint64_t flags,
- uint64_t mask, uint32_t hint,
- int use_old_fence_class,
+ uint64_t flags, uint64_t mask, uint32_t hint,
+ uint32_t fence_class, int use_old_fence_class,
struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
@@ -545,7 +639,6 @@ extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
- int no_wait,
struct drm_bo_info_rep *rep);
/*
diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c
index 920b11c8..77b9f95d 100644
--- a/linux-core/drm_scatter.c
+++ b/linux-core/drm_scatter.c
@@ -68,7 +68,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
struct drm_sg_mem *entry;
unsigned long pages, i, j;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -82,7 +82,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
+ DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@@ -123,8 +123,8 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
entry->handle = ScatterHandle((unsigned long)entry->virtual);
- DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
- DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
+ DRM_DEBUG("handle = %08lx\n", entry->handle);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
@@ -211,7 +211,7 @@ int drm_sg_free(struct drm_device *dev, void *data,
if (!entry || entry->handle != request->handle)
return -EINVAL;
- DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 3540571f..a9d87338 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(drm_ttm_cache_flush);
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
-static void ttm_alloc_pages(struct drm_ttm *ttm)
+static void drm_ttm_alloc_pages(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
@@ -66,7 +66,7 @@ static void ttm_alloc_pages(struct drm_ttm *ttm)
drm_free_memctl(size);
}
-static void ttm_free_pages(struct drm_ttm *ttm)
+static void drm_ttm_free_pages(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
@@ -103,7 +103,7 @@ static struct page *drm_ttm_alloc_page(void)
* for range of pages in a ttm.
*/
-static int drm_set_caching(struct drm_ttm *ttm, int noncached)
+static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
{
int i;
struct page **cur_page;
@@ -145,7 +145,7 @@ static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
int i;
BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
- write = ((ttm->page_flags & DRM_TTM_PAGE_USER_WRITE) != 0);
+ write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);
dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
for (i = 0; i < ttm->num_pages; ++i) {
@@ -193,7 +193,7 @@ static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
* Free all resources associated with a ttm.
*/
-int drm_destroy_ttm(struct drm_ttm *ttm)
+int drm_ttm_destroy(struct drm_ttm *ttm)
{
struct drm_ttm_backend *be;
@@ -208,14 +208,14 @@ int drm_destroy_ttm(struct drm_ttm *ttm)
if (ttm->pages) {
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
- drm_set_caching(ttm, 0);
+ drm_ttm_set_caching(ttm, 0);
if (ttm->page_flags & DRM_TTM_PAGE_USER)
drm_ttm_free_user_pages(ttm);
else
drm_ttm_free_alloced_pages(ttm);
- ttm_free_pages(ttm);
+ drm_ttm_free_pages(ttm);
}
drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
@@ -239,23 +239,33 @@ struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
}
EXPORT_SYMBOL(drm_ttm_get_page);
+/**
+ * drm_ttm_set_user:
+ *
+ * @ttm: the ttm to map pages to. This must be a freshly created
+ * ttm with DRM_TTM_PAGE_USER set; write access is taken from
+ * the DRM_TTM_PAGE_WRITE bit in its page_flags.
+ *
+ * @tsk: the task whose address space the pages are mapped from.
+ *
+ * @start: the starting user address
+ *
+ * @num_pages: the number of pages to map
+ *
+ * Map a range of user addresses to a new ttm object. This
+ * provides access to user memory from the graphics device.
+ */
int drm_ttm_set_user(struct drm_ttm *ttm,
struct task_struct *tsk,
- int write,
unsigned long start,
- unsigned long num_pages,
- struct page *dummy_read_page)
+ unsigned long num_pages)
{
struct mm_struct *mm = tsk->mm;
int ret;
- int i;
+ int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;
BUG_ON(num_pages != ttm->num_pages);
-
- ttm->dummy_read_page = dummy_read_page;
- ttm->page_flags |= DRM_TTM_PAGE_USER |
- ((write) ? DRM_TTM_PAGE_USER_WRITE : 0);
-
+ BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);
down_read(&mm->mmap_sem);
ret = get_user_pages(tsk, mm, start, num_pages,
@@ -267,14 +277,17 @@ int drm_ttm_set_user(struct drm_ttm *ttm,
return -ENOMEM;
}
- for (i = 0; i < num_pages; ++i) {
- if (ttm->pages[i] == NULL)
- ttm->pages[i] = ttm->dummy_read_page;
- }
-
return 0;
}
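
With write access now carried in page_flags, a caller creates the ttm with the user flags up front and only then maps the pages. A minimal sketch of the new calling sequence (hypothetical helper, not part of this patch; error handling trimmed):

	/* Map num_pages of user memory at `start` for GPU read/write.
	 * Assumes dev and dummy_read_page are valid; illustration only. */
	struct drm_ttm *map_user_range(struct drm_device *dev,
				       struct task_struct *tsk,
				       unsigned long start,
				       unsigned long num_pages,
				       struct page *dummy_read_page)
	{
		struct drm_ttm *ttm;

		ttm = drm_ttm_create(dev, num_pages << PAGE_SHIFT,
				     DRM_TTM_PAGE_USER | DRM_TTM_PAGE_WRITE,
				     dummy_read_page);
		if (!ttm)
			return NULL;
		if (drm_ttm_set_user(ttm, tsk, start, num_pages)) {
			drm_ttm_destroy(ttm);
			return NULL;
		}
		return ttm;
	}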
+/**
+ * drm_ttm_populate:
+ *
+ * @ttm: the object to allocate pages for
+ *
+ * Allocate pages for all unset page entries (read-only objects leave
+ * entries unset, to be backed by the dummy read page in the backend),
+ * then call the backend to create the hardware mappings.
+ */
int drm_ttm_populate(struct drm_ttm *ttm)
{
struct page *page;
@@ -285,21 +298,32 @@ int drm_ttm_populate(struct drm_ttm *ttm)
return 0;
be = ttm->be;
- for (i = 0; i < ttm->num_pages; ++i) {
- page = drm_ttm_get_page(ttm, i);
- if (!page)
- return -ENOMEM;
+ if (ttm->page_flags & DRM_TTM_PAGE_WRITE) {
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = drm_ttm_get_page(ttm, i);
+ if (!page)
+ return -ENOMEM;
+ }
}
- be->func->populate(be, ttm->num_pages, ttm->pages);
+ be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
ttm->state = ttm_unbound;
return 0;
}
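
Backends now receive the dummy read page directly and substitute it for unallocated (read-only) entries. A sketch of the new hook, with a hypothetical page-table helper standing in for the device-specific write; nouveau_sgdma_populate below is a real instance:

	/* Hypothetical backend: example_map_entry() stands in for the
	 * device-specific translation-table write. */
	static void example_map_entry(struct drm_ttm_backend *be,
				      unsigned long idx, struct page *page);

	static int example_populate(struct drm_ttm_backend *be,
				    unsigned long num_pages,
				    struct page **pages,
				    struct page *dummy_read_page)
	{
		unsigned long i;

		/* NULL slots belong to read-only objects; back them with
		 * the dummy read page instead of allocating memory. */
		for (i = 0; i < num_pages; i++)
			example_map_entry(be, i,
					  pages[i] ? pages[i] : dummy_read_page);
		return 0;
	}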
-/*
- * Initialize a ttm.
+/**
+ * drm_ttm_create:
+ *
+ * @dev: the drm_device
+ *
+ * @size: The size (in bytes) of the desired object
+ *
+ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_objects.h.
+ *
+ * @dummy_read_page: a page to back unpopulated read-only entries
+ *
+ * Allocate and initialize a ttm, leaving it unpopulated at this time.
*/
-struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
+struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
+ uint32_t page_flags, struct page *dummy_read_page)
{
struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
struct drm_ttm *ttm;
@@ -317,21 +341,23 @@ struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
ttm->destroy = 0;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->page_flags = 0;
+ ttm->page_flags = page_flags;
+
+ ttm->dummy_read_page = dummy_read_page;
/*
* Account also for AGP module memory usage.
*/
- ttm_alloc_pages(ttm);
+ drm_ttm_alloc_pages(ttm);
if (!ttm->pages) {
- drm_destroy_ttm(ttm);
+ drm_ttm_destroy(ttm);
DRM_ERROR("Failed allocating page table\n");
return NULL;
}
ttm->be = bo_driver->create_ttm_backend_entry(dev);
if (!ttm->be) {
- drm_destroy_ttm(ttm);
+ drm_ttm_destroy(ttm);
DRM_ERROR("Failed creating ttm backend entry\n");
return NULL;
}
@@ -339,10 +365,15 @@ struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size)
return ttm;
}
-/*
- * Unbind a ttm region from the aperture.
+/**
+ * drm_ttm_evict:
+ *
+ * @ttm: the object to be unbound from the aperture.
+ *
+ * Transition a ttm from bound to evicted, where it
+ * isn't present in the aperture, but various caches may
+ * not be consistent.
*/
-
void drm_ttm_evict(struct drm_ttm *ttm)
{
struct drm_ttm_backend *be = ttm->be;
@@ -356,17 +387,33 @@ void drm_ttm_evict(struct drm_ttm *ttm)
ttm->state = ttm_evicted;
}
+/**
+ * drm_ttm_fixup_caching:
+ *
+ * @ttm: the object to set unbound
+ *
+ * XXX this function is misnamed. Transition a ttm from evicted to
+ * unbound, flushing caches as appropriate.
+ */
void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{
if (ttm->state == ttm_evicted) {
struct drm_ttm_backend *be = ttm->be;
if (be->func->needs_ub_cache_adjust(be))
- drm_set_caching(ttm, 0);
+ drm_ttm_set_caching(ttm, 0);
ttm->state = ttm_unbound;
}
}
+/**
+ * drm_ttm_unbind:
+ *
+ * @ttm: the object to unbind from the graphics device
+ *
+ * Unbind an object from the aperture. This removes the mappings
+ * from the graphics device and flushes caches if necessary.
+ */
void drm_ttm_unbind(struct drm_ttm *ttm)
{
if (ttm->state == ttm_bound)
@@ -375,7 +422,19 @@ void drm_ttm_unbind(struct drm_ttm *ttm)
drm_ttm_fixup_caching(ttm);
}
-int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
+/**
+ * drm_ttm_bind:
+ *
+ * @ttm: the ttm object to bind to the graphics device
+ *
+ * @bo_mem: the aperture memory region which will hold the object
+ *
+ * Bind a ttm object to the aperture. This ensures that the necessary
+ * pages are allocated, flushes CPU caches as needed and marks the
+ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
+ * modified by the GPU.
+ */
+int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
int ret = 0;
@@ -393,7 +452,7 @@ int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
return ret;
if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
- drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
+ drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
bo_driver->ttm_cache_flush)
bo_driver->ttm_cache_flush(ttm);
@@ -410,4 +469,4 @@ int drm_bind_ttm(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
return 0;
}
-EXPORT_SYMBOL(drm_bind_ttm);
+EXPORT_SYMBOL(drm_ttm_bind);
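
Taken together, the renamed entry points give the whole lifecycle a uniform drm_ttm_ prefix. The expected ordering, sketched with a ttm from drm_ttm_create() and a bo_mem already chosen by drm_bo_mem_space() (hypothetical caller, error handling trimmed):

	/* Sketch of the state transitions: unbound -> bound -> unbound. */
	static int example_use_ttm(struct drm_ttm *ttm,
				   struct drm_bo_mem_reg *bo_mem)
	{
		int ret;

		/* populates pages, flushes CPU caches, binds the backend */
		ret = drm_ttm_bind(ttm, bo_mem);
		if (ret)
			return ret;
		/* ... GPU uses the object ... */
		drm_ttm_unbind(ttm);	/* ttm_bound -> ttm_unbound */
		drm_ttm_destroy(ttm);	/* frees pages and the backend */
		return 0;
	}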
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index f439fa21..15e1c0f5 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -189,7 +189,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
return NOPAGE_SIGBUS;
get_page(page);
- DRM_DEBUG("shm_nopage 0x%lx\n", address);
+ DRM_DEBUG("0x%lx\n", address);
return page;
}
@@ -305,7 +305,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
get_page(page);
- DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
+ DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
return page;
}
@@ -751,10 +751,10 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
- uint32_t new_mask = bo->mem.mask |
+ uint32_t new_flags = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
- err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+ err = drm_bo_move_buffer(bo, new_flags, 0, 0);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 64c7c0db..f2bf5d9d 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -589,7 +589,7 @@ static void i810EmitState(struct drm_device * dev)
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
+ DRM_DEBUG("%x\n", dirty);
if (dirty & I810_UPLOAD_BUFFERS) {
i810EmitDestVerified(dev, sarea_priv->BufferState);
@@ -821,8 +821,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
int pitch = dev_priv->pitch;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
@@ -867,8 +866,6 @@ static void i810_dma_quiescent(struct drm_device * dev)
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(4);
@@ -888,8 +885,6 @@ static int i810_flush_queue(struct drm_device * dev)
int i, ret = 0;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
@@ -968,7 +963,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+ DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
if (vertex->idx < 0 || vertex->idx > dma->buf_count)
@@ -1006,7 +1001,7 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
static int i810_swap_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("i810_swap_bufs\n");
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1087,11 +1082,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
sarea_priv->dirty = 0x7f;
- DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);
+ DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i810_dma_dispatch_mc\n");
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
@@ -1197,7 +1191,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@@ -1207,7 +1201,7 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (dev_priv->current_page != 0)
i810_dma_dispatch_flip(dev);
@@ -1220,7 +1214,7 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h
index 86278e3d..f5c175fe 100644
--- a/linux-core/i810_drv.h
+++ b/linux-core/i810_drv.h
@@ -145,7 +145,7 @@ extern int i810_max_ioctl;
#define BEGIN_LP_RING(n) do { \
if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);\
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
if (dev_priv->ring.space < n*4) \
i810_wait_ring(dev, n*4); \
dev_priv->ring.space -= n*4; \
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index c455a515..54aa75ad 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -38,11 +38,11 @@ struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
return drm_agp_init_ttm(dev);
}
-int i915_fence_types(struct drm_buffer_object *bo,
+int i915_fence_type(struct drm_buffer_object *bo,
uint32_t *fclass,
uint32_t *type)
{
- if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
@@ -113,7 +113,16 @@ int i915_init_mem_type(struct drm_device *dev, uint32_t type,
return 0;
}
-uint32_t i915_evict_mask(struct drm_buffer_object *bo)
+/**
+ * i915_evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags.
+ */
+uint64_t i915_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
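
The evict hook now returns a 64-bit flag word that lands in proposed_flags. An implementation typically just maps the current memory type to a fallback placement; for a made-up driver it would look roughly like this (values illustrative only):

	/* Sketch: evicted VRAM buffers fall back to TT, everything else
	 * to cached local memory. */
	static uint64_t example_evict_flags(struct drm_buffer_object *bo)
	{
		switch (bo->mem.mem_type) {
		case DRM_BO_MEM_VRAM:
			return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
		default:
			return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
		}
	}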
@@ -280,7 +289,18 @@ void i915_flush_ttm(struct drm_ttm *ttm)
return;
DRM_MEMORYBARRIER();
+
+#ifdef CONFIG_X86_32
+ /* Hopefully nobody has built an x86-64 processor without clflush */
+ if (!cpu_has_clflush) {
+ wbinvd();
+ DRM_MEMORYBARRIER();
+ return;
+ }
+#endif
+
for (i = ttm->num_pages - 1; i >= 0; i--)
drm_cache_flush_page(drm_ttm_get_page(ttm, i));
+
DRM_MEMORYBARRIER();
}
diff --git a/linux-core/i915_compat.c b/linux-core/i915_compat.c
index 58fb8418..f3e0a081 100644
--- a/linux-core/i915_compat.c
+++ b/linux-core/i915_compat.c
@@ -19,8 +19,13 @@
#define I915_IFPADDR 0x60
#define I965_IFPADDR 0x70
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
+#define upper_32_bits(_val) (((u64)(_val)) >> 32)
+#endif
+
static struct _i9xx_private_compat {
void __iomem *flush_page;
+ int resource_valid;
struct resource ifp_resource;
} i9xx_private;
@@ -57,16 +62,17 @@ static void intel_i915_setup_chipset_flush(struct pci_dev *pdev)
pci_read_config_dword(pdev, I915_IFPADDR, &temp);
if (!(temp & 0x1)) {
intel_alloc_chipset_flush_resource(pdev);
-
+ i9xx_private.resource_valid = 1;
pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
} else {
temp &= ~1;
+ i9xx_private.resource_valid = 1;
i9xx_private.ifp_resource.start = temp;
i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
if (ret) {
- i9xx_private.ifp_resource.start = 0;
+ i9xx_private.resource_valid = 0;
printk("Failed inserting resource into tree\n");
}
}
@@ -84,6 +90,7 @@ static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
intel_alloc_chipset_flush_resource(pdev);
+ i9xx_private.resource_valid = 1;
pci_write_config_dword(pdev, I965_IFPADDR + 4,
upper_32_bits(i9xx_private.ifp_resource.start));
pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
@@ -93,11 +100,12 @@ static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
temp_lo &= ~0x1;
l64 = ((u64)temp_hi << 32) | temp_lo;
+ i9xx_private.resource_valid = 1;
i9xx_private.ifp_resource.start = l64;
i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
- if (!ret) {
- i9xx_private.ifp_resource.start = 0;
+ if (ret) {
+ i9xx_private.resource_valid = 0;
printk("Failed inserting resource into tree\n");
}
}
@@ -167,7 +175,9 @@ static void intel_i9xx_setup_flush(struct drm_device *dev)
static void intel_i9xx_fini_flush(struct drm_device *dev)
{
iounmap(i9xx_private.flush_page);
- release_resource(&i9xx_private.ifp_resource);
+ if (i9xx_private.resource_valid)
+ release_resource(&i9xx_private.ifp_resource);
+ i9xx_private.resource_valid = 0;
}
static void intel_i9xx_flush_page(struct drm_device *dev)
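
The new resource_valid flag records whether the ifp resource actually made it into the iomem tree, so teardown only releases what was inserted. The pattern, reduced to its essentials (hypothetical helper, following this file's names):

	/* Only a successful request_resource() makes a later
	 * release_resource() legal, so record the outcome explicitly. */
	static void example_claim_ifp(struct resource *res)
	{
		i9xx_private.resource_valid = 1;
		if (request_resource(&iomem_resource, res)) {
			i9xx_private.resource_valid = 0;
			printk("Failed inserting resource into tree\n");
		}
	}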
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 598039dc..629a39be 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -62,10 +62,10 @@ static struct drm_bo_driver i915_bo_driver = {
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
- .fence_type = i915_fence_types,
+ .fence_type = i915_fence_type,
.invalidate_caches = i915_invalidate_caches,
.init_mem_type = i915_init_mem_type,
- .evict_mask = i915_evict_mask,
+ .evict_flags = i915_evict_flags,
.move = i915_move,
.ttm_cache_flush = i915_flush_ttm,
};
@@ -331,7 +331,7 @@ static int i915_suspend(struct drm_device *dev)
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
- if (IS_I965GM(dev)) {
+ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
@@ -537,8 +537,7 @@ static struct drm_driver driver = {
*/
.driver_features =
DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR | */
- DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
- DRIVER_IRQ_VBL2,
+ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.load = i915_driver_load,
.unload = i915_driver_unload,
/* .lastclose = i915_driver_lastclose,
@@ -546,8 +545,9 @@ static struct drm_driver driver = {
.suspend = i915_suspend,
.resume = i915_resume,
.device_is_agp = i915_driver_device_is_agp,
- .vblank_wait = i915_driver_vblank_wait,
- .vblank_wait2 = i915_driver_vblank_wait2,
+ .get_vblank_counter = i915_get_vblank_counter,
+ .enable_vblank = i915_enable_vblank,
+ .disable_vblank = i915_disable_vblank,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
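
Each converted driver now supplies the same three hooks in place of vblank_wait/vblank_wait2. Their shape for a made-up device with a hardware frame counter and a per-crtc interrupt enable bit (all EXAMPLE_* names are invented for illustration):

	u32 example_get_vblank_counter(struct drm_device *dev, int crtc)
	{
		return EXAMPLE_READ(dev, EXAMPLE_VBLANK_COUNT(crtc));
	}

	int example_enable_vblank(struct drm_device *dev, int crtc)
	{
		EXAMPLE_WRITE(dev, EXAMPLE_IRQ_ENABLE, EXAMPLE_VBLANK_BIT(crtc));
		return 0;	/* success */
	}

	void example_disable_vblank(struct drm_device *dev, int crtc)
	{
		EXAMPLE_WRITE(dev, EXAMPLE_IRQ_DISABLE, EXAMPLE_VBLANK_BIT(crtc));
	}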
diff --git a/linux-core/i915_ioc32.c b/linux-core/i915_ioc32.c
index 11dee035..0b8fff19 100644
--- a/linux-core/i915_ioc32.c
+++ b/linux-core/i915_ioc32.c
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
+#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
@@ -182,13 +183,73 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
DRM_IOCTL_I915_ALLOC, (unsigned long) request);
}
+typedef struct drm_i915_execbuffer32 {
+ uint64_t ops_list;
+ uint32_t num_buffers;
+ struct _drm_i915_batchbuffer32 batch;
+ drm_context_t context;
+ struct drm_fence_arg fence_arg;
+} drm_i915_execbuffer32_t;
+
+static int compat_i915_execbuffer(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_i915_execbuffer32_t req32;
+ struct drm_i915_execbuffer __user *request;
+ int err;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.ops_list, &request->ops_list)
+ || __put_user(req32.num_buffers, &request->num_buffers)
+ || __put_user(req32.context, &request->context)
+ || __copy_to_user(&request->fence_arg, &req32.fence_arg,
+ sizeof(req32.fence_arg))
+ || __put_user(req32.batch.start, &request->batch.start)
+ || __put_user(req32.batch.used, &request->batch.used)
+ || __put_user(req32.batch.DR1, &request->batch.DR1)
+ || __put_user(req32.batch.DR4, &request->batch.DR4)
+ || __put_user(req32.batch.num_cliprects,
+ &request->batch.num_cliprects)
+ || __put_user((int __user *)(unsigned long)req32.batch.cliprects,
+ &request->batch.cliprects))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request);
+
+ if (err)
+ return err;
+
+ if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle)
+ || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class)
+ || __get_user(req32.fence_arg.type, &request->fence_arg.type)
+ || __get_user(req32.fence_arg.flags, &request->fence_arg.flags)
+ || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled)
+ || __get_user(req32.fence_arg.error, &request->fence_arg.error)
+ || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence))
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)arg, &req32, sizeof(req32)))
+ return -EFAULT;
+
+ return 0;
+}
+
drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
- [DRM_I915_ALLOC] = compat_i915_alloc
+ [DRM_I915_ALLOC] = compat_i915_alloc,
+#ifdef I915_HAVE_BUFFER
+ [DRM_I915_EXECBUFFER] = compat_i915_execbuffer,
+#endif
};
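
For context, this table is indexed by ioctl number relative to DRM_COMMAND_BASE; the usual dispatch over it follows the standard drm compat wrapper of this era. A sketch, which may differ in detail from the file's actual wrapper:

	long example_i915_compat_ioctl(struct file *filp, unsigned int cmd,
				       unsigned long arg)
	{
		unsigned int nr = DRM_IOCTL_NR(cmd);
		drm_ioctl_compat_t *fn = NULL;
		int ret;

		if (nr < DRM_COMMAND_BASE)
			return drm_compat_ioctl(filp, cmd, arg);

		if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
			fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];

		lock_kernel();
		ret = fn ? (*fn)(filp, cmd, arg)
			 : drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
		unlock_kernel();
		return ret;
	}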
/**
diff --git a/linux-core/mach64_drv.c b/linux-core/mach64_drv.c
index 9709934d..16bc9ff3 100644
--- a/linux-core/mach64_drv.c
+++ b/linux-core/mach64_drv.c
@@ -42,9 +42,11 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA
- | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+ | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.lastclose = mach64_driver_lastclose,
- .vblank_wait = mach64_driver_vblank_wait,
+ .get_vblank_counter = mach64_get_vblank_counter,
+ .enable_vblank = mach64_enable_vblank,
+ .disable_vblank = mach64_disable_vblank,
.irq_preinstall = mach64_driver_irq_preinstall,
.irq_postinstall = mach64_driver_irq_postinstall,
.irq_uninstall = mach64_driver_irq_uninstall,
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index bab90aa5..14a0be45 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -46,15 +46,16 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
- DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL,
+ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof (drm_mga_buf_priv_t),
.load = mga_driver_load,
.unload = mga_driver_unload,
.lastclose = mga_driver_lastclose,
.dma_quiescent = mga_driver_dma_quiescent,
.device_is_agp = mga_driver_device_is_agp,
- .vblank_wait = mga_driver_vblank_wait,
+ .get_vblank_counter = mga_get_vblank_counter,
+ .enable_vblank = mga_enable_vblank,
+ .disable_vblank = mga_disable_vblank,
.irq_preinstall = mga_driver_irq_preinstall,
.irq_postinstall = mga_driver_irq_postinstall,
.irq_uninstall = mga_driver_irq_uninstall,
diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c
index 9b252a05..a652bb1d 100644
--- a/linux-core/nouveau_buffer.c
+++ b/linux-core/nouveau_buffer.c
@@ -56,7 +56,7 @@ nouveau_bo_fence_type(struct drm_buffer_object *bo,
{
/* When we get called, *fclass is set to the requested fence class */
- if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
@@ -130,8 +130,8 @@ nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
return 0;
}
-static uint32_t
-nouveau_bo_evict_mask(struct drm_buffer_object *bo)
+static uint64_t
+nouveau_bo_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
@@ -207,15 +207,16 @@ nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
- DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
+ tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_CACHED |
+ DRM_BO_FLAG_FORCE_CACHING);
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
if (ret)
return ret;
- ret = drm_bind_ttm(bo->ttm, &tmp_mem);
+ ret = drm_ttm_bind(bo->ttm, &tmp_mem);
if (ret)
goto out_cleanup;
@@ -291,7 +292,7 @@ struct drm_bo_driver nouveau_bo_driver = {
.fence_type = nouveau_bo_fence_type,
.invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
- .evict_mask = nouveau_bo_evict_mask,
+ .evict_flags = nouveau_bo_evict_flags,
.move = nouveau_bo_move,
.ttm_cache_flush= nouveau_bo_flush_ttm
};
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index f3bf5341..cc4d5a92 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -25,7 +25,7 @@ nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
static int
nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
- struct page **pages)
+ struct page **pages, struct page *dummy_read_page)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
int p, d, o;
@@ -41,8 +41,11 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
nvbe->pages_populated = d = 0;
for (p = 0; p < num_pages; p++) {
for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
+ struct page *page = pages[p];
+ if (!page)
+ page = dummy_read_page;
nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
- pages[p], o,
+ page, o,
NV_CTXDMA_PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(nvbe->pagelist[d])) {
@@ -136,8 +139,8 @@ nouveau_sgdma_unbind(struct drm_ttm_backend *be)
if (dev_priv->card_type < NV_50) {
INSTANCE_WR(gpuobj, pte, pteval | 3);
} else {
- INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010);
- INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004);
+ INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
+ INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
}
pte++;
@@ -218,15 +221,14 @@ nouveau_sgdma_init(struct drm_device *dev)
return ret;
}
- if (dev_priv->card_type < NV_50) {
- dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32);
- SetPageLocked(dev_priv->gart_info.sg_dummy_page);
- dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(dev->pdev,
- dev_priv->gart_info.sg_dummy_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dev_priv->gart_info.sg_dummy_page =
+ alloc_page(GFP_KERNEL|__GFP_DMA32);
+ SetPageLocked(dev_priv->gart_info.sg_dummy_page);
+ dev_priv->gart_info.sg_dummy_bus =
+ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (dev_priv->card_type < NV_50) {
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
* on those cards? */
@@ -242,8 +244,9 @@ nouveau_sgdma_init(struct drm_device *dev)
}
} else {
for (i=0; i<obj_size; i+=8) {
- INSTANCE_WR(gpuobj, (i+0)/4, 0); //x00000010);
- INSTANCE_WR(gpuobj, (i+4)/4, 0); //0x00000004);
+ INSTANCE_WR(gpuobj, (i+0)/4,
+ dev_priv->gart_info.sg_dummy_bus | 0x21);
+ INSTANCE_WR(gpuobj, (i+4)/4, 0);
}
}
@@ -299,7 +302,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
}
dev_priv->gart_info.sg_handle = sgreq.handle;
- if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) {
+ if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {
DRM_ERROR("failed populate: %d\n", ret);
return ret;
}
diff --git a/linux-core/r128_drv.c b/linux-core/r128_drv.c
index ef4a5cbd..7b6159b9 100644
--- a/linux-core/r128_drv.c
+++ b/linux-core/r128_drv.c
@@ -44,12 +44,13 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL,
+ DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_r128_buf_priv_t),
.preclose = r128_driver_preclose,
.lastclose = r128_driver_lastclose,
- .vblank_wait = r128_driver_vblank_wait,
+ .get_vblank_counter = r128_get_vblank_counter,
+ .enable_vblank = r128_enable_vblank,
+ .disable_vblank = r128_disable_vblank,
.irq_preinstall = r128_driver_irq_preinstall,
.irq_postinstall = r128_driver_irq_postinstall,
.irq_uninstall = r128_driver_irq_uninstall,
diff --git a/linux-core/radeon_buffer.c b/linux-core/radeon_buffer.c
index 8e2b20b1..5dff1898 100644
--- a/linux-core/radeon_buffer.c
+++ b/linux-core/radeon_buffer.c
@@ -65,7 +65,7 @@ int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags)
return 0;
}
-uint32_t radeon_evict_mask(struct drm_buffer_object *bo)
+uint64_t radeon_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
@@ -216,14 +216,14 @@ static int radeon_move_flip(struct drm_buffer_object * bo,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
+ tmp_mem.proposed_flags = DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
if (ret)
return ret;
- ret = drm_bind_ttm(bo->ttm, &tmp_mem);
+ ret = drm_ttm_bind(bo->ttm, &tmp_mem);
if (ret)
goto out_cleanup;
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index 2e491304..355e4c22 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -83,7 +83,7 @@ static struct drm_bo_driver radeon_bo_driver = {
.fence_type = radeon_fence_types,
.invalidate_caches = radeon_invalidate_caches,
.init_mem_type = radeon_init_mem_type,
- .evict_mask = radeon_evict_mask,
+ .evict_flags = radeon_evict_flags,
.move = radeon_move,
};
#endif
@@ -92,8 +92,7 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
.load = radeon_driver_load,
.firstopen = radeon_driver_firstopen,
@@ -102,8 +101,9 @@ static struct drm_driver driver = {
.postclose = radeon_driver_postclose,
.lastclose = radeon_driver_lastclose,
.unload = radeon_driver_unload,
- .vblank_wait = radeon_driver_vblank_wait,
- .vblank_wait2 = radeon_driver_vblank_wait2,
+ .get_vblank_counter = radeon_get_vblank_counter,
+ .enable_vblank = radeon_enable_vblank,
+ .disable_vblank = radeon_disable_vblank,
.dri_library_name = dri_library_name,
.irq_preinstall = radeon_driver_irq_preinstall,
.irq_postinstall = radeon_driver_irq_postinstall,
diff --git a/linux-core/radeon_ms_drv.c b/linux-core/radeon_ms_drv.c
index 4b52a7d7..d7b0eecc 100644
--- a/linux-core/radeon_ms_drv.c
+++ b/linux-core/radeon_ms_drv.c
@@ -57,8 +57,6 @@ static struct drm_driver driver = {
.context_dtor = NULL,
.kernel_context_switch = NULL,
.kernel_context_switch_unlock = NULL,
- .vblank_wait = NULL,
- .vblank_wait2 = NULL,
.dri_library_name = radeon_ms_driver_dri_library_name,
.device_is_agp = NULL,
.irq_handler = radeon_ms_irq_handler,
@@ -83,8 +81,7 @@ static struct drm_driver driver = {
.date = DRIVER_DATE,
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
- DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
- DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+ DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
.dev_priv_size = 0,
.ioctls = radeon_ms_ioctls,
.num_ioctls = 0,
diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c
index 5110d6e5..6782731d 100644
--- a/linux-core/sis_mm.c
+++ b/linux-core/sis_mm.c
@@ -114,7 +114,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
}
@@ -204,7 +204,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index ea755247..532fae6a 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -144,7 +144,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
return 0;
}
-uint32_t via_evict_mask(struct drm_buffer_object *bo)
+uint64_t via_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c
index 270eac15..3f75af38 100644
--- a/linux-core/via_mm.c
+++ b/linux-core/via_mm.c
@@ -53,7 +53,7 @@ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@@ -77,7 +77,7 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;