author     Jesse Barnes <jbarnes@hobbes.virtuousgeek.org>   2008-01-22 09:42:37 -0800
committer  Jesse Barnes <jbarnes@hobbes.virtuousgeek.org>   2008-01-22 09:42:37 -0800
commit     0cd4cbc9a6330bd619608f274592082de7c05bcf (patch)
tree       4e0b682a24e448d17abf8b2fadc75ccee2cd5b57 /linux-core
parent     128a8f7ea20af2549e448157b431d5c1f90f37c3 (diff)
parent     5231a524f53babd127a576d7567671dafb29651b (diff)
Merge branch 'master' into vblank-rework, including mach64 support
Conflicts:

	linux-core/drmP.h
	linux-core/drm_drv.c
	shared-core/i915_drv.h
	shared-core/i915_irq.c
	shared-core/mga_irq.c
	shared-core/radeon_irq.c
	shared-core/via_irq.c

Mostly trivial conflicts.

mach64 support from Mathieu Bérard.
Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile.kernel |    7
-rw-r--r--  linux-core/ati_pcigart.c |   10
-rw-r--r--  linux-core/drmP.h |   83
-rw-r--r--  linux-core/drm_agpsupport.c |   77
-rw-r--r--  linux-core/drm_auth.c |    4
-rw-r--r--  linux-core/drm_bo.c |  614
-rw-r--r--  linux-core/drm_bo_lock.c |   21
-rw-r--r--  linux-core/drm_bo_move.c |   99
-rw-r--r--  linux-core/drm_bufs.c |   42
-rw-r--r--  linux-core/drm_compat.c |   60
-rw-r--r--  linux-core/drm_compat.h |   24
-rw-r--r--  linux-core/drm_context.c |    4
-rw-r--r--  linux-core/drm_dma.c |    8
-rw-r--r--  linux-core/drm_drv.c |   49
-rw-r--r--  linux-core/drm_fence.c |   86
-rw-r--r--  linux-core/drm_fops.c |   26
-rw-r--r--  linux-core/drm_hashtab.c |   23
-rw-r--r--  linux-core/drm_hashtab.h |    1
l---------  linux-core/drm_internal.h |    1
-rw-r--r--  linux-core/drm_ioctl.c |   18
-rw-r--r--  linux-core/drm_irq.c |    8
-rw-r--r--  linux-core/drm_memory.c |    6
-rw-r--r--  linux-core/drm_memory.h |    1
-rw-r--r--  linux-core/drm_mm.c |    4
-rw-r--r--  linux-core/drm_object.c |   39
-rw-r--r--  linux-core/drm_objects.h |  334
-rw-r--r--  linux-core/drm_os_linux.h |   18
-rw-r--r--  linux-core/drm_pci.c |    4
-rw-r--r--  linux-core/drm_proc.c |    8
-rw-r--r--  linux-core/drm_regman.c |  200
-rw-r--r--  linux-core/drm_scatter.c |   12
-rw-r--r--  linux-core/drm_sman.c |    3
-rw-r--r--  linux-core/drm_stub.c |   20
-rw-r--r--  linux-core/drm_sysfs.c |    9
-rw-r--r--  linux-core/drm_ttm.c |  267
-rw-r--r--  linux-core/drm_vm.c |   18
-rw-r--r--  linux-core/ffb_drv.h |    4
-rw-r--r--  linux-core/i810_dma.c |   26
-rw-r--r--  linux-core/i810_drv.h |   50
-rw-r--r--  linux-core/i915_buffer.c |   77
-rw-r--r--  linux-core/i915_compat.c |  215
-rw-r--r--  linux-core/i915_drv.c |   31
-rw-r--r--  linux-core/i915_fence.c |   29
-rw-r--r--  linux-core/i915_ioc32.c |   89
-rw-r--r--  linux-core/mach64_drv.c |    6
-rw-r--r--  linux-core/mga_drv.c |    6
-rw-r--r--  linux-core/mga_ioc32.c |   32
-rw-r--r--  linux-core/nouveau_buffer.c |  298
-rw-r--r--  linux-core/nouveau_drv.c |    3
-rw-r--r--  linux-core/nouveau_fence.c |  134
-rw-r--r--  linux-core/nouveau_sgdma.c |   36
-rw-r--r--  linux-core/r128_ioc32.c |    6
-rw-r--r--  linux-core/radeon_drv.c |    2
-rw-r--r--  linux-core/radeon_ioc32.c |   16
-rw-r--r--  linux-core/sis_mm.c |   13
-rw-r--r--  linux-core/via_buffer.c |   16
-rw-r--r--  linux-core/via_dmablit.c |  184
-rw-r--r--  linux-core/via_dmablit.h |   84
-rw-r--r--  linux-core/via_mm.c |    6
-rw-r--r--  linux-core/xgi_cmdlist.c |   14
-rw-r--r--  linux-core/xgi_drv.c |    6
-rw-r--r--  linux-core/xgi_drv.h |    2
-rw-r--r--  linux-core/xgi_fb.c |    6
-rw-r--r--  linux-core/xgi_fence.c |    2
-rw-r--r--  linux-core/xgi_ioc32.c |    2
-rw-r--r--  linux-core/xgi_misc.c |   12
-rw-r--r--  linux-core/xgi_misc.h |    2
-rw-r--r--  linux-core/xgi_regs.h |    2
68 files changed, 2490 insertions(+), 1129 deletions(-)
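
For orientation before the hunks themselves, the sketch below shows how an in-kernel caller might use the reworked buffer-object interface after this merge: bo->mem.mask becomes bo->mem.proposed_flags, drm_bo_type_dc becomes drm_bo_type_device, and drm_bo_do_validate() no longer takes a separate no_wait argument (it is derived from the DRM_BO_HINT_DONT_BLOCK hint). This is a minimal illustration only, not part of the patch; the helper name example_pin_scratch_bo and the particular flag choices are hypothetical, while the function signatures follow the linux-core/drm_bo.c hunks below.

/*
 * Illustrative sketch (not part of this commit): create a kernel-owned
 * buffer object and pin it using the post-merge interfaces.
 */
#include "drmP.h"
#include "drm_objects.h"

static int example_pin_scratch_bo(struct drm_device *dev,
				  unsigned long size,
				  struct drm_buffer_object **bo_out)
{
	struct drm_buffer_object *bo;
	int ret;

	/* Kernel-owned buffer: pages allocated by the kernel, no user handle. */
	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
				       DRM_BO_FLAG_MEM_LOCAL,
				       0 /* hint */, 0 /* page_alignment */,
				       0 /* buffer_start */, &bo);
	if (ret)
		return ret;

	/*
	 * Validate with the new signature: the old no_wait parameter is
	 * expressed through DRM_BO_HINT_DONT_BLOCK, and the flags/mask pair
	 * feeds bo->mem.proposed_flags rather than the old bo->mem.mask.
	 */
	ret = drm_bo_do_validate(bo,
				 DRM_BO_FLAG_NO_EVICT,  /* flags to set */
				 DRM_BO_FLAG_NO_EVICT,  /* bits being changed */
				 DRM_BO_HINT_DONT_BLOCK,
				 0 /* fence_class */,
				 NULL /* in-kernel caller, no reply struct */);
	if (ret)
		drm_bo_usage_deref_unlocked(&bo);
	else
		*bo_out = bo;

	return ret;
}

Note that pinning with DRM_BO_FLAG_NO_EVICT remains restricted to privileged callers, as the drm_bo_modify_proposed_flags() hunk below shows.
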
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 79136431..e7c280d0 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -13,16 +13,17 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
- drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o
+ drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
+ drm_regman.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
i810-objs := i810_drv.o i810_dma.o
i915-objs := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_fence.o \
- i915_buffer.o
+ i915_buffer.o i915_compat.o
nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nouveau_object.o nouveau_irq.o nouveau_notifier.o nouveau_swmthd.o \
- nouveau_sgdma.o nouveau_dma.o \
+ nouveau_sgdma.o nouveau_dma.o nouveau_buffer.o nouveau_fence.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
nv04_fb.o nv10_fb.o nv40_fb.o \
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index 7241c2a8..68029635 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -41,7 +41,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
struct page *page;
int i;
- DRM_DEBUG("%s: alloc %d order\n", __FUNCTION__, order);
+ DRM_DEBUG("%d order\n", order);
address = __get_free_pages(GFP_KERNEL | __GFP_COMP,
order);
@@ -58,7 +58,7 @@ static void *drm_ati_alloc_pcigart_table(int order)
SetPageReserved(page);
}
- DRM_DEBUG("%s: returning 0x%08lx\n", __FUNCTION__, address);
+ DRM_DEBUG("returning 0x%08lx\n", address);
return (void *)address;
}
@@ -67,7 +67,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)
struct page *page;
int i;
int num_pages = 1 << order;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
page = virt_to_page((unsigned long)address);
@@ -81,7 +81,7 @@ static void drm_ati_free_pcigart_table(void *address, int order)
free_pages((unsigned long)address, order);
}
-int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info)
+int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
struct drm_sg_mem *entry = dev->sg;
unsigned long pages;
@@ -132,7 +132,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gar
}
EXPORT_SYMBOL(drm_ati_pcigart_cleanup);
-int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info)
+int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
struct drm_sg_mem *entry = dev->sg;
void *address = NULL;
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 332ee1cd..4e8b087b 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -66,8 +66,8 @@
#ifdef CONFIG_MTRR
#include <asm/mtrr.h>
#endif
-#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
#include <asm/agp.h>
+#if defined(CONFIG_AGP) || defined(CONFIG_AGP_MODULE)
#include <linux/types.h>
#include <linux/agp_backend.h>
#endif
@@ -83,7 +83,9 @@
#include "drm_os_linux.h"
#include "drm_hashtab.h"
+#include "drm_internal.h"
+struct drm_device;
struct drm_file;
/* If you want the memory alloc debug functionality, change define below */
@@ -159,6 +161,12 @@ struct drm_file;
#define DRM_OBJECT_HASH_ORDER 12
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
+/*
+ * This should be small enough to allow the use of kmalloc for hash tables
+ * instead of vmalloc.
+ */
+
+#define DRM_FILE_HASH_ORDER 8
#define DRM_MM_INIT_MAX_PAGES 256
/*@}*/
@@ -199,7 +207,7 @@ struct drm_file;
#if DRM_DEBUG_CODE
#define DRM_DEBUG(fmt, arg...) \
do { \
- if ( drm_debug ) \
+ if ( drm_debug ) \
printk(KERN_DEBUG \
"[" DRM_NAME ":%s] " fmt , \
__FUNCTION__ , ##arg); \
@@ -273,9 +281,6 @@ do { \
return -EFAULT; \
}
-struct drm_device;
-struct drm_file;
-
/**
* Ioctl function type.
*
@@ -391,14 +396,9 @@ struct drm_buf_entry {
struct drm_freelist freelist;
};
-/*
- * This should be small enough to allow the use of kmalloc for hash tables
- * instead of vmalloc.
- */
-#define DRM_FILE_HASH_ORDER 8
enum drm_ref_type {
- _DRM_REF_USE=0,
+ _DRM_REF_USE = 0,
_DRM_REF_TYPE1,
_DRM_NO_REF_TYPES
};
@@ -501,14 +501,14 @@ struct drm_agp_mem {
/**
* AGP data.
*
- * \sa drm_agp_init)() and drm_device::agp.
+ * \sa drm_agp_init() and drm_device::agp.
*/
struct drm_agp_head {
DRM_AGP_KERN agp_info; /**< AGP device information */
struct list_head memory;
unsigned long mode; /**< AGP mode */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,11)
- struct agp_bridge_data *bridge;
+ struct agp_bridge_data *bridge;
#endif
int enabled; /**< whether the AGP bus as been enabled */
int acquired; /**< whether the AGP device has been acquired */
@@ -584,15 +584,6 @@ struct drm_vbl_sig {
struct task_struct *task;
};
-/**
- * Drawable information.
- */
-struct drm_drawable_info {
- unsigned int num_rects;
- struct drm_clip_rect *rects;
-};
-
-
/* location of GART table */
#define DRM_ATI_GART_MAIN 1
#define DRM_ATI_GART_FB 2
@@ -601,7 +592,7 @@ struct drm_drawable_info {
#define DRM_ATI_GART_PCIE 2
#define DRM_ATI_GART_IGP 3
-struct ati_pcigart_info {
+struct drm_ati_pcigart_info {
int gart_table_location;
int gart_reg_if;
void *addr;
@@ -631,9 +622,9 @@ struct drm_driver {
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
void (*dma_ready) (struct drm_device *);
int (*dma_quiescent) (struct drm_device *);
- int (*context_ctor) (struct drm_device * dev, int context);
- int (*context_dtor) (struct drm_device * dev, int context);
- int (*kernel_context_switch) (struct drm_device * dev, int old,
+ int (*context_ctor) (struct drm_device *dev, int context);
+ int (*context_dtor) (struct drm_device *dev, int context);
+ int (*kernel_context_switch) (struct drm_device *dev, int old,
int new);
void (*kernel_context_switch_unlock) (struct drm_device * dev);
/**
@@ -680,7 +671,7 @@ struct drm_driver {
* interrupts will have to stay on to keep the count accurate.
*/
void (*disable_vblank) (struct drm_device *dev, int crtc);
- int (*dri_library_name) (struct drm_device * dev, char * buf);
+ int (*dri_library_name) (struct drm_device *dev, char * buf);
/**
* Called by \c drm_device_is_agp. Typically used to determine if a
@@ -693,22 +684,23 @@ struct drm_driver {
* card is absolutely \b not AGP (return of 0), absolutely \b is AGP
* (return of 1), or may or may not be AGP (return of 2).
*/
- int (*device_is_agp) (struct drm_device * dev);
+ int (*device_is_agp) (struct drm_device *dev);
/* these have to be filled in */
irqreturn_t(*irq_handler) (DRM_IRQ_ARGS);
- void (*irq_preinstall) (struct drm_device * dev);
- int (*irq_postinstall) (struct drm_device * dev);
- void (*irq_uninstall) (struct drm_device * dev);
+ void (*irq_preinstall) (struct drm_device *dev);
+ int (*irq_postinstall) (struct drm_device *dev);
+ void (*irq_uninstall) (struct drm_device *dev);
void (*reclaim_buffers) (struct drm_device *dev,
struct drm_file *file_priv);
void (*reclaim_buffers_locked) (struct drm_device *dev,
struct drm_file *file_priv);
void (*reclaim_buffers_idlelocked) (struct drm_device *dev,
struct drm_file *file_priv);
- unsigned long (*get_map_ofs) (struct drm_map * map);
- unsigned long (*get_reg_ofs) (struct drm_device * dev);
- void (*set_version) (struct drm_device * dev, struct drm_set_version * sv);
+ unsigned long (*get_map_ofs) (struct drm_map *map);
+ unsigned long (*get_reg_ofs) (struct drm_device *dev);
+ void (*set_version) (struct drm_device *dev,
+ struct drm_set_version *sv);
struct drm_fence_driver *fence_driver;
struct drm_bo_driver *bo_driver;
@@ -1188,6 +1180,7 @@ extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
+extern void drm_agp_chipset_flush(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
struct drm_driver *driver);
@@ -1219,8 +1212,8 @@ extern int drm_sg_free(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/* ATI PCIGART support (ati_pcigart.h) */
-extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info);
-extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
+extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info);
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
@@ -1231,7 +1224,7 @@ extern void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah);
struct drm_sysfs_class;
extern struct class *drm_sysfs_create(struct module *owner, char *name);
extern void drm_sysfs_destroy(void);
-extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head * head);
+extern int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head);
extern void drm_sysfs_device_remove(struct drm_device *dev);
/*
@@ -1271,7 +1264,7 @@ static __inline__ struct drm_map *drm_core_findmap(struct drm_device *dev,
static __inline__ int drm_device_is_agp(struct drm_device *dev)
{
if ( dev->driver->device_is_agp != NULL ) {
- int err = (*dev->driver->device_is_agp)( dev );
+ int err = (*dev->driver->device_is_agp)(dev);
if (err != 2) {
return err;
@@ -1342,19 +1335,5 @@ static inline void drm_ctl_free(void *pt, size_t size, int area)
/*@}*/
-/** Type for the OS's non-sleepable mutex lock */
-#define DRM_SPINTYPE spinlock_t
-/**
- * Initialize the lock for use. name is an optional string describing the
- * lock
- */
-#define DRM_SPININIT(l,name) spin_lock_init(l)
-#define DRM_SPINUNINIT(l)
-#define DRM_SPINLOCK(l) spin_lock(l)
-#define DRM_SPINUNLOCK(l) spin_unlock(l)
-#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags);
-#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags);
-#define DRM_SPINLOCK_ASSERT(l) do {} while (0)
-
#endif /* __KERNEL__ */
#endif
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index b68efc64..02187017 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -48,7 +48,7 @@
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
-int drm_agp_info(struct drm_device * dev, struct drm_agp_info *info)
+int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
DRM_AGP_KERN *kern;
@@ -130,7 +130,7 @@ EXPORT_SYMBOL(drm_agp_acquire);
int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- return drm_agp_acquire( (struct drm_device *) file_priv->head->dev );
+ return drm_agp_acquire((struct drm_device *) file_priv->head->dev);
}
/**
@@ -426,7 +426,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
if (!(head->bridge = agp_backend_acquire(dev->pdev))) {
drm_free(head, sizeof(*head), DRM_MEM_AGPLISTS);
return NULL;
- }
+ }
agp_copy_info(head->bridge, &head->agp_info);
agp_backend_release(head->bridge);
} else {
@@ -498,18 +498,21 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
#define AGP_REQUIRED_MAJOR 0
#define AGP_REQUIRED_MINOR 102
-static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) {
+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
+{
return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
}
-static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_pages,
- struct page **pages) {
-
- struct drm_agp_ttm_backend *agp_be =
+static int drm_agp_populate(struct drm_ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
+{
+ struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
struct page **cur_page, **last_page = pages + num_pages;
DRM_AGP_MEM *mem;
+ int dummy_page_count = 0;
if (drm_alloc_memctl(num_pages * sizeof(void *)))
return -1;
@@ -521,15 +524,22 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p
mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
#endif
if (!mem) {
- drm_free_memctl(num_pages *sizeof(void *));
+ drm_free_memctl(num_pages * sizeof(void *));
return -1;
}
DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
mem->page_count = 0;
for (cur_page = pages; cur_page < last_page; ++cur_page) {
- mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(*cur_page));
+ struct page *page = *cur_page;
+ if (!page) {
+ page = dummy_read_page;
+ ++dummy_page_count;
+ }
+ mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
}
+ if (dummy_page_count)
+ DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
agp_be->mem = mem;
return 0;
}
@@ -541,24 +551,28 @@ static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_AGP_MEM *mem = agp_be->mem;
int ret;
+ int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
DRM_DEBUG("drm_agp_bind_ttm\n");
mem->is_flushed = TRUE;
- mem->type = (bo_mem->flags & DRM_BO_FLAG_CACHED) ? AGP_USER_CACHED_MEMORY :
- AGP_USER_MEMORY;
+ mem->type = AGP_USER_MEMORY;
+ /* CACHED MAPPED implies not snooped memory */
+ if (snooped)
+ mem->type = AGP_USER_CACHED_MEMORY;
+
ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
- if (ret) {
+ if (ret)
DRM_ERROR("AGP Bind memory failed\n");
- }
+
DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
DRM_BE_FLAG_BOUND_CACHED : 0,
DRM_BE_FLAG_BOUND_CACHED);
return ret;
}
-static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
-
- struct drm_agp_ttm_backend *agp_be =
+static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
+{
+ struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_DEBUG("drm_agp_unbind_ttm\n");
@@ -568,9 +582,9 @@ static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) {
return 0;
}
-static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {
-
- struct drm_agp_ttm_backend *agp_be =
+static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
+{
+ struct drm_agp_ttm_backend *agp_be =
container_of(backend, struct drm_agp_ttm_backend, backend);
DRM_AGP_MEM *mem = agp_be->mem;
@@ -579,29 +593,27 @@ static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) {
unsigned long num_pages = mem->page_count;
backend->func->unbind(backend);
agp_free_memory(mem);
- drm_free_memctl(num_pages *sizeof(void *));
+ drm_free_memctl(num_pages * sizeof(void *));
}
agp_be->mem = NULL;
}
-static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) {
-
+static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
+{
struct drm_agp_ttm_backend *agp_be;
if (backend) {
DRM_DEBUG("drm_agp_destroy_ttm\n");
agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
if (agp_be) {
- if (agp_be->mem) {
+ if (agp_be->mem)
backend->func->clear(backend);
- }
drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
}
}
}
-static struct drm_ttm_backend_func agp_ttm_backend =
-{
+static struct drm_ttm_backend_func agp_ttm_backend = {
.needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
.populate = drm_agp_populate,
.clear = drm_agp_clear_ttm,
@@ -633,7 +645,7 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
return NULL;
}
-
+
agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
if (!agp_be)
return NULL;
@@ -643,11 +655,18 @@ struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
agp_be->bridge = dev->agp->bridge;
agp_be->populated = FALSE;
agp_be->backend.func = &agp_ttm_backend;
- // agp_be->backend.mem_type = DRM_BO_MEM_TT;
agp_be->backend.dev = dev;
return &agp_be->backend;
}
EXPORT_SYMBOL(drm_agp_init_ttm);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+ agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_chipset_flush);
+#endif
+
#endif /* __OS_HAS_AGP */
diff --git a/linux-core/drm_auth.c b/linux-core/drm_auth.c
index e35e8b6d..c904a91d 100644
--- a/linux-core/drm_auth.c
+++ b/linux-core/drm_auth.c
@@ -83,7 +83,7 @@ static int drm_add_magic(struct drm_device * dev, struct drm_file * priv,
return -ENOMEM;
memset(entry, 0, sizeof(*entry));
entry->priv = priv;
- entry->hash_item.key = (unsigned long) magic;
+ entry->hash_item.key = (unsigned long)magic;
mutex_lock(&dev->struct_mutex);
drm_ht_insert_item(&dev->magiclist, &entry->hash_item);
list_add_tail(&entry->head, &dev->magicfree);
@@ -109,7 +109,7 @@ static int drm_remove_magic(struct drm_device * dev, drm_magic_t magic)
DRM_DEBUG("%d\n", magic);
mutex_lock(&dev->struct_mutex);
- if (drm_ht_find_item(&dev->magiclist, (unsigned long) magic, &hash)) {
+ if (drm_ht_find_item(&dev->magiclist, (unsigned long)magic, &hash)) {
mutex_unlock(&dev->struct_mutex);
return -EINVAL;
}
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
index 16203c77..df10e12b 100644
--- a/linux-core/drm_bo.c
+++ b/linux-core/drm_bo.c
@@ -36,23 +36,23 @@
* The buffer usage atomic_t needs to be protected by dev->struct_mutex
* when there is a chance that it can be zero before or after the operation.
*
- * dev->struct_mutex also protects all lists and list heads. Hash tables and hash
- * heads.
+ * dev->struct_mutex also protects all lists and list heads,
+ * Hash tables and hash heads.
*
* bo->mutex protects the buffer object itself excluding the usage field.
- * bo->mutex does also protect the buffer list heads, so to manipulate those, we need
- * both the bo->mutex and the dev->struct_mutex.
+ * bo->mutex does also protect the buffer list heads, so to manipulate those,
+ * we need both the bo->mutex and the dev->struct_mutex.
*
- * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
- * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
- * traversal will, in general, need to be restarted.
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
+ * the list traversal will, in general, need to be restarted.
*
*/
-static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
-static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
-static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
-static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
static inline uint64_t drm_bo_type_flags(unsigned type)
{
@@ -63,7 +63,7 @@ static inline uint64_t drm_bo_type_flags(unsigned type)
* bo locked. dev->struct_mutex locked.
*/
-void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
{
struct drm_mem_type_manager *man;
@@ -74,13 +74,13 @@ void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
list_add_tail(&bo->pinned_lru, &man->pinned);
}
-void drm_bo_add_to_lru(struct drm_buffer_object * bo)
+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
{
struct drm_mem_type_manager *man;
DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
- if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+ if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
|| bo->mem.mem_type != bo->pinned_mem_type) {
man = &bo->dev->bm.man[bo->mem.mem_type];
list_add_tail(&bo->lru, &man->lru);
@@ -89,7 +89,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo)
}
}
-static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
int ret;
@@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
return 0;
}
-static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
{
#ifdef DRM_ODD_MM_COMPAT
int ret;
@@ -133,22 +133,39 @@ static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
* Call bo->mutex locked.
*/
-static int drm_bo_add_ttm(struct drm_buffer_object * bo)
+static int drm_bo_add_ttm(struct drm_buffer_object *bo)
{
struct drm_device *dev = bo->dev;
int ret = 0;
- bo->ttm = NULL;
+ uint32_t page_flags = 0;
DRM_ASSERT_LOCKED(&bo->mutex);
+ bo->ttm = NULL;
+
+ if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
+ page_flags |= DRM_TTM_PAGE_WRITE;
switch (bo->type) {
- case drm_bo_type_dc:
+ case drm_bo_type_device:
case drm_bo_type_kernel:
- bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
+ page_flags, dev->bm.dummy_read_page);
if (!bo->ttm)
ret = -ENOMEM;
break;
case drm_bo_type_user:
+ bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
+ page_flags | DRM_TTM_PAGE_USER,
+ dev->bm.dummy_read_page);
+ if (!bo->ttm)
+ ret = -ENOMEM;
+
+ ret = drm_ttm_set_user(bo->ttm, current,
+ bo->buffer_start,
+ bo->num_pages);
+ if (ret)
+ return ret;
+
break;
default:
DRM_ERROR("Illegal buffer object type\n");
@@ -159,8 +176,8 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
return ret;
}
-static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
- struct drm_bo_mem_reg * mem,
+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
+ struct drm_bo_mem_reg *mem,
int evict, int no_wait)
{
struct drm_device *dev = bo->dev;
@@ -187,7 +204,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
goto out_err;
if (mem->mem_type != DRM_BO_MEM_LOCAL) {
- ret = drm_bind_ttm(bo->ttm, mem);
+ ret = drm_ttm_bind(bo->ttm, mem);
if (ret)
goto out_err;
}
@@ -197,11 +214,11 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
*old_mem = *mem;
mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
} else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
@@ -243,14 +260,14 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
return 0;
- out_err:
+out_err:
if (old_is_pci || new_is_pci)
drm_bo_vm_post_move(bo);
new_man = &bm->man[bo->mem.mem_type];
if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
drm_ttm_unbind(bo->ttm);
- drm_destroy_ttm(bo->ttm);
+ drm_ttm_destroy(bo->ttm);
bo->ttm = NULL;
}
@@ -262,7 +279,7 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
* Wait until the buffer is idle.
*/
-int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
int no_wait)
{
int ret;
@@ -274,11 +291,10 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
drm_fence_usage_deref_unlocked(&bo->fence);
return 0;
}
- if (no_wait) {
+ if (no_wait)
return -EBUSY;
- }
- ret =
- drm_fence_object_wait(bo->fence, lazy, ignore_signals,
+
+ ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
bo->fence_type);
if (ret)
return ret;
@@ -289,7 +305,7 @@ int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
}
EXPORT_SYMBOL(drm_bo_wait);
-static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
{
struct drm_device *dev = bo->dev;
struct drm_buffer_manager *bm = &dev->bm;
@@ -324,7 +340,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
* fence object and removing from lru lists and memory managers.
*/
-static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
{
struct drm_device *dev = bo->dev;
struct drm_buffer_manager *bm = &dev->bm;
@@ -346,9 +362,8 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
mutex_lock(&dev->struct_mutex);
- if (!atomic_dec_and_test(&bo->usage)) {
+ if (!atomic_dec_and_test(&bo->usage))
goto out;
- }
if (!bo->fence) {
list_del_init(&bo->lru);
@@ -376,7 +391,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
}
- out:
+out:
mutex_unlock(&bo->mutex);
return;
}
@@ -386,7 +401,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
* to the buffer object. Then destroy it.
*/
-static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
{
struct drm_device *dev = bo->dev;
struct drm_buffer_manager *bm = &dev->bm;
@@ -409,13 +424,12 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
if (bo->ttm) {
drm_ttm_unbind(bo->ttm);
- drm_destroy_ttm(bo->ttm);
+ drm_ttm_destroy(bo->ttm);
bo->ttm = NULL;
}
atomic_dec(&bm->count);
- // BUG_ON(!list_empty(&bo->base.list));
drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
return;
@@ -435,7 +449,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
* Call dev->struct_mutex locked.
*/
-static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
{
struct drm_buffer_manager *bm = &dev->bm;
@@ -454,9 +468,8 @@ static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
drm_bo_cleanup_refs(entry, remove_all);
- if (nentry) {
+ if (nentry)
atomic_dec(&nentry->usage);
- }
}
}
@@ -490,21 +503,20 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
mutex_unlock(&dev->struct_mutex);
}
-void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
{
- struct drm_buffer_object *tmp_bo = *bo;
+ struct drm_buffer_object *tmp_bo = *bo;
bo = NULL;
DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
- if (atomic_dec_and_test(&tmp_bo->usage)) {
+ if (atomic_dec_and_test(&tmp_bo->usage))
drm_bo_destroy_locked(tmp_bo);
- }
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);
-static void drm_bo_base_deref_locked(struct drm_file * file_priv,
- struct drm_user_object * uo)
+static void drm_bo_base_deref_locked(struct drm_file *file_priv,
+ struct drm_user_object *uo)
{
struct drm_buffer_object *bo =
drm_user_object_entry(uo, struct drm_buffer_object, base);
@@ -515,7 +527,7 @@ static void drm_bo_base_deref_locked(struct drm_file * file_priv,
drm_bo_usage_deref_locked(&bo);
}
-void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
{
struct drm_buffer_object *tmp_bo = *bo;
struct drm_device *dev = tmp_bo->dev;
@@ -571,8 +583,8 @@ EXPORT_SYMBOL(drm_putback_buffer_objects);
int drm_fence_buffer_objects(struct drm_device *dev,
struct list_head *list,
uint32_t fence_flags,
- struct drm_fence_object * fence,
- struct drm_fence_object ** used_fence)
+ struct drm_fence_object *fence,
+ struct drm_fence_object **used_fence)
{
struct drm_buffer_manager *bm = &dev->bm;
struct drm_buffer_object *entry;
@@ -656,7 +668,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,
l = list->next;
}
DRM_DEBUG("Fenced %d buffers\n", count);
- out:
+out:
mutex_unlock(&dev->struct_mutex);
*used_fence = fence;
return ret;
@@ -667,7 +679,7 @@ EXPORT_SYMBOL(drm_fence_buffer_objects);
* bo->mutex locked
*/
-static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
int no_wait)
{
int ret = 0;
@@ -675,7 +687,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
struct drm_bo_mem_reg evict_mem;
/*
- * Someone might have modified the buffer before we took the buffer mutex.
+ * Someone might have modified the buffer before we took the
+ * buffer mutex.
*/
if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
@@ -695,7 +708,7 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
evict_mem.mm_node = NULL;
evict_mem = bo->mem;
- evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+ evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
if (ret) {
@@ -726,7 +739,7 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
_DRM_BO_FLAG_EVICTED);
- out:
+out:
return ret;
}
@@ -734,8 +747,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
* Repeatedly evict memory from the LRU for @mem_type until we create enough
* space, or we've evicted everything and there isn't enough space.
*/
-static int drm_bo_mem_force_space(struct drm_device * dev,
- struct drm_bo_mem_reg * mem,
+static int drm_bo_mem_force_space(struct drm_device *dev,
+ struct drm_bo_mem_reg *mem,
uint32_t mem_type, int no_wait)
{
struct drm_mm_node *node;
@@ -783,13 +796,16 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
return 0;
}
-static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
+ int disallow_fixed,
uint32_t mem_type,
- uint64_t mask, uint32_t * res_mask)
+ uint64_t mask, uint32_t *res_mask)
{
uint64_t cur_flags = drm_bo_type_flags(mem_type);
uint64_t flag_diff;
+ if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
+ return 0;
if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
cur_flags |= DRM_BO_FLAG_CACHED;
if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
@@ -806,6 +822,9 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
}
flag_diff = (mask ^ cur_flags);
+ if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
+ cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
+
if ((flag_diff & DRM_BO_FLAG_CACHED) &&
(!(mask & DRM_BO_FLAG_CACHED) ||
(mask & DRM_BO_FLAG_FORCE_CACHING)))
@@ -813,7 +832,7 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
((mask & DRM_BO_FLAG_MAPPABLE) ||
- (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
+ (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
return 0;
*res_mask = cur_flags;
@@ -828,8 +847,8 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
* drm_bo_mem_force_space is attempted in priority order to evict and find
* space.
*/
-int drm_bo_mem_space(struct drm_buffer_object * bo,
- struct drm_bo_mem_reg * mem, int no_wait)
+int drm_bo_mem_space(struct drm_buffer_object *bo,
+ struct drm_bo_mem_reg *mem, int no_wait)
{
struct drm_device *dev = bo->dev;
struct drm_buffer_manager *bm = &dev->bm;
@@ -851,7 +870,9 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
mem_type = prios[i];
man = &bm->man[mem_type];
- type_ok = drm_bo_mt_compatible(man, mem_type, mem->mask,
+ type_ok = drm_bo_mt_compatible(man,
+ bo->type == drm_bo_type_user,
+ mem_type, mem->proposed_flags,
&cur_flags);
if (!type_ok)
@@ -900,12 +921,16 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
if (!man->has_type)
continue;
- if (!drm_bo_mt_compatible(man, mem_type, mem->mask, &cur_flags))
+ if (!drm_bo_mt_compatible(man,
+ bo->type == drm_bo_type_user,
+ mem_type,
+ mem->proposed_flags,
+ &cur_flags))
continue;
ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
- if (ret == 0) {
+ if (ret == 0 && mem->mm_node) {
mem->flags = cur_flags;
return 0;
}
@@ -917,41 +942,53 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
ret = (has_eagain) ? -EAGAIN : -ENOMEM;
return ret;
}
-
EXPORT_SYMBOL(drm_bo_mem_space);
-static int drm_bo_new_mask(struct drm_buffer_object * bo,
- uint64_t new_flags, uint64_t used_mask)
-{
- uint32_t new_props;
-
- if (bo->type == drm_bo_type_user) {
- DRM_ERROR("User buffers are not supported yet.\n");
+/*
+ * drm_bo_modify_proposed_flags:
+ *
+ * @bo: the buffer object getting new flags
+ *
+ * @new_flags: the new set of proposed flag bits
+ *
+ * @new_mask: the mask of bits changed in new_flags
+ *
+ * Modify the proposed_flag bits in @bo
+ */
+static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
+ uint64_t new_flags, uint64_t new_mask)
+{
+ uint32_t new_access;
+
+ /* Copy unchanging bits from existing proposed_flags */
+ DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
+
+ if (bo->type == drm_bo_type_user &&
+ ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
+ (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
+ DRM_ERROR("User buffers require cache-coherent memory.\n");
return -EINVAL;
}
- if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
- DRM_ERROR
- ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
- "processes.\n");
+ if ((new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+ DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
return -EPERM;
}
if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
- DRM_ERROR
- ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
+ DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
return -EPERM;
}
- new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
- DRM_BO_FLAG_READ);
+ new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_READ);
- if (!new_props) {
+ if (new_access == 0) {
DRM_ERROR("Invalid buffer object rwx properties\n");
return -EINVAL;
}
- bo->mem.mask = new_flags;
+ bo->mem.proposed_flags = new_flags;
return 0;
}
@@ -989,7 +1026,7 @@ EXPORT_SYMBOL(drm_lookup_buffer_object);
* Doesn't do any fence flushing as opposed to the drm_bo_busy function.
*/
-static int drm_bo_quick_busy(struct drm_buffer_object * bo)
+static int drm_bo_quick_busy(struct drm_buffer_object *bo)
{
struct drm_fence_object *fence = bo->fence;
@@ -1009,7 +1046,7 @@ static int drm_bo_quick_busy(struct drm_buffer_object * bo)
* Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
*/
-static int drm_bo_busy(struct drm_buffer_object * bo)
+static int drm_bo_busy(struct drm_buffer_object *bo)
{
struct drm_fence_object *fence = bo->fence;
@@ -1029,7 +1066,7 @@ static int drm_bo_busy(struct drm_buffer_object * bo)
return 0;
}
-static int drm_bo_read_cached(struct drm_buffer_object * bo)
+static int drm_bo_evict_cached(struct drm_buffer_object *bo)
{
int ret = 0;
@@ -1043,7 +1080,7 @@ static int drm_bo_read_cached(struct drm_buffer_object * bo)
* Wait until a buffer is unmapped.
*/
-static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
{
int ret = 0;
@@ -1059,7 +1096,7 @@ static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
return ret;
}
-static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
{
int ret;
@@ -1074,7 +1111,7 @@ static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
* Until then, we cannot really do anything with it except delete it.
*/
-static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
+static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
int eagain_if_wait)
{
int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
@@ -1086,8 +1123,8 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
ret = 0;
mutex_unlock(&bo->mutex);
- DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
- !drm_bo_check_unfenced(bo));
+ DRM_WAIT_ON (ret, bo->event_queue, 3 * DRM_HZ,
+ !drm_bo_check_unfenced(bo));
mutex_lock(&bo->mutex);
if (ret == -EINTR)
return -EAGAIN;
@@ -1107,7 +1144,7 @@ static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
* Bo locked.
*/
-static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
+static void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
struct drm_bo_info_rep *rep)
{
if (!rep)
@@ -1117,8 +1154,18 @@ static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
rep->flags = bo->mem.flags;
rep->size = bo->num_pages * PAGE_SIZE;
rep->offset = bo->offset;
- rep->arg_handle = bo->map_list.user_token;
- rep->mask = bo->mem.mask;
+
+ /*
+ * drm_bo_type_device buffers have user-visible
+ * handles which can be used to share across
+ * processes. Hand that back to the application
+ */
+ if (bo->type == drm_bo_type_device)
+ rep->arg_handle = bo->map_list.user_token;
+ else
+ rep->arg_handle = 0;
+
+ rep->proposed_flags = bo->mem.proposed_flags;
rep->buffer_start = bo->buffer_start;
rep->fence_flags = bo->fence_type;
rep->rep_flags = 0;
@@ -1177,15 +1224,11 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
goto out;
}
- if ((map_flags & DRM_BO_FLAG_READ) &&
- (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
- (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
- drm_bo_read_cached(bo);
- }
+ if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
+ drm_bo_evict_cached(bo);
+
break;
- } else if ((map_flags & DRM_BO_FLAG_READ) &&
- (bo->mem.flags & DRM_BO_FLAG_READ_CACHED) &&
- (!(bo->mem.flags & DRM_BO_FLAG_CACHED))) {
+ } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
/*
* We are already mapped with different flags.
@@ -1210,7 +1253,7 @@ static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
} else
drm_bo_fill_rep_arg(bo, rep);
- out:
+out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
return ret;
@@ -1239,7 +1282,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
drm_remove_ref_object(file_priv, ro);
drm_bo_usage_deref_locked(&bo);
- out:
+out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1249,7 +1292,7 @@ static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
*/
static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
- struct drm_user_object * uo,
+ struct drm_user_object *uo,
enum drm_ref_type action)
{
struct drm_buffer_object *bo =
@@ -1268,10 +1311,10 @@ static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
/*
* bo->mutex locked.
- * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
*/
-int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,
+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
int no_wait, int move_unfenced)
{
struct drm_device *dev = bo->dev;
@@ -1294,7 +1337,7 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,
mem.num_pages = bo->num_pages;
mem.size = mem.num_pages << PAGE_SHIFT;
- mem.mask = new_mem_flags;
+ mem.proposed_flags = new_mem_flags;
mem.page_alignment = bo->mem.page_alignment;
mutex_lock(&bm->evict_mutex);
@@ -1311,44 +1354,70 @@ int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,
ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
- out_unlock:
+out_unlock:
+ mutex_lock(&dev->struct_mutex);
if (ret || !move_unfenced) {
- mutex_lock(&dev->struct_mutex);
if (mem.mm_node) {
if (mem.mm_node != bo->pinned_node)
drm_mm_put_block(mem.mm_node);
mem.mm_node = NULL;
}
- mutex_unlock(&dev->struct_mutex);
+ drm_bo_add_to_lru(bo);
+ if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+ DRM_WAKEUP(&bo->event_queue);
+ DRM_FLAG_MASKED(bo->priv_flags, 0,
+ _DRM_BO_FLAG_UNFENCED);
+ }
+ } else {
+ list_add_tail(&bo->lru, &bm->unfenced);
+ DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+ _DRM_BO_FLAG_UNFENCED);
}
-
+ mutex_unlock(&dev->struct_mutex);
mutex_unlock(&bm->evict_mutex);
return ret;
}
-static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
{
- uint32_t flag_diff = (mem->mask ^ mem->flags);
+ uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
- if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+ if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
return 0;
if ((flag_diff & DRM_BO_FLAG_CACHED) &&
- (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
- (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
- return 0;
- }
+ (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
+ (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
+ return 0;
+
if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
- ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
- (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+ ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
+ (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
return 0;
return 1;
}
-/*
- * bo locked.
+/**
+ * drm_buffer_object_validate:
+ *
+ * @bo: the buffer object to modify
+ *
+ * @fence_class: the new fence class covering this buffer
+ *
+ * @move_unfenced: a boolean indicating whether switching the
+ * memory space of this buffer should cause the buffer to
+ * be placed on the unfenced list.
+ *
+ * @no_wait: whether this function should return -EBUSY instead
+ * of waiting.
+ *
+ * Change buffer access parameters. This can involve moving
+ * the buffer to the correct memory type, pinning the buffer
+ * or changing the class/type of fence covering this buffer
+ *
+ * Must be called with bo locked.
*/
-static int drm_buffer_object_validate(struct drm_buffer_object * bo,
+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
uint32_t fence_class,
int move_unfenced, int no_wait)
{
@@ -1358,8 +1427,8 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
uint32_t ftype;
int ret;
- DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
- (unsigned long long) bo->mem.mask,
+ DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+ (unsigned long long) bo->mem.proposed_flags,
(unsigned long long) bo->mem.flags);
ret = driver->fence_type(bo, &fence_class, &ftype);
@@ -1391,7 +1460,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
ret = drm_bo_wait_unmapped(bo, no_wait);
if (ret) {
- DRM_ERROR("Timed out waiting for buffer unmap.\n");
+ DRM_ERROR("Timed out waiting for buffer unmap.\n");
return ret;
}
@@ -1400,7 +1469,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
*/
if (!drm_bo_mem_compat(&bo->mem)) {
- ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+ ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
move_unfenced);
if (ret) {
if (ret != -EAGAIN)
@@ -1413,7 +1482,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
* Pinned buffers.
*/
- if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
bo->pinned_mem_type = bo->mem.mem_type;
mutex_lock(&dev->struct_mutex);
list_del_init(&bo->pinned_lru);
@@ -1449,7 +1518,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
if (ret)
return ret;
}
- DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
+ /*
+ * Validation has succeeded, move the access and other
+ * non-mapping-related flag bits from the proposed flags to
+ * the active flags
+ */
+
+ DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
/*
* Finally, adjust lru to be sure.
@@ -1474,13 +1549,38 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
return 0;
}
+/**
+ * drm_bo_do_validate:
+ *
+ * @bo: the buffer object
+ *
+ * @flags: access rights, mapping parameters and cacheability. See
+ * the DRM_BO_FLAG_* values in drm.h
+ *
+ * @mask: Which flag values to change; this allows callers to modify
+ * things without knowing the current state of other flags.
+ *
+ * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+ * values in drm.h.
+ *
+ * @fence_class: a driver-specific way of doing fences. Presumably,
+ * this would be used if the driver had more than one submission and
+ * fencing mechanism. At this point, there isn't any use of this
+ * from the user mode code.
+ *
+ * @rep: To be stuffed with the reply from validation
+ *
+ * 'validate' a buffer object. This changes where the buffer is
+ * located, along with changing access modes.
+ */
+
int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
- int no_wait,
struct drm_bo_info_rep *rep)
{
int ret;
+ int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
mutex_lock(&bo->mutex);
ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1488,9 +1588,7 @@ int drm_bo_do_validate(struct drm_buffer_object *bo,
if (ret)
goto out;
-
- DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
- ret = drm_bo_new_mask(bo, flags, mask);
+ ret = drm_bo_modify_proposed_flags (bo, flags, mask);
if (ret)
goto out;
@@ -1507,25 +1605,55 @@ out:
}
EXPORT_SYMBOL(drm_bo_do_validate);
+/**
+ * drm_bo_handle_validate
+ *
+ * @file_priv: the drm file private, used to get a handle to the user context
+ *
+ * @handle: the buffer object handle
+ *
+ * @flags: access rights, mapping parameters and cacheability. See
+ * the DRM_BO_FLAG_* values in drm.h
+ *
+ * @mask: Which flag values to change; this allows callers to modify
+ * things without knowing the current state of other flags.
+ *
+ * @hint: changes the procedure for this operation, see the DRM_BO_HINT_*
+ * values in drm.h.
+ *
+ * @fence_class: a driver-specific way of doing fences. Presumably,
+ * this would be used if the driver had more than one submission and
+ * fencing mechanism. At this point, there isn't any use of this
+ * from the user mode code.
+ *
+ * @use_old_fence_class: don't change fence class, pull it from the buffer object
+ *
+ * @rep: To be stuffed with the reply from validation
+ *
+ * @bp_rep: To be stuffed with the buffer object pointer
+ *
+ * Perform drm_bo_do_validate on a buffer referenced by a user-space handle.
+ * Some permissions checking is done on the parameters, otherwise this
+ * is a thin wrapper.
+ */
-int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
- uint32_t fence_class,
- uint64_t flags, uint64_t mask,
+int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
+ uint64_t flags, uint64_t mask,
uint32_t hint,
+ uint32_t fence_class,
int use_old_fence_class,
- struct drm_bo_info_rep * rep,
+ struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep)
{
struct drm_device *dev = file_priv->head->dev;
struct drm_buffer_object *bo;
int ret;
- int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
mutex_lock(&dev->struct_mutex);
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
- if (!bo)
+ if (!bo)
return -EINVAL;
if (use_old_fence_class)
@@ -1535,12 +1663,11 @@ int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
* Only allow creator to change shared buffer mask.
*/
- if (bo->base.owner != file_priv)
+ if (bo->base.owner != file_priv)
mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
-
- ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
- no_wait, rep);
+
+ ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
if (!ret && bo_rep)
*bo_rep = bo;
@@ -1561,9 +1688,9 @@ static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
- if (!bo) {
+ if (!bo)
return -EINVAL;
- }
+
mutex_lock(&bo->mutex);
if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
(void)drm_bo_busy(bo);
@@ -1586,9 +1713,8 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
bo = drm_lookup_buffer_object(file_priv, handle, 1);
mutex_unlock(&dev->struct_mutex);
- if (!bo) {
+ if (!bo)
return -EINVAL;
- }
mutex_lock(&bo->mutex);
ret = drm_bo_wait_unfenced(bo, no_wait, 0);
@@ -1600,7 +1726,7 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
drm_bo_fill_rep_arg(bo, rep);
- out:
+out:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
return ret;
@@ -1609,21 +1735,18 @@ static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
int drm_buffer_object_create(struct drm_device *dev,
unsigned long size,
enum drm_bo_type type,
- uint64_t mask,
+ uint64_t flags,
uint32_t hint,
uint32_t page_alignment,
unsigned long buffer_start,
- struct drm_buffer_object ** buf_obj)
+ struct drm_buffer_object **buf_obj)
{
struct drm_buffer_manager *bm = &dev->bm;
struct drm_buffer_object *bo;
int ret = 0;
unsigned long num_pages;
- if (buffer_start & ~PAGE_MASK) {
- DRM_ERROR("Invalid buffer object start.\n");
- return -EINVAL;
- }
+ size += buffer_start & ~PAGE_MASK;
num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
if (num_pages == 0) {
DRM_ERROR("Illegal buffer object size.\n");
@@ -1649,28 +1772,31 @@ int drm_buffer_object_create(struct drm_device *dev,
INIT_LIST_HEAD(&bo->vma_list);
#endif
bo->dev = dev;
- if (buffer_start != 0)
- bo->type = drm_bo_type_user;
- else
- bo->type = type;
+ bo->type = type;
bo->num_pages = num_pages;
bo->mem.mem_type = DRM_BO_MEM_LOCAL;
bo->mem.num_pages = bo->num_pages;
bo->mem.mm_node = NULL;
bo->mem.page_alignment = page_alignment;
- bo->buffer_start = buffer_start;
+ bo->buffer_start = buffer_start & PAGE_MASK;
bo->priv_flags = 0;
- bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_MAPPABLE;
- bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
- DRM_BO_FLAG_MAPPABLE;
+ bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+ DRM_BO_FLAG_MAPPABLE);
+ bo->mem.proposed_flags = 0;
atomic_inc(&bm->count);
- ret = drm_bo_new_mask(bo, mask, hint);
-
+ /*
+ * Use drm_bo_modify_proposed_flags to error-check the proposed flags
+ */
+ ret = drm_bo_modify_proposed_flags (bo, flags, flags);
if (ret)
goto out_err;
- if (bo->type == drm_bo_type_dc) {
+ /*
+ * For drm_bo_type_device buffers, allocate
+ * address space from the device so that applications
+ * can mmap the buffer from there
+ */
+ if (bo->type == drm_bo_type_device) {
mutex_lock(&dev->struct_mutex);
ret = drm_bo_setup_vm_locked(bo);
mutex_unlock(&dev->struct_mutex);
@@ -1686,7 +1812,7 @@ int drm_buffer_object_create(struct drm_device *dev,
*buf_obj = bo;
return 0;
- out_err:
+out_err:
mutex_unlock(&bo->mutex);
drm_bo_usage_deref_unlocked(&bo);
@@ -1711,7 +1837,7 @@ static int drm_bo_add_user_object(struct drm_file *file_priv,
bo->base.ref_struct_locked = NULL;
bo->base.unref = drm_buffer_user_object_unmap;
- out:
+out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -1722,6 +1848,7 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
struct drm_bo_create_req *req = &arg->d.req;
struct drm_bo_info_rep *rep = &arg->d.rep;
struct drm_buffer_object *entry;
+ enum drm_bo_type bo_type;
int ret = 0;
DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
@@ -1732,20 +1859,33 @@ int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fil
return -EINVAL;
}
+ /*
+ * If the buffer creation request comes in with a starting address,
+ * that points at the desired user pages to map. Otherwise, create
+ * a drm_bo_type_device buffer, which uses pages allocated from the kernel
+ */
+ bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
+
+ /*
+ * User buffers cannot be shared
+ */
+ if (bo_type == drm_bo_type_user)
+ req->flags &= ~DRM_BO_FLAG_SHAREABLE;
+
ret = drm_buffer_object_create(file_priv->head->dev,
- req->size, drm_bo_type_dc, req->mask,
+ req->size, bo_type, req->flags,
req->hint, req->page_alignment,
req->buffer_start, &entry);
if (ret)
goto out;
-
+
ret = drm_bo_add_user_object(file_priv, entry,
- req->mask & DRM_BO_FLAG_SHAREABLE);
+ req->flags & DRM_BO_FLAG_SHAREABLE);
if (ret) {
drm_bo_usage_deref_unlocked(&entry);
goto out;
}
-
+
mutex_lock(&entry->mutex);
drm_bo_fill_rep_arg(entry, rep);
mutex_unlock(&entry->mutex);
@@ -1754,7 +1894,7 @@ out:
return ret;
}
-int drm_bo_setstatus_ioctl(struct drm_device *dev,
+int drm_bo_setstatus_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv)
{
struct drm_bo_map_wait_idle_arg *arg = data;
@@ -1771,11 +1911,17 @@ int drm_bo_setstatus_ioctl(struct drm_device *dev,
if (ret)
return ret;
- ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
+ /*
+ * validate the buffer. note that 'fence_class' will be unused
+ * as we pass use_old_fence_class=1 here. Note also that
+ * the libdrm API doesn't pass fence_class to the kernel,
+ * so it's a good thing it isn't used here.
+ */
+ ret = drm_bo_handle_validate(file_priv, req->handle,
req->flags,
req->mask,
req->hint | DRM_BO_HINT_DONT_FENCE,
- 1,
+ req->fence_class, 1,
rep, NULL);
(void) drm_bo_read_unlock(&dev->bm.bm_lock);
@@ -1835,7 +1981,7 @@ int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *
drm_buffer_type, &uo);
if (ret)
return ret;
-
+
ret = drm_bo_handle_info(file_priv, req->handle, rep);
if (ret)
return ret;
@@ -1895,7 +2041,7 @@ int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *
return 0;
}
-static int drm_bo_leave_list(struct drm_buffer_object * bo,
+static int drm_bo_leave_list(struct drm_buffer_object *bo,
uint32_t mem_type,
int free_pinned,
int allow_errors)
@@ -1926,7 +2072,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,
DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
"cleanup. Removing flag and evicting.\n");
bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
- bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+ bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
}
if (bo->mem.mem_type == mem_type)
@@ -1941,7 +2087,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,
}
}
- out:
+out:
mutex_unlock(&bo->mutex);
return ret;
}
@@ -1960,7 +2106,7 @@ static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
* dev->struct_mutex locked.
*/
-static int drm_bo_force_list_clean(struct drm_device * dev,
+static int drm_bo_force_list_clean(struct drm_device *dev,
struct list_head *head,
unsigned mem_type,
int free_pinned,
@@ -2025,7 +2171,7 @@ restart:
return 0;
}
-int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
{
struct drm_buffer_manager *bm = &dev->bm;
struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -2067,7 +2213,7 @@ EXPORT_SYMBOL(drm_bo_clean_mm);
*point since we have the hardware lock.
*/
-static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
+static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
{
int ret;
struct drm_buffer_manager *bm = &dev->bm;
@@ -2092,7 +2238,7 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
return ret;
}
-int drm_bo_init_mm(struct drm_device * dev,
+int drm_bo_init_mm(struct drm_device *dev,
unsigned type,
unsigned long p_offset, unsigned long p_size)
{
@@ -2139,11 +2285,11 @@ EXPORT_SYMBOL(drm_bo_init_mm);
/*
* This function is intended to be called on drm driver unload.
* If you decide to call it from lastclose, you must protect the call
- * from a potentially racing drm_bo_driver_init in firstopen.
+ * from a potentially racing drm_bo_driver_init in firstopen.
* (This may happen on X server restart).
*/
-int drm_bo_driver_finish(struct drm_device * dev)
+int drm_bo_driver_finish(struct drm_device *dev)
{
struct drm_buffer_manager *bm = &dev->bm;
int ret = 0;
@@ -2170,24 +2316,29 @@ int drm_bo_driver_finish(struct drm_device * dev)
}
mutex_unlock(&dev->struct_mutex);
- if (!cancel_delayed_work(&bm->wq)) {
+ if (!cancel_delayed_work(&bm->wq))
flush_scheduled_work();
- }
+
mutex_lock(&dev->struct_mutex);
drm_bo_delayed_delete(dev, 1);
- if (list_empty(&bm->ddestroy)) {
+ if (list_empty(&bm->ddestroy))
DRM_DEBUG("Delayed destroy list was clean\n");
- }
- if (list_empty(&bm->man[0].lru)) {
+
+ if (list_empty(&bm->man[0].lru))
DRM_DEBUG("Swap list was clean\n");
- }
- if (list_empty(&bm->man[0].pinned)) {
+
+ if (list_empty(&bm->man[0].pinned))
DRM_DEBUG("NO_MOVE list was clean\n");
- }
- if (list_empty(&bm->unfenced)) {
+
+ if (list_empty(&bm->unfenced))
DRM_DEBUG("Unfenced list was clean\n");
- }
- out:
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+ ClearPageReserved(bm->dummy_read_page);
+#endif
+ __free_page(bm->dummy_read_page);
+
+out:
mutex_unlock(&dev->struct_mutex);
return ret;
}
@@ -2195,21 +2346,32 @@ int drm_bo_driver_finish(struct drm_device * dev)
/*
* This function is intended to be called on drm driver load.
* If you decide to call it from firstopen, you must protect the call
- * from a potentially racing drm_bo_driver_finish in lastclose.
+ * from a potentially racing drm_bo_driver_finish in lastclose.
* (This may happen on X server restart).
*/
-int drm_bo_driver_init(struct drm_device * dev)
+int drm_bo_driver_init(struct drm_device *dev)
{
struct drm_bo_driver *driver = dev->driver->bo_driver;
struct drm_buffer_manager *bm = &dev->bm;
int ret = -EINVAL;
+ bm->dummy_read_page = NULL;
drm_bo_init_lock(&bm->bm_lock);
mutex_lock(&dev->struct_mutex);
if (!driver)
goto out_unlock;
+ bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+ if (!bm->dummy_read_page) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+ SetPageReserved(bm->dummy_read_page);
+#endif
+
/*
* Initialize the system memory buffer type.
* Other types need to be driver / IOCTL initialized.
@@ -2229,11 +2391,10 @@ int drm_bo_driver_init(struct drm_device * dev)
bm->cur_pages = 0;
INIT_LIST_HEAD(&bm->unfenced);
INIT_LIST_HEAD(&bm->ddestroy);
- out_unlock:
+out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
-
EXPORT_SYMBOL(drm_bo_driver_init);
int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
@@ -2343,13 +2504,13 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
return -EINVAL;
}
-
+
if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
if (ret)
return ret;
}
-
+
mutex_lock(&dev->struct_mutex);
ret = drm_bo_lock_mm(dev, arg->mem_type);
mutex_unlock(&dev->struct_mutex);
@@ -2361,8 +2522,8 @@ int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_
return 0;
}
-int drm_mm_unlock_ioctl(struct drm_device *dev,
- void *data,
+int drm_mm_unlock_ioctl(struct drm_device *dev,
+ void *data,
struct drm_file *file_priv)
{
struct drm_mm_type_arg *arg = data;
@@ -2379,7 +2540,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev,
if (ret)
return ret;
}
-
+
return 0;
}
@@ -2387,7 +2548,7 @@ int drm_mm_unlock_ioctl(struct drm_device *dev,
* buffer object vm functions.
*/
-int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
{
struct drm_buffer_manager *bm = &dev->bm;
struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
@@ -2404,7 +2565,6 @@ int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
}
return 1;
}
-
EXPORT_SYMBOL(drm_mem_reg_is_pci);
/**
@@ -2450,7 +2610,7 @@ int drm_bo_pci_offset(struct drm_device *dev,
* Call bo->mutex locked.
*/
-void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
{
struct drm_device *dev = bo->dev;
loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
@@ -2462,13 +2622,25 @@ void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
}
-static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
+/**
+ * drm_bo_takedown_vm_locked:
+ *
+ * @bo: the buffer object to remove any drm device mapping
+ *
+ * Remove any associated vm mapping on the drm device node that
+ * would have been created for a drm_bo_type_device buffer
+ */
+static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
{
- struct drm_map_list *list = &bo->map_list;
+ struct drm_map_list *list;
drm_local_map_t *map;
struct drm_device *dev = bo->dev;
DRM_ASSERT_LOCKED(&dev->struct_mutex);
+ if (bo->type != drm_bo_type_device)
+ return;
+
+ list = &bo->map_list;
if (list->user_token) {
drm_ht_remove_item(&dev->map_hash, &list->hash);
list->user_token = 0;
@@ -2488,7 +2660,17 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
drm_bo_usage_deref_locked(&bo);
}
-static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
+/**
+ * drm_bo_setup_vm_locked:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to drm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
{
struct drm_map_list *list = &bo->map_list;
drm_local_map_t *map;
@@ -2529,11 +2711,11 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
return 0;
}
-int drm_bo_version_ioctl(struct drm_device *dev, void *data,
+int drm_bo_version_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
-
+
arg->major = DRM_BO_INIT_MAJOR;
arg->minor = DRM_BO_INIT_MINOR;
arg->patchlevel = DRM_BO_INIT_PATCH;
diff --git a/linux-core/drm_bo_lock.c b/linux-core/drm_bo_lock.c
index e5a86826..f967fb7c 100644
--- a/linux-core/drm_bo_lock.c
+++ b/linux-core/drm_bo_lock.c
@@ -31,19 +31,19 @@
/*
* This file implements a simple replacement for the buffer manager use
* of the heavyweight hardware lock.
- * The lock is a read-write lock. Taking it in read mode is fast, and
+ * The lock is a read-write lock. Taking it in read mode is fast, and
* intended for in-kernel use only.
* Taking it in write mode is slow.
*
- * The write mode is used only when there is a need to block all
- * user-space processes from allocating a
+ * The write mode is used only when there is a need to block all
+ * user-space processes from allocating a
* new memory area.
* Typical use in write mode is X server VT switching, and it's allowed
* to leave kernel space with the write lock held. If a user-space process
* dies while having the write-lock, it will be released during the file
* descriptor release.
*
- * The read lock is typically placed at the start of an IOCTL- or
+ * The read lock is typically placed at the start of an IOCTL- or
* user-space callable function that may end up allocating a memory area.
* This includes setstatus, super-ioctls and no_pfn; the latter may move
* unmappable regions to mappable. It's a bug to leave kernel space with the
@@ -53,7 +53,7 @@
* latency. The locking functions will return -EAGAIN if interrupted by a
* signal.
*
- * Locking order: The lock should be taken BEFORE any kernel mutexes
+ * Locking order: The lock should be taken BEFORE any kernel mutexes
* or spinlocks.
*/
@@ -73,7 +73,6 @@ void drm_bo_read_unlock(struct drm_bo_lock *lock)
if (atomic_read(&lock->readers) == 0)
wake_up_interruptible(&lock->queue);
}
-
EXPORT_SYMBOL(drm_bo_read_unlock);
int drm_bo_read_lock(struct drm_bo_lock *lock)
@@ -95,7 +94,6 @@ int drm_bo_read_lock(struct drm_bo_lock *lock)
}
return 0;
}
-
EXPORT_SYMBOL(drm_bo_read_lock);
static int __drm_bo_write_unlock(struct drm_bo_lock *lock)
@@ -123,9 +121,8 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
int ret = 0;
struct drm_device *dev;
- if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0)) {
+ if (unlikely(atomic_cmpxchg(&lock->write_lock_pending, 0, 1) != 0))
return -EINVAL;
- }
while (unlikely(atomic_cmpxchg(&lock->readers, 0, -1) != 0)) {
ret = wait_event_interruptible
@@ -140,7 +137,7 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
/*
* Add a dummy user-object, the destructor of which will
- * make sure the lock is released if the client dies
+ * make sure the lock is released if the client dies
* while holding it.
*/
@@ -149,9 +146,9 @@ int drm_bo_write_lock(struct drm_bo_lock *lock, struct drm_file *file_priv)
ret = drm_add_user_object(file_priv, &lock->base, 0);
lock->base.remove = &drm_bo_write_lock_remove;
lock->base.type = drm_lock_type;
- if (ret) {
+ if (ret)
(void)__drm_bo_write_unlock(lock);
- }
+
mutex_unlock(&dev->struct_mutex);
return ret;
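drm_bo_write_lock above relies on atomic compare-and-exchange twice: once to mark a single pending writer, and once to flip the reader count from 0 to -1 once the readers have drained. The following is only a rough user-space analogue of that idea using C11 atomics (no wait queue, no signal handling, invented names), not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>

struct bo_lock {
        atomic_int readers;            /* >0: readers, 0: free, -1: writer */
        atomic_int write_lock_pending; /* set while a writer is waiting */
};

static bool try_read_lock(struct bo_lock *l)
{
        int r;

        if (atomic_load(&l->write_lock_pending))
                return false;
        r = atomic_load(&l->readers);
        while (r >= 0) {
                if (atomic_compare_exchange_weak(&l->readers, &r, r + 1))
                        return true;
        }
        return false;
}

static void read_unlock(struct bo_lock *l)
{
        atomic_fetch_sub(&l->readers, 1);
}

static bool try_write_lock(struct bo_lock *l)
{
        int zero = 0;

        /* Only one writer may be pending at a time. */
        if (!atomic_compare_exchange_strong(&l->write_lock_pending, &zero, 1))
                return false;
        zero = 0;
        /* Readers are drained by swapping the count from 0 to -1;
         * real code sleeps and retries, this sketch just gives up. */
        if (atomic_compare_exchange_strong(&l->readers, &zero, -1))
                return true;
        atomic_store(&l->write_lock_pending, 0);
        return false;
}

static void write_unlock(struct bo_lock *l)
{
        atomic_store(&l->readers, 0);
        atomic_store(&l->write_lock_pending, 0);
}

int main(void)
{
        static struct bo_lock lock; /* zero-initialized: unlocked */

        if (try_read_lock(&lock))
                read_unlock(&lock);
        if (try_write_lock(&lock))
                write_unlock(&lock);
        return 0;
}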
diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c
index 7c86c4aa..b06a09f0 100644
--- a/linux-core/drm_bo_move.c
+++ b/linux-core/drm_bo_move.c
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,7 +10,7 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
@@ -19,8 +19,8 @@
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
@@ -35,7 +35,7 @@
* have not been requested to free also pinned regions.
*/
-static void drm_bo_free_old_node(struct drm_buffer_object * bo)
+static void drm_bo_free_old_node(struct drm_buffer_object *bo)
{
struct drm_bo_mem_reg *old_mem = &bo->mem;
@@ -48,13 +48,13 @@ static void drm_bo_free_old_node(struct drm_buffer_object * bo)
old_mem->mm_node = NULL;
}
-int drm_bo_move_ttm(struct drm_buffer_object * bo,
- int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+int drm_bo_move_ttm(struct drm_buffer_object *bo,
+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_ttm *ttm = bo->ttm;
struct drm_bo_mem_reg *old_mem = &bo->mem;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
int ret;
if (old_mem->mem_type == DRM_BO_MEM_TT) {
@@ -71,18 +71,17 @@ int drm_bo_move_ttm(struct drm_buffer_object * bo,
save_flags = old_mem->flags;
}
if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
- ret = drm_bind_ttm(ttm, new_mem);
+ ret = drm_ttm_bind(ttm, new_mem);
if (ret)
return ret;
}
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
-
EXPORT_SYMBOL(drm_bo_move_ttm);
/**
@@ -90,17 +89,17 @@ EXPORT_SYMBOL(drm_bo_move_ttm);
*
* \param bo The buffer object.
* \return Failure indication.
- *
+ *
* Returns -EINVAL if the buffer object is currently not mappable.
* Returns -ENOMEM if the ioremap operation failed.
* Otherwise returns zero.
- *
+ *
* After a successful call, bo->iomap contains the virtual address, or NULL
- * if the buffer object content is not accessible through PCI space.
+ * if the buffer object content is not accessible through PCI space.
* Call bo->mutex locked.
*/
-int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
+int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
void **virtual)
{
struct drm_buffer_manager *bm = &dev->bm;
@@ -136,7 +135,7 @@ EXPORT_SYMBOL(drm_mem_reg_ioremap);
* Call bo->mutex locked.
*/
-void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
+void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
void *virtual)
{
struct drm_buffer_manager *bm;
@@ -145,9 +144,8 @@ void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem,
bm = &dev->bm;
man = &bm->man[mem->mem_type];
- if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
+ if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
iounmap(virtual);
- }
}
static int drm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -163,7 +161,8 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page)
return 0;
}
-static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page)
+static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
+ unsigned long page)
{
struct page *d = drm_ttm_get_page(ttm, page);
void *dst;
@@ -181,7 +180,7 @@ static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long p
return 0;
}
-static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page)
+static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
{
struct page *s = drm_ttm_get_page(ttm, page);
void *src;
@@ -199,8 +198,8 @@ static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long p
return 0;
}
-int drm_bo_move_memcpy(struct drm_buffer_object * bo,
- int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+int drm_bo_move_memcpy(struct drm_buffer_object *bo,
+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
@@ -211,7 +210,7 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,
void *new_iomap;
int ret;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
unsigned long i;
unsigned long page;
unsigned long add = 0;
@@ -251,27 +250,26 @@ int drm_bo_move_memcpy(struct drm_buffer_object * bo,
goto out1;
}
mb();
- out2:
+out2:
drm_bo_free_old_node(bo);
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
drm_ttm_unbind(ttm);
- drm_destroy_ttm(ttm);
+ drm_ttm_destroy(ttm);
bo->ttm = NULL;
}
- out1:
+out1:
drm_mem_reg_iounmap(dev, new_mem, new_iomap);
- out:
+out:
drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
return ret;
}
-
EXPORT_SYMBOL(drm_bo_move_memcpy);
/*
@@ -280,8 +278,8 @@ EXPORT_SYMBOL(drm_bo_move_memcpy);
* object. Call bo->mutex locked.
*/
-int drm_buffer_object_transfer(struct drm_buffer_object * bo,
- struct drm_buffer_object ** new_obj)
+int drm_buffer_object_transfer(struct drm_buffer_object *bo,
+ struct drm_buffer_object **new_obj)
{
struct drm_buffer_object *fbo;
struct drm_device *dev = bo->dev;
@@ -305,7 +303,7 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo,
INIT_LIST_HEAD(&fbo->p_mm_list);
#endif
- drm_fence_reference_unlocked(&fbo->fence, bo->fence);
+ fbo->fence = drm_fence_reference_locked(bo->fence);
fbo->pinned_node = NULL;
fbo->mem.mm_node->private = (void *)fbo;
atomic_set(&fbo->usage, 1);
@@ -322,19 +320,17 @@ int drm_buffer_object_transfer(struct drm_buffer_object * bo,
* We cannot restart until it has finished.
*/
-int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
- int evict,
- int no_wait,
- uint32_t fence_class,
- uint32_t fence_type,
- uint32_t fence_flags, struct drm_bo_mem_reg * new_mem)
+int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
+ int evict, int no_wait, uint32_t fence_class,
+ uint32_t fence_type, uint32_t fence_flags,
+ struct drm_bo_mem_reg *new_mem)
{
struct drm_device *dev = bo->dev;
struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
struct drm_bo_mem_reg *old_mem = &bo->mem;
int ret;
uint64_t save_flags = old_mem->flags;
- uint64_t save_mask = old_mem->mask;
+ uint64_t save_proposed_flags = old_mem->proposed_flags;
struct drm_buffer_object *old_obj;
if (bo->fence)
@@ -349,11 +345,11 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
#ifdef DRM_ODD_MM_COMPAT
/*
* In this mode, we don't allow pipelining a copy blit,
- * since the buffer will be accessible from user space
+ * since the buffer will be accessible from user space
* the moment we return and rebuild the page tables.
*
* With normal vm operation, page tables are rebuilt
- * on demand using fault(), which waits for buffer idle.
+ * on demand using fault(), which waits for buffer idle.
*/
if (1)
#else
@@ -369,7 +365,7 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
drm_ttm_unbind(bo->ttm);
- drm_destroy_ttm(bo->ttm);
+ drm_ttm_destroy(bo->ttm);
bo->ttm = NULL;
}
} else {
@@ -403,11 +399,10 @@ int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
*old_mem = *new_mem;
new_mem->mm_node = NULL;
- old_mem->mask = save_mask;
+ old_mem->proposed_flags = save_proposed_flags;
DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
return 0;
}
-
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
int drm_bo_same_page(unsigned long offset,
@@ -420,13 +415,11 @@ EXPORT_SYMBOL(drm_bo_same_page);
unsigned long drm_bo_offset_end(unsigned long offset,
unsigned long end)
{
-
offset = (offset + PAGE_SIZE) & PAGE_MASK;
return (end < offset) ? end : offset;
}
EXPORT_SYMBOL(drm_bo_offset_end);
-
static pgprot_t drm_kernel_io_prot(uint32_t map_type)
{
pgprot_t tmp = PAGE_KERNEL;
@@ -475,8 +468,9 @@ static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
return (!map->virtual) ? -ENOMEM : 0;
}
-static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_page,
- unsigned long num_pages, struct drm_bo_kmap_obj *map)
+static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
+ unsigned long start_page, unsigned long num_pages,
+ struct drm_bo_kmap_obj *map)
{
struct drm_device *dev = bo->dev;
struct drm_bo_mem_reg *mem = &bo->mem;
@@ -503,7 +497,7 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
* Populate the part we're mapping;
*/
- for (i = start_page; i< start_page + num_pages; ++i) {
+ for (i = start_page; i < start_page + num_pages; ++i) {
d = drm_ttm_get_page(ttm, i);
if (!d)
return -ENOMEM;
@@ -530,7 +524,8 @@ static int drm_bo_kmap_ttm(struct drm_buffer_object *bo, unsigned long start_pag
* and caching policy the buffer currently has.
* Mapping multiple pages or buffers that live in io memory is a bit slow and
* consumes vmalloc space. Be restrictive with such mappings.
- * Mapping single pages usually returns the logical kernel address, (which is fast)
+ * Mapping single pages usually returns the logical kernel address,
+ * (which is fast)
* BUG may use slower temporary mappings for high memory pages or
* uncached / write-combined pages.
*
@@ -581,7 +576,7 @@ void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
if (!map->virtual)
return;
- switch(map->bo_kmap_type) {
+ switch (map->bo_kmap_type) {
case bo_map_iomap:
iounmap(map->virtual);
break;
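The memcpy-style move above walks the buffer one page at a time, and drm_bo_offset_end clamps each step at the next page boundary. A stand-alone sketch of that iteration pattern, assuming 4096-byte pages purely for the example:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* End of the chunk starting at 'offset': the next page boundary,
 * clamped to 'end'. */
static unsigned long chunk_end(unsigned long offset, unsigned long end)
{
        unsigned long next = (offset + PAGE_SIZE) & PAGE_MASK;

        return (end < next) ? end : next;
}

int main(void)
{
        unsigned long offset = 0x1f00, end = 0x3100;

        /* Walk [offset, end) in page-bounded chunks, as a
         * page-by-page copy would. */
        while (offset < end) {
                unsigned long stop = chunk_end(offset, end);

                printf("copy [%#lx, %#lx)\n", offset, stop);
                offset = stop;
        }
        return 0;
}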
diff --git a/linux-core/drm_bufs.c b/linux-core/drm_bufs.c
index 60eca60c..75c75c2f 100644
--- a/linux-core/drm_bufs.c
+++ b/linux-core/drm_bufs.c
@@ -53,7 +53,7 @@ struct drm_map_list *drm_find_matching_map(struct drm_device *dev, drm_local_map
struct drm_map_list *entry;
list_for_each_entry(entry, &dev->maplist, head) {
if (entry->map && map->type == entry->map->type &&
- ((entry->map->offset == map->offset) ||
+ ((entry->map->offset == map->offset) ||
(map->type == _DRM_SHM && map->flags==_DRM_CONTAINS_LOCK))) {
return entry;
}
@@ -80,10 +80,10 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
int ret;
hash->key = user_token >> PAGE_SHIFT;
ret = drm_ht_insert_item(&dev->map_hash, hash);
- if (ret != -EINVAL)
+ if (ret != -EINVAL)
return ret;
}
- return drm_ht_just_insert_please(&dev->map_hash, hash,
+ return drm_ht_just_insert_please(&dev->map_hash, hash,
user_token, 32 - PAGE_SHIFT - 3,
0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}
@@ -173,12 +173,17 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
if (drm_core_has_MTRR(dev)) {
if (map->type == _DRM_FRAME_BUFFER ||
(map->flags & _DRM_WRITE_COMBINING)) {
- map->mtrr = mtrr_add(map->offset, map->size,
- MTRR_TYPE_WRCOMB, 1);
+ map->mtrr = mtrr_add(map->offset, map->size,
+ MTRR_TYPE_WRCOMB, 1);
}
}
- if (map->type == _DRM_REGISTERS)
+ if (map->type == _DRM_REGISTERS) {
map->handle = ioremap(map->offset, map->size);
+ if (!map->handle) {
+ drm_free(map, sizeof(*map), DRM_MEM_MAPS);
+ return -ENOMEM;
+ }
+ }
break;
case _DRM_SHM:
list = drm_find_matching_map(dev, map);
@@ -297,7 +302,7 @@ static int drm_addmap_core(struct drm_device *dev, unsigned int offset,
/* Assign a 32-bit handle */
- user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
+ user_token = (map->type == _DRM_SHM) ? (unsigned long) map->handle :
map->offset;
ret = drm_map_handle(dev, &list->hash, user_token, 0);
@@ -379,7 +384,7 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
if (r_list->map == map) {
list_del(&r_list->head);
- drm_ht_remove_key(&dev->map_hash,
+ drm_ht_remove_key(&dev->map_hash,
r_list->user_token >> PAGE_SHIFT);
drm_free(r_list, sizeof(*r_list), DRM_MEM_MAPS);
found = 1;
@@ -387,9 +392,9 @@ int drm_rmmap_locked(struct drm_device *dev, drm_local_map_t *map)
}
}
- if (!found) {
+ if (!found)
return -EINVAL;
- }
+
/* List has wrapped around to the head pointer, or it's empty and we
* didn't find anything.
*/
@@ -494,7 +499,8 @@ int drm_rmmap_ioctl(struct drm_device *dev, void *data,
*
* Frees any pages and buffers associated with the given entry.
*/
-static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry * entry)
+static void drm_cleanup_buf_error(struct drm_device *dev,
+ struct drm_buf_entry *entry)
{
int i;
@@ -529,7 +535,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry *
#if __OS_HAS_AGP
/**
- * Add AGP buffers for DMA transfers
+ * Add AGP buffers for DMA transfers.
*
* \param dev struct drm_device to which the buffers are to be added.
* \param request pointer to a struct drm_buf_desc describing the request.
@@ -539,7 +545,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev, struct drm_buf_entry *
* reallocates the buffer list of the same size order to accommodate the new
* buffers.
*/
-int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request)
+int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
@@ -709,7 +715,7 @@ int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc * request)
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */
-int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
+int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
int count;
@@ -821,9 +827,9 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
page_count = 0;
while (entry->buf_count < count) {
-
+
dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000, 0xfffffffful);
-
+
if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
@@ -935,7 +941,7 @@ int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc * request)
}
EXPORT_SYMBOL(drm_addbufs_pci);
-static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc * request)
+static int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
struct drm_device_dma *dma = dev->dma;
struct drm_buf_entry *entry;
@@ -1600,5 +1606,3 @@ int drm_order(unsigned long size)
return order;
}
EXPORT_SYMBOL(drm_order);
-
-
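drm_order, touched in the last hunk, returns the smallest power-of-two order that covers a size, i.e. ceil(log2(size)). Here is a hedged stand-alone reimplementation for illustration; order_of is an invented name, not the kernel function itself.

#include <stdio.h>

/* Smallest 'order' such that (1UL << order) >= size. */
static int order_of(unsigned long size)
{
        int order = 0;
        unsigned long tmp;

        for (tmp = size >> 1; tmp; tmp >>= 1)
                order++;
        if (size & (size - 1))  /* not a power of two: round up */
                order++;
        return order;
}

int main(void)
{
        printf("%d %d %d\n", order_of(1), order_of(4096), order_of(4097));
        /* prints: 0 12 13 */
        return 0;
}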
diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c
index ae44e500..a745a7d9 100644
--- a/linux-core/drm_compat.c
+++ b/linux-core/drm_compat.c
@@ -1,5 +1,5 @@
/**************************************************************************
- *
+ *
* This kernel module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
@@ -13,7 +13,7 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
+ *
**************************************************************************/
/*
* This code provides access to unexported mm kernel features. It is necessary
@@ -21,7 +21,7 @@
* directly.
*
* Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
- * Linux kernel mm subsystem authors.
+ * Linux kernel mm subsystem authors.
* (Most code taken from there).
*/
@@ -50,7 +50,7 @@ int drm_unmap_page_from_agp(struct page *page)
* performance reasons */
return i;
}
-#endif
+#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
@@ -80,22 +80,22 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags)
/*
* vm code for kernels below 2.6.15 in which version a major vm write
- * occured. This implement a simple straightforward
+ * occurred. This implements a simple, straightforward
* version similar to what's going to be
* in kernel 2.6.19+
* Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
* nopfn.
- */
+ */
static struct {
spinlock_t lock;
struct page *dummy_page;
atomic_t present;
-} drm_np_retry =
+} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};
-static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
+static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct fault_data *data);
@@ -126,7 +126,7 @@ void free_nopage_retry(void)
}
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
+ unsigned long address,
int *type)
{
struct fault_data data;
@@ -204,14 +204,14 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page = NULL;
- struct drm_ttm *ttm;
+ struct drm_ttm *ttm;
struct drm_device *dev;
unsigned long pfn;
int err;
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
-
+
dev = bo->dev;
while(drm_bo_read_lock(&dev->bm.bm_lock));
@@ -219,12 +219,12 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
err = drm_bo_wait(bo, 0, 1, 0);
if (err) {
- data->type = (err == -EAGAIN) ?
+ data->type = (err == -EAGAIN) ?
VM_FAULT_MINOR : VM_FAULT_SIGBUS;
goto out_unlock;
}
-
-
+
+
/*
* If buffer happens to be in a non-mappable location,
* move it to a mappable.
@@ -232,7 +232,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
unsigned long _end = jiffies + 3*DRM_HZ;
- uint32_t new_mask = bo->mem.mask |
+ uint32_t new_mask = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
@@ -253,7 +253,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
}
dev = bo->dev;
- err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
+ err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
&bus_size);
if (err) {
@@ -286,7 +286,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
err = vm_insert_pfn(vma, address, pfn);
if (!err || err == -EBUSY)
- data->type = VM_FAULT_MINOR;
+ data->type = VM_FAULT_MINOR;
else
data->type = VM_FAULT_OOM;
out_unlock:
@@ -330,7 +330,7 @@ unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
* VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
* workaround for a single BUG statement in do_no_page in these versions. The
* tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
- * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
+ * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this is to
* check first take the dev->struct_mutex, and then trylock all mmap_sems. If this
* fails for a single mmap_sem, we have to release all sems and the dev->struct_mutex,
* release the cpu and retry. We also need to keep track of all vmas mapping the ttm.
@@ -351,13 +351,13 @@ typedef struct vma_entry {
struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
+ unsigned long address,
int *type)
{
struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
unsigned long page_offset;
struct page *page;
- struct drm_ttm *ttm;
+ struct drm_ttm *ttm;
struct drm_device *dev;
mutex_lock(&bo->mutex);
@@ -369,7 +369,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
page = NOPAGE_SIGBUS;
goto out_unlock;
}
-
+
dev = bo->dev;
if (drm_mem_reg_is_pci(dev, &bo->mem)) {
@@ -403,8 +403,8 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
unsigned long bus_base;
unsigned long bus_offset;
unsigned long bus_size;
-
- ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
+
+ ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
&bus_offset, &bus_size);
BUG_ON(ret);
@@ -419,7 +419,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma)
return ret;
}
-
+
int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
@@ -493,7 +493,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
int lock_ok = 1;
-
+
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(entry->locked);
if (!down_write_trylock(&entry->mm->mmap_sem)) {
@@ -507,7 +507,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
return 0;
list_for_each_entry(entry, &bo->p_mm_list, head) {
- if (!entry->locked)
+ if (!entry->locked)
break;
up_write(&entry->mm->mmap_sem);
entry->locked = 0;
@@ -524,7 +524,7 @@ int drm_bo_lock_kmm(struct drm_buffer_object * bo)
void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
{
p_mm_entry_t *entry;
-
+
list_for_each_entry(entry, &bo->p_mm_list, head) {
BUG_ON(!entry->locked);
up_write(&entry->mm->mmap_sem);
@@ -532,7 +532,7 @@ void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
}
}
-int drm_bo_remap_bound(struct drm_buffer_object *bo)
+int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
vma_entry_t *v_entry;
int ret = 0;
@@ -553,9 +553,9 @@ void drm_bo_finish_unmap(struct drm_buffer_object *bo)
vma_entry_t *v_entry;
list_for_each_entry(v_entry, &bo->vma_list, head) {
- v_entry->vma->vm_flags &= ~VM_PFNMAP;
+ v_entry->vma->vm_flags &= ~VM_PFNMAP;
}
-}
+}
#endif
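drm_bo_lock_kmm in the hunks above trylocks every mmap_sem and, if any one of them fails, releases the ones it already holds so the caller can back off and retry (the -EAGAIN path). A stand-alone sketch of that all-or-nothing trylock pattern with pthread mutexes; trylock_all and unlock_all are illustrative names only.

#include <pthread.h>
#include <stdio.h>

#define NLOCKS 3

static pthread_mutex_t locks[NLOCKS] = {
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
        PTHREAD_MUTEX_INITIALIZER,
};

/* Returns 0 with all locks held, or -1 holding none, so the caller can
 * drop its other resources, yield, and try again. */
static int trylock_all(void)
{
        int i, j;

        for (i = 0; i < NLOCKS; i++) {
                if (pthread_mutex_trylock(&locks[i]) != 0) {
                        /* Back out the locks already taken. */
                        for (j = 0; j < i; j++)
                                pthread_mutex_unlock(&locks[j]);
                        return -1;
                }
        }
        return 0;
}

static void unlock_all(void)
{
        int i;

        for (i = 0; i < NLOCKS; i++)
                pthread_mutex_unlock(&locks[i]);
}

int main(void)
{
        if (trylock_all() == 0) {
                printf("got all %d locks\n", NLOCKS);
                unlock_all();
        }
        return 0;
}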
diff --git a/linux-core/drm_compat.h b/linux-core/drm_compat.h
index f74f4bc2..f8933e0c 100644
--- a/linux-core/drm_compat.h
+++ b/linux-core/drm_compat.h
@@ -89,7 +89,7 @@
#define __user
#endif
-#if !defined(__put_page)
+#if !defined(__put_page)
#define __put_page(p) atomic_dec(&(p)->count)
#endif
@@ -104,7 +104,7 @@
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
static inline int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t pgprot)
{
- return remap_page_range(vma, from,
+ return remap_page_range(vma, from,
pfn << PAGE_SHIFT,
size,
pgprot);
@@ -178,7 +178,7 @@ static __inline__ void *kcalloc(size_t nmemb, size_t size, int flags)
/*
- * Flush relevant caches and clear a VMA structure so that page references
+ * Flush relevant caches and clear a VMA structure so that page references
* will cause a page fault. Don't flush tlbs.
*/
@@ -186,7 +186,7 @@ extern void drm_clear_vma(struct vm_area_struct *vma,
unsigned long addr, unsigned long end);
/*
- * Return the PTE protection map entries for the VMA flags given by
+ * Return the PTE protection map entries for the VMA flags given by
* flags. This is a functional interface to the kernel's protection map.
*/
@@ -223,7 +223,7 @@ extern void free_nopage_retry(void);
#ifndef DRM_FULL_MM_COMPAT
/*
- * For now, just return a dummy page that we've allocated out of
+ * For now, just return a dummy page that we've allocated out of
* static space. The page will be put by do_nopage() since we've already
* filled out the pte.
*/
@@ -233,13 +233,13 @@ struct fault_data {
unsigned long address;
pgoff_t pgoff;
unsigned int flags;
-
+
int type;
};
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
extern struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
- unsigned long address,
+ unsigned long address,
int *type);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
!defined(DRM_FULL_MM_COMPAT)
@@ -254,22 +254,22 @@ struct drm_buffer_object;
/*
- * Add a vma to the ttm vma list, and the
+ * Add a vma to the ttm vma list, and the
* process mm pointer to the ttm mm list. Needs the ttm mutex.
*/
-extern int drm_bo_add_vma(struct drm_buffer_object * bo,
+extern int drm_bo_add_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Delete a vma and the corresponding mm pointer from the
* ttm lists. Needs the ttm mutex.
*/
-extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
+extern void drm_bo_delete_vma(struct drm_buffer_object * bo,
struct vm_area_struct *vma);
/*
* Attempts to lock all relevant mmap_sems for a ttm, while
- * not releasing the ttm mutex. May return -EAGAIN to avoid
+ * not releasing the ttm mutex. May return -EAGAIN to avoid
* deadlocks. In that case the caller shall release the ttm mutex,
* schedule() and try again.
*/
@@ -292,7 +292,7 @@ extern void drm_bo_unlock_kmm(struct drm_buffer_object * bo);
extern void drm_bo_finish_unmap(struct drm_buffer_object *bo);
/*
- * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
+ * Remap all vmas of this ttm using io_remap_pfn_range. We cannot
* fault these pfns in, because the first one will set the vma VM_PFNMAP
* flag, which will make the next fault bug in do_nopage(). The function
* releases the mmap_sems for this ttm.
diff --git a/linux-core/drm_context.c b/linux-core/drm_context.c
index 7854e89c..83ad291e 100644
--- a/linux-core/drm_context.c
+++ b/linux-core/drm_context.c
@@ -89,7 +89,7 @@ again:
mutex_unlock(&dev->struct_mutex);
goto again;
}
-
+
mutex_unlock(&dev->struct_mutex);
return new_id;
}
@@ -160,7 +160,7 @@ int drm_getsareactx(struct drm_device *dev, void *data,
request->handle = NULL;
list_for_each_entry(_entry, &dev->maplist, head) {
if (_entry->map == map) {
- request->handle =
+ request->handle =
(void *)(unsigned long)_entry->user_token;
break;
}
diff --git a/linux-core/drm_dma.c b/linux-core/drm_dma.c
index 7cc44193..f7bff0ac 100644
--- a/linux-core/drm_dma.c
+++ b/linux-core/drm_dma.c
@@ -43,7 +43,7 @@
*
* Allocate and initialize a drm_device_dma structure.
*/
-int drm_dma_setup(struct drm_device * dev)
+int drm_dma_setup(struct drm_device *dev)
{
int i;
@@ -65,9 +65,9 @@ int drm_dma_setup(struct drm_device * dev)
* \param dev DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
- * finally the the drm_device::dma structure itself.
+ * finally the drm_device::dma structure itself.
*/
-void drm_dma_takedown(struct drm_device * dev)
+void drm_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
int i, j;
@@ -129,7 +129,7 @@ void drm_dma_takedown(struct drm_device * dev)
*
* Resets the fields of \p buf.
*/
-void drm_free_buffer(struct drm_device * dev, struct drm_buf * buf)
+void drm_free_buffer(struct drm_device *dev, struct drm_buf *buf)
{
if (!buf)
return;
diff --git a/linux-core/drm_drv.c b/linux-core/drm_drv.c
index 296a3268..3c2794d0 100644
--- a/linux-core/drm_drv.c
+++ b/linux-core/drm_drv.c
@@ -121,13 +121,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_INIT, drm_mm_init_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_TAKEDOWN, drm_mm_takedown_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_LOCK, drm_mm_lock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
+ DRM_IOCTL_DEF(DRM_IOCTL_MM_UNLOCK, drm_mm_unlock_ioctl,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_IOCTL_FENCE_CREATE, drm_fence_create_ioctl, DRM_AUTH),
@@ -183,8 +183,8 @@ int drm_lastclose(struct drm_device * dev)
if (dev->unique) {
drm_free(dev->unique, strlen(dev->unique) + 1, DRM_MEM_DRIVER);
- dev->unique=NULL;
- dev->unique_len=0;
+ dev->unique = NULL;
+ dev->unique_len = 0;
}
if (dev->irq_enabled)
@@ -242,10 +242,12 @@ int drm_lastclose(struct drm_device * dev)
list_del(&vma->head);
drm_ctl_free(vma, sizeof(*vma), DRM_MEM_VMAS);
}
-
+
list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
- drm_rmmap_locked(dev, r_list->map);
- r_list = NULL;
+ if (!(r_list->map->flags & _DRM_DRIVER)) {
+ drm_rmmap_locked(dev, r_list->map);
+ r_list = NULL;
+ }
}
if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE) && dev->queuelist) {
@@ -322,7 +324,7 @@ int drm_init(struct drm_driver *driver,
pci_get_subsys(pid->vendor, pid->device, pid->subvendor,
pid->subdevice, pdev))) {
/* Are there device class requirements? */
- if ((pid->class != 0)
+ if ((pid->class != 0)
&& ((pdev->class & pid->class_mask) != pid->class)) {
continue;
}
@@ -353,7 +355,7 @@ int drm_init(struct drm_driver *driver,
pid->subvendor, pid->subdevice,
pdev))) {
/* Are there device class requirements? */
- if ((pid->class != 0)
+ if ((pid->class != 0)
&& ((pdev->class & pid->class_mask) != pid->class)) {
continue;
}
@@ -390,15 +392,6 @@ static void drm_cleanup(struct drm_device * dev)
drm_lastclose(dev);
drm_fence_manager_takedown(dev);
- drm_ht_remove(&dev->map_hash);
- drm_mm_takedown(&dev->offset_manager);
- drm_ht_remove(&dev->object_hash);
-
- if (!drm_fb_loaded)
- pci_disable_device(dev->pdev);
-
- drm_ctxbitmap_cleanup(dev);
-
if (drm_core_has_MTRR(dev) && drm_core_has_AGP(dev) && dev->agp
&& dev->agp->agp_mtrr >= 0) {
int retval;
@@ -415,6 +408,14 @@ static void drm_cleanup(struct drm_device * dev)
if (dev->driver->unload)
dev->driver->unload(dev);
+ if (!drm_fb_loaded)
+ pci_disable_device(dev->pdev);
+
+ drm_ctxbitmap_cleanup(dev);
+ drm_ht_remove(&dev->map_hash);
+ drm_mm_takedown(&dev->offset_manager);
+ drm_ht_remove(&dev->object_hash);
+
drm_put_head(&dev->primary);
if (drm_put_dev(dev))
DRM_ERROR("Cannot unload module\n");
@@ -467,19 +468,19 @@ static int __init drm_core_init(void)
unsigned long max_memctl_mem;
si_meminfo(&si);
-
+
/*
* AGP only allows low / DMA32 memory ATM.
*/
avail_memctl_mem = si.totalram - si.totalhigh;
- /*
- * Avoid overflows
+ /*
+ * Avoid overflows
*/
max_memctl_mem = 1UL << (32 - PAGE_SHIFT);
- max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
+ max_memctl_mem = (max_memctl_mem / si.mem_unit) * PAGE_SIZE;
if (avail_memctl_mem >= max_memctl_mem)
avail_memctl_mem = max_memctl_mem;
diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c
index e696b42d..288b4db6 100644
--- a/linux-core/drm_fence.c
+++ b/linux-core/drm_fence.c
@@ -34,7 +34,7 @@
* Typically called by the IRQ handler.
*/
-void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
+void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence, uint32_t type, uint32_t error)
{
int wake = 0;
@@ -58,9 +58,8 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
diff = (sequence - fc->last_exe_flush) & driver->sequence_mask;
ge_last_exe = diff < driver->wrap_diff;
- if (is_exe && ge_last_exe) {
+ if (is_exe && ge_last_exe)
fc->last_exe_flush = sequence;
- }
if (list_empty(&fc->ring))
return;
@@ -123,11 +122,11 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
*/
if ((fc->pending_flush & type) != type) {
- head = head->prev;
+ head = head->prev;
list_for_each_entry(fence, head, ring) {
if (&fence->ring == &fc->ring)
break;
- diff = (fc->last_exe_flush - fence->sequence) &
+ diff = (fc->last_exe_flush - fence->sequence) &
driver->sequence_mask;
if (diff > driver->wrap_diff)
break;
@@ -141,10 +140,9 @@ void drm_fence_handler(struct drm_device * dev, uint32_t fence_class,
DRM_WAKEUP(&fc->fence_queue);
}
}
-
EXPORT_SYMBOL(drm_fence_handler);
-static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
+static void drm_fence_unring(struct drm_device *dev, struct list_head *ring)
{
struct drm_fence_manager *fm = &dev->fm;
unsigned long flags;
@@ -154,7 +152,7 @@ static void drm_fence_unring(struct drm_device * dev, struct list_head *ring)
write_unlock_irqrestore(&fm->lock, flags);
}
-void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
+void drm_fence_usage_deref_locked(struct drm_fence_object **fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
@@ -173,7 +171,7 @@ void drm_fence_usage_deref_locked(struct drm_fence_object ** fence)
}
EXPORT_SYMBOL(drm_fence_usage_deref_locked);
-void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence)
+void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence)
{
struct drm_fence_object *tmp_fence = *fence;
struct drm_device *dev = tmp_fence->dev;
@@ -212,7 +210,8 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst,
}
EXPORT_SYMBOL(drm_fence_reference_unlocked);
-static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base)
+static void drm_fence_object_destroy(struct drm_file *priv,
+ struct drm_user_object *base)
{
struct drm_fence_object *fence =
drm_user_object_entry(base, struct drm_fence_object, base);
@@ -220,7 +219,7 @@ static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_obje
drm_fence_usage_deref_locked(&fence);
}
-int drm_fence_object_signaled(struct drm_fence_object * fence,
+int drm_fence_object_signaled(struct drm_fence_object *fence,
uint32_t mask, int poke_flush)
{
unsigned long flags;
@@ -240,8 +239,9 @@ int drm_fence_object_signaled(struct drm_fence_object * fence,
}
EXPORT_SYMBOL(drm_fence_object_signaled);
-static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
- struct drm_fence_driver * driver, uint32_t sequence)
+static void drm_fence_flush_exe(struct drm_fence_class_manager *fc,
+ struct drm_fence_driver *driver,
+ uint32_t sequence)
{
uint32_t diff;
@@ -249,15 +249,13 @@ static void drm_fence_flush_exe(struct drm_fence_class_manager * fc,
fc->exe_flush_sequence = sequence;
fc->pending_exe_flush = 1;
} else {
- diff =
- (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
- if (diff < driver->wrap_diff) {
+ diff = (sequence - fc->exe_flush_sequence) & driver->sequence_mask;
+ if (diff < driver->wrap_diff)
fc->exe_flush_sequence = sequence;
- }
}
}
-int drm_fence_object_flush(struct drm_fence_object * fence,
+int drm_fence_object_flush(struct drm_fence_object *fence,
uint32_t type)
{
struct drm_device *dev = fence->dev;
@@ -296,7 +294,8 @@ int drm_fence_object_flush(struct drm_fence_object * fence,
* wrapped around and reused.
*/
-void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t sequence)
+void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
+ uint32_t sequence)
{
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fc = &fm->fence_class[fence_class];
@@ -328,12 +327,10 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t fence_class, uint32_t
mutex_unlock(&dev->struct_mutex);
diff = (old_sequence - fence->sequence) & driver->sequence_mask;
read_unlock_irqrestore(&fm->lock, flags);
- if (diff < driver->wrap_diff) {
+ if (diff < driver->wrap_diff)
drm_fence_object_flush(fence, fence->type);
- }
drm_fence_usage_deref_unlocked(&fence);
}
-
EXPORT_SYMBOL(drm_fence_flush_old);
static int drm_fence_lazy_wait(struct drm_fence_object *fence,
@@ -378,7 +375,7 @@ static int drm_fence_lazy_wait(struct drm_fence_object *fence,
return 0;
}
-int drm_fence_object_wait(struct drm_fence_object * fence,
+int drm_fence_object_wait(struct drm_fence_object *fence,
int lazy, int ignore_signals, uint32_t mask)
{
struct drm_device *dev = fence->dev;
@@ -431,10 +428,9 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
/*
* Avoid kernel-space busy-waits.
*/
-#if 1
if (!ignore_signals)
return -EAGAIN;
-#endif
+
do {
schedule();
signaled = drm_fence_object_signaled(fence, mask, 1);
@@ -447,9 +443,8 @@ int drm_fence_object_wait(struct drm_fence_object * fence,
}
EXPORT_SYMBOL(drm_fence_object_wait);
-
-int drm_fence_object_emit(struct drm_fence_object * fence,
- uint32_t fence_flags, uint32_t fence_class, uint32_t type)
+int drm_fence_object_emit(struct drm_fence_object *fence, uint32_t fence_flags,
+ uint32_t fence_class, uint32_t type)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
@@ -461,7 +456,8 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
int ret;
drm_fence_unring(dev, &fence->ring);
- ret = driver->emit(dev, fence_class, fence_flags, &sequence, &native_type);
+ ret = driver->emit(dev, fence_class, fence_flags, &sequence,
+ &native_type);
if (ret)
return ret;
@@ -481,10 +477,10 @@ int drm_fence_object_emit(struct drm_fence_object * fence,
}
EXPORT_SYMBOL(drm_fence_object_emit);
-static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
+static int drm_fence_object_init(struct drm_device *dev, uint32_t fence_class,
uint32_t type,
uint32_t fence_flags,
- struct drm_fence_object * fence)
+ struct drm_fence_object *fence)
{
int ret = 0;
unsigned long flags;
@@ -497,7 +493,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
write_lock_irqsave(&fm->lock, flags);
INIT_LIST_HEAD(&fence->ring);
- /*
+ /*
* Avoid hitting BUG() for kernel-only fence objects.
*/
@@ -517,8 +513,8 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t fence_class,
return ret;
}
-int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence,
- int shareable)
+int drm_fence_add_user_object(struct drm_file *priv,
+ struct drm_fence_object *fence, int shareable)
{
struct drm_device *dev = priv->head->dev;
int ret;
@@ -537,8 +533,9 @@ out:
}
EXPORT_SYMBOL(drm_fence_add_user_object);
-int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint32_t type,
- unsigned flags, struct drm_fence_object ** c_fence)
+int drm_fence_object_create(struct drm_device *dev, uint32_t fence_class,
+ uint32_t type, unsigned flags,
+ struct drm_fence_object **c_fence)
{
struct drm_fence_object *fence;
int ret;
@@ -557,10 +554,9 @@ int drm_fence_object_create(struct drm_device * dev, uint32_t fence_class, uint3
return 0;
}
-
EXPORT_SYMBOL(drm_fence_object_create);
-void drm_fence_manager_init(struct drm_device * dev)
+void drm_fence_manager_init(struct drm_device *dev)
{
struct drm_fence_manager *fm = &dev->fm;
struct drm_fence_class_manager *fence_class;
@@ -578,7 +574,7 @@ void drm_fence_manager_init(struct drm_device * dev)
fm->num_classes = fed->num_classes;
BUG_ON(fm->num_classes > _DRM_FENCE_CLASSES);
- for (i=0; i<fm->num_classes; ++i) {
+ for (i = 0; i < fm->num_classes; ++i) {
fence_class = &fm->fence_class[i];
INIT_LIST_HEAD(&fence_class->ring);
@@ -591,7 +587,8 @@ void drm_fence_manager_init(struct drm_device * dev)
write_unlock_irqrestore(&fm->lock, flags);
}
-void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *arg)
+void drm_fence_fill_arg(struct drm_fence_object *fence,
+ struct drm_fence_arg *arg)
{
struct drm_device *dev = fence->dev;
struct drm_fence_manager *fm = &dev->fm;
@@ -608,12 +605,12 @@ void drm_fence_fill_arg(struct drm_fence_object *fence, struct drm_fence_arg *ar
}
EXPORT_SYMBOL(drm_fence_fill_arg);
-
-void drm_fence_manager_takedown(struct drm_device * dev)
+void drm_fence_manager_takedown(struct drm_device *dev)
{
}
-struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle)
+struct drm_fence_object *drm_lookup_fence_object(struct drm_file *priv,
+ uint32_t handle)
{
struct drm_device *dev = priv->head->dev;
struct drm_user_object *uo;
@@ -656,14 +653,13 @@ int drm_fence_create_ioctl(struct drm_device *dev, void *data, struct drm_file *
drm_fence_usage_deref_unlocked(&fence);
return ret;
}
-
+
/*
* usage > 0. No need to lock dev->struct_mutex;
*/
arg->handle = fence->base.hash.key;
-
drm_fence_fill_arg(fence, arg);
drm_fence_usage_deref_unlocked(&fence);
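The fence code above orders sequence numbers with expressions like (sequence - last) & sequence_mask compared against wrap_diff, so the ordering stays correct when the counter wraps. A tiny stand-alone illustration, assuming a full 32-bit sequence space and a half-space wrap_diff just for the example:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define SEQUENCE_MASK 0xffffffffu
#define WRAP_DIFF     (1u << 31)   /* half the sequence space */

/* 'a' is at or after 'b' if their masked difference is less than half
 * the sequence space; this survives wrap-around. */
static bool seq_after_eq(uint32_t a, uint32_t b)
{
        uint32_t diff = (a - b) & SEQUENCE_MASK;

        return diff < WRAP_DIFF;
}

int main(void)
{
        /* 0x00000002 was emitted after 0xfffffffe even though it is
         * numerically smaller. */
        printf("%d\n", seq_after_eq(0x00000002u, 0xfffffffeu)); /* 1 */
        printf("%d\n", seq_after_eq(0xfffffffeu, 0x00000002u)); /* 0 */
        return 0;
}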
diff --git a/linux-core/drm_fops.c b/linux-core/drm_fops.c
index 0ccaed5b..0e1c486c 100644
--- a/linux-core/drm_fops.c
+++ b/linux-core/drm_fops.c
@@ -85,7 +85,6 @@ static int drm_setup(struct drm_device * dev)
dev->queue_reserved = 0;
dev->queue_slots = 0;
dev->queuelist = NULL;
- dev->irq_enabled = 0;
dev->context_flag = 0;
dev->interrupt_flag = 0;
dev->dma_flag = 0;
@@ -153,7 +152,7 @@ int drm_open(struct inode *inode, struct file *filp)
spin_unlock(&dev->count_lock);
}
- out:
+out:
mutex_lock(&dev->struct_mutex);
BUG_ON((dev->dev_mapping != NULL) &&
(dev->dev_mapping != inode->i_mapping));
@@ -237,7 +236,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
int minor = iminor(inode);
struct drm_file *priv;
int ret;
- int i,j;
+ int i, j;
if (filp->f_flags & O_EXCL)
return -EBUSY; /* No exclusive opens */
@@ -265,16 +264,16 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
INIT_LIST_HEAD(&priv->lhead);
INIT_LIST_HEAD(&priv->refd_objects);
- for (i=0; i<_DRM_NO_REF_TYPES; ++i) {
- ret = drm_ht_create(&priv->refd_object_hash[i], DRM_FILE_HASH_ORDER);
+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i) {
+ ret = drm_ht_create(&priv->refd_object_hash[i],
+ DRM_FILE_HASH_ORDER);
if (ret)
break;
}
if (ret) {
- for(j=0; j<i; ++j) {
+ for (j = 0; j < i; ++j)
drm_ht_remove(&priv->refd_object_hash[j]);
- }
goto out_free;
}
@@ -333,8 +332,8 @@ int drm_fasync(int fd, struct file *filp, int on)
}
EXPORT_SYMBOL(drm_fasync);
-static void drm_object_release(struct file *filp) {
-
+static void drm_object_release(struct file *filp)
+{
struct drm_file *priv = filp->private_data;
struct list_head *head;
struct drm_ref_object *ref_object;
@@ -342,8 +341,9 @@ static void drm_object_release(struct file *filp) {
/*
* Free leftover ref objects created by me. Note that we cannot use
- * list_for_each() here, as the struct_mutex may be temporarily released
- * by the remove_() functions, and thus the lists may be altered.
+ * list_for_each() here, as the struct_mutex may be temporarily
+ * released by the remove_() functions, and thus the lists may be
+ * altered.
* Also, a drm_remove_ref_object() will not remove it
* from the list unless its refcount is 1.
*/
@@ -355,9 +355,8 @@ static void drm_object_release(struct file *filp) {
head = &priv->refd_objects;
}
- for(i=0; i<_DRM_NO_REF_TYPES; ++i) {
+ for (i = 0; i < _DRM_NO_REF_TYPES; ++i)
drm_ht_remove(&priv->refd_object_hash[i]);
- }
}
/**
@@ -528,4 +527,3 @@ unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
return 0;
}
EXPORT_SYMBOL(drm_poll);
-
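drm_object_release above drains the leftover ref objects by repeatedly taking the first list entry rather than walking the list with list_for_each, because the struct_mutex can be dropped while removing an entry and a saved cursor would go stale. A simplified stand-alone sketch of that drain-by-head pattern (plain singly linked list, invented names):

#include <stdio.h>
#include <stdlib.h>

struct node {
        int val;
        struct node *next;
};

/* Remove and free one node; in the kernel analogue this step may drop
 * and re-take locks, so any cached iterator could be stale afterwards. */
static void release_one(struct node **head)
{
        struct node *n = *head;

        *head = n->next;
        printf("released %d\n", n->val);
        free(n);
}

/* Drain the list by always taking the current head instead of keeping
 * a cursor across removals. */
static void drain(struct node **head)
{
        while (*head)
                release_one(head);
}

int main(void)
{
        struct node *head = NULL;
        int i;

        for (i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        return 1;
                n->val = i;
                n->next = head;
                head = n;
        }
        drain(&head);
        return 0;
}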
diff --git a/linux-core/drm_hashtab.c b/linux-core/drm_hashtab.c
index a8ec8468..f5a4f849 100644
--- a/linux-core/drm_hashtab.c
+++ b/linux-core/drm_hashtab.c
@@ -36,7 +36,7 @@
#include "drm_hashtab.h"
#include <linux/hash.h>
-int drm_ht_create(struct drm_open_hash * ht, unsigned int order)
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
{
unsigned int i;
@@ -63,7 +63,7 @@ int drm_ht_create(struct drm_open_hash * ht, unsigned int order)
return 0;
}
-void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key)
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
@@ -80,7 +80,7 @@ void drm_ht_verbose_list(struct drm_open_hash * ht, unsigned long key)
}
}
-static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht,
+static struct hlist_node *drm_ht_find_key(struct drm_open_hash *ht,
unsigned long key)
{
struct drm_hash_item *entry;
@@ -100,7 +100,7 @@ static struct hlist_node *drm_ht_find_key(struct drm_open_hash * ht,
return NULL;
}
-int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
struct drm_hash_item *entry;
struct hlist_head *h_list;
@@ -128,10 +128,11 @@ int drm_ht_insert_item(struct drm_open_hash * ht, struct drm_hash_item * item)
}
/*
- * Just insert an item and return any "bits" bit key that hasn't been
+ * Just insert an item and return any "bits" bit key that hasn't been
* used before.
*/
-int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item * item,
+int drm_ht_just_insert_please(struct drm_open_hash *ht,
+ struct drm_hash_item *item,
unsigned long seed, int bits, int shift,
unsigned long add)
{
@@ -155,8 +156,8 @@ int drm_ht_just_insert_please(struct drm_open_hash * ht, struct drm_hash_item *
return 0;
}
-int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key,
- struct drm_hash_item ** item)
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
+ struct drm_hash_item **item)
{
struct hlist_node *list;
@@ -168,7 +169,7 @@ int drm_ht_find_item(struct drm_open_hash * ht, unsigned long key,
return 0;
}
-int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key)
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
{
struct hlist_node *list;
@@ -181,14 +182,14 @@ int drm_ht_remove_key(struct drm_open_hash * ht, unsigned long key)
return -EINVAL;
}
-int drm_ht_remove_item(struct drm_open_hash * ht, struct drm_hash_item * item)
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item)
{
hlist_del_init(&item->head);
ht->fill--;
return 0;
}
-void drm_ht_remove(struct drm_open_hash * ht)
+void drm_ht_remove(struct drm_open_hash *ht)
{
if (ht->table) {
if (ht->use_vmalloc)
diff --git a/linux-core/drm_hashtab.h b/linux-core/drm_hashtab.h
index 0f137677..c090677b 100644
--- a/linux-core/drm_hashtab.h
+++ b/linux-core/drm_hashtab.h
@@ -65,4 +65,3 @@ extern void drm_ht_remove(struct drm_open_hash *ht);
#endif
-
diff --git a/linux-core/drm_internal.h b/linux-core/drm_internal.h
new file mode 120000
index 00000000..b30ef94a
--- /dev/null
+++ b/linux-core/drm_internal.h
@@ -0,0 +1 @@
+../shared-core/drm_internal.h \ No newline at end of file
diff --git a/linux-core/drm_ioctl.c b/linux-core/drm_ioctl.c
index 9d52fd8a..3df163db 100644
--- a/linux-core/drm_ioctl.c
+++ b/linux-core/drm_ioctl.c
@@ -98,12 +98,14 @@ int drm_setunique(struct drm_device *dev, void *data,
dev->unique[dev->unique_len] = '\0';
- dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + strlen(dev->unique) + 2,
- DRM_MEM_DRIVER);
+ dev->devname =
+ drm_alloc(strlen(dev->driver->pci_driver.name) +
+ strlen(dev->unique) + 2, DRM_MEM_DRIVER);
if (!dev->devname)
return -ENOMEM;
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);
+ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+ dev->unique);
/* Return error if the busid submitted doesn't match the device's actual
* busid.
@@ -142,12 +144,14 @@ static int drm_set_busid(struct drm_device * dev)
if (len > dev->unique_len)
DRM_ERROR("buffer overflow");
- dev->devname = drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len + 2,
- DRM_MEM_DRIVER);
+ dev->devname =
+ drm_alloc(strlen(dev->driver->pci_driver.name) + dev->unique_len +
+ 2, DRM_MEM_DRIVER);
if (dev->devname == NULL)
return -ENOMEM;
- sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name, dev->unique);
+ sprintf(dev->devname, "%s@%s", dev->driver->pci_driver.name,
+ dev->unique);
return 0;
}
@@ -264,7 +268,7 @@ int drm_getstats(struct drm_device *dev, void *data,
struct drm_stats *stats = data;
int i;
- memset(stats, 0, sizeof(stats));
+ memset(stats, 0, sizeof(*stats));
mutex_lock(&dev->struct_mutex);
diff --git a/linux-core/drm_irq.c b/linux-core/drm_irq.c
index 4aa58d77..2a5a4539 100644
--- a/linux-core/drm_irq.c
+++ b/linux-core/drm_irq.c
@@ -188,7 +188,7 @@ int drm_irq_install(struct drm_device * dev)
dev->irq_enabled = 1;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
/* Before installing handler */
dev->driver->irq_preinstall(dev);
@@ -240,7 +240,7 @@ int drm_irq_uninstall(struct drm_device * dev)
if (!irq_enabled)
return -EINVAL;
- DRM_DEBUG("%s: irq=%d\n", __FUNCTION__, dev->irq);
+ DRM_DEBUG("irq=%d\n", dev->irq);
dev->driver->irq_uninstall(dev);
@@ -636,7 +636,7 @@ EXPORT_SYMBOL(drm_handle_vblank);
*/
static void drm_locked_tasklet_func(unsigned long data)
{
- struct drm_device *dev = (struct drm_device*)data;
+ struct drm_device *dev = (struct drm_device *)data;
unsigned long irqflags;
spin_lock_irqsave(&dev->tasklet_lock, irqflags);
@@ -673,7 +673,7 @@ static void drm_locked_tasklet_func(unsigned long data)
* context, it must not make any assumptions about this. Also, the HW lock will
* be held with the kernel context or any client context.
*/
-void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device*))
+void drm_locked_tasklet(struct drm_device *dev, void (*func)(struct drm_device *))
{
unsigned long irqflags;
static DECLARE_TASKLET(drm_tasklet, drm_locked_tasklet_func, 0);
diff --git a/linux-core/drm_memory.c b/linux-core/drm_memory.c
index f68a3a3e..402a680f 100644
--- a/linux-core/drm_memory.c
+++ b/linux-core/drm_memory.c
@@ -45,13 +45,13 @@ static struct {
.lock = SPIN_LOCK_UNLOCKED
};
-static inline size_t drm_size_align(size_t size) {
-
+static inline size_t drm_size_align(size_t size)
+{
size_t tmpSize = 4;
if (size > PAGE_SIZE)
return PAGE_ALIGN(size);
- while(tmpSize < size)
+ while (tmpSize < size)
tmpSize <<= 1;
return (size_t) tmpSize;
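
For reference, the helper reformatted above rounds small sizes up to the next power of two (starting at 4) and falls back to page granularity for anything larger than a page; illustrative values only: drm_size_align(3) returns 4, drm_size_align(100) returns 128, and drm_size_align(5000) returns PAGE_ALIGN(5000).
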
diff --git a/linux-core/drm_memory.h b/linux-core/drm_memory.h
index 5590c491..63e425b5 100644
--- a/linux-core/drm_memory.h
+++ b/linux-core/drm_memory.h
@@ -42,7 +42,6 @@
* drm_memory.h.
*/
-/* Need the 4-argument version of vmap(). */
#if __OS_HAS_AGP
#include <linux/vmalloc.h>
diff --git a/linux-core/drm_mm.c b/linux-core/drm_mm.c
index cf0d92fa..59110293 100644
--- a/linux-core/drm_mm.c
+++ b/linux-core/drm_mm.c
@@ -235,12 +235,12 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
entry = list_entry(list, struct drm_mm_node, fl_entry);
wasted = 0;
- if (entry->size < size)
+ if (entry->size < size)
continue;
if (alignment) {
register unsigned tmp = entry->start % alignment;
- if (tmp)
+ if (tmp)
wasted += alignment - tmp;
}
diff --git a/linux-core/drm_object.c b/linux-core/drm_object.c
index a6d6c0d7..7d2e3a2b 100644
--- a/linux-core/drm_object.c
+++ b/linux-core/drm_object.c
@@ -30,7 +30,7 @@
#include "drmP.h"
-int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
+int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable)
{
struct drm_device *dev = priv->head->dev;
@@ -44,7 +44,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
item->owner = priv;
ret = drm_ht_just_insert_please(&dev->object_hash, &item->hash,
- (unsigned long)item, 32, 0, 0);
+ (unsigned long)item, 31, 0, 0);
if (ret)
return ret;
@@ -56,7 +56,7 @@ int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
}
EXPORT_SYMBOL(drm_add_user_object);
-struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t key)
+struct drm_user_object *drm_lookup_user_object(struct drm_file *priv, uint32_t key)
{
struct drm_device *dev = priv->head->dev;
struct drm_hash_item *hash;
@@ -66,9 +66,9 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
DRM_ASSERT_LOCKED(&dev->struct_mutex);
ret = drm_ht_find_item(&dev->object_hash, key, &hash);
- if (ret) {
+ if (ret)
return NULL;
- }
+
item = drm_hash_entry(hash, struct drm_user_object, hash);
if (priv != item->owner) {
@@ -83,7 +83,7 @@ struct drm_user_object *drm_lookup_user_object(struct drm_file * priv, uint32_t
}
EXPORT_SYMBOL(drm_lookup_user_object);
-static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object * item)
+static void drm_deref_user_object(struct drm_file *priv, struct drm_user_object *item)
{
struct drm_device *dev = priv->head->dev;
int ret;
@@ -95,7 +95,7 @@ static void drm_deref_user_object(struct drm_file * priv, struct drm_user_object
}
}
-static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object * ro,
+static int drm_object_ref_action(struct drm_file *priv, struct drm_user_object *ro,
enum drm_ref_type action)
{
int ret = 0;
@@ -114,7 +114,7 @@ static int drm_object_ref_action(struct drm_file * priv, struct drm_user_object
return ret;
}
-int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenced_object,
+int drm_add_ref_object(struct drm_file *priv, struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
int ret = 0;
@@ -167,12 +167,12 @@ int drm_add_ref_object(struct drm_file * priv, struct drm_user_object * referenc
list_add(&item->list, &priv->refd_objects);
ret = drm_object_ref_action(priv, referenced_object, ref_action);
- out:
+out:
return ret;
}
-struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
- struct drm_user_object * referenced_object,
+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
+ struct drm_user_object *referenced_object,
enum drm_ref_type ref_action)
{
struct drm_hash_item *hash;
@@ -188,8 +188,8 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
}
EXPORT_SYMBOL(drm_lookup_ref_object);
-static void drm_remove_other_references(struct drm_file * priv,
- struct drm_user_object * ro)
+static void drm_remove_other_references(struct drm_file *priv,
+ struct drm_user_object *ro)
{
int i;
struct drm_open_hash *ht;
@@ -205,7 +205,7 @@ static void drm_remove_other_references(struct drm_file * priv,
}
}
-void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item)
+void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item)
{
int ret;
struct drm_user_object *user_object = (struct drm_user_object *) item->hash.key;
@@ -234,9 +234,10 @@ void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item)
}
}
+EXPORT_SYMBOL(drm_remove_ref_object);
-int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
- enum drm_object_type type, struct drm_user_object ** object)
+int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
+ enum drm_object_type type, struct drm_user_object **object)
{
struct drm_device *dev = priv->head->dev;
struct drm_user_object *uo;
@@ -260,12 +261,12 @@ int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
mutex_unlock(&dev->struct_mutex);
*object = uo;
return 0;
- out_err:
+out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
-int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
+int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type)
{
struct drm_device *dev = priv->head->dev;
@@ -287,7 +288,7 @@ int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
drm_remove_ref_object(priv, ro);
mutex_unlock(&dev->struct_mutex);
return 0;
- out_err:
+out_err:
mutex_unlock(&dev->struct_mutex);
return ret;
}
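
A sketch of how a driver exposes an object through the user-object interface reworked above, following the prototypes and the struct_mutex rule documented in drm_objects.h below; my_export_object and its token handling are hypothetical.

static int my_export_object(struct drm_file *priv,
			    struct drm_user_object *uo,
			    int shareable, uint32_t *token)
{
	struct drm_device *dev = priv->head->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_user_object(priv, uo, shareable);
	if (ret == 0)
		*token = (uint32_t)uo->hash.key; /* handle returned to user space */
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
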
diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h
index cea811eb..a2d10b5d 100644
--- a/linux-core/drm_objects.h
+++ b/linux-core/drm_objects.h
@@ -68,12 +68,12 @@ struct drm_user_object {
atomic_t refcount;
int shareable;
struct drm_file *owner;
- void (*ref_struct_locked) (struct drm_file * priv,
- struct drm_user_object * obj,
+ void (*ref_struct_locked) (struct drm_file *priv,
+ struct drm_user_object *obj,
enum drm_ref_type ref_action);
- void (*unref) (struct drm_file * priv, struct drm_user_object * obj,
+ void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
enum drm_ref_type unref_action);
- void (*remove) (struct drm_file * priv, struct drm_user_object * obj);
+ void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
};
/*
@@ -94,29 +94,29 @@ struct drm_ref_object {
* Must be called with the struct_mutex held.
*/
-extern int drm_add_user_object(struct drm_file * priv, struct drm_user_object * item,
+extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
int shareable);
/**
* Must be called with the struct_mutex held.
*/
-extern struct drm_user_object *drm_lookup_user_object(struct drm_file * priv,
+extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
uint32_t key);
/*
* Must be called with the struct_mutex held. May temporarily release it.
*/
-extern int drm_add_ref_object(struct drm_file * priv,
- struct drm_user_object * referenced_object,
+extern int drm_add_ref_object(struct drm_file *priv,
+ struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);
/*
* Must be called with the struct_mutex held.
*/
-struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
- struct drm_user_object * referenced_object,
+struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
+ struct drm_user_object *referenced_object,
enum drm_ref_type ref_action);
/*
* Must be called with the struct_mutex held.
@@ -125,11 +125,11 @@ struct drm_ref_object *drm_lookup_ref_object(struct drm_file * priv,
* This function may temporarily release the struct_mutex.
*/
-extern void drm_remove_ref_object(struct drm_file * priv, struct drm_ref_object * item);
-extern int drm_user_object_ref(struct drm_file * priv, uint32_t user_token,
+extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
+extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type,
- struct drm_user_object ** object);
-extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
+ struct drm_user_object **object);
+extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
enum drm_object_type type);
/***************************************************
@@ -138,7 +138,7 @@ extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token,
struct drm_fence_object {
struct drm_user_object base;
- struct drm_device *dev;
+ struct drm_device *dev;
atomic_t usage;
/*
@@ -153,7 +153,7 @@ struct drm_fence_object {
uint32_t sequence;
uint32_t flush_mask;
uint32_t submitted_flush;
- uint32_t error;
+ uint32_t error;
};
#define _DRM_FENCE_CLASSES 8
@@ -182,40 +182,44 @@ struct drm_fence_driver {
uint32_t flush_diff;
uint32_t sequence_mask;
int lazy_capable;
- int (*has_irq) (struct drm_device * dev, uint32_t fence_class,
+ int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
uint32_t flags);
- int (*emit) (struct drm_device * dev, uint32_t fence_class, uint32_t flags,
- uint32_t * breadcrumb, uint32_t * native_type);
- void (*poke_flush) (struct drm_device * dev, uint32_t fence_class);
+ int (*emit) (struct drm_device *dev, uint32_t fence_class,
+ uint32_t flags, uint32_t *breadcrumb,
+ uint32_t *native_type);
+ void (*poke_flush) (struct drm_device *dev, uint32_t fence_class);
};
extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
- uint32_t sequence, uint32_t type, uint32_t error);
+ uint32_t sequence, uint32_t type,
+ uint32_t error);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
uint32_t sequence);
-extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type);
-extern int drm_fence_object_signaled(struct drm_fence_object * fence,
+extern int drm_fence_object_flush(struct drm_fence_object *fence,
+ uint32_t type);
+extern int drm_fence_object_signaled(struct drm_fence_object *fence,
uint32_t type, int flush);
-extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence);
-extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence);
+extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
+extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
struct drm_fence_object *src);
-extern int drm_fence_object_wait(struct drm_fence_object * fence,
+extern int drm_fence_object_wait(struct drm_fence_object *fence,
int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
uint32_t fence_flags, uint32_t fence_class,
- struct drm_fence_object ** c_fence);
-extern int drm_fence_object_emit(struct drm_fence_object * fence,
+ struct drm_fence_object **c_fence);
+extern int drm_fence_object_emit(struct drm_fence_object *fence,
uint32_t fence_flags, uint32_t class,
uint32_t type);
extern void drm_fence_fill_arg(struct drm_fence_object *fence,
struct drm_fence_arg *arg);
-extern int drm_fence_add_user_object(struct drm_file * priv,
- struct drm_fence_object * fence, int shareable);
+extern int drm_fence_add_user_object(struct drm_file *priv,
+ struct drm_fence_object *fence,
+ int shareable);
extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -242,7 +246,7 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
/*
* The ttm backend GTT interface. (In our case AGP).
* Any similar type of device (PCIE?)
- * needs only to implement these functions to be usable with the "TTM" interface.
+ * needs only to implement these functions to be usable with the TTM interface.
* The AGP backend implementation lives in drm_agpsupport.c
* basically maps these calls to available functions in agpgart.
* Each drm device driver gets an
@@ -257,24 +261,26 @@ extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
struct drm_ttm_backend;
struct drm_ttm_backend_func {
- int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend);
- int (*populate) (struct drm_ttm_backend * backend,
- unsigned long num_pages, struct page ** pages);
- void (*clear) (struct drm_ttm_backend * backend);
- int (*bind) (struct drm_ttm_backend * backend,
- struct drm_bo_mem_reg * bo_mem);
- int (*unbind) (struct drm_ttm_backend * backend);
- void (*destroy) (struct drm_ttm_backend * backend);
+ int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
+ int (*populate) (struct drm_ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page);
+ void (*clear) (struct drm_ttm_backend *backend);
+ int (*bind) (struct drm_ttm_backend *backend,
+ struct drm_bo_mem_reg *bo_mem);
+ int (*unbind) (struct drm_ttm_backend *backend);
+ void (*destroy) (struct drm_ttm_backend *backend);
};
-typedef struct drm_ttm_backend {
- struct drm_device *dev;
- uint32_t flags;
- struct drm_ttm_backend_func *func;
-} drm_ttm_backend_t;
+struct drm_ttm_backend {
+ struct drm_device *dev;
+ uint32_t flags;
+ struct drm_ttm_backend_func *func;
+};
struct drm_ttm {
+ struct page *dummy_read_page;
struct page **pages;
uint32_t page_flags;
unsigned long num_pages;
@@ -292,22 +298,28 @@ struct drm_ttm {
};
-extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem);
-extern void drm_ttm_unbind(struct drm_ttm * ttm);
-extern void drm_ttm_evict(struct drm_ttm * ttm);
-extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
-extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
+extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
+ uint32_t page_flags,
+ struct page *dummy_read_page);
+extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
+extern void drm_ttm_unbind(struct drm_ttm *ttm);
+extern void drm_ttm_evict(struct drm_ttm *ttm);
+extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
+extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
extern void drm_ttm_cache_flush(void);
-extern int drm_ttm_populate(struct drm_ttm * ttm);
+extern int drm_ttm_populate(struct drm_ttm *ttm);
+extern int drm_ttm_set_user(struct drm_ttm *ttm,
+ struct task_struct *tsk,
+ unsigned long start,
+ unsigned long num_pages);
/*
- * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
- * which calls this function iff there are no vmas referencing it anymore. Otherwise it is called
- * when the last vma exits.
+ * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
+ * this which calls this function iff there are no vmas referencing it anymore.
+ * Otherwise it is called when the last vma exits.
*/
-extern int drm_destroy_ttm(struct drm_ttm * ttm);
+extern int drm_ttm_destroy(struct drm_ttm *ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
@@ -320,11 +332,48 @@ extern int drm_destroy_ttm(struct drm_ttm * ttm);
* Page flags.
*/
-#define DRM_TTM_PAGE_UNCACHED 0x01
-#define DRM_TTM_PAGE_USED 0x02
-#define DRM_TTM_PAGE_BOUND 0x04
-#define DRM_TTM_PAGE_PRESENT 0x08
-#define DRM_TTM_PAGE_VMALLOC 0x10
+/*
+ * This ttm should not be cached by the CPU
+ */
+#define DRM_TTM_PAGE_UNCACHED (1 << 0)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
+#define DRM_TTM_PAGE_USED (1 << 1)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
+#define DRM_TTM_PAGE_BOUND (1 << 2)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was
+ */
+#define DRM_TTM_PAGE_PRESENT (1 << 3)
+/*
+ * The array of page pointers was allocated with vmalloc
+ * instead of drm_calloc.
+ */
+#define DRM_TTM_PAGE_VMALLOC (1 << 4)
+/*
+ * This ttm is mapped from user space
+ */
+#define DRM_TTM_PAGE_USER (1 << 5)
+/*
+ * This ttm will be written to by the GPU
+ */
+#define DRM_TTM_PAGE_WRITE (1 << 6)
+/*
+ * This ttm was mapped to the GPU, and so the contents may have
+ * been modified
+ */
+#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
+/*
+ * This flag is not used at this time; I don't know what the
+ * intent was.
+ */
+#define DRM_TTM_PAGE_USER_DMA (1 << 8)
/***************************************************
* Buffer objects. (drm_bo.c, drm_bo_move.c)
@@ -336,16 +385,50 @@ struct drm_bo_mem_reg {
unsigned long num_pages;
uint32_t page_alignment;
uint32_t mem_type;
+ /*
+ * Current buffer status flags, indicating
+ * where the buffer is located and which
+ * access modes are in effect
+ */
uint64_t flags;
- uint64_t mask;
- uint32_t desired_tile_stride;
- uint32_t hw_tile_stride;
+ /**
+ * These are the flags proposed for
+ * a validate operation. If the
+ * validate succeeds, they'll get moved
+ * into the flags field
+ */
+ uint64_t proposed_flags;
+
+ uint32_t desired_tile_stride;
+ uint32_t hw_tile_stride;
};
enum drm_bo_type {
- drm_bo_type_dc,
+ /*
+ * drm_bo_type_device are 'normal' drm allocations,
+ * pages are allocated from within the kernel automatically
+ * and the objects can be mmap'd from the drm device. Each
+ * drm_bo_type_device object has a unique name which can be
+ * used by other processes to share access to the underlying
+ * buffer.
+ */
+ drm_bo_type_device,
+ /*
+ * drm_bo_type_user are buffers of pages that already exist
+ * in the process address space. They are more limited than
+ * drm_bo_type_device buffers in that they must always
+ * remain cached (as we assume the user pages are mapped cached),
+ * and they are not sharable to other processes through DRM
+ * (although, regular shared memory should still work fine).
+ */
drm_bo_type_user,
- drm_bo_type_kernel, /* for initial kernel allocations */
+ /*
+ * drm_bo_type_kernel are buffers that exist solely for use
+ * within the kernel. The pages cannot be mapped into the
+ * process. One obvious use would be for the ring
+ * buffer where user access would not (ideally) be required.
+ */
+ drm_bo_type_kernel,
};
struct drm_buffer_object {
@@ -369,8 +452,8 @@ struct drm_buffer_object {
uint32_t fence_type;
uint32_t fence_class;
- uint32_t new_fence_type;
- uint32_t new_fence_class;
+ uint32_t new_fence_type;
+ uint32_t new_fence_class;
struct drm_fence_object *fence;
uint32_t priv_flags;
wait_queue_head_t event_queue;
@@ -409,7 +492,7 @@ struct drm_mem_type_manager {
struct list_head pinned;
uint32_t flags;
uint32_t drm_bus_maptype;
- unsigned long gpu_offset;
+ unsigned long gpu_offset;
unsigned long io_offset;
unsigned long io_size;
void *io_addr;
@@ -431,8 +514,8 @@ struct drm_bo_lock {
#define _DRM_FLAG_MEMTYPE_CSELECT 0x00000020 /* Select caching */
struct drm_buffer_manager {
- struct drm_bo_lock bm_lock;
- struct mutex evict_mutex;
+ struct drm_bo_lock bm_lock;
+ struct mutex evict_mutex;
int nice_mode;
int initialized;
struct drm_file *last_to_validate;
@@ -447,6 +530,7 @@ struct drm_buffer_manager {
uint32_t fence_type;
unsigned long cur_pages;
atomic_t count;
+ struct page *dummy_read_page;
};
struct drm_bo_driver {
@@ -455,15 +539,42 @@ struct drm_bo_driver {
uint32_t num_mem_type_prio;
uint32_t num_mem_busy_prio;
struct drm_ttm_backend *(*create_ttm_backend_entry)
- (struct drm_device * dev);
+ (struct drm_device *dev);
int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
- uint32_t * type);
- int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
- int (*init_mem_type) (struct drm_device * dev, uint32_t type,
- struct drm_mem_type_manager * man);
- uint32_t(*evict_mask) (struct drm_buffer_object *bo);
- int (*move) (struct drm_buffer_object * bo,
- int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+ uint32_t *type);
+ int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
+ int (*init_mem_type) (struct drm_device *dev, uint32_t type,
+ struct drm_mem_type_manager *man);
+ /*
+ * evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+ uint64_t(*evict_flags) (struct drm_buffer_object *bo);
+ /*
+ * move:
+ *
+ * @bo: the buffer to move
+ *
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ *
+ * @no_wait: whether this should give up and return -EBUSY
+ * if this move would require sleeping
+ *
+ * @new_mem: the new memory region receiving the buffer
+ *
+ * Move a buffer between two memory regions.
+ */
+ int (*move) (struct drm_buffer_object *bo,
+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
+ /*
+ * ttm_cache_flush
+ */
void (*ttm_cache_flush)(struct drm_ttm *ttm);
};
@@ -488,49 +599,47 @@ extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_f
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
- struct drm_bo_mem_reg * mem,
+ struct drm_bo_mem_reg *mem,
unsigned long *bus_base,
unsigned long *bus_offset,
unsigned long *bus_size);
-extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
-extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
-extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo);
+extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
+extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
extern void drm_putback_buffer_objects(struct drm_device *dev);
-extern int drm_fence_buffer_objects(struct drm_device * dev,
+extern int drm_fence_buffer_objects(struct drm_device *dev,
struct list_head *list,
uint32_t fence_flags,
- struct drm_fence_object * fence,
- struct drm_fence_object ** used_fence);
-extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
+ struct drm_fence_object *fence,
+ struct drm_fence_object **used_fence);
+extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
- enum drm_bo_type type, uint64_t mask,
+ enum drm_bo_type type, uint64_t flags,
uint32_t hint, uint32_t page_alignment,
unsigned long buffer_start,
struct drm_buffer_object **bo);
-extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
+extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
int no_wait);
-extern int drm_bo_mem_space(struct drm_buffer_object * bo,
- struct drm_bo_mem_reg * mem, int no_wait);
-extern int drm_bo_move_buffer(struct drm_buffer_object * bo,
+extern int drm_bo_mem_space(struct drm_buffer_object *bo,
+ struct drm_bo_mem_reg *mem, int no_wait);
+extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
uint64_t new_mem_flags,
int no_wait, int move_unfenced);
-extern int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type);
-extern int drm_bo_init_mm(struct drm_device * dev, unsigned type,
+extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type);
+extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
unsigned long p_offset, unsigned long p_size);
-extern int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
- uint32_t fence_class, uint64_t flags,
- uint64_t mask, uint32_t hint,
- int use_old_fence_class,
- struct drm_bo_info_rep * rep,
+extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
+ uint64_t flags, uint64_t mask, uint32_t hint,
+ uint32_t fence_class, int use_old_fence_class,
+ struct drm_bo_info_rep *rep,
struct drm_buffer_object **bo_rep);
-extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * file_priv,
+extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
uint32_t handle,
int check_owner);
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
uint64_t flags, uint64_t mask, uint32_t hint,
uint32_t fence_class,
- int no_wait,
struct drm_bo_info_rep *rep);
/*
@@ -538,18 +647,17 @@ extern int drm_bo_do_validate(struct drm_buffer_object *bo,
* drm_bo_move.c
*/
-extern int drm_bo_move_ttm(struct drm_buffer_object * bo,
- int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
-extern int drm_bo_move_memcpy(struct drm_buffer_object * bo,
+extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
+ int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem);
+extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
int evict,
- int no_wait, struct drm_bo_mem_reg * new_mem);
-extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
- int evict,
- int no_wait,
- uint32_t fence_class,
- uint32_t fence_type,
+ int no_wait, struct drm_bo_mem_reg *new_mem);
+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
+ int evict, int no_wait,
+ uint32_t fence_class, uint32_t fence_type,
uint32_t fence_flags,
- struct drm_bo_mem_reg * new_mem);
+ struct drm_bo_mem_reg *new_mem);
extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
extern unsigned long drm_bo_offset_end(unsigned long offset,
unsigned long end);
@@ -615,7 +723,7 @@ extern void drm_regs_init(struct drm_reg_manager *manager,
void (*reg_destroy)(struct drm_reg *));
/*
- * drm_bo_lock.c
+ * drm_bo_lock.c
* Simple replacement for the hardware lock on buffer manager init and clean.
*/
@@ -623,10 +731,10 @@ extern void drm_regs_init(struct drm_reg_manager *manager,
extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
extern int drm_bo_read_lock(struct drm_bo_lock *lock);
-extern int drm_bo_write_lock(struct drm_bo_lock *lock,
+extern int drm_bo_write_lock(struct drm_bo_lock *lock,
struct drm_file *file_priv);
-extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
+extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
struct drm_file *file_priv);
#ifdef CONFIG_DEBUG_MUTEXES
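
A sketch of the two newly documented drm_bo_driver callbacks, using only the signatures and comments added above; the flag combination and the memcpy fallback are assumptions, not taken from any particular driver.

static uint64_t my_evict_flags(struct drm_buffer_object *bo)
{
	/*
	 * Whatever is returned here lands in mem.proposed_flags and,
	 * once the move completes, in mem.flags: evict to cached
	 * system memory.
	 */
	return DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED;
}

static int my_move(struct drm_buffer_object *bo, int evict, int no_wait,
		   struct drm_bo_mem_reg *new_mem)
{
	/* No hardware blitter in this sketch; fall back to a CPU copy. */
	return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
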
diff --git a/linux-core/drm_os_linux.h b/linux-core/drm_os_linux.h
index 2688479a..8921944e 100644
--- a/linux-core/drm_os_linux.h
+++ b/linux-core/drm_os_linux.h
@@ -92,9 +92,9 @@ static __inline__ int mtrr_del(int reg, unsigned long base, unsigned long size)
#define DRM_COPY_TO_USER(arg1, arg2, arg3) \
copy_to_user(arg1, arg2, arg3)
/* Macros for copyfrom user, but checking readability only once */
-#define DRM_VERIFYAREA_READ( uaddr, size ) \
+#define DRM_VERIFYAREA_READ( uaddr, size ) \
(access_ok( VERIFY_READ, uaddr, size) ? 0 : -EFAULT)
-#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
+#define DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_from_user(arg1, arg2, arg3)
#define DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3) \
__copy_to_user(arg1, arg2, arg3)
@@ -129,3 +129,17 @@ do { \
#define DRM_WAKEUP( queue ) wake_up_interruptible( queue )
#define DRM_INIT_WAITQUEUE( queue ) init_waitqueue_head( queue )
+
+/** Type for the OS's non-sleepable mutex lock */
+#define DRM_SPINTYPE spinlock_t
+/**
+ * Initialize the lock for use. name is an optional string describing the
+ * lock
+ */
+#define DRM_SPININIT(l,name) spin_lock_init(l)
+#define DRM_SPINUNINIT(l)
+#define DRM_SPINLOCK(l) spin_lock(l)
+#define DRM_SPINUNLOCK(l) spin_unlock(l)
+#define DRM_SPINLOCK_IRQSAVE(l, _flags) spin_lock_irqsave(l, _flags)
+#define DRM_SPINUNLOCK_IRQRESTORE(l, _flags) spin_unlock_irqrestore(l, _flags)
+#define DRM_SPINLOCK_ASSERT(l) do {} while (0)
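
A minimal usage sketch for the spinlock wrappers introduced above; the surrounding structure and field names are hypothetical.

struct my_state {
	DRM_SPINTYPE lock;
	int counter;
};

static void my_state_init(struct my_state *s)
{
	DRM_SPININIT(&s->lock, "my_state");
	s->counter = 0;
}

static void my_state_bump(struct my_state *s)
{
	DRM_SPINLOCK(&s->lock);
	s->counter++;
	DRM_SPINUNLOCK(&s->lock);
}
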
diff --git a/linux-core/drm_pci.c b/linux-core/drm_pci.c
index a608eed3..7569286c 100644
--- a/linux-core/drm_pci.c
+++ b/linux-core/drm_pci.c
@@ -123,7 +123,7 @@ EXPORT_SYMBOL(drm_pci_alloc);
*
* This function is for internal use in the Linux-specific DRM core code.
*/
-void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
+void __drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
unsigned long addr;
size_t sz;
@@ -167,7 +167,7 @@ void __drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
/**
* \brief Free a PCI consistent memory block
*/
-void drm_pci_free(struct drm_device * dev, drm_dma_handle_t *dmah)
+void drm_pci_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
__drm_pci_free(dev, dmah);
kfree(dmah);
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index 08bf99d6..3012c5b0 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -239,10 +239,10 @@ static int drm__vm_info(char *buf, char **start, off_t offset, int request,
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
- i,
- map->offset,
- map->size, type, map->flags,
- (unsigned long) r_list->user_token);
+ i,
+ map->offset,
+ map->size, type, map->flags,
+ (unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
diff --git a/linux-core/drm_regman.c b/linux-core/drm_regman.c
new file mode 100644
index 00000000..aa117323
--- /dev/null
+++ b/linux-core/drm_regman.c
@@ -0,0 +1,200 @@
+/**************************************************************************
+ * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * An allocate-fence manager implementation intended for sets of base-registers
+ * or tiling-registers.
+ */
+
+#include "drmP.h"
+
+/*
+ * Allocate a compatible register and put it on the unfenced list.
+ */
+
+int drm_regs_alloc(struct drm_reg_manager *manager,
+ const void *data,
+ uint32_t fence_class,
+ uint32_t fence_type,
+ int interruptible, int no_wait, struct drm_reg **reg)
+{
+ struct drm_reg *entry, *next_entry;
+ int ret;
+
+ *reg = NULL;
+
+ /*
+ * Search the unfenced list.
+ */
+
+ list_for_each_entry(entry, &manager->unfenced, head) {
+ if (manager->reg_reusable(entry, data)) {
+ entry->new_fence_type |= fence_type;
+ goto out;
+ }
+ }
+
+ /*
+ * Search the lru list.
+ */
+
+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
+ struct drm_fence_object *fence = entry->fence;
+ if (fence->fence_class == fence_class &&
+ (entry->fence_type & fence_type) == entry->fence_type &&
+ manager->reg_reusable(entry, data)) {
+ list_del(&entry->head);
+ entry->new_fence_type = fence_type;
+ list_add_tail(&entry->head, &manager->unfenced);
+ goto out;
+ }
+ }
+
+ /*
+ * Search the free list.
+ */
+
+ list_for_each_entry(entry, &manager->free, head) {
+ list_del(&entry->head);
+ entry->new_fence_type = fence_type;
+ list_add_tail(&entry->head, &manager->unfenced);
+ goto out;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ /*
+ * Go back to the lru list and try to expire fences.
+ */
+
+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
+ BUG_ON(!entry->fence);
+ ret = drm_fence_object_wait(entry->fence, 0, !interruptible,
+ entry->fence_type);
+ if (ret)
+ return ret;
+
+ drm_fence_usage_deref_unlocked(&entry->fence);
+ list_del(&entry->head);
+ entry->new_fence_type = fence_type;
+ list_add_tail(&entry->head, &manager->unfenced);
+ goto out;
+ }
+
+ /*
+ * Oops. All registers are used up :(.
+ */
+
+ return -EBUSY;
+out:
+ *reg = entry;
+ return 0;
+}
+EXPORT_SYMBOL(drm_regs_alloc);
+
+void drm_regs_fence(struct drm_reg_manager *manager,
+ struct drm_fence_object *fence)
+{
+ struct drm_reg *entry;
+ struct drm_reg *next_entry;
+
+ if (!fence) {
+
+ /*
+ * Old fence (if any) is still valid.
+ * Put back on free and lru lists.
+ */
+
+ list_for_each_entry_safe_reverse(entry, next_entry,
+ &manager->unfenced, head) {
+ list_del(&entry->head);
+ list_add(&entry->head, (entry->fence) ?
+ &manager->lru : &manager->free);
+ }
+ } else {
+
+ /*
+ * Fence with a new fence and put on lru list.
+ */
+
+ list_for_each_entry_safe(entry, next_entry, &manager->unfenced,
+ head) {
+ list_del(&entry->head);
+ if (entry->fence)
+ drm_fence_usage_deref_unlocked(&entry->fence);
+ drm_fence_reference_unlocked(&entry->fence, fence);
+
+ entry->fence_type = entry->new_fence_type;
+ BUG_ON((entry->fence_type & fence->type) !=
+ entry->fence_type);
+
+ list_add_tail(&entry->head, &manager->lru);
+ }
+ }
+}
+EXPORT_SYMBOL(drm_regs_fence);
+
+void drm_regs_free(struct drm_reg_manager *manager)
+{
+ struct drm_reg *entry;
+ struct drm_reg *next_entry;
+
+ drm_regs_fence(manager, NULL);
+
+ list_for_each_entry_safe(entry, next_entry, &manager->free, head) {
+ list_del(&entry->head);
+ manager->reg_destroy(entry);
+ }
+
+ list_for_each_entry_safe(entry, next_entry, &manager->lru, head) {
+
+ (void)drm_fence_object_wait(entry->fence, 1, 1,
+ entry->fence_type);
+ list_del(&entry->head);
+ drm_fence_usage_deref_unlocked(&entry->fence);
+ manager->reg_destroy(entry);
+ }
+}
+EXPORT_SYMBOL(drm_regs_free);
+
+void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg)
+{
+ reg->fence = NULL;
+ list_add_tail(&reg->head, &manager->free);
+}
+EXPORT_SYMBOL(drm_regs_add);
+
+void drm_regs_init(struct drm_reg_manager *manager,
+ int (*reg_reusable) (const struct drm_reg *, const void *),
+ void (*reg_destroy) (struct drm_reg *))
+{
+ INIT_LIST_HEAD(&manager->free);
+ INIT_LIST_HEAD(&manager->lru);
+ INIT_LIST_HEAD(&manager->unfenced);
+ manager->reg_reusable = reg_reusable;
+ manager->reg_destroy = reg_destroy;
+}
+EXPORT_SYMBOL(drm_regs_init);
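
A sketch of how a driver might seed and use the new register manager, assuming only the functions defined in this file and the callback signatures passed to drm_regs_init(); the my_reg wrapper, its value field, and the reuse rule are hypothetical.

struct my_reg {
	struct drm_reg base;
	uint32_t value;
};

static int my_reg_reusable(const struct drm_reg *reg, const void *data)
{
	/* A register is reusable if it already holds the requested value. */
	return ((const struct my_reg *)reg)->value == *(const uint32_t *)data;
}

static void my_reg_destroy(struct drm_reg *reg)
{
	kfree(container_of(reg, struct my_reg, base));
}

static int my_regs_setup(struct drm_reg_manager *manager, int count)
{
	int i;

	drm_regs_init(manager, my_reg_reusable, my_reg_destroy);

	for (i = 0; i < count; i++) {
		struct my_reg *reg = kzalloc(sizeof(*reg), GFP_KERNEL);

		if (!reg) {
			drm_regs_free(manager);
			return -ENOMEM;
		}
		drm_regs_add(manager, &reg->base);
	}
	return 0;
}

Allocation then goes through drm_regs_alloc() with the same data pointer, and drm_regs_fence() is called once the commands using the registers have been submitted.
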
diff --git a/linux-core/drm_scatter.c b/linux-core/drm_scatter.c
index 3c0f672e..77b9f95d 100644
--- a/linux-core/drm_scatter.c
+++ b/linux-core/drm_scatter.c
@@ -68,7 +68,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
struct drm_sg_mem *entry;
unsigned long pages, i, j;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EINVAL;
@@ -82,7 +82,7 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
memset(entry, 0, sizeof(*entry));
pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
- DRM_DEBUG("sg size=%ld pages=%ld\n", request->size, pages);
+ DRM_DEBUG("size=%ld pages=%ld\n", request->size, pages);
entry->pages = pages;
entry->pagelist = drm_alloc(pages * sizeof(*entry->pagelist),
@@ -123,10 +123,10 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
entry->handle = ScatterHandle((unsigned long)entry->virtual);
- DRM_DEBUG("sg alloc handle = %08lx\n", entry->handle);
- DRM_DEBUG("sg alloc virtual = %p\n", entry->virtual);
+ DRM_DEBUG("handle = %08lx\n", entry->handle);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
- for (i = (unsigned long)entry->virtual, j = 0; j < pages;
+ for (i = (unsigned long)entry->virtual, j = 0; j < pages;
i += PAGE_SIZE, j++) {
entry->pagelist[j] = vmalloc_to_page((void *)i);
if (!entry->pagelist[j])
@@ -211,7 +211,7 @@ int drm_sg_free(struct drm_device *dev, void *data,
if (!entry || entry->handle != request->handle)
return -EINVAL;
- DRM_DEBUG("sg free virtual = %p\n", entry->virtual);
+ DRM_DEBUG("virtual = %p\n", entry->virtual);
drm_sg_cleanup(entry);
diff --git a/linux-core/drm_sman.c b/linux-core/drm_sman.c
index 118e82ae..8421a939 100644
--- a/linux-core/drm_sman.c
+++ b/linux-core/drm_sman.c
@@ -264,7 +264,8 @@ int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
return -EINVAL;
- memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item, user_hash);
+ memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
+ user_hash);
drm_sman_free(memblock_item);
return 0;
}
diff --git a/linux-core/drm_stub.c b/linux-core/drm_stub.c
index 1d88d375..00a24521 100644
--- a/linux-core/drm_stub.c
+++ b/linux-core/drm_stub.c
@@ -55,8 +55,8 @@ struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
{
int retcode;
@@ -75,7 +75,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
mutex_init(&dev->bm.evict_mutex);
idr_init(&dev->drw_idr);
-
+
dev->pdev = pdev;
dev->pci_device = pdev->device;
dev->pci_vendor = pdev->vendor;
@@ -84,6 +84,7 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
dev->hose = pdev->sysdata;
#endif
dev->irq = pdev->irq;
+ dev->irq_enabled = 0;
if (drm_ht_create(&dev->map_hash, DRM_MAP_HASH_ORDER)) {
return -ENOMEM;
@@ -111,10 +112,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
dev->driver = driver;
- if (dev->driver->load)
- if ((retcode = dev->driver->load(dev, ent->driver_data)))
- goto error_out_unreg;
-
if (drm_core_has_AGP(dev)) {
if (drm_device_is_agp(dev))
dev->agp = drm_agp_init(dev);
@@ -134,6 +131,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
}
}
+ if (dev->driver->load)
+ if ((retcode = dev->driver->load(dev, ent->driver_data)))
+ goto error_out_unreg;
+
+
retcode = drm_ctxbitmap_init(dev);
if (retcode) {
DRM_ERROR("Cannot allocate memory for context bitmap.\n");
@@ -217,7 +219,7 @@ err_g1:
* Try and register, if we fail to register, backout previous work.
*/
int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
+ struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -317,7 +319,7 @@ int drm_put_head(struct drm_head * head)
drm_proc_cleanup(minor, drm_proc_root, head->dev_root);
drm_sysfs_device_remove(head->dev);
- *head = (struct drm_head){.dev = NULL};
+ *head = (struct drm_head) {.dev = NULL};
drm_heads[minor] = NULL;
return 0;
diff --git a/linux-core/drm_sysfs.c b/linux-core/drm_sysfs.c
index 6f8623ce..3aaac11b 100644
--- a/linux-core/drm_sysfs.c
+++ b/linux-core/drm_sysfs.c
@@ -89,8 +89,10 @@ struct class *drm_sysfs_create(struct module *owner, char *name)
goto err_out;
}
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
class->suspend = drm_sysfs_suspend;
class->resume = drm_sysfs_resume;
+#endif
err = class_create_file(class, &class_attr_version);
if (err)
@@ -160,12 +162,7 @@ int drm_sysfs_device_add(struct drm_device *dev, struct drm_head *head)
dev->dev.parent = &dev->pdev->dev;
dev->dev.class = drm_class;
dev->dev.release = drm_sysfs_device_release;
- /*
- * This will actually add the major:minor file so that udev
- * will create the device node. We don't want to do that just
- * yet...
- */
- /* dev->dev.devt = head->device; */
+ dev->dev.devt = head->device;
snprintf(dev->dev.bus_id, BUS_ID_SIZE, "card%d", head->minor);
err = device_register(&dev->dev);
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index df9e7e44..a9d87338 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -46,7 +46,7 @@ EXPORT_SYMBOL(drm_ttm_cache_flush);
* Use kmalloc if possible. Otherwise fall back to vmalloc.
*/
-static void ttm_alloc_pages(struct drm_ttm * ttm)
+static void drm_ttm_alloc_pages(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
ttm->pages = NULL;
@@ -54,20 +54,19 @@ static void ttm_alloc_pages(struct drm_ttm * ttm)
if (drm_alloc_memctl(size))
return;
- if (size <= PAGE_SIZE) {
+ if (size <= PAGE_SIZE)
ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
- }
+
if (!ttm->pages) {
ttm->pages = vmalloc_user(size);
if (ttm->pages)
ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
}
- if (!ttm->pages) {
+ if (!ttm->pages)
drm_free_memctl(size);
- }
}
-static void ttm_free_pages(struct drm_ttm * ttm)
+static void drm_ttm_free_pages(struct drm_ttm *ttm)
{
unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
@@ -85,17 +84,15 @@ static struct page *drm_ttm_alloc_page(void)
{
struct page *page;
- if (drm_alloc_memctl(PAGE_SIZE)) {
+ if (drm_alloc_memctl(PAGE_SIZE))
return NULL;
- }
+
page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
if (!page) {
drm_free_memctl(PAGE_SIZE);
return NULL;
}
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- SetPageLocked(page);
-#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
SetPageReserved(page);
#endif
return page;
@@ -106,7 +103,7 @@ static struct page *drm_ttm_alloc_page(void)
* for range of pages in a ttm.
*/
-static int drm_set_caching(struct drm_ttm * ttm, int noncached)
+static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
{
int i;
struct page **cur_page;
@@ -139,15 +136,65 @@ static int drm_set_caching(struct drm_ttm * ttm, int noncached)
return 0;
}
-/*
- * Free all resources associated with a ttm.
- */
-int drm_destroy_ttm(struct drm_ttm * ttm)
+static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
+ int write;
+ int dirty;
+ struct page *page;
+ int i;
+ BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
+ write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);
+ dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = ttm->pages[i];
+ if (page == NULL)
+ continue;
+
+ if (page == ttm->dummy_read_page) {
+ BUG_ON(write);
+ continue;
+ }
+
+ if (write && dirty && !PageReserved(page))
+ set_page_dirty_lock(page);
+
+ ttm->pages[i] = NULL;
+ put_page(page);
+ }
+}
+
+static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
+{
int i;
+ struct drm_buffer_manager *bm = &ttm->dev->bm;
struct page **cur_page;
+
+ for (i = 0; i < ttm->num_pages; ++i) {
+ cur_page = ttm->pages + i;
+ if (*cur_page) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+ ClearPageReserved(*cur_page);
+#endif
+ if (page_count(*cur_page) != 1)
+ DRM_ERROR("Erroneous page count. Leaking pages.\n");
+ if (page_mapped(*cur_page))
+ DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
+ __free_page(*cur_page);
+ drm_free_memctl(PAGE_SIZE);
+ --bm->cur_pages;
+ }
+ }
+}
+
+/*
+ * Free all resources associated with a ttm.
+ */
+
+int drm_ttm_destroy(struct drm_ttm *ttm)
+{
struct drm_ttm_backend *be;
if (!ttm)
@@ -160,39 +207,22 @@ int drm_destroy_ttm(struct drm_ttm * ttm)
}
if (ttm->pages) {
- struct drm_buffer_manager *bm = &ttm->dev->bm;
if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
- drm_set_caching(ttm, 0);
+ drm_ttm_set_caching(ttm, 0);
- for (i = 0; i < ttm->num_pages; ++i) {
- cur_page = ttm->pages + i;
- if (*cur_page) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
- unlock_page(*cur_page);
-#else
- ClearPageReserved(*cur_page);
-#endif
- if (page_count(*cur_page) != 1) {
- DRM_ERROR("Erroneous page count. "
- "Leaking pages.\n");
- }
- if (page_mapped(*cur_page)) {
- DRM_ERROR("Erroneous map count. "
- "Leaking page mappings.\n");
- }
- __free_page(*cur_page);
- drm_free_memctl(PAGE_SIZE);
- --bm->cur_pages;
- }
- }
- ttm_free_pages(ttm);
+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
+ drm_ttm_free_user_pages(ttm);
+ else
+ drm_ttm_free_alloced_pages(ttm);
+
+ drm_ttm_free_pages(ttm);
}
drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
return 0;
}
-struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
+struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
{
struct page *p;
struct drm_buffer_manager *bm = &ttm->dev->bm;
@@ -209,7 +239,56 @@ struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
}
EXPORT_SYMBOL(drm_ttm_get_page);
-int drm_ttm_populate(struct drm_ttm * ttm)
+/**
+ * drm_ttm_set_user:
+ *
+ * @ttm: the ttm to map pages to. This must always be
+ * a freshly created ttm.
+ *
+ * @tsk: a pointer to the address space from which to map
+ * pages.
+ *
+ * @start: the starting address of the user range to map
+ *
+ * @num_pages: the number of pages to map
+ *
+ * Map a range of user addresses to a new ttm object. This
+ * provides access to user memory from the graphics device.
+ */
+int drm_ttm_set_user(struct drm_ttm *ttm,
+ struct task_struct *tsk,
+ unsigned long start,
+ unsigned long num_pages)
+{
+ struct mm_struct *mm = tsk->mm;
+ int ret;
+ int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;
+
+ BUG_ON(num_pages != ttm->num_pages);
+ BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);
+
+ down_read(&mm->mmap_sem);
+ ret = get_user_pages(tsk, mm, start, num_pages,
+ write, 0, ttm->pages, NULL);
+ up_read(&mm->mmap_sem);
+
+ if (ret != num_pages && write) {
+ drm_ttm_free_user_pages(ttm);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/**
+ * drm_ttm_populate:
+ *
+ * @ttm: the object to allocate pages for
+ *
+ * Allocate pages for all unset page entries, then
+ * call the backend to create the hardware mappings
+ */
+int drm_ttm_populate(struct drm_ttm *ttm)
{
struct page *page;
unsigned long i;
@@ -219,21 +298,32 @@ int drm_ttm_populate(struct drm_ttm * ttm)
return 0;
be = ttm->be;
- for (i = 0; i < ttm->num_pages; ++i) {
- page = drm_ttm_get_page(ttm, i);
- if (!page)
- return -ENOMEM;
+ if (ttm->page_flags & DRM_TTM_PAGE_WRITE) {
+ for (i = 0; i < ttm->num_pages; ++i) {
+ page = drm_ttm_get_page(ttm, i);
+ if (!page)
+ return -ENOMEM;
+ }
}
- be->func->populate(be, ttm->num_pages, ttm->pages);
+ be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
ttm->state = ttm_unbound;
return 0;
}
-/*
- * Initialize a ttm.
+/**
+ * drm_ttm_create:
+ *
+ * @dev: the drm_device
+ *
+ * @size: The size (in bytes) of the desired object
+ *
+ * @page_flags: various DRM_TTM_PAGE_* flags. See drm_objects.h.
+ *
+ * Allocate and initialize a ttm, leaving it unpopulated at this time
*/
-struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)
+struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
+ uint32_t page_flags, struct page *dummy_read_page)
{
struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
struct drm_ttm *ttm;
@@ -251,21 +341,23 @@ struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)
ttm->destroy = 0;
ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- ttm->page_flags = 0;
+ ttm->page_flags = page_flags;
+
+ ttm->dummy_read_page = dummy_read_page;
/*
* Account also for AGP module memory usage.
*/
- ttm_alloc_pages(ttm);
+ drm_ttm_alloc_pages(ttm);
if (!ttm->pages) {
- drm_destroy_ttm(ttm);
+ drm_ttm_destroy(ttm);
DRM_ERROR("Failed allocating page table\n");
return NULL;
}
ttm->be = bo_driver->create_ttm_backend_entry(dev);
if (!ttm->be) {
- drm_destroy_ttm(ttm);
+ drm_ttm_destroy(ttm);
DRM_ERROR("Failed creating ttm backend entry\n");
return NULL;
}
@@ -273,11 +365,16 @@ struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)
return ttm;
}
-/*
- * Unbind a ttm region from the aperture.
+/**
+ * drm_ttm_evict:
+ *
+ * @ttm: the object to be unbound from the aperture.
+ *
+ * Transition a ttm from bound to evicted, where it
+ * isn't present in the aperture, but various caches may
+ * not be consistent.
*/
-
-void drm_ttm_evict(struct drm_ttm * ttm)
+void drm_ttm_evict(struct drm_ttm *ttm)
{
struct drm_ttm_backend *be = ttm->be;
int ret;
@@ -290,19 +387,34 @@ void drm_ttm_evict(struct drm_ttm * ttm)
ttm->state = ttm_evicted;
}
-void drm_ttm_fixup_caching(struct drm_ttm * ttm)
+/**
+ * drm_ttm_fixup_caching:
+ *
+ * @ttm: the object to set unbound
+ *
+ * XXX this function is misnamed. Transition a ttm from evicted to
+ * unbound, flushing caches as appropriate.
+ */
+void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{
if (ttm->state == ttm_evicted) {
struct drm_ttm_backend *be = ttm->be;
- if (be->func->needs_ub_cache_adjust(be)) {
- drm_set_caching(ttm, 0);
- }
+ if (be->func->needs_ub_cache_adjust(be))
+ drm_ttm_set_caching(ttm, 0);
ttm->state = ttm_unbound;
}
}
-void drm_ttm_unbind(struct drm_ttm * ttm)
+/**
+ * drm_ttm_unbind:
+ *
+ * @ttm: the object to unbind from the graphics device
+ *
+ * Unbind an object from the aperture. This removes the mappings
+ * from the graphics device and flushes caches if necessary.
+ */
+void drm_ttm_unbind(struct drm_ttm *ttm)
{
if (ttm->state == ttm_bound)
drm_ttm_evict(ttm);
@@ -310,7 +422,19 @@ void drm_ttm_unbind(struct drm_ttm * ttm)
drm_ttm_fixup_caching(ttm);
}
-int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
+/**
+ * drm_ttm_bind:
+ *
+ * @ttm: the ttm object to bind to the graphics device
+ *
+ * @bo_mem: the aperture memory region which will hold the object
+ *
+ * Bind a ttm object to the aperture. This ensures that the necessary
+ * pages are allocated, flushes CPU caches as needed and marks the
+ * ttm as DRM_TTM_PAGE_USER_DIRTY to indicate that it may have been
+ * modified by the GPU
+ */
+int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
struct drm_bo_driver *bo_driver = ttm->dev->driver->bo_driver;
int ret = 0;
@@ -327,21 +451,22 @@ int drm_bind_ttm(struct drm_ttm * ttm, struct drm_bo_mem_reg *bo_mem)
if (ret)
return ret;
- if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED)) {
- drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
- } else if ((bo_mem->flags & DRM_BO_FLAG_CACHED) &&
+ if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
+ drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
+ else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
bo_driver->ttm_cache_flush)
bo_driver->ttm_cache_flush(ttm);
- if ((ret = be->func->bind(be, bo_mem))) {
+ ret = be->func->bind(be, bo_mem);
+ if (ret) {
ttm->state = ttm_evicted;
DRM_ERROR("Couldn't bind backend.\n");
return ret;
}
ttm->state = ttm_bound;
-
+ if (ttm->page_flags & DRM_TTM_PAGE_USER)
+ ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
return 0;
}
-
-EXPORT_SYMBOL(drm_bind_ttm);
+EXPORT_SYMBOL(drm_ttm_bind);
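
A sketch tying together the renamed ttm entry points above for the new user-page case; the wrapper function, its parameters, and the error handling are hypothetical, and locking plus the lifetime of the user range are assumed to be handled by the caller.

static struct drm_ttm *my_wrap_user_range(struct drm_device *dev,
					  unsigned long start,
					  unsigned long num_pages,
					  int write)
{
	uint32_t flags = DRM_TTM_PAGE_USER |
		(write ? DRM_TTM_PAGE_WRITE : 0);
	struct drm_ttm *ttm;

	ttm = drm_ttm_create(dev, num_pages << PAGE_SHIFT, flags,
			     dev->bm.dummy_read_page);
	if (!ttm)
		return NULL;

	/* Pin the user pages; the ttm can then be populated and bound. */
	if (drm_ttm_set_user(ttm, current, start, num_pages)) {
		drm_ttm_destroy(ttm);
		return NULL;
	}
	return ttm;
}

drm_ttm_populate() and drm_ttm_bind() then follow, moving the object through the unbound and bound states documented above.
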
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index d2554f31..c481a530 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -166,7 +166,7 @@ static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
* \param address access address.
* \return pointer to the page structure.
*
- * Get the the mapping, find the real physical page to map, get the page, and
+ * Get the mapping, find the real physical page to map, get the page, and
* return it.
*/
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
@@ -189,7 +189,7 @@ static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
return NOPAGE_SIGBUS;
get_page(page);
- DRM_DEBUG("shm_nopage 0x%lx\n", address);
+ DRM_DEBUG("0x%lx\n", address);
return page;
}
@@ -263,7 +263,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
dmah.size = map->size;
__drm_pci_free(dev, &dmah);
break;
- case _DRM_TTM:
+ case _DRM_TTM:
BUG_ON(1);
break;
}
@@ -305,7 +305,7 @@ static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
get_page(page);
- DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
+ DRM_DEBUG("0x%lx (page %lu)\n", address, page_nr);
return page;
}
@@ -632,9 +632,9 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
if (io_remap_pfn_range(vma, vma->vm_start,
- (map->offset + offset) >> PAGE_SHIFT,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot))
+ (map->offset + offset) >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
return -EAGAIN;
DRM_DEBUG(" Type = %d; start = 0x%lx, end = 0x%lx,"
" offset = 0x%lx\n",
@@ -751,10 +751,10 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
*/
if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
- uint32_t new_mask = bo->mem.mask |
+ uint32_t new_flags = bo->mem.proposed_flags |
DRM_BO_FLAG_MAPPABLE |
DRM_BO_FLAG_FORCE_MAPPABLE;
- err = drm_bo_move_buffer(bo, new_mask, 0, 0);
+ err = drm_bo_move_buffer(bo, new_flags, 0, 0);
if (err) {
ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
goto out_unlock;
diff --git a/linux-core/ffb_drv.h b/linux-core/ffb_drv.h
index bad3c94d..f961ba47 100644
--- a/linux-core/ffb_drv.h
+++ b/linux-core/ffb_drv.h
@@ -124,7 +124,7 @@ typedef struct _ffb_fbc {
/*294*/ volatile unsigned int xpmask; /* X PlaneMask */
/*298*/ volatile unsigned int ypmask; /* Y PlaneMask */
/*29c*/ volatile unsigned int zpmask; /* Z PlaneMask */
-/*2a0*/ ffb_auxclip auxclip[4]; /* Auxilliary Viewport Clip */
+/*2a0*/ ffb_auxclip auxclip[4]; /* Auxilliary Viewport Clip */
/* New 3dRAM III support regs */
/*2c0*/ volatile unsigned int rawblend2;
@@ -266,7 +266,7 @@ typedef struct ffb_dev_priv {
int prom_node;
enum ffb_chip_type ffb_type;
u64 card_phys_base;
- struct miscdevice miscdev;
+ struct miscdevice miscdev;
/* Controller registers. */
ffb_fbcPtr regs;
diff --git a/linux-core/i810_dma.c b/linux-core/i810_dma.c
index 7c37b4bb..3c9ca3b2 100644
--- a/linux-core/i810_dma.c
+++ b/linux-core/i810_dma.c
@@ -41,7 +41,7 @@
#define I810_BUF_FREE 2
#define I810_BUF_CLIENT 1
-#define I810_BUF_HARDWARE 0
+#define I810_BUF_HARDWARE 0
#define I810_BUF_UNMAPPED 0
#define I810_BUF_MAPPED 1
@@ -589,7 +589,7 @@ static void i810EmitState(struct drm_device * dev)
drm_i810_sarea_t *sarea_priv = dev_priv->sarea_priv;
unsigned int dirty = sarea_priv->dirty;
- DRM_DEBUG("%s %x\n", __FUNCTION__, dirty);
+ DRM_DEBUG("%x\n", dirty);
if (dirty & I810_UPLOAD_BUFFERS) {
i810EmitDestVerified(dev, sarea_priv->BufferState);
@@ -821,8 +821,7 @@ static void i810_dma_dispatch_flip(struct drm_device * dev)
int pitch = dev_priv->pitch;
RING_LOCALS;
- DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
- __FUNCTION__,
+ DRM_DEBUG("page=%d pfCurrentPage=%d\n",
dev_priv->current_page,
dev_priv->sarea_priv->pf_current_page);
@@ -867,8 +866,6 @@ static void i810_dma_quiescent(struct drm_device * dev)
drm_i810_private_t *dev_priv = dev->dev_private;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(4);
@@ -888,8 +885,6 @@ static int i810_flush_queue(struct drm_device * dev)
int i, ret = 0;
RING_LOCALS;
-/* printk("%s\n", __FUNCTION__); */
-
i810_kernel_lost_context(dev);
BEGIN_LP_RING(2);
@@ -968,7 +963,7 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
LOCK_TEST_WITH_RETURN(dev, file_priv);
- DRM_DEBUG("i810 dma vertex, idx %d used %d discard %d\n",
+ DRM_DEBUG("idx %d used %d discard %d\n",
vertex->idx, vertex->used, vertex->discard);
if (vertex->idx < 0 || vertex->idx > dma->buf_count)
@@ -1006,7 +1001,7 @@ static int i810_clear_bufs(struct drm_device *dev, void *data,
static int i810_swap_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- DRM_DEBUG("i810_swap_bufs\n");
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1087,11 +1082,10 @@ static void i810_dma_dispatch_mc(struct drm_device * dev, struct drm_buf * buf,
sarea_priv->dirty = 0x7f;
- DRM_DEBUG("dispatch mc addr 0x%lx, used 0x%x\n", address, used);
+ DRM_DEBUG("addr 0x%lx, used 0x%x\n", address, used);
dev_priv->counter++;
DRM_DEBUG("dispatch counter : %ld\n", dev_priv->counter);
- DRM_DEBUG("i810_dma_dispatch_mc\n");
DRM_DEBUG("start : %lx\n", start);
DRM_DEBUG("used : %d\n", used);
DRM_DEBUG("start + used - 4 : %ld\n", start + used - 4);
@@ -1197,7 +1191,7 @@ static void i810_do_init_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
dev_priv->page_flipping = 1;
dev_priv->current_page = 0;
dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
@@ -1207,7 +1201,7 @@ static int i810_do_cleanup_pageflip(struct drm_device * dev)
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
if (dev_priv->current_page != 0)
i810_dma_dispatch_flip(dev);
@@ -1220,7 +1214,7 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
{
drm_i810_private_t *dev_priv = dev->dev_private;
- DRM_DEBUG("%s\n", __FUNCTION__);
+ DRM_DEBUG("\n");
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -1271,7 +1265,7 @@ int i810_driver_dma_quiescent(struct drm_device * dev)
}
struct drm_ioctl_desc i810_ioctls[] = {
- DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_IOCTL_DEF(DRM_I810_INIT, i810_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_I810_VERTEX, i810_dma_vertex, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I810_CLEAR, i810_clear_bufs, DRM_AUTH),
DRM_IOCTL_DEF(DRM_I810_FLUSH, i810_flush_ioctl, DRM_AUTH),
diff --git a/linux-core/i810_drv.h b/linux-core/i810_drv.h
index c525e165..f5c175fe 100644
--- a/linux-core/i810_drv.h
+++ b/linux-core/i810_drv.h
@@ -25,7 +25,7 @@
* DEALINGS IN THE SOFTWARE.
*
* Authors: Rickard E. (Rik) Faith <faith@valinux.com>
- * Jeff Hartmann <jhartmann@valinux.com>
+ * Jeff Hartmann <jhartmann@valinux.com>
*
*/
@@ -134,7 +134,7 @@ extern int i810_max_ioctl;
#define I810_ADDR(reg) (I810_BASE(reg) + reg)
#define I810_DEREF(reg) *(__volatile__ int *)I810_ADDR(reg)
#define I810_READ(reg) I810_DEREF(reg)
-#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
+#define I810_WRITE(reg,val) do { I810_DEREF(reg) = val; } while (0)
#define I810_DEREF16(reg) *(__volatile__ u16 *)I810_ADDR(reg)
#define I810_READ16(reg) I810_DEREF16(reg)
#define I810_WRITE16(reg,val) do { I810_DEREF16(reg) = val; } while (0)
@@ -145,7 +145,7 @@ extern int i810_max_ioctl;
#define BEGIN_LP_RING(n) do { \
if (I810_VERBOSE) \
- DRM_DEBUG("BEGIN_LP_RING(%d) in %s\n", n, __FUNCTION__);\
+ DRM_DEBUG("BEGIN_LP_RING(%d)\n", n); \
if (dev_priv->ring.space < n*4) \
i810_wait_ring(dev, n*4); \
dev_priv->ring.space -= n*4; \
@@ -155,19 +155,19 @@ extern int i810_max_ioctl;
} while (0)
#define ADVANCE_LP_RING() do { \
- if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
+ if (I810_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING\n"); \
dev_priv->ring.tail = outring; \
I810_WRITE(LP_RING + RING_TAIL, outring); \
} while(0)
-#define OUT_RING(n) do { \
+#define OUT_RING(n) do { \
if (I810_VERBOSE) DRM_DEBUG(" OUT_RING %x\n", (int)(n)); \
*(volatile unsigned int *)(virt + outring) = n; \
outring += 4; \
outring &= ringmask; \
} while (0)
-#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
+#define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23))
#define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23))
#define CMD_REPORT_HEAD (7<<23)
#define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1)
@@ -184,28 +184,28 @@ extern int i810_max_ioctl;
#define I810REG_HWSTAM 0x02098
#define I810REG_INT_IDENTITY_R 0x020a4
-#define I810REG_INT_MASK_R 0x020a8
+#define I810REG_INT_MASK_R 0x020a8
#define I810REG_INT_ENABLE_R 0x020a0
-#define LP_RING 0x2030
-#define HP_RING 0x2040
-#define RING_TAIL 0x00
+#define LP_RING 0x2030
+#define HP_RING 0x2040
+#define RING_TAIL 0x00
#define TAIL_ADDR 0x000FFFF8
-#define RING_HEAD 0x04
-#define HEAD_WRAP_COUNT 0xFFE00000
-#define HEAD_WRAP_ONE 0x00200000
-#define HEAD_ADDR 0x001FFFFC
-#define RING_START 0x08
-#define START_ADDR 0x00FFFFF8
-#define RING_LEN 0x0C
-#define RING_NR_PAGES 0x000FF000
-#define RING_REPORT_MASK 0x00000006
-#define RING_REPORT_64K 0x00000002
-#define RING_REPORT_128K 0x00000004
-#define RING_NO_REPORT 0x00000000
-#define RING_VALID_MASK 0x00000001
-#define RING_VALID 0x00000001
-#define RING_INVALID 0x00000000
+#define RING_HEAD 0x04
+#define HEAD_WRAP_COUNT 0xFFE00000
+#define HEAD_WRAP_ONE 0x00200000
+#define HEAD_ADDR 0x001FFFFC
+#define RING_START 0x08
+#define START_ADDR 0x00FFFFF8
+#define RING_LEN 0x0C
+#define RING_NR_PAGES 0x000FF000
+#define RING_REPORT_MASK 0x00000006
+#define RING_REPORT_64K 0x00000002
+#define RING_REPORT_128K 0x00000004
+#define RING_NO_REPORT 0x00000000
+#define RING_VALID_MASK 0x00000001
+#define RING_VALID 0x00000001
+#define RING_INVALID 0x00000000
#define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19))
#define SC_UPDATE_SCISSOR (0x1<<1)
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index bbc7e1db..08067476 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,20 +10,20 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
- *
+ *
+ *
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@@ -33,23 +33,23 @@
#include "i915_drm.h"
#include "i915_drv.h"
-struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
+struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev)
{
return drm_agp_init_ttm(dev);
}
-int i915_fence_types(struct drm_buffer_object *bo,
- uint32_t * fclass,
- uint32_t * type)
+int i915_fence_type(struct drm_buffer_object *bo,
+ uint32_t *fclass,
+ uint32_t *type)
{
- if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
*type = 3;
else
*type = 1;
return 0;
}
-int i915_invalidate_caches(struct drm_device * dev, uint64_t flags)
+int i915_invalidate_caches(struct drm_device *dev, uint64_t flags)
{
/*
* FIXME: Only emit once per batchbuffer submission.
@@ -65,8 +65,8 @@ int i915_invalidate_caches(struct drm_device * dev, uint64_t flags)
return i915_emit_mi_flush(dev, flush_cmd);
}
-int i915_init_mem_type(struct drm_device * dev, uint32_t type,
- struct drm_mem_type_manager * man)
+int i915_init_mem_type(struct drm_device *dev, uint32_t type,
+ struct drm_mem_type_manager *man)
{
switch (type) {
case DRM_BO_MEM_LOCAL:
@@ -110,7 +110,16 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
return 0;
}
-uint32_t i915_evict_mask(struct drm_buffer_object *bo)
+/*
+ * i915_evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ */
+uint64_t i915_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
@@ -183,7 +192,7 @@ static int i915_move_blit(struct drm_buffer_object * bo,
}
/*
- * Flip destination ttm into cached-coherent AGP,
+ * Flip destination ttm into cached-coherent AGP,
* then blit and subsequently move out again.
*/
@@ -226,25 +235,24 @@ out_cleanup:
#endif
/*
- * Disable i915_move_flip for now, since we can't guarantee that the hardware lock
- * is held here. To re-enable we need to make sure either
+ * Disable i915_move_flip for now, since we can't guarantee that the hardware
+ * lock is held here. To re-enable we need to make sure either
* a) The X server is using DRM to submit commands to the ring, or
- * b) DRM can use the HP ring for these blits. This means i915 needs to implement
- * a new ring submission mechanism and fence class.
+ * b) DRM can use the HP ring for these blits. This means i915 needs to
+ * implement a new ring submission mechanism and fence class.
*/
-
-int i915_move(struct drm_buffer_object * bo,
- int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+int i915_move(struct drm_buffer_object *bo,
+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
struct drm_bo_mem_reg *old_mem = &bo->mem;
if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
- if (0 /*i915_move_flip(bo, evict, no_wait, new_mem)*/)
+ if (0) /*i915_move_flip(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
} else {
- if (0 /*i915_move_blit(bo, evict, no_wait, new_mem)*/)
+ if (0) /*i915_move_blit(bo, evict, no_wait, new_mem)*/
return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
}
return 0;
@@ -258,8 +266,8 @@ static inline void clflush(volatile void *__p)
#endif
static inline void drm_cache_flush_addr(void *virt)
-{
- int i;
+{
+ int i;
for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
clflush(virt+i);
@@ -278,7 +286,18 @@ void i915_flush_ttm(struct drm_ttm *ttm)
return;
DRM_MEMORYBARRIER();
- for (i = ttm->num_pages-1; i >= 0; i--)
+
+#ifdef CONFIG_X86_32
+ /* Hopefully nobody has built an x86-64 processor without clflush */
+ if (!cpu_has_clflush) {
+ wbinvd();
+ DRM_MEMORYBARRIER();
+ return;
+ }
+#endif
+
+ for (i = ttm->num_pages - 1; i >= 0; i--)
drm_cache_flush_page(drm_ttm_get_page(ttm, i));
+
DRM_MEMORYBARRIER();
}
diff --git a/linux-core/i915_compat.c b/linux-core/i915_compat.c
new file mode 100644
index 00000000..cc024085
--- /dev/null
+++ b/linux-core/i915_compat.c
@@ -0,0 +1,215 @@
+#include "drmP.h"
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
+
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define PCI_DEVICE_ID_INTEL_82946GZ_HB 0x2970
+#define PCI_DEVICE_ID_INTEL_82965G_1_HB 0x2980
+#define PCI_DEVICE_ID_INTEL_82965Q_HB 0x2990
+#define PCI_DEVICE_ID_INTEL_82965G_HB 0x29A0
+#define PCI_DEVICE_ID_INTEL_82965GM_HB 0x2A00
+#define PCI_DEVICE_ID_INTEL_82965GME_HB 0x2A10
+#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
+#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
+#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
+#define PCI_DEVICE_ID_INTEL_Q33_HB 0x29D0
+
+#define I915_IFPADDR 0x60
+#define I965_IFPADDR 0x70
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)
+#define upper_32_bits(_val) (((u64)(_val)) >> 32)
+#endif
+
+static struct _i9xx_private_compat {
+ void __iomem *flush_page;
+ int resource_valid;
+ struct resource ifp_resource;
+} i9xx_private;
+
+static struct _i8xx_private_compat {
+ void *flush_page;
+ struct page *page;
+} i8xx_private;
+
+static void
+intel_compat_align_resource(void *data, struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ return;
+}
+
+
+static int intel_alloc_chipset_flush_resource(struct pci_dev *pdev)
+{
+ int ret;
+ ret = pci_bus_alloc_resource(pdev->bus, &i9xx_private.ifp_resource, PAGE_SIZE,
+ PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
+ intel_compat_align_resource, pdev);
+ if (ret != 0)
+ return ret;
+
+ return 0;
+}
+
+static void intel_i915_setup_chipset_flush(struct pci_dev *pdev)
+{
+ int ret;
+ u32 temp;
+
+ pci_read_config_dword(pdev, I915_IFPADDR, &temp);
+ if (!(temp & 0x1)) {
+ intel_alloc_chipset_flush_resource(pdev);
+ i9xx_private.resource_valid = 1;
+ pci_write_config_dword(pdev, I915_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ temp &= ~1;
+
+ i9xx_private.resource_valid = 1;
+ i9xx_private.ifp_resource.start = temp;
+ i9xx_private.ifp_resource.end = temp + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
+ if (ret) {
+ i9xx_private.resource_valid = 0;
+ printk("Failed inserting resource into tree\n");
+ }
+ }
+}
+
+static void intel_i965_g33_setup_chipset_flush(struct pci_dev *pdev)
+{
+ u32 temp_hi, temp_lo;
+ int ret;
+
+ pci_read_config_dword(pdev, I965_IFPADDR + 4, &temp_hi);
+ pci_read_config_dword(pdev, I965_IFPADDR, &temp_lo);
+
+ if (!(temp_lo & 0x1)) {
+
+ intel_alloc_chipset_flush_resource(pdev);
+
+ i9xx_private.resource_valid = 1;
+ pci_write_config_dword(pdev, I965_IFPADDR + 4,
+ upper_32_bits(i9xx_private.ifp_resource.start));
+ pci_write_config_dword(pdev, I965_IFPADDR, (i9xx_private.ifp_resource.start & 0xffffffff) | 0x1);
+ } else {
+ u64 l64;
+
+ temp_lo &= ~0x1;
+ l64 = ((u64)temp_hi << 32) | temp_lo;
+
+ i9xx_private.resource_valid = 1;
+ i9xx_private.ifp_resource.start = l64;
+ i9xx_private.ifp_resource.end = l64 + PAGE_SIZE;
+ ret = request_resource(&iomem_resource, &i9xx_private.ifp_resource);
+ if (ret) {
+ i9xx_private.resource_valid = 0;
+ printk("Failed inserting resource into tree\n");
+ }
+ }
+}
+
+static void intel_i8xx_fini_flush(struct drm_device *dev)
+{
+ kunmap(i8xx_private.page);
+ i8xx_private.flush_page = NULL;
+ unmap_page_from_agp(i8xx_private.page);
+ flush_agp_mappings();
+
+ __free_page(i8xx_private.page);
+}
+
+static void intel_i8xx_setup_flush(struct drm_device *dev)
+{
+
+ i8xx_private.page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ if (!i8xx_private.page) {
+ return;
+ }
+
+ /* make page uncached */
+ map_page_into_agp(i8xx_private.page);
+ flush_agp_mappings();
+
+ i8xx_private.flush_page = kmap(i8xx_private.page);
+ if (!i8xx_private.flush_page)
+ intel_i8xx_fini_flush(dev);
+}
+
+
+static void intel_i8xx_flush_page(struct drm_device *dev)
+{
+ unsigned int *pg = i8xx_private.flush_page;
+ int i;
+
+ /* HAI NUT CAN I HAZ HAMMER?? */
+ for (i = 0; i < 256; i++)
+ *(pg + i) = i;
+
+ DRM_MEMORYBARRIER();
+}
+
+static void intel_i9xx_setup_flush(struct drm_device *dev)
+{
+ struct pci_dev *agp_dev = dev->agp->agp_info.device;
+
+ i9xx_private.ifp_resource.name = "GMCH IFPBAR";
+ i9xx_private.ifp_resource.flags = IORESOURCE_MEM;
+
+ /* Setup chipset flush for 915 */
+ if (IS_I965G(dev) || IS_G33(dev)) {
+ intel_i965_g33_setup_chipset_flush(agp_dev);
+ } else {
+ intel_i915_setup_chipset_flush(agp_dev);
+ }
+
+ if (i9xx_private.ifp_resource.start) {
+ i9xx_private.flush_page = ioremap_nocache(i9xx_private.ifp_resource.start, PAGE_SIZE);
+ if (!i9xx_private.flush_page)
+ printk("unable to ioremap flush page - no chipset flushing\n");
+ }
+}
+
+static void intel_i9xx_fini_flush(struct drm_device *dev)
+{
+ iounmap(i9xx_private.flush_page);
+ if (i9xx_private.resource_valid)
+ release_resource(&i9xx_private.ifp_resource);
+ i9xx_private.resource_valid = 0;
+}
+
+static void intel_i9xx_flush_page(struct drm_device *dev)
+{
+ if (i9xx_private.flush_page)
+ writel(1, i9xx_private.flush_page);
+}
+
+void intel_init_chipset_flush_compat(struct drm_device *dev)
+{
+ /* i9xx uses an IFP flush page; i8xx falls back to an uncached page */
+ if (IS_I9XX(dev))
+ intel_i9xx_setup_flush(dev);
+ else
+ intel_i8xx_setup_flush(dev);
+
+}
+
+void intel_fini_chipset_flush_compat(struct drm_device *dev)
+{
+ /* tear down the IFP page on i9xx, the uncached page on i8xx */
+ if (IS_I9XX(dev))
+ intel_i9xx_fini_flush(dev);
+ else
+ intel_i8xx_fini_flush(dev);
+}
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+ if (IS_I9XX(dev))
+ intel_i9xx_flush_page(dev);
+ else
+ intel_i8xx_flush_page(dev);
+}
+#endif
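
A hypothetical caller of the compat helper above, showing the intended ordering: only drm_agp_chipset_flush() comes from the new file; the wrapper name and the writel() target are invented for illustration and assume the usual drmP.h context.

static void example_cpu_write_then_flush(struct drm_device *dev,
					  void __iomem *dst, u32 value)
{
	/* CPU write to graphics memory through the aperture */
	writel(value, dst);
	/* force the chipset write buffer out before the GPU reads; the
	 * helper picks the IFP write (9xx) or the uncached-page hammer
	 * (8xx) internally */
	drm_agp_chipset_flush(dev);
}
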
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 84df64a7..a5f60ee1 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -1,10 +1,10 @@
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
*/
/*
- *
+ *
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -12,11 +12,11 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
@@ -24,7 +24,7 @@
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
*/
#include "drmP.h"
@@ -61,10 +61,10 @@ static struct drm_bo_driver i915_bo_driver = {
.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
.num_mem_busy_prio = sizeof(i915_busy_prios)/sizeof(uint32_t),
.create_ttm_backend_entry = i915_create_ttm_backend_entry,
- .fence_type = i915_fence_types,
+ .fence_type = i915_fence_type,
.invalidate_caches = i915_invalidate_caches,
.init_mem_type = i915_init_mem_type,
- .evict_mask = i915_evict_mask,
+ .evict_flags = i915_evict_flags,
.move = i915_move,
.ttm_cache_flush = i915_flush_ttm,
};
@@ -330,7 +330,7 @@ static int i915_suspend(struct drm_device *dev)
dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
- if (IS_I965GM(dev)) {
+ if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
}
@@ -420,7 +420,7 @@ static int i915_resume(struct drm_device *dev)
I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
+
/* Restore plane info */
I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
@@ -431,7 +431,11 @@ static int i915_resume(struct drm_device *dev)
I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
}
- I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+ if ((dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) &&
+ (dev_priv->saveDPLL_A & DPLL_VGA_MODE_DIS))
+ I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
@@ -451,7 +455,7 @@ static int i915_resume(struct drm_device *dev)
if (IS_I965G(dev))
I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
udelay(150);
-
+
/* Restore mode */
I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
@@ -471,7 +475,10 @@ static int i915_resume(struct drm_device *dev)
I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
}
- I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+
+ if ((dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) &&
+ (dev_priv->saveDPLL_B & DPLL_VGA_MODE_DIS))
+ I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index a0f22785..e3c76df6 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -1,8 +1,8 @@
/**************************************************************************
- *
+ *
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
@@ -10,20 +10,20 @@
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
- *
+ *
+ *
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
@@ -38,7 +38,7 @@
* Implements an intel sync flush operation.
*/
-static void i915_perform_flush(struct drm_device * dev)
+static void i915_perform_flush(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_fence_manager *fm = &dev->fm;
@@ -63,14 +63,14 @@ static void i915_perform_flush(struct drm_device * dev)
diff = (sequence - fc->last_exe_flush) & BREADCRUMB_MASK;
if (diff < driver->wrap_diff && diff != 0) {
- drm_fence_handler(dev, 0, sequence,
+ drm_fence_handler(dev, 0, sequence,
DRM_FENCE_TYPE_EXE, 0);
}
if (dev_priv->fence_irq_on && !fc->pending_exe_flush) {
i915_user_irq_off(dev_priv);
dev_priv->fence_irq_on = 0;
- } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
+ } else if (!dev_priv->fence_irq_on && fc->pending_exe_flush) {
i915_user_irq_on(dev_priv);
dev_priv->fence_irq_on = 1;
}
@@ -110,7 +110,7 @@ static void i915_perform_flush(struct drm_device * dev)
}
-void i915_poke_flush(struct drm_device * dev, uint32_t class)
+void i915_poke_flush(struct drm_device *dev, uint32_t class)
{
struct drm_fence_manager *fm = &dev->fm;
unsigned long flags;
@@ -120,8 +120,9 @@ void i915_poke_flush(struct drm_device * dev, uint32_t class)
write_unlock_irqrestore(&fm->lock, flags);
}
-int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t flags,
- uint32_t * sequence, uint32_t * native_type)
+int i915_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+ uint32_t flags, uint32_t *sequence,
+ uint32_t *native_type)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
if (!dev_priv)
@@ -136,7 +137,7 @@ int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t f
return 0;
}
-void i915_fence_handler(struct drm_device * dev)
+void i915_fence_handler(struct drm_device *dev)
{
struct drm_fence_manager *fm = &dev->fm;
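
The i915_perform_flush() hunk above decides whether a breadcrumb has passed with a wrap-safe comparison: diff = (sequence - last_exe_flush) & BREADCRUMB_MASK, accepted when 0 < diff < wrap_diff. Below is a stand-alone sketch of that test; the 31-bit mask width and the helper name are assumptions, and the 1 << 30 wrap_diff mirrors the nouveau fence driver later in this diff.

#include <stdint.h>
#include <stdio.h>

#define BREADCRUMB_MASK ((1u << 31) - 1)	/* assumed 31-bit breadcrumb */

static int sequence_passed(uint32_t sequence, uint32_t last_exe_flush,
			   uint32_t wrap_diff)
{
	uint32_t diff = (sequence - last_exe_flush) & BREADCRUMB_MASK;

	return diff != 0 && diff < wrap_diff;
}

int main(void)
{
	/* a sequence that wrapped past the mask still compares as newer */
	printf("%d\n", sequence_passed(3, BREADCRUMB_MASK - 2, 1u << 30));	/* 1 */
	/* an already-flushed sequence does not */
	printf("%d\n", sequence_passed(100, 100, 1u << 30));			/* 0 */
	return 0;
}
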
diff --git a/linux-core/i915_ioc32.c b/linux-core/i915_ioc32.c
index c1e776b7..0b8fff19 100644
--- a/linux-core/i915_ioc32.c
+++ b/linux-core/i915_ioc32.c
@@ -3,7 +3,7 @@
*
* 32-bit ioctl compatibility routines for the i915 DRM.
*
- * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
+ * \author Alan Hourihane <alanh@fairlite.demon.co.uk>
*
*
* Copyright (C) Paul Mackerras 2005
@@ -34,6 +34,7 @@
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
+#include "i915_drv.h"
typedef struct _drm_i915_batchbuffer32 {
int start; /* agp offset */
@@ -45,15 +46,15 @@ typedef struct _drm_i915_batchbuffer32 {
} drm_i915_batchbuffer32_t;
static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
drm_i915_batchbuffer32_t batchbuffer32;
drm_i915_batchbuffer_t __user *batchbuffer;
-
+
if (copy_from_user
(&batchbuffer32, (void __user *)arg, sizeof(batchbuffer32)))
return -EFAULT;
-
+
batchbuffer = compat_alloc_user_space(sizeof(*batchbuffer));
if (!access_ok(VERIFY_WRITE, batchbuffer, sizeof(*batchbuffer))
|| __put_user(batchbuffer32.start, &batchbuffer->start)
@@ -65,7 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
|| __put_user((int __user *)(unsigned long)batchbuffer32.cliprects,
&batchbuffer->cliprects))
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_I915_BATCHBUFFER,
(unsigned long) batchbuffer);
@@ -81,15 +82,15 @@ typedef struct _drm_i915_cmdbuffer32 {
} drm_i915_cmdbuffer32_t;
static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
drm_i915_cmdbuffer32_t cmdbuffer32;
drm_i915_cmdbuffer_t __user *cmdbuffer;
-
+
if (copy_from_user
(&cmdbuffer32, (void __user *)arg, sizeof(cmdbuffer32)))
return -EFAULT;
-
+
cmdbuffer = compat_alloc_user_space(sizeof(*cmdbuffer));
if (!access_ok(VERIFY_WRITE, cmdbuffer, sizeof(*cmdbuffer))
|| __put_user((int __user *)(unsigned long)cmdbuffer32.buf,
@@ -101,7 +102,7 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
|| __put_user((int __user *)(unsigned long)cmdbuffer32.cliprects,
&cmdbuffer->cliprects))
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_I915_CMDBUFFER, (unsigned long) cmdbuffer);
}
@@ -111,7 +112,7 @@ typedef struct drm_i915_irq_emit32 {
} drm_i915_irq_emit32_t;
static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
drm_i915_irq_emit32_t req32;
drm_i915_irq_emit_t __user *request;
@@ -134,7 +135,7 @@ typedef struct drm_i915_getparam32 {
} drm_i915_getparam32_t;
static int compat_i915_getparam(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
drm_i915_getparam32_t req32;
drm_i915_getparam_t __user *request;
@@ -161,7 +162,7 @@ typedef struct drm_i915_mem_alloc32 {
} drm_i915_mem_alloc32_t;
static int compat_i915_alloc(struct file *file, unsigned int cmd,
- unsigned long arg)
+ unsigned long arg)
{
drm_i915_mem_alloc32_t req32;
drm_i915_mem_alloc_t __user *request;
@@ -182,13 +183,73 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
DRM_IOCTL_I915_ALLOC, (unsigned long) request);
}
+typedef struct drm_i915_execbuffer32 {
+ uint64_t ops_list;
+ uint32_t num_buffers;
+ struct _drm_i915_batchbuffer32 batch;
+ drm_context_t context;
+ struct drm_fence_arg fence_arg;
+} drm_i915_execbuffer32_t;
+
+static int compat_i915_execbuffer(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ drm_i915_execbuffer32_t req32;
+ struct drm_i915_execbuffer __user *request;
+ int err;
+
+ if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ return -EFAULT;
+
+ request = compat_alloc_user_space(sizeof(*request));
+
+ if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
+ || __put_user(req32.ops_list, &request->ops_list)
+ || __put_user(req32.num_buffers, &request->num_buffers)
+ || __put_user(req32.context, &request->context)
+ || __copy_to_user(&request->fence_arg, &req32.fence_arg,
+ sizeof(req32.fence_arg))
+ || __put_user(req32.batch.start, &request->batch.start)
+ || __put_user(req32.batch.used, &request->batch.used)
+ || __put_user(req32.batch.DR1, &request->batch.DR1)
+ || __put_user(req32.batch.DR4, &request->batch.DR4)
+ || __put_user(req32.batch.num_cliprects,
+ &request->batch.num_cliprects)
+ || __put_user((int __user *)(unsigned long)req32.batch.cliprects,
+ &request->batch.cliprects))
+ return -EFAULT;
+
+ err = drm_ioctl(file->f_dentry->d_inode, file,
+ DRM_IOCTL_I915_EXECBUFFER, (unsigned long)request);
+
+ if (err)
+ return err;
+
+ if (__get_user(req32.fence_arg.handle, &request->fence_arg.handle)
+ || __get_user(req32.fence_arg.fence_class, &request->fence_arg.fence_class)
+ || __get_user(req32.fence_arg.type, &request->fence_arg.type)
+ || __get_user(req32.fence_arg.flags, &request->fence_arg.flags)
+ || __get_user(req32.fence_arg.signaled, &request->fence_arg.signaled)
+ || __get_user(req32.fence_arg.error, &request->fence_arg.error)
+ || __get_user(req32.fence_arg.sequence, &request->fence_arg.sequence))
+ return -EFAULT;
+
+ if (copy_to_user((void __user *)arg, &req32, sizeof(req32)))
+ return -EFAULT;
+
+ return 0;
+}
+
drm_ioctl_compat_t *i915_compat_ioctls[] = {
[DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
[DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
[DRM_I915_GETPARAM] = compat_i915_getparam,
[DRM_I915_IRQ_EMIT] = compat_i915_irq_emit,
- [DRM_I915_ALLOC] = compat_i915_alloc
+ [DRM_I915_ALLOC] = compat_i915_alloc,
+#ifdef I915_HAVE_BUFFER
+ [DRM_I915_EXECBUFFER] = compat_i915_execbuffer,
+#endif
};
/**
@@ -208,7 +269,7 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
-
+
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
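
Every handler in i915_ioc32.c (and in the mga/r128/radeon files below) follows the same three-step thunk. A stripped-down, purely hypothetical example of the pattern: the struct, its fields and DRM_IOCTL_EXAMPLE are invented, while the helpers are the same ones used above.

typedef struct example_req32 {
	u32 count;
	u32 ptr;			/* 32-bit user pointer */
} example_req32_t;

struct example_req {
	int count;
	void __user *ptr;
};

static int compat_example(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	example_req32_t req32;
	struct example_req __user *request;

	/* 1. copy the packed 32-bit layout in from user space */
	if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
		return -EFAULT;

	/* 2. rebuild the native layout on the user stack */
	request = compat_alloc_user_space(sizeof(*request));
	if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
	    || __put_user(req32.count, &request->count)
	    || __put_user((void __user *)(unsigned long)req32.ptr,
			  &request->ptr))
		return -EFAULT;

	/* 3. hand it to the regular 64-bit ioctl path */
	return drm_ioctl(file->f_dentry->d_inode, file,
			 DRM_IOCTL_EXAMPLE, (unsigned long)request);
}
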
diff --git a/linux-core/mach64_drv.c b/linux-core/mach64_drv.c
index 9709934d..16bc9ff3 100644
--- a/linux-core/mach64_drv.c
+++ b/linux-core/mach64_drv.c
@@ -42,9 +42,11 @@ static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_HAVE_DMA
- | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+ | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.lastclose = mach64_driver_lastclose,
- .vblank_wait = mach64_driver_vblank_wait,
+ .get_vblank_counter = mach64_get_vblank_counter,
+ .enable_vblank = mach64_enable_vblank,
+ .disable_vblank = mach64_disable_vblank,
.irq_preinstall = mach64_driver_irq_preinstall,
.irq_postinstall = mach64_driver_irq_postinstall,
.irq_uninstall = mach64_driver_irq_uninstall,
diff --git a/linux-core/mga_drv.c b/linux-core/mga_drv.c
index 11796b01..14a0be45 100644
--- a/linux-core/mga_drv.c
+++ b/linux-core/mga_drv.c
@@ -141,10 +141,10 @@ static int mga_driver_device_is_agp(struct drm_device * dev)
* device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
* device.
*/
-
+
if ((pdev->device == 0x0525) && pdev->bus->self
- && (pdev->bus->self->vendor == 0x3388)
- && (pdev->bus->self->device == 0x0021) ) {
+ && (pdev->bus->self->vendor == 0x3388)
+ && (pdev->bus->self->device == 0x0021)) {
return 0;
}
diff --git a/linux-core/mga_ioc32.c b/linux-core/mga_ioc32.c
index 75f2a231..e3df567e 100644
--- a/linux-core/mga_ioc32.c
+++ b/linux-core/mga_ioc32.c
@@ -39,17 +39,17 @@
typedef struct drm32_mga_init {
int func;
- u32 sarea_priv_offset;
+ u32 sarea_priv_offset;
int chipset;
- int sgram;
+ int sgram;
unsigned int maccess;
- unsigned int fb_cpp;
+ unsigned int fb_cpp;
unsigned int front_offset, front_pitch;
- unsigned int back_offset, back_pitch;
- unsigned int depth_cpp;
- unsigned int depth_offset, depth_pitch;
- unsigned int texture_offset[MGA_NR_TEX_HEAPS];
- unsigned int texture_size[MGA_NR_TEX_HEAPS];
+ unsigned int back_offset, back_pitch;
+ unsigned int depth_cpp;
+ unsigned int depth_offset, depth_pitch;
+ unsigned int texture_offset[MGA_NR_TEX_HEAPS];
+ unsigned int texture_size[MGA_NR_TEX_HEAPS];
u32 fb_offset;
u32 mmio_offset;
u32 status_offset;
@@ -64,10 +64,10 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
drm_mga_init32_t init32;
drm_mga_init_t __user *init;
int err = 0, i;
-
+
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
-
+
init = compat_alloc_user_space(sizeof(*init));
if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
|| __put_user(init32.func, &init->func)
@@ -90,7 +90,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
|| __put_user(init32.primary_offset, &init->primary_offset)
|| __put_user(init32.buffers_offset, &init->buffers_offset))
return -EFAULT;
-
+
for (i=0; i<MGA_NR_TEX_HEAPS; i++)
{
err |= __put_user(init32.texture_offset[i], &init->texture_offset[i]);
@@ -98,7 +98,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
}
if (err)
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_MGA_INIT, (unsigned long) init);
}
@@ -115,7 +115,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
{
drm_mga_getparam32_t getparam32;
drm_mga_getparam_t __user *getparam;
-
+
if (copy_from_user(&getparam32, (void __user *)arg, sizeof(getparam32)))
return -EFAULT;
@@ -125,7 +125,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
|| __put_user((void __user *)(unsigned long)getparam32.value, &getparam->value))
return -EFAULT;
- return drm_ioctl(file->f_dentry->d_inode, file,
+ return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
}
@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
return -EFAULT;
if (copy_to_user((void __user *)arg, &dma_bootstrap32,
- sizeof(dma_bootstrap32)))
+ sizeof(dma_bootstrap32)))
return -EFAULT;
return 0;
@@ -219,7 +219,7 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd,
if (nr < DRM_COMMAND_BASE)
return drm_compat_ioctl(filp, cmd, arg);
-
+
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
diff --git a/linux-core/nouveau_buffer.c b/linux-core/nouveau_buffer.c
new file mode 100644
index 00000000..a652bb1d
--- /dev/null
+++ b/linux-core/nouveau_buffer.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2007 Dave Airlied
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Authors: Dave Airlied <airlied@linux.ie>
+ * Ben Skeggs <darktama@iinet.net.au>
+ * Jeremy Kolb <jkolb@brandeis.edu>
+ */
+
+#include "drmP.h"
+#include "nouveau_drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static struct drm_ttm_backend *
+nouveau_bo_create_ttm_backend_entry(struct drm_device * dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->gart_info.type) {
+ case NOUVEAU_GART_AGP:
+ return drm_agp_init_ttm(dev);
+ case NOUVEAU_GART_SGDMA:
+ return nouveau_sgdma_init_ttm(dev);
+ default:
+ DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
+ break;
+ }
+
+ return NULL;
+}
+
+static int
+nouveau_bo_fence_type(struct drm_buffer_object *bo,
+ uint32_t *fclass, uint32_t *type)
+{
+ /* When we get called, *fclass is set to the requested fence class */
+
+ if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ *type = 3;
+ else
+ *type = 1;
+ return 0;
+
+}
+
+static int
+nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
+{
+ /* We'll do this from user space. */
+ return 0;
+}
+
+static int
+nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
+ struct drm_mem_type_manager *man)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CACHED;
+ man->drm_bus_maptype = 0;
+ break;
+ case DRM_BO_MEM_VRAM:
+ man->flags = _DRM_FLAG_MEMTYPE_FIXED |
+ _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_NEEDS_IOREMAP;
+ man->io_addr = NULL;
+ man->drm_bus_maptype = _DRM_FRAME_BUFFER;
+ man->io_offset = drm_get_resource_start(dev, 1);
+ man->io_size = drm_get_resource_len(dev, 1);
+ if (man->io_size > nouveau_mem_fb_amount(dev))
+ man->io_size = nouveau_mem_fb_amount(dev);
+ break;
+ case DRM_BO_MEM_PRIV0:
+ /* Unmappable VRAM */
+ man->flags = _DRM_FLAG_MEMTYPE_CMA;
+ man->drm_bus_maptype = 0;
+ break;
+ case DRM_BO_MEM_TT:
+ switch (dev_priv->gart_info.type) {
+ case NOUVEAU_GART_AGP:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CSELECT |
+ _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_AGP;
+ break;
+ case NOUVEAU_GART_SGDMA:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CSELECT |
+ _DRM_FLAG_MEMTYPE_CMA;
+ man->drm_bus_maptype = _DRM_SCATTER_GATHER;
+ break;
+ default:
+ DRM_ERROR("Unknown GART type: %d\n",
+ dev_priv->gart_info.type);
+ return -EINVAL;
+ }
+
+ man->io_offset = dev_priv->gart_info.aper_base;
+ man->io_size = dev_priv->gart_info.aper_size;
+ man->io_addr = NULL;
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static uint64_t
+nouveau_bo_evict_flags(struct drm_buffer_object *bo)
+{
+ switch (bo->mem.mem_type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return DRM_BO_FLAG_MEM_LOCAL;
+ default:
+ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+ }
+ return 0;
+}
+
+
+/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
+ * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
+ */
+static int
+nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem)
+{
+ struct drm_device *dev = bo->dev;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+ uint32_t srch, dsth, page_count;
+
+ /* Can happen during init/takedown */
+ if (!dchan->chan)
+ return -EINVAL;
+
+ srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
+ dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
+ if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
+ dchan->m2mf_dma_source = srch;
+ dchan->m2mf_dma_destin = dsth;
+
+ BEGIN_RING(NvSubM2MF,
+ NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
+ OUT_RING (dchan->m2mf_dma_source);
+ OUT_RING (dchan->m2mf_dma_destin);
+ }
+
+ page_count = new_mem->num_pages;
+ while (page_count) {
+ int line_count = (page_count > 2047) ? 2047 : page_count;
+
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
+ OUT_RING (old_mem->mm_node->start << PAGE_SHIFT);
+ OUT_RING (new_mem->mm_node->start << PAGE_SHIFT);
+ OUT_RING (PAGE_SIZE); /* src_pitch */
+ OUT_RING (PAGE_SIZE); /* dst_pitch */
+ OUT_RING (PAGE_SIZE); /* line_length */
+ OUT_RING (line_count);
+ OUT_RING ((1<<8)|(1<<0));
+ OUT_RING (0);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+ OUT_RING (0);
+
+ page_count -= line_count;
+ }
+
+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
+ DRM_FENCE_TYPE_EXE, 0, new_mem);
+}
+
+/* Flip pages into the GART and move if we can. */
+static int
+nouveau_bo_move_gart(struct drm_buffer_object *bo, int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem)
+{
+ struct drm_device *dev = bo->dev;
+ struct drm_bo_mem_reg tmp_mem;
+ int ret;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_CACHED |
+ DRM_BO_FLAG_FORCE_CACHING);
+
+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+
+ if (ret)
+ return ret;
+
+ ret = drm_ttm_bind (bo->ttm, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+
+out_cleanup:
+ if (tmp_mem.mm_node) {
+ mutex_lock(&dev->struct_mutex);
+ if (tmp_mem.mm_node != bo->pinned_node)
+ drm_mm_put_block(tmp_mem.mm_node);
+ tmp_mem.mm_node = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return ret;
+}
+
+static int
+nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
+ struct drm_bo_mem_reg *new_mem)
+{
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+
+ if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+#if 0
+ if (!nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
+#endif
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ else
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+#if 0
+ if (nouveau_bo_move_to_gart(bo, evict, no_wait, new_mem))
+#endif
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ else {
+// if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ return 0;
+}
+
+static void
+nouveau_bo_flush_ttm(struct drm_ttm *ttm)
+{
+}
+
+static uint32_t nouveau_mem_prios[] = {
+ DRM_BO_MEM_PRIV0,
+ DRM_BO_MEM_VRAM,
+ DRM_BO_MEM_TT,
+ DRM_BO_MEM_LOCAL
+};
+static uint32_t nouveau_busy_prios[] = {
+ DRM_BO_MEM_TT,
+ DRM_BO_MEM_PRIV0,
+ DRM_BO_MEM_VRAM,
+ DRM_BO_MEM_LOCAL
+};
+
+struct drm_bo_driver nouveau_bo_driver = {
+ .mem_type_prio = nouveau_mem_prios,
+ .mem_busy_prio = nouveau_busy_prios,
+ .num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
+ .num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
+ .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+ .fence_type = nouveau_bo_fence_type,
+ .invalidate_caches = nouveau_bo_invalidate_caches,
+ .init_mem_type = nouveau_bo_init_mem_type,
+ .evict_flags = nouveau_bo_evict_flags,
+ .move = nouveau_bo_move,
+ .ttm_cache_flush= nouveau_bo_flush_ttm
+};
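
nouveau_bo_move_m2mf() above copies one PAGE_SIZE line per page and caps every M2MF request at 2047 lines. A stand-alone sketch of just that chunking, with an arbitrary 5000-page buffer assumed:

#include <stdio.h>

int main(void)
{
	unsigned int page_count = 5000;		/* assumed buffer size in pages */

	while (page_count) {
		unsigned int line_count = page_count > 2047 ? 2047 : page_count;

		/* one M2MF request per chunk: 2047, 2047, then 906 lines */
		printf("submit %u lines\n", line_count);
		page_count -= line_count;
	}
	return 0;
}
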
diff --git a/linux-core/nouveau_drv.c b/linux-core/nouveau_drv.c
index 01de67de..e9623eb1 100644
--- a/linux-core/nouveau_drv.c
+++ b/linux-core/nouveau_drv.c
@@ -81,6 +81,9 @@ static struct drm_driver driver = {
.remove = __devexit_p(drm_cleanup_pci),
},
+ .bo_driver = &nouveau_bo_driver,
+ .fence_driver = &nouveau_fence_driver,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
diff --git a/linux-core/nouveau_fence.c b/linux-core/nouveau_fence.c
new file mode 100644
index 00000000..4e624a7a
--- /dev/null
+++ b/linux-core/nouveau_fence.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2007 Ben Skeggs.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+ * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_dma.h"
+
+static int
+nouveau_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
+
+ /* DRM's channel always uses IRQs to signal fences */
+ if (class == dev_priv->channel.chan->id)
+ return 1;
+
+ /* Other channels don't use IRQs at all yet */
+ return 0;
+}
+
+static int
+nouveau_fence_emit(struct drm_device *dev, uint32_t class, uint32_t flags,
+ uint32_t *breadcrumb, uint32_t *native_type)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_channel *chan = dev_priv->fifos[class];
+ struct nouveau_drm_channel *dchan = &dev_priv->channel;
+
+ DRM_DEBUG("class=%d, flags=0x%08x\n", class, flags);
+
+ /* We can't emit fences on client channels, update sequence number
+ * and userspace will emit the fence
+ */
+ *breadcrumb = ++chan->next_sequence;
+ *native_type = DRM_FENCE_TYPE_EXE;
+ if (chan != dchan->chan) {
+ DRM_DEBUG("user fence 0x%08x\n", *breadcrumb);
+ return 0;
+ }
+
+ DRM_DEBUG("emit 0x%08x\n", *breadcrumb);
+ BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_SET_REF, 1);
+ OUT_RING (*breadcrumb);
+ BEGIN_RING(NvSubM2MF, 0x0150, 1);
+ OUT_RING (0);
+ FIRE_RING ();
+
+ return 0;
+}
+
+static void
+nouveau_fence_perform_flush(struct drm_device *dev, uint32_t class)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_fence_class_manager *fc = &dev->fm.fence_class[class];
+ struct nouveau_channel *chan = dev_priv->fifos[class];
+ uint32_t pending_types = 0;
+
+ DRM_DEBUG("class=%d\n", class);
+
+ pending_types = fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+ DRM_DEBUG("pending: 0x%08x 0x%08x\n", pending_types,
+ fc->pending_flush);
+
+ if (pending_types) {
+ uint32_t sequence = NV_READ(chan->ref_cnt);
+
+ DRM_DEBUG("got 0x%08x\n", sequence);
+ drm_fence_handler(dev, class, sequence, pending_types, 0);
+ }
+}
+
+static void
+nouveau_fence_poke_flush(struct drm_device *dev, uint32_t class)
+{
+ struct drm_fence_manager *fm = &dev->fm;
+ unsigned long flags;
+
+ DRM_DEBUG("class=%d\n", class);
+
+ write_lock_irqsave(&fm->lock, flags);
+ nouveau_fence_perform_flush(dev, class);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+void
+nouveau_fence_handler(struct drm_device *dev, int channel)
+{
+ struct drm_fence_manager *fm = &dev->fm;
+
+ DRM_DEBUG("class=%d\n", channel);
+
+ write_lock(&fm->lock);
+ nouveau_fence_perform_flush(dev, channel);
+ write_unlock(&fm->lock);
+}
+
+struct drm_fence_driver nouveau_fence_driver = {
+ .num_classes = 8,
+ .wrap_diff = (1 << 30),
+ .flush_diff = (1 << 29),
+ .sequence_mask = 0xffffffffU,
+ .lazy_capable = 1,
+ .has_irq = nouveau_fence_has_irq,
+ .emit = nouveau_fence_emit,
+ .poke_flush = nouveau_fence_poke_flush
+};
diff --git a/linux-core/nouveau_sgdma.c b/linux-core/nouveau_sgdma.c
index b86c5d7c..cc4d5a92 100644
--- a/linux-core/nouveau_sgdma.c
+++ b/linux-core/nouveau_sgdma.c
@@ -25,7 +25,7 @@ nouveau_sgdma_needs_ub_cache_adjust(struct drm_ttm_backend *be)
static int
nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
- struct page **pages)
+ struct page **pages, struct page *dummy_read_page)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
int p, d, o;
@@ -41,8 +41,11 @@ nouveau_sgdma_populate(struct drm_ttm_backend *be, unsigned long num_pages,
nvbe->pages_populated = d = 0;
for (p = 0; p < num_pages; p++) {
for (o = 0; o < PAGE_SIZE; o += NV_CTXDMA_PAGE_SIZE) {
+ struct page *page = pages[p];
+ if (!page)
+ page = dummy_read_page;
nvbe->pagelist[d] = pci_map_page(nvbe->dev->pdev,
- pages[p], o,
+ page, o,
NV_CTXDMA_PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(nvbe->pagelist[d])) {
@@ -128,7 +131,7 @@ nouveau_sgdma_unbind(struct drm_ttm_backend *be)
if (nvbe->is_bound) {
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned int pte;
-
+
pte = nvbe->pte_start;
while (pte < (nvbe->pte_start + nvbe->pages)) {
uint64_t pteval = dev_priv->gart_info.sg_dummy_bus;
@@ -136,8 +139,8 @@ nouveau_sgdma_unbind(struct drm_ttm_backend *be)
if (dev_priv->card_type < NV_50) {
INSTANCE_WR(gpuobj, pte, pteval | 3);
} else {
- INSTANCE_WR(gpuobj, (pte<<1)+0, 0x00000010);
- INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000004);
+ INSTANCE_WR(gpuobj, (pte<<1)+0, pteval | 0x21);
+ INSTANCE_WR(gpuobj, (pte<<1)+1, 0x00000000);
}
pte++;
@@ -218,15 +221,14 @@ nouveau_sgdma_init(struct drm_device *dev)
return ret;
}
- if (dev_priv->card_type < NV_50) {
- dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32);
- SetPageLocked(dev_priv->gart_info.sg_dummy_page);
- dev_priv->gart_info.sg_dummy_bus =
- pci_map_page(dev->pdev,
- dev_priv->gart_info.sg_dummy_page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ dev_priv->gart_info.sg_dummy_page =
+ alloc_page(GFP_KERNEL|__GFP_DMA32);
+ SetPageLocked(dev_priv->gart_info.sg_dummy_page);
+ dev_priv->gart_info.sg_dummy_bus =
+ pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (dev_priv->card_type < NV_50) {
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
* on those cards? */
@@ -242,8 +244,9 @@ nouveau_sgdma_init(struct drm_device *dev)
}
} else {
for (i=0; i<obj_size; i+=8) {
- INSTANCE_WR(gpuobj, (i+0)/4, 0); //x00000010);
- INSTANCE_WR(gpuobj, (i+4)/4, 0); //0x00000004);
+ INSTANCE_WR(gpuobj, (i+0)/4,
+ dev_priv->gart_info.sg_dummy_bus | 0x21);
+ INSTANCE_WR(gpuobj, (i+4)/4, 0);
}
}
@@ -299,7 +302,7 @@ nouveau_sgdma_nottm_hack_init(struct drm_device *dev)
}
dev_priv->gart_info.sg_handle = sgreq.handle;
- if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist))) {
+ if ((ret = be->func->populate(be, dev->sg->pages, dev->sg->pagelist, dev->bm.dummy_read_page))) {
DRM_ERROR("failed populate: %d\n", ret);
return ret;
}
@@ -336,4 +339,3 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
DRM_ERROR("Unimplemented on NV50\n");
return -EINVAL;
}
-
diff --git a/linux-core/r128_ioc32.c b/linux-core/r128_ioc32.c
index 6b757576..64b16798 100644
--- a/linux-core/r128_ioc32.c
+++ b/linux-core/r128_ioc32.c
@@ -64,10 +64,10 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
{
drm_r128_init32_t init32;
drm_r128_init_t __user *init;
-
+
if (copy_from_user(&init32, (void __user *)arg, sizeof(init32)))
return -EFAULT;
-
+
init = compat_alloc_user_space(sizeof(*init));
if (!access_ok(VERIFY_WRITE, init, sizeof(*init))
|| __put_user(init32.func, &init->func)
@@ -94,7 +94,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
|| __put_user(init32.agp_textures_offset,
&init->agp_textures_offset))
return -EFAULT;
-
+
return drm_ioctl(file->f_dentry->d_inode, file,
DRM_IOCTL_R128_INIT, (unsigned long)init);
}
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index 39c35134..f0f3320e 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -49,7 +49,7 @@ static int dri_library_name(struct drm_device * dev, char * buf)
return snprintf(buf, PAGE_SIZE, "%s\n",
(family < CHIP_R200) ? "radeon" :
((family < CHIP_R300) ? "r200" :
- "r300"));
+ "r300"));
}
static struct pci_device_id pciidlist[] = {
diff --git a/linux-core/radeon_ioc32.c b/linux-core/radeon_ioc32.c
index bc8aa35a..a842c743 100644
--- a/linux-core/radeon_ioc32.c
+++ b/linux-core/radeon_ioc32.c
@@ -136,7 +136,7 @@ typedef struct drm_radeon_stipple32 {
static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
unsigned long arg)
{
- drm_radeon_stipple32_t __user *argp = (void __user *) arg;
+ drm_radeon_stipple32_t __user *argp = (void __user *)arg;
drm_radeon_stipple_t __user *request;
u32 mask;
@@ -176,7 +176,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
drm_radeon_tex_image32_t img32;
drm_radeon_tex_image_t __user *image;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
if (req32.image == 0)
return -EINVAL;
@@ -223,7 +223,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
drm_radeon_vertex2_32_t req32;
drm_radeon_vertex2_t __user *request;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
@@ -255,7 +255,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
drm_radeon_cmd_buffer32_t req32;
drm_radeon_cmd_buffer_t __user *request;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
@@ -283,7 +283,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
drm_radeon_getparam32_t req32;
drm_radeon_getparam_t __user *request;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
@@ -310,7 +310,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
drm_radeon_mem_alloc32_t req32;
drm_radeon_mem_alloc_t __user *request;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
@@ -336,7 +336,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
drm_radeon_irq_emit32_t req32;
drm_radeon_irq_emit_t __user *request;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
@@ -362,7 +362,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
drm_radeon_setparam32_t req32;
drm_radeon_setparam_t __user *request;
- if (copy_from_user(&req32, (void __user *) arg, sizeof(req32)))
+ if (copy_from_user(&req32, (void __user *)arg, sizeof(req32)))
return -EFAULT;
request = compat_alloc_user_space(sizeof(*request));
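The radeon_ioc32.c wrappers above all follow the same shape: copy a 32-bit struct from user space, allocate a native-sized struct with compat_alloc_user_space(), translate it field by field, then forward to drm_ioctl(). The translation is needed because a struct that carries a user pointer has a different size and field layout under a 32-bit ABI than under the 64-bit kernel ABI. A minimal user-space sketch of that mismatch, using made-up struct names rather than the real drm_radeon_* layouts:

#include <stdio.h>
#include <stdint.h>

/* 32-bit ABI view: user pointers travel as 32-bit integers. */
struct stipple32 {
	uint32_t mask;		/* really a user pointer on the 32-bit side */
};

/* Native 64-bit view: the same field is a real pointer. */
struct stipple64 {
	unsigned int *mask;
};

int main(void)
{
	/* On a 64-bit build the two layouts differ, so an ioctl payload from
	 * a 32-bit process cannot be handed to the 64-bit handler directly;
	 * a compat wrapper has to repack it first. */
	printf("32-bit layout: %zu bytes\n", sizeof(struct stipple32));
	printf("64-bit layout: %zu bytes\n", sizeof(struct stipple64));
	return 0;
}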
diff --git a/linux-core/sis_mm.c b/linux-core/sis_mm.c
index 9222b08d..6782731d 100644
--- a/linux-core/sis_mm.c
+++ b/linux-core/sis_mm.c
@@ -74,7 +74,7 @@ static void sis_sman_mm_destroy(void *private)
;
}
-unsigned long sis_sman_mm_offset(void *private, void *ref)
+static unsigned long sis_sman_mm_offset(void *private, void *ref)
{
return ~((unsigned long)ref);
}
@@ -114,12 +114,12 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
}
-static int sis_drm_alloc(struct drm_device * dev, struct drm_file *file_priv,
+static int sis_drm_alloc(struct drm_device *dev, struct drm_file *file_priv,
void *data, int pool)
{
drm_sis_private_t *dev_priv = dev->dev_private;
@@ -204,7 +204,7 @@ static int sis_ioctl_agp_init(struct drm_device *dev, void *data,
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@@ -231,8 +231,7 @@ static drm_local_map_t *sis_reg_init(struct drm_device *dev)
return NULL;
}
-int
-sis_idle(struct drm_device *dev)
+int sis_idle(struct drm_device *dev)
{
drm_sis_private_t *dev_priv = dev->dev_private;
uint32_t idle_reg;
@@ -249,7 +248,7 @@ sis_idle(struct drm_device *dev)
return 0;
}
}
-
+
/*
* Implement a device switch here if needed
*/
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index a6c59832..532fae6a 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -94,9 +94,9 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
man->drm_bus_maptype = 0;
break;
- case DRM_BO_MEM_TT:
+ case DRM_BO_MEM_TT:
/* Dynamic agpgart memory */
-
+
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
@@ -109,21 +109,21 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
/* Only to get pte protection right. */
- man->drm_bus_maptype = _DRM_AGP;
+ man->drm_bus_maptype = _DRM_AGP;
break;
- case DRM_BO_MEM_VRAM:
+ case DRM_BO_MEM_VRAM:
/* "On-card" video ram */
-
+
man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
man->drm_bus_maptype = _DRM_FRAME_BUFFER;
man->io_addr = NULL;
return via_vram_info(dev, &man->io_offset, &man->io_size);
break;
- case DRM_BO_MEM_PRIV0:
+ case DRM_BO_MEM_PRIV0:
/* Pre-bound agpgart memory */
-
+
if (!(drm_core_has_AGP(dev) && dev->agp)) {
DRM_ERROR("AGP is not enabled for memory type %u\n",
(unsigned)type);
@@ -144,7 +144,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
return 0;
}
-uint32_t via_evict_mask(struct drm_buffer_object *bo)
+uint64_t via_evict_flags(struct drm_buffer_object *bo)
{
switch (bo->mem.mem_type) {
case DRM_BO_MEM_LOCAL:
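The rename of via_evict_mask() to via_evict_flags() with a uint64_t return appears to track the buffer-object placement flags being widened to 64 bits elsewhere in this merge; returning placement flags through a uint32_t would silently drop any bit at position 32 or above. A small stand-alone demonstration of that truncation (the flag values here are invented purely for illustration):

#include <stdio.h>
#include <stdint.h>

#define FAKE_FLAG_HIGH   (1ULL << 35)	/* hypothetical flag above bit 31 */
#define FAKE_FLAG_LOCAL  (1ULL << 0)

static uint32_t evict_flags_32(void) { return FAKE_FLAG_HIGH | FAKE_FLAG_LOCAL; }
static uint64_t evict_flags_64(void) { return FAKE_FLAG_HIGH | FAKE_FLAG_LOCAL; }

int main(void)
{
	/* The 32-bit return loses the high flag entirely. */
	printf("32-bit return: 0x%llx\n", (unsigned long long)evict_flags_32());
	printf("64-bit return: 0x%llx\n", (unsigned long long)evict_flags_64());
	return 0;
}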
diff --git a/linux-core/via_dmablit.c b/linux-core/via_dmablit.c
index d44c26f4..a6a21782 100644
--- a/linux-core/via_dmablit.c
+++ b/linux-core/via_dmablit.c
@@ -1,5 +1,5 @@
/* via_dmablit.c -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright (C) 2005 Thomas Hellstrom, All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -16,22 +16,22 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Partially based on code obtained from Digeo Inc.
*/
/*
- * Unmaps the DMA mappings.
- * FIXME: Is this a NoOp on x86? Also
- * FIXME: What happens if this one is called and a pending blit has previously done
- * the same DMA mappings?
+ * Unmaps the DMA mappings.
+ * FIXME: Is this a NoOp on x86? Also
+ * FIXME: What happens if this one is called and a pending blit has previously done
+ * the same DMA mappings?
*/
#include "drmP.h"
@@ -65,7 +65,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
int num_desc = vsg->num_desc;
unsigned cur_descriptor_page = num_desc / vsg->descriptors_per_page;
unsigned descriptor_this_page = num_desc % vsg->descriptors_per_page;
- drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ drm_via_descriptor_t *desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
dma_addr_t next = vsg->chain_start;
@@ -73,7 +73,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
if (descriptor_this_page-- == 0) {
cur_descriptor_page--;
descriptor_this_page = vsg->descriptors_per_page - 1;
- desc_ptr = vsg->desc_pages[cur_descriptor_page] +
+ desc_ptr = vsg->desc_pages[cur_descriptor_page] +
descriptor_this_page;
}
dma_unmap_single(&pdev->dev, next, sizeof(*desc_ptr), DMA_TO_DEVICE);
@@ -93,7 +93,7 @@ via_unmap_blit_from_device(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
static void
via_map_blit_for_device(struct pci_dev *pdev,
const drm_via_dmablit_t *xfer,
- drm_via_sg_info_t *vsg,
+ drm_via_sg_info_t *vsg,
int mode)
{
unsigned cur_descriptor_page = 0;
@@ -110,7 +110,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
dma_addr_t next = 0 | VIA_DMA_DPR_EC;
drm_via_descriptor_t *desc_ptr = NULL;
- if (mode == 1)
+ if (mode == 1)
desc_ptr = vsg->desc_pages[cur_descriptor_page];
for (cur_line = 0; cur_line < xfer->num_lines; ++cur_line) {
@@ -118,7 +118,7 @@ via_map_blit_for_device(struct pci_dev *pdev,
line_len = xfer->line_length;
cur_fb = fb_addr;
cur_mem = mem_addr;
-
+
while (line_len > 0) {
remaining_len = min(PAGE_SIZE-VIA_PGOFF(cur_mem), line_len);
@@ -131,10 +131,10 @@ via_map_blit_for_device(struct pci_dev *pdev,
VIA_PGOFF(cur_mem), remaining_len,
vsg->direction);
desc_ptr->dev_addr = cur_fb;
-
+
desc_ptr->size = remaining_len;
desc_ptr->next = (uint32_t) next;
- next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
+ next = dma_map_single(&pdev->dev, desc_ptr, sizeof(*desc_ptr),
DMA_TO_DEVICE);
desc_ptr++;
if (++num_descriptors_this_page >= vsg->descriptors_per_page) {
@@ -142,12 +142,12 @@ via_map_blit_for_device(struct pci_dev *pdev,
desc_ptr = vsg->desc_pages[++cur_descriptor_page];
}
}
-
+
num_desc++;
cur_mem += remaining_len;
cur_fb += remaining_len;
}
-
+
mem_addr += xfer->mem_stride;
fb_addr += xfer->fb_stride;
}
@@ -160,14 +160,14 @@ via_map_blit_for_device(struct pci_dev *pdev,
}
/*
- * Function that frees up all resources for a blit. It is usable even if the
+ * Function that frees up all resources for a blit. It is usable even if the
* blit info has only been partially built as long as the status enum is consistent
* with the actual status of the used resources.
*/
static void
-via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
+via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
{
struct page *page;
int i;
@@ -184,7 +184,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
case dr_via_pages_locked:
for (i=0; i<vsg->num_pages; ++i) {
if ( NULL != (page = vsg->pages[i])) {
- if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
+ if (! PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
SetPageDirty(page);
page_cache_release(page);
}
@@ -199,7 +199,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
vsg->bounce_buffer = NULL;
}
vsg->free_on_sequence = 0;
-}
+}
/*
* Fire a blit engine.
@@ -212,7 +212,7 @@ via_fire_dmablit(struct drm_device *dev, drm_via_sg_info_t *vsg, int engine)
VIA_WRITE(VIA_PCI_DMA_MAR0 + engine*0x10, 0);
VIA_WRITE(VIA_PCI_DMA_DAR0 + engine*0x10, 0);
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_DD | VIA_DMA_CSR_TD |
VIA_DMA_CSR_DE);
VIA_WRITE(VIA_PCI_DMA_MR0 + engine*0x04, VIA_DMA_MR_CM | VIA_DMA_MR_TDIE);
VIA_WRITE(VIA_PCI_DMA_BCR0 + engine*0x10, 0);
@@ -232,20 +232,22 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int ret;
unsigned long first_pfn = VIA_PFN(xfer->mem_addr);
- vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
+ vsg->num_pages = VIA_PFN(xfer->mem_addr + (xfer->num_lines * xfer->mem_stride -1)) -
first_pfn + 1;
-
+
if (NULL == (vsg->pages = vmalloc(sizeof(struct page *) * vsg->num_pages)))
return -ENOMEM;
memset(vsg->pages, 0, sizeof(struct page *) * vsg->num_pages);
down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, (unsigned long) xfer->mem_addr,
- vsg->num_pages, (vsg->direction == DMA_FROM_DEVICE),
+ ret = get_user_pages(current, current->mm,
+ (unsigned long)xfer->mem_addr,
+ vsg->num_pages,
+ (vsg->direction == DMA_FROM_DEVICE),
0, vsg->pages, NULL);
up_read(&current->mm->mmap_sem);
if (ret != vsg->num_pages) {
- if (ret < 0)
+ if (ret < 0)
return ret;
vsg->state = dr_via_pages_locked;
return -EINVAL;
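The vsg->num_pages computation in the hunk above (last page-frame number minus first, plus one) is the usual way to count how many pages a byte range touches, since the range can start and end at arbitrary offsets within a page. A user-space restatement of the same arithmetic, with VIA_PFN approximated by a plain shift and 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN(addr)  ((unsigned long)(addr) >> PAGE_SHIFT)

/* Pages spanned by [addr, addr + len), mirroring the vsg->num_pages math. */
static unsigned long pages_spanned(unsigned long addr, unsigned long len)
{
	return PFN(addr + len - 1) - PFN(addr) + 1;
}

int main(void)
{
	/* 8 KiB starting 0x64 bytes into a page touches three pages. */
	printf("%lu\n", pages_spanned(0x1064, 8192));
	return 0;
}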
@@ -261,22 +263,22 @@ via_lock_all_dma_pages(drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
* quite large for some blits, and pages don't need to be contingous.
*/
-static int
+static int
via_alloc_desc_pages(drm_via_sg_info_t *vsg)
{
int i;
-
+
vsg->descriptors_per_page = PAGE_SIZE / sizeof( drm_via_descriptor_t);
- vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
+ vsg->num_desc_pages = (vsg->num_desc + vsg->descriptors_per_page - 1) /
vsg->descriptors_per_page;
- if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
+ if (NULL == (vsg->desc_pages = kmalloc(sizeof(void *) * vsg->num_desc_pages, GFP_KERNEL)))
return -ENOMEM;
-
+
memset(vsg->desc_pages, 0, sizeof(void *) * vsg->num_desc_pages);
vsg->state = dr_via_desc_pages_alloc;
for (i=0; i<vsg->num_desc_pages; ++i) {
- if (NULL == (vsg->desc_pages[i] =
+ if (NULL == (vsg->desc_pages[i] =
(drm_via_descriptor_t *) __get_free_page(GFP_KERNEL)))
return -ENOMEM;
}
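via_alloc_desc_pages() sizes the descriptor page array with the standard round-up division, num_desc_pages = (num_desc + descriptors_per_page - 1) / descriptors_per_page. A quick stand-alone check of that formula (16-byte descriptors assumed for the sake of the example):

#include <stdio.h>

/* Classic ceiling division, as used for vsg->num_desc_pages. */
static unsigned long div_round_up(unsigned long n, unsigned long per)
{
	return (n + per - 1) / per;
}

int main(void)
{
	unsigned long per_page = 4096 / 16;	/* descriptors per 4 KiB page */

	printf("%lu\n", div_round_up(1, per_page));	/* 1 page  */
	printf("%lu\n", div_round_up(256, per_page));	/* 1 page  */
	printf("%lu\n", div_round_up(257, per_page));	/* 2 pages */
	return 0;
}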
@@ -284,7 +286,7 @@ via_alloc_desc_pages(drm_via_sg_info_t *vsg)
vsg->num_desc);
return 0;
}
-
+
static void
via_abort_dmablit(struct drm_device *dev, int engine)
{
@@ -298,7 +300,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
- VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
+ VIA_WRITE(VIA_PCI_DMA_CSR0 + engine*0x04, VIA_DMA_CSR_TD | VIA_DMA_CSR_DD);
}
@@ -309,7 +311,7 @@ via_dmablit_engine_off(struct drm_device *dev, int engine)
* task. Basically the task of the interrupt handler is to submit a new blit to the engine, while
* the workqueue task takes care of processing associated with the old blit.
*/
-
+
void
via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
{
@@ -329,19 +331,19 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
- done_transfer = blitq->is_active &&
+ done_transfer = blitq->is_active &&
(( status = VIA_READ(VIA_PCI_DMA_CSR0 + engine*0x04)) & VIA_DMA_CSR_TD);
- done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
+ done_transfer = done_transfer || ( blitq->aborting && !(status & VIA_DMA_CSR_DE));
cur = blitq->cur;
if (done_transfer) {
blitq->blits[cur]->aborted = blitq->aborting;
blitq->done_blit_handle++;
- DRM_WAKEUP(blitq->blit_queue + cur);
+ DRM_WAKEUP(blitq->blit_queue + cur);
cur++;
- if (cur >= VIA_NUM_BLIT_SLOTS)
+ if (cur >= VIA_NUM_BLIT_SLOTS)
cur = 0;
blitq->cur = cur;
@@ -353,7 +355,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->is_active = 0;
blitq->aborting = 0;
- schedule_work(&blitq->wq);
+ schedule_work(&blitq->wq);
} else if (blitq->is_active && time_after_eq(jiffies, blitq->end)) {
@@ -365,7 +367,7 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
blitq->aborting = 1;
blitq->end = jiffies + DRM_HZ;
}
-
+
if (!blitq->is_active) {
if (blitq->num_outstanding) {
via_fire_dmablit(dev, blitq->blits[cur], engine);
@@ -383,14 +385,14 @@ via_dmablit_handler(struct drm_device *dev, int engine, int from_irq)
}
via_dmablit_engine_off(dev, engine);
}
- }
+ }
if (from_irq) {
spin_unlock(&blitq->blit_lock);
} else {
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-}
+}
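The comment above spells out the split this handler relies on: the interrupt path only does what must happen immediately (note the engine status, wake waiters, fire the next queued blit), and everything slow, such as unmapping and freeing a finished blit, is pushed to a workqueue. A heavily reduced kernel-style sketch of that shape, assuming the modern two-argument INIT_WORK (the diff itself keeps a pre-2.6.20 fallback) and with all VIA-specific detail stripped out:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>

struct blit_engine {
	spinlock_t lock;
	struct work_struct cleanup;	/* deferred processing of finished blits */
	int active;
};

/* Slow path: runs in process context, may sleep, frees resources. */
static void blit_cleanup(struct work_struct *work)
{
	struct blit_engine *eng = container_of(work, struct blit_engine, cleanup);

	(void)eng;			/* unmap and free the finished blit here */
}

/* Fast path: runs in interrupt context, so only bookkeeping and a kick. */
static irqreturn_t blit_irq(int irq, void *data)
{
	struct blit_engine *eng = data;

	spin_lock(&eng->lock);
	eng->active = 0;		/* hardware reported the transfer done */
	schedule_work(&eng->cleanup);	/* heavy lifting happens later */
	spin_unlock(&eng->lock);
	return IRQ_HANDLED;
}

static void blit_engine_init(struct blit_engine *eng)
{
	spin_lock_init(&eng->lock);
	INIT_WORK(&eng->cleanup, blit_cleanup);
	eng->active = 0;
}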
@@ -426,13 +428,13 @@ via_dmablit_active(drm_via_blitq_t *blitq, int engine, uint32_t handle, wait_que
return active;
}
-
+
/*
* Sync. Wait for at least three seconds for the blit to be performed.
*/
static int
-via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
+via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
@@ -441,12 +443,12 @@ via_dmablit_sync(struct drm_device *dev, uint32_t handle, int engine)
int ret = 0;
if (via_dmablit_active(blitq, engine, handle, &queue)) {
- DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
+ DRM_WAIT_ON(ret, *queue, 3 * DRM_HZ,
!via_dmablit_active(blitq, engine, handle, NULL));
}
DRM_DEBUG("DMA blit sync handle 0x%x engine %d returned %d\n",
handle, engine, ret);
-
+
return ret;
}
@@ -468,12 +470,12 @@ via_dmablit_timer(unsigned long data)
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
-
- DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
+
+ DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
(unsigned long) jiffies);
via_dmablit_handler(dev, engine, 0);
-
+
if (!timer_pending(&blitq->poll_timer)) {
blitq->poll_timer.expires = jiffies+1;
add_timer(&blitq->poll_timer);
@@ -497,7 +499,7 @@ via_dmablit_timer(unsigned long data)
*/
-static void
+static void
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
via_dmablit_workqueue(void *data)
#else
@@ -509,42 +511,42 @@ via_dmablit_workqueue(struct work_struct *work)
#else
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
#endif
- struct drm_device *dev = blitq->dev;
+ struct drm_device *dev = blitq->dev;
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
-
-
- DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
+
+
+ DRM_DEBUG("Workqueue task called for blit engine %ld\n",(unsigned long)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues));
spin_lock_irqsave(&blitq->blit_lock, irqsave);
-
+
while(blitq->serviced != blitq->cur) {
cur_released = blitq->serviced++;
DRM_DEBUG("Releasing blit slot %d\n", cur_released);
- if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->serviced >= VIA_NUM_BLIT_SLOTS)
blitq->serviced = 0;
-
+
cur_sg = blitq->blits[cur_released];
blitq->num_free++;
-
+
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
-
+
DRM_WAKEUP(&blitq->busy_queue);
-
+
via_free_sg_info(dev->pdev, cur_sg);
kfree(cur_sg);
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
}
-
+
/*
* Init all blit engines. Currently we use two, but some hardware have 4.
@@ -558,8 +560,8 @@ via_init_dmablit(struct drm_device *dev)
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_blitq_t *blitq;
- pci_set_master(dev->pdev);
-
+ pci_set_master(dev->pdev);
+
for (i=0; i< VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
blitq->dev = dev;
@@ -585,20 +587,20 @@ via_init_dmablit(struct drm_device *dev)
init_timer(&blitq->poll_timer);
blitq->poll_timer.function = &via_dmablit_timer;
blitq->poll_timer.data = (unsigned long) blitq;
- }
+ }
}
/*
* Build all info and do all mappings required for a blit.
*/
-
+
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
int draw = xfer->to_fb;
int ret = 0;
-
+
vsg->direction = (draw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
vsg->bounce_buffer = NULL;
@@ -612,7 +614,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
/*
* Below check is a driver limitation, not a hardware one. We
* don't want to lock unused pages, and don't want to incoporate the
- * extra logic of avoiding them. Make sure there are no.
+ * extra logic of avoiding them. Make sure there are no.
* (Not a big limitation anyway.)
*/
@@ -638,11 +640,11 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (xfer->num_lines > 2048 || (xfer->num_lines*xfer->mem_stride > (2048*2048*4))) {
DRM_ERROR("Too large PCI DMA bitblt.\n");
return -EINVAL;
- }
+ }
- /*
+ /*
* we allow a negative fb stride to allow flipping of images in
- * transfer.
+ * transfer.
*/
if (xfer->mem_stride < xfer->line_length ||
@@ -668,7 +670,7 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
((xfer->num_lines > 1) && ((xfer->mem_stride & 15) || (xfer->fb_stride & 3)))) {
DRM_ERROR("Invalid DRM bitblt alignment.\n");
return -EINVAL;
- }
+ }
#endif
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
@@ -684,17 +686,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
return ret;
}
via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
-
+
return 0;
}
-
+
/*
* Reserve one free slot in the blit queue. Will wait for one second for one
* to become available. Otherwise -EBUSY is returned.
*/
-static int
+static int
via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
{
int ret=0;
@@ -709,10 +711,10 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
if (ret) {
return (-EINTR == ret) ? -EAGAIN : ret;
}
-
+
spin_lock_irqsave(&blitq->blit_lock, irqsave);
}
-
+
blitq->num_free--;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
@@ -723,7 +725,7 @@ via_dmablit_grab_slot(drm_via_blitq_t *blitq, int engine)
* Hand back a free slot if we changed our mind.
*/
-static void
+static void
via_dmablit_release_slot(drm_via_blitq_t *blitq)
{
unsigned long irqsave;
@@ -739,8 +741,8 @@ via_dmablit_release_slot(drm_via_blitq_t *blitq)
*/
-static int
-via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
+static int
+via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
{
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
drm_via_sg_info_t *vsg;
@@ -771,15 +773,15 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
spin_lock_irqsave(&blitq->blit_lock, irqsave);
blitq->blits[blitq->head++] = vsg;
- if (blitq->head >= VIA_NUM_BLIT_SLOTS)
+ if (blitq->head >= VIA_NUM_BLIT_SLOTS)
blitq->head = 0;
blitq->num_outstanding++;
- xfer->sync.sync_handle = ++blitq->cur_blit_handle;
+ xfer->sync.sync_handle = ++blitq->cur_blit_handle;
spin_unlock_irqrestore(&blitq->blit_lock, irqsave);
xfer->sync.engine = engine;
- via_dmablit_handler(dev, engine, 0);
+ via_dmablit_handler(dev, engine, 0);
return 0;
}
@@ -787,7 +789,7 @@ via_dmablit(struct drm_device *dev, drm_via_dmablit_t *xfer)
/*
* Sync on a previously submitted blit. Note that the X server use signals extensively, and
* that there is a very big probability that this IOCTL will be interrupted by a signal. In that
- * case it returns with -EAGAIN for the signal to be delivered.
+ * case it returns with -EAGAIN for the signal to be delivered.
* The caller should then reissue the IOCTL. This is similar to what is being done for drmGetLock().
*/
@@ -797,7 +799,7 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
drm_via_blitsync_t *sync = data;
int err;
- if (sync->engine >= VIA_NUM_BLIT_ENGINES)
+ if (sync->engine >= VIA_NUM_BLIT_ENGINES)
return -EINVAL;
err = via_dmablit_sync(dev, sync->sync_handle, sync->engine);
@@ -807,15 +809,15 @@ via_dma_blit_sync( struct drm_device *dev, void *data, struct drm_file *file_pri
return err;
}
-
+
/*
* Queue a blit and hand back a handle to be used for sync. This IOCTL may be interrupted by a signal
- * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
+ * while waiting for a free slot in the blit queue. In that case it returns with -EAGAIN and should
* be reissued. See the above IOCTL code.
*/
-int
+int
via_dma_blit( struct drm_device *dev, void *data, struct drm_file *file_priv )
{
drm_via_dmablit_t *xfer = data;
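Both ioctls in this file document the same userspace contract: if a signal arrives while the call is waiting (for a free blit slot, or for a submitted blit to finish), the kernel returns -EAGAIN and the caller is expected to simply reissue the ioctl, just as with drmGetLock(). A hedged user-space helper showing that retry convention; the request code and argument are placeholders, not the real VIA definitions, and libdrm's drmIoctl() wrapper does essentially the same thing:

#include <errno.h>
#include <sys/ioctl.h>

/*
 * Reissue an ioctl for as long as it is interrupted by a signal.
 * 'request' and 'arg' stand in for whatever blit/sync ioctl is in use.
 */
int ioctl_retry(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && (errno == EAGAIN || errno == EINTR));

	return ret;
}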
diff --git a/linux-core/via_dmablit.h b/linux-core/via_dmablit.h
index 726ad25d..9b662a32 100644
--- a/linux-core/via_dmablit.h
+++ b/linux-core/via_dmablit.h
@@ -1,5 +1,5 @@
/* via_dmablit.h -- PCI DMA BitBlt support for the VIA Unichrome/Pro
- *
+ *
* Copyright 2005 Thomas Hellstrom.
* All Rights Reserved.
*
@@ -17,12 +17,12 @@
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
- * Authors:
+ * Authors:
* Thomas Hellstrom.
* Register info from Digeo Inc.
*/
@@ -67,7 +67,7 @@ typedef struct _drm_via_blitq {
unsigned cur;
unsigned num_free;
unsigned num_outstanding;
- unsigned long end;
+ unsigned long end;
int aborting;
int is_active;
drm_via_sg_info_t *blits[VIA_NUM_BLIT_SLOTS];
@@ -77,46 +77,46 @@ typedef struct _drm_via_blitq {
struct work_struct wq;
struct timer_list poll_timer;
} drm_via_blitq_t;
-
-/*
+
+/*
* PCI DMA Registers
* Channels 2 & 3 don't seem to be implemented in hardware.
*/
-
-#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
-#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
-#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
-#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
-
-#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
-#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
-#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
-#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
-
-#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
-#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
-#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
-#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
-
-#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
-#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
-#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
-#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
-
-#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
-#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
-#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
-#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
-
-#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
-#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
-#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
-#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
-
-#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
-
-/* Define for DMA engine */
+
+#define VIA_PCI_DMA_MAR0 0xE40 /* Memory Address Register of Channel 0 */
+#define VIA_PCI_DMA_DAR0 0xE44 /* Device Address Register of Channel 0 */
+#define VIA_PCI_DMA_BCR0 0xE48 /* Byte Count Register of Channel 0 */
+#define VIA_PCI_DMA_DPR0 0xE4C /* Descriptor Pointer Register of Channel 0 */
+
+#define VIA_PCI_DMA_MAR1 0xE50 /* Memory Address Register of Channel 1 */
+#define VIA_PCI_DMA_DAR1 0xE54 /* Device Address Register of Channel 1 */
+#define VIA_PCI_DMA_BCR1 0xE58 /* Byte Count Register of Channel 1 */
+#define VIA_PCI_DMA_DPR1 0xE5C /* Descriptor Pointer Register of Channel 1 */
+
+#define VIA_PCI_DMA_MAR2 0xE60 /* Memory Address Register of Channel 2 */
+#define VIA_PCI_DMA_DAR2 0xE64 /* Device Address Register of Channel 2 */
+#define VIA_PCI_DMA_BCR2 0xE68 /* Byte Count Register of Channel 2 */
+#define VIA_PCI_DMA_DPR2 0xE6C /* Descriptor Pointer Register of Channel 2 */
+
+#define VIA_PCI_DMA_MAR3 0xE70 /* Memory Address Register of Channel 3 */
+#define VIA_PCI_DMA_DAR3 0xE74 /* Device Address Register of Channel 3 */
+#define VIA_PCI_DMA_BCR3 0xE78 /* Byte Count Register of Channel 3 */
+#define VIA_PCI_DMA_DPR3 0xE7C /* Descriptor Pointer Register of Channel 3 */
+
+#define VIA_PCI_DMA_MR0 0xE80 /* Mode Register of Channel 0 */
+#define VIA_PCI_DMA_MR1 0xE84 /* Mode Register of Channel 1 */
+#define VIA_PCI_DMA_MR2 0xE88 /* Mode Register of Channel 2 */
+#define VIA_PCI_DMA_MR3 0xE8C /* Mode Register of Channel 3 */
+
+#define VIA_PCI_DMA_CSR0 0xE90 /* Command/Status Register of Channel 0 */
+#define VIA_PCI_DMA_CSR1 0xE94 /* Command/Status Register of Channel 1 */
+#define VIA_PCI_DMA_CSR2 0xE98 /* Command/Status Register of Channel 2 */
+#define VIA_PCI_DMA_CSR3 0xE9C /* Command/Status Register of Channel 3 */
+
+#define VIA_PCI_DMA_PTR 0xEA0 /* Priority Type Register */
+
+/* Define for DMA engine */
/* DPR */
#define VIA_DMA_DPR_EC (1<<1) /* end of chain */
#define VIA_DMA_DPR_DDIE (1<<2) /* descriptor done interrupt enable */
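The register map above is laid out so that per-engine addresses can be computed from the channel-0 define plus a fixed stride, which is exactly how via_dmablit.c uses it: the MAR/DAR/BCR/DPR block steps by 0x10 per engine and the MR/CSR registers step by 0x04 (the driver currently drives two engines). A small stand-alone check of that arithmetic against the defines:

#include <stdio.h>

#define VIA_PCI_DMA_MAR0  0xE40	/* Memory Address Register of Channel 0 */
#define VIA_PCI_DMA_CSR0  0xE90	/* Command/Status Register of Channel 0 */

int main(void)
{
	int engine;

	for (engine = 0; engine < 2; engine++)
		printf("engine %d: MAR 0x%X  CSR 0x%X\n", engine,
		       VIA_PCI_DMA_MAR0 + engine * 0x10,
		       VIA_PCI_DMA_CSR0 + engine * 0x04);
	/* Prints 0xE40/0xE90 for engine 0 and 0xE50/0xE94 for engine 1,
	 * matching the MAR1/CSR1 defines above. */
	return 0;
}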
diff --git a/linux-core/via_mm.c b/linux-core/via_mm.c
index 35ca6bfc..3f75af38 100644
--- a/linux-core/via_mm.c
+++ b/linux-core/via_mm.c
@@ -53,7 +53,7 @@ int via_agp_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->agp_offset = agp->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", agp->offset, agp->size);
+ DRM_DEBUG("offset = %u, size = %u\n", agp->offset, agp->size);
return 0;
}
@@ -77,7 +77,7 @@ int via_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
dev_priv->vram_offset = fb->offset;
mutex_unlock(&dev->struct_mutex);
- DRM_DEBUG("offset = %u, size = %u", fb->offset, fb->size);
+ DRM_DEBUG("offset = %u, size = %u\n", fb->offset, fb->size);
return 0;
@@ -115,7 +115,7 @@ void via_lastclose(struct drm_device *dev)
dev_priv->vram_initialized = 0;
dev_priv->agp_initialized = 0;
mutex_unlock(&dev->struct_mutex);
-}
+}
int via_mem_alloc(struct drm_device *dev, void *data,
struct drm_file *file_priv)
diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c
index d7b23c89..64401ae5 100644
--- a/linux-core/xgi_cmdlist.c
+++ b/linux-core/xgi_cmdlist.c
@@ -78,7 +78,7 @@ int xgi_cmdlist_initialize(struct xgi_info * info, size_t size,
* @type: Type of the current batch
*
* See section 3.2.2 "Begin" (page 15) of the 3D SPG.
- *
+ *
* This function assumes that @type is on the range [0,3].
*/
unsigned int get_batch_command(enum xgi_batch_type type)
@@ -86,7 +86,7 @@ unsigned int get_batch_command(enum xgi_batch_type type)
static const unsigned int ports[4] = {
0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
};
-
+
return ports[type];
}
@@ -159,7 +159,7 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
2 - fb
3 - logout
*/
-int xgi_state_change(struct xgi_info * info, unsigned int to,
+int xgi_state_change(struct xgi_info * info, unsigned int to,
unsigned int from)
{
#define STATE_CONSOLE 0
@@ -219,7 +219,7 @@ void xgi_cmdlist_cleanup(struct xgi_info * info)
}
xgi_waitfor_pci_idle(info);
-
+
(void) memset(&info->cmdring, 0, sizeof(info->cmdring));
}
}
@@ -243,7 +243,7 @@ static void triggerHWCommandList(struct xgi_info * info)
void xgi_emit_flush(struct xgi_info * info, bool stop)
{
const u32 flush_command[8] = {
- ((0x10 << 24)
+ ((0x10 << 24)
| (BEGIN_BEGIN_IDENTIFICATION_MASK & info->next_sequence)),
BEGIN_LINK_ENABLE_MASK | (0x00004),
0x00000000, 0x00000000,
@@ -266,9 +266,9 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
info->cmdring.ring_offset = 0;
}
- hw_addr = info->cmdring.ring_hw_base
+ hw_addr = info->cmdring.ring_hw_base
+ info->cmdring.ring_offset;
- batch_addr = info->cmdring.ptr
+ batch_addr = info->cmdring.ptr
+ (info->cmdring.ring_offset / 4);
for (i = 0; i < (flush_size / 4); i++) {
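In xgi_emit_flush() the same ring offset is applied in two different units: hw_addr adds the raw byte offset to the hardware base address, while batch_addr adds ring_offset / 4 because info->cmdring.ptr points at 32-bit words and C pointer arithmetic advances by elements, not bytes. A stand-alone illustration of that byte-versus-word distinction (all names made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ring[64];
	uint32_t ring_hw_base = 0x100000;	/* hypothetical GPU-visible base */
	unsigned int ring_offset = 32;		/* byte offset into the ring */

	uint32_t hw_addr = ring_hw_base + ring_offset;		/* in bytes */
	uint32_t *batch_addr = ring + (ring_offset / 4);	/* in 32-bit words */

	printf("hw_addr    = 0x%X\n", hw_addr);
	printf("batch_addr = ring + %u words\n",
	       (unsigned int)(batch_addr - ring));
	return 0;
}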
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
index 4e66197e..4f0b4ed0 100644
--- a/linux-core/xgi_drv.c
+++ b/linux-core/xgi_drv.c
@@ -352,7 +352,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *) arg;
struct xgi_info *info = dev->dev_private;
const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
- (0x2800
+ (0x2800
+ M2REG_AUTO_LINK_STATUS_ADDRESS)))
& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
| M2REG_ACTIVE_INTERRUPT_0_MASK
@@ -361,7 +361,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
if (irq_bits != 0) {
- DRM_WRITE32(info->mmio_map,
+ DRM_WRITE32(info->mmio_map,
0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
xgi_fence_handler(dev);
@@ -413,7 +413,7 @@ int xgi_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
-
+
fail:
drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
return err;
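The xgi_kern_isr() hunks above show a common interrupt pattern: read the 32-bit status register, mask it down to the sources this driver handles, and if anything is set, write the same bits back (OR'd with a command bit) to acknowledge them before running the fence handler. A tiny stand-alone model of that read-mask-ack step, with all register values and bit names invented for illustration:

#include <stdio.h>
#include <stdint.h>

#define IRQ_TIMER   (1u << 16)		/* hypothetical interrupt sources */
#define IRQ_FENCE   (1u << 17)
#define IRQ_MASK    (IRQ_TIMER | IRQ_FENCE)
#define ACK_COMMAND (1u << 31)		/* hypothetical write-1-to-clear command bit */

int main(void)
{
	uint32_t status = 0x00030004;	/* what a status register read might return */
	uint32_t irq_bits = status & IRQ_MASK;

	if (irq_bits) {
		/* Acknowledge only the bits we actually saw and handled. */
		uint32_t ack = ACK_COMMAND | irq_bits;
		printf("handled 0x%08X, ack write 0x%08X\n", irq_bits, ack);
	} else {
		printf("not our interrupt\n");
	}
	return 0;
}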
diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h
index d9a94f5f..9408073e 100644
--- a/linux-core/xgi_drv.h
+++ b/linux-core/xgi_drv.h
@@ -64,7 +64,7 @@ struct xgi_info {
struct drm_map *fb_map;
/* look up table parameters */
- struct ati_pcigart_info gart_info;
+ struct drm_ati_pcigart_info gart_info;
unsigned int lutPageSize;
struct drm_sman sman;
diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c
index 2e2d0094..3f50fe8f 100644
--- a/linux-core/xgi_fb.c
+++ b/linux-core/xgi_fb.c
@@ -32,7 +32,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
struct drm_file * filp)
{
struct drm_memblock_item *block;
- const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
+ const char *const mem_name = (alloc->location == XGI_MEMLOC_LOCAL)
? "on-card" : "GART";
@@ -43,7 +43,7 @@ int xgi_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
return -EINVAL;
}
- if ((alloc->location == XGI_MEMLOC_LOCAL)
+ if ((alloc->location == XGI_MEMLOC_LOCAL)
? !info->fb_heap_initialized : !info->pcie_heap_initialized) {
DRM_ERROR("Attempt to allocate from uninitialized memory "
"pool (0x%08x).\n", alloc->location);
@@ -118,7 +118,7 @@ int xgi_free_ioctl(struct drm_device * dev, void * data,
int xgi_fb_heap_init(struct xgi_info * info)
{
int err;
-
+
mutex_lock(&info->dev->struct_mutex);
err = drm_sman_set_range(&info->sman, XGI_MEMLOC_LOCAL,
XGI_FB_HEAP_START,
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
index 526bc5db..9a75581a 100644
--- a/linux-core/xgi_fence.c
+++ b/linux-core/xgi_fence.c
@@ -72,7 +72,7 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
int xgi_fence_emit_sequence(struct drm_device * dev, uint32_t class,
- uint32_t flags, uint32_t * sequence,
+ uint32_t flags, uint32_t * sequence,
uint32_t * native_type)
{
struct xgi_info * info = dev->dev_private;
diff --git a/linux-core/xgi_ioc32.c b/linux-core/xgi_ioc32.c
index c54044fa..e4338417 100644
--- a/linux-core/xgi_ioc32.c
+++ b/linux-core/xgi_ioc32.c
@@ -43,7 +43,7 @@ struct drm_map32 {
u32 handle; /**< User-space: "Handle" to pass to mmap() */
int mtrr; /**< MTRR slot used */
};
-
+
struct drm32_xgi_bootstrap {
struct drm_map32 gart;
};
diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c
index 4a4a9844..2b3a1788 100644
--- a/linux-core/xgi_misc.c
+++ b/linux-core/xgi_misc.c
@@ -90,7 +90,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
DRM_WRITE8(map, 0xb057, 8);
while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
- while (0 != ((--time_out) & 0xfff))
+ while (0 != ((--time_out) & 0xfff))
/* empty */ ;
if (0 == time_out) {
@@ -117,8 +117,8 @@ static void xgi_ge_hang_reset(struct drm_map * map)
DRM_WRITE8(map, 0x3d4, 0x36);
old_36 = DRM_READ8(map, 0x3d5);
DRM_WRITE8(map, 0x3d5, old_36 | 0x10);
-
- while (0 != ((--time_out) & 0xfff))
+
+ while (0 != ((--time_out) & 0xfff))
/* empty */ ;
DRM_WRITE8(map, 0x3d5, old_36);
@@ -134,7 +134,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
DRM_WRITE8(map, 0xb057, 0);
}
-
+
bool xgi_ge_irq_handler(struct xgi_info * info)
{
const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
@@ -143,7 +143,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
/* Check GE on/off */
if (0 == (0xffffc0f0 & int_status)) {
if (0 != (0x1000 & int_status)) {
- /* We got GE stall interrupt.
+ /* We got GE stall interrupt.
*/
DRM_WRITE32(info->mmio_map, 0x2810,
cpu_to_le32(int_status | 0x04000000));
@@ -289,7 +289,7 @@ static void dump_reg(struct xgi_info * info, unsigned regbase, unsigned range)
printk("%1x ", i);
for (j = 0; j < 0x10; j++) {
- u8 temp = DRM_READ8(info->mmio_map,
+ u8 temp = DRM_READ8(info->mmio_map,
regbase + (i * 0x10) + j);
printk("%3x", temp);
}
diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h
index af19a11a..5f9e4f09 100644
--- a/linux-core/xgi_misc.h
+++ b/linux-core/xgi_misc.h
@@ -1,5 +1,5 @@
/****************************************************************************
- * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
*
* All Rights Reserved.
*
diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h
index 5c0100a0..a897fd8a 100644
--- a/linux-core/xgi_regs.h
+++ b/linux-core/xgi_regs.h
@@ -4,7 +4,7 @@
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
- * a copy of this software and associated documentation files (the
+ * a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation on the rights to use, copy, modify, merge,
* publish, distribute, sublicense, and/or sell copies of the Software,