Diffstat (limited to 'linux-core')
-rw-r--r--  linux-core/Makefile.kernel |   2
-rw-r--r--  linux-core/ati_pcigart.c   | 195
-rw-r--r--  linux-core/drmP.h          |  12
-rw-r--r--  linux-core/radeon_buffer.c | 263
-rw-r--r--  linux-core/radeon_drv.c    |  39
-rw-r--r--  linux-core/radeon_fence.c  | 125
6 files changed, 623 insertions(+), 13 deletions(-)
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 457e3f3c..0ed5471a 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -32,7 +32,7 @@ nouveau-objs := nouveau_drv.o nouveau_state.o nouveau_fifo.o nouveau_mem.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
nv04_instmem.o nv50_instmem.o
-radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o
+radeon-objs := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o radeon_irq.o r300_cmdbuf.o radeon_fence.o radeon_buffer.o
sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
diff --git a/linux-core/ati_pcigart.c b/linux-core/ati_pcigart.c
index 7241c2a8..64e0e758 100644
--- a/linux-core/ati_pcigart.c
+++ b/linux-core/ati_pcigart.c
@@ -35,6 +35,45 @@
# define ATI_PCIGART_PAGE_SIZE 4096 /**< PCI GART page size */
+static __inline__ void insert_page_into_table(struct ati_pcigart_info *info, u32 page_base, u32 *pci_gart)
+{
+ switch (info->gart_reg_if) {
+ case DRM_ATI_GART_IGP:
+ *pci_gart = cpu_to_le32((page_base) | 0xc);
+ break;
+ case DRM_ATI_GART_PCIE:
+ *pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
+ break;
+ default:
+ case DRM_ATI_GART_PCI:
+ *pci_gart = cpu_to_le32(page_base);
+ break;
+ }
+}
+
+static __inline__ u32 get_page_base_from_table(struct ati_pcigart_info *info, u32 *pci_gart)
+{
+ u32 retval = le32_to_cpu(*pci_gart); /* entries are stored little-endian */
+
+ switch (info->gart_reg_if) {
+ case DRM_ATI_GART_IGP:
+ retval &= ~0xc;
+ break;
+ case DRM_ATI_GART_PCIE:
+ retval &= ~0xc;
+ retval <<= 8;
+ break;
+ default:
+ case DRM_ATI_GART_PCI:
+ break;
+ }
+ return retval;
+}
+
+
+
static void *drm_ati_alloc_pcigart_table(int order)
{
unsigned long address;
@@ -207,18 +246,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_i
page_base = (u32) entry->busaddr[i];
for (j = 0; j < (PAGE_SIZE / ATI_PCIGART_PAGE_SIZE); j++) {
- switch(gart_info->gart_reg_if) {
- case DRM_ATI_GART_IGP:
- *pci_gart = cpu_to_le32((page_base) | 0xc);
- break;
- case DRM_ATI_GART_PCIE:
- *pci_gart = cpu_to_le32((page_base >> 8) | 0xc);
- break;
- default:
- case DRM_ATI_GART_PCI:
- *pci_gart = cpu_to_le32(page_base);
- break;
- }
+ insert_page_into_table(gart_info, page_base, pci_gart);
pci_gart++;
page_base += ATI_PCIGART_PAGE_SIZE;
}
@@ -238,3 +266,146 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_i
return ret;
}
EXPORT_SYMBOL(drm_ati_pcigart_init);
+
+static int ati_pcigart_needs_unbind_cache_adjust(drm_ttm_backend_t *backend)
+{
+ return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
+}
+
+static int ati_pcigart_populate(drm_ttm_backend_t *backend,
+ unsigned long num_pages,
+ struct page **pages)
+{
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+
+ DRM_ERROR("%ld\n", num_pages);
+ atipci_be->pages = pages;
+ atipci_be->num_pages = num_pages;
+ atipci_be->populated = 1;
+ return 0;
+}
+
+static int ati_pcigart_bind_ttm(struct drm_ttm_backend *backend,
+ struct drm_bo_mem_reg *bo_mem)
+{
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+ off_t j;
+ int i;
+ struct ati_pcigart_info *info = atipci_be->gart_info;
+ u32 *pci_gart;
+ u32 page_base;
+ unsigned long offset = bo_mem->mm_node->start;
+ pci_gart = info->addr;
+
+ DRM_ERROR("Offset is %08lX\n", bo_mem->mm_node->start);
+ j = offset;
+ while (j < (offset + atipci_be->num_pages)) {
+ if (get_page_base_from_table(info, pci_gart+j))
+ return -EBUSY;
+ j++;
+ }
+
+ for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
+ struct page *cur_page = atipci_be->pages[i];
+ /* write the entry; page_to_phys() assumes bus == phys here, unlike drm_ati_pcigart_init() which uses the dma-mapped busaddr */
+ page_base = page_to_phys(cur_page);
+ insert_page_into_table(info, page_base, pci_gart + j);
+ }
+
+#if defined(__i386__) || defined(__x86_64__)
+ wbinvd();
+#else
+ mb();
+#endif
+
+ atipci_be->gart_flush_fn(atipci_be->dev);
+
+ atipci_be->bound = 1;
+ atipci_be->offset = offset;
+ DRM_DEBUG("\n");
+ return 0;
+}
+
+static int ati_pcigart_unbind_ttm(drm_ttm_backend_t *backend)
+{
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+ struct ati_pcigart_info *info = atipci_be->gart_info;
+ unsigned long offset = atipci_be->offset;
+ int i;
+ off_t j;
+ u32 *pci_gart = info->addr;
+
+ DRM_DEBUG("\n");
+
+ if (atipci_be->bound != 1)
+ return -EINVAL;
+
+ for (i = 0, j = offset; i < atipci_be->num_pages; i++, j++) {
+ *(pci_gart + j) = 0;
+ }
+ atipci_be->gart_flush_fn(atipci_be->dev);
+ atipci_be->bound = 0;
+ atipci_be->offset = 0;
+ return 0;
+}
+
+static void ati_pcigart_clear_ttm(drm_ttm_backend_t *backend)
+{
+ ati_pcigart_ttm_backend_t *atipci_be =
+ container_of(backend, ati_pcigart_ttm_backend_t, backend);
+
+ DRM_DEBUG("\n");
+ if (atipci_be->pages) {
+ backend->func->unbind(backend);
+ atipci_be->pages = NULL;
+ }
+ atipci_be->num_pages = 0;
+}
+
+static void ati_pcigart_destroy_ttm(drm_ttm_backend_t *backend)
+{
+ ati_pcigart_ttm_backend_t *atipci_be;
+ if (backend) {
+ DRM_DEBUG("\n");
+ atipci_be = container_of(backend, ati_pcigart_ttm_backend_t, backend);
+ if (atipci_be) {
+ if (atipci_be->pages) {
+ backend->func->clear(backend);
+ }
+ drm_ctl_free(atipci_be, sizeof(*atipci_be), DRM_MEM_TTM);
+ }
+ }
+}
+
+static struct drm_ttm_backend_func ati_pcigart_ttm_backend =
+{
+ .needs_ub_cache_adjust = ati_pcigart_needs_unbind_cache_adjust,
+ .populate = ati_pcigart_populate,
+ .clear = ati_pcigart_clear_ttm,
+ .bind = ati_pcigart_bind_ttm,
+ .unbind = ati_pcigart_unbind_ttm,
+ .destroy = ati_pcigart_destroy_ttm,
+};
+
+struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev))
+{
+ ati_pcigart_ttm_backend_t *atipci_be;
+
+ atipci_be = drm_ctl_calloc(1, sizeof (*atipci_be), DRM_MEM_TTM);
+ if (!atipci_be)
+ return NULL;
+
+ atipci_be->populated = 0;
+ atipci_be->backend.func = &ati_pcigart_ttm_backend;
+ atipci_be->gart_info = info;
+ atipci_be->gart_flush_fn = gart_flush_fn;
+ atipci_be->dev = dev;
+
+ return &atipci_be->backend;
+}
+EXPORT_SYMBOL(ati_pcigart_init_ttm);
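
[Note on ati_pcigart.c] The refactored helpers at the top of this file are each other's inverse: for DRM_ATI_GART_PCIE an entry holds the bus address shifted down 8 bits with 0xc OR'd in as control bits (the patch doesn't name them), and the decode strips those bits and shifts back up. Because drm_ati_pcigart_init() only hands in 4KB-aligned addresses, the control bits never collide with address bits. A minimal sketch of the round-trip (illustrative only, not part of the patch):

    static u32 pcie_gart_entry_roundtrip(u32 page_base)
    {
            /* encode: address bits 31:8 into the entry, 0xc as control bits */
            u32 entry = cpu_to_le32((page_base >> 8) | 0xc);
            /* decode: strip the control bits, restore the 4KB alignment */
            return (le32_to_cpu(entry) & ~0xc) << 8;
    }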
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 0237f593..2d1f6db8 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -849,6 +849,17 @@ struct drm_agp_ttm_backend {
};
#endif
+typedef struct ati_pcigart_ttm_backend {
+ struct drm_ttm_backend backend;
+ int populated;
+ void (*gart_flush_fn)(struct drm_device *dev);
+ struct ati_pcigart_info *gart_info;
+ unsigned long offset;
+ struct page **pages;
+ int num_pages;
+ int bound;
+ struct drm_device *dev;
+} ati_pcigart_ttm_backend_t;
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@@ -1165,6 +1176,7 @@ extern int drm_sg_free(struct drm_device *dev, void *data,
/* ATI PCIGART support (ati_pcigart.h) */
extern int drm_ati_pcigart_init(struct drm_device *dev, struct ati_pcigart_info *gart_info);
extern int drm_ati_pcigart_cleanup(struct drm_device *dev, struct ati_pcigart_info *gart_info);
+extern struct drm_ttm_backend *ati_pcigart_init_ttm(struct drm_device *dev, struct ati_pcigart_info *info, void (*gart_flush_fn)(struct drm_device *dev));
extern drm_dma_handle_t *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align, dma_addr_t maxaddr);
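
[Note on drmP.h] ati_pcigart_ttm_backend_t embeds the generic struct drm_ttm_backend as its first member, and every callback in ati_pcigart.c recovers the wrapper with container_of(). A minimal sketch of the pattern (example_callback is illustrative):

    static int example_callback(struct drm_ttm_backend *backend)
    {
            ati_pcigart_ttm_backend_t *atipci_be =
                    container_of(backend, ati_pcigart_ttm_backend_t, backend);
            return atipci_be->bound;
    }

With the backend placed first, the container_of() is effectively a cast, but spelling it out keeps the code correct even if the struct layout later changes.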
diff --git a/linux-core/radeon_buffer.c b/linux-core/radeon_buffer.c
new file mode 100644
index 00000000..8e2b20b1
--- /dev/null
+++ b/linux-core/radeon_buffer.c
@@ -0,0 +1,263 @@
+/**************************************************************************
+ *
+ * Copyright 2007 Dave Airlie
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Dave Airlie <airlied@linux.ie>
+ */
+
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+struct drm_ttm_backend *radeon_create_ttm_backend_entry(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ if (dev_priv->flags & RADEON_IS_AGP)
+ return drm_agp_init_ttm(dev);
+ else
+ return ati_pcigart_init_ttm(dev, &dev_priv->gart_info, radeon_gart_flush);
+}
+
+int radeon_fence_types(struct drm_buffer_object *bo, uint32_t * class, uint32_t * type)
+{
+ *class = 0;
+ if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
+ *type = DRM_FENCE_TYPE_EXE | DRM_RADEON_FENCE_TYPE_RW;
+ else
+ *type = DRM_FENCE_TYPE_EXE;
+ return 0;
+}
+
+int radeon_invalidate_caches(struct drm_device * dev, uint64_t flags)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ RING_LOCALS;
+
+ BEGIN_RING(4);
+ RADEON_FLUSH_CACHE();
+ RADEON_FLUSH_ZCACHE();
+ ADVANCE_RING();
+ return 0;
+}
+
+uint32_t radeon_evict_mask(struct drm_buffer_object *bo)
+{
+ switch (bo->mem.mem_type) {
+ case DRM_BO_MEM_LOCAL:
+ case DRM_BO_MEM_TT:
+ return DRM_BO_FLAG_MEM_LOCAL;
+ case DRM_BO_MEM_VRAM:
+ if (bo->mem.num_pages > 128)
+ return DRM_BO_FLAG_MEM_TT;
+ else
+ return DRM_BO_FLAG_MEM_LOCAL;
+ default:
+ return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
+ }
+}
+
+int radeon_init_mem_type(struct drm_device * dev, uint32_t type,
+ struct drm_mem_type_manager * man)
+{
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+
+ switch (type) {
+ case DRM_BO_MEM_LOCAL:
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CACHED;
+ man->drm_bus_maptype = 0;
+ break;
+ case DRM_BO_MEM_VRAM:
+ man->flags = _DRM_FLAG_MEMTYPE_FIXED | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_NEEDS_IOREMAP;
+ man->io_addr = NULL;
+ man->drm_bus_maptype = _DRM_FRAME_BUFFER;
+ man->io_offset = drm_get_resource_start(dev, 0);
+ man->io_size = drm_get_resource_len(dev, 0);
+ break;
+ case DRM_BO_MEM_TT:
+ if (dev_priv->flags & RADEON_IS_AGP) {
+ if (!(drm_core_has_AGP(dev) && dev->agp)) {
+ DRM_ERROR("AGP is not enabled for memory type %u\n",
+ (unsigned)type);
+ return -EINVAL;
+ }
+ man->io_offset = dev->agp->agp_info.aper_base;
+ man->io_size = dev->agp->agp_info.aper_size * 1024 * 1024;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
+ _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_NEEDS_IOREMAP;
+ man->drm_bus_maptype = _DRM_AGP;
+ } else {
+ man->io_offset = dev_priv->gart_vm_start;
+ man->io_size = dev_priv->gart_size;
+ man->io_addr = NULL;
+ man->flags = _DRM_FLAG_MEMTYPE_CSELECT | _DRM_FLAG_MEMTYPE_MAPPABLE | _DRM_FLAG_MEMTYPE_CMA;
+ man->drm_bus_maptype = _DRM_SCATTER_GATHER;
+ }
+ break;
+ default:
+ DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void radeon_emit_copy_blit(struct drm_device * dev,
+ uint32_t src_offset,
+ uint32_t dst_offset,
+ uint32_t pages, int direction)
+{
+ uint32_t cur_pages;
+ uint32_t stride = PAGE_SIZE;
+ drm_radeon_private_t *dev_priv = dev->dev_private;
+ uint32_t format, height;
+ RING_LOCALS;
+
+ if (!dev_priv)
+ return;
+
+ /* 32-bit copy format */
+ format = RADEON_COLOR_FORMAT_ARGB8888;
+
+ /* radeon limited to 16k stride */
+ stride &= 0x3fff;
+ while (pages > 0) {
+ cur_pages = pages;
+ if (cur_pages > 2048)
+ cur_pages = 2048;
+ pages -= cur_pages;
+
+ /* needs verification */
+ BEGIN_RING(7);
+ OUT_RING(CP_PACKET3(RADEON_CNTL_BITBLT_MULTI, 5));
+ OUT_RING(RADEON_GMC_SRC_PITCH_OFFSET_CNTL |
+ RADEON_GMC_DST_PITCH_OFFSET_CNTL |
+ RADEON_GMC_BRUSH_NONE |
+ (format << 8) |
+ RADEON_GMC_SRC_DATATYPE_COLOR |
+ RADEON_ROP3_S |
+ RADEON_DP_SRC_SOURCE_MEMORY |
+ RADEON_GMC_CLR_CMP_CNTL_DIS | RADEON_GMC_WR_MSK_DIS);
+ if (direction) {
+ OUT_RING((stride << 22) | (src_offset >> 10));
+ OUT_RING((stride << 22) | (dst_offset >> 10));
+ } else {
+ OUT_RING((stride << 22) | (dst_offset >> 10));
+ OUT_RING((stride << 22) | (src_offset >> 10));
+ }
+ OUT_RING(0);
+ OUT_RING(pages); /* x - y */
+ OUT_RING((stride << 16) | cur_pages);
+ ADVANCE_RING();
+ }
+
+ BEGIN_RING(2);
+ RADEON_WAIT_UNTIL_2D_IDLE();
+ ADVANCE_RING();
+
+ return;
+}
+
+static int radeon_move_blit(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
+{
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+ int dir = 0;
+
+ if ((old_mem->mem_type == new_mem->mem_type) &&
+ (new_mem->mm_node->start <
+ old_mem->mm_node->start + old_mem->mm_node->size)) {
+ dir = 1;
+ }
+
+ radeon_emit_copy_blit(bo->dev,
+ old_mem->mm_node->start << PAGE_SHIFT,
+ new_mem->mm_node->start << PAGE_SHIFT,
+ new_mem->num_pages, dir);
+
+ return drm_bo_move_accel_cleanup(bo, evict, no_wait, 0,
+ DRM_FENCE_TYPE_EXE |
+ DRM_RADEON_FENCE_TYPE_RW,
+ DRM_RADEON_FENCE_FLAG_FLUSHED, new_mem);
+}
+
+static int radeon_move_flip(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+{
+ struct drm_device *dev = bo->dev;
+ struct drm_bo_mem_reg tmp_mem;
+ int ret;
+
+ tmp_mem = *new_mem;
+ tmp_mem.mm_node = NULL;
+ tmp_mem.mask = DRM_BO_FLAG_MEM_TT |
+ DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING;
+
+ ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
+ if (ret)
+ return ret;
+
+ ret = drm_bind_ttm(bo->ttm, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = radeon_move_blit(bo, 1, no_wait, &tmp_mem);
+ if (ret)
+ goto out_cleanup;
+
+ ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);
+out_cleanup:
+ if (tmp_mem.mm_node) {
+ mutex_lock(&dev->struct_mutex);
+ if (tmp_mem.mm_node != bo->pinned_node)
+ drm_mm_put_block(tmp_mem.mm_node);
+ tmp_mem.mm_node = NULL;
+ mutex_unlock(&dev->struct_mutex);
+ }
+ return ret;
+}
+
+int radeon_move(struct drm_buffer_object * bo,
+ int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
+{
+ struct drm_bo_mem_reg *old_mem = &bo->mem;
+
+ DRM_DEBUG("\n");
+ if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
+ if (radeon_move_flip(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ } else {
+ if (radeon_move_blit(bo, evict, no_wait, new_mem))
+ return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+ }
+ return 0;
+}
+
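
[Note on radeon_buffer.c] radeon_move() is deliberately conservative: any failure in the blit or flip path falls back to drm_bo_move_memcpy(), so a broken accelerated path costs performance rather than correctness. The blit itself is marked "needs verification" above for good reason: elsewhere in the radeon driver the GMC pitch/offset dwords are packed with the pitch in 64-byte units, not raw bytes. A sketch of that packing, for comparison with the byte stride used above (illustrative only):

    static u32 radeon_pack_pitch_offset(u32 pitch_bytes, u32 offset_bytes)
    {
            /* pitch in 64-byte units in bits 31:22, 1KB-aligned offset below */
            return ((pitch_bytes / 64) << 22) | (offset_bytes >> 10);
    }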
diff --git a/linux-core/radeon_drv.c b/linux-core/radeon_drv.c
index 327a6a97..107d71a1 100644
--- a/linux-core/radeon_drv.c
+++ b/linux-core/radeon_drv.c
@@ -56,6 +56,38 @@ static struct pci_device_id pciidlist[] = {
radeon_PCI_IDS
};
+
+#ifdef RADEON_HAVE_FENCE
+static struct drm_fence_driver radeon_fence_driver = {
+ .num_classes = 1,
+ .wrap_diff = (1 << 30),
+ .flush_diff = (1 << 29),
+ .sequence_mask = 0xffffffffU,
+ .lazy_capable = 1,
+ .emit = radeon_fence_emit_sequence,
+ .poke_flush = radeon_poke_flush,
+ .has_irq = radeon_fence_has_irq,
+};
+#endif
+#ifdef RADEON_HAVE_BUFFER
+
+static uint32_t radeon_mem_prios[] = {DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
+static uint32_t radeon_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
+
+static struct drm_bo_driver radeon_bo_driver = {
+ .mem_type_prio = radeon_mem_prios,
+ .mem_busy_prio = radeon_busy_prios,
+ .num_mem_type_prio = sizeof(radeon_mem_prios)/sizeof(uint32_t),
+ .num_mem_busy_prio = sizeof(radeon_busy_prios)/sizeof(uint32_t),
+ .create_ttm_backend_entry = radeon_create_ttm_backend_entry,
+ .fence_type = radeon_fence_types,
+ .invalidate_caches = radeon_invalidate_caches,
+ .init_mem_type = radeon_init_mem_type,
+ .evict_mask = radeon_evict_mask,
+ .move = radeon_move,
+};
+#endif
+
static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
static struct drm_driver driver = {
.driver_features =
@@ -101,6 +133,13 @@ static struct drm_driver driver = {
.remove = __devexit_p(drm_cleanup_pci),
},
+#ifdef RADEON_HAVE_FENCE
+ .fence_driver = &radeon_fence_driver,
+#endif
+#ifdef RADEON_HAVE_BUFFER
+ .bo_driver = &radeon_bo_driver,
+#endif
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
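
[Note on radeon_drv.c] The RADEON_HAVE_FENCE / RADEON_HAVE_BUFFER guards consumed here are not defined anywhere in this diff; presumably they sit in radeon_drv.h with the driver's other feature tests, along the lines of (assumption, sketch only):

    /* radeon_drv.h -- sketch; the real definitions may be conditional
     * on kernel version or configuration */
    #define RADEON_HAVE_FENCE
    #define RADEON_HAVE_BUFFER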
diff --git a/linux-core/radeon_fence.c b/linux-core/radeon_fence.c
new file mode 100644
index 00000000..682f0bee
--- /dev/null
+++ b/linux-core/radeon_fence.c
@@ -0,0 +1,125 @@
+/**************************************************************************
+ *
+ * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "radeon_drm.h"
+#include "radeon_drv.h"
+
+/*
+ * Implements a sync flush operation, modelled on the i915 fence code.
+ */
+
+static void radeon_perform_flush(struct drm_device * dev)
+{
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+ struct drm_fence_manager *fm = &dev->fm;
+ struct drm_fence_class_manager *fc = &dev->fm.fence_class[0];
+ struct drm_fence_driver *driver = dev->driver->fence_driver;
+ uint32_t pending_flush_types = 0;
+ uint32_t sequence;
+
+ if (!dev_priv)
+ return;
+
+ pending_flush_types = fc->pending_flush |
+ ((fc->pending_exe_flush) ? DRM_FENCE_TYPE_EXE : 0);
+
+ if (pending_flush_types) {
+ sequence = READ_BREADCRUMB(dev_priv);
+
+ drm_fence_handler(dev, 0, sequence, pending_flush_types, 0);
+ }
+
+ return;
+}
+
+void radeon_poke_flush(struct drm_device * dev, uint32_t class)
+{
+ struct drm_fence_manager *fm = &dev->fm;
+ unsigned long flags;
+
+ if (class != 0)
+ return;
+
+ write_lock_irqsave(&fm->lock, flags);
+ radeon_perform_flush(dev);
+ write_unlock_irqrestore(&fm->lock, flags);
+}
+
+int radeon_fence_emit_sequence(struct drm_device *dev, uint32_t class,
+ uint32_t flags, uint32_t *sequence,
+ uint32_t *native_type)
+{
+ drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
+ RING_LOCALS;
+
+ if (!dev_priv)
+ return -EINVAL;
+
+ *native_type = DRM_FENCE_TYPE_EXE;
+ if (flags & DRM_RADEON_FENCE_FLAG_FLUSHED) {
+ *native_type |= DRM_RADEON_FENCE_TYPE_RW;
+
+ BEGIN_RING(4);
+
+ RADEON_FLUSH_CACHE();
+ RADEON_FLUSH_ZCACHE();
+ ADVANCE_RING();
+ }
+
+ radeon_emit_irq(dev);
+ *sequence = (uint32_t) dev_priv->counter;
+
+ return 0;
+}
+
+void radeon_fence_handler(struct drm_device * dev)
+{
+ struct drm_fence_manager *fm = &dev->fm;
+
+ write_lock(&fm->lock);
+ radeon_perform_flush(dev);
+ write_unlock(&fm->lock);
+}
+
+int radeon_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t flags)
+{
+ /*
+ * We have an irq that tells us when we have a new breadcrumb.
+ */
+
+ if (class == 0 && flags == DRM_FENCE_TYPE_EXE)
+ return 1;
+
+ return 0;
+}
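
[Note on radeon_fence.c] The flow is: radeon_fence_emit_sequence() optionally flushes the 2D and Z caches, emits an IRQ breadcrumb via radeon_emit_irq(), and hands back the new sequence number; the chip's IRQ handler is then expected to call radeon_fence_handler(), which reads the breadcrumb back (READ_BREADCRUMB) and reports completed fence types through drm_fence_handler(). The IRQ-side call site would live in radeon_irq.c, not in this diff; a sketch of the expected hookup (the handler shape is illustrative, not the real radeon_irq.c signature):

    static void example_radeon_irq_tail(struct drm_device *dev)
    {
    #ifdef RADEON_HAVE_FENCE
            /* the breadcrumb may have advanced: let waiters re-check */
            radeon_fence_handler(dev);
    #endif
    }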