author		Ian Romanick <idr@us.ibm.com>	2007-10-03 14:08:18 -0700
committer	Ian Romanick <idr@us.ibm.com>	2007-10-03 14:08:29 -0700
commit		7f99fd5d7aa1f0d2463907d9d8c483b6249ac831 (patch)
tree		f372e9e5782f2d39189b9df84f9858efaa17ff81 /linux-core
parent		a72eb27fbc7a66e35018ffbcb5137cfaaf4049aa (diff)
First round of byte-ordering fixes for PowerPC.
This isn't 100% complete, as command submission via PCI-e GART buffers doesn't work yet. I've hacked around that for the time being. This is essentially the code that was used at the POWER.org event to show Bimini.
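The pattern that recurs throughout the patch: the XGI chip's MMIO registers and command-ring entries are little-endian, so every 32-bit access through DRM_READ32()/DRM_WRITE32() gets wrapped in le32_to_cpu()/cpu_to_le32(). On little-endian x86 the conversions compile away, so only the big-endian (PowerPC) case changes behavior. A minimal sketch of the idea; xgi_reg_read()/xgi_reg_write() are illustrative names, not helpers that exist in the driver:

#include "drmP.h"               /* DRM_READ32/DRM_WRITE32, struct drm_map */
#include <asm/byteorder.h>      /* cpu_to_le32, le32_to_cpu */

/* Hypothetical wrappers showing the conversion this patch applies. */
static inline void xgi_reg_write(struct drm_map *map, u32 addr, u32 data)
{
	/* value is byte-swapped on big-endian hosts before it hits the bus */
	DRM_WRITE32(map, addr, cpu_to_le32(data));
}

static inline u32 xgi_reg_read(struct drm_map *map, u32 addr)
{
	/* register contents come back little-endian; swap to host order */
	return le32_to_cpu(DRM_READ32(map, addr));
}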
Diffstat (limited to 'linux-core')
-rw-r--r--	linux-core/xgi_cmdlist.c	46
-rw-r--r--	linux-core/xgi_drv.c	6
-rw-r--r--	linux-core/xgi_drv.h	4
-rw-r--r--	linux-core/xgi_fence.c	4
-rw-r--r--	linux-core/xgi_misc.c	24
-rw-r--r--	linux-core/xgi_pcie.c	3
6 files changed, 56 insertions, 31 deletions
diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c
index 261f4e13..35f7e1bd 100644
--- a/linux-core/xgi_cmdlist.c
+++ b/linux-core/xgi_cmdlist.c
@@ -45,7 +45,7 @@ static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
map->handle, addr, data);
#endif
- DRM_WRITE32(map, addr, data);
+ DRM_WRITE32(map, addr, cpu_to_le32(data));
}
@@ -98,6 +98,25 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
const struct xgi_cmd_info *const pCmdInfo =
(struct xgi_cmd_info *) data;
const unsigned int cmd = get_batch_command(pCmdInfo->type);
+#if __BIG_ENDIAN
+ const u32 *const ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
+ unsigned i;
+ unsigned j;
+
+ xgi_waitfor_pci_idle(info);
+ for (j = 4; j < pCmdInfo->size; j += 4) {
+ u32 reg = ptr[j];
+
+ for (i = 1; i < 4; i++) {
+ if ((reg & 1) != 0) {
+ const unsigned r = 0x2100 | (reg & 0x0fe);
+ DRM_WRITE32(info->mmio_map, r, ptr[j + i]);
+ }
+
+ reg >>= 8;
+ }
+ }
+#else
u32 begin[4];
@@ -138,16 +157,17 @@ int xgi_submit_cmdlist(struct drm_device * dev, void * data,
xgi_emit_flush(info, FALSE);
}
- info->cmdring.last_ptr[1] = begin[1];
- info->cmdring.last_ptr[2] = begin[2];
- info->cmdring.last_ptr[3] = begin[3];
+ info->cmdring.last_ptr[1] = cpu_to_le32(begin[1]);
+ info->cmdring.last_ptr[2] = cpu_to_le32(begin[2]);
+ info->cmdring.last_ptr[3] = cpu_to_le32(begin[3]);
DRM_WRITEMEMORYBARRIER();
- info->cmdring.last_ptr[0] = begin[0];
+ info->cmdring.last_ptr[0] = cpu_to_le32(begin[0]);
triggerHWCommandList(info);
}
info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
+#endif
drm_fence_flush_old(info->dev, 0, info->next_sequence);
return 0;
}
@@ -258,6 +278,8 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
const unsigned int flush_size = sizeof(flush_command);
u32 *batch_addr;
u32 hw_addr;
+ unsigned int i;
+
/* check buf is large enough to contain a new flush batch */
if ((info->cmdring.ring_offset + flush_size) >= info->cmdring.size) {
@@ -269,18 +291,20 @@ void xgi_emit_flush(struct xgi_info * info, bool stop)
batch_addr = info->cmdring.ptr
+ (info->cmdring.ring_offset / 4);
- (void) memcpy(batch_addr, flush_command, flush_size);
+ for (i = 0; i < (flush_size / 4); i++) {
+ batch_addr[i] = cpu_to_le32(flush_command[i]);
+ }
if (stop) {
- *batch_addr |= BEGIN_STOP_STORE_CURRENT_POINTER_MASK;
+ *batch_addr |= cpu_to_le32(BEGIN_STOP_STORE_CURRENT_POINTER_MASK);
}
- info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK | (flush_size / 4);
- info->cmdring.last_ptr[2] = hw_addr >> 4;
+ info->cmdring.last_ptr[1] = cpu_to_le32(BEGIN_LINK_ENABLE_MASK | (flush_size / 4));
+ info->cmdring.last_ptr[2] = cpu_to_le32(hw_addr >> 4);
info->cmdring.last_ptr[3] = 0;
DRM_WRITEMEMORYBARRIER();
- info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
- | (BEGIN_VALID_MASK);
+ info->cmdring.last_ptr[0] = cpu_to_le32((get_batch_command(BTYPE_CTRL) << 24)
+ | (BEGIN_VALID_MASK));
triggerHWCommandList(info);
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
index bc6873a9..4e66197e 100644
--- a/linux-core/xgi_drv.c
+++ b/linux-core/xgi_drv.c
@@ -351,9 +351,9 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct xgi_info *info = dev->dev_private;
- const u32 irq_bits = DRM_READ32(info->mmio_map,
+ const u32 irq_bits = le32_to_cpu(DRM_READ32(info->mmio_map,
(0x2800
- + M2REG_AUTO_LINK_STATUS_ADDRESS))
+ + M2REG_AUTO_LINK_STATUS_ADDRESS)))
& (M2REG_ACTIVE_TIMER_INTERRUPT_MASK
| M2REG_ACTIVE_INTERRUPT_0_MASK
| M2REG_ACTIVE_INTERRUPT_2_MASK
@@ -363,7 +363,7 @@ irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
if (irq_bits != 0) {
DRM_WRITE32(info->mmio_map,
0x2800 + M2REG_AUTO_LINK_SETTING_ADDRESS,
- M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits);
+ cpu_to_le32(M2REG_AUTO_LINK_SETTING_COMMAND | irq_bits));
xgi_fence_handler(dev);
return IRQ_HANDLED;
} else {
diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h
index a68dc03b..d9a94f5f 100644
--- a/linux-core/xgi_drv.h
+++ b/linux-core/xgi_drv.h
@@ -35,11 +35,11 @@
#define DRIVER_NAME "xgi"
#define DRIVER_DESC "XGI XP5 / XP10 / XG47"
-#define DRIVER_DATE "20070918"
+#define DRIVER_DATE "20071003"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 1
-#define DRIVER_PATCHLEVEL 0
+#define DRIVER_PATCHLEVEL 3
#include "xgi_cmdlist.h"
#include "xgi_drm.h"
diff --git a/linux-core/xgi_fence.c b/linux-core/xgi_fence.c
index 22e1dced..a98a8422 100644
--- a/linux-core/xgi_fence.c
+++ b/linux-core/xgi_fence.c
@@ -48,8 +48,8 @@ static uint32_t xgi_do_flush(struct drm_device * dev, uint32_t class)
if (pending_flush_types) {
if (pending_flush_types & DRM_FENCE_TYPE_EXE) {
- const u32 begin_id = DRM_READ32(info->mmio_map,
- 0x2820)
+ const u32 begin_id = le32_to_cpu(DRM_READ32(info->mmio_map,
+ 0x2820))
& BEGIN_BEGIN_IDENTIFICATION_MASK;
if (begin_id != info->complete_sequence) {
diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c
index 50a721c0..f39b3bb5 100644
--- a/linux-core/xgi_misc.c
+++ b/linux-core/xgi_misc.c
@@ -38,12 +38,12 @@ static unsigned int s_invalid_begin = 0;
static bool xgi_validate_signal(struct drm_map * map)
{
- if (DRM_READ32(map, 0x2800) & 0x001c0000) {
+ if (le32_to_cpu(DRM_READ32(map, 0x2800) & 0x001c0000)) {
u16 check;
/* Check Read back status */
DRM_WRITE8(map, 0x235c, 0x80);
- check = DRM_READ16(map, 0x2360);
+ check = le16_to_cpu(DRM_READ16(map, 0x2360));
if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
return FALSE;
@@ -51,28 +51,28 @@ static bool xgi_validate_signal(struct drm_map * map)
/* Check RO channel */
DRM_WRITE8(map, 0x235c, 0x83);
- check = DRM_READ16(map, 0x2360);
+ check = le16_to_cpu(DRM_READ16(map, 0x2360));
if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
return FALSE;
}
/* Check RW channel */
DRM_WRITE8(map, 0x235c, 0x88);
- check = DRM_READ16(map, 0x2360);
+ check = le16_to_cpu(DRM_READ16(map, 0x2360));
if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
return FALSE;
}
/* Check RO channel outstanding */
DRM_WRITE8(map, 0x235c, 0x8f);
- check = DRM_READ16(map, 0x2360);
+ check = le16_to_cpu(DRM_READ16(map, 0x2360));
if (0 != (check & 0x3ff)) {
return FALSE;
}
/* Check RW channel outstanding */
DRM_WRITE8(map, 0x235c, 0x90);
- check = DRM_READ16(map, 0x2360);
+ check = le16_to_cpu(DRM_READ16(map, 0x2360));
if (0 != (check & 0x3ff)) {
return FALSE;
}
@@ -89,7 +89,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
int time_out = 0xffff;
DRM_WRITE8(map, 0xb057, 8);
- while (0 != (DRM_READ32(map, 0x2800) & 0xf0000000)) {
+ while (0 != le32_to_cpu(DRM_READ32(map, 0x2800) & 0xf0000000)) {
while (0 != ((--time_out) & 0xfff))
/* empty */ ;
@@ -100,7 +100,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
u8 old_36;
DRM_INFO("Can not reset back 0x%x!\n",
- DRM_READ32(map, 0x2800));
+ le32_to_cpu(DRM_READ32(map, 0x2800)));
DRM_WRITE8(map, 0xb057, 0);
@@ -137,7 +137,7 @@ static void xgi_ge_hang_reset(struct drm_map * map)
bool xgi_ge_irq_handler(struct xgi_info * info)
{
- const u32 int_status = DRM_READ32(info->mmio_map, 0x2810);
+ const u32 int_status = le32_to_cpu(DRM_READ32(info->mmio_map, 0x2810));
bool is_support_auto_reset = FALSE;
/* Check GE on/off */
@@ -146,7 +146,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
/* We got GE stall interrupt.
*/
DRM_WRITE32(info->mmio_map, 0x2810,
- int_status | 0x04000000);
+ cpu_to_le32(int_status | 0x04000000));
if (is_support_auto_reset) {
static cycles_t last_tick;
@@ -176,7 +176,7 @@ bool xgi_ge_irq_handler(struct xgi_info * info)
} else if (0 != (0x1 & int_status)) {
s_invalid_begin++;
DRM_WRITE32(info->mmio_map, 0x2810,
- (int_status & ~0x01) | 0x04000000);
+ cpu_to_le32((int_status & ~0x01) | 0x04000000));
}
return TRUE;
@@ -326,7 +326,7 @@ void xgi_waitfor_pci_idle(struct xgi_info * info)
unsigned int same_count = 0;
while (idleCount < 5) {
- const u32 status = DRM_READ32(info->mmio_map, WHOLD_GE_STATUS)
+ const u32 status = le32_to_cpu(DRM_READ32(info->mmio_map, WHOLD_GE_STATUS))
& IDLE_MASK;
if (status == old_status) {
diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c
index a7d3ea24..4becf35b 100644
--- a/linux-core/xgi_pcie.c
+++ b/linux-core/xgi_pcie.c
@@ -40,7 +40,8 @@ void xgi_gart_flush(struct drm_device *dev)
DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);
/* Set GART base address to HW */
- DRM_WRITE32(info->mmio_map, 0xB034, info->gart_info.bus_addr);
+ DRM_WRITE32(info->mmio_map, 0xB034,
+ cpu_to_le32(info->gart_info.bus_addr));
/* Flush GART table. */
DRM_WRITE8(info->mmio_map, 0xB03F, 0x40);