summaryrefslogtreecommitdiff
path: root/linux-core
diff options
context:
space:
mode:
Diffstat (limited to 'linux-core')
-rw-r--r--linux-core/Makefile8
-rw-r--r--linux-core/Makefile.kernel2
-rw-r--r--linux-core/xgi_cmdlist.c264
-rw-r--r--linux-core/xgi_cmdlist.h98
l---------linux-core/xgi_drm.h1
-rw-r--r--linux-core/xgi_drv.c362
-rw-r--r--linux-core/xgi_drv.h152
-rw-r--r--linux-core/xgi_fb.c370
-rw-r--r--linux-core/xgi_misc.c474
-rw-r--r--linux-core/xgi_misc.h37
-rw-r--r--linux-core/xgi_pcie.c348
-rw-r--r--linux-core/xgi_regs.h216
12 files changed, 2331 insertions, 1 deletions
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 1758777c..55e25253 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
- mach64.o nv.o nouveau.o
+ mach64.o nv.o nouveau.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@@ -91,6 +91,7 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
NVHEADERS = nv_drv.h $(DRMHEADERS)
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
+XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_misc.h xgi_regs.h $(DRMHEADERS)
PROGS = dristat drmstat
@@ -284,6 +285,7 @@ CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
+CONFIG_DRM_XGI := n
# Enable module builds for the modules requested/supported.
@@ -320,6 +322,9 @@ endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif
+ifneq (,$(findstring xgi,$(DRM_MODULES)))
+CONFIG_DRM_XGI := m
+endif
# These require AGP support
@@ -347,6 +352,7 @@ $(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)
+$(xgi-objs): $(XGIHEADERS)
endif
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 5aa589cd..c898206d 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -38,6 +38,7 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o via_fence.o via_buffer.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
+xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
@@ -62,3 +63,4 @@ obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
+obj-$(CONFIG_DRM_XGI) += xgi.o \ No newline at end of file
diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c
new file mode 100644
index 00000000..490e9f39
--- /dev/null
+++ b/linux-core/xgi_cmdlist.c
@@ -0,0 +1,264 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+#include "xgi_cmdlist.h"
+
+static void addFlush2D(struct xgi_info * info);
+static unsigned int get_batch_command(enum xgi_batch_type type);
+static void triggerHWCommandList(struct xgi_info * info);
+static void xgi_cmdlist_reset(struct xgi_info * info);
+
+int xgi_cmdlist_initialize(struct xgi_info * info, size_t size)
+{
+ struct xgi_mem_alloc mem_alloc = {
+ .size = size,
+ .owner = PCIE_2D,
+ };
+ int err;
+
+ err = xgi_pcie_alloc(info, &mem_alloc, 0);
+ if (err) {
+ return err;
+ }
+
+ info->cmdring.ptr = xgi_find_pcie_virt(info, mem_alloc.hw_addr);
+ info->cmdring.size = mem_alloc.size;
+ info->cmdring.ring_hw_base = mem_alloc.hw_addr;
+ info->cmdring.ring_gart_base = mem_alloc.offset;
+ info->cmdring.last_ptr = NULL;
+ info->cmdring.ring_offset = 0;
+
+ return 0;
+}
+
+
+/**
+ * get_batch_command - Get the command ID for the current begin type.
+ * @type: Type of the current batch
+ *
+ * See section 3.2.2 "Begin" (page 15) of the 3D SPG.
+ *
+ * This function assumes that @type is in the range [0,3].
+ */
+unsigned int get_batch_command(enum xgi_batch_type type)
+{
+ static const unsigned int ports[4] = {
+ 0x30 >> 2, 0x40 >> 2, 0x50 >> 2, 0x20 >> 2
+ };
+
+ return ports[type];
+}
+
+
+static void xgi_submit_cmdlist(struct xgi_info * info,
+ const struct xgi_cmd_info * pCmdInfo)
+{
+ const unsigned int cmd = get_batch_command(pCmdInfo->type);
+ u32 begin[4];
+
+
+ begin[0] = (cmd << 24) | BEGIN_VALID_MASK
+ | (BEGIN_BEGIN_IDENTIFICATION_MASK & pCmdInfo->id);
+ begin[1] = BEGIN_LINK_ENABLE_MASK | pCmdInfo->size;
+ begin[2] = pCmdInfo->hw_addr >> 4;
+ begin[3] = 0;
+
+ if (info->cmdring.last_ptr == NULL) {
+ const unsigned int portOffset = BASE_3D_ENG + (cmd << 2);
+
+
+ /* Enable PCI Trigger Mode
+ */
+ dwWriteReg(info->mmio_map,
+ BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
+ M2REG_CLEAR_COUNTERS_MASK | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+
+ dwWriteReg(info->mmio_map,
+ BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+
+
+ /* Send PCI begin command
+ */
+ dwWriteReg(info->mmio_map, portOffset, begin[0]);
+ dwWriteReg(info->mmio_map, portOffset + 4, begin[1]);
+ dwWriteReg(info->mmio_map, portOffset + 8, begin[2]);
+ dwWriteReg(info->mmio_map, portOffset + 12, begin[3]);
+ } else {
+ DRM_DEBUG("info->cmdring.last_ptr != NULL\n");
+
+ if (pCmdInfo->type == BTYPE_3D) {
+ addFlush2D(info);
+ }
+
+ info->cmdring.last_ptr[1] = begin[1];
+ info->cmdring.last_ptr[2] = begin[2];
+ info->cmdring.last_ptr[3] = begin[3];
+ wmb();
+ info->cmdring.last_ptr[0] = begin[0];
+
+ triggerHWCommandList(info);
+ }
+
+ info->cmdring.last_ptr = xgi_find_pcie_virt(info, pCmdInfo->hw_addr);
+}
+
+
+int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct xgi_cmd_info cmd_list;
+ struct xgi_info *info = dev->dev_private;
+
+ DRM_COPY_FROM_USER_IOCTL(cmd_list,
+ (struct xgi_cmd_info __user *) data,
+ sizeof(cmd_list));
+
+ if (cmd_list.type > BTYPE_CTRL) {
+ return DRM_ERR(EINVAL);
+ }
+
+ xgi_submit_cmdlist(info, &cmd_list);
+ return 0;
+}
+
+
+/*
+ state: 0 - console
+ 1 - graphic
+ 2 - fb
+ 3 - logout
+*/
+int xgi_state_change(struct xgi_info * info, unsigned int to,
+ unsigned int from)
+{
+#define STATE_CONSOLE 0
+#define STATE_GRAPHIC 1
+#define STATE_FBTERM 2
+#define STATE_LOGOUT 3
+#define STATE_REBOOT 4
+#define STATE_SHUTDOWN 5
+
+ if ((from == STATE_GRAPHIC) && (to == STATE_CONSOLE)) {
+ DRM_INFO("Leaving graphical mode (probably VT switch)\n");
+ } else if ((from == STATE_CONSOLE) && (to == STATE_GRAPHIC)) {
+ DRM_INFO("Entering graphical mode (probably VT switch)\n");
+ xgi_cmdlist_reset(info);
+ } else if ((from == STATE_GRAPHIC)
+ && ((to == STATE_LOGOUT)
+ || (to == STATE_REBOOT)
+ || (to == STATE_SHUTDOWN))) {
+ DRM_INFO("Leaving graphical mode (probably X shutting down)\n");
+ } else {
+ DRM_ERROR("Invalid state change.\n");
+ return DRM_ERR(EINVAL);
+ }
+
+ return 0;
+}
+
+
+int xgi_state_change_ioctl(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct xgi_state_info state;
+ struct xgi_info *info = dev->dev_private;
+
+ DRM_COPY_FROM_USER_IOCTL(state, (struct xgi_state_info __user *) data,
+ sizeof(state));
+
+ return xgi_state_change(info, state._toState, state._fromState);
+}
+
+
+void xgi_cmdlist_reset(struct xgi_info * info)
+{
+ info->cmdring.last_ptr = NULL;
+ info->cmdring.ring_offset = 0;
+}
+
+void xgi_cmdlist_cleanup(struct xgi_info * info)
+{
+ if (info->cmdring.ring_hw_base != 0) {
+ xgi_pcie_free(info, info->cmdring.ring_gart_base, NULL);
+ info->cmdring.ring_hw_base = 0;
+ info->cmdring.ring_offset = 0;
+ info->cmdring.size = 0;
+ }
+}
+
+static void triggerHWCommandList(struct xgi_info * info)
+{
+ static unsigned int s_triggerID = 1;
+
+ dwWriteReg(info->mmio_map,
+ BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
+ 0x05000000 + (0x0ffff & s_triggerID++));
+}
+
+
+static void addFlush2D(struct xgi_info * info)
+{
+ u32 *flushBatchVirtAddr;
+ u32 flushBatchHWAddr;
+
+	/* Check that the buffer is large enough to hold a new flush batch. */
+ if ((info->cmdring.ring_offset + 0x20) >= info->cmdring.size) {
+ info->cmdring.ring_offset = 0;
+ }
+
+ flushBatchHWAddr = info->cmdring.ring_hw_base + info->cmdring.ring_offset;
+ flushBatchVirtAddr = info->cmdring.ptr
+ + (info->cmdring.ring_offset / 4);
+
+	/* Not using memcpy: the destination is assumed not to be contiguous. */
+ *(flushBatchVirtAddr + 0) = 0x10000000;
+ *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */
+ *(flushBatchVirtAddr + 2) = 0x00000000;
+ *(flushBatchVirtAddr + 3) = 0x00000000;
+ *(flushBatchVirtAddr + 4) = FLUSH_2D;
+ *(flushBatchVirtAddr + 5) = FLUSH_2D;
+ *(flushBatchVirtAddr + 6) = FLUSH_2D;
+ *(flushBatchVirtAddr + 7) = FLUSH_2D;
+
+ info->cmdring.last_ptr[1] = BEGIN_LINK_ENABLE_MASK + 0x08;
+ info->cmdring.last_ptr[2] = flushBatchHWAddr >> 4;
+ info->cmdring.last_ptr[3] = 0;
+ wmb();
+ info->cmdring.last_ptr[0] = (get_batch_command(BTYPE_CTRL) << 24)
+ | (BEGIN_VALID_MASK);
+
+ triggerHWCommandList(info);
+
+ info->cmdring.ring_offset += 0x20;
+ info->cmdring.last_ptr = flushBatchVirtAddr;
+}
diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h
new file mode 100644
index 00000000..604c9aac
--- /dev/null
+++ b/linux-core/xgi_cmdlist.h
@@ -0,0 +1,98 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_CMDLIST_H_
+#define _XGI_CMDLIST_H_
+
+#define ONE_BIT_MASK 0x1
+#define TWENTY_BIT_MASK 0xfffff
+#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20)
+#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK
+#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21)
+#define BASE_3D_ENG 0x2800
+#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10
+#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4)
+#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1)
+#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20)
+#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31)
+#define BEGIN_BEGIN_IDENTIFICATION_MASK (TWENTY_BIT_MASK<<0)
+#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14
+
+typedef enum {
+ FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK,
+ FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK,
+ FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK
+} FLUSH_CODE;
+
+typedef enum {
+ AGPCMDLIST_SCRATCH_SIZE = 0x100,
+ AGPCMDLIST_BEGIN_SIZE = 0x004,
+ AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004,
+ AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c,
+ AGPCMDLIST_FLUSH_CMD_LEN = 0x004,
+ AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE
+} CMD_SIZE;
+
+struct xgi_cmdring_info {
+ /**
+ * Kernel space pointer to the base of the command ring.
+ */
+ u32 * ptr;
+
+ /**
+ * Size, in bytes, of the command ring.
+ */
+ unsigned int size;
+
+ /**
+ * Base address of the command ring from the hardware's PoV.
+ */
+ unsigned int ring_hw_base;
+
+ /**
+ * Offset, in bytes, from the base of PCI-e GART space to the start
+ * of the ring.
+ */
+ unsigned long ring_gart_base;
+
+ u32 * last_ptr;
+
+ /**
+ * Offset, in bytes, from the start of the ring to the next available
+ * location to store a command.
+ */
+ unsigned int ring_offset;
+};
+
+struct xgi_info;
+extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size);
+
+extern int xgi_state_change(struct xgi_info * info, unsigned int to,
+ unsigned int from);
+
+extern void xgi_cmdlist_cleanup(struct xgi_info * info);
+
+#endif /* _XGI_CMDLIST_H_ */
diff --git a/linux-core/xgi_drm.h b/linux-core/xgi_drm.h
new file mode 120000
index 00000000..677586d7
--- /dev/null
+++ b/linux-core/xgi_drm.h
@@ -0,0 +1 @@
+../shared-core/xgi_drm.h \ No newline at end of file
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
new file mode 100644
index 00000000..2c3384b0
--- /dev/null
+++ b/linux-core/xgi_drv.c
@@ -0,0 +1,362 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "drmP.h"
+#include "drm.h"
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+#include "xgi_cmdlist.h"
+
+#include "drm_pciids.h"
+
+static struct pci_device_id pciidlist[] = {
+ xgi_PCI_IDS
+};
+
+static int xgi_bootstrap(DRM_IOCTL_ARGS);
+
+static drm_ioctl_desc_t xgi_ioctls[] = {
+ [DRM_IOCTL_NR(DRM_XGI_BOOTSTRAP)] = {xgi_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY},
+
+ [DRM_IOCTL_NR(DRM_XGI_FB_ALLOC)] = {xgi_fb_alloc_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_XGI_FB_FREE)] = {xgi_fb_free_ioctl, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_XGI_PCIE_ALLOC)] = {xgi_pcie_alloc_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_XGI_PCIE_FREE)] = {xgi_pcie_free_ioctl, DRM_AUTH},
+
+ [DRM_IOCTL_NR(DRM_XGI_GE_RESET)] = {xgi_ge_reset_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_XGI_DUMP_REGISTER)] = {xgi_dump_register_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_XGI_DEBUG_INFO)] = {xgi_restore_registers_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_XGI_SUBMIT_CMDLIST)] = {xgi_submit_cmdlist_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_XGI_TEST_RWINKERNEL)] = {xgi_test_rwinkernel_ioctl, DRM_AUTH},
+ [DRM_IOCTL_NR(DRM_XGI_STATE_CHANGE)] = {xgi_state_change_ioctl, DRM_AUTH|DRM_MASTER},
+};
+
+static const int xgi_max_ioctl = DRM_ARRAY_SIZE(xgi_ioctls);
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static int xgi_driver_load(struct drm_device *dev, unsigned long flags);
+static int xgi_driver_unload(struct drm_device *dev);
+static void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp);
+static void xgi_driver_lastclose(drm_device_t * dev);
+static irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS);
+
+
+static struct drm_driver driver = {
+ .driver_features =
+ DRIVER_PCI_DMA | DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ |
+ DRIVER_IRQ_SHARED | DRIVER_SG,
+ .dev_priv_size = sizeof(struct xgi_info),
+ .load = xgi_driver_load,
+ .unload = xgi_driver_unload,
+ .preclose = xgi_driver_preclose,
+ .lastclose = xgi_driver_lastclose,
+ .dma_quiescent = NULL,
+ .irq_preinstall = NULL,
+ .irq_postinstall = NULL,
+ .irq_uninstall = NULL,
+ .irq_handler = xgi_kern_isr,
+ .reclaim_buffers = drm_core_reclaim_buffers,
+ .get_map_ofs = drm_core_get_map_ofs,
+ .get_reg_ofs = drm_core_get_reg_ofs,
+ .ioctls = xgi_ioctls,
+ .dma_ioctl = NULL,
+
+ .fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .ioctl = drm_ioctl,
+ .mmap = drm_mmap,
+ .poll = drm_poll,
+ .fasync = drm_fasync,
+ },
+
+ .pci_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pciidlist,
+ .probe = probe,
+ .remove = __devexit_p(drm_cleanup_pci),
+ },
+
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .patchlevel = DRIVER_PATCHLEVEL,
+
+};
+
+static int probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ return drm_get_dev(pdev, ent, &driver);
+}
+
+
+static int __init xgi_init(void)
+{
+ driver.num_ioctls = xgi_max_ioctl;
+ return drm_init(&driver, pciidlist);
+}
+
+static void __exit xgi_exit(void)
+{
+ drm_exit(&driver);
+}
+
+module_init(xgi_init);
+module_exit(xgi_exit);
+
+MODULE_AUTHOR(DRIVER_AUTHOR);
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
+
+
+void xgi_kern_isr_bh(struct drm_device *dev);
+
+int xgi_bootstrap(DRM_IOCTL_ARGS)
+{
+ DRM_DEVICE;
+ struct xgi_info *info = dev->dev_private;
+ struct xgi_bootstrap bs;
+ struct drm_map_list *maplist;
+ int err;
+
+
+ DRM_COPY_FROM_USER_IOCTL(bs, (struct xgi_bootstrap __user *) data,
+ sizeof(bs));
+
+ if (info->mmio_map == NULL) {
+ err = drm_addmap(dev, info->mmio.base, info->mmio.size,
+ _DRM_REGISTERS, _DRM_KERNEL,
+ &info->mmio_map);
+ if (err) {
+ DRM_ERROR("Unable to map MMIO region: %d\n", err);
+ return err;
+ }
+
+ xgi_enable_mmio(info);
+ }
+
+
+ info->fb.size = IN3CFB(info->mmio_map, 0x54) * 8 * 1024 * 1024;
+
+ DRM_INFO("fb base: 0x%lx, size: 0x%x (probed)\n",
+ (unsigned long) info->fb.base, info->fb.size);
+
+
+ if ((info->fb.base == 0) || (info->fb.size == 0)) {
+ DRM_ERROR("framebuffer appears to be wrong: 0x%lx 0x%x\n",
+ (unsigned long) info->fb.base, info->fb.size);
+ return DRM_ERR(EINVAL);
+ }
+
+
+ /* Init the resource manager */
+ if (!info->fb_heap.initialized) {
+ err = xgi_fb_heap_init(info);
+ if (err) {
+ DRM_ERROR("Unable to initialize FB heap.\n");
+ return err;
+ }
+ }
+
+
+ info->pcie.size = bs.gart.size;
+
+ /* Init the resource manager */
+ if (!info->pcie_heap.initialized) {
+ err = xgi_pcie_heap_init(info);
+ if (err) {
+ DRM_ERROR("Unable to initialize GART heap.\n");
+ return err;
+ }
+
+		/* Allocate 1 MiB for the command buffer holding the flush-2D batch array. */
+ err = xgi_cmdlist_initialize(info, 0x100000);
+ if (err) {
+ DRM_ERROR("xgi_cmdlist_initialize() failed\n");
+ return err;
+ }
+ }
+
+
+ if (info->pcie_map == NULL) {
+ err = drm_addmap(info->dev, 0, info->pcie.size,
+ _DRM_SCATTER_GATHER, _DRM_LOCKED,
+ & info->pcie_map);
+ if (err) {
+ DRM_ERROR("Could not add map for GART backing "
+ "store.\n");
+ return err;
+ }
+ }
+
+
+ maplist = drm_find_matching_map(dev, info->pcie_map);
+ if (maplist == NULL) {
+ DRM_ERROR("Could not find GART backing store map.\n");
+ return DRM_ERR(EINVAL);
+ }
+
+ bs.gart = *info->pcie_map;
+ bs.gart.handle = (void *)(unsigned long) maplist->user_token;
+ DRM_COPY_TO_USER_IOCTL((struct xgi_bootstrap __user *) data,
+ bs, sizeof(bs));
+
+ return 0;
+}
+
+
+void xgi_driver_preclose(struct drm_device * dev, DRMFILE filp)
+{
+ struct xgi_info * info = dev->dev_private;
+
+ xgi_pcie_free_all(info, filp);
+ xgi_fb_free_all(info, filp);
+}
+
+
+void xgi_driver_lastclose(drm_device_t * dev)
+{
+ struct xgi_info * info = dev->dev_private;
+
+ if (info != NULL) {
+ /* The core DRM lastclose routine will destroy all of our
+ * mappings for us. NULL out the pointers here so that
+ * xgi_bootstrap can do the right thing.
+ */
+ info->pcie_map = NULL;
+ info->mmio_map = NULL;
+ info->fb_map = NULL;
+
+ xgi_cmdlist_cleanup(info);
+
+ if (info->fb_heap.initialized) {
+ xgi_mem_heap_cleanup(&info->fb_heap);
+ }
+
+ if (info->pcie_heap.initialized) {
+ xgi_mem_heap_cleanup(&info->pcie_heap);
+ xgi_pcie_lut_cleanup(info);
+ }
+ }
+}
+
+
+/*
+ * The driver receives an interrupt; if someone is waiting, hand it off to the bottom half.
+ */
+irqreturn_t xgi_kern_isr(DRM_IRQ_ARGS)
+{
+ struct drm_device *dev = (struct drm_device *) arg;
+// struct xgi_info *info = dev->dev_private;
+ u32 need_to_run_bottom_half = 0;
+
+ //DRM_INFO("xgi_kern_isr \n");
+
+ //xgi_dvi_irq_handler(info);
+
+ if (need_to_run_bottom_half) {
+ drm_locked_tasklet(dev, xgi_kern_isr_bh);
+ }
+
+ return IRQ_HANDLED;
+}
+
+void xgi_kern_isr_bh(struct drm_device *dev)
+{
+ struct xgi_info *info = dev->dev_private;
+
+ DRM_INFO("xgi_kern_isr_bh \n");
+
+ //xgi_dvi_irq_handler(info);
+}
+
+int xgi_driver_load(struct drm_device *dev, unsigned long flags)
+{
+ struct xgi_info *info = drm_alloc(sizeof(*info), DRM_MEM_DRIVER);
+
+ if (!info)
+ return DRM_ERR(ENOMEM);
+
+ (void) memset(info, 0, sizeof(*info));
+ dev->dev_private = info;
+ info->dev = dev;
+
+ sema_init(&info->fb_sem, 1);
+ sema_init(&info->pcie_sem, 1);
+
+ info->mmio.base = drm_get_resource_start(dev, 1);
+ info->mmio.size = drm_get_resource_len(dev, 1);
+
+ DRM_INFO("mmio base: 0x%lx, size: 0x%x\n",
+ (unsigned long) info->mmio.base, info->mmio.size);
+
+
+ if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
+ DRM_ERROR("mmio appears to be wrong: 0x%lx 0x%x\n",
+ (unsigned long) info->mmio.base, info->mmio.size);
+ return DRM_ERR(EINVAL);
+ }
+
+
+ info->fb.base = drm_get_resource_start(dev, 0);
+ info->fb.size = drm_get_resource_len(dev, 0);
+
+ DRM_INFO("fb base: 0x%lx, size: 0x%x\n",
+ (unsigned long) info->fb.base, info->fb.size);
+
+
+ xgi_mem_block_cache = kmem_cache_create("xgi_mem_block",
+ sizeof(struct xgi_mem_block),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ NULL, NULL);
+ if (xgi_mem_block_cache == NULL) {
+ return DRM_ERR(ENOMEM);
+ }
+
+
+ return 0;
+}
+
+int xgi_driver_unload(struct drm_device *dev)
+{
+ struct xgi_info * info = dev->dev_private;
+
+ if (xgi_mem_block_cache) {
+ kmem_cache_destroy(xgi_mem_block_cache);
+ xgi_mem_block_cache = NULL;
+ }
+
+ drm_free(info, sizeof(*info), DRM_MEM_DRIVER);
+ dev->dev_private = NULL;
+
+ return 0;
+}
diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h
new file mode 100644
index 00000000..2061189a
--- /dev/null
+++ b/linux-core/xgi_drv.h
@@ -0,0 +1,152 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_DRV_H_
+#define _XGI_DRV_H_
+
+#include "drmP.h"
+#include "drm.h"
+
+#define DRIVER_AUTHOR "Andrea Zhang <andrea_zhang@macrosynergy.com>"
+
+#define DRIVER_NAME "xgi"
+#define DRIVER_DESC "XGI XP5 / XP10 / XG47"
+#define DRIVER_DATE "20070723"
+
+#define DRIVER_MAJOR 0
+#define DRIVER_MINOR 10
+#define DRIVER_PATCHLEVEL 0
+
+#include "xgi_cmdlist.h"
+#include "xgi_drm.h"
+
+struct xgi_aperture {
+ dma_addr_t base;
+ unsigned int size;
+};
+
+struct xgi_mem_block {
+ struct list_head list;
+ unsigned long offset;
+ unsigned long size;
+ DRMFILE filp;
+
+ unsigned int owner;
+};
+
+struct xgi_mem_heap {
+ struct list_head free_list;
+ struct list_head used_list;
+ struct list_head sort_list;
+ unsigned long max_freesize;
+
+ bool initialized;
+};
+
+struct xgi_info {
+ struct drm_device *dev;
+
+ bool bootstrap_done;
+
+ /* physical characteristics */
+ struct xgi_aperture mmio;
+ struct xgi_aperture fb;
+ struct xgi_aperture pcie;
+
+ struct drm_map *mmio_map;
+ struct drm_map *pcie_map;
+ struct drm_map *fb_map;
+
+ /* look up table parameters */
+ struct drm_dma_handle *lut_handle;
+ unsigned int lutPageSize;
+
+ struct xgi_mem_heap fb_heap;
+ struct xgi_mem_heap pcie_heap;
+
+ struct semaphore fb_sem;
+ struct semaphore pcie_sem;
+
+ struct xgi_cmdring_info cmdring;
+};
+
+enum PcieOwner {
+ PCIE_2D = 0,
+	/*
+	 * PCIE_3D must not start at 1;
+	 * 2D PCI-e memory allocations use owner 1.
+	 */
+	PCIE_3D = 11,	/* vertex buf */
+ PCIE_3D_CMDLIST = 12,
+ PCIE_3D_SCRATCHPAD = 13,
+ PCIE_3D_TEXTURE = 14,
+ PCIE_INVALID = 0x7fffffff
+};
+
+
+extern struct kmem_cache *xgi_mem_block_cache;
+extern struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap,
+ unsigned long size, enum PcieOwner owner);
+extern int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset,
+ DRMFILE filp);
+extern int xgi_mem_heap_init(struct xgi_mem_heap * heap, unsigned int start,
+ unsigned int end);
+extern void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap);
+
+extern int xgi_fb_heap_init(struct xgi_info * info);
+
+extern int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+ DRMFILE filp);
+
+extern int xgi_fb_free(struct xgi_info * info, unsigned long offset,
+ DRMFILE filp);
+
+extern int xgi_pcie_heap_init(struct xgi_info * info);
+extern void xgi_pcie_lut_cleanup(struct xgi_info * info);
+
+extern int xgi_pcie_alloc(struct xgi_info * info,
+ struct xgi_mem_alloc * alloc, DRMFILE filp);
+
+extern int xgi_pcie_free(struct xgi_info * info, unsigned long offset,
+ DRMFILE filp);
+
+extern void *xgi_find_pcie_virt(struct xgi_info * info, u32 address);
+
+extern void xgi_pcie_free_all(struct xgi_info *, DRMFILE);
+extern void xgi_fb_free_all(struct xgi_info *, DRMFILE);
+
+extern int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_fb_free_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_dump_register_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_submit_cmdlist_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS);
+extern int xgi_state_change_ioctl(DRM_IOCTL_ARGS);
+
+#endif
diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c
new file mode 100644
index 00000000..3d3b2ae0
--- /dev/null
+++ b/linux-core/xgi_fb.c
@@ -0,0 +1,370 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+
+#define XGI_FB_HEAP_START 0x1000000
+
+struct kmem_cache *xgi_mem_block_cache = NULL;
+
+static struct xgi_mem_block *xgi_mem_new_node(void);
+
+
+/*
+ * Initialize a memory heap spanning [start, end): reset the three block
+ * lists and seed the free list with a single block covering the whole
+ * range.  Returns 0 on success or DRM_ERR(ENOMEM).
+ *
+ * NOTE(review): heap->initialized is set TRUE before the allocation that
+ * can fail, so a failed init leaves the heap flagged initialized with an
+ * empty free list -- confirm callers treat the error return as fatal.
+ */
+int xgi_mem_heap_init(struct xgi_mem_heap *heap, unsigned int start,
+		      unsigned int end)
+{
+	struct xgi_mem_block *block;
+
+	INIT_LIST_HEAD(&heap->free_list);
+	INIT_LIST_HEAD(&heap->used_list);
+	INIT_LIST_HEAD(&heap->sort_list);
+	heap->initialized = TRUE;
+
+	block = kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);
+	if (!block) {
+		return DRM_ERR(ENOMEM);
+	}
+
+	/* One free block covering the entire managed range. */
+	block->offset = start;
+	block->size = end - start;
+
+	list_add(&block->list, &heap->free_list);
+
+	heap->max_freesize = end - start;
+
+	return 0;
+}
+
+
+/*
+ * Tear down a heap: release every block on the free, used, and sort lists
+ * back to the block cache and mark the heap uninitialized.
+ *
+ * NOTE(review): the `free_list++` walk relies on free_list, used_list and
+ * sort_list being three consecutive struct list_head members of
+ * struct xgi_mem_heap -- confirm against the declaration in xgi_drv.h;
+ * reordering those fields would silently break this loop.
+ */
+void xgi_mem_heap_cleanup(struct xgi_mem_heap * heap)
+{
+	struct list_head *free_list;
+	struct xgi_mem_block *block;
+	struct xgi_mem_block *next;
+	int i;
+
+	free_list = &heap->free_list;
+	for (i = 0; i < 3; i++, free_list++) {
+		list_for_each_entry_safe(block, next, free_list, list) {
+			DRM_INFO
+			    ("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
+			     i, block->offset, block->size);
+			kmem_cache_free(xgi_mem_block_cache, block);
+			block = NULL;
+		}
+	}
+
+	heap->initialized = 0;
+}
+
+
+/*
+ * Allocate a fresh block descriptor from the slab cache, with owner and
+ * file handle set to sentinel "unowned" values.  Returns NULL on
+ * allocation failure.
+ */
+struct xgi_mem_block *xgi_mem_new_node(void)
+{
+	struct xgi_mem_block *block =
+	    kmem_cache_alloc(xgi_mem_block_cache, GFP_KERNEL);
+
+	if (!block) {
+		DRM_ERROR("kmem_cache_alloc failed\n");
+		return NULL;
+	}
+
+	block->offset = 0;
+	block->size = 0;
+	block->owner = PCIE_INVALID;
+	block->filp = (DRMFILE) -1;
+
+	return block;
+}
+
+
+/*
+ * First-fit allocator: round the request up to a whole number of pages,
+ * find the first free block large enough, and either claim it outright
+ * (exact fit) or split it, carving the used portion off the front.
+ * Returns the used block on success, NULL on failure.
+ *
+ * Caller must hold the relevant heap semaphore and is responsible for
+ * setting block->filp -- it is NOT assigned here.
+ */
+struct xgi_mem_block *xgi_mem_alloc(struct xgi_mem_heap * heap,
+				    unsigned long originalSize,
+				    enum PcieOwner owner)
+{
+	struct xgi_mem_block *block, *free_block, *used_block;
+	/* Round up to a page multiple (PAGE_MASK == ~(PAGE_SIZE - 1)). */
+	unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
+
+
+	DRM_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
+		 originalSize, size);
+
+	if (size == 0) {
+		DRM_ERROR("size == 0\n");
+		return (NULL);
+	}
+	DRM_INFO("max_freesize: 0x%lx \n", heap->max_freesize);
+	if (size > heap->max_freesize) {
+		DRM_ERROR
+		    ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n",
+		     size, heap->max_freesize);
+		return (NULL);
+	}
+
+	/* First-fit scan of the free list. */
+	list_for_each_entry(block, &heap->free_list, list) {
+		DRM_INFO("block: 0x%px \n", block);
+		if (size <= block->size) {
+			break;
+		}
+	}
+
+	/* Loop ran to completion without a fit: block->list points back at
+	 * the list head. */
+	if (&block->list == &heap->free_list) {
+		DRM_ERROR
+		    ("Can't allocate %ldk size from frame buffer memory !\n",
+		     size / 1024);
+		return (NULL);
+	}
+
+	free_block = block;
+	DRM_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
+		 size, free_block->offset, free_block->size);
+
+	if (size == free_block->size) {
+		/* Exact fit: move the free block to the used list as-is. */
+		used_block = free_block;
+		DRM_INFO("size == free_block->size: free_block = 0x%p\n",
+			 free_block);
+		list_del(&free_block->list);
+	} else {
+		/* Split: new descriptor takes the front of the free block. */
+		used_block = xgi_mem_new_node();
+
+		if (used_block == NULL)
+			return (NULL);
+
+		if (used_block == free_block) {
+			DRM_ERROR("used_block == free_block = 0x%p\n",
+				  used_block);
+		}
+
+		used_block->offset = free_block->offset;
+		used_block->size = size;
+
+		free_block->offset += size;
+		free_block->size -= size;
+	}
+
+	heap->max_freesize -= size;
+
+	list_add(&used_block->list, &heap->used_list);
+	used_block->owner = owner;
+
+	return (used_block);
+}
+
+/*
+ * Free the used block starting at @offset, verifying it belongs to @filp,
+ * and coalesce it with any free neighbours that are adjacent below
+ * (prev) and/or above (next).  Returns 0, DRM_ERR(ENOENT) if no used
+ * block starts at @offset, or DRM_ERR(EPERM) on an ownership mismatch.
+ *
+ * Caller must hold the relevant heap semaphore.
+ */
+int xgi_mem_free(struct xgi_mem_heap * heap, unsigned long offset,
+		 DRMFILE filp)
+{
+	struct xgi_mem_block *used_block = NULL, *block;
+	struct xgi_mem_block *prev, *next;
+
+	unsigned long upper;
+	unsigned long lower;
+
+	list_for_each_entry(block, &heap->used_list, list) {
+		if (block->offset == offset) {
+			break;
+		}
+	}
+
+	if (&block->list == &heap->used_list) {
+		DRM_ERROR("can't find block: 0x%lx to free!\n", offset);
+		return DRM_ERR(ENOENT);
+	}
+
+	if (block->filp != filp) {
+		return DRM_ERR(EPERM);
+	}
+
+	used_block = block;
+	DRM_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
+		 used_block, used_block->offset, used_block->size);
+
+	heap->max_freesize += used_block->size;
+
+	/* Find free blocks that touch this one: `next` starts exactly at
+	 * our upper end, `prev` ends exactly at our lower end. */
+	prev = next = NULL;
+	upper = used_block->offset + used_block->size;
+	lower = used_block->offset;
+
+	list_for_each_entry(block, &heap->free_list, list) {
+		if (block->offset == upper) {
+			next = block;
+		} else if ((block->offset + block->size) == lower) {
+			prev = block;
+		}
+	}
+
+	DRM_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
+	list_del(&used_block->list);
+
+	if (prev && next) {
+		/* Merge all three into prev; next and used_block die. */
+		prev->size += (used_block->size + next->size);
+		list_del(&next->list);
+		DRM_INFO("free node 0x%p\n", next);
+		kmem_cache_free(xgi_mem_block_cache, next);
+		kmem_cache_free(xgi_mem_block_cache, used_block);
+	}
+	else if (prev) {
+		prev->size += used_block->size;
+		DRM_INFO("free node 0x%p\n", used_block);
+		kmem_cache_free(xgi_mem_block_cache, used_block);
+	}
+	else if (next) {
+		/* Grow next downward to absorb the freed range. */
+		next->size += used_block->size;
+		next->offset = used_block->offset;
+		DRM_INFO("free node 0x%p\n", used_block);
+		kmem_cache_free(xgi_mem_block_cache, used_block);
+	}
+	else {
+		/* No adjacent free block: recycle the descriptor itself. */
+		list_add(&used_block->list, &heap->free_list);
+		DRM_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
+			 used_block, used_block->offset, used_block->size);
+	}
+
+	return 0;
+}
+
+
+/*
+ * Allocate video RAM for a client.  A front-buffer request is satisfied
+ * trivially at offset 0 without touching the heap; otherwise allocate
+ * from the FB heap under fb_sem and record @filp as the owner.
+ * Returns 0 or DRM_ERR(ENOMEM).
+ *
+ * NOTE(review): in the is_front path alloc->size is left as whatever the
+ * caller passed in and no block ownership is recorded -- confirm that is
+ * intended for the shared front buffer.
+ */
+int xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+		 DRMFILE filp)
+{
+	struct xgi_mem_block *block;
+
+	if (alloc->is_front) {
+		alloc->location = XGI_MEMLOC_LOCAL;
+		alloc->offset = 0;
+		alloc->hw_addr = 0;
+		DRM_INFO
+		    ("Video RAM allocation on front buffer successfully! \n");
+	} else {
+		down(&info->fb_sem);
+		block = xgi_mem_alloc(&info->fb_heap, alloc->size, PCIE_2D);
+		up(&info->fb_sem);
+
+		if (block == NULL) {
+			alloc->location = XGI_MEMLOC_LOCAL;
+			alloc->size = 0;
+			DRM_ERROR("Video RAM allocation failed\n");
+			return DRM_ERR(ENOMEM);
+		} else {
+			DRM_INFO("Video RAM allocation succeeded: 0x%p\n",
+				 (char *)block->offset);
+			alloc->location = XGI_MEMLOC_LOCAL;
+			alloc->size = block->size;
+			alloc->offset = block->offset;
+			alloc->hw_addr = block->offset;
+
+			/* Record ownership for per-file cleanup. */
+			block->filp = filp;
+		}
+	}
+
+	return 0;
+}
+
+
+/*
+ * ioctl wrapper for xgi_fb_alloc(): copy the request in from user space,
+ * perform the allocation, and copy the filled-in result back out.
+ */
+int xgi_fb_alloc_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_mem_alloc alloc;
+	struct xgi_info *info = dev->dev_private;
+	int err;
+
+	DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data,
+				 sizeof(alloc));
+
+	err = xgi_fb_alloc(info, & alloc, filp);
+	if (err) {
+		return err;
+	}
+
+	DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data,
+			       alloc, sizeof(alloc));
+
+	return 0;
+}
+
+
+/*
+ * Free a video RAM block at @offset on behalf of @filp.  Offset 0 is the
+ * front buffer, which is never heap-managed, so it is a no-op success.
+ */
+int xgi_fb_free(struct xgi_info * info, unsigned long offset, DRMFILE filp)
+{
+	int err = 0;
+
+	if (offset == 0) {
+		DRM_INFO("free onscreen frame buffer successfully !\n");
+	} else {
+		down(&info->fb_sem);
+		err = xgi_mem_free(&info->fb_heap, offset, filp);
+		up(&info->fb_sem);
+	}
+
+	return err;
+}
+
+
+/*
+ * ioctl wrapper for xgi_fb_free(): read the offset from user space and
+ * free the corresponding block.
+ *
+ * NOTE(review): `offset` is u32 but the user pointer is cast to
+ * `unsigned long __user *`; only sizeof(u32) bytes are copied, so on
+ * 64-bit the types disagree -- confirm the user-space ABI passes a
+ * 32-bit value here.
+ */
+int xgi_fb_free_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_info *info = dev->dev_private;
+	u32 offset;
+
+	DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data,
+				 sizeof(offset));
+
+	return xgi_fb_free(info, offset, filp);
+}
+
+
+/*
+ * Set up the frame-buffer heap covering the VRAM above the fixed
+ * XGI_FB_HEAP_START reservation (the first 16MB is left for scanout).
+ */
+int xgi_fb_heap_init(struct xgi_info * info)
+{
+	return xgi_mem_heap_init(&info->fb_heap, XGI_FB_HEAP_START,
+				 info->fb.size);
+}
+
+/**
+ * Free all blocks associated with a particular file handle.
+ */
+void xgi_fb_free_all(struct xgi_info * info, DRMFILE filp)
+{
+ if (!info->fb_heap.initialized) {
+ return;
+ }
+
+ down(&info->fb_sem);
+
+ do {
+ struct xgi_mem_block *block;
+
+ list_for_each_entry(block, &info->fb_heap.used_list, list) {
+ if (block->filp == filp) {
+ break;
+ }
+ }
+
+ if (&block->list == &info->fb_heap.used_list) {
+ break;
+ }
+
+ (void) xgi_mem_free(&info->fb_heap, block->offset, filp);
+ } while(1);
+
+ up(&info->fb_sem);
+}
diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c
new file mode 100644
index 00000000..5e8c3da8
--- /dev/null
+++ b/linux-core/xgi_misc.c
@@ -0,0 +1,474 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+
+/*
+ * ioctl: reset the graphics engine by cycling it off and back on.
+ */
+int xgi_ge_reset_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_info *info = dev->dev_private;
+
+	xgi_disable_ge(info);
+	xgi_enable_ge(info);
+
+	return 0;
+}
+
+
+/*
+ * irq functions
+ */
+#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
+
+static unsigned int s_invalid_begin = 0;
+
+/*
+ * Probe the GE debug port to decide whether a stall interrupt reflects a
+ * genuine hang.  Selects a series of debug indices via MMIO 0x235c and
+ * reads results back from 0x2360; returns FALSE if any read-back,
+ * channel, or outstanding-request check fails (i.e. signals are still in
+ * flight), TRUE if the engine looks genuinely stalled.
+ *
+ * NOTE(review): the 0x235c/0x2360 index values and field layouts are
+ * undocumented hardware debug registers -- taken on faith from the
+ * vendor drop; do not modify without hardware documentation.
+ */
+static bool xgi_validate_signal(volatile u8 *mmio_vbase)
+{
+	volatile u32 *const ge_3d_status =
+		(volatile u32 *)(mmio_vbase + 0x2800);
+	const u32 old_ge_status = ge_3d_status[0x00];
+
+	if (old_ge_status & 0x001c0000) {
+		u16 check;
+
+		/* Check Read back status */
+		*(mmio_vbase + 0x235c) = 0x80;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+
+		if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
+			return FALSE;
+		}
+
+		/* Check RO channel */
+		*(mmio_vbase + 0x235c) = 0x83;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
+			return FALSE;
+		}
+
+		/* Check RW channel */
+		*(mmio_vbase + 0x235c) = 0x88;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
+			return FALSE;
+		}
+
+		/* Check RO channel outstanding */
+		*(mmio_vbase + 0x235c) = 0x8f;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if (0 != (check & 0x3ff)) {
+			return FALSE;
+		}
+
+		/* Check RW channel outstanding */
+		*(mmio_vbase + 0x235c) = 0x90;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if (0 != (check & 0x3ff)) {
+			return FALSE;
+		}
+
+		/* No pending PCIE request. GE stall. */
+	}
+
+	return TRUE;
+}
+
+
+/*
+ * Attempt to recover a hung graphics engine.  First tries the soft reset
+ * via MMIO 0xb057; if the 3D status busy bits (0xf0000000) fail to clear
+ * within the timeout, falls back to a hard reset through CRTC register
+ * 3x5.36 with dynamic gating temporarily disabled, restoring all
+ * clobbered index/data registers afterwards.
+ *
+ * NOTE(review): the inner `(--time_out) & 0xfff` countdowns are raw CPU
+ * spin delays with no fixed wall-clock duration -- timing is
+ * frequency-dependent; confirm acceptable on target hardware.
+ */
+static void xgi_ge_hang_reset(volatile u8 *mmio_vbase)
+{
+	volatile u32 *const ge_3d_status =
+		(volatile u32 *)(mmio_vbase + 0x2800);
+	int time_out = 0xffff;
+
+	*(mmio_vbase + 0xb057) = 8;
+	while (0 != (ge_3d_status[0x00] & 0xf0000000)) {
+		while (0 != ((--time_out) & 0xfff))
+			/* empty */ ;
+
+		if (0 == time_out) {
+			u8 old_3ce;
+			u8 old_3cf;
+			u8 old_index;
+			u8 old_36;
+
+			DRM_INFO("Can not reset back 0x%x!\n",
+				 ge_3d_status[0x00]);
+
+			*(mmio_vbase + 0xb057) = 0;
+
+			/* Have to use 3x5.36 to reset. */
+			/* Save and close dynamic gating */
+
+			old_3ce = *(mmio_vbase + 0x3ce);
+			*(mmio_vbase + 0x3ce) = 0x2a;
+			old_3cf = *(mmio_vbase + 0x3cf);
+			*(mmio_vbase + 0x3cf) = old_3cf & 0xfe;
+
+			/* Reset GE */
+			old_index = *(mmio_vbase + 0x3d4);
+			*(mmio_vbase + 0x3d4) = 0x36;
+			old_36 = *(mmio_vbase + 0x3d5);
+			*(mmio_vbase + 0x3d5) = old_36 | 0x10;
+
+			while (0 != ((--time_out) & 0xfff))
+				/* empty */ ;
+
+			*(mmio_vbase + 0x3d5) = old_36;
+			*(mmio_vbase + 0x3d4) = old_index;
+
+			/* Restore dynamic gating */
+			*(mmio_vbase + 0x3cf) = old_3cf;
+			*(mmio_vbase + 0x3ce) = old_3ce;
+			break;
+		}
+	}
+
+	*(mmio_vbase + 0xb057) = 0;
+}
+
+
+/*
+ * Graphics-engine interrupt handler.  Acknowledges GE stall and invalid
+ * interrupts via ge_3d_status[4]; on repeated stall interrupts arriving
+ * inside STALL_INTERRUPT_RESET_THRESHOLD cycles it triggers a GE reset.
+ * Returns TRUE when the interrupt was a GE interrupt that was handled.
+ *
+ * NOTE(review): is_support_auto_reset is hard-coded FALSE, so the entire
+ * validate/hang-reset path below is currently dead code -- confirm
+ * whether auto-reset was intentionally disabled or should be a module
+ * parameter.
+ */
+bool xgi_ge_irq_handler(struct xgi_info * info)
+{
+	volatile u8 *const mmio_vbase = info->mmio_map->handle;
+	volatile u32 *const ge_3d_status =
+		(volatile u32 *)(mmio_vbase + 0x2800);
+	const u32 int_status = ge_3d_status[4];
+	bool is_support_auto_reset = FALSE;
+
+	/* Check GE on/off */
+	if (0 == (0xffffc0f0 & int_status)) {
+		u32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a];
+
+		if (0 != (0x1000 & int_status)) {
+			/* We got GE stall interrupt.
+			 */
+			/* Write-back with bit 26 set acknowledges the IRQ. */
+			ge_3d_status[0x04] = int_status | 0x04000000;
+
+			if (is_support_auto_reset) {
+				static cycles_t last_tick;
+				static unsigned continue_int_count = 0;
+
+				/* OE II is busy. */
+
+				if (!xgi_validate_signal(mmio_vbase)) {
+					/* Nothing but skip. */
+				} else if (0 == continue_int_count++) {
+					last_tick = get_cycles();
+				} else {
+					const cycles_t new_tick = get_cycles();
+					if ((new_tick - last_tick) >
+					    STALL_INTERRUPT_RESET_THRESHOLD) {
+						continue_int_count = 0;
+					} else if (continue_int_count >= 3) {
+						continue_int_count = 0;
+
+						/* GE Hung up, need reset. */
+						DRM_INFO("Reset GE!\n");
+
+						xgi_ge_hang_reset(mmio_vbase);
+					}
+				}
+			}
+		} else if (0 != (0x1 & int_status)) {
+			/* "Invalid begin" interrupt: count and acknowledge. */
+			s_invalid_begin++;
+			ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000;
+		}
+
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/*
+ * CRT interrupt handler: if GR 0x37 bit 0 reports a CRT1 interrupt,
+ * clear it by pulsing GR 0x3d bit 2.  Saves and restores the 3CE index
+ * register around the accesses.  Returns TRUE if an interrupt was
+ * cleared.
+ *
+ * NOTE(review): op3cf_37 is read but never used -- either dead code or a
+ * leftover from removed diagnostics.
+ */
+bool xgi_crt_irq_handler(struct xgi_info * info)
+{
+	bool ret = FALSE;
+	u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
+
+	if (IN3CFB(info->mmio_map, 0x37) & 0x01) // CRT1 interrupt just happened
+	{
+		u8 op3cf_3d;
+		u8 op3cf_37;
+
+		// What happened?
+		op3cf_37 = IN3CFB(info->mmio_map, 0x37);
+
+		// Clear CRT interrupt
+		op3cf_3d = IN3CFB(info->mmio_map, 0x3d);
+		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d | 0x04));
+		OUT3CFB(info->mmio_map, 0x3d, (op3cf_3d & ~0x04));
+		ret = TRUE;
+	}
+	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
+
+	return (ret);
+}
+
+/*
+ * DVI interrupt handler: if GR 0x38 bit 5 reports a DVI plug/unplug
+ * interrupt, notify the BIOS via CR 0x5a and pulse bit 0 of register
+ * 0x39 to clear the interrupt.  Returns TRUE if an interrupt was
+ * cleared.
+ *
+ * NOTE(review): the clear sequence uses OUT3C5B (ports 3C4/3C5) while
+ * both the value read (IN3CFB) and the inline comments refer to
+ * 3cf.39 -- this looks like a typo for OUT3CFB; confirm against the
+ * register spec before changing.
+ */
+bool xgi_dvi_irq_handler(struct xgi_info * info)
+{
+	bool ret = FALSE;
+	const u8 save_3ce = DRM_READ8(info->mmio_map, 0x3ce);
+
+	if (IN3CFB(info->mmio_map, 0x38) & 0x20) { // DVI interrupt just happened
+		const u8 save_3x4 = DRM_READ8(info->mmio_map, 0x3d4);
+		u8 op3cf_39;
+		u8 op3cf_37;
+		u8 op3x5_5a;
+
+		// What happened?
+		op3cf_37 = IN3CFB(info->mmio_map, 0x37);
+
+		//Notify BIOS that DVI plug/unplug happened
+		op3x5_5a = IN3X5B(info->mmio_map, 0x5a);
+		OUT3X5B(info->mmio_map, 0x5a, op3x5_5a & 0xf7);
+
+		DRM_WRITE8(info->mmio_map, 0x3d4, save_3x4);
+
+		// Clear DVI interrupt
+		op3cf_39 = IN3CFB(info->mmio_map, 0x39);
+		OUT3C5B(info->mmio_map, 0x39, (op3cf_39 & ~0x01));	//Set 3cf.39 bit 0 to 0
+		OUT3C5B(info->mmio_map, 0x39, (op3cf_39 | 0x01));	//Set 3cf.39 bit 0 to 1
+
+		ret = TRUE;
+	}
+	DRM_WRITE8(info->mmio_map, 0x3ce, save_3ce);
+
+	return (ret);
+}
+
+
+/*
+ * Debug helper: dump the VGA indexed register banks (SR via 3C5, CR via
+ * 3D5, GR via 3CF) and several MMIO ranges (0xB000, 0x2200, 0x2300,
+ * 0x2400, 0x2800) to the kernel log as hex tables, one 16-column row
+ * per line with a leading row index.
+ *
+ * NOTE(review): raw printk() with no KERN_ level and DOS-style "\r\n"
+ * line endings -- kept byte-identical here, but nonstandard for kernel
+ * logging.
+ */
+void xgi_dump_register(struct xgi_info * info)
+{
+	int i, j;
+	unsigned char temp;
+
+	// 0x3C5
+	printk("\r\n=====xgi_dump_register========0x%x===============\r\n",
+	       0x3C5);
+
+	/* Column header: 0x00..0x0f. */
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = IN3C5B(info->mmio_map, i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	// 0x3D5
+	printk("\r\n====xgi_dump_register=========0x%x===============\r\n",
+	       0x3D5);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = IN3X5B(info->mmio_map, i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	// 0x3CF
+	printk("\r\n=========xgi_dump_register====0x%x===============\r\n",
+	       0x3CF);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = IN3CFB(info->mmio_map, i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	printk("\r\n=====xgi_dump_register======0x%x===============\r\n",
+	       0xB000);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	/* Only 5 rows (0xB000-0xB04F) are dumped for this bank. */
+	for (i = 0; i < 0x5; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = DRM_READ8(info->mmio_map, 0xB000 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	printk("\r\n==================0x%x===============\r\n", 0x2200);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0xB; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = DRM_READ8(info->mmio_map, 0x2200 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	printk("\r\n==================0x%x===============\r\n", 0x2300);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x7; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = DRM_READ8(info->mmio_map, 0x2300 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	printk("\r\n==================0x%x===============\r\n", 0x2400);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = DRM_READ8(info->mmio_map, 0x2400 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	printk("\r\n==================0x%x===============\r\n", 0x2800);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = DRM_READ8(info->mmio_map, 0x2800 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+}
+
+
+/*
+ * ioctl wrapper around xgi_dump_register() for debugging from user space.
+ */
+int xgi_dump_register_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_info *info = dev->dev_private;
+
+	xgi_dump_register(info);
+
+	return 0;
+}
+
+
+/*
+ * ioctl: restore two CRTC registers (3x5.13 = 0, 3x5.8b = 2) to known
+ * values.
+ *
+ * NOTE(review): the meaning of registers 0x13/0x8b and these magic
+ * values is not documented here -- taken from the vendor drop.
+ */
+int xgi_restore_registers_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_info *info = dev->dev_private;
+
+	OUT3X5B(info->mmio_map, 0x13, 0);
+	OUT3X5B(info->mmio_map, 0x8b, 2);
+
+	return 0;
+}
+
+/*
+ * Spin until the GE status register reports no busy bits (per IDLE_MASK)
+ * for five consecutive reads.
+ *
+ * NOTE(review): this is an unbounded busy-wait with no timeout and no
+ * cpu_relax()/delay in the loop -- if the engine never goes idle this
+ * hangs the CPU; consider a bounded retry count.
+ */
+void xgi_waitfor_pci_idle(struct xgi_info * info)
+{
+#define WHOLD_GE_STATUS             0x2800
+#define IDLE_MASK                   ~0x90200000
+
+	int idleCount = 0;
+	while (idleCount < 5) {
+		if (DRM_READ32(info->mmio_map, WHOLD_GE_STATUS) & IDLE_MASK) {
+			idleCount = 0;
+		} else {
+			idleCount++;
+		}
+	}
+}
diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h
new file mode 100644
index 00000000..af19a11a
--- /dev/null
+++ b/linux-core/xgi_misc.h
@@ -0,0 +1,37 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_MISC_H_
+#define _XGI_MISC_H_
+
+extern void xgi_dump_register(struct xgi_info * info);
+
+extern bool xgi_ge_irq_handler(struct xgi_info * info);
+extern bool xgi_crt_irq_handler(struct xgi_info * info);
+extern bool xgi_dvi_irq_handler(struct xgi_info * info);
+extern void xgi_waitfor_pci_idle(struct xgi_info * info);
+
+#endif
diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c
new file mode 100644
index 00000000..537e82f5
--- /dev/null
+++ b/linux-core/xgi_pcie.c
@@ -0,0 +1,348 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+
+static struct xgi_mem_block *xgi_pcie_vertex_block = NULL;
+static struct xgi_mem_block *xgi_pcie_cmdlist_block = NULL;
+static struct xgi_mem_block *xgi_pcie_scratchpad_block = NULL;
+
+static int xgi_pcie_free_locked(struct xgi_info * info,
+ unsigned long offset, DRMFILE filp);
+
+/*
+ * Build the PCIE GART: read the FB aperture size and LUT page size from
+ * hardware, allocate scatter/gather backing pages plus a physically
+ * contiguous LUT page (DMA address per backing page), map every page
+ * for DMA, flush CPU caches, and point the hardware (0xB034) at the LUT.
+ * Returns 0 or a negative/DRM error code.
+ *
+ * NOTE(review): error paths leak resources -- on LUT-page allocation
+ * failure the scatter/gather allocation is not freed, and on a
+ * pci_map_page() failure previously mapped pages are not unmapped.
+ * Callers must invoke xgi_pcie_lut_cleanup(), but these mid-function
+ * failures leave partial state behind.
+ */
+static int xgi_pcie_lut_init(struct xgi_info * info)
+{
+	u8 temp = 0;
+	int err;
+	unsigned i;
+	struct drm_scatter_gather request;
+	struct drm_sg_mem *sg;
+	u32 *lut;
+
+
+	/* Get current FB aperture size */
+	temp = IN3X5B(info->mmio_map, 0x27);
+	DRM_INFO("In3x5(0x27): 0x%x \n", temp);
+
+	if (temp & 0x01) {	/* 256MB; Jong 06/05/2006; 0x10000000 */
+		info->pcie.base = 256 * 1024 * 1024;
+	} else {		/* 128MB; Jong 06/05/2006; 0x08000000 */
+		info->pcie.base = 128 * 1024 * 1024;
+	}
+
+
+	DRM_INFO("info->pcie.base: 0x%lx\n", (unsigned long) info->pcie.base);
+
+	/* Get current lookup table page size */
+	temp = DRM_READ8(info->mmio_map, 0xB00C);
+	if (temp & 0x04) {	/* 8KB */
+		info->lutPageSize = 8 * 1024;
+	} else {		/* 4KB */
+		info->lutPageSize = 4 * 1024;
+	}
+
+	DRM_INFO("info->lutPageSize: 0x%x \n", info->lutPageSize);
+
+
+	request.size = info->pcie.size;
+	err = drm_sg_alloc(info->dev, & request);
+	if (err) {
+		DRM_ERROR("cannot allocate PCIE GART backing store! "
+			  "size = %d\n", info->pcie.size);
+		return err;
+	}
+
+	sg = info->dev->sg;
+
+	/* One 32-bit LUT entry per backing page; the LUT itself must be
+	 * reachable by the device below 2GB (DMA_31BIT_MASK). */
+	info->lut_handle = drm_pci_alloc(info->dev,
+					 sizeof(u32) * sg->pages,
+					 PAGE_SIZE,
+					 DMA_31BIT_MASK);
+	if (info->lut_handle == NULL) {
+		DRM_ERROR("cannot allocate PCIE lut page!\n");
+		return DRM_ERR(ENOMEM);
+	}
+
+	lut = info->lut_handle->vaddr;
+	for (i = 0; i < sg->pages; i++) {
+		info->dev->sg->busaddr[i] = pci_map_page(info->dev->pdev,
+							 sg->pagelist[i],
+							 0,
+							 PAGE_SIZE,
+							 DMA_BIDIRECTIONAL);
+		if (dma_mapping_error(info->dev->sg->busaddr[i])) {
+			DRM_ERROR("cannot map GART backing store for DMA!\n");
+			return DRM_ERR(-(info->dev->sg->busaddr[i]));
+		}
+
+		lut[i] = info->dev->sg->busaddr[i];
+	}
+
+	/* Make sure the LUT writes reach memory before the device reads
+	 * them: full cache writeback on x86, memory barrier elsewhere. */
+#if defined(__i386__) || defined(__x86_64__)
+	asm volatile ("wbinvd":::"memory");
+#else
+	mb();
+#endif
+
+	/* Set GART in SFB */
+	temp = DRM_READ8(info->mmio_map, 0xB00C);
+	DRM_WRITE8(info->mmio_map, 0xB00C, temp & ~0x02);
+
+	/* Set GART base address to HW */
+	dwWriteReg(info->mmio_map, 0xB034, info->lut_handle->busaddr);
+
+	return 0;
+}
+
+/*
+ * Release the GART resources created by xgi_pcie_lut_init(): the
+ * scatter/gather backing store and the LUT page.  Safe to call when
+ * either was never allocated.
+ */
+void xgi_pcie_lut_cleanup(struct xgi_info * info)
+{
+	if (info->dev->sg) {
+		drm_sg_free(info->dev, info->dev->sg->handle);
+	}
+
+	if (info->lut_handle) {
+		drm_pci_free(info->dev, info->lut_handle);
+		info->lut_handle = NULL;
+	}
+}
+
+/*
+ * Initialize the PCIE memory heap: set up the GART LUT first, then the
+ * allocator heap over [0, pcie.size).  On heap-init failure the LUT is
+ * torn down again so no partial state is left.
+ */
+int xgi_pcie_heap_init(struct xgi_info * info)
+{
+	int err;
+
+	err = xgi_pcie_lut_init(info);
+	if (err) {
+		DRM_ERROR("xgi_pcie_lut_init failed\n");
+		return err;
+	}
+
+
+	err = xgi_mem_heap_init(&info->pcie_heap, 0, info->pcie.size);
+	if (err) {
+		xgi_pcie_lut_cleanup(info);
+	}
+
+	return err;
+}
+
+
+/*
+ * Allocate PCIE (GART) memory.  The vertex buffer, command list, and
+ * scratch pad are singletons: once created their cached blocks are
+ * handed back to any subsequent requester instead of allocating again.
+ * On success fills in @alloc (hw_addr is offset + aperture base) and
+ * records @filp as owner; returns 0 or DRM_ERR(ENOMEM).
+ *
+ * NOTE(review): when a cached singleton block is returned, the requested
+ * alloc->size is ignored and block->filp is overwritten with the new
+ * caller -- so the singleton's ownership silently migrates between
+ * clients.  Confirm this matches the intended sharing model.
+ */
+int xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+		   DRMFILE filp)
+{
+	struct xgi_mem_block *block;
+
+	down(&info->pcie_sem);
+	if ((alloc->owner == PCIE_3D) && (xgi_pcie_vertex_block)) {
+		DRM_INFO("PCIE Vertex has been created, return directly.\n");
+		block = xgi_pcie_vertex_block;
+	}
+	else if ((alloc->owner == PCIE_3D_CMDLIST) && (xgi_pcie_cmdlist_block)) {
+		DRM_INFO("PCIE Cmdlist has been created, return directly.\n");
+		block = xgi_pcie_cmdlist_block;
+	}
+	else if ((alloc->owner == PCIE_3D_SCRATCHPAD) && (xgi_pcie_scratchpad_block)) {
+		DRM_INFO("PCIE Scratchpad has been created, return directly.\n");
+		block = xgi_pcie_scratchpad_block;
+	}
+	else {
+		block = xgi_mem_alloc(&info->pcie_heap, alloc->size, alloc->owner);
+
+		/* Cache singleton blocks for reuse by later requests. */
+		if (alloc->owner == PCIE_3D) {
+			xgi_pcie_vertex_block = block;
+		}
+		else if (alloc->owner == PCIE_3D_CMDLIST) {
+			xgi_pcie_cmdlist_block = block;
+		}
+		else if (alloc->owner == PCIE_3D_SCRATCHPAD) {
+			xgi_pcie_scratchpad_block = block;
+		}
+	}
+	up(&info->pcie_sem);
+
+	if (block == NULL) {
+		alloc->location = XGI_MEMLOC_INVALID;
+		alloc->size = 0;
+		DRM_ERROR("PCIE RAM allocation failed\n");
+		return DRM_ERR(ENOMEM);
+	} else {
+		DRM_INFO("PCIE RAM allocation succeeded: offset = 0x%lx\n",
+			 block->offset);
+		alloc->location = XGI_MEMLOC_NON_LOCAL;
+		alloc->size = block->size;
+		alloc->hw_addr = block->offset + info->pcie.base;
+		alloc->offset = block->offset;
+
+		block->filp = filp;
+		return 0;
+	}
+}
+
+
+/*
+ * ioctl wrapper for xgi_pcie_alloc(): copy the request in from user
+ * space, perform the allocation, and copy the filled-in result back out.
+ */
+int xgi_pcie_alloc_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_mem_alloc alloc;
+	struct xgi_info *info = dev->dev_private;
+	int err;
+
+	DRM_COPY_FROM_USER_IOCTL(alloc, (struct xgi_mem_alloc __user *) data,
+				 sizeof(alloc));
+
+	err = xgi_pcie_alloc(info, & alloc, filp);
+	if (err) {
+		return err;
+	}
+
+	DRM_COPY_TO_USER_IOCTL((struct xgi_mem_alloc __user *) data,
+			       alloc, sizeof(alloc));
+
+	return 0;
+}
+
+
+/**
+ * Free all blocks associated with a particular file handle.
+ */
+/**
+ * Free all blocks associated with a particular file handle.
+ *
+ * Called on file close; rescans the used list after every free because
+ * xgi_pcie_free_locked() may delete and coalesce nodes.  Runs entirely
+ * under pcie_sem, hence the *_locked free variant.
+ */
+void xgi_pcie_free_all(struct xgi_info * info, DRMFILE filp)
+{
+	if (!info->pcie_heap.initialized) {
+		return;
+	}
+
+	down(&info->pcie_sem);
+
+	do {
+		struct xgi_mem_block *block;
+
+		list_for_each_entry(block, &info->pcie_heap.used_list, list) {
+			if (block->filp == filp) {
+				break;
+			}
+		}
+
+		/* Full scan with no match: iterator wrapped to the head. */
+		if (&block->list == &info->pcie_heap.used_list) {
+			break;
+		}
+
+		(void) xgi_pcie_free_locked(info, block->offset, filp);
+	} while(1);
+
+	up(&info->pcie_sem);
+}
+
+
+/*
+ * Free a PCIE heap block; caller must hold pcie_sem.  If the freed block
+ * was the cached vertex-buffer singleton, drop that cache pointer.
+ *
+ * NOTE(review): only xgi_pcie_vertex_block is cleared here -- freeing
+ * the cached cmdlist or scratchpad block leaves
+ * xgi_pcie_cmdlist_block / xgi_pcie_scratchpad_block dangling, and a
+ * later xgi_pcie_alloc() would hand out the stale descriptor.  Likely a
+ * real bug; fix would mirror the vertex handling for the other two.
+ */
+int xgi_pcie_free_locked(struct xgi_info * info,
+			 unsigned long offset, DRMFILE filp)
+{
+	const bool isvertex = (xgi_pcie_vertex_block
+			       && (xgi_pcie_vertex_block->offset == offset));
+	int err = xgi_mem_free(&info->pcie_heap, offset, filp);
+
+	if (!err && isvertex)
+		xgi_pcie_vertex_block = NULL;
+
+	return err;
+}
+
+
+/*
+ * Locking wrapper around xgi_pcie_free_locked(): take pcie_sem, free the
+ * block at @offset on behalf of @filp, and log on failure.
+ */
+int xgi_pcie_free(struct xgi_info * info, unsigned long offset, DRMFILE filp)
+{
+	int err;
+
+	down(&info->pcie_sem);
+	err = xgi_pcie_free_locked(info, offset, filp);
+	up(&info->pcie_sem);
+
+	if (err) {
+		DRM_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
+	}
+
+	return err;
+}
+
+
+/*
+ * ioctl wrapper for xgi_pcie_free(): read the offset from user space and
+ * free the corresponding block.
+ *
+ * NOTE(review): same u32 vs `unsigned long __user *` mismatch as
+ * xgi_fb_free_ioctl() -- confirm the user-space ABI passes 32 bits.
+ */
+int xgi_pcie_free_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_info *info = dev->dev_private;
+	u32 offset;
+
+	DRM_COPY_FROM_USER_IOCTL(offset, (unsigned long __user *) data,
+				 sizeof(offset));
+
+	return xgi_pcie_free(info, offset, filp);
+}
+
+
+/**
+ * xgi_find_pcie_virt
+ * @address: GE HW address
+ *
+ * Returns CPU virtual address. Assumes the CPU VAddr is continuous in not
+ * the same block
+ */
+/**
+ * xgi_find_pcie_virt
+ * @address: GE HW address
+ *
+ * Returns CPU virtual address.  Assumes the CPU VAddr is continuous in not
+ * the same block.
+ *
+ * NOTE(review): no validation that @address falls inside
+ * [pcie.base, pcie.base + pcie.size) -- an out-of-range value yields a
+ * wild pointer; callers (e.g. xgi_test_rwinkernel_ioctl) must validate.
+ */
+void *xgi_find_pcie_virt(struct xgi_info * info, u32 address)
+{
+	const unsigned long offset = address - info->pcie.base;
+
+	return ((u8 *) info->dev->sg->virtual) + offset;
+}
+
+/*
+ address -- GE hw address
+*/
+/*
+ * Debug ioctl: translate a GE hardware address to its kernel virtual
+ * address, log the current 32-bit value there, then overwrite it with
+ * the magic test pattern 0x00f00fff and log again.
+ *
+ * NOTE(review): only address == 0 is rejected; any other user-supplied
+ * value is passed straight to xgi_find_pcie_virt() and written through,
+ * so an out-of-range address corrupts arbitrary kernel memory -- this
+ * ioctl should be debug-only or range-checked.
+ */
+int xgi_test_rwinkernel_ioctl(DRM_IOCTL_ARGS)
+{
+	DRM_DEVICE;
+	struct xgi_info *info = dev->dev_private;
+	u32 address;
+	u32 *virtaddr = 0;
+
+	DRM_COPY_FROM_USER_IOCTL(address, (unsigned long __user *) data,
+				 sizeof(address));
+
+	DRM_INFO("input GE HW addr is 0x%x\n", address);
+
+	if (address == 0) {
+		return DRM_ERR(EFAULT);
+	}
+
+	virtaddr = (u32 *)xgi_find_pcie_virt(info, address);
+
+	DRM_INFO("convert to CPU virt addr 0x%p\n", virtaddr);
+
+	if (virtaddr != NULL) {
+		DRM_INFO("original [virtaddr] = 0x%x\n", *virtaddr);
+		*virtaddr = 0x00f00fff;
+		DRM_INFO("modified [virtaddr] = 0x%x\n", *virtaddr);
+	} else {
+		return DRM_ERR(EFAULT);
+	}
+
+	return 0;
+}
diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h
new file mode 100644
index 00000000..34268a56
--- /dev/null
+++ b/linux-core/xgi_regs.h
@@ -0,0 +1,216 @@
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ *
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * XGI AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_REGS_H_
+#define _XGI_REGS_H_
+
+#include "drmP.h"
+#include "drm.h"
+
+
+/* Hardware access functions */
+/* Write @data to the indexed register @index behind the 0x3C4/0x3C5
+ * (VGA sequencer) index/data port pair. */
+static inline void OUT3C5B(struct drm_map * map, u8 index, u8 data)
+{
+	DRM_WRITE8(map, 0x3C4, index);
+	DRM_WRITE8(map, 0x3C5, data);
+}
+
+/* Write @data to the indexed register @index behind the 0x3D4/0x3D5
+ * (VGA CRTC, color mode) index/data port pair. */
+static inline void OUT3X5B(struct drm_map * map, u8 index, u8 data)
+{
+	DRM_WRITE8(map, 0x3D4, index);
+	DRM_WRITE8(map, 0x3D5, data);
+}
+
+/* Write @data to the indexed register @index behind the 0x3CE/0x3CF
+ * (VGA graphics controller) index/data port pair. */
+static inline void OUT3CFB(struct drm_map * map, u8 index, u8 data)
+{
+	DRM_WRITE8(map, 0x3CE, index);
+	DRM_WRITE8(map, 0x3CF, data);
+}
+
+/* Read the indexed register @index behind the 0x3C4/0x3C5
+ * (VGA sequencer) index/data port pair. */
+static inline u8 IN3C5B(struct drm_map * map, u8 index)
+{
+	DRM_WRITE8(map, 0x3C4, index);
+	return DRM_READ8(map, 0x3C5);
+}
+
+/* Read the indexed register @index behind the 0x3D4/0x3D5
+ * (VGA CRTC, color mode) index/data port pair. */
+static inline u8 IN3X5B(struct drm_map * map, u8 index)
+{
+	DRM_WRITE8(map, 0x3D4, index);
+	return DRM_READ8(map, 0x3D5);
+}
+
+/* Read the indexed register @index behind the 0x3CE/0x3CF
+ * (VGA graphics controller) index/data port pair. */
+static inline u8 IN3CFB(struct drm_map * map, u8 index)
+{
+	DRM_WRITE8(map, 0x3CE, index);
+	return DRM_READ8(map, 0x3CF);
+}
+
+
+/*
+ * Graphic engine register (2d/3d) accessing interface
+ */
+/* 32-bit MMIO write of @data to the GE register at byte offset @addr;
+ * traces the access when compiled with XGI_MMIO_DEBUG. */
+static inline void dwWriteReg(struct drm_map * map, u32 addr, u32 data)
+{
+#ifdef XGI_MMIO_DEBUG
+	DRM_INFO("mmio_map->handle = 0x%p, addr = 0x%x, data = 0x%x\n",
+		 map->handle, addr, data);
+#endif
+	DRM_WRITE32(map, addr, data);
+}
+
+
+/* Enable MMIO register access: unprotect the extended registers
+ * (SR11 <- 0x92), set CR3A bit 5 and CR39 bit 0, then restore the
+ * saved SR11 value.  The write sequence is order-sensitive. */
+static inline void xgi_enable_mmio(struct xgi_info * info)
+{
+	u8 protect = 0;
+	u8 temp;
+
+	/* Unprotect registers: save current SR11, write unprotect magic */
+	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+	protect = DRM_READ8(info->mmio_map, 0x3C5);
+	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
+
+	/* CR3A |= 0x20 -- presumably enables extended register access;
+	 * TODO confirm against XGI documentation */
+	DRM_WRITE8(info->mmio_map, 0x3D4, 0x3A);
+	temp = DRM_READ8(info->mmio_map, 0x3D5);
+	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x20);
+
+	/* Enable MMIO: CR39 |= 0x01 */
+	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+	temp = DRM_READ8(info->mmio_map, 0x3D5);
+	DRM_WRITE8(info->mmio_map, 0x3D5, temp | 0x01);
+
+	/* Protect registers: restore the saved SR11 value */
+	OUT3C5B(info->mmio_map, 0x11, protect);
+}
+
+/* Disable MMIO register access: unprotect the extended registers
+ * (SR11 <- 0x92), clear CR39 bit 0, then restore the saved SR11
+ * value.  Inverse of xgi_enable_mmio(). */
+static inline void xgi_disable_mmio(struct xgi_info * info)
+{
+	u8 protect = 0;
+	u8 temp;
+
+	/* Unprotect registers: save current SR11, write unprotect magic */
+	DRM_WRITE8(info->mmio_map, 0x3C4, 0x11);
+	protect = DRM_READ8(info->mmio_map, 0x3C5);
+	DRM_WRITE8(info->mmio_map, 0x3C5, 0x92);
+
+	/* Disable MMIO access: CR39 &= ~0x01 */
+	DRM_WRITE8(info->mmio_map, 0x3D4, 0x39);
+	temp = DRM_READ8(info->mmio_map, 0x3D5);
+	DRM_WRITE8(info->mmio_map, 0x3D5, temp & 0xFE);
+
+	/* Protect registers: restore the saved SR11 value */
+	OUT3C5B(info->mmio_map, 0x11, protect);
+}
+
+/* Reset and enable the graphics engine.  CR36 is toggled through
+ * 0x84 / 0x94 / 0x84 to reset the 2D/3D engines, with dummy CR36
+ * reads between writes serving as settle delays; dynamic gating
+ * (GR2A bit 0) is closed during the reset and restored afterwards.
+ * The exact write order matters. */
+static inline void xgi_enable_ge(struct xgi_info * info)
+{
+	unsigned char bOld3cf2a = 0;
+	int wait = 0;
+
+	/* Single 16-bit write to 0x3C4: index 0x11, data 0x92 on a
+	 * little-endian bus -- the same register-unprotect magic used in
+	 * xgi_enable_mmio(); the original "Enable GE" label looks
+	 * doubtful, TODO confirm. */
+	DRM_WRITE16(info->mmio_map, 0x3C4, 0x9211);
+
+	/* Save and close dynamic gating */
+	bOld3cf2a = IN3CFB(info->mmio_map, 0x2a);
+	OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a & 0xfe);
+
+	/* Reset both 3D and 2D engine */
+	OUT3X5B(info->mmio_map, 0x36, 0x84);
+	wait = 10;
+	while (wait--) {
+		DRM_READ8(info->mmio_map, 0x36);	/* settle delay */
+	}
+	OUT3X5B(info->mmio_map, 0x36, 0x94);
+	wait = 10;
+	while (wait--) {
+		DRM_READ8(info->mmio_map, 0x36);	/* settle delay */
+	}
+	OUT3X5B(info->mmio_map, 0x36, 0x84);
+	wait = 10;
+	while (wait--) {
+		DRM_READ8(info->mmio_map, 0x36);	/* settle delay */
+	}
+	/* Enable 2D engine only */
+	OUT3X5B(info->mmio_map, 0x36, 0x80);
+
+	/* Enable 2D+3D engine */
+	OUT3X5B(info->mmio_map, 0x36, 0x84);
+
+	/* Restore dynamic gating */
+	OUT3CFB(info->mmio_map, 0x2a, bOld3cf2a);
+}
+
+/* Reset and disable the graphics engine.  Same CR36 0x84 / 0x94 /
+ * 0x84 reset sequence as xgi_enable_ge() (dummy CR36 reads as settle
+ * delays), but finishes by writing 0 to CR36 to leave the engines
+ * disabled. */
+static inline void xgi_disable_ge(struct xgi_info * info)
+{
+	int wait = 0;
+
+	/* Reset both 3D and 2D engine */
+	OUT3X5B(info->mmio_map, 0x36, 0x84);
+
+	wait = 10;
+	while (wait--) {
+		DRM_READ8(info->mmio_map, 0x36);	/* settle delay */
+	}
+	OUT3X5B(info->mmio_map, 0x36, 0x94);
+
+	wait = 10;
+	while (wait--) {
+		DRM_READ8(info->mmio_map, 0x36);	/* settle delay */
+	}
+	OUT3X5B(info->mmio_map, 0x36, 0x84);
+
+	wait = 10;
+	while (wait--) {
+		DRM_READ8(info->mmio_map, 0x36);	/* settle delay */
+	}
+
+	/* Disable 2D engine only */
+	OUT3X5B(info->mmio_map, 0x36, 0);
+}
+
+/* Enable the DVI interrupt: pulse GR39 bit 0 low then high
+ * (presumably re-arms/acks the source -- confirm against hardware
+ * docs), then set GR39 bit 1 to enable it. */
+static inline void xgi_enable_dvi_interrupt(struct xgi_info * info)
+{
+	u8 reg;
+
+	reg = IN3CFB(info->mmio_map, 0x39);
+	OUT3CFB(info->mmio_map, 0x39, reg & ~0x01);	/* bit 0 -> 0 */
+	reg = IN3CFB(info->mmio_map, 0x39);
+	OUT3CFB(info->mmio_map, 0x39, reg | 0x01);	/* bit 0 -> 1 */
+	reg = IN3CFB(info->mmio_map, 0x39);
+	OUT3CFB(info->mmio_map, 0x39, reg | 0x02);	/* bit 1 -> 1 */
+}
+/* Disable the DVI interrupt by clearing GR39 bit 1. */
+static inline void xgi_disable_dvi_interrupt(struct xgi_info * info)
+{
+	u8 reg;
+
+	reg = IN3CFB(info->mmio_map, 0x39);
+	OUT3CFB(info->mmio_map, 0x39, reg & ~0x02);
+}
+
+/* Enable the CRT1 interrupt: pulse GR3D bit 2 high then low
+ * (presumably acks/re-arms the source -- confirm against hardware
+ * docs), then set GR3D bit 3 to enable it. */
+static inline void xgi_enable_crt1_interrupt(struct xgi_info * info)
+{
+	u8 reg;
+
+	reg = IN3CFB(info->mmio_map, 0x3d);
+	OUT3CFB(info->mmio_map, 0x3d, reg | 0x04);	/* bit 2 -> 1 */
+	reg = IN3CFB(info->mmio_map, 0x3d);
+	OUT3CFB(info->mmio_map, 0x3d, reg & ~0x04);	/* bit 2 -> 0 */
+	reg = IN3CFB(info->mmio_map, 0x3d);
+	OUT3CFB(info->mmio_map, 0x3d, reg | 0x08);	/* bit 3 -> 1 */
+}
+
+/* Disable the CRT1 interrupt by clearing GR3D bit 3. */
+static inline void xgi_disable_crt1_interrupt(struct xgi_info * info)
+{
+	u8 reg;
+
+	reg = IN3CFB(info->mmio_map, 0x3d);
+	OUT3CFB(info->mmio_map, 0x3d, reg & ~0x08);
+}
+
+#endif