summaryrefslogtreecommitdiff
path: root/linux-core
diff options
context:
space:
mode:
Diffstat (limited to 'linux-core')
-rw-r--r--linux-core/Makefile9
-rw-r--r--linux-core/Makefile.kernel2
-rw-r--r--linux-core/xgi_cmdlist.c351
-rw-r--r--linux-core/xgi_cmdlist.h76
l---------linux-core/xgi_drm.h1
-rw-r--r--linux-core/xgi_drv.c1517
-rw-r--r--linux-core/xgi_drv.h217
-rw-r--r--linux-core/xgi_fb.c467
-rw-r--r--linux-core/xgi_fb.h47
-rw-r--r--linux-core/xgi_linux.h490
-rw-r--r--linux-core/xgi_misc.c571
-rw-r--r--linux-core/xgi_misc.h46
-rw-r--r--linux-core/xgi_pcie.c967
-rw-r--r--linux-core/xgi_pcie.h68
-rw-r--r--linux-core/xgi_regs.h404
15 files changed, 5232 insertions, 1 deletions
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 1758777c..2052459d 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
- mach64.o nv.o nouveau.o
+ mach64.o nv.o nouveau.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
@@ -91,6 +91,8 @@ MACH64HEADERS = mach64_drv.h mach64_drm.h $(DRMHEADERS)
NVHEADERS = nv_drv.h $(DRMHEADERS)
FFBHEADERS = ffb_drv.h $(DRMHEADERS)
NOUVEAUHEADERS = nouveau_drv.h nouveau_drm.h nouveau_reg.h $(DRMHEADERS)
+XGIHEADERS = xgi_cmdlist.h xgi_drv.h xgi_fb.h xgi_linux.h xgi_misc.h \
+ xgi_pcie.h xgi_regs.h xgi_types.h
PROGS = dristat drmstat
@@ -284,6 +286,7 @@ CONFIG_DRM_VIA := n
CONFIG_DRM_MACH64 := n
CONFIG_DRM_NV := n
CONFIG_DRM_NOUVEAU := n
+CONFIG_DRM_XGI := n
# Enable module builds for the modules requested/supported.
@@ -320,6 +323,9 @@ endif
ifneq (,$(findstring nouveau,$(DRM_MODULES)))
CONFIG_DRM_NOUVEAU := m
endif
+ifneq (,$(findstring xgi,$(DRM_MODULES)))
+CONFIG_DRM_XGI := m
+endif
# These require AGP support
@@ -347,6 +353,7 @@ $(via-objs): $(VIAHEADERS)
$(mach64-objs): $(MACH64HEADERS)
$(nv-objs): $(NVHEADERS)
$(nouveau-objs): $(NOUVEAUHEADERS)
+$(xgi-objs): $(XGIHEADERS)
endif
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index be2641c8..321eb807 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -37,6 +37,7 @@ via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
via_video.o via_dmablit.o via_fence.o via_buffer.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
nv-objs := nv_drv.o
+xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o
ifeq ($(CONFIG_COMPAT),y)
drm-objs += drm_ioc32.o
@@ -61,3 +62,4 @@ obj-$(CONFIG_DRM_VIA) += via.o
obj-$(CONFIG_DRM_MACH64)+= mach64.o
obj-$(CONFIG_DRM_NV) += nv.o
obj-$(CONFIG_DRM_NOUVEAU) += nouveau.o
+obj-$(CONFIG_DRM_XGI) += xgi.o \ No newline at end of file
diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c
new file mode 100644
index 00000000..7be0ac48
--- /dev/null
+++ b/linux-core/xgi_cmdlist.c
@@ -0,0 +1,351 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_linux.h"
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_misc.h"
+#include "xgi_cmdlist.h"
+
+struct xgi_cmdring_info s_cmdring;
+
+static void addFlush2D(struct xgi_info * info);
+static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo);
+static void triggerHWCommandList(struct xgi_info * info,
+ unsigned int triggerCounter);
+static void xgi_cmdlist_reset(void);
+
+int xgi_cmdlist_initialize(struct xgi_info * info, size_t size)
+{
+ struct xgi_mem_alloc mem_alloc = {
+ .size = size,
+ .owner = PCIE_2D,
+ };
+
+ xgi_pcie_alloc(info, &mem_alloc, 0);
+
+ if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) {
+ return -1;
+ }
+
+ s_cmdring._cmdRingSize = mem_alloc.size;
+ s_cmdring._cmdRingBuffer = mem_alloc.hw_addr;
+ s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr;
+ s_cmdring._lastBatchStartAddr = 0;
+ s_cmdring._cmdRingOffset = 0;
+
+ return 1;
+}
+
+void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo)
+{
+ unsigned int beginPort;
+ /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/
+
+ /* Jong 05/25/2006 */
+ /* return; */
+
+ beginPort = getCurBatchBeginPort(pCmdInfo);
+ XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n");
+
+ /* Jong 05/25/2006 */
+ /* return; */
+
+ if (s_cmdring._lastBatchStartAddr == 0) {
+ unsigned int portOffset;
+
+ /* Jong 06/13/2006; remove marked for system hang test */
+ /* xgi_waitfor_pci_idle(info); */
+
+ /* Jong 06132006; BASE_3D_ENG=0x2800 */
+ /* beginPort: 2D: 0x30 */
+ portOffset = BASE_3D_ENG + beginPort;
+
+ // Enable PCI Trigger Mode
+ XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n");
+
+ /* Jong 05/25/2006 */
+ /* return; */
+
+ /* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */
+ XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n",
+ M2REG_AUTO_LINK_SETTING_ADDRESS);
+ XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n",
+ M2REG_CLEAR_COUNTERS_MASK);
+ XGI_INFO
+ ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n",
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22));
+ XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n",
+ M2REG_PCI_TRIGGER_MODE_MASK);
+
+ /* Jong 06/14/2006; 0x400001a */
+ XGI_INFO
+ ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n",
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
+ M2REG_CLEAR_COUNTERS_MASK | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+ dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
+ M2REG_CLEAR_COUNTERS_MASK | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+
+ /* Jong 05/25/2006 */
+ XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n");
+ /* return; *//* OK */
+
+ /* Jong 06/14/2006; 0x400000a */
+ XGI_INFO
+ ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n",
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+ dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
+ (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 |
+ M2REG_PCI_TRIGGER_MODE_MASK);
+
+ // Send PCI begin command
+ XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n");
+ /* return; */
+
+ XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n",
+ portOffset);
+ XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort);
+
+ /* beginPort = 48; */
+ /* 0xc100000 */
+ dwWriteReg(portOffset,
+ (beginPort << 22) + (BEGIN_VALID_MASK) +
+ pCmdInfo->_curDebugID);
+ XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort << 22));
+ XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK);
+ XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n",
+ pCmdInfo->_curDebugID);
+ XGI_INFO
+ ("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n",
+ (beginPort << 22) + (BEGIN_VALID_MASK) +
+ pCmdInfo->_curDebugID);
+ XGI_INFO
+ ("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n");
+ /* return; *//* OK */
+
+ /* 0x80000024 */
+ dwWriteReg(portOffset + 4,
+ BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize);
+ XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n",
+ BEGIN_LINK_ENABLE_MASK);
+ XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n",
+ pCmdInfo->_firstSize);
+ XGI_INFO
+ ("Jong- BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n",
+ BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize);
+ XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n");
+
+ /* 0x1010000 */
+ dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4));
+ XGI_INFO("Jong- pCmdInfo->_firstBeginAddr=0x%lx \n",
+ pCmdInfo->_firstBeginAddr);
+ XGI_INFO("Jong- (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n",
+ (pCmdInfo->_firstBeginAddr >> 4));
+ XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n");
+
+ /* Jong 06/13/2006 */
+ xgi_dump_register(info);
+
+ /* Jong 06/12/2006; system hang; marked for test */
+ dwWriteReg(portOffset + 12, 0);
+ XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n");
+
+ /* Jong 06/13/2006; remove marked for system hang test */
+ /* xgi_waitfor_pci_idle(info); */
+ } else {
+ u32 *lastBatchVirtAddr;
+
+ XGI_INFO
+ ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n");
+
+ /* Jong 05/25/2006 */
+ /* return; */
+
+ if (pCmdInfo->_firstBeginType == BTYPE_3D) {
+ addFlush2D(info);
+ }
+
+ lastBatchVirtAddr =
+ xgi_find_pcie_virt(info,
+ s_cmdring._lastBatchStartAddr);
+
+ /* lastBatchVirtAddr should *never* be NULL. However, there
+ * are currently some bugs that cause this to happen. The
+ * if-statement here prevents some fatal (i.e., hard lock
+ * requiring the reset button) oopses.
+ */
+ if (lastBatchVirtAddr) {
+ lastBatchVirtAddr[1] =
+ BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize;
+ lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4;
+ lastBatchVirtAddr[3] = 0;
+ //barrier();
+ lastBatchVirtAddr[0] =
+ (beginPort << 22) + (BEGIN_VALID_MASK) +
+ (0xffff & pCmdInfo->_curDebugID);
+
+ /* Jong 06/12/2006; system hang; marked for test */
+ triggerHWCommandList(info, pCmdInfo->_beginCount);
+ }
+
+ XGI_INFO
+ ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n");
+ }
+
+ s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr;
+ XGI_INFO("Jong-xgi_submit_cmdlist-End \n");
+}
+
+/*
+ state: 0 - console
+ 1 - graphic
+ 2 - fb
+ 3 - logout
+*/
+void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo)
+{
+#define STATE_CONSOLE 0
+#define STATE_GRAPHIC 1
+#define STATE_FBTERM 2
+#define STATE_LOGOUT 3
+#define STATE_REBOOT 4
+#define STATE_SHUTDOWN 5
+
+ if ((pStateInfo->_fromState == STATE_GRAPHIC)
+ && (pStateInfo->_toState == STATE_CONSOLE)) {
+ XGI_INFO("[kd] I see, now is to leaveVT\n");
+ // stop receiving batches
+ } else if ((pStateInfo->_fromState == STATE_CONSOLE)
+ && (pStateInfo->_toState == STATE_GRAPHIC)) {
+ XGI_INFO("[kd] I see, now is to enterVT\n");
+ xgi_cmdlist_reset();
+ } else if ((pStateInfo->_fromState == STATE_GRAPHIC)
+ && ((pStateInfo->_toState == STATE_LOGOUT)
+ || (pStateInfo->_toState == STATE_REBOOT)
+ || (pStateInfo->_toState == STATE_SHUTDOWN))) {
+ XGI_INFO("[kd] I see, not is to exit from X\n");
+ // stop receiving batches
+ } else {
+ XGI_ERROR("[kd] Should not happen\n");
+ }
+
+}
+
+void xgi_cmdlist_reset(void)
+{
+ s_cmdring._lastBatchStartAddr = 0;
+ s_cmdring._cmdRingOffset = 0;
+}
+
+void xgi_cmdlist_cleanup(struct xgi_info * info)
+{
+ if (s_cmdring._cmdRingBuffer != 0) {
+ xgi_pcie_free(info, s_cmdring._cmdRingBusAddr);
+ s_cmdring._cmdRingBuffer = 0;
+ s_cmdring._cmdRingOffset = 0;
+ s_cmdring._cmdRingSize = 0;
+ }
+}
+
+static void triggerHWCommandList(struct xgi_info * info,
+ unsigned int triggerCounter)
+{
+ static unsigned int s_triggerID = 1;
+
+ //Fix me, currently we just trigger one time
+ while (triggerCounter--) {
+ dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
+ 0x05000000 + (0x0ffff & s_triggerID++));
+ // xgi_waitfor_pci_idle(info);
+ }
+}
+
+static unsigned int getCurBatchBeginPort(struct xgi_cmd_info * pCmdInfo)
+{
+ // Convert the batch type to begin port ID
+ switch (pCmdInfo->_firstBeginType) {
+ case BTYPE_2D:
+ return 0x30;
+ case BTYPE_3D:
+ return 0x40;
+ case BTYPE_FLIP:
+ return 0x50;
+ case BTYPE_CTRL:
+ return 0x20;
+ default:
+ //ASSERT(0);
+ return 0xff;
+ }
+}
+
+static void addFlush2D(struct xgi_info * info)
+{
+ u32 *flushBatchVirtAddr;
+ u32 flushBatchHWAddr;
+ u32 *lastBatchVirtAddr;
+
+ /* check buf is large enough to contain a new flush batch */
+ if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) {
+ s_cmdring._cmdRingOffset = 0;
+ }
+
+ flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset;
+ flushBatchVirtAddr = xgi_find_pcie_virt(info, flushBatchHWAddr);
+
+ /* not using memcpy since the addresses may not be contiguous */
+ *(flushBatchVirtAddr + 0) = 0x10000000;
+ *(flushBatchVirtAddr + 1) = 0x80000004; /* size = 0x04 dwords */
+ *(flushBatchVirtAddr + 2) = 0x00000000;
+ *(flushBatchVirtAddr + 3) = 0x00000000;
+ *(flushBatchVirtAddr + 4) = FLUSH_2D;
+ *(flushBatchVirtAddr + 5) = FLUSH_2D;
+ *(flushBatchVirtAddr + 6) = FLUSH_2D;
+ *(flushBatchVirtAddr + 7) = FLUSH_2D;
+
+ // ASSERT(s_cmdring._lastBatchStartAddr != NULL);
+ lastBatchVirtAddr =
+ xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr);
+
+ lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08;
+ lastBatchVirtAddr[2] = flushBatchHWAddr >> 4;
+ lastBatchVirtAddr[3] = 0;
+
+ //barrier();
+
+ // BTYPE_CTRL & NO debugID
+ lastBatchVirtAddr[0] = (0x20 << 22) + (BEGIN_VALID_MASK);
+
+ triggerHWCommandList(info, 1);
+
+ s_cmdring._cmdRingOffset += 0x20;
+ s_cmdring._lastBatchStartAddr = flushBatchHWAddr;
+}
diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h
new file mode 100644
index 00000000..d2b95c0e
--- /dev/null
+++ b/linux-core/xgi_cmdlist.h
@@ -0,0 +1,76 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_CMDLIST_H_
+#define _XGI_CMDLIST_H_
+
+#define ONE_BIT_MASK 0x1
+#define TWENTY_BIT_MASK 0xfffff
+#define M2REG_FLUSH_2D_ENGINE_MASK (ONE_BIT_MASK<<20)
+#define M2REG_FLUSH_3D_ENGINE_MASK TWENTY_BIT_MASK
+#define M2REG_FLUSH_FLIP_ENGINE_MASK (ONE_BIT_MASK<<21)
+#define BASE_3D_ENG 0x2800
+#define M2REG_AUTO_LINK_SETTING_ADDRESS 0x10
+#define M2REG_CLEAR_COUNTERS_MASK (ONE_BIT_MASK<<4)
+#define M2REG_PCI_TRIGGER_MODE_MASK (ONE_BIT_MASK<<1)
+#define BEGIN_VALID_MASK (ONE_BIT_MASK<<20)
+#define BEGIN_LINK_ENABLE_MASK (ONE_BIT_MASK<<31)
+#define M2REG_PCI_TRIGGER_REGISTER_ADDRESS 0x14
+
+typedef enum {
+ FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK,
+ FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK,
+ FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK
+} FLUSH_CODE;
+
+typedef enum {
+ AGPCMDLIST_SCRATCH_SIZE = 0x100,
+ AGPCMDLIST_BEGIN_SIZE = 0x004,
+ AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004,
+ AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c,
+ AGPCMDLIST_FLUSH_CMD_LEN = 0x004,
+ AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE
+} CMD_SIZE;
+
+struct xgi_cmdring_info {
+ unsigned int _cmdRingSize;
+ u32 _cmdRingBuffer;
+ unsigned long _cmdRingBusAddr;
+ u32 _lastBatchStartAddr;
+ u32 _cmdRingOffset;
+};
+
+extern int xgi_cmdlist_initialize(struct xgi_info * info, size_t size);
+
+extern void xgi_submit_cmdlist(struct xgi_info * info, struct xgi_cmd_info * pCmdInfo);
+
+extern void xgi_state_change(struct xgi_info * info, struct xgi_state_info * pStateInfo);
+
+extern void xgi_cmdlist_cleanup(struct xgi_info * info);
+
+#endif /* _XGI_CMDLIST_H_ */
diff --git a/linux-core/xgi_drm.h b/linux-core/xgi_drm.h
new file mode 120000
index 00000000..677586d7
--- /dev/null
+++ b/linux-core/xgi_drm.h
@@ -0,0 +1 @@
+../shared-core/xgi_drm.h \ No newline at end of file
diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c
new file mode 100644
index 00000000..b3425c75
--- /dev/null
+++ b/linux-core/xgi_drv.c
@@ -0,0 +1,1517 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+#include "xgi_linux.h"
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_pcie.h"
+#include "xgi_misc.h"
+#include "xgi_cmdlist.h"
+
+/* for debug */
+static int xgi_temp = 1;
+/*
+ * global parameters
+ */
+static struct xgi_dev {
+ u16 vendor;
+ u16 device;
+ const char *name;
+} xgidev_list[] = {
+ {
+ PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, {
+ PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, {
+ 0, 0, NULL}
+};
+
+int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */
+
+static int xgi_num_devices = 0;
+
+struct xgi_info xgi_devices[XGI_MAX_DEVICES];
+
+#if defined(XGI_PM_SUPPORT_APM)
+static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 };
+#endif
+
+/* add one for the control device */
+struct xgi_info xgi_ctl_device;
+wait_queue_head_t xgi_ctl_waitqueue;
+
+#ifdef CONFIG_PROC_FS
+struct proc_dir_entry *proc_xgi;
+#endif
+
+#ifdef CONFIG_DEVFS_FS
+devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES];
+#endif
+
+struct list_head xgi_mempid_list;
+
+/* xgi_ functions.. do not take a state device parameter */
+static int xgi_post_vbios(struct xgi_ioctl_post_vbios * info);
+static void xgi_proc_create(void);
+static void xgi_proc_remove_all(struct proc_dir_entry *);
+static void xgi_proc_remove(void);
+
+/* xgi_kern_ functions, interfaces used by linux kernel */
+int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *);
+
+unsigned int xgi_kern_poll(struct file *, poll_table *);
+int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
+int xgi_kern_mmap(struct file *, struct vm_area_struct *);
+int xgi_kern_open(struct inode *, struct file *);
+int xgi_kern_release(struct inode *inode, struct file *filp);
+
+void xgi_kern_vma_open(struct vm_area_struct *vma);
+void xgi_kern_vma_release(struct vm_area_struct *vma);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type);
+#else
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
+ unsigned long address, int write_access);
+#endif
+
+int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *);
+int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *);
+int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *);
+int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *);
+
+int xgi_kern_ctl_open(struct inode *, struct file *);
+int xgi_kern_ctl_close(struct inode *, struct file *);
+unsigned int xgi_kern_ctl_poll(struct file *, poll_table *);
+
+void xgi_kern_isr_bh(unsigned long);
+irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *);
+
+static void xgi_lock_init(struct xgi_info * info);
+
+#if defined(XGI_PM_SUPPORT_ACPI)
+int xgi_kern_acpi_standby(struct pci_dev *, u32);
+int xgi_kern_acpi_resume(struct pci_dev *);
+#endif
+
+/*
+ * verify access to pci config space wasn't disabled behind our back
+ * unfortunately, XFree86 enables/disables memory access in pci config space at
+ * various times (such as restoring initial pci config space settings during vt
+ * switches or when doing multicard). As a result, all of our register accesses
+ * are garbage at this point. add a check to see if access was disabled and
+ * reenable any such access.
+ */
+#define XGI_CHECK_PCI_CONFIG(xgi) \
+ xgi_check_pci_config(xgi, __LINE__)
+
+static inline void xgi_check_pci_config(struct xgi_info * info, int line)
+{
+ unsigned short cmd, flag = 0;
+
+ // don't do this on the control device, only the actual devices
+ if (info->flags & XGI_FLAG_CONTROL)
+ return;
+
+ pci_read_config_word(info->dev, PCI_COMMAND, &cmd);
+ if (!(cmd & PCI_COMMAND_MASTER)) {
+ XGI_INFO("restoring bus mastering! (%d)\n", line);
+ cmd |= PCI_COMMAND_MASTER;
+ flag = 1;
+ }
+
+ if (!(cmd & PCI_COMMAND_MEMORY)) {
+ XGI_INFO("restoring MEM access! (%d)\n", line);
+ cmd |= PCI_COMMAND_MEMORY;
+ flag = 1;
+ }
+
+ if (flag)
+ pci_write_config_word(info->dev, PCI_COMMAND, cmd);
+}
+
+/*
+ * struct pci_device_id {
+ * unsigned int vendor, device; // Vendor and device ID or PCI_ANY_ID
+ * unsigned int subvendor, subdevice; // Subsystem ID's or PCI_ANY_ID
+ * unsigned int class, class_mask; // (class,subclass,prog-if) triplet
+ * unsigned long driver_data; // Data private to the driver
+ * };
+ */
+
+static struct pci_device_id xgi_dev_table[] = {
+ {
+ .vendor = PCI_VENDOR_ID_XGI,
+ .device = PCI_ANY_ID,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .class = (PCI_CLASS_DISPLAY_VGA << 8),
+ .class_mask = ~0,
+ },
+ {}
+};
+
+/*
+ * #define MODULE_DEVICE_TABLE(type,name) \
+ * MODULE_GENERIC_TABLE(type##_device,name)
+ */
+MODULE_DEVICE_TABLE(pci, xgi_dev_table);
+
+/*
+ * struct pci_driver {
+ * struct list_head node;
+ * char *name;
+ * const struct pci_device_id *id_table; // NULL if wants all devices
+ * int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted
+ * void (*remove)(struct pci_dev *dev); // Device removed (NULL if not a hot-plug capable driver)
+ * int (*save_state)(struct pci_dev *dev, u32 state); // Save Device Context
+ * int (*suspend)(struct pci_dev *dev, u32 state); // Device suspended
+ * int (*resume)(struct pci_dev *dev); // Device woken up
+ * int (*enable_wake)(struct pci_dev *dev, u32 state, int enable); // Enable wake event
+ * };
+ */
+static struct pci_driver xgi_pci_driver = {
+ .name = "xgi",
+ .id_table = xgi_dev_table,
+ .probe = xgi_kern_probe,
+#if defined(XGI_SUPPORT_ACPI)
+ .suspend = xgi_kern_acpi_standby,
+ .resume = xgi_kern_acpi_resume,
+#endif
+};
+
+/*
+ * find xgi devices and set initial state
+ */
+int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table)
+{
+ struct xgi_info *info;
+
+ if ((dev->vendor != PCI_VENDOR_ID_XGI)
+ || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) {
+ return -1;
+ }
+
+ if (xgi_num_devices == XGI_MAX_DEVICES) {
+ XGI_INFO("maximum device number (%d) reached!\n",
+ xgi_num_devices);
+ return -1;
+ }
+
+ /* enable io, mem, and bus-mastering in pci config space */
+ if (pci_enable_device(dev) != 0) {
+ XGI_INFO("pci_enable_device failed, aborting\n");
+ return -1;
+ }
+
+ XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices);
+
+ pci_set_master(dev);
+
+ info = &xgi_devices[xgi_num_devices];
+ info->dev = dev;
+
+ xgi_lock_init(info);
+
+ info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1);
+ info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1);
+
+ /* check IO region */
+ if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) {
+ XGI_ERROR("cannot reserve MMIO memory\n");
+ goto error_disable_dev;
+ }
+
+ XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base);
+ XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size);
+
+ info->mmio.vbase = ioremap_nocache(info->mmio.base, info->mmio.size);
+ if (!info->mmio.vbase) {
+ release_mem_region(info->mmio.base, info->mmio.size);
+ XGI_ERROR("info->mmio.vbase failed\n");
+ goto error_disable_dev;
+ }
+ xgi_enable_mmio(info);
+
+ //xgi_enable_ge(info);
+
+ XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase);
+
+ info->fb.base = XGI_PCI_RESOURCE_START(dev, 0);
+ info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0);
+
+ XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base);
+ XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);
+
+ info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024;
+ XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);
+
+ /* check frame buffer region
+ if (!request_mem_region(info->fb.base, info->fb.size, "xgi"))
+ {
+ release_mem_region(info->mmio.base, info->mmio.size);
+ XGI_ERROR("cannot reserve frame buffer memory\n");
+ goto error_disable_dev;
+ }
+
+ info->fb.vbase = ioremap_nocache(info->fb.base, info->fb.size);
+
+ if (!info->fb.vbase)
+ {
+ release_mem_region(info->mmio.base, info->mmio.size);
+ release_mem_region(info->fb.base, info->fb.size);
+ XGI_ERROR("info->fb.vbase failed\n");
+ goto error_disable_dev;
+ }
+ */
+ info->fb.vbase = NULL;
+ XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase);
+
+
+ /* check common error condition */
+ if (info->dev->irq == 0) {
+ XGI_ERROR("Can't find an IRQ for your XGI card! \n");
+ goto error_zero_dev;
+ }
+ XGI_INFO("info->irq: %lx \n", info->dev->irq);
+
+ //xgi_enable_dvi_interrupt(info);
+
+ /* sanity check the IO apertures */
+ if ((info->mmio.base == 0) || (info->mmio.size == 0)
+ || (info->fb.base == 0) || (info->fb.size == 0)) {
+ XGI_ERROR("The IO regions for your XGI card are invalid.\n");
+
+ if ((info->mmio.base == 0) || (info->mmio.size == 0)) {
+ XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n",
+ info->mmio.base, info->mmio.size);
+ }
+
+ if ((info->fb.base == 0) || (info->fb.size == 0)) {
+ XGI_ERROR
+ ("frame buffer appears to be wrong: 0x%lx 0x%lx\n",
+ info->fb.base, info->fb.size);
+ }
+
+ goto error_zero_dev;
+ }
+ //xgi_num_devices++;
+
+ return 0;
+
+ error_zero_dev:
+ release_mem_region(info->fb.base, info->fb.size);
+ release_mem_region(info->mmio.base, info->mmio.size);
+
+ error_disable_dev:
+ pci_disable_device(dev);
+ return -1;
+
+}
+
+/*
+ * vma operations...
+ * this is only called when the vmas are duplicated. this
+ * appears to only happen when the process is cloned to create
+ * a new process, and not when the process is threaded.
+ *
+ * increment the usage count for the physical pages, so when
+ * this clone unmaps the mappings, the pages are not
+ * deallocated under the original process.
+ */
+struct vm_operations_struct xgi_vm_ops = {
+ .open = xgi_kern_vma_open,
+ .close = xgi_kern_vma_release,
+ .nopage = xgi_kern_vma_nopage,
+};
+
+void xgi_kern_vma_open(struct vm_area_struct *vma)
+{
+ XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n",
+ vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));
+
+ if (XGI_VMA_PRIVATE(vma)) {
+ struct xgi_pcie_block *block =
+ (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
+ XGI_ATOMIC_INC(block->use_count);
+ }
+}
+
+void xgi_kern_vma_release(struct vm_area_struct *vma)
+{
+ XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n",
+ vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));
+
+ if (XGI_VMA_PRIVATE(vma)) {
+ struct xgi_pcie_block *block =
+ (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
+ XGI_ATOMIC_DEC(block->use_count);
+
+ /*
+ * if use_count is down to 0, the kernel virtual mapping was freed
+ * but the underlying physical pages were not, we need to clear the
+ * bit and free the physical pages.
+ */
+ if (XGI_ATOMIC_READ(block->use_count) == 0) {
+ // Need TO Finish
+ XGI_VMA_PRIVATE(vma) = NULL;
+ }
+ }
+}
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
+ struct page *page = NOPAGE_SIGBUS;
+ unsigned long offset = 0;
+ unsigned long page_addr = 0;
+/*
+ XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",
+ vma->vm_start,
+ vma->vm_end,
+ XGI_VMA_OFFSET(vma),
+ address);
+*/
+ offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);
+
+ offset = offset - block->bus_addr;
+
+ offset >>= PAGE_SHIFT;
+
+ page_addr = block->page_table[offset].virt_addr;
+
+ if (xgi_temp) {
+ XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"
+ "block->page_count: 0x%lx block->page_order: 0x%lx"
+ "block->page_table[0x%lx].virt_addr: 0x%lx\n",
+ block->bus_addr, block->hw_addr,
+ block->page_count, block->page_order,
+ offset, block->page_table[offset].virt_addr);
+ xgi_temp = 0;
+ }
+
+ if (!page_addr)
+ goto out; /* hole or end-of-file */
+ page = virt_to_page(page_addr);
+
+ /* got it, now increment the count */
+ get_page(page);
+ out:
+ return page;
+
+}
+#else
+struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
+ unsigned long address, int write_access)
+{
+ struct xgi_pcie_block *block = (struct xgi_pcie_block *) XGI_VMA_PRIVATE(vma);
+ struct page *page = NOPAGE_SIGBUS;
+ unsigned long offset = 0;
+ unsigned long page_addr = 0;
+/*
+ XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",
+ vma->vm_start,
+ vma->vm_end,
+ XGI_VMA_OFFSET(vma),
+ address);
+*/
+ offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);
+
+ offset = offset - block->bus_addr;
+
+ offset >>= PAGE_SHIFT;
+
+ page_addr = block->page_table[offset].virt_addr;
+
+ if (xgi_temp) {
+ XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"
+ "block->page_count: 0x%lx block->page_order: 0x%lx"
+ "block->page_table[0x%lx].virt_addr: 0x%lx\n",
+ block->bus_addr, block->hw_addr,
+ block->page_count, block->page_order,
+ offset, block->page_table[offset].virt_addr);
+ xgi_temp = 0;
+ }
+
+ if (!page_addr)
+ goto out; /* hole or end-of-file */
+ page = virt_to_page(page_addr);
+
+ /* got it, now increment the count */
+ get_page(page);
+ out:
+ return page;
+}
+#endif
+
+#if 0
+static struct file_operations xgi_fops = {
+ /* owner: THIS_MODULE, */
+ poll:xgi_kern_poll,
+ ioctl:xgi_kern_ioctl,
+ mmap:xgi_kern_mmap,
+ open:xgi_kern_open,
+ release:xgi_kern_release,
+};
+#endif
+
+static struct file_operations xgi_fops = {
+ .owner = THIS_MODULE,
+ .poll = xgi_kern_poll,
+ .ioctl = xgi_kern_ioctl,
+ .mmap = xgi_kern_mmap,
+ .open = xgi_kern_open,
+ .release = xgi_kern_release,
+};
+
/*
 * Allocate and zero a per-open private structure, initializing its
 * event wait queue and spinlock.  Returns NULL on allocation failure.
 * The caller owns the result and must release it with
 * xgi_free_file_private().
 */
static struct xgi_file_private *xgi_alloc_file_private(void)
{
	struct xgi_file_private *fp;

	XGI_KMALLOC(fp, sizeof(struct xgi_file_private));
	if (!fp)
		return NULL;

	memset(fp, 0, sizeof(struct xgi_file_private));

	/* initialize this file's event queue */
	init_waitqueue_head(&fp->wait_queue);

	xgi_init_lock(fp->fp_lock);

	return fp;
}
+
+static void xgi_free_file_private(struct xgi_file_private * fp)
+{
+ if (fp == NULL)
+ return;
+
+ XGI_KFREE(fp, sizeof(struct xgi_file_private));
+}
+
/*
 * Character-device open entry point.
 *
 * Allocates the per-file private data, dispatches control-device opens
 * to xgi_kern_ctl_open(), and on the FIRST open of a real device
 * (XGI_FLAG_OPEN clear) performs one-time setup under info_sem:
 * IRQ registration, bottom-half tasklet enable, and command-list
 * allocation.  Returns 0 on success or a negative errno.
 */
int xgi_kern_open(struct inode *inode, struct file *filp)
{
	struct xgi_info *info = NULL;
	int dev_num;
	int result = 0, status;

	/*
	 * the type and num values are only valid if we are not using devfs.
	 * However, since we use them to retrieve the device pointer, we
	 * don't need them with devfs as filp->private_data is already
	 * initialized
	 */
	filp->private_data = xgi_alloc_file_private();
	if (filp->private_data == NULL)
		return -ENOMEM;

	XGI_INFO("filp->private_data %p\n", filp->private_data);
	/*
	 * for control device, just jump to its open routine
	 * after setting up the private data
	 */
	if (XGI_IS_CONTROL_DEVICE(inode))
		return xgi_kern_ctl_open(inode, filp);

	/* what device are we talking about? */
	dev_num = XGI_DEVICE_NUMBER(inode);
	if (dev_num >= XGI_MAX_DEVICES) {
		/* out-of-range minor: undo the private-data allocation */
		xgi_free_file_private(filp->private_data);
		filp->private_data = NULL;
		return -ENODEV;
	}

	info = &xgi_devices[dev_num];

	XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num);

	xgi_down(info->info_sem);
	XGI_CHECK_PCI_CONFIG(info);

	XGI_INFO_FROM_FP(filp) = info;

	/*
	 * map the memory and allocate isr on first open
	 */

	if (!(info->flags & XGI_FLAG_OPEN)) {
		XGI_INFO("info->flags & XGI_FLAG_OPEN \n");

		if (info->dev->device == 0) {
			XGI_INFO("open of nonexistent device %d\n", dev_num);
			result = -ENXIO;
			goto failed;
		}

		/* initialize struct irqaction */
		/* NOTE(review): SA_INTERRUPT/SA_SHIRQ are the pre-2.6.18
		 * shared-IRQ flags (later IRQF_*) — kernel-version dependent */
		status = request_irq(info->dev->irq, xgi_kern_isr,
				     SA_INTERRUPT | SA_SHIRQ, "xgi",
				     (void *)info);
		if (status != 0) {
			if (info->dev->irq && (status == -EBUSY)) {
				XGI_ERROR
				    ("Tried to get irq %d, but another driver",
				     (unsigned int)info->dev->irq);
				XGI_ERROR("has it and is not sharing it.\n");
			}
			XGI_ERROR("isr request failed 0x%x\n", status);
			result = -EIO;
			goto failed;
		}

		/*
		 * #define DECLARE_TASKLET(name, func, data) \
		 * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
		 */
		info->tasklet.func = xgi_kern_isr_bh;
		info->tasklet.data = (unsigned long)info;
		tasklet_enable(&info->tasklet);

		/* Alloc 1M bytes for cmdbuffer which is flush2D batch array */
		xgi_cmdlist_initialize(info, 0x100000);

		info->flags |= XGI_FLAG_OPEN;
	}

	XGI_ATOMIC_INC(info->use_count);

	/* success path falls through the label with result == 0 */
      failed:
	xgi_up(info->info_sem);

	if ((result) && filp->private_data) {
		xgi_free_file_private(filp->private_data);
		filp->private_data = NULL;
	}

	return result;
}
+
/*
 * Character-device release (last close of this struct file).
 *
 * Control-device closes are dispatched to xgi_kern_ctl_close().  For a
 * real device, when the device-wide use count drops to zero the tasklet
 * is disabled, the IRQ freed, and the command list torn down — the
 * mirror image of the first-open setup in xgi_kern_open().
 */
int xgi_kern_release(struct inode *inode, struct file *filp)
{
	struct xgi_info *info = XGI_INFO_FROM_FP(filp);

	XGI_CHECK_PCI_CONFIG(info);

	/*
	 * for control device, just jump to its close routine
	 * after setting up the private data
	 */
	if (XGI_IS_CONTROL_DEVICE(inode))
		return xgi_kern_ctl_close(inode, filp);

	XGI_INFO("Jong-xgi_kern_release on device %d\n",
		 XGI_DEVICE_NUMBER(inode));

	xgi_down(info->info_sem);
	if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) {

		/*
		 * The usage count for this device has dropped to zero, it can be shut
		 * down safely; disable its interrupts.
		 */

		/*
		 * Disable this device's tasklet to make sure that no bottom half will
		 * run with undefined device state.
		 */
		tasklet_disable(&info->tasklet);

		/*
		 * Free the IRQ, which may block until all pending interrupt processing
		 * has completed.
		 */
		free_irq(info->dev->irq, (void *)info);

		xgi_cmdlist_cleanup(info);

		/* leave INIT flag alone so we don't reinit every time */
		info->flags &= ~XGI_FLAG_OPEN;
	}

	xgi_up(info->info_sem);

	/* release the per-open private data allocated in open */
	if (FILE_PRIVATE(filp)) {
		xgi_free_file_private(FILE_PRIVATE(filp));
		FILE_PRIVATE(filp) = NULL;
	}

	return 0;
}
+
/*
 * mmap entry point.  The file offset selects which aperture is mapped:
 *
 *   - MMIO register space (IS_IO_OFFSET): direct uncached remap, VM_IO;
 *   - framebuffer (IS_FB_OFFSET): direct uncached remap, VM_IO;
 *   - PCIE allocator space (IS_PCIE_OFFSET): no immediate remap — the
 *     matching xgi_pcie_block is attached to the VMA and pages are
 *     supplied lazily by the nopage handler in xgi_vm_ops.
 *
 * Start/end must be page-aligned.  Returns 0 or a negative errno.
 * NOTE(review): an offset matching no aperture only logs an error and
 * still returns 0 — confirm whether that should be a failure.
 */
int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma)
{
	//struct inode *inode = INODE_FROM_FP(filp);
	struct xgi_info *info = XGI_INFO_FROM_FP(filp);
	struct xgi_pcie_block *block;
	int pages = 0;
	unsigned long prot;

	XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n",
		 vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma));

	XGI_CHECK_PCI_CONFIG(info);

	/* reject ranges that are not page aligned */
	if (XGI_MASK_OFFSET(vma->vm_start)
	    || XGI_MASK_OFFSET(vma->vm_end)) {
		XGI_ERROR("VM: bad mmap range: %lx - %lx\n",
			  vma->vm_start, vma->vm_end);
		return -ENXIO;
	}

	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	vma->vm_ops = &xgi_vm_ops;

	/* XGI IO(reg) space */
	if (IS_IO_OFFSET
	    (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
					 XGI_VMA_OFFSET(vma),
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot))
			return -EAGAIN;

		/* mark it as IO so that we don't dump it on core dump */
		vma->vm_flags |= VM_IO;
		XGI_INFO("VM: mmap io space \n");
	}
	/* XGI fb space */
	/* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */
	else if (IS_FB_OFFSET
		 (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
					 XGI_VMA_OFFSET(vma),
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot))
			return -EAGAIN;

		// mark it as IO so that we don't dump it on core dump
		vma->vm_flags |= VM_IO;
		XGI_INFO("VM: mmap fb space \n");
	}
	/* PCIE allocator */
	/* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */
	else if (IS_PCIE_OFFSET
		 (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
		xgi_down(info->pcie_sem);

		/* the offset must name a previously allocated block of the
		 * exact same size as the requested mapping */
		block = xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma));

		if (block == NULL) {
			XGI_ERROR("couldn't find pre-allocated PCIE memory!\n");
			xgi_up(info->pcie_sem);
			return -EAGAIN;
		}

		if (block->page_count != pages) {
			XGI_ERROR
			    ("pre-allocated PCIE memory has wrong number of pages!\n");
			xgi_up(info->pcie_sem);
			return -EAGAIN;
		}

		/* hand the block to the nopage handler via the VMA */
		vma->vm_private_data = block;
		XGI_ATOMIC_INC(block->use_count);
		xgi_up(info->pcie_sem);

		/*
		 * prevent the swapper from swapping it out
		 * mark the memory i/o so the buffers aren't
		 * dumped on core dumps */
		vma->vm_flags |= (VM_LOCKED | VM_IO);

		/* un-cached */
		prot = pgprot_val(vma->vm_page_prot);
		/*
		   if (boot_cpu_data.x86 > 3)
		   prot |= _PAGE_PCD | _PAGE_PWT;
		 */
		vma->vm_page_prot = __pgprot(prot);

		XGI_INFO("VM: mmap pcie space \n");
	}
#if 0
	else if (IS_FB_OFFSET
		 (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) {
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
					 XGI_VMA_OFFSET(vma),
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot))
			return -EAGAIN;

		// mark it as IO so that we don't dump it on core dump
		vma->vm_flags |= VM_IO;
		XGI_INFO("VM: mmap fb space \n");
	}
#endif
	else {
		vma->vm_flags |= (VM_IO | VM_LOCKED);
		XGI_ERROR("VM: mmap wrong range \n");
	}

	vma->vm_file = filp;

	return 0;
}
+
/*
 * poll/select entry point.  Control-device polls are forwarded to
 * xgi_kern_ctl_poll().  Otherwise the caller is parked on the per-file
 * wait queue (blocking opens only) and POLLPRI|POLLIN is reported
 * whenever undelivered events are pending; the event count itself is
 * decremented elsewhere when the client consumes the event.
 */
unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait)
{
	struct xgi_file_private *fp;
	struct xgi_info *info;
	unsigned int mask = 0;
	unsigned long eflags;

	info = XGI_INFO_FROM_FP(filp);

	if (info->device_number == XGI_CONTROL_DEVICE_NUMBER)
		return xgi_kern_ctl_poll(filp, wait);

	fp = XGI_GET_FP(filp);

	if (!(filp->f_flags & O_NONBLOCK)) {
		/* add us to the list */
		poll_wait(filp, &fp->wait_queue, wait);
	}

	/* num_events is shared with the event producer; read under fp_lock */
	xgi_lock_irqsave(fp->fp_lock, eflags);

	/* wake the user on any event */
	if (fp->num_events) {
		XGI_INFO("Hey, an event occured!\n");
		/*
		 * trigger the client, when they grab the event,
		 * we'll decrement the event count
		 */
		mask |= (POLLPRI | POLLIN);
	}
	xgi_unlock_irqsave(fp->fp_lock, eflags);

	return mask;
}
+
+int xgi_kern_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ struct xgi_info *info;
+ struct xgi_mem_alloc *alloc = NULL;
+
+ int status = 0;
+ void *arg_copy;
+ int arg_size;
+ int err = 0;
+
+ info = XGI_INFO_FROM_FP(filp);
+
+ XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd),
+ _IOC_NR(cmd), arg, _IOC_SIZE(cmd));
+ /*
+ * extract the type and number bitfields, and don't decode
+ * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
+ */
+ if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC)
+ return -ENOTTY;
+ if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR)
+ return -ENOTTY;
+
+ /*
+ * the direction is a bitmask, and VERIFY_WRITE catches R/W
+ * transfers. `Type' is user-oriented, while
+ * access_ok is kernel-oriented, so the concept of "read" and
+ * "write" is reversed
+ */
+ if (_IOC_DIR(cmd) & _IOC_READ) {
+ err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
+ } else if (_IOC_DIR(cmd) & _IOC_WRITE) {
+ err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
+ }
+ if (err)
+ return -EFAULT;
+
+ XGI_CHECK_PCI_CONFIG(info);
+
+ arg_size = _IOC_SIZE(cmd);
+ XGI_KMALLOC(arg_copy, arg_size);
+ if (arg_copy == NULL) {
+ XGI_ERROR("failed to allocate ioctl memory\n");
+ return -ENOMEM;
+ }
+
+ /* Jong 05/25/2006 */
+ /* copy_from_user(arg_copy, (void *)arg, arg_size); */
+ if (copy_from_user(arg_copy, (void *)arg, arg_size)) {
+ XGI_ERROR("failed to copyin ioctl data\n");
+ XGI_INFO("Jong-copy_from_user-fail! \n");
+ } else
+ XGI_INFO("Jong-copy_from_user-OK! \n");
+
+ alloc = (struct xgi_mem_alloc *) arg_copy;
+ XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg,
+ arg_size);
+
+ switch (_IOC_NR(cmd)) {
+ case XGI_ESC_DEVICE_INFO:
+ XGI_INFO("Jong-xgi_ioctl_get_device_info \n");
+ xgi_get_device_info(info, (struct xgi_chip_info *)arg_copy);
+ break;
+ case XGI_ESC_POST_VBIOS:
+ XGI_INFO("Jong-xgi_ioctl_post_vbios \n");
+ break;
+ case XGI_ESC_FB_ALLOC:
+ XGI_INFO("Jong-xgi_ioctl_fb_alloc \n");
+ xgi_fb_alloc(info, alloc, 0);
+ break;
+ case XGI_ESC_FB_FREE:
+ XGI_INFO("Jong-xgi_ioctl_fb_free \n");
+ xgi_fb_free(info, *(unsigned long *)arg_copy);
+ break;
+ case XGI_ESC_MEM_COLLECT:
+ XGI_INFO("Jong-xgi_ioctl_mem_collect \n");
+ xgi_mem_collect(info, (unsigned int *)arg_copy);
+ break;
+ case XGI_ESC_PCIE_ALLOC:
+ XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n");
+ xgi_pcie_alloc(info, alloc, 0);
+ break;
+ case XGI_ESC_PCIE_FREE:
+ XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n",
+ *((unsigned long *)arg_copy));
+ xgi_pcie_free(info, *((unsigned long *)arg_copy));
+ break;
+ case XGI_ESC_PCIE_CHECK:
+ XGI_INFO("Jong-xgi_pcie_heap_check \n");
+ xgi_pcie_heap_check();
+ break;
+ case XGI_ESC_GET_SCREEN_INFO:
+ XGI_INFO("Jong-xgi_get_screen_info \n");
+ xgi_get_screen_info(info, (struct xgi_screen_info *)arg_copy);
+ break;
+ case XGI_ESC_PUT_SCREEN_INFO:
+ XGI_INFO("Jong-xgi_put_screen_info \n");
+ xgi_put_screen_info(info, (struct xgi_screen_info *)arg_copy);
+ break;
+ case XGI_ESC_MMIO_INFO:
+ XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n");
+ xgi_get_mmio_info(info, (struct xgi_mmio_info *)arg_copy);
+ break;
+ case XGI_ESC_GE_RESET:
+ XGI_INFO("Jong-xgi_ioctl_ge_reset \n");
+ xgi_ge_reset(info);
+ break;
+ case XGI_ESC_SAREA_INFO:
+ XGI_INFO("Jong-xgi_ioctl_sarea_info \n");
+ xgi_sarea_info(info, (struct xgi_sarea_info *)arg_copy);
+ break;
+ case XGI_ESC_DUMP_REGISTER:
+ XGI_INFO("Jong-xgi_ioctl_dump_register \n");
+ xgi_dump_register(info);
+ break;
+ case XGI_ESC_DEBUG_INFO:
+ XGI_INFO("Jong-xgi_ioctl_restore_registers \n");
+ xgi_restore_registers(info);
+ break;
+ case XGI_ESC_SUBMIT_CMDLIST:
+ XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n");
+ xgi_submit_cmdlist(info, (struct xgi_cmd_info *) arg_copy);
+ break;
+ case XGI_ESC_TEST_RWINKERNEL:
+ XGI_INFO("Jong-xgi_test_rwinkernel \n");
+ xgi_test_rwinkernel(info, *(unsigned long *)arg_copy);
+ break;
+ case XGI_ESC_STATE_CHANGE:
+ XGI_INFO("Jong-xgi_state_change \n");
+ xgi_state_change(info, (struct xgi_state_info *) arg_copy);
+ break;
+ default:
+ XGI_INFO("Jong-xgi_ioctl_default \n");
+ status = -EINVAL;
+ break;
+ }
+
+ if (copy_to_user((void *)arg, arg_copy, arg_size)) {
+ XGI_ERROR("failed to copyout ioctl data\n");
+ XGI_INFO("Jong-copy_to_user-fail! \n");
+ } else
+ XGI_INFO("Jong-copy_to_user-OK! \n");
+
+ XGI_KFREE(arg_copy, arg_size);
+ return status;
+}
+
+/*
+ * xgi control driver operations defined here
+ */
+int xgi_kern_ctl_open(struct inode *inode, struct file *filp)
+{
+ struct xgi_info *info = &xgi_ctl_device;
+
+ int rc = 0;
+
+ XGI_INFO("Jong-xgi_kern_ctl_open\n");
+
+ xgi_down(info->info_sem);
+ info->device_number = XGI_CONTROL_DEVICE_NUMBER;
+
+ /* save the xgi info in file->private_data */
+ filp->private_data = info;
+
+ if (XGI_ATOMIC_READ(info->use_count) == 0) {
+ init_waitqueue_head(&xgi_ctl_waitqueue);
+ }
+
+ info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL;
+
+ XGI_ATOMIC_INC(info->use_count);
+ xgi_up(info->info_sem);
+
+ return rc;
+}
+
/*
 * Close handler for the control device.  Clears the device flags once
 * the last opener goes away and frees the per-open private data.
 * Always returns 0.
 *
 * NOTE(review): xgi_kern_ctl_open() stores the global xgi_info in
 * private_data, yet this path frees it via xgi_free_file_private() —
 * confirm which allocation private_data actually holds at close time.
 */
int xgi_kern_ctl_close(struct inode *inode, struct file *filp)
{
	struct xgi_info *info = XGI_INFO_FROM_FP(filp);

	XGI_INFO("Jong-xgi_kern_ctl_close\n");

	xgi_down(info->info_sem);
	if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) {
		info->flags = 0;
	}
	xgi_up(info->info_sem);

	if (FILE_PRIVATE(filp)) {
		xgi_free_file_private(FILE_PRIVATE(filp));
		FILE_PRIVATE(filp) = NULL;
	}

	return 0;
}
+
+unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait)
+{
+ //struct xgi_info *info = XGI_INFO_FROM_FP(filp);;
+ unsigned int ret = 0;
+
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ poll_wait(filp, &xgi_ctl_waitqueue, wait);
+ }
+
+ return ret;
+}
+
+/*
+ * xgi proc system
+ */
+static u8 xgi_find_pcie_capability(struct pci_dev *dev)
+{
+ u16 status;
+ u8 cap_ptr, cap_id;
+
+ pci_read_config_word(dev, PCI_STATUS, &status);
+ status &= PCI_STATUS_CAP_LIST;
+ if (!status)
+ return 0;
+
+ switch (dev->hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ case PCI_HEADER_TYPE_BRIDGE:
+ pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
+ break;
+ default:
+ return 0;
+ }
+
+ do {
+ cap_ptr &= 0xFC;
+ pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
+ pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT,
+ &cap_ptr);
+ } while (cap_ptr && cap_id != 0xFF);
+
+ return 0;
+}
+
/*
 * /proc read handler: reports whether the card is wired as PCIE or PCI,
 * based on the presence of a PCIe capability.  @data is the xgi_info
 * registered when the proc entry was created.  Returns bytes written.
 *
 * NOTE(review): XGI_PCI_DEV_PUT is called on info->dev without a
 * matching GET in this function — verify the reference counting.
 */
int xgi_kern_read_card_info(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	struct pci_dev *dev;
	char *type;
	int len = 0;

	struct xgi_info *info;
	info = (struct xgi_info *) data;

	dev = info->dev;
	if (!dev)
		return 0;

	type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI";
	len += sprintf(page + len, "Card Type: \t %s\n", type);

	XGI_PCI_DEV_PUT(dev);
	return len;
}
+
/*
 * /proc read handler reporting the driver and compiler version
 * strings.  Returns the number of bytes written into @page.
 */
int xgi_kern_read_version(char *page, char **start, off_t off,
			  int count, int *eof, void *data)
{
	char *cursor = page;

	cursor += sprintf(cursor, "XGI version: %s\n", "1.0");
	cursor += sprintf(cursor, "GCC version: %s\n", "3.0");

	return (int)(cursor - page);
}
+
/* /proc read handler for PCIE info — stub; currently reports nothing. */
int xgi_kern_read_pcie_info(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	return 0;
}
+
/* /proc read handler for device status — stub; currently reports nothing. */
int xgi_kern_read_status(char *page, char **start, off_t off,
			 int count, int *eof, void *data)
{
	return 0;
}
+
/*
 * Create the /proc/driver/xgi hierarchy:
 *
 *   xgi/                 (dir)
 *   xgi/cards/<N>        per-card info  -> xgi_kern_read_card_info
 *   xgi/pcie/status      (PCIe cards)   -> xgi_kern_read_status
 *   xgi/pcie/card        (PCIe cards)   -> xgi_kern_read_pcie_info
 *   xgi/version                         -> xgi_kern_read_version
 *   xgi/pcie/host-bridge                -> xgi_kern_read_pcie_info
 *
 * On any failure the whole partially built tree is torn down.
 * Compiled out entirely without CONFIG_PROC_FS.
 */
static void xgi_proc_create(void)
{
#ifdef CONFIG_PROC_FS

	struct pci_dev *dev;
	int i = 0;
	char name[6];

	struct proc_dir_entry *entry;
	struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards;

	struct xgi_info *info;
	struct xgi_info *xgi_max_devices;

	/* world readable directory */
	int flags = S_IFDIR | S_IRUGO | S_IXUGO;

	proc_xgi = create_proc_entry("xgi", flags, proc_root_driver);
	if (!proc_xgi)
		goto failed;

	proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi);
	if (!proc_xgi_cards)
		goto failed;

	proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi);
	if (!proc_xgi_pcie)
		goto failed;

	/*
	 * Set the module owner to ensure that the reference
	 * count reflects accesses to the proc files.
	 */
	proc_xgi->owner = THIS_MODULE;
	proc_xgi_cards->owner = THIS_MODULE;
	proc_xgi_pcie->owner = THIS_MODULE;

	xgi_max_devices = xgi_devices + XGI_MAX_DEVICES;
	for (info = xgi_devices; info < xgi_max_devices; info++) {
		/* world readable file; note: 'flags' is reused below the
		 * loop for the "version"/"host-bridge" entries */
		flags = S_IFREG | S_IRUGO;

		dev = info->dev;
		if (!dev)
			break;

		sprintf(name, "%d", i++);
		entry = create_proc_entry(name, flags, proc_xgi_cards);
		if (!entry) {
			XGI_PCI_DEV_PUT(dev);
			goto failed;
		}

		entry->data = info;
		entry->read_proc = xgi_kern_read_card_info;
		entry->owner = THIS_MODULE;

		/* PCIe-only entries */
		if (xgi_find_pcie_capability(dev)) {
			entry =
			    create_proc_entry("status", flags, proc_xgi_pcie);
			if (!entry) {
				XGI_PCI_DEV_PUT(dev);
				goto failed;
			}

			entry->data = info;
			entry->read_proc = xgi_kern_read_status;
			entry->owner = THIS_MODULE;

			entry = create_proc_entry("card", flags, proc_xgi_pcie);
			if (!entry) {
				XGI_PCI_DEV_PUT(dev);
				goto failed;
			}

			entry->data = info;
			entry->read_proc = xgi_kern_read_pcie_info;
			entry->owner = THIS_MODULE;
		}

		XGI_PCI_DEV_PUT(dev);
	}

	entry = create_proc_entry("version", flags, proc_xgi);
	if (!entry)
		goto failed;

	entry->read_proc = xgi_kern_read_version;
	entry->owner = THIS_MODULE;

	entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie);
	if (!entry)
		goto failed;

	entry->data = NULL;
	entry->read_proc = xgi_kern_read_pcie_info;
	entry->owner = THIS_MODULE;

	return;

      failed:
	XGI_ERROR("failed to create /proc entries!\n");
	xgi_proc_remove_all(proc_xgi);
#endif
}
+
+#ifdef CONFIG_PROC_FS
/*
 * Recursively remove @entry, its siblings, and all their children from
 * /proc.  Stops after removing the proc_xgi root so entries outside
 * the driver's subtree are never touched.
 */
static void xgi_proc_remove_all(struct proc_dir_entry *entry)
{
	while (entry) {
		/* grab the sibling first: remove_proc_entry frees 'entry' */
		struct proc_dir_entry *next = entry->next;
		if (entry->subdir)
			xgi_proc_remove_all(entry->subdir);
		remove_proc_entry(entry->name, entry->parent);
		if (entry == proc_xgi)
			break;
		entry = next;
	}
}
+#endif
+
/* Tear down the entire /proc/driver/xgi tree (no-op without procfs). */
static void xgi_proc_remove(void)
{
#ifdef CONFIG_PROC_FS
	xgi_proc_remove_all(proc_xgi);
#endif
}
+
+/*
+ * driver receives an interrupt if someone waiting, then hand it off.
+ */
/*
 * Top-half interrupt handler.  Device-specific handling is currently
 * commented out, so need_to_run_bottom_half stays 0 and the tasklet is
 * never scheduled.
 *
 * NOTE(review): the IRQ is requested shared (SA_SHIRQ) yet this always
 * returns IRQ_HANDLED, even for interrupts raised by another device on
 * the line — confirm it should return IRQ_NONE when the XGI device did
 * not interrupt.
 */
irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs)
{
	struct xgi_info *info = (struct xgi_info *) dev_id;
	u32 need_to_run_bottom_half = 0;

	//XGI_INFO("xgi_kern_isr \n");

	//XGI_CHECK_PCI_CONFIG(info);

	//xgi_dvi_irq_handler(info);

	if (need_to_run_bottom_half) {
		tasklet_schedule(&info->tasklet);
	}

	return IRQ_HANDLED;
}
+
/*
 * Bottom-half (tasklet) body scheduled by xgi_kern_isr().  @data is the
 * xgi_info cast to unsigned long when the tasklet was set up in open.
 * The actual IRQ handling is commented out; only the PCI-config sanity
 * check remains.
 */
void xgi_kern_isr_bh(unsigned long data)
{
	struct xgi_info *info = (struct xgi_info *) data;

	XGI_INFO("xgi_kern_isr_bh \n");

	//xgi_dvi_irq_handler(info);

	XGI_CHECK_PCI_CONFIG(info);
}
+
/*
 * One-time initialization of all synchronization primitives in an
 * xgi_info: the info spinlock, the three mutual-exclusion semaphores
 * (device, framebuffer heap, PCIE heap), and the open use count.
 * Accepts NULL as a no-op.
 */
static void xgi_lock_init(struct xgi_info * info)
{
	if (info == NULL)
		return;

	spin_lock_init(&info->info_lock);

	sema_init(&info->info_sem, 1);
	sema_init(&info->fb_sem, 1);
	sema_init(&info->pcie_sem, 1);

	XGI_ATOMIC_SET(info->use_count, 0);
}
+
+static void xgi_dev_init(struct xgi_info * info)
+{
+ struct pci_dev *pdev = NULL;
+ struct xgi_dev *dev;
+ int found = 0;
+ u16 pci_cmd;
+
+ XGI_INFO("Enter xgi_dev_init \n");
+
+ //XGI_PCI_FOR_EACH_DEV(pdev)
+ {
+ for (dev = xgidev_list; dev->vendor; dev++) {
+ if ((dev->vendor == pdev->vendor)
+ && (dev->device == pdev->device)) {
+ u8 rev_id;
+
+ XGI_INFO("dev->vendor = pdev->vendor= %x \n",
+ dev->vendor);
+ XGI_INFO("dev->device = pdev->device= %x \n",
+ dev->device);
+
+ xgi_devices[found].dev = pdev;
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID,
+ rev_id);
+
+ XGI_INFO("PCI_REVISION_ID= %x \n", rev_id);
+
+ pci_read_config_word(pdev, PCI_COMMAND,
+ &pci_cmd);
+
+ XGI_INFO("PCI_COMMAND = %x \n", pci_cmd);
+
+ break;
+ }
+ }
+ }
+}
+
+/*
+ * Export to Linux Kernel
+ */
+
+static int __init xgi_init_module(void)
+{
+ struct xgi_info *info = &xgi_devices[xgi_num_devices];
+ int i, result;
+
+ XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION);
+ //SET_MODULE_OWNER(&xgi_fops);
+
+ memset(xgi_devices, 0, sizeof(xgi_devices));
+
+ if (pci_register_driver(&xgi_pci_driver) < 0) {
+ pci_unregister_driver(&xgi_pci_driver);
+ XGI_ERROR("no XGI graphics adapter found\n");
+ return -ENODEV;
+ }
+
+ XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices,
+ xgi_devices[xgi_num_devices].fb.base);
+ XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices,
+ xgi_devices[xgi_num_devices].fb.size);
+
+/* Jong 07/27/2006; test for ubuntu */
+/*
+#ifdef CONFIG_DEVFS_FS
+
+ XGI_INFO("Jong-Use devfs \n");
+ do
+ {
+ xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0);
+ if (xgi_devfs_handles[0] == NULL)
+ {
+ result = -ENOMEM;
+ XGI_ERROR("devfs register failed\n");
+ goto failed;
+ }
+ } while(0);
+ #else *//* no devfs, do it the "classic" way */
+
+ XGI_INFO("Jong-Use non-devfs \n");
+ /*
+ * Register your major, and accept a dynamic number. This is the
+ * first thing to do, in order to avoid releasing other module's
+ * fops in scull_cleanup_module()
+ */
+ result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops);
+ if (result < 0) {
+ XGI_ERROR("register chrdev failed\n");
+ pci_unregister_driver(&xgi_pci_driver);
+ return result;
+ }
+ if (xgi_major == 0)
+ xgi_major = result; /* dynamic */
+
+ /* #endif *//* CONFIG_DEVFS_FS */
+
+ XGI_INFO("Jong-major number %d\n", xgi_major);
+
+ /* instantiate tasklets */
+ for (i = 0; i < XGI_MAX_DEVICES; i++) {
+ /*
+ * We keep one tasklet per card to avoid latency issues with more
+ * than one device; no two instances of a single tasklet are ever
+ * executed concurrently.
+ */
+ XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1);
+ }
+
+ /* init the xgi control device */
+ {
+ struct xgi_info *info_ctl = &xgi_ctl_device;
+ xgi_lock_init(info_ctl);
+ }
+
+ /* Init the resource manager */
+ INIT_LIST_HEAD(&xgi_mempid_list);
+ if (!xgi_fb_heap_init(info)) {
+ XGI_ERROR("xgi_fb_heap_init() failed\n");
+ result = -EIO;
+ goto failed;
+ }
+
+ /* Init the resource manager */
+ if (!xgi_pcie_heap_init(info)) {
+ XGI_ERROR("xgi_pcie_heap_init() failed\n");
+ result = -EIO;
+ goto failed;
+ }
+
+ /* create /proc/driver/xgi */
+ xgi_proc_create();
+
+#if defined(DEBUG)
+ inter_module_register("xgi_devices", THIS_MODULE, xgi_devices);
+#endif
+
+ return 0;
+
+ failed:
+#ifdef CONFIG_DEVFS_FS
+ XGI_DEVFS_REMOVE_CONTROL();
+ XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);
+#endif
+
+ if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)
+ XGI_ERROR("unregister xgi chrdev failed\n");
+
+ for (i = 0; i < xgi_num_devices; i++) {
+ if (xgi_devices[i].dev) {
+ release_mem_region(xgi_devices[i].fb.base,
+ xgi_devices[i].fb.size);
+ release_mem_region(xgi_devices[i].mmio.base,
+ xgi_devices[i].mmio.size);
+ }
+ }
+
+ pci_unregister_driver(&xgi_pci_driver);
+ return result;
+
+ return 1;
+}
+
/*
 * Module exit: mirror of xgi_init_module().  Unregisters the chrdev,
 * then for every probed device tears down the command list, unmaps the
 * FB/MMIO apertures, releases the MMIO region, frees both heaps, and
 * disables the PCI device, before unregistering the PCI driver and
 * removing the /proc tree.
 */
void __exit xgi_exit_module(void)
{
	int i;

#ifdef CONFIG_DEVFS_FS
	XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);
#endif

	if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)
		XGI_ERROR("unregister xgi chrdev failed\n");

	XGI_INFO("Jong-unregister xgi chrdev scceeded\n");
	for (i = 0; i < XGI_MAX_DEVICES; i++) {
		if (xgi_devices[i].dev) {
			/* clean up the flush2D batch array */
			xgi_cmdlist_cleanup(&xgi_devices[i]);

			if (xgi_devices[i].fb.vbase != NULL) {
				iounmap(xgi_devices[i].fb.vbase);
				xgi_devices[i].fb.vbase = NULL;
			}
			if (xgi_devices[i].mmio.vbase != NULL) {
				iounmap(xgi_devices[i].mmio.vbase);
				xgi_devices[i].mmio.vbase = NULL;
			}
			/* NOTE(review): the FB mem region release below is
			 * commented out — confirm where it is released */
			//release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size);
			//XGI_INFO("release frame buffer mem region scceeded\n");

			release_mem_region(xgi_devices[i].mmio.base,
					   xgi_devices[i].mmio.size);
			XGI_INFO("release MMIO mem region scceeded\n");

			xgi_fb_heap_cleanup(&xgi_devices[i]);
			XGI_INFO("xgi_fb_heap_cleanup scceeded\n");

			xgi_pcie_heap_cleanup(&xgi_devices[i]);
			XGI_INFO("xgi_pcie_heap_cleanup scceeded\n");

			XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev);
		}
	}

	pci_unregister_driver(&xgi_pci_driver);

	/* remove /proc/driver/xgi */
	xgi_proc_remove();

#if defined(DEBUG)
	inter_module_unregister("xgi_devices");
#endif
}
+
+module_init(xgi_init_module);
+module_exit(xgi_exit_module);
+
+#if defined(XGI_PM_SUPPORT_ACPI)
/* ACPI event hook — stub.
 * NOTE(review): returns 1 unconditionally; PM callbacks conventionally
 * return 0 on success — confirm intended contract. */
int xgi_acpi_event(struct pci_dev *dev, u32 state)
{
	return 1;
}
+
/* ACPI suspend/standby hook — stub; no device state is saved.
 * NOTE(review): returns 1 — confirm 0-on-success convention. */
int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state)
{
	return 1;
}
+
/* ACPI resume hook — stub; no device state is restored.
 * NOTE(review): returns 1 — confirm 0-on-success convention. */
int xgi_kern_acpi_resume(struct pci_dev *dev)
{
	return 1;
}
+#endif
+
+MODULE_AUTHOR("Andrea Zhang <andrea_zhang@macrosynergy.com>");
+MODULE_DESCRIPTION("xgi kernel driver for xgi cards");
+MODULE_LICENSE("GPL");
diff --git a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h
new file mode 100644
index 00000000..983ed0a9
--- /dev/null
+++ b/linux-core/xgi_drv.h
@@ -0,0 +1,217 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_DRV_H_
+#define _XGI_DRV_H_
+
+#include "xgi_drm.h"
+
+#define XGI_MAJOR_VERSION 0
+#define XGI_MINOR_VERSION 7
+#define XGI_PATCHLEVEL 5
+
+#define XGI_DRV_VERSION "0.7.5"
+
+#ifndef XGI_DRV_NAME
+#define XGI_DRV_NAME "xgi"
+#endif
+
+/*
+ * xgi reserved major device number, Set this to 0 to
+ * request dynamic major number allocation.
+ */
+#ifndef XGI_DEV_MAJOR
+#define XGI_DEV_MAJOR 0
+#endif
+
+#ifndef XGI_MAX_DEVICES
+#define XGI_MAX_DEVICES 1
+#endif
+
+/* Jong 06/06/2006 */
+/* #define XGI_DEBUG */
+
+#ifndef PCI_VENDOR_ID_XGI
+/*
+#define PCI_VENDOR_ID_XGI 0x1023
+*/
+#define PCI_VENDOR_ID_XGI 0x18CA
+
+#endif
+
+#ifndef PCI_DEVICE_ID_XP5
+#define PCI_DEVICE_ID_XP5 0x2200
+#endif
+
+#ifndef PCI_DEVICE_ID_XG47
+#define PCI_DEVICE_ID_XG47 0x0047
+#endif
+
+/* Macros to make printk easier */
+#define XGI_ERROR(fmt, arg...) \
+ printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
+
+#define XGI_MEM_ERROR(area, fmt, arg...) \
+ printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
+
+/* #define XGI_DEBUG */
+
+#ifdef XGI_DEBUG
+#define XGI_INFO(fmt, arg...) \
+ printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg)
+/* printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */
+#else
+#define XGI_INFO(fmt, arg...) do { } while (0)
+#endif
+
+/* device name length; must be at least 8 */
+#define XGI_DEVICE_NAME_LENGTH 40
+
+/* need a fake device number for control device; just to flag it for msgs */
+#define XGI_CONTROL_DEVICE_NUMBER 100
+
/* A physical address window of the device with its kernel mapping. */
struct xgi_aperture {
	unsigned long base;	/* bus/physical base address */
	unsigned int size;	/* aperture length in bytes */
	void *vbase;		/* kernel virtual mapping (ioremap'd) */
};
+
/*
 * Per-adapter driver state.  One instance per card plus one dedicated
 * instance for the control device (xgi_ctl_device).
 */
struct xgi_info {
	struct pci_dev *dev;	/* underlying PCI device */
	int flags;		/* XGI_FLAG_* bitmask */
	int device_number;	/* minor, or XGI_CONTROL_DEVICE_NUMBER */

	/* physical characteristics */
	struct xgi_aperture mmio;	/* register aperture */
	struct xgi_aperture fb;		/* framebuffer aperture */
	struct xgi_aperture pcie;	/* PCIE/GART aperture */
	struct xgi_screen_info scrn_info;
	struct xgi_sarea_info sarea_info;

	/* look up table parameters */
	u32 *lut_base;			/* GART lookup table */
	unsigned int lutPageSize;
	unsigned int lutPageOrder;
	bool isLUTInLFB;		/* LUT lives in local framebuffer? */
	unsigned int sdfbPageSize;

	u32 pcie_config;		/* saved PCI config state */
	u32 pcie_status;

	atomic_t use_count;		/* concurrent opens of this device */

	/* keep track of any pending bottom halfes */
	struct tasklet_struct tasklet;

	spinlock_t info_lock;

	struct semaphore info_sem;	/* guards open/close/flags */
	struct semaphore fb_sem;	/* guards the FB heap */
	struct semaphore pcie_sem;	/* guards the PCIE heap */
};
+
/* Argument of the POST-VBIOS ioctl (handler currently a no-op):
 * identifies the adapter to POST by PCI bus/slot. */
struct xgi_ioctl_post_vbios {
	unsigned int bus;
	unsigned int slot;
};
+
/* Owner tags for PCIE heap allocations, used to track what each block
 * backs (2D engine, 3D vertex/command/scratch/texture memory). */
enum PcieOwner {
	PCIE_2D = 0,
	/*
	   PCIE_3D should not begin with 1,
	   2D alloc pcie memory will use owner 1.
	 */
	PCIE_3D = 11,		/*vetex buf */
	PCIE_3D_CMDLIST = 12,
	PCIE_3D_SCRATCHPAD = 13,
	PCIE_3D_TEXTURE = 14,
	PCIE_INVALID = 0x7fffffff
};
+
/* Bookkeeping node tying a memory allocation to the process that made
 * it, kept on the global xgi_mempid_list. */
struct xgi_mem_pid {
	struct list_head list;		/* link in xgi_mempid_list */
	enum xgi_mem_location location;	/* FB heap vs PCIE heap */
	unsigned long bus_addr;		/* bus address of the allocation */
	unsigned long pid;		/* owning process id */
};
+
+
+/*
+ * flags
+ */
+#define XGI_FLAG_OPEN 0x0001
+#define XGI_FLAG_NEEDS_POSTING 0x0002
+#define XGI_FLAG_WAS_POSTED 0x0004
+#define XGI_FLAG_CONTROL 0x0010
+#define XGI_FLAG_MAP_REGS_EARLY 0x0200
+
+/* mmap(2) offsets */
+
+#define IS_IO_OFFSET(info, offset, length) \
+ (((offset) >= (info)->mmio.base) \
+ && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size))
+
+/* Jong 06/14/2006 */
+/* (info)->fb.base is a base address for physical (bus) address space */
+/* what's the definition of offset? on physical (bus) address space or HW address space */
+/* Jong 06/15/2006; use HW address space */
+#define IS_FB_OFFSET(info, offset, length) \
+ (((offset) >= 0) \
+ && (((offset) + (length)) <= (info)->fb.size))
+#if 0
+#define IS_FB_OFFSET(info, offset, length) \
+ (((offset) >= (info)->fb.base) \
+ && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size))
+#endif
+
+#define IS_PCIE_OFFSET(info, offset, length) \
+ (((offset) >= (info)->pcie.base) \
+ && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size))
+
+extern int xgi_fb_heap_init(struct xgi_info * info);
+extern void xgi_fb_heap_cleanup(struct xgi_info * info);
+
+extern void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+ pid_t pid);
+extern void xgi_fb_free(struct xgi_info * info, unsigned long offset);
+extern void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt);
+
+extern int xgi_pcie_heap_init(struct xgi_info * info);
+extern void xgi_pcie_heap_cleanup(struct xgi_info * info);
+
+extern void xgi_pcie_alloc(struct xgi_info * info,
+ struct xgi_mem_alloc * alloc, pid_t pid);
+extern void xgi_pcie_free(struct xgi_info * info, unsigned long offset);
+extern void xgi_pcie_heap_check(void);
+extern struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
+ unsigned long address);
+extern void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address);
+
+extern void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address);
+
+#endif
diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c
new file mode 100644
index 00000000..7d390d4b
--- /dev/null
+++ b/linux-core/xgi_fb.c
@@ -0,0 +1,467 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_linux.h"
+#include "xgi_drv.h"
+#include "xgi_fb.h"
+
+/* Video RAM below this offset is reserved for the onscreen front
+ * buffer; the heap manages everything from here to the end of VRAM. */
+#define XGI_FB_HEAP_START 0x1000000
+
+/* Single global heap instance and the slab cache holding its block
+ * descriptors (the driver manages one device). */
+static struct xgi_mem_heap *xgi_fb_heap;
+static struct kmem_cache *xgi_fb_cache_block = NULL;
+/* Per-process ownership list, defined in another file of this driver. */
+extern struct list_head xgi_mempid_list;
+
+static struct xgi_mem_block *xgi_mem_new_node(void);
+static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info, unsigned long size);
+static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset);
+
+/*
+ * Allocate a block of video RAM on behalf of process @pid.
+ *
+ * When @alloc->is_front is set, the request is satisfied by the front
+ * buffer at the base of the frame buffer aperture (no heap involved).
+ * Otherwise a block is carved out of the FB heap under info->fb_sem.
+ * On failure alloc->size is zeroed and both addresses are cleared.
+ */
+void xgi_fb_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+		  pid_t pid)
+{
+	struct xgi_mem_block *block;
+	struct xgi_mem_pid *mempid_block;
+
+	if (alloc->is_front) {
+		alloc->location = XGI_MEMLOC_LOCAL;
+		alloc->bus_addr = info->fb.base;
+		alloc->hw_addr = 0;
+		XGI_INFO
+		    ("Video RAM allocation on front buffer successfully! \n");
+	} else {
+		xgi_down(info->fb_sem);
+		block = xgi_mem_alloc(info, alloc->size);
+		xgi_up(info->fb_sem);
+
+		if (block == NULL) {
+			alloc->location = XGI_MEMLOC_LOCAL;
+			alloc->size = 0;
+			alloc->bus_addr = 0;
+			alloc->hw_addr = 0;
+			XGI_ERROR("Video RAM allocation failed\n");
+		} else {
+			XGI_INFO("Video RAM allocation succeeded: 0x%p\n",
+				 (char *)block->offset);
+			alloc->location = XGI_MEMLOC_LOCAL;
+			alloc->size = block->size;
+			alloc->bus_addr = info->fb.base + block->offset;
+			alloc->hw_addr = block->offset;
+
+			/* Record the owning pid so the block can be
+			 * reclaimed if the process exits without freeing.
+			 * Bug fix: check the kmalloc() result BEFORE
+			 * writing through the pointer -- the original
+			 * dereferenced it first and tested it afterwards. */
+			mempid_block =
+			    kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
+			if (!mempid_block) {
+				XGI_ERROR("mempid_block alloc failed\n");
+			} else {
+				mempid_block->location = XGI_MEMLOC_LOCAL;
+				mempid_block->bus_addr = alloc->bus_addr;
+				mempid_block->pid = pid;
+
+				/* pid_t is not long; cast for the %ld
+				 * format specifier. */
+				XGI_INFO
+				    ("Memory ProcessID add one fb block pid:%ld successfully! \n",
+				     (long)mempid_block->pid);
+				list_add(&mempid_block->list,
+					 &xgi_mempid_list);
+			}
+		}
+	}
+}
+
+/*
+ * Free a block of video RAM previously returned by xgi_fb_alloc().
+ * @bus_addr is the bus address handed out in alloc->bus_addr.
+ */
+void xgi_fb_free(struct xgi_info * info, unsigned long bus_addr)
+{
+	struct xgi_mem_block *block;
+	unsigned long offset = bus_addr - info->fb.base;
+	struct xgi_mem_pid *mempid_block;
+	struct xgi_mem_pid *mempid_freeblock = NULL;
+
+	/* Bug fix: 'offset' is unsigned long, so the original test
+	 * "offset < 0" could never be true.  Heap blocks always start at
+	 * or above XGI_FB_HEAP_START; anything below that (including the
+	 * front buffer at offset 0) was never heap-allocated, so do not
+	 * hand it to xgi_mem_free(). */
+	if (bus_addr < info->fb.base + XGI_FB_HEAP_START) {
+		XGI_INFO("free onscreen frame buffer successfully !\n");
+	} else {
+		xgi_down(info->fb_sem);
+		block = xgi_mem_free(info, offset);
+		xgi_up(info->fb_sem);
+
+		if (block == NULL) {
+			XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n",
+				  offset);
+		}
+
+		/* Drop the pid ownership record for this block, if any. */
+		list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
+			if (mempid_block->location == XGI_MEMLOC_LOCAL
+			    && mempid_block->bus_addr == bus_addr) {
+				mempid_freeblock = mempid_block;
+				break;
+			}
+		}
+		if (mempid_freeblock) {
+			list_del(&mempid_freeblock->list);
+			XGI_INFO
+			    ("Memory ProcessID delete one fb block pid:%ld successfully! \n",
+			     (long)mempid_freeblock->pid);
+			kfree(mempid_freeblock);
+		}
+	}
+}
+
+/*
+ * Create the global frame buffer heap.
+ *
+ * Seeds the free list with one block covering everything from
+ * XGI_FB_HEAP_START to the end of video RAM.  Returns 1 on success and
+ * 0 on failure (note: NOT the usual 0/-errno kernel convention).
+ */
+int xgi_fb_heap_init(struct xgi_info * info)
+{
+	struct xgi_mem_block *block;
+
+	xgi_fb_heap = kmalloc(sizeof(struct xgi_mem_heap), GFP_KERNEL);
+	if (!xgi_fb_heap) {
+		XGI_ERROR("xgi_fb_heap alloc failed\n");
+		return 0;
+	}
+
+	INIT_LIST_HEAD(&xgi_fb_heap->free_list);
+	INIT_LIST_HEAD(&xgi_fb_heap->used_list);
+	INIT_LIST_HEAD(&xgi_fb_heap->sort_list);
+
+	/* Slab cache for the per-block bookkeeping structures. */
+	xgi_fb_cache_block =
+	    kmem_cache_create("xgi_fb_block", sizeof(struct xgi_mem_block), 0,
+			      SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+	if (NULL == xgi_fb_cache_block) {
+		XGI_ERROR("Fail to creat xgi_fb_block\n");
+		goto fail1;
+	}
+
+	block =
+	    (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
+						      GFP_KERNEL);
+	if (!block) {
+		XGI_ERROR("kmem_cache_alloc failed\n");
+		goto fail2;
+	}
+
+	/* One free block spanning all heap-managed video RAM. */
+	block->offset = XGI_FB_HEAP_START;
+	block->size = info->fb.size - XGI_FB_HEAP_START;
+
+	list_add(&block->list, &xgi_fb_heap->free_list);
+
+	xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START;
+
+	XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset,
+		 block->size);
+	XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n",
+		 xgi_fb_heap->max_freesize);
+
+	return 1;
+
+      fail2:
+	if (xgi_fb_cache_block) {
+		kmem_cache_destroy(xgi_fb_cache_block);
+		xgi_fb_cache_block = NULL;
+	}
+      fail1:
+	if (xgi_fb_heap) {
+		kfree(xgi_fb_heap);
+		xgi_fb_heap = NULL;
+	}
+	return 0;
+}
+
+/*
+ * Tear down the frame buffer heap and its slab cache.
+ *
+ * NOTE: the loop walks three list heads (free_list, used_list,
+ * sort_list) by incrementing a struct list_head pointer; this relies on
+ * the three members being laid out back-to-back in struct xgi_mem_heap.
+ */
+void xgi_fb_heap_cleanup(struct xgi_info * info)
+{
+	struct list_head *free_list;
+	struct xgi_mem_block *block;
+	struct xgi_mem_block *next;
+	int i;
+
+	if (xgi_fb_heap) {
+		free_list = &xgi_fb_heap->free_list;
+		for (i = 0; i < 3; i++, free_list++) {
+			list_for_each_entry_safe(block, next, free_list, list) {
+				XGI_INFO
+				    ("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
+				     i, block->offset, block->size);
+				//XGI_INFO("No. %d free block: 0x%p \n", i, block);
+				kmem_cache_free(xgi_fb_cache_block, block);
+				block = NULL;
+			}
+		}
+		XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap);
+		kfree(xgi_fb_heap);
+		xgi_fb_heap = NULL;
+	}
+
+	if (xgi_fb_cache_block) {
+		kmem_cache_destroy(xgi_fb_cache_block);
+		xgi_fb_cache_block = NULL;
+	}
+}
+
+/* Grab an uninitialised block descriptor from the slab cache.
+ * Returns NULL (after logging an error) when the cache is exhausted. */
+static struct xgi_mem_block *xgi_mem_new_node(void)
+{
+	struct xgi_mem_block *node =
+	    (struct xgi_mem_block *) kmem_cache_alloc(xgi_fb_cache_block,
+						      GFP_KERNEL);
+
+	if (node == NULL)
+		XGI_ERROR("kmem_cache_alloc failed\n");
+
+	return node;
+}
+
+#if 0
+/*
+ * Dead code: hand-rolled doubly-linked-list helpers that predate the
+ * switch to <linux/list.h>.  NOTE(review): the parameter name 'current'
+ * would collide with the kernel's current-task macro if this block were
+ * ever re-enabled -- rename before resurrecting.
+ */
+static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
+				      struct xgi_mem_block * current,
+				      struct xgi_mem_block * block);
+static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
+				       struct xgi_mem_block * current,
+				       struct xgi_mem_block * block);
+static void xgi_mem_insert_node_head(struct xgi_mem_list * list,
+				     struct xgi_mem_block * block);
+static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
+				     struct xgi_mem_block * block);
+static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block);
+/*
+ * insert node:block after node:current
+ */
+static void xgi_mem_insert_node_after(struct xgi_mem_list * list,
+ struct xgi_mem_block * current,
+ struct xgi_mem_block * block)
+{
+ block->prev = current;
+ block->next = current->next;
+ current->next = block;
+
+ if (current == list->tail) {
+ list->tail = block;
+ } else {
+ block->next->prev = block;
+ }
+}
+
+/*
+ * insert node:block before node:current
+ */
+static void xgi_mem_insert_node_before(struct xgi_mem_list * list,
+ struct xgi_mem_block * current,
+ struct xgi_mem_block * block)
+{
+ block->prev = current->prev;
+ block->next = current;
+ current->prev = block;
+ if (current == list->head) {
+ list->head = block;
+ } else {
+ block->prev->next = block;
+ }
+}
+void xgi_mem_insert_node_head(struct xgi_mem_list * list, struct xgi_mem_block * block)
+{
+ block->next = list->head;
+ block->prev = NULL;
+
+ if (NULL == list->head) {
+ list->tail = block;
+ } else {
+ list->head->prev = block;
+ }
+ list->head = block;
+}
+
+static void xgi_mem_insert_node_tail(struct xgi_mem_list * list,
+ struct xgi_mem_block * block)
+{
+ block->next = NULL;
+ block->prev = list->tail;
+ if (NULL == list->tail) {
+ list->head = block;
+ } else {
+ list->tail->next = block;
+ }
+ list->tail = block;
+}
+
+static void xgi_mem_delete_node(struct xgi_mem_list * list, struct xgi_mem_block * block)
+{
+ if (block == list->head) {
+ list->head = block->next;
+ }
+ if (block == list->tail) {
+ list->tail = block->prev;
+ }
+
+ if (block->prev) {
+ block->prev->next = block->next;
+ }
+ if (block->next) {
+ block->next->prev = block->prev;
+ }
+
+ block->next = block->prev = NULL;
+}
+#endif
+/*
+ * Carve a block of at least @originalSize bytes (rounded up to a whole
+ * number of pages) out of the FB heap, first-fit.  The chosen free
+ * block is either moved wholesale to the used list (exact fit) or
+ * split, with the tail remaining free.  Caller must hold info->fb_sem.
+ * Returns the used block, or NULL on failure.
+ */
+static struct xgi_mem_block *xgi_mem_alloc(struct xgi_info * info,
+					   unsigned long originalSize)
+{
+	struct xgi_mem_block *block, *free_block, *used_block;
+
+	/* Round the request up to a page multiple. */
+	unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
+
+	XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
+		 originalSize, size);
+
+	if (size == 0) {
+		XGI_ERROR("size == 0\n");
+		return (NULL);
+	}
+	XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize);
+	if (size > xgi_fb_heap->max_freesize) {
+		XGI_ERROR
+		    ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n",
+		     size, xgi_fb_heap->max_freesize);
+		return (NULL);
+	}
+
+	/* First-fit scan of the free list. */
+	list_for_each_entry(block, &xgi_fb_heap->free_list, list) {
+		/* Bug fix: the original logged the undeclared identifier
+		 * 'free_list' (with the nonstandard %px); log the current
+		 * candidate block instead. */
+		XGI_INFO("free_list: 0x%p \n", block);
+		if (size <= block->size) {
+			break;
+		}
+	}
+
+	/* Loop ran off the end of the list: nothing big enough. */
+	if (&block->list == &xgi_fb_heap->free_list) {
+		XGI_ERROR
+		    ("Can't allocate %ldk size from frame buffer memory !\n",
+		     size / 1024);
+		return (NULL);
+	}
+
+	free_block = block;
+	XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
+		 size, free_block->offset, free_block->size);
+
+	if (size == free_block->size) {
+		/* Exact fit: move the whole free block to the used list. */
+		used_block = free_block;
+		XGI_INFO("size == free_block->size: free_block = 0x%p\n",
+			 free_block);
+		list_del(&free_block->list);
+	} else {
+		/* Split: the used block takes the low end, the free block
+		 * keeps the remainder. */
+		used_block = xgi_mem_new_node();
+
+		if (used_block == NULL)
+			return (NULL);
+
+		if (used_block == free_block) {
+			XGI_ERROR("used_block == free_block = 0x%p\n",
+				  used_block);
+		}
+
+		used_block->offset = free_block->offset;
+		used_block->size = size;
+
+		free_block->offset += size;
+		free_block->size -= size;
+	}
+
+	xgi_fb_heap->max_freesize -= size;
+
+	list_add(&used_block->list, &xgi_fb_heap->used_list);
+
+	return (used_block);
+}
+
+/*
+ * Return the used block starting at @offset to the free list, merging
+ * it with physically adjacent free neighbours.  Caller must hold
+ * info->fb_sem.  Returns the resulting free block, or NULL if no used
+ * block starts at @offset.
+ */
+static struct xgi_mem_block *xgi_mem_free(struct xgi_info * info, unsigned long offset)
+{
+	struct xgi_mem_block *used_block = NULL, *block;
+	struct xgi_mem_block *prev, *next;
+
+	unsigned long upper;
+	unsigned long lower;
+
+	list_for_each_entry(block, &xgi_fb_heap->used_list, list) {
+		if (block->offset == offset) {
+			break;
+		}
+	}
+
+	/* Loop ran off the end: no used block starts at this offset. */
+	if (&block->list == &xgi_fb_heap->used_list) {
+		XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
+		return (NULL);
+	}
+
+	used_block = block;
+	XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
+		 used_block, used_block->offset, used_block->size);
+
+	xgi_fb_heap->max_freesize += used_block->size;
+
+	/* Find free neighbours touching this block: 'next' starts where we
+	 * end, 'prev' ends where we start. */
+	prev = next = NULL;
+	upper = used_block->offset + used_block->size;
+	lower = used_block->offset;
+
+	list_for_each_entry(block, &xgi_fb_heap->free_list, list) {
+		if (block->offset == upper) {
+			next = block;
+		} else if ((block->offset + block->size) == lower) {
+			prev = block;
+		}
+	}
+
+	XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
+	list_del(&used_block->list);
+
+	/* Adjacent on both sides: 'prev' absorbs everything. */
+	if (prev && next) {
+		prev->size += (used_block->size + next->size);
+		list_del(&next->list);
+		XGI_INFO("free node 0x%p\n", next);
+		kmem_cache_free(xgi_fb_cache_block, next);
+		kmem_cache_free(xgi_fb_cache_block, used_block);
+
+		next = NULL;
+		used_block = NULL;
+		return (prev);
+	}
+
+	/* Merge backwards only. */
+	if (prev) {
+		prev->size += used_block->size;
+		XGI_INFO("free node 0x%p\n", used_block);
+		kmem_cache_free(xgi_fb_cache_block, used_block);
+		used_block = NULL;
+		return (prev);
+	}
+
+	/* Merge forwards only: 'next' grows downwards to our start. */
+	if (next) {
+		next->size += used_block->size;
+		next->offset = used_block->offset;
+		XGI_INFO("free node 0x%p\n", used_block);
+		kmem_cache_free(xgi_fb_cache_block, used_block);
+		used_block = NULL;
+		return (next);
+	}
+
+	/* No adjacent free block: reuse the node on the free list. */
+	list_add(&used_block->list, &xgi_fb_heap->free_list);
+	XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
+		 used_block, used_block->offset, used_block->size);
+
+	return (used_block);
+}
diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h
new file mode 100644
index 00000000..363c8bc8
--- /dev/null
+++ b/linux-core/xgi_fb.h
@@ -0,0 +1,47 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_FB_H_
+#define _XGI_FB_H_
+
+/* One contiguous region of video RAM managed by the heap. */
+struct xgi_mem_block {
+	/* Linkage into the heap's free or used list. */
+	struct list_head list;
+	/* Byte offset of the region from the start of video RAM. */
+	unsigned long offset;
+	/* Region length in bytes (a page multiple for heap allocations). */
+	unsigned long size;
+	/* NOTE(review): not referenced by xgi_fb.c -- confirm other users. */
+	atomic_t use_count;
+};
+
+/* Allocator state for one heap.  NOTE: xgi_fb_heap_cleanup() walks
+ * free_list/used_list/sort_list by pointer increment, so these three
+ * members must stay consecutive and in this order. */
+struct xgi_mem_heap {
+	struct list_head free_list;
+	struct list_head used_list;
+	struct list_head sort_list;
+	/* Running total of free bytes in the heap. */
+	unsigned long max_freesize;
+	/* NOTE(review): unused in xgi_fb.c (fb_sem guards the heap there). */
+	spinlock_t lock;
+};
+
+#endif
diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h
new file mode 100644
index 00000000..99bf2d04
--- /dev/null
+++ b/linux-core/xgi_linux.h
@@ -0,0 +1,490 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_LINUX_H_
+#define _XGI_LINUX_H_
+
+#ifndef LINUX_VERSION_CODE
+#include <linux/version.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
+# error "This driver does not support pre-2.6 kernels!"
+#endif
+
+/* remap_pfn_range() superseded the 5-argument remap_page_range() in
+ * kernel 2.6.10; pick the matching XGI_REMAP_PAGE_RANGE flavour. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
+# define XGI_REMAP_PFN_RANGE_PRESENT
+#else
+# define XGI_REMAP_PAGE_RANGE_5
+#endif
+
+/* Legacy convention: mirror CONFIG_SMP/CONFIG_MODVERSIONS into the
+ * older __SMP__/MODVERSIONS symbols some headers still test. */
+#if defined (CONFIG_SMP) && !defined (__SMP__)
+#define __SMP__
+#endif
+
+#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)
+#define MODVERSIONS
+#endif
+
+#include <linux/kernel.h> /* printk */
+#include <linux/module.h>
+
+#include <linux/init.h> /* module_init, module_exit */
+#include <linux/types.h> /* pic_t, size_t, __u32, etc */
+#include <linux/errno.h> /* error codes */
+#include <linux/list.h> /* circular linked list */
+#include <linux/stddef.h> /* NULL, offsetof */
+#include <linux/wait.h> /* wait queues */
+
+#include <linux/slab.h> /* kmalloc, kfree, etc */
+#include <linux/vmalloc.h> /* vmalloc, vfree, etc */
+
+#include <linux/poll.h> /* poll_wait */
+#include <linux/delay.h> /* mdelay, udelay */
+#include <asm/msr.h> /* rdtsc rdtscl */
+
+#include <linux/sched.h> /* suser(), capable() replacement
+ for_each_task, for_each_process */
+#ifdef for_each_process
+#define XGI_SCAN_PROCESS(p) for_each_process(p)
+#else
+#define XGI_SCAN_PROCESS(p) for_each_task(p)
+#endif
+
+#include <linux/moduleparam.h> /* module_param() */
+#include <linux/smp_lock.h> /* kernel_locked */
+#include <asm/tlbflush.h> /* flush_tlb(), flush_tlb_all() */
+#include <asm/kmap_types.h> /* page table entry lookup */
+
+#include <linux/pci.h> /* pci_find_class, etc */
+#include <linux/interrupt.h> /* tasklets, interrupt helpers */
+#include <linux/timer.h>
+
+#include <asm/system.h> /* cli, sli, save_flags */
+#include <asm/io.h> /* ioremap, virt_to_phys */
+#include <asm/uaccess.h> /* access_ok */
+#include <asm/page.h> /* PAGE_OFFSET */
+#include <asm/pgtable.h> /* pte bit definitions */
+
+#include <linux/spinlock.h>
+#include <asm/semaphore.h>
+#include <linux/highmem.h>
+
+#ifdef CONFIG_PROC_FS
+#include <linux/proc_fs.h>
+#endif
+
+#ifdef CONFIG_DEVFS_FS
+#include <linux/devfs_fs_kernel.h>
+#endif
+
+#ifdef CONFIG_KMOD
+#include <linux/kmod.h>
+#endif
+
+#ifdef CONFIG_PM
+#include <linux/pm.h>
+#endif
+
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+
+#ifdef CONFIG_KDB
+#include <linux/kdb.h>
+#include <asm/kdb.h>
+#endif
+
+#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE)
+#define AGPGART
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#endif
+
+#ifndef MAX_ORDER
+#define MAX_ORDER 11
+#endif
+
+#ifndef module_init
+#define module_init(x) int init_module(void) { return x(); }
+#define module_exit(x) void cleanup_module(void) { x(); }
+#endif
+
+#ifndef minor
+#define minor(x) MINOR(x)
+#endif
+
+/* Pre-2.6 compatibility: irqreturn_t and the IRQ_* values did not
+ * exist, so provide no-op stand-ins. */
+#ifndef IRQ_HANDLED
+typedef void irqreturn_t;
+#define IRQ_NONE
+#define IRQ_HANDLED
+#define IRQ_RETVAL(x)
+#endif
+
+/* Fallback definition for kernels lacking list_for_each(). */
+#if !defined (list_for_each)
+#define list_for_each(pos, head) \
+	for (pos = (head)->next, prefetch(pos->next); pos != (head); \
+	     pos = pos->next, prefetch(pos->next))
+#endif
+
+/* Walk every PCI device in the system.
+ * NOTE(review): pci_devices/global_list are private PCI-core internals
+ * that later kernels removed -- confirm against the target kernel. */
+extern struct list_head pci_devices;	/* list of all devices */
+#define XGI_PCI_FOR_EACH_DEV(dev) \
+        for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next))
+
+/*
+ * the following macro causes problems when used in the same module
+ * as module_param(); undef it so we don't accidentally mix the two
+ */
+#undef MODULE_PARM
+
+#ifdef EXPORT_NO_SYMBOLS
+EXPORT_NO_SYMBOLS;
+#endif
+
+#define XGI_IS_SUSER() capable(CAP_SYS_ADMIN)
+#define XGI_PCI_DEVICE_NAME(dev) ((dev)->pretty_name)
+#define XGI_NUM_CPUS() num_online_cpus()
+#define XGI_CLI() local_irq_disable()
+#define XGI_SAVE_FLAGS(eflags) local_save_flags(eflags)
+#define XGI_RESTORE_FLAGS(eflags) local_irq_restore(eflags)
+#define XGI_MAY_SLEEP() (!in_interrupt() && !in_atomic())
+#define XGI_MODULE_PARAMETER(x) module_param(x, int, 0)
+
+
+#define XGI_PCI_DISABLE_DEVICE(dev) pci_disable_device(dev)
+
+/* common defines */
+#define GET_MODULE_SYMBOL(mod,sym) (const void *) inter_module_get(sym)
+#define PUT_MODULE_SYMBOL(sym) inter_module_put((char *) sym)
+
+#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))
+#define XGI_VMA_OFFSET(vma) (((vma)->vm_pgoff) << PAGE_SHIFT)
+#define XGI_VMA_PRIVATE(vma) ((vma)->vm_private_data)
+
+#define XGI_DEVICE_NUMBER(x) minor((x)->i_rdev)
+#define XGI_IS_CONTROL_DEVICE(x) (minor((x)->i_rdev) == 255)
+
+#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start)
+#define XGI_PCI_RESOURCE_SIZE(dev, bar) ((dev)->resource[bar].end - (dev)->resource[bar].start + 1)
+
+#define XGI_PCI_BUS_NUMBER(dev) (dev)->bus->number
+#define XGI_PCI_SLOT_NUMBER(dev) PCI_SLOT((dev)->devfn)
+
+#define XGI_PCI_GET_CLASS_PRESENT
+#ifdef XGI_PCI_GET_CLASS_PRESENT
+#define XGI_PCI_DEV_PUT(dev) pci_dev_put(dev)
+#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_get_device(vendor,device,from)
+#else
+#define XGI_PCI_DEV_PUT(dev)
+#define XGI_PCI_GET_DEVICE(vendor,device,from) pci_find_device(vendor,device,from)
+#endif
+
+/*
+ * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver
+ * model is not sufficient for full acpi support. it may work in some cases,
+ * but not enough for us to officially support this configuration.
+ */
+#if defined(CONFIG_ACPI)
+#define XGI_PM_SUPPORT_ACPI
+#endif
+
+#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
+#define XGI_PM_SUPPORT_APM
+#endif
+
+#if defined(CONFIG_DEVFS_FS)
+typedef void *devfs_handle_t;
+#define XGI_DEVFS_REGISTER(_name, _minor) \
+ ({ \
+ devfs_handle_t __handle = NULL; \
+ if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \
+ S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \
+ { \
+ __handle = (void *) 1; /* XXX Fix me! (boolean) */ \
+ } \
+ __handle; \
+ })
+/*
+#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i)
+*/
+#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl")
+#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi")
+#endif /* defined(CONFIG_DEVFS_FS) */
+
+#define XGI_REGISTER_CHRDEV(x...) register_chrdev(x)
+#define XGI_UNREGISTER_CHRDEV(x...) unregister_chrdev(x)
+
+/* Map physical memory into a user VMA, smoothing over the
+ * remap_pfn_range()/remap_page_range() API change.  Every variant
+ * expects a variable named 'vma' to be in scope at the call site. */
+#if defined(XGI_REMAP_PFN_RANGE_PRESENT)
+#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \
+	remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
+#elif defined(XGI_REMAP_PAGE_RANGE_5)
+#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)
+#elif defined(XGI_REMAP_PAGE_RANGE_4)
+#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(x)
+#else
+#warning "xgi_configure.sh failed, assuming remap_page_range(5)!"
+#define XGI_REMAP_PAGE_RANGE(x...) remap_page_range(vma, x)
+#endif
+
+#if defined(pmd_offset_map)
+#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
+ { \
+ pg_mid_dir = pmd_offset_map(pg_dir, address); \
+ }
+#define XGI_PMD_UNMAP(pg_mid_dir) \
+ { \
+ pmd_unmap(pg_mid_dir); \
+ }
+#else
+#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
+ { \
+ pg_mid_dir = pmd_offset(pg_dir, address); \
+ }
+#define XGI_PMD_UNMAP(pg_mid_dir)
+#endif
+
+#define XGI_PMD_PRESENT(pg_mid_dir) \
+ ({ \
+ if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \
+ { \
+ XGI_PMD_UNMAP(pg_mid_dir); \
+ pg_mid_dir = NULL; \
+ } \
+ pg_mid_dir != NULL; \
+ })
+
+#if defined(pte_offset_atomic)
+#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
+ { \
+ pte = pte_offset_atomic(pg_mid_dir, address); \
+ XGI_PMD_UNMAP(pg_mid_dir); \
+ }
+#define XGI_PTE_UNMAP(pte) \
+ { \
+ pte_kunmap(pte); \
+ }
+#elif defined(pte_offset)
+#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
+ { \
+ pte = pte_offset(pg_mid_dir, address); \
+ XGI_PMD_UNMAP(pg_mid_dir); \
+ }
+#define XGI_PTE_UNMAP(pte)
+#else
+#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
+ { \
+ pte = pte_offset_map(pg_mid_dir, address); \
+ XGI_PMD_UNMAP(pg_mid_dir); \
+ }
+#define XGI_PTE_UNMAP(pte) \
+ { \
+ pte_unmap(pte); \
+ }
+#endif
+
+#define XGI_PTE_PRESENT(pte) \
+ ({ \
+ if (pte) \
+ { \
+ if (!pte_present(*pte)) \
+ { \
+ XGI_PTE_UNMAP(pte); pte = NULL; \
+ } \
+ } \
+ pte != NULL; \
+ })
+
+#define XGI_PTE_VALUE(pte) \
+ ({ \
+ unsigned long __pte_value = pte_val(*pte); \
+ XGI_PTE_UNMAP(pte); \
+ __pte_value; \
+ })
+
+#define XGI_PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
+#define XGI_MASK_OFFSET(addr) ((addr) & (PAGE_SIZE - 1))
+
+#if !defined (pgprot_noncached)
+/* Fallback for kernels that lack pgprot_noncached(): set the x86
+ * page-level cache-disable bit.  Skipped on pre-486 CPUs
+ * (boot_cpu_data.x86 <= 3), which have no PCD bit. */
+static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
+{
+	pgprot_t new_prot = old_prot;
+	if (boot_cpu_data.x86 > 3)
+		new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
+	return new_prot;
+}
+#endif
+
+#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined)
+/* Added define for write combining page, only valid if pat enabled. */
+#define _PAGE_WRTCOMB _PAGE_PWT
+#define __PAGE_KERNEL_WRTCOMB \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED)
+#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB)
+
+static inline pgprot_t pgprot_writecombined(pgprot_t old_prot)
+{
+ pgprot_t new_prot = old_prot;
+ if (boot_cpu_data.x86 > 3) {
+ pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT);
+ new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB);
+ }
+ return new_prot;
+}
+#endif
+
+#if !defined(page_to_pfn)
+#define page_to_pfn(page) ((page) - mem_map)
+#endif
+
+#define XGI_VMALLOC(ptr, size) \
+ { \
+ (ptr) = vmalloc_32(size); \
+ }
+
+#define XGI_VFREE(ptr, size) \
+ { \
+ vfree((void *) (ptr)); \
+ }
+
+#define XGI_IOREMAP(ptr, physaddr, size) \
+ { \
+ (ptr) = ioremap(physaddr, size); \
+ }
+
+#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \
+ { \
+ (ptr) = ioremap_nocache(physaddr, size); \
+ }
+
+#define XGI_IOUNMAP(ptr, size) \
+ { \
+ iounmap(ptr); \
+ }
+
+/*
+ * only use this because GFP_KERNEL may sleep..
+ * GFP_ATOMIC is ok, it won't sleep
+ */
+#define XGI_KMALLOC(ptr, size) \
+ { \
+ (ptr) = kmalloc(size, GFP_KERNEL); \
+ }
+
+#define XGI_KMALLOC_ATOMIC(ptr, size) \
+ { \
+ (ptr) = kmalloc(size, GFP_ATOMIC); \
+ }
+
+#define XGI_KFREE(ptr, size) \
+ { \
+ kfree((void *) (ptr)); \
+ }
+
+#define XGI_GET_FREE_PAGES(ptr, order) \
+ { \
+ (ptr) = __get_free_pages(GFP_KERNEL, order); \
+ }
+
+#define XGI_FREE_PAGES(ptr, order) \
+ { \
+ free_pages(ptr, order); \
+ }
+
+/* One entry of a driver-built page table: a physical address paired
+ * with the kernel virtual address it is mapped at. */
+struct xgi_pte {
+	unsigned long phys_addr;
+	unsigned long virt_addr;
+};
+
+/*
+ * AMD Athlon processors expose a subtle bug in the Linux
+ * kernel, that may lead to AGP memory corruption. Recent
+ * kernel versions had a workaround for this problem, but
+ * 2.4.20 is the first kernel to address it properly. The
+ * page_attr API provides the means to solve the problem.
+ */
+/* Mark the page backing @page_ptr uncached in the kernel mapping. */
+static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(struct xgi_pte * page_ptr)
+{
+	struct page *page = virt_to_page(__va(page_ptr->phys_addr));
+	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
+}
+/* Restore normal cached attributes on the page backing @page_ptr. */
+static inline void XGI_SET_PAGE_ATTRIB_CACHED(struct xgi_pte * page_ptr)
+{
+	struct page *page = virt_to_page(__va(page_ptr->phys_addr));
+	change_page_attr(page, 1, PAGE_KERNEL);
+}
+
+/* add for SUSE 9, Jill*/
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4)
+#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->count)
+#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->count)
+#define XGI_PAGE_COUNT(page) atomic_read(&(page)->count)
+#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->count, v)
+#else
+#define XGI_INC_PAGE_COUNT(page) atomic_inc(&(page)->_count)
+#define XGI_DEC_PAGE_COUNT(page) atomic_dec(&(page)->_count)
+#define XGI_PAGE_COUNT(page) atomic_read(&(page)->_count)
+#define XGI_SET_PAGE_COUNT(page,v) atomic_set(&(page)->_count, v)
+#endif
+#define XGILockPage(page) SetPageLocked(page)
+#define XGIUnlockPage(page) ClearPageLocked(page)
+
+struct xgi_file_private {
+ struct xgi_info *info;
+ unsigned int num_events;
+ spinlock_t fp_lock;
+ wait_queue_head_t wait_queue;
+};
+
+#define FILE_PRIVATE(filp) ((filp)->private_data)
+
+#define XGI_GET_FP(filp) ((struct xgi_file_private *) FILE_PRIVATE(filp))
+
+/* for the card devices */
+#define XGI_INFO_FROM_FP(filp) (XGI_GET_FP(filp)->info)
+
+#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode)
+
+#define XGI_ATOMIC_SET(data,val) atomic_set(&(data), (val))
+#define XGI_ATOMIC_INC(data) atomic_inc(&(data))
+#define XGI_ATOMIC_DEC(data) atomic_dec(&(data))
+#define XGI_ATOMIC_DEC_AND_TEST(data) atomic_dec_and_test(&(data))
+#define XGI_ATOMIC_READ(data) atomic_read(&(data))
+
+/*
+ * lock-related functions that should only be called from this file
+ *
+ * Thin wrappers: xgi_lock/xgi_unlock operate on spinlocks,
+ * xgi_down/xgi_up on semaphores.  All take the lock object itself
+ * (not a pointer) and apply the address-of internally.
+ */
+#define xgi_init_lock(lock) spin_lock_init(&lock)
+#define xgi_lock(lock) spin_lock(&lock)
+#define xgi_unlock(lock) spin_unlock(&lock)
+#define xgi_down(lock) down(&lock)
+#define xgi_up(lock) up(&lock)
+
+/* NOTE(review): the "unlock_irqsave" name pairs with irqrestore below
+ * -- slightly misleading but kept for source compatibility. */
+#define xgi_lock_irqsave(lock,flags) spin_lock_irqsave(&lock,flags)
+#define xgi_unlock_irqsave(lock,flags) spin_unlock_irqrestore(&lock,flags)
+
+#endif
diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c
new file mode 100644
index 00000000..2d310a2f
--- /dev/null
+++ b/linux-core/xgi_misc.c
@@ -0,0 +1,571 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_linux.h"
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_pcie.h"
+
+/* Fill @req with static chip identification (PCI IDs, fixed "xg47"
+ * name, framebuffer size, SAREA location) for the GET_INFO ioctl. */
+void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req)
+{
+	req->device_id = info->dev->device;
+	/* Device name is hard-coded to "xg47" (no NUL appended here;
+	 * presumably device_name is pre-zeroed by the caller — TODO confirm). */
+	req->device_name[0] = 'x';
+	req->device_name[1] = 'g';
+	req->device_name[2] = '4';
+	req->device_name[3] = '7';
+	req->vendor_id = info->dev->vendor;
+	req->curr_display_mode = 0;
+	req->fb_size = info->fb.size;
+	req->sarea_bus_addr = info->sarea_info.bus_addr;
+	req->sarea_size = info->sarea_info.size;
+}
+
+/* Report the MMIO aperture (base bus address and length) to user space. */
+void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req)
+{
+	req->mmio_base = info->mmio.base;
+	req->size = info->mmio.size;
+}
+
+/* Cache the current screen geometry (start offset, resolution, depth,
+ * pitch) supplied by user space into the driver-private info struct. */
+void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req)
+{
+	info->scrn_info.scrn_start = req->scrn_start;
+	info->scrn_info.scrn_xres = req->scrn_xres;
+	info->scrn_info.scrn_yres = req->scrn_yres;
+	info->scrn_info.scrn_bpp = req->scrn_bpp;
+	info->scrn_info.scrn_pitch = req->scrn_pitch;
+
+	XGI_INFO("info->scrn_info.scrn_start: 0x%lx"
+		 "info->scrn_info.scrn_xres: 0x%lx"
+		 "info->scrn_info.scrn_yres: 0x%lx"
+		 "info->scrn_info.scrn_bpp: 0x%lx"
+		 "info->scrn_info.scrn_pitch: 0x%lx\n",
+		 info->scrn_info.scrn_start,
+		 info->scrn_info.scrn_xres,
+		 info->scrn_info.scrn_yres,
+		 info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch);
+}
+
+/* Return the previously cached screen geometry (see xgi_put_screen_info)
+ * back to user space. */
+void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req)
+{
+	req->scrn_start = info->scrn_info.scrn_start;
+	req->scrn_xres = info->scrn_info.scrn_xres;
+	req->scrn_yres = info->scrn_info.scrn_yres;
+	req->scrn_bpp = info->scrn_info.scrn_bpp;
+	req->scrn_pitch = info->scrn_info.scrn_pitch;
+
+	XGI_INFO("req->scrn_start: 0x%lx"
+		 "req->scrn_xres: 0x%lx"
+		 "req->scrn_yres: 0x%lx"
+		 "req->scrn_bpp: 0x%lx"
+		 "req->scrn_pitch: 0x%lx\n",
+		 req->scrn_start,
+		 req->scrn_xres,
+		 req->scrn_yres, req->scrn_bpp, req->scrn_pitch);
+}
+
+/* Reset the graphics engine by toggling it off and back on. */
+void xgi_ge_reset(struct xgi_info * info)
+{
+	xgi_disable_ge(info);
+	xgi_enable_ge(info);
+}
+
+/* Record the SAREA (shared area) bus address and size supplied by
+ * user space; later reported via xgi_get_device_info(). */
+void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req)
+{
+	info->sarea_info.bus_addr = req->bus_addr;
+	info->sarea_info.size = req->size;
+	XGI_INFO("info->sarea_info.bus_addr: 0x%lx"
+		 "info->sarea_info.size: 0x%lx\n",
+		 info->sarea_info.bus_addr, info->sarea_info.size);
+}
+
+/*
+ * irq functions
+ */
+/* Cycle-count window (see get_cycles() use below): successive GE-stall
+ * interrupts closer together than this are treated as the same hang. */
+#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
+
+/* Count of "invalid begin" GE interrupts; incremented in
+ * xgi_ge_irq_handler() and never read in this file. */
+static unsigned int s_invalid_begin = 0;
+
+/* Probe the GE debug port (index 0x235c / data 0x2360) to decide whether
+ * a reported GE stall is genuine.  Returns FALSE if any read-back or
+ * channel check fails (signal invalid — skip the reset), TRUE otherwise.
+ * The write-index-then-read-data sequence is order-sensitive MMIO. */
+static bool xgi_validate_signal(volatile u8 *mmio_vbase)
+{
+	volatile u32 *const ge_3d_status =
+	    (volatile u32 *)(mmio_vbase + 0x2800);
+	const u32 old_ge_status = ge_3d_status[0x00];
+
+	/* Only probe when the engine reports activity in bits 18-20. */
+	if (old_ge_status & 0x001c0000) {
+		u16 check;
+
+		/* Check Read back status */
+		*(mmio_vbase + 0x235c) = 0x80;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+
+		/* Low and high nibble-pairs must agree. */
+		if ((check & 0x3f) != ((check & 0x3f00) >> 8)) {
+			return FALSE;
+		}
+
+		/* Check RO channel */
+		*(mmio_vbase + 0x235c) = 0x83;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
+			return FALSE;
+		}
+
+		/* Check RW channel */
+		*(mmio_vbase + 0x235c) = 0x88;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if ((check & 0x0f) != ((check & 0xf0) >> 4)) {
+			return FALSE;
+		}
+
+		/* Check RO channel outstanding */
+		*(mmio_vbase + 0x235c) = 0x8f;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if (0 != (check & 0x3ff)) {
+			return FALSE;
+		}
+
+		/* Check RW channel outstanding */
+		*(mmio_vbase + 0x235c) = 0x90;
+		check = *((volatile u16 *)(mmio_vbase + 0x2360));
+		if (0 != (check & 0x3ff)) {
+			return FALSE;
+		}
+
+		/* No pending PCIE request. GE stall. */
+	}
+
+	return TRUE;
+}
+
+
+/* Attempt to reset a hung graphics engine.  First tries the soft reset
+ * via register 0xb057; if the status bits 28-31 do not clear within the
+ * timeout, falls back to a hard reset through CRTC register 3x5.36,
+ * temporarily disabling dynamic gating (3ce/3cf index 0x2a) around it.
+ * The whole sequence is order-sensitive MMIO — do not reorder. */
+static void xgi_ge_hang_reset(volatile u8 *mmio_vbase)
+{
+	volatile u32 *const ge_3d_status =
+	    (volatile u32 *)(mmio_vbase + 0x2800);
+	int time_out = 0xffff;
+
+	*(mmio_vbase + 0xb057) = 8;
+	while (0 != (ge_3d_status[0x00] & 0xf0000000)) {
+		/* Crude delay: spin until the low 12 bits of the counter wrap. */
+		while (0 != ((--time_out) & 0xfff))
+			/* empty */ ;
+
+		if (0 == time_out) {
+			u8 old_3ce;
+			u8 old_3cf;
+			u8 old_index;
+			u8 old_36;
+
+			XGI_INFO("Can not reset back 0x%x!\n",
+				 ge_3d_status[0x00]);
+
+			*(mmio_vbase + 0xb057) = 0;
+
+			/* Have to use 3x5.36 to reset. */
+			/* Save and close dynamic gating */
+
+			old_3ce = *(mmio_vbase + 0x3ce);
+			*(mmio_vbase + 0x3ce) = 0x2a;
+			old_3cf = *(mmio_vbase + 0x3cf);
+			*(mmio_vbase + 0x3cf) = old_3cf & 0xfe;
+
+			/* Reset GE */
+			old_index = *(mmio_vbase + 0x3d4);
+			*(mmio_vbase + 0x3d4) = 0x36;
+			old_36 = *(mmio_vbase + 0x3d5);
+			*(mmio_vbase + 0x3d5) = old_36 | 0x10;
+
+			/* Let the reset bit settle before restoring. */
+			while (0 != ((--time_out) & 0xfff))
+				/* empty */ ;
+
+			*(mmio_vbase + 0x3d5) = old_36;
+			*(mmio_vbase + 0x3d4) = old_index;
+
+			/* Restore dynamic gating */
+			*(mmio_vbase + 0x3cf) = old_3cf;
+			*(mmio_vbase + 0x3ce) = old_3ce;
+			break;
+		}
+	}
+
+	*(mmio_vbase + 0xb057) = 0;
+}
+
+
+/* Graphics-engine interrupt handler.  Acknowledges GE stall (bit 12)
+ * and "invalid begin" (bit 0) interrupts; returns TRUE if the interrupt
+ * belonged to the GE, FALSE so the caller can try other sources.
+ * NOTE(review): is_support_auto_reset is hard-coded FALSE, so the whole
+ * auto-reset branch below is currently dead code (feature disabled). */
+bool xgi_ge_irq_handler(struct xgi_info * info)
+{
+	volatile u8 *const mmio_vbase = info->mmio.vbase;
+	volatile u32 *const ge_3d_status =
+	    (volatile u32 *)(mmio_vbase + 0x2800);
+	const u32 int_status = ge_3d_status[4];
+	bool is_support_auto_reset = FALSE;
+
+	/* Check GE on/off */
+	if (0 == (0xffffc0f0 & int_status)) {
+		/* NOTE(review): this MMIO read is never used afterwards;
+		 * kept because reading hardware registers can have side
+		 * effects — confirm before removing. */
+		u32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a];
+
+		if (0 != (0x1000 & int_status)) {
+			/* We got GE stall interrupt.
+			 */
+			/* Ack the stall by writing bit 26 back. */
+			ge_3d_status[0x04] = int_status | 0x04000000;
+
+			if (is_support_auto_reset) {
+				static cycles_t last_tick;
+				static unsigned continue_int_count = 0;
+
+				/* OE II is busy. */
+
+				if (!xgi_validate_signal(mmio_vbase)) {
+					/* Nothing but skip. */
+				} else if (0 == continue_int_count++) {
+					last_tick = get_cycles();
+				} else {
+					const cycles_t new_tick = get_cycles();
+					if ((new_tick - last_tick) >
+					    STALL_INTERRUPT_RESET_THRESHOLD) {
+						continue_int_count = 0;
+					} else if (continue_int_count >= 3) {
+						continue_int_count = 0;
+
+						/* GE Hung up, need reset. */
+						XGI_INFO("Reset GE!\n");
+
+						xgi_ge_hang_reset(mmio_vbase);
+					}
+				}
+			}
+		} else if (0 != (0x1 & int_status)) {
+			/* "Invalid begin" interrupt: count it and ack. */
+			s_invalid_begin++;
+			ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000;
+		}
+
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/* CRT interrupt handler: if 3cf.37 bit 0 signals a CRT1 interrupt,
+ * clear it by pulsing 3cf.3d bit 2.  Saves/restores the 3CE index
+ * register around the accesses.  Returns TRUE if an interrupt was
+ * handled. */
+bool xgi_crt_irq_handler(struct xgi_info * info)
+{
+	bool ret = FALSE;
+	u8 save_3ce = bReadReg(0x3ce);
+
+	if (bIn3cf(0x37) & 0x01)	// CRT1 interrupt just happened
+	{
+		u8 op3cf_3d;
+		u8 op3cf_37;
+
+		// What happened?
+		op3cf_37 = bIn3cf(0x37);
+
+		// Clear CRT interrupt
+		op3cf_3d = bIn3cf(0x3d);
+		bOut3cf(0x3d, (op3cf_3d | 0x04));
+		bOut3cf(0x3d, (op3cf_3d & ~0x04));
+		ret = TRUE;
+	}
+	bWriteReg(0x3ce, save_3ce);
+
+	return (ret);
+}
+
+/* DVI interrupt handler: if 3cf.38 bit 5 signals a DVI hot-plug event,
+ * notify the BIOS via 3x5.5a and clear the interrupt by toggling
+ * 3cf.39 bit 0.  Saves/restores the 3CE index register around the
+ * accesses.  Returns TRUE if an interrupt was handled.
+ *
+ * Fixes vs. previous revision: the interrupt-clear writes used
+ * bOut3c5() (sequencer port 3c4/3c5) although both the comments and the
+ * matching read bIn3cf(0x39) address graphics register 3cf.39 — use
+ * bOut3cf() as in xgi_crt_irq_handler().  Also dropped a stray double
+ * semicolon. */
+bool xgi_dvi_irq_handler(struct xgi_info * info)
+{
+	bool ret = FALSE;
+	u8 save_3ce = bReadReg(0x3ce);
+
+	if (bIn3cf(0x38) & 0x20)	// DVI interrupt just happened
+	{
+		u8 op3cf_39;
+		u8 op3cf_37;
+		u8 op3x5_5a;
+		u8 save_3x4 = bReadReg(0x3d4);
+
+		// What happened?
+		op3cf_37 = bIn3cf(0x37);
+
+		//Notify BIOS that DVI plug/unplug happened
+		op3x5_5a = bIn3x5(0x5a);
+		bOut3x5(0x5a, op3x5_5a & 0xf7);
+
+		bWriteReg(0x3d4, save_3x4);
+
+		// Clear DVI interrupt
+		op3cf_39 = bIn3cf(0x39);
+		bOut3cf(0x39, (op3cf_39 & ~0x01));	//Set 3cf.39 bit 0 to 0
+		bOut3cf(0x39, (op3cf_39 | 0x01));	//Set 3cf.39 bit 0 to 1
+
+		ret = TRUE;
+	}
+	bWriteReg(0x3ce, save_3ce);
+
+	return (ret);
+}
+
+/* Debug helper: dump several register banks to the kernel log as
+ * 16-column hex tables — the SR (3c5), CR (3d5) and GR (3cf) indexed
+ * VGA registers, plus the MMIO ranges at 0xB000, 0x2200, 0x2300,
+ * 0x2400 and 0x2800.  Each table prints a column-header row first. */
+void xgi_dump_register(struct xgi_info * info)
+{
+	int i, j;
+	unsigned char temp;
+
+	// 0x3C5
+	printk("\r\n=====xgi_dump_register========0x%x===============\r\n",
+	       0x3C5);
+
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bIn3c5(i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	// 0x3D5
+	printk("\r\n====xgi_dump_register=========0x%x===============\r\n",
+	       0x3D5);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bIn3x5(i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	// 0x3CF
+	printk("\r\n=========xgi_dump_register====0x%x===============\r\n",
+	       0x3CF);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bIn3cf(i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	/* MMIO 0xB000-0xB04F (5 rows only). */
+	printk("\r\n=====xgi_dump_register======0x%x===============\r\n",
+	       0xB000);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x5; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bReadReg(0xB000 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	/* MMIO 0x2200-0x22AF (0xB rows). */
+	printk("\r\n==================0x%x===============\r\n", 0x2200);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0xB; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bReadReg(0x2200 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	/* MMIO 0x2300-0x236F (7 rows). */
+	printk("\r\n==================0x%x===============\r\n", 0x2300);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x7; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bReadReg(0x2300 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	/* MMIO 0x2400-0x24FF. */
+	printk("\r\n==================0x%x===============\r\n", 0x2400);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bReadReg(0x2400 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+
+	/* MMIO 0x2800-0x28FF (GE status block). */
+	printk("\r\n==================0x%x===============\r\n", 0x2800);
+	for (i = 0; i < 0x10; i++) {
+		if (i == 0) {
+			printk("%5x", i);
+		} else {
+			printk("%3x", i);
+		}
+	}
+	printk("\r\n");
+
+	for (i = 0; i < 0x10; i++) {
+		printk("%1x ", i);
+
+		for (j = 0; j < 0x10; j++) {
+			temp = bReadReg(0x2800 + i * 0x10 + j);
+			printk("%3x", temp);
+		}
+		printk("\r\n");
+	}
+}
+
+/* Restore CRTC registers 3x5.13 and 3x5.8b to fixed values.
+ * NOTE(review): the meaning of these two values is not documented
+ * anywhere in this file — presumably a post-reset default; confirm
+ * against the hardware reference before changing. */
+void xgi_restore_registers(struct xgi_info * info)
+{
+	bOut3x5(0x13, 0);
+	bOut3x5(0x8b, 2);
+}
+
+/* Busy-wait until the GE status register reads idle for five
+ * consecutive polls.  NOTE(review): unbounded spin with no timeout and
+ * no cpu_relax(); a wedged engine will hang this CPU here. */
+void xgi_waitfor_pci_idle(struct xgi_info * info)
+{
+#define WHOLD_GE_STATUS 0x2800
+#define IDLE_MASK ~0x90200000
+
+	int idleCount = 0;
+	while (idleCount < 5) {
+		/* Any busy bit set resets the consecutive-idle count. */
+		if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) {
+			idleCount = 0;
+		} else {
+			idleCount++;
+		}
+	}
+}
+
+
+/*memory collect function*/
+/* Garbage-collect memory blocks whose owning process has exited: walk
+ * the global per-PID block list, and for every block whose PID no
+ * longer matches a live task, free its FB or PCIe allocation (or just
+ * unlink the record when bus_addr is the 0xFFFFFFFF sentinel).  On
+ * return, *pcnt holds the number of live sentinel blocks seen.
+ * NOTE(review): xgi_mempid_list is global shared state; presumably the
+ * caller holds the appropriate lock — confirm at the call sites. */
+extern struct list_head xgi_mempid_list;
+void xgi_mem_collect(struct xgi_info * info, unsigned int *pcnt)
+{
+	struct xgi_mem_pid *block;
+	struct xgi_mem_pid *next;
+	struct task_struct *p, *find;
+	unsigned int cnt = 0;
+
+	list_for_each_entry_safe(block, next, &xgi_mempid_list, list) {
+
+		find = NULL;
+		/* Scan all tasks for one whose pid matches this block. */
+		XGI_SCAN_PROCESS(p) {
+			if (p->pid == block->pid) {
+				XGI_INFO
+				    ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n",
+				     block->pid, p->state,
+				     block->location,
+				     block->bus_addr);
+				find = p;
+				if (block->bus_addr == 0xFFFFFFFF)
+					++cnt;
+				break;
+			}
+		}
+		if (!find) {
+			/* Owner is gone: release the underlying memory. */
+			if (block->location == XGI_MEMLOC_LOCAL) {
+				XGI_INFO
+				    ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n",
+				     block->pid, block->bus_addr);
+				xgi_fb_free(info, block->bus_addr);
+			} else if (block->bus_addr != 0xFFFFFFFF) {
+				XGI_INFO
+				    ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n",
+				     block->pid, block->bus_addr);
+				xgi_pcie_free(info, block->bus_addr);
+			} else {
+				/*only delete the memory block */
+				list_del(&block->list);
+				XGI_INFO
+				    ("Memory ProcessID delete one pcie block pid:%ld successfully! \n",
+				     block->pid);
+				kfree(block);
+			}
+		}
+	}
+	*pcnt = cnt;
+}
diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h
new file mode 100644
index 00000000..85cfbf2b
--- /dev/null
+++ b/linux-core/xgi_misc.h
@@ -0,0 +1,46 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_MISC_H_
+#define _XGI_MISC_H_
+
+/* Public interface of xgi_misc.c: info/screen accessors, GE reset,
+ * register dump and the per-source interrupt handlers. */
+extern void xgi_dump_register(struct xgi_info * info);
+extern void xgi_get_device_info(struct xgi_info * info, struct xgi_chip_info * req);
+extern void xgi_get_mmio_info(struct xgi_info * info, struct xgi_mmio_info * req);
+extern void xgi_get_screen_info(struct xgi_info * info, struct xgi_screen_info * req);
+extern void xgi_put_screen_info(struct xgi_info * info, struct xgi_screen_info * req);
+extern void xgi_ge_reset(struct xgi_info * info);
+extern void xgi_sarea_info(struct xgi_info * info, struct xgi_sarea_info * req);
+
+/* Interrupt handlers return TRUE when the interrupt was theirs. */
+extern void xgi_restore_registers(struct xgi_info * info);
+extern bool xgi_ge_irq_handler(struct xgi_info * info);
+extern bool xgi_crt_irq_handler(struct xgi_info * info);
+extern bool xgi_dvi_irq_handler(struct xgi_info * info);
+extern void xgi_waitfor_pci_idle(struct xgi_info * info);
+
+#endif
diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c
new file mode 100644
index 00000000..70459b2c
--- /dev/null
+++ b/linux-core/xgi_pcie.c
@@ -0,0 +1,967 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#include "xgi_linux.h"
+#include "xgi_drv.h"
+#include "xgi_regs.h"
+#include "xgi_pcie.h"
+#include "xgi_misc.h"
+
+/* Module-wide PCIe heap state: one global heap, a slab cache for block
+ * nodes, and cached singleton blocks for the vertex buffer, command
+ * list and scratch pad (each allocated at most once — see
+ * xgi_pcie_mem_alloc).  NOTE(review): globals, so this supports only
+ * one card instance. */
+static struct xgi_pcie_heap *xgi_pcie_heap = NULL;
+static struct kmem_cache *xgi_pcie_cache_block = NULL;
+static struct xgi_pcie_block *xgi_pcie_vertex_block = NULL;
+static struct xgi_pcie_block *xgi_pcie_cmdlist_block = NULL;
+static struct xgi_pcie_block *xgi_pcie_scratchpad_block = NULL;
+extern struct list_head xgi_mempid_list;
+
+/* Allocate 2^page_order contiguous pages for the PCIe GART lookup
+ * table, bump each page's refcount and mark it locked (so it is never
+ * swapped/migrated while the GPU references it).  Returns the kernel
+ * virtual address, or 0 on failure.  Paired with xgi_pcie_lut_free(). */
+static unsigned long xgi_pcie_lut_alloc(unsigned long page_order)
+{
+	struct page *page;
+	unsigned long page_addr = 0;
+	unsigned long page_count = 0;
+	int i;
+
+	page_count = (1 << page_order);
+	page_addr = __get_free_pages(GFP_KERNEL, page_order);
+
+	if (page_addr == 0UL) {
+		XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n",
+			  page_count);
+		return 0;
+	}
+
+	page = virt_to_page(page_addr);
+
+	for (i = 0; i < page_count; i++, page++) {
+		XGI_INC_PAGE_COUNT(page);
+		XGILockPage(page);
+	}
+
+	XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n",
+		 page_count, page_order, page_addr);
+	return page_addr;
+}
+
+/* Undo xgi_pcie_lut_alloc(): drop the extra refcount and lock bit on
+ * each page, then return the block to the page allocator. */
+static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order)
+{
+	struct page *page;
+	unsigned long page_count = 0;
+	int i;
+
+	page_count = (1 << page_order);
+	page = virt_to_page(page_addr);
+
+	for (i = 0; i < page_count; i++, page++) {
+		XGI_DEC_PAGE_COUNT(page);
+		XGIUnlockPage(page);
+	}
+
+	free_pages(page_addr, page_order);
+}
+
+/* Initialize the PCIe GART: size the aperture from CRTC 3x5.27, read
+ * the LUT page size from MMIO 0xB00C, allocate and zero the lookup
+ * table, flush CPU caches, and program the table's physical address
+ * into the chip (0xB034).  Returns 1 on success, 0 on failure.
+ * The register programming order at the end is hardware-mandated. */
+static int xgi_pcie_lut_init(struct xgi_info * info)
+{
+	unsigned char *page_addr = NULL;
+	unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder;
+	unsigned long count = 0;
+	u8 temp = 0;
+
+	/* Jong 06/06/2006 */
+	unsigned long pcie_aperture_size;
+
+	/* PCIe (GART) aperture is fixed at 128 MiB regardless of FB size. */
+	info->pcie.size = 128 * 1024 * 1024;
+
+	/* Get current FB aperture size */
+	temp = In3x5(0x27);
+	XGI_INFO("In3x5(0x27): 0x%x \n", temp);
+
+	if (temp & 0x01) {	/* 256MB; Jong 06/05/2006; 0x10000000 */
+		/* Jong 06/06/2006; allocate memory */
+		pcie_aperture_size = 256 * 1024 * 1024;
+		/* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */
+	} else {		/* 128MB; Jong 06/05/2006; 0x08000000 */
+
+		/* Jong 06/06/2006; allocate memory */
+		pcie_aperture_size = 128 * 1024 * 1024;
+		/* info->pcie.base = 128 * 1024 * 1024; */
+	}
+
+	/* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */
+	/* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */
+	/* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */
+	/* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */
+	/* NOTE(review): pcie.base is set to the aperture SIZE, not an
+	 * address — the dead alternatives above show this was arrived at
+	 * by trial and error ("works"); treat as empirical, not designed. */
+	info->pcie.base = pcie_aperture_size;	/* works */
+	/* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */
+	/* info->pcie.base=128 * 1024 * 1024; *//* System hang */
+
+	XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base);
+
+	/* Get current lookup table page size */
+	temp = bReadReg(0xB00C);
+	if (temp & 0x04) {	/* 8KB */
+		info->lutPageSize = 8 * 1024;
+	} else {		/* 4KB */
+
+		info->lutPageSize = 4 * 1024;
+	}
+
+	XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
+
+#if 0
+	/* Get current lookup table location */
+	temp = bReadReg(0xB00C);
+	if (temp & 0x02) {	/* LFB */
+		info->isLUTInLFB = TRUE;
+		/* Current we only support lookup table in LFB */
+		temp &= 0xFD;
+		bWriteReg(0xB00C, temp);
+		info->isLUTInLFB = FALSE;
+	} else {		/* SFB */
+
+		info->isLUTInLFB = FALSE;
+	}
+
+	XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
+
+	/* Get current SDFB page size */
+	temp = bReadReg(0xB00C);
+	if (temp & 0x08) {	/* 8MB */
+		info->sdfbPageSize = 8 * 1024 * 1024;
+	} else {		/* 4MB */
+
+		info->sdfbPageSize = 4 * 1024 * 1024;
+	}
+#endif
+	pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE;
+
+	/*
+	 * Allocate memory for PCIE GART table;
+	 */
+	/* One 4-byte LUT entry per aperture page. */
+	lutEntryNum = pciePageCount;
+	lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE;
+
+	/* get page_order base on page_count */
+	count = lutPageCount;
+	for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ;
+
+	/* If lutPageCount was an exact power of two the loop overshot by
+	 * one; correct the order back down. */
+	if ((lutPageCount << 1) == (1 << lutPageOrder)) {
+		lutPageOrder -= 1;
+	}
+
+	XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n",
+		 lutEntryNum, lutPageCount, lutPageOrder);
+
+	info->lutPageOrder = lutPageOrder;
+	page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder);
+
+	if (!page_addr) {
+		XGI_ERROR("cannot allocate PCIE lut page!\n");
+		goto fail;
+	}
+	info->lut_base = (unsigned long *)page_addr;
+
+	XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n",
+		 page_addr, virt_to_phys(page_addr));
+
+	XGI_INFO
+	    ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n",
+	     info->lut_base, __pa(info->lut_base), info->lutPageOrder);
+
+	/*
+	 * clean all PCIE GART Entry
+	 */
+	memset(page_addr, 0, PAGE_SIZE << lutPageOrder);
+
+	/* Flush the zeroed table out of CPU caches before the GPU reads it. */
+#if defined(__i386__) || defined(__x86_64__)
+	asm volatile ("wbinvd":::"memory");
+#else
+	mb();
+#endif
+
+	/* Set GART in SFB */
+	bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02);
+	/* Set GART base address to HW */
+	dwWriteReg(0xB034, __pa(info->lut_base));
+
+	return 1;
+      fail:
+	return 0;
+}
+
+/* Release the GART lookup table allocated by xgi_pcie_lut_init().
+ * Safe to call more than once: lut_base is NULLed after freeing. */
+static void xgi_pcie_lut_cleanup(struct xgi_info * info)
+{
+	if (info->lut_base) {
+		XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n",
+			 info->lut_base, info->lutPageOrder);
+		xgi_pcie_lut_free((unsigned long)info->lut_base,
+				  info->lutPageOrder);
+		info->lut_base = NULL;
+	}
+}
+
+/* Allocate a block descriptor from the slab cache and reset every
+ * field to its empty state.  Returns NULL on allocation failure. */
+static struct xgi_pcie_block *xgi_pcie_new_node(void)
+{
+	struct xgi_pcie_block *block =
+	    (struct xgi_pcie_block *) kmem_cache_alloc(xgi_pcie_cache_block,
+						       GFP_KERNEL);
+	if (block == NULL) {
+		return NULL;
+	}
+
+	block->offset = 0;	/* block's offset in pcie memory, begin from 0 */
+	block->size = 0;	/* The block size.              */
+	block->bus_addr = 0;	/* CPU access address/bus address */
+	block->hw_addr = 0;	/* GE access address            */
+	block->page_count = 0;
+	block->page_order = 0;
+	block->page_block = NULL;
+	block->page_table = NULL;
+	block->owner = PCIE_INVALID;
+
+	return block;
+}
+
+/* Free the backing storage attached to a block: walk its chain of
+ * page_block segments (unlocking/unreferencing each page and returning
+ * it to the allocator), then free the page_table array.  The block
+ * descriptor itself is NOT freed here — callers do that. */
+static void xgi_pcie_block_stuff_free(struct xgi_pcie_block * block)
+{
+	struct page *page;
+	struct xgi_page_block *page_block = block->page_block;
+	struct xgi_page_block *free_block;
+	unsigned long page_count = 0;
+	int i;
+
+	//XGI_INFO("block->page_block: 0x%p \n", block->page_block);
+	while (page_block) {
+		page_count = page_block->page_count;
+
+		page = virt_to_page(page_block->virt_addr);
+		for (i = 0; i < page_count; i++, page++) {
+			XGI_DEC_PAGE_COUNT(page);
+			XGIUnlockPage(page);
+		}
+		free_pages(page_block->virt_addr, page_block->page_order);
+
+		page_block->phys_addr = 0;
+		page_block->virt_addr = 0;
+		page_block->page_count = 0;
+		page_block->page_order = 0;
+
+		/* Advance before freeing the node we just walked. */
+		free_block = page_block;
+		page_block = page_block->next;
+		//XGI_INFO("free free_block: 0x%p \n", free_block);
+		kfree(free_block);
+		free_block = NULL;
+	}
+
+	if (block->page_table) {
+		//XGI_INFO("free block->page_table: 0x%p \n", block->page_table);
+		kfree(block->page_table);
+		block->page_table = NULL;
+	}
+}
+
+/* Set up the PCIe memory heap: program the GART (lut_init), allocate
+ * the heap bookkeeping struct and slab cache, and seed the free list
+ * with one block spanning the whole aperture.  Returns 1 on success,
+ * 0 on failure, unwinding via the goto chain.
+ * NOTE(review): the 6-argument kmem_cache_create() is the pre-2.6.23
+ * API (two ctor/dtor NULLs); needs updating for newer kernels. */
+int xgi_pcie_heap_init(struct xgi_info * info)
+{
+	struct xgi_pcie_block *block;
+
+	if (!xgi_pcie_lut_init(info)) {
+		XGI_ERROR("xgi_pcie_lut_init failed\n");
+		return 0;
+	}
+
+	xgi_pcie_heap =
+	    (struct xgi_pcie_heap *) kmalloc(sizeof(struct xgi_pcie_heap), GFP_KERNEL);
+	if (!xgi_pcie_heap) {
+		XGI_ERROR("xgi_pcie_heap alloc failed\n");
+		goto fail1;
+	}
+	INIT_LIST_HEAD(&xgi_pcie_heap->free_list);
+	INIT_LIST_HEAD(&xgi_pcie_heap->used_list);
+	INIT_LIST_HEAD(&xgi_pcie_heap->sort_list);
+
+	xgi_pcie_heap->max_freesize = info->pcie.size;
+
+	xgi_pcie_cache_block =
+	    kmem_cache_create("xgi_pcie_block", sizeof(struct xgi_pcie_block), 0,
+			      SLAB_HWCACHE_ALIGN, NULL, NULL);
+
+	if (NULL == xgi_pcie_cache_block) {
+		XGI_ERROR("Fail to creat xgi_pcie_block\n");
+		goto fail2;
+	}
+
+	/* Seed the free list with a single block covering the aperture. */
+	block = (struct xgi_pcie_block *) xgi_pcie_new_node();
+	if (!block) {
+		XGI_ERROR("xgi_pcie_new_node failed\n");
+		goto fail3;
+	}
+
+	block->offset = 0;	/* block's offset in pcie memory, begin from 0 */
+	block->size = info->pcie.size;
+
+	list_add(&block->list, &xgi_pcie_heap->free_list);
+
+	XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n",
+		 block->offset, block->size);
+	return 1;
+      fail3:
+	if (xgi_pcie_cache_block) {
+		kmem_cache_destroy(xgi_pcie_cache_block);
+		xgi_pcie_cache_block = NULL;
+	}
+
+      fail2:
+	if (xgi_pcie_heap) {
+		kfree(xgi_pcie_heap);
+		xgi_pcie_heap = NULL;
+	}
+      fail1:
+	xgi_pcie_lut_cleanup(info);
+	return 0;
+}
+
+/* Debug-only (XGI_DEBUG): log every block on the used list with a
+ * human-readable owner label and its offset/size. */
+void xgi_pcie_heap_check(void)
+{
+#ifdef XGI_DEBUG
+	struct xgi_pcie_block *block;
+	unsigned int ownerIndex;
+	static const char *const ownerStr[6] =
+	    { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" };
+
+	if (!xgi_pcie_heap) {
+		return;
+	}
+
+	XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize);
+	list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
+		if (block->owner == PCIE_2D)
+			ownerIndex = 0;
+		/* NOTE(review): the third condition (owner < PCIE_3D) looks
+		 * redundant/suspicious given the first two — verify against
+		 * the PcieOwner enum ordering before relying on the label. */
+		else if (block->owner > PCIE_3D_TEXTURE
+			 || block->owner < PCIE_2D
+			 || block->owner < PCIE_3D)
+			ownerIndex = 5;
+		else
+			ownerIndex = block->owner - PCIE_3D + 1;
+
+		XGI_INFO("Allocated by %s, block offset: 0x%lx, size: 0x%lx \n",
+			 ownerStr[ownerIndex], block->offset, block->size);
+	}
+#endif
+}
+
+/* Tear down the PCIe heap: release the GART, free every block on the
+ * heap's three lists, then destroy the heap struct and slab cache.
+ * NOTE(review): the `free_list++` walk assumes free_list, used_list and
+ * sort_list are three consecutive list_head members of xgi_pcie_heap —
+ * fragile pointer arithmetic over struct layout; confirm the struct
+ * definition before reordering its fields. */
+void xgi_pcie_heap_cleanup(struct xgi_info * info)
+{
+	struct list_head *free_list;
+	struct xgi_pcie_block *block;
+	struct xgi_pcie_block *next;
+	int j;
+
+	xgi_pcie_lut_cleanup(info);
+	XGI_INFO("xgi_pcie_lut_cleanup scceeded\n");
+
+	if (xgi_pcie_heap) {
+		free_list = &xgi_pcie_heap->free_list;
+		for (j = 0; j < 3; j++, free_list++) {
+			list_for_each_entry_safe(block, next, free_list, list) {
+				XGI_INFO
+				    ("No. %d block offset: 0x%lx size: 0x%lx\n",
+				     j, block->offset, block->size);
+				xgi_pcie_block_stuff_free(block);
+				block->bus_addr = 0;
+				block->hw_addr = 0;
+
+				//XGI_INFO("No. %d free block: 0x%p \n", j, block);
+				kmem_cache_free(xgi_pcie_cache_block, block);
+			}
+		}
+
+		XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap);
+		kfree(xgi_pcie_heap);
+		xgi_pcie_heap = NULL;
+	}
+
+	if (xgi_pcie_cache_block) {
+		kmem_cache_destroy(xgi_pcie_cache_block);
+		xgi_pcie_cache_block = NULL;
+	}
+}
+
+static struct xgi_pcie_block *xgi_pcie_mem_alloc(struct xgi_info * info,
+ unsigned long originalSize,
+ enum PcieOwner owner)
+{
+ struct xgi_pcie_block *block, *used_block, *free_block;
+ struct xgi_page_block *page_block, *prev_page_block;
+ struct page *page;
+ unsigned long page_order = 0, count = 0, index = 0;
+ unsigned long page_addr = 0;
+ u32 *lut_addr = NULL;
+ unsigned long lut_id = 0;
+ unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
+ int i, j, page_count = 0;
+ int temp = 0;
+
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n");
+ XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n",
+ originalSize, size);
+
+ if (owner == PCIE_3D) {
+ if (xgi_pcie_vertex_block) {
+ XGI_INFO
+ ("PCIE Vertex has been created, return directly.\n");
+ return xgi_pcie_vertex_block;
+ }
+ }
+
+ if (owner == PCIE_3D_CMDLIST) {
+ if (xgi_pcie_cmdlist_block) {
+ XGI_INFO
+ ("PCIE Cmdlist has been created, return directly.\n");
+ return xgi_pcie_cmdlist_block;
+ }
+ }
+
+ if (owner == PCIE_3D_SCRATCHPAD) {
+ if (xgi_pcie_scratchpad_block) {
+ XGI_INFO
+ ("PCIE Scratchpad has been created, return directly.\n");
+ return xgi_pcie_scratchpad_block;
+ }
+ }
+
+ if (size == 0) {
+ XGI_ERROR("size == 0 \n");
+ return (NULL);
+ }
+
+ XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize);
+ if (size > xgi_pcie_heap->max_freesize) {
+ XGI_ERROR
+ ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n",
+ size, xgi_pcie_heap->max_freesize);
+ return (NULL);
+ }
+
+ /* Jong 05/30/2006; find next free list which has enough space */
+ list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
+ if (size <= block->size) {
+ break;
+ }
+ }
+
+ if (&block->list == &xgi_pcie_heap->free_list) {
+ XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n",
+ size / 1024);
+ return (NULL);
+ }
+
+ free_block = block;
+ XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
+ size, free_block->offset, free_block->size);
+
+ if (size == free_block->size) {
+ used_block = free_block;
+ XGI_INFO("size==free_block->size: free_block = 0x%p\n",
+ free_block);
+ list_del(&free_block->list);
+ } else {
+ used_block = xgi_pcie_new_node();
+ if (used_block == NULL) {
+ return NULL;
+ }
+
+ if (used_block == free_block) {
+ XGI_ERROR("used_block == free_block = 0x%p\n",
+ used_block);
+ }
+
+ used_block->offset = free_block->offset;
+ used_block->size = size;
+
+ free_block->offset += size;
+ free_block->size -= size;
+ }
+
+ xgi_pcie_heap->max_freesize -= size;
+
+ used_block->bus_addr = info->pcie.base + used_block->offset;
+ used_block->hw_addr = info->pcie.base + used_block->offset;
+ used_block->page_count = page_count = size / PAGE_SIZE;
+
+ /* get page_order base on page_count */
+ for (used_block->page_order = 0; page_count; page_count >>= 1) {
+ ++used_block->page_order;
+ }
+
+ if ((used_block->page_count << 1) == (1 << used_block->page_order)) {
+ used_block->page_order--;
+ }
+ XGI_INFO
+ ("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n",
+ used_block->offset, used_block->size, used_block->bus_addr,
+ used_block->hw_addr, used_block->page_count,
+ used_block->page_order);
+
+ used_block->page_block = NULL;
+ //used_block->page_block = (struct xgi_pages_block *)kmalloc(sizeof(struct xgi_pages_block), GFP_KERNEL);
+ //if (!used_block->page_block) return NULL;_t
+ //used_block->page_block->next = NULL;
+
+ used_block->page_table =
+ (struct xgi_pte *) kmalloc(sizeof(struct xgi_pte) * used_block->page_count,
+ GFP_KERNEL);
+ if (used_block->page_table == NULL) {
+ goto fail;
+ }
+
+ lut_id = (used_block->offset >> PAGE_SHIFT);
+ lut_addr = info->lut_base;
+ lut_addr += lut_id;
+ XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id);
+
+ /* alloc free pages from system */
+ page_count = used_block->page_count;
+ page_block = used_block->page_block;
+ prev_page_block = used_block->page_block;
+ for (i = 0; page_count > 0; i++) {
+ /* if size is bigger than 2M bytes, it should be split */
+ if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) {
+ page_order = XGI_PCIE_ALLOC_MAX_ORDER;
+ } else {
+ count = page_count;
+ for (page_order = 0; count; count >>= 1, ++page_order) ;
+
+ if ((page_count << 1) == (1 << page_order)) {
+ page_order -= 1;
+ }
+ }
+
+ count = (1 << page_order);
+ page_addr = __get_free_pages(GFP_KERNEL, page_order);
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n",
+ page_addr);
+
+ if (!page_addr) {
+ XGI_ERROR
+ ("No: %d :Can't get free pages: 0x%lx from system memory !\n",
+ i, count);
+ goto fail;
+ }
+
+ /* Jong 05/30/2006; test */
+ memset((unsigned char *)page_addr, 0xFF,
+ PAGE_SIZE << page_order);
+ /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */
+
+ if (page_block == NULL) {
+ page_block =
+ (struct xgi_page_block *)
+ kmalloc(sizeof(struct xgi_page_block), GFP_KERNEL);
+ if (!page_block) {
+ XGI_ERROR
+ ("Can't get memory for page_block! \n");
+ goto fail;
+ }
+ }
+
+ if (prev_page_block == NULL) {
+ used_block->page_block = page_block;
+ prev_page_block = page_block;
+ } else {
+ prev_page_block->next = page_block;
+ prev_page_block = page_block;
+ }
+
+ page_block->next = NULL;
+ page_block->phys_addr = __pa(page_addr);
+ page_block->virt_addr = page_addr;
+ page_block->page_count = count;
+ page_block->page_order = page_order;
+
+ XGI_INFO
+ ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n",
+ page_block->phys_addr);
+ XGI_INFO
+ ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n",
+ page_block->virt_addr);
+
+ page = virt_to_page(page_addr);
+
+ //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p"
+ // "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n",
+ // i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr);
+
+ for (j = 0; j < count; j++, page++, lut_addr++) {
+ used_block->page_table[index + j].phys_addr =
+ __pa(page_address(page));
+ used_block->page_table[index + j].virt_addr =
+ (unsigned long)page_address(page);
+
+ XGI_INFO
+ ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n",
+ used_block->page_table[index + j].phys_addr);
+ XGI_INFO
+ ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n",
+ used_block->page_table[index + j].virt_addr);
+
+ *lut_addr = __pa(page_address(page));
+ XGI_INC_PAGE_COUNT(page);
+ XGILockPage(page);
+
+ if (temp) {
+ XGI_INFO
+ ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n",
+ __pa(page_address(page)), lut_addr, j,
+ *lut_addr);
+ temp--;
+ }
+ }
+
+ page_block = page_block->next;
+ page_count -= count;
+ index += count;
+ temp = 0;
+ }
+
+ used_block->owner = owner;
+ list_add(&used_block->list, &xgi_pcie_heap->used_list);
+
+#if defined(__i386__) || defined(__x86_64__)
+ asm volatile ("wbinvd":::"memory");
+#else
+ mb();
+#endif
+
+ /* Flush GART Table */
+ bWriteReg(0xB03F, 0x40);
+ bWriteReg(0xB03F, 0x00);
+
+ if (owner == PCIE_3D) {
+ xgi_pcie_vertex_block = used_block;
+ }
+
+ if (owner == PCIE_3D_CMDLIST) {
+ xgi_pcie_cmdlist_block = used_block;
+ }
+
+ if (owner == PCIE_3D_SCRATCHPAD) {
+ xgi_pcie_scratchpad_block = used_block;
+ }
+
+ XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n");
+ return (used_block);
+
+ fail:
+ xgi_pcie_block_stuff_free(used_block);
+ kmem_cache_free(xgi_pcie_cache_block, used_block);
+ return NULL;
+}
+
+/*
+ * Free the used block whose heap offset is @offset: release its backing
+ * system pages, give its space back to the heap, and coalesce it with
+ * any adjacent free block(s).  Returns the resulting free-list node, or
+ * NULL when no used block starts at @offset.  Caller must hold
+ * info->pcie_sem.
+ */
+static struct xgi_pcie_block *xgi_pcie_mem_free(struct xgi_info * info,
+					       unsigned long offset)
+{
+	struct xgi_pcie_block *used_block, *block;
+	struct xgi_pcie_block *prev, *next;
+	unsigned long upper, lower;
+
+	/* Find the used block that starts exactly at @offset. */
+	list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
+		if (block->offset == offset) {
+			break;
+		}
+	}
+
+	/* Loop ran to completion: cursor points at the list head, no match. */
+	if (&block->list == &xgi_pcie_heap->used_list) {
+		XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
+		return (NULL);
+	}
+
+	used_block = block;
+	XGI_INFO
+	    ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n",
+	     used_block, used_block->offset, used_block->size,
+	     used_block->bus_addr, used_block->hw_addr);
+
+	/* Release the system pages / page table backing this block. */
+	xgi_pcie_block_stuff_free(used_block);
+
+	/* update xgi_pcie_heap */
+	xgi_pcie_heap->max_freesize += used_block->size;
+
+	/* Look for free blocks immediately above (next) and below (prev). */
+	prev = next = NULL;
+	upper = used_block->offset + used_block->size;
+	lower = used_block->offset;
+
+	list_for_each_entry(block, &xgi_pcie_heap->free_list, list) {
+		if (block->offset == upper) {
+			next = block;
+		} else if ((block->offset + block->size) == lower) {
+			prev = block;
+		}
+	}
+
+	XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
+	list_del(&used_block->list);
+
+	/* Free neighbours on both sides: prev absorbs used_block and next. */
+	if (prev && next) {
+		prev->size += (used_block->size + next->size);
+		list_del(&next->list);
+		XGI_INFO("free node 0x%p\n", next);
+		kmem_cache_free(xgi_pcie_cache_block, next);
+		kmem_cache_free(xgi_pcie_cache_block, used_block);
+		next = NULL;
+		used_block = NULL;
+		return (prev);
+	}
+
+	/* Free neighbour below only: extend prev upward. */
+	if (prev) {
+		prev->size += used_block->size;
+		XGI_INFO("free node 0x%p\n", used_block);
+		kmem_cache_free(xgi_pcie_cache_block, used_block);
+		used_block = NULL;
+		return (prev);
+	}
+
+	/* Free neighbour above only: extend next downward. */
+	if (next) {
+		next->size += used_block->size;
+		next->offset = used_block->offset;
+		XGI_INFO("free node 0x%p\n", used_block);
+		kmem_cache_free(xgi_pcie_cache_block, used_block);
+		used_block = NULL;
+		return (next);
+	}
+
+	/* No adjacent free block: recycle the node onto the free list. */
+	used_block->bus_addr = 0;
+	used_block->hw_addr = 0;
+	used_block->page_count = 0;
+	used_block->page_order = 0;
+	list_add(&used_block->list, &xgi_pcie_heap->free_list);
+	XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
+		 used_block, used_block->offset, used_block->size);
+	return (used_block);
+}
+
+/*
+ * Allocate a block from the PCIe heap and report the result through
+ * @alloc.  On failure alloc->location is set to XGI_MEMLOC_INVALID and
+ * the size/address fields are zeroed.  For PCIE_3D / PCIE_3D_TEXTURE
+ * requests the owning process is recorded on xgi_mempid_list so the
+ * memory can be reclaimed per process.
+ */
+void xgi_pcie_alloc(struct xgi_info * info, struct xgi_mem_alloc * alloc,
+		    pid_t pid)
+{
+	struct xgi_pcie_block *block;
+
+	xgi_down(info->pcie_sem);
+	block = xgi_pcie_mem_alloc(info, alloc->size, alloc->owner);
+	xgi_up(info->pcie_sem);
+
+	if (block == NULL) {
+		alloc->location = XGI_MEMLOC_INVALID;
+		alloc->size = 0;
+		alloc->bus_addr = 0;
+		alloc->hw_addr = 0;
+		XGI_ERROR("PCIE RAM allocation failed\n");
+	} else {
+		XGI_INFO
+		    ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n",
+		     block->offset, block->bus_addr);
+		alloc->location = XGI_MEMLOC_NON_LOCAL;
+		alloc->size = block->size;
+		alloc->bus_addr = block->bus_addr;
+		alloc->hw_addr = block->hw_addr;
+
+		/*
+		   manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE.
+		   PCIE_3D request means a opengl process created.
+		   PCIE_3D_TEXTURE request means texture cannot alloc from fb.
+		 */
+		if ((alloc->owner == PCIE_3D)
+		    || (alloc->owner == PCIE_3D_TEXTURE)) {
+			struct xgi_mem_pid *mempid_block =
+			    kmalloc(sizeof(struct xgi_mem_pid), GFP_KERNEL);
+			if (!mempid_block) {
+				/* Was dereferenced below even on failure;
+				 * bail out instead of oopsing.  The PCIe
+				 * allocation itself is still valid. */
+				XGI_ERROR("mempid_block alloc failed\n");
+				return;
+			}
+			mempid_block->location = XGI_MEMLOC_NON_LOCAL;
+			if (alloc->owner == PCIE_3D)
+				mempid_block->bus_addr = 0xFFFFFFFF;	/*xgi_pcie_vertex_block has the address */
+			else
+				mempid_block->bus_addr = alloc->bus_addr;
+			mempid_block->pid = pid;
+
+			XGI_INFO
+			    ("Memory ProcessID add one pcie block pid:%ld successfully! \n",
+			     mempid_block->pid);
+			list_add(&mempid_block->list, &xgi_mempid_list);
+		}
+	}
+}
+
+/*
+ * Free the PCIe block at CPU address @bus_addr and drop its
+ * process-tracking entry.  The shared vertex block (owner PCIE_3D) is
+ * effectively reference-counted through xgi_mempid_list: entries with
+ * the sentinel bus_addr 0xFFFFFFFF are per-process users of the vertex
+ * block, and the block is only really freed once a single user remains.
+ */
+void xgi_pcie_free(struct xgi_info * info, unsigned long bus_addr)
+{
+	struct xgi_pcie_block *block;
+	unsigned long offset = bus_addr - info->pcie.base;
+	struct xgi_mem_pid *mempid_block;
+	struct xgi_mem_pid *mempid_freeblock = NULL;
+	char isvertex = 0;
+	int processcnt;
+
+	if (xgi_pcie_vertex_block
+	    && xgi_pcie_vertex_block->bus_addr == bus_addr)
+		isvertex = 1;
+
+	if (isvertex) {
+		/*check is there any other process using vertex */
+		processcnt = 0;
+
+		list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
+			if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
+			    && mempid_block->bus_addr == 0xFFFFFFFF) {
+				++processcnt;
+			}
+		}
+		/* Other processes still hold the vertex block: keep it. */
+		if (processcnt > 1) {
+			return;
+		}
+	}
+
+	xgi_down(info->pcie_sem);
+	block = xgi_pcie_mem_free(info, offset);
+	xgi_up(info->pcie_sem);
+
+	/* NOTE(review): failure is only logged; the mempid entry below is
+	 * still removed so bookkeeping stays consistent. */
+	if (block == NULL) {
+		XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
+	}
+
+	if (isvertex)
+		xgi_pcie_vertex_block = NULL;
+
+	/* manage mempid */
+	list_for_each_entry(mempid_block, &xgi_mempid_list, list) {
+		if (mempid_block->location == XGI_MEMLOC_NON_LOCAL
+		    && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF)
+			|| (!isvertex && mempid_block->bus_addr == bus_addr))) {
+			mempid_freeblock = mempid_block;
+			break;
+		}
+	}
+	if (mempid_freeblock) {
+		list_del(&mempid_freeblock->list);
+		XGI_INFO
+		    ("Memory ProcessID delete one pcie block pid:%ld successfully! \n",
+		     mempid_freeblock->pid);
+		kfree(mempid_freeblock);
+	}
+}
+
+/*
+ * given a bus address, find the pcie mem block
+ * uses the bus address as the key.
+ */
+struct xgi_pcie_block *xgi_find_pcie_block(struct xgi_info * info,
+					   unsigned long address)
+{
+	struct xgi_pcie_block *block;
+	int i;
+
+
+	list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
+		if (block->bus_addr == address) {
+			return block;
+		}
+
+		if (block->page_table) {
+			for (i = 0; i < block->page_count; i++) {
+				/* Bus addresses within a block are
+				 * contiguous in the aperture, so page i
+				 * starts at bus_addr + i * PAGE_SIZE.
+				 * (The loop previously recomputed the
+				 * same offset every iteration.) */
+				unsigned long offset =
+				    block->bus_addr + (i * PAGE_SIZE);
+				if ((address >= offset)
+				    && (address < (offset + PAGE_SIZE))) {
+					return block;
+				}
+			}
+		}
+	}
+
+	XGI_ERROR("could not find map for vm 0x%lx\n", address);
+
+	return NULL;
+}
+
+/**
+ * xgi_find_pcie_virt
+ * @address: GE HW address
+ *
+ * Returns the CPU virtual address backing @address, or NULL when the
+ * address does not fall inside any used block.  The translation goes
+ * through the block's per-page page_table because the CPU pages of one
+ * block need not be contiguous in the CPU address space.
+ */
+void *xgi_find_pcie_virt(struct xgi_info * info, unsigned long address)
+{
+	struct xgi_pcie_block *block;
+	const unsigned long offset_in_page = address & (PAGE_SIZE - 1);
+
+	XGI_INFO("begin (address = 0x%lx, offset_in_page = %lu)\n",
+		 address, offset_in_page);
+
+	list_for_each_entry(block, &xgi_pcie_heap->used_list, list) {
+		XGI_INFO("block = 0x%p (hw_addr = 0x%lx, size=%lu)\n",
+			 block, block->hw_addr, block->size);
+
+		if ((address >= block->hw_addr)
+		    && (address < (block->hw_addr + block->size))) {
+			/* Page index within the block, then re-apply the
+			 * in-page offset on the mapped CPU page. */
+			const unsigned long loc_in_pagetable =
+			    (address - block->hw_addr) >> PAGE_SHIFT;
+			void *const ret =
+			    (void *)(block->page_table[loc_in_pagetable].
+				     virt_addr + offset_in_page);
+
+			XGI_INFO("PAGE_SHIFT = %d\n", PAGE_SHIFT);
+			XGI_INFO("block->page_table[0x%lx].virt_addr = 0x%lx\n",
+				 loc_in_pagetable,
+				 block->page_table[loc_in_pagetable].virt_addr);
+			XGI_INFO("return 0x%p\n", ret);
+
+			return ret;
+		}
+	}
+
+	XGI_ERROR("could not find map for vm 0x%lx\n", address);
+	return NULL;
+}
+
+/*
+ * Debug helper: resolve the GE hw @address to its CPU virtual address,
+ * then read and overwrite the first word there with a test pattern.
+ */
+void xgi_test_rwinkernel(struct xgi_info * info, unsigned long address)
+{
+	unsigned long *virtaddr = 0;
+	if (address == 0) {
+		XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n");
+		return;
+	}
+
+	virtaddr = (unsigned long *)xgi_find_pcie_virt(info, address);
+
+	XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address);
+	XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr);
+
+	/* Check BEFORE dereferencing: the original read *virtaddr prior
+	 * to the NULL test, oopsing when no mapping exists. */
+	if (virtaddr == NULL) {
+		return;
+	}
+
+	XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr);
+	*virtaddr = 0x00f00fff;
+	XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr);
+}
diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h
new file mode 100644
index 00000000..b66d6a28
--- /dev/null
+++ b/linux-core/xgi_pcie.h
@@ -0,0 +1,68 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_PCIE_H_
+#define _XGI_PCIE_H_
+
+#ifndef XGI_PCIE_ALLOC_MAX_ORDER
+#define XGI_PCIE_ALLOC_MAX_ORDER 1 /* 8K in Kernel 2.4.* */
+#endif
+
+/* One physically-contiguous run of system pages backing (part of) a
+ * PCIe block; a block chains several of these via @next. */
+struct xgi_page_block {
+	struct xgi_page_block *next;
+	unsigned long phys_addr;
+	unsigned long virt_addr;
+	unsigned long page_count;
+	unsigned long page_order;
+};
+
+/* A region of the PCIe (GART) aperture, linked on the heap's free or
+ * used list.  bus_addr and hw_addr are both info->pcie.base + offset. */
+struct xgi_pcie_block {
+	struct list_head list;
+	unsigned long offset;	/* block's offset in pcie memory, begin from 0 */
+	unsigned long size;	/* The block size. */
+	unsigned long bus_addr;	/* CPU access address/bus address */
+	unsigned long hw_addr;	/* GE access address */
+
+	unsigned long page_count;
+	unsigned long page_order;
+	struct xgi_page_block *page_block;
+	struct xgi_pte *page_table;	/* list of physical pages allocated */
+
+	atomic_t use_count;
+	enum PcieOwner owner;
+	unsigned long processID;
+};
+
+/* Heap bookkeeping for the PCIe aperture allocator. */
+struct xgi_pcie_heap {
+	struct list_head free_list;
+	struct list_head used_list;
+	struct list_head sort_list;
+	unsigned long max_freesize;
+};
+
+#endif
diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h
new file mode 100644
index 00000000..0e54e7d8
--- /dev/null
+++ b/linux-core/xgi_regs.h
@@ -0,0 +1,404 @@
+
+/****************************************************************************
+ * Copyright (C) 2003-2006 by XGI Technology, Taiwan.
+ * *
+ * All Rights Reserved. *
+ * *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation on the rights to use, copy, modify, merge,
+ * publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so,
+ * subject to the following conditions:
+ * *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ * *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL XGI AND/OR
+ * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ ***************************************************************************/
+
+#ifndef _XGI_REGS_H_
+#define _XGI_REGS_H_
+
+#ifndef XGI_MMIO
+#define XGI_MMIO 1
+#endif
+
+#if XGI_MMIO
+#define OUTB(port, value) writeb(value, info->mmio.vbase + port)
+#define INB(port) readb(info->mmio.vbase + port)
+#define OUTW(port, value) writew(value, info->mmio.vbase + port)
+#define INW(port) readw(info->mmio.vbase + port)
+#define OUTDW(port, value) writel(value, info->mmio.vbase + port)
+#define INDW(port) readl(info->mmio.vbase + port)
+#else
+#define OUTB(port, value) outb(value, port)
+#define INB(port) inb(port)
+#define OUTW(port, value) outw(value, port)
+#define INW(port) inw(port)
+#define OUTDW(port, value) outl(value, port)
+#define INDW(port) inl(port)
+#endif
+
+/* Hardware access functions */
+/* Hardware access functions */
+/*
+ * Byte accessors for the VGA-style indexed register banks:
+ *   3C4/3C5 sequencer (SR), 3D4/3D5 CRT controller (CR),
+ *   3CE/3CF graphics controller (GR).
+ * Each access writes the index to the address port, then transfers the
+ * data byte through the data port.
+ */
+static inline void OUT3C5B(struct xgi_info * info, u8 index, u8 data)
+{
+	OUTB(0x3C4, index);
+	OUTB(0x3C5, data);
+}
+
+static inline void OUT3X5B(struct xgi_info * info, u8 index, u8 data)
+{
+	OUTB(0x3D4, index);
+	OUTB(0x3D5, data);
+}
+
+static inline void OUT3CFB(struct xgi_info * info, u8 index, u8 data)
+{
+	OUTB(0x3CE, index);
+	OUTB(0x3CF, data);
+}
+
+static inline u8 IN3C5B(struct xgi_info * info, u8 index)
+{
+	volatile u8 data = 0;
+	OUTB(0x3C4, index);
+	data = INB(0x3C5);
+	return data;
+}
+
+static inline u8 IN3X5B(struct xgi_info * info, u8 index)
+{
+	volatile u8 data = 0;
+	OUTB(0x3D4, index);
+	data = INB(0x3D5);
+	return data;
+}
+
+static inline u8 IN3CFB(struct xgi_info * info, u8 index)
+{
+	volatile u8 data = 0;
+	OUTB(0x3CE, index);
+	data = INB(0x3CF);
+	return data;
+}
+
+/*
+ * "Word" variants of the indexed accessors.
+ * NOTE(review): despite the W suffix these still perform byte-wide
+ * transfers — a u16 @data is truncated by OUTB, the IN3*W readers
+ * return u8, and OUT3CFW even takes u8 — confirm whether genuine
+ * 16-bit index/data accesses were intended.
+ */
+static inline void OUT3C5W(struct xgi_info * info, u8 index, u16 data)
+{
+	OUTB(0x3C4, index);
+	OUTB(0x3C5, data);
+}
+
+static inline void OUT3X5W(struct xgi_info * info, u8 index, u16 data)
+{
+	OUTB(0x3D4, index);
+	OUTB(0x3D5, data);
+}
+
+static inline void OUT3CFW(struct xgi_info * info, u8 index, u8 data)
+{
+	OUTB(0x3CE, index);
+	OUTB(0x3CF, data);
+}
+
+static inline u8 IN3C5W(struct xgi_info * info, u8 index)
+{
+	volatile u8 data = 0;
+	OUTB(0x3C4, index);
+	data = INB(0x3C5);
+	return data;
+}
+
+static inline u8 IN3X5W(struct xgi_info * info, u8 index)
+{
+	volatile u8 data = 0;
+	OUTB(0x3D4, index);
+	data = INB(0x3D5);
+	return data;
+}
+
+static inline u8 IN3CFW(struct xgi_info * info, u8 index)
+{
+	volatile u8 data = 0;
+	OUTB(0x3CE, index);
+	data = INB(0x3CF);
+	return data;
+}
+
+/* Read an attribute-controller register.  The INB(0x3DA) resets the
+ * ATC index/data flip-flop so the following 3C0 write is an index. */
+static inline u8 readAttr(struct xgi_info * info, u8 index)
+{
+	INB(0x3DA);		/* flip-flop to index */
+	OUTB(0x3C0, index);
+	return INB(0x3C1);
+}
+
+/* Write an attribute-controller register: index then data, both
+ * through port 3C0 after resetting the flip-flop. */
+static inline void writeAttr(struct xgi_info * info, u8 index, u8 value)
+{
+	INB(0x3DA);		/* flip-flop to index */
+	OUTB(0x3C0, index);
+	OUTB(0x3C0, value);
+}
+
+/*
+ * Graphic engine register (2d/3d) acessing interface
+ * Plain volatile loads/stores into the MMIO window at info->mmio.vbase.
+ */
+static inline void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data)
+{
+	/* Jong 05/25/2006 */
+	/* NOTE(review): debug-only tracing; "%lx" with u32 arguments is a
+	 * format/argument mismatch — should be "%x" or casts. */
+	XGI_INFO("Jong-WriteRegDWord()-Begin \n");
+	XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n",
+		 info->mmio.vbase);
+	XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr);
+	XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data);
+	/* return; */
+
+	*(volatile u32 *)(info->mmio.vbase + addr) = (data);
+	XGI_INFO("Jong-WriteRegDWord()-End \n");
+}
+
+static inline void WriteRegWord(struct xgi_info * info, u32 addr, u16 data)
+{
+	*(volatile u16 *)(info->mmio.vbase + addr) = (data);
+}
+
+static inline void WriteRegByte(struct xgi_info * info, u32 addr, u8 data)
+{
+	*(volatile u8 *)(info->mmio.vbase + addr) = (data);
+}
+
+static inline u32 ReadRegDWord(struct xgi_info * info, u32 addr)
+{
+	volatile u32 data;
+	data = *(volatile u32 *)(info->mmio.vbase + addr);
+	return data;
+}
+
+static inline u16 ReadRegWord(struct xgi_info * info, u32 addr)
+{
+	volatile u16 data;
+	data = *(volatile u16 *)(info->mmio.vbase + addr);
+	return data;
+}
+
+static inline u8 ReadRegByte(struct xgi_info * info, u32 addr)
+{
+	volatile u8 data;
+	data = *(volatile u8 *)(info->mmio.vbase + addr);
+	return data;
+}
+
+#if 0
+extern void OUT3C5B(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3X5B(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3CFB(struct xgi_info * info, u8 index, u8 data);
+extern u8 IN3C5B(struct xgi_info * info, u8 index);
+extern u8 IN3X5B(struct xgi_info * info, u8 index);
+extern u8 IN3CFB(struct xgi_info * info, u8 index);
+extern void OUT3C5W(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3X5W(struct xgi_info * info, u8 index, u8 data);
+extern void OUT3CFW(struct xgi_info * info, u8 index, u8 data);
+extern u8 IN3C5W(struct xgi_info * info, u8 index);
+extern u8 IN3X5W(struct xgi_info * info, u8 index);
+extern u8 IN3CFW(struct xgi_info * info, u8 index);
+
+extern void WriteRegDWord(struct xgi_info * info, u32 addr, u32 data);
+extern void WriteRegWord(struct xgi_info * info, u32 addr, u16 data);
+extern void WriteRegByte(struct xgi_info * info, u32 addr, u8 data);
+extern u32 ReadRegDWord(struct xgi_info * info, u32 addr);
+extern u16 ReadRegWord(struct xgi_info * info, u32 addr);
+extern u8 ReadRegByte(struct xgi_info * info, u32 addr);
+
+extern void EnableProtect();
+extern void DisableProtect();
+#endif
+
+#define Out(port, data) OUTB(port, data)
+#define bOut(port, data) OUTB(port, data)
+#define wOut(port, data) OUTW(port, data)
+#define dwOut(port, data) OUTDW(port, data)
+
+#define Out3x5(index, data) OUT3X5B(info, index, data)
+#define bOut3x5(index, data) OUT3X5B(info, index, data)
+#define wOut3x5(index, data) OUT3X5W(info, index, data)
+
+#define Out3c5(index, data) OUT3C5B(info, index, data)
+#define bOut3c5(index, data) OUT3C5B(info, index, data)
+#define wOut3c5(index, data) OUT3C5W(info, index, data)
+
+#define Out3cf(index, data) OUT3CFB(info, index, data)
+#define bOut3cf(index, data) OUT3CFB(info, index, data)
+#define wOut3cf(index, data) OUT3CFW(info, index, data)
+
+#define In(port) INB(port)
+#define bIn(port) INB(port)
+#define wIn(port) INW(port)
+#define dwIn(port) INDW(port)
+
+#define In3x5(index) IN3X5B(info, index)
+#define bIn3x5(index) IN3X5B(info, index)
+#define wIn3x5(index) IN3X5W(info, index)
+
+#define In3c5(index) IN3C5B(info, index)
+#define bIn3c5(index) IN3C5B(info, index)
+#define wIn3c5(index) IN3C5W(info, index)
+
+#define In3cf(index) IN3CFB(info, index)
+#define bIn3cf(index) IN3CFB(info, index)
+#define wIn3cf(index) IN3CFW(info, index)
+
+#define dwWriteReg(addr, data) WriteRegDWord(info, addr, data)
+#define wWriteReg(addr, data) WriteRegWord(info, addr, data)
+#define bWriteReg(addr, data) WriteRegByte(info, addr, data)
+#define dwReadReg(addr) ReadRegDWord(info, addr)
+#define wReadReg(addr) ReadRegWord(info, addr)
+#define bReadReg(addr) ReadRegByte(info, addr)
+
+/* Write the SR11 register-protection key.
+ * NOTE(review): both protect_all and unprotect_all write 0x92, which in
+ * xgi_enable_mmio/xgi_disable_mmio is the UNLOCK value — confirm the
+ * intended "protect" value; as written both functions unprotect. */
+static inline void xgi_protect_all(struct xgi_info * info)
+{
+	OUTB(0x3C4, 0x11);
+	OUTB(0x3C5, 0x92);
+}
+
+/* Unlock the extended registers (SR11 = 0x92). */
+static inline void xgi_unprotect_all(struct xgi_info * info)
+{
+	OUTB(0x3C4, 0x11);
+	OUTB(0x3C5, 0x92);
+}
+
+/* Turn on MMIO register access.  Uses legacy port I/O (outb/inb) because
+ * MMIO is not available yet; saves the SR11 protection byte, sets
+ * CR3A bit 5 and CR39 bit 0, then restores the saved protection value
+ * through the now-working MMIO path. */
+static inline void xgi_enable_mmio(struct xgi_info * info)
+{
+	u8 protect = 0;
+
+	/* Unprotect registers */
+	outb(0x11, 0x3C4);
+	protect = inb(0x3C5);
+	outb(0x92, 0x3C5);
+
+	outb(0x3A, 0x3D4);
+	outb(inb(0x3D5) | 0x20, 0x3D5);
+
+	/* Enable MMIO */
+	outb(0x39, 0x3D4);
+	outb(inb(0x3D5) | 0x01, 0x3D5);
+
+	OUTB(0x3C4, 0x11);
+	OUTB(0x3C5, protect);
+}
+
+/* Turn off MMIO register access: clear CR39 bit 0 via MMIO, then restore
+ * the saved SR11 protection byte through port I/O (MMIO is gone). */
+static inline void xgi_disable_mmio(struct xgi_info * info)
+{
+	u8 protect = 0;
+
+	/* unprotect registers */
+	OUTB(0x3C4, 0x11);
+	protect = INB(0x3C5);
+	OUTB(0x3C5, 0x92);
+
+	/* Disable MMIO access */
+	OUTB(0x3D4, 0x39);
+	OUTB(0x3D5, INB(0x3D5) & 0xFE);
+
+	/* Protect registers */
+	outb(0x11, 0x3C4);
+	outb(protect, 0x3C5);
+}
+
+/* Bring up the graphics engine: unlock registers, pulse the CR36 reset
+ * sequence for the 2D/3D engines, then leave both enabled.  The
+ * bIn(0x36) read loops serve as short settle delays between steps. */
+static inline void xgi_enable_ge(struct xgi_info * info)
+{
+	unsigned char bOld3cf2a = 0;
+	int wait = 0;
+
+	// Enable GE
+	OUTW(0x3C4, 0x9211);
+
+	// Save and close dynamic gating
+	bOld3cf2a = bIn3cf(0x2a);
+	bOut3cf(0x2a, bOld3cf2a & 0xfe);
+
+	// Reset both 3D and 2D engine
+	bOut3x5(0x36, 0x84);
+	wait = 10;
+	while (wait--) {
+		bIn(0x36);
+	}
+	bOut3x5(0x36, 0x94);
+	wait = 10;
+	while (wait--) {
+		bIn(0x36);
+	}
+	bOut3x5(0x36, 0x84);
+	wait = 10;
+	while (wait--) {
+		bIn(0x36);
+	}
+	// Enable 2D engine only
+	bOut3x5(0x36, 0x80);
+
+	// Enable 2D+3D engine
+	bOut3x5(0x36, 0x84);
+
+	// Restore dynamic gating
+	bOut3cf(0x2a, bOld3cf2a);
+}
+
+/* Shut down the graphics engine: run the same CR36 reset pulse sequence
+ * and leave both engines disabled (CR36 = 0). */
+static inline void xgi_disable_ge(struct xgi_info * info)
+{
+	int wait = 0;
+
+	// Reset both 3D and 2D engine
+	bOut3x5(0x36, 0x84);
+
+	wait = 10;
+	while (wait--) {
+		bIn(0x36);
+	}
+	bOut3x5(0x36, 0x94);
+
+	wait = 10;
+	while (wait--) {
+		bIn(0x36);
+	}
+	bOut3x5(0x36, 0x84);
+
+	wait = 10;
+	while (wait--) {
+		bIn(0x36);
+	}
+
+	// Disable 2D engine only
+	bOut3x5(0x36, 0);
+}
+
+/* Enable the DVI hot-plug interrupt: pulse GR39 bit 0 (clear pending
+ * state) then set GR39 bit 1 (interrupt enable). */
+static inline void xgi_enable_dvi_interrupt(struct xgi_info * info)
+{
+	Out3cf(0x39, In3cf(0x39) & ~0x01);	//Set 3cf.39 bit 0 to 0
+	Out3cf(0x39, In3cf(0x39) | 0x01);	//Set 3cf.39 bit 0 to 1
+	Out3cf(0x39, In3cf(0x39) | 0x02);
+}
+/* Disable the DVI interrupt (clear GR39 bit 1). */
+static inline void xgi_disable_dvi_interrupt(struct xgi_info * info)
+{
+	Out3cf(0x39, In3cf(0x39) & ~0x02);
+}
+
+/* Enable the CRT1 interrupt: pulse GR3D bit 2 (clear pending state)
+ * then set GR3D bit 3 (interrupt enable). */
+static inline void xgi_enable_crt1_interrupt(struct xgi_info * info)
+{
+	Out3cf(0x3d, In3cf(0x3d) | 0x04);
+	Out3cf(0x3d, In3cf(0x3d) & ~0x04);
+	Out3cf(0x3d, In3cf(0x3d) | 0x08);
+}
+
+/* Disable the CRT1 interrupt (clear GR3D bit 3). */
+static inline void xgi_disable_crt1_interrupt(struct xgi_info * info)
+{
+	Out3cf(0x3d, In3cf(0x3d) & ~0x08);
+}
+
+#endif