diff options
| -rw-r--r-- | linux-core/xgi_cmdlist.c | 705 | ||||
| -rw-r--r-- | linux-core/xgi_cmdlist.h | 155 | ||||
| -rw-r--r-- | linux-core/xgi_drv.c | 3174 | ||||
| -rw-r--r-- | linux-core/xgi_drv.h | 728 | ||||
| -rw-r--r-- | linux-core/xgi_fb.c | 1019 | ||||
| -rw-r--r-- | linux-core/xgi_fb.h | 141 | ||||
| -rw-r--r-- | linux-core/xgi_linux.h | 1187 | ||||
| -rw-r--r-- | linux-core/xgi_misc.c | 1287 | ||||
| -rw-r--r-- | linux-core/xgi_misc.h | 96 | ||||
| -rw-r--r-- | linux-core/xgi_pcie.c | 2091 | ||||
| -rw-r--r-- | linux-core/xgi_pcie.h | 146 | ||||
| -rw-r--r-- | linux-core/xgi_regs.h | 814 | ||||
| -rw-r--r-- | linux-core/xgi_types.h | 135 | 
13 files changed, 5765 insertions, 5913 deletions
| diff --git a/linux-core/xgi_cmdlist.c b/linux-core/xgi_cmdlist.c index 024b021c..e00ea228 100644 --- a/linux-core/xgi_cmdlist.c +++ b/linux-core/xgi_cmdlist.c @@ -1,348 +1,357 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -
 -#include "xgi_types.h"
 -#include "xgi_linux.h"
 -#include "xgi_drv.h"
 -#include "xgi_regs.h"
 -#include "xgi_misc.h"
 -#include "xgi_cmdlist.h"
 -
 -
 -
 -U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] =
 -{
 -    0x10000000,     // 3D Type Begin, Invalid
 -    0x80000004,     // Length = 4;
 -    0x00000000,
 -    0x00000000
 -};
 -
 -U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] =
 -{
 -	FLUSH_2D,
 -	FLUSH_2D,
 -	FLUSH_2D,
 -	FLUSH_2D
 -};
 -
 -xgi_cmdring_info_t s_cmdring;
 -
 -static void addFlush2D(xgi_info_t *info);
 -static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo);
 -static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter);
 -static void xgi_cmdlist_reset(void);
 -
 -int xgi_cmdlist_initialize(xgi_info_t *info, U32 size)
 -{
 -    //xgi_mem_req_t mem_req;
 -    xgi_mem_alloc_t mem_alloc;
 -
 -    //mem_req.size = size;
 -
 -    xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc);
 -
 -    if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0))
 -    {
 -        return -1;
 -    }
 -
 -    s_cmdring._cmdRingSize = mem_alloc.size;
 -    s_cmdring._cmdRingBuffer = mem_alloc.hw_addr;
 -    s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr;
 -    s_cmdring._lastBatchStartAddr = 0;
 -    s_cmdring._cmdRingOffset = 0;
 -
 -    return 1;
 -}
 -
 -void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t *pCmdInfo)
 -{
 -    U32 beginPort;
 -    /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/
 -
 -	/* Jong 05/25/2006 */
 -	/* return; */
 -
 -    beginPort = getCurBatchBeginPort(pCmdInfo);
 -    XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); 
 -
 -	/* Jong 05/25/2006 */
 -	/* return; */
 -
 -    if (s_cmdring._lastBatchStartAddr == 0)
 -    {
 -        U32 portOffset;
 -
 -		/* Jong 06/13/2006; remove marked for system hang test */
 -        /* xgi_waitfor_pci_idle(info); */
 -
 -		/* Jong 06132006; BASE_3D_ENG=0x2800 */
 -		/* beginPort: 2D: 0x30 */
 -        portOffset = BASE_3D_ENG + beginPort;
 -
 -        // Enable PCI Trigger Mode
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); 
 -
 -		/* Jong 05/25/2006 */
 -		/* return; */
 -
 -		/* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */
 -	    XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", M2REG_AUTO_LINK_SETTING_ADDRESS); 
 -	    XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", M2REG_CLEAR_COUNTERS_MASK); 
 -	    XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); 
 -	    XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", M2REG_PCI_TRIGGER_MODE_MASK); 
 -
 -		/* Jong 06/14/2006; 0x400001a */
 -	    XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", 
 -			(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK); 
 -        dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
 -                   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
 -                   M2REG_CLEAR_COUNTERS_MASK |
 -                   0x08 |
 -                   M2REG_PCI_TRIGGER_MODE_MASK);
 -
 -		/* Jong 05/25/2006 */
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); 
 -		/* return; */ /* OK */
 -
 -		/* Jong 06/14/2006; 0x400000a */
 -	    XGI_INFO("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", 
 -			(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK); 
 -        dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS,
 -                   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) |
 -                   0x08 |
 -                   M2REG_PCI_TRIGGER_MODE_MASK);
 -
 -        // Send PCI begin command
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); 
 -		/* return; */
 -
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", portOffset);
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); 
 -
 -		/* beginPort = 48; */
 -		/* 0xc100000 */
 -        dwWriteReg(portOffset, (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID);
 -	    XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort<<22)); 
 -	    XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); 
 -	    XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n",  pCmdInfo->_curDebugID); 
 -	    XGI_INFO("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID); 
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); 
 -		/* return; */ /* OK */
 -
 -		/* 0x80000024 */
 -        dwWriteReg(portOffset+4, BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize);
 -	    XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n",  BEGIN_LINK_ENABLE_MASK); 
 -	    XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n",  pCmdInfo->_firstSize); 
 -	    XGI_INFO("Jong-  BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n",   BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); 
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); 
 -
 -        /* 0x1010000 */
 -		dwWriteReg(portOffset+8, (pCmdInfo->_firstBeginAddr >> 4));
 -	    XGI_INFO("Jong-  pCmdInfo->_firstBeginAddr=0x%lx \n",   pCmdInfo->_firstBeginAddr); 
 -	    XGI_INFO("Jong-  (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n",   (pCmdInfo->_firstBeginAddr >> 4)); 
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); 
 -
 -		/* Jong 06/13/2006 */
 -        xgi_dump_register(info);
 -
 -		/* Jong 06/12/2006; system hang; marked for test */
 -        dwWriteReg(portOffset+12, 0); 
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); 
 -
 -		/* Jong 06/13/2006; remove marked for system hang test */
 -        /* xgi_waitfor_pci_idle(info); */
 -    }
 -    else
 -    {
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); 
 -        U32 *lastBatchVirtAddr;
 -
 -		/* Jong 05/25/2006 */
 -		/* return; */
 -
 -        if (pCmdInfo->_firstBeginType == BTYPE_3D)
 -        {
 -            addFlush2D(info);
 -        }
 -
 -        lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr);
 -
 -        lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize;
 -        lastBatchVirtAddr[2] = pCmdInfo->_firstBeginAddr >> 4;
 -        lastBatchVirtAddr[3] = 0;
 -        //barrier();
 -        lastBatchVirtAddr[0] = (beginPort<<22) + (BEGIN_VALID_MASK) + (0xffff & pCmdInfo->_curDebugID);
 -
 -		/* Jong 06/12/2006; system hang; marked for test */
 -        triggerHWCommandList(info, pCmdInfo->_beginCount); 
 -
 -	    XGI_INFO("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); 
 -    }
 -
 -    s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr;
 -    XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); 
 -}
 -
 -
 -/*
 -    state:      0 - console
 -                1 - graphic
 -                2 - fb
 -                3 - logout
 -*/
 -void xgi_state_change(xgi_info_t *info, xgi_state_info_t *pStateInfo)
 -{
 -#define STATE_CONSOLE   0
 -#define STATE_GRAPHIC   1
 -#define STATE_FBTERM    2
 -#define STATE_LOGOUT    3
 -#define STATE_REBOOT    4
 -#define STATE_SHUTDOWN  5
 -
 -    if ((pStateInfo->_fromState == STATE_GRAPHIC)
 -        && (pStateInfo->_toState == STATE_CONSOLE))
 -    {
 -        XGI_INFO("[kd] I see, now is to leaveVT\n");
 -        // stop to received batch
 -    }
 -    else if ((pStateInfo->_fromState == STATE_CONSOLE)
 -             && (pStateInfo->_toState == STATE_GRAPHIC))
 -    {
 -        XGI_INFO("[kd] I see, now is to enterVT\n");
 -        xgi_cmdlist_reset();
 -    }
 -    else if ((pStateInfo->_fromState == STATE_GRAPHIC)
 -             && ( (pStateInfo->_toState == STATE_LOGOUT)
 -                ||(pStateInfo->_toState == STATE_REBOOT)
 -                ||(pStateInfo->_toState == STATE_SHUTDOWN)))
 -    {
 -        XGI_INFO("[kd] I see, not is to exit from X\n");
 -        // stop to received batch
 -    }
 -    else
 -    {
 -        XGI_ERROR("[kd] Should not happen\n");
 -    }
 -
 -}
 -
 -void xgi_cmdlist_reset(void)
 -{
 -    s_cmdring._lastBatchStartAddr = 0;
 -    s_cmdring._cmdRingOffset = 0;
 -}
 -
 -void xgi_cmdlist_cleanup(xgi_info_t *info)
 -{
 -    if (s_cmdring._cmdRingBuffer != 0)
 -    {
 -        xgi_pcie_free(info, s_cmdring._cmdRingBusAddr);
 -        s_cmdring._cmdRingBuffer = 0;
 -        s_cmdring._cmdRingOffset = 0;
 -        s_cmdring._cmdRingSize = 0;
 -    }
 -}
 -
 -static void triggerHWCommandList(xgi_info_t *info, U32 triggerCounter)
 -{
 -    static U32 s_triggerID = 1;
 -
 -    //Fix me, currently we just trigger one time
 -    while (triggerCounter--)
 -    {
 -        dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS,
 -                   0x05000000 + (0xffff & s_triggerID++));
 -        // xgi_waitfor_pci_idle(info);
 -    }
 -}
 -
 -static U32 getCurBatchBeginPort(xgi_cmd_info_t *pCmdInfo)
 -{
 -    // Convert the batch type to begin port ID
 -    switch(pCmdInfo->_firstBeginType)
 -    {
 -    case BTYPE_2D:
 -        return 0x30;
 -    case BTYPE_3D:
 -        return 0x40;
 -    case BTYPE_FLIP:
 -        return 0x50;
 -    case BTYPE_CTRL:
 -        return 0x20;
 -    default:
 -        //ASSERT(0);
 -		return 0xff;
 -    }
 -}
 -
 -static void addFlush2D(xgi_info_t *info)
 -{
 -    U32 *flushBatchVirtAddr;
 -    U32 flushBatchHWAddr;
 -
 -    U32 *lastBatchVirtAddr;
 -
 -    /* check buf is large enough to contain a new flush batch */
 -    if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize)
 -    {
 -        s_cmdring._cmdRingOffset = 0;
 -    }
 -
 -    flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset;
 -    flushBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, flushBatchHWAddr);
 -
 -    /* not using memcpy for I assume the address is discrete */
 -    *(flushBatchVirtAddr + 0) = 0x10000000;
 -    *(flushBatchVirtAddr + 1) = 0x80000004;   /* size = 0x04 dwords */
 -    *(flushBatchVirtAddr + 2) = 0x00000000;
 -    *(flushBatchVirtAddr + 3) = 0x00000000;
 -    *(flushBatchVirtAddr + 4) = FLUSH_2D;
 -    *(flushBatchVirtAddr + 5) = FLUSH_2D;
 -    *(flushBatchVirtAddr + 6) = FLUSH_2D;
 -    *(flushBatchVirtAddr + 7) = FLUSH_2D;
 -
 -    // ASSERT(s_cmdring._lastBatchStartAddr != NULL);
 -    lastBatchVirtAddr = (U32*) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr);
 -
 -    lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08;
 -    lastBatchVirtAddr[2] = flushBatchHWAddr >> 4;
 -    lastBatchVirtAddr[3] = 0;
 -
 -    //barrier();
 -
 -    // BTYPE_CTRL & NO debugID
 -    lastBatchVirtAddr[0] = (0x20<<22) + (BEGIN_VALID_MASK);
 -
 -    triggerHWCommandList(info, 1);
 -
 -    s_cmdring._cmdRingOffset += 0x20;
 -    s_cmdring._lastBatchStartAddr = flushBatchHWAddr;
 -}
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +U32 s_emptyBegin[AGPCMDLIST_BEGIN_SIZE] = { +	0x10000000,		// 3D Type Begin, Invalid +	0x80000004,		// Length = 4; +	0x00000000, +	0x00000000 +}; + +U32 s_flush2D[AGPCMDLIST_FLUSH_CMD_LEN] = { +	FLUSH_2D, +	FLUSH_2D, +	FLUSH_2D, +	FLUSH_2D +}; + +xgi_cmdring_info_t s_cmdring; + +static void addFlush2D(xgi_info_t * info); +static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo); +static void triggerHWCommandList(xgi_info_t * info, U32 triggerCounter); +static void xgi_cmdlist_reset(void); + +int xgi_cmdlist_initialize(xgi_info_t * info, U32 size) +{ +	//xgi_mem_req_t mem_req; +	xgi_mem_alloc_t mem_alloc; + +	//mem_req.size = size; + +	xgi_pcie_alloc(info, size, PCIE_2D, &mem_alloc); + +	if ((mem_alloc.size == 0) && (mem_alloc.hw_addr == 0)) { +		return -1; +	} + +	s_cmdring._cmdRingSize = mem_alloc.size; +	s_cmdring._cmdRingBuffer = mem_alloc.hw_addr; +	s_cmdring._cmdRingBusAddr = mem_alloc.bus_addr; +	s_cmdring._lastBatchStartAddr = 0; +	s_cmdring._cmdRingOffset = 0; + +	return 1; +} + +void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo) +{ +	U32 beginPort; +    /** XGI_INFO("Jong-xgi_submit_cmdlist-Begin \n"); **/ + +	/* Jong 05/25/2006 */ +	/* return; */ + +	beginPort = getCurBatchBeginPort(pCmdInfo); +	XGI_INFO("Jong-xgi_submit_cmdlist-After getCurBatchBeginPort() \n"); + +	/* Jong 05/25/2006 */ +	/* return; */ + +	if (s_cmdring._lastBatchStartAddr == 0) { +		U32 portOffset; + +		/* Jong 06/13/2006; remove marked for system hang test */ +		/* xgi_waitfor_pci_idle(info); */ + +		/* Jong 06132006; BASE_3D_ENG=0x2800 */ +		/* beginPort: 2D: 0x30 */ +		portOffset = BASE_3D_ENG + beginPort; + +		// Enable PCI Trigger Mode +		XGI_INFO("Jong-xgi_submit_cmdlist-Enable PCI Trigger Mode \n"); + +		/* Jong 05/25/2006 */ +		/* 
return; */ + +		/* Jong 06/13/2006; M2REG_AUTO_LINK_SETTING_ADDRESS=0x10 */ +		XGI_INFO("Jong-M2REG_AUTO_LINK_SETTING_ADDRESS=0x%lx \n", +			 M2REG_AUTO_LINK_SETTING_ADDRESS); +		XGI_INFO("Jong-M2REG_CLEAR_COUNTERS_MASK=0x%lx \n", +			 M2REG_CLEAR_COUNTERS_MASK); +		XGI_INFO +		    ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)=0x%lx \n", +		     (M2REG_AUTO_LINK_SETTING_ADDRESS << 22)); +		XGI_INFO("Jong-M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n\n", +			 M2REG_PCI_TRIGGER_MODE_MASK); + +		/* Jong 06/14/2006; 0x400001a */ +		XGI_INFO +		    ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|M2REG_CLEAR_COUNTERS_MASK|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", +		     (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | +		     M2REG_CLEAR_COUNTERS_MASK | 0x08 | +		     M2REG_PCI_TRIGGER_MODE_MASK); +		dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, +			   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | +			   M2REG_CLEAR_COUNTERS_MASK | 0x08 | +			   M2REG_PCI_TRIGGER_MODE_MASK); + +		/* Jong 05/25/2006 */ +		XGI_INFO("Jong-xgi_submit_cmdlist-After dwWriteReg() \n"); +		/* return; *//* OK */ + +		/* Jong 06/14/2006; 0x400000a */ +		XGI_INFO +		    ("Jong-(M2REG_AUTO_LINK_SETTING_ADDRESS << 22)|0x08|M2REG_PCI_TRIGGER_MODE_MASK=0x%lx \n", +		     (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | +		     M2REG_PCI_TRIGGER_MODE_MASK); +		dwWriteReg(BASE_3D_ENG + M2REG_AUTO_LINK_SETTING_ADDRESS, +			   (M2REG_AUTO_LINK_SETTING_ADDRESS << 22) | 0x08 | +			   M2REG_PCI_TRIGGER_MODE_MASK); + +		// Send PCI begin command +		XGI_INFO("Jong-xgi_submit_cmdlist-Send PCI begin command \n"); +		/* return; */ + +		XGI_INFO("Jong-xgi_submit_cmdlist-portOffset=%d \n", +			 portOffset); +		XGI_INFO("Jong-xgi_submit_cmdlist-beginPort=%d \n", beginPort); + +		/* beginPort = 48; */ +		/* 0xc100000 */ +		dwWriteReg(portOffset, +			   (beginPort << 22) + (BEGIN_VALID_MASK) + +			   pCmdInfo->_curDebugID); +		XGI_INFO("Jong-(beginPort<<22)=0x%lx \n", (beginPort << 22)); +		
XGI_INFO("Jong-(BEGIN_VALID_MASK)=0x%lx \n", BEGIN_VALID_MASK); +		XGI_INFO("Jong- pCmdInfo->_curDebugID=0x%lx \n", +			 pCmdInfo->_curDebugID); +		XGI_INFO +		    ("Jong- (beginPort<<22) + (BEGIN_VALID_MASK) + pCmdInfo->_curDebugID=0x%lx \n", +		     (beginPort << 22) + (BEGIN_VALID_MASK) + +		     pCmdInfo->_curDebugID); +		XGI_INFO +		    ("Jong-xgi_submit_cmdlist-Send PCI begin command- After \n"); +		/* return; *//* OK */ + +		/* 0x80000024 */ +		dwWriteReg(portOffset + 4, +			   BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); +		XGI_INFO("Jong- BEGIN_LINK_ENABLE_MASK=0x%lx \n", +			 BEGIN_LINK_ENABLE_MASK); +		XGI_INFO("Jong- pCmdInfo->_firstSize=0x%lx \n", +			 pCmdInfo->_firstSize); +		XGI_INFO +		    ("Jong-  BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize=0x%lx \n", +		     BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize); +		XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-1 \n"); + +		/* 0x1010000 */ +		dwWriteReg(portOffset + 8, (pCmdInfo->_firstBeginAddr >> 4)); +		XGI_INFO("Jong-  pCmdInfo->_firstBeginAddr=0x%lx \n", +			 pCmdInfo->_firstBeginAddr); +		XGI_INFO("Jong-  (pCmdInfo->_firstBeginAddr >> 4)=0x%lx \n", +			 (pCmdInfo->_firstBeginAddr >> 4)); +		XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-2 \n"); + +		/* Jong 06/13/2006 */ +		xgi_dump_register(info); + +		/* Jong 06/12/2006; system hang; marked for test */ +		dwWriteReg(portOffset + 12, 0); +		XGI_INFO("Jong-xgi_submit_cmdlist-dwWriteReg-3 \n"); + +		/* Jong 06/13/2006; remove marked for system hang test */ +		/* xgi_waitfor_pci_idle(info); */ +	} else { +		XGI_INFO +		    ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 \n"); +		U32 *lastBatchVirtAddr; + +		/* Jong 05/25/2006 */ +		/* return; */ + +		if (pCmdInfo->_firstBeginType == BTYPE_3D) { +			addFlush2D(info); +		} + +		lastBatchVirtAddr = +		    (U32 *) xgi_find_pcie_virt(info, +					       s_cmdring._lastBatchStartAddr); + +		lastBatchVirtAddr[1] = +		    BEGIN_LINK_ENABLE_MASK + pCmdInfo->_firstSize; +		lastBatchVirtAddr[2] = 
pCmdInfo->_firstBeginAddr >> 4; +		lastBatchVirtAddr[3] = 0; +		//barrier(); +		lastBatchVirtAddr[0] = +		    (beginPort << 22) + (BEGIN_VALID_MASK) + +		    (0xffff & pCmdInfo->_curDebugID); + +		/* Jong 06/12/2006; system hang; marked for test */ +		triggerHWCommandList(info, pCmdInfo->_beginCount); + +		XGI_INFO +		    ("Jong-xgi_submit_cmdlist-s_cmdring._lastBatchStartAddr != 0 - End\n"); +	} + +	s_cmdring._lastBatchStartAddr = pCmdInfo->_lastBeginAddr; +	XGI_INFO("Jong-xgi_submit_cmdlist-End \n"); +} + +/* +    state:      0 - console +                1 - graphic +                2 - fb +                3 - logout +*/ +void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo) +{ +#define STATE_CONSOLE   0 +#define STATE_GRAPHIC   1 +#define STATE_FBTERM    2 +#define STATE_LOGOUT    3 +#define STATE_REBOOT    4 +#define STATE_SHUTDOWN  5 + +	if ((pStateInfo->_fromState == STATE_GRAPHIC) +	    && (pStateInfo->_toState == STATE_CONSOLE)) { +		XGI_INFO("[kd] I see, now is to leaveVT\n"); +		// stop to received batch +	} else if ((pStateInfo->_fromState == STATE_CONSOLE) +		   && (pStateInfo->_toState == STATE_GRAPHIC)) { +		XGI_INFO("[kd] I see, now is to enterVT\n"); +		xgi_cmdlist_reset(); +	} else if ((pStateInfo->_fromState == STATE_GRAPHIC) +		   && ((pStateInfo->_toState == STATE_LOGOUT) +		       || (pStateInfo->_toState == STATE_REBOOT) +		       || (pStateInfo->_toState == STATE_SHUTDOWN))) { +		XGI_INFO("[kd] I see, not is to exit from X\n"); +		// stop to received batch +	} else { +		XGI_ERROR("[kd] Should not happen\n"); +	} + +} + +void xgi_cmdlist_reset(void) +{ +	s_cmdring._lastBatchStartAddr = 0; +	s_cmdring._cmdRingOffset = 0; +} + +void xgi_cmdlist_cleanup(xgi_info_t * info) +{ +	if (s_cmdring._cmdRingBuffer != 0) { +		xgi_pcie_free(info, s_cmdring._cmdRingBusAddr); +		s_cmdring._cmdRingBuffer = 0; +		s_cmdring._cmdRingOffset = 0; +		s_cmdring._cmdRingSize = 0; +	} +} + +static void triggerHWCommandList(xgi_info_t * info, U32 
triggerCounter) +{ +	static U32 s_triggerID = 1; + +	//Fix me, currently we just trigger one time +	while (triggerCounter--) { +		dwWriteReg(BASE_3D_ENG + M2REG_PCI_TRIGGER_REGISTER_ADDRESS, +			   0x05000000 + (0xffff & s_triggerID++)); +		// xgi_waitfor_pci_idle(info); +	} +} + +static U32 getCurBatchBeginPort(xgi_cmd_info_t * pCmdInfo) +{ +	// Convert the batch type to begin port ID +	switch (pCmdInfo->_firstBeginType) { +	case BTYPE_2D: +		return 0x30; +	case BTYPE_3D: +		return 0x40; +	case BTYPE_FLIP: +		return 0x50; +	case BTYPE_CTRL: +		return 0x20; +	default: +		//ASSERT(0); +		return 0xff; +	} +} + +static void addFlush2D(xgi_info_t * info) +{ +	U32 *flushBatchVirtAddr; +	U32 flushBatchHWAddr; + +	U32 *lastBatchVirtAddr; + +	/* check buf is large enough to contain a new flush batch */ +	if ((s_cmdring._cmdRingOffset + 0x20) >= s_cmdring._cmdRingSize) { +		s_cmdring._cmdRingOffset = 0; +	} + +	flushBatchHWAddr = s_cmdring._cmdRingBuffer + s_cmdring._cmdRingOffset; +	flushBatchVirtAddr = (U32 *) xgi_find_pcie_virt(info, flushBatchHWAddr); + +	/* not using memcpy for I assume the address is discrete */ +	*(flushBatchVirtAddr + 0) = 0x10000000; +	*(flushBatchVirtAddr + 1) = 0x80000004;	/* size = 0x04 dwords */ +	*(flushBatchVirtAddr + 2) = 0x00000000; +	*(flushBatchVirtAddr + 3) = 0x00000000; +	*(flushBatchVirtAddr + 4) = FLUSH_2D; +	*(flushBatchVirtAddr + 5) = FLUSH_2D; +	*(flushBatchVirtAddr + 6) = FLUSH_2D; +	*(flushBatchVirtAddr + 7) = FLUSH_2D; + +	// ASSERT(s_cmdring._lastBatchStartAddr != NULL); +	lastBatchVirtAddr = +	    (U32 *) xgi_find_pcie_virt(info, s_cmdring._lastBatchStartAddr); + +	lastBatchVirtAddr[1] = BEGIN_LINK_ENABLE_MASK + 0x08; +	lastBatchVirtAddr[2] = flushBatchHWAddr >> 4; +	lastBatchVirtAddr[3] = 0; + +	//barrier(); + +	// BTYPE_CTRL & NO debugID +	lastBatchVirtAddr[0] = (0x20 << 22) + (BEGIN_VALID_MASK); + +	triggerHWCommandList(info, 1); + +	s_cmdring._cmdRingOffset += 0x20; +	s_cmdring._lastBatchStartAddr = flushBatchHWAddr; +} 
diff --git a/linux-core/xgi_cmdlist.h b/linux-core/xgi_cmdlist.h index 1b0c4965..5fe1de71 100644 --- a/linux-core/xgi_cmdlist.h +++ b/linux-core/xgi_cmdlist.h @@ -1,79 +1,76 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#ifndef _XGI_CMDLIST_H_
 -#define _XGI_CMDLIST_H_
 -
 -#define		ONE_BIT_MASK							0x1
 -#define		TWENTY_BIT_MASK							0xfffff
 -#define 	M2REG_FLUSH_2D_ENGINE_MASK				(ONE_BIT_MASK<<20)
 -#define 	M2REG_FLUSH_3D_ENGINE_MASK				TWENTY_BIT_MASK
 -#define 	M2REG_FLUSH_FLIP_ENGINE_MASK			(ONE_BIT_MASK<<21)
 -#define     BASE_3D_ENG                             0x2800
 -#define     M2REG_AUTO_LINK_SETTING_ADDRESS         0x10
 -#define 	M2REG_CLEAR_COUNTERS_MASK				(ONE_BIT_MASK<<4)
 -#define 	M2REG_PCI_TRIGGER_MODE_MASK				(ONE_BIT_MASK<<1)
 -#define 	BEGIN_VALID_MASK                        (ONE_BIT_MASK<<20)
 -#define 	BEGIN_LINK_ENABLE_MASK                  (ONE_BIT_MASK<<31)
 -#define     M2REG_PCI_TRIGGER_REGISTER_ADDRESS      0x14
 -
 -typedef enum
 -{
 -    FLUSH_2D			= M2REG_FLUSH_2D_ENGINE_MASK,
 -    FLUSH_3D			= M2REG_FLUSH_3D_ENGINE_MASK,
 -    FLUSH_FLIP			= M2REG_FLUSH_FLIP_ENGINE_MASK
 -}FLUSH_CODE;
 -
 -typedef enum
 -{
 -    AGPCMDLIST_SCRATCH_SIZE         = 0x100,
 -    AGPCMDLIST_BEGIN_SIZE           = 0x004,
 -    AGPCMDLIST_3D_SCRATCH_CMD_SIZE  = 0x004,
 -    AGPCMDLIST_2D_SCRATCH_CMD_SIZE  = 0x00c,
 -    AGPCMDLIST_FLUSH_CMD_LEN        = 0x004,
 -    AGPCMDLIST_DUMY_END_BATCH_LEN   = AGPCMDLIST_BEGIN_SIZE
 -}CMD_SIZE;
 -
 -typedef struct xgi_cmdring_info_s
 -{
 -	U32		_cmdRingSize;
 -	U32     _cmdRingBuffer;
 -    U32     _cmdRingBusAddr;
 -	U32     _lastBatchStartAddr;
 -    U32     _cmdRingOffset;
 -}xgi_cmdring_info_t;
 -
 -extern int xgi_cmdlist_initialize(xgi_info_t *info, U32 size);
 -
 -extern void xgi_submit_cmdlist(xgi_info_t *info, xgi_cmd_info_t * pCmdInfo);
 -
 -extern void xgi_state_change(xgi_info_t *info, xgi_state_info_t * pStateInfo);
 -
 -extern void xgi_cmdlist_cleanup(xgi_info_t *info);
 -
 -#endif /* _XGI_CMDLIST_H_ */
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#ifndef _XGI_CMDLIST_H_ +#define _XGI_CMDLIST_H_ + +#define		ONE_BIT_MASK							0x1 +#define		TWENTY_BIT_MASK							0xfffff +#define 	M2REG_FLUSH_2D_ENGINE_MASK				(ONE_BIT_MASK<<20) +#define 	M2REG_FLUSH_3D_ENGINE_MASK				TWENTY_BIT_MASK +#define 	M2REG_FLUSH_FLIP_ENGINE_MASK			(ONE_BIT_MASK<<21) +#define     BASE_3D_ENG                             0x2800 +#define     M2REG_AUTO_LINK_SETTING_ADDRESS         0x10 +#define 	M2REG_CLEAR_COUNTERS_MASK				(ONE_BIT_MASK<<4) +#define 	M2REG_PCI_TRIGGER_MODE_MASK				(ONE_BIT_MASK<<1) +#define 	BEGIN_VALID_MASK                        (ONE_BIT_MASK<<20) +#define 	BEGIN_LINK_ENABLE_MASK                  (ONE_BIT_MASK<<31) +#define     M2REG_PCI_TRIGGER_REGISTER_ADDRESS      0x14 + +typedef enum { +	FLUSH_2D = M2REG_FLUSH_2D_ENGINE_MASK, +	FLUSH_3D = M2REG_FLUSH_3D_ENGINE_MASK, +	FLUSH_FLIP = M2REG_FLUSH_FLIP_ENGINE_MASK +} FLUSH_CODE; + +typedef enum { +	AGPCMDLIST_SCRATCH_SIZE = 0x100, +	AGPCMDLIST_BEGIN_SIZE = 0x004, +	AGPCMDLIST_3D_SCRATCH_CMD_SIZE = 0x004, +	AGPCMDLIST_2D_SCRATCH_CMD_SIZE = 0x00c, +	AGPCMDLIST_FLUSH_CMD_LEN = 0x004, +	AGPCMDLIST_DUMY_END_BATCH_LEN = AGPCMDLIST_BEGIN_SIZE +} CMD_SIZE; + +typedef struct xgi_cmdring_info_s { +	U32 _cmdRingSize; +	U32 _cmdRingBuffer; +	U32 _cmdRingBusAddr; +	U32 _lastBatchStartAddr; +	U32 _cmdRingOffset; +} xgi_cmdring_info_t; + +extern int xgi_cmdlist_initialize(xgi_info_t * info, U32 size); + +extern void xgi_submit_cmdlist(xgi_info_t * info, xgi_cmd_info_t * pCmdInfo); + +extern void xgi_state_change(xgi_info_t * info, xgi_state_info_t * pStateInfo); + +extern void xgi_cmdlist_cleanup(xgi_info_t * info); + +#endif				/* _XGI_CMDLIST_H_ */ diff --git a/linux-core/xgi_drv.c b/linux-core/xgi_drv.c index 5e80d417..0c37d00e 100644 --- a/linux-core/xgi_drv.c +++ b/linux-core/xgi_drv.c @@ -1,1610 +1,1564 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -#include "xgi_types.h"
 -#include "xgi_linux.h"
 -#include "xgi_drv.h"
 -#include "xgi_regs.h"
 -#include "xgi_pcie.h"
 -#include "xgi_misc.h"
 -#include "xgi_cmdlist.h"
 -
 -/* for debug */
 -static int xgi_temp = 1;
 -/*
 - * global parameters
 - */
 -static struct xgi_dev {
 -    u16 vendor;
 -    u16 device;
 -    const char *name;
 -} xgidev_list[] = {
 -    {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5,      "XP5"},
 -    {PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47,     "XG47"},
 -    {0, 0, NULL}
 -};
 -
 -int xgi_major = XGI_DEV_MAJOR; /* xgi reserved major device number. */
 -
 -static int xgi_num_devices = 0;
 -
 -xgi_info_t xgi_devices[XGI_MAX_DEVICES];
 -
 -#if defined(XGI_PM_SUPPORT_APM)
 -static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 };
 -#endif
 -
 -/* add one for the control device */
 -xgi_info_t          xgi_ctl_device;
 -wait_queue_head_t   xgi_ctl_waitqueue;
 -
 -#ifdef CONFIG_PROC_FS
 -struct proc_dir_entry *proc_xgi;
 -#endif
 -
 -#ifdef CONFIG_DEVFS_FS
 -devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES];
 -#endif
 -
 -struct list_head xgi_mempid_list;
 -
 -/* xgi_ functions.. do not take a state device parameter  */
 -static int      xgi_post_vbios(xgi_ioctl_post_vbios_t *info);
 -static void     xgi_proc_create(void);
 -static void     xgi_proc_remove_all(struct proc_dir_entry *);
 -static void     xgi_proc_remove(void);
 -
 -/* xgi_kern_ functions, interfaces used by linux kernel */
 -int             xgi_kern_probe(struct pci_dev *, const struct pci_device_id *);
 -
 -unsigned int    xgi_kern_poll(struct file *, poll_table *);
 -int             xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long);
 -int             xgi_kern_mmap(struct file *, struct vm_area_struct *);
 -int             xgi_kern_open(struct inode *, struct file *);
 -int             xgi_kern_release(struct inode *inode, struct file *filp);
 -
 -void            xgi_kern_vma_open(struct vm_area_struct *vma);
 -void            xgi_kern_vma_release(struct vm_area_struct *vma);
 -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))
 -struct page     *xgi_kern_vma_nopage(struct vm_area_struct *vma,
 -                                     unsigned long address, int *type);
 -#else
 -struct page     *xgi_kern_vma_nopage(struct vm_area_struct *vma,
 -                                     unsigned long address, int write_access);
 -#endif
 -
 -int             xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *);
 -int             xgi_kern_read_status(char *, char **, off_t off, int, int *, void *);
 -int             xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *);
 -int             xgi_kern_read_version(char *, char **, off_t off, int, int *, void *);
 -
 -int             xgi_kern_ctl_open(struct inode *, struct file *);
 -int             xgi_kern_ctl_close(struct inode *, struct file *);
 -unsigned int    xgi_kern_ctl_poll(struct file *, poll_table *);
 -
 -void            xgi_kern_isr_bh(unsigned long);
 -irqreturn_t     xgi_kern_isr(int, void *, struct pt_regs *);
 -
 -static void     xgi_lock_init(xgi_info_t *info);
 -
 -#if defined(XGI_PM_SUPPORT_ACPI)
 -int             xgi_kern_acpi_standby(struct pci_dev *, u32);
 -int             xgi_kern_acpi_resume(struct pci_dev *);
 -#endif
 -
 -/*
 - * verify access to pci config space wasn't disabled behind our back
 - * unfortunately, XFree86 enables/disables memory access in pci config space at
 - * various times (such as restoring initial pci config space settings during vt
 - * switches or when doing mulicard). As a result, all of our register accesses
 - * are garbage at this point. add a check to see if access was disabled and
 - * reenable any such access.
 - */
 -#define XGI_CHECK_PCI_CONFIG(xgi) \
 -    xgi_check_pci_config(xgi, __LINE__)
 -
 -static inline void xgi_check_pci_config(xgi_info_t *info, int line)
 -{
 -    unsigned short cmd, flag = 0;
 -
 -    // don't do this on the control device, only the actual devices
 -    if (info->flags & XGI_FLAG_CONTROL)
 -        return;
 -
 -    pci_read_config_word(info->dev, PCI_COMMAND, &cmd);
 -    if (!(cmd & PCI_COMMAND_MASTER))
 -    {
 -        XGI_INFO("restoring bus mastering! (%d)\n", line);
 -        cmd |= PCI_COMMAND_MASTER;
 -        flag = 1;
 -    }
 -
 -    if (!(cmd & PCI_COMMAND_MEMORY))
 -    {
 -        XGI_INFO("restoring MEM access! (%d)\n", line);
 -        cmd |= PCI_COMMAND_MEMORY;
 -        flag = 1;
 -    }
 -
 -    if (flag)
 -        pci_write_config_word(info->dev, PCI_COMMAND, cmd);
 -}
 -
 -static int xgi_post_vbios(xgi_ioctl_post_vbios_t *info)
 -{
 -    return 1;
 -}
 -
 -/*
 - * struct pci_device_id {
 - *  unsigned int vendor, device;        // Vendor and device ID or PCI_ANY_ID
 - *  unsigned int subvendor, subdevice;  // Subsystem ID's or PCI_ANY_ID
 - *  unsigned int class, class_mask;     // (class,subclass,prog-if) triplet
 - *  unsigned long driver_data;          // Data private to the driver
 - * };
 - */
 -
 -static struct pci_device_id xgi_dev_table[] = {
 -    {
 -        .vendor      = PCI_VENDOR_ID_XGI,
 -        .device      = PCI_ANY_ID,
 -        .subvendor   = PCI_ANY_ID,
 -        .subdevice   = PCI_ANY_ID,
 -        .class       = (PCI_CLASS_DISPLAY_VGA << 8),
 -        .class_mask  = ~0,
 -    },
 -    { }
 -};
 -
 -/*
 - *  #define MODULE_DEVICE_TABLE(type,name) \
 - *      MODULE_GENERIC_TABLE(type##_device,name)
 - */
 - MODULE_DEVICE_TABLE(pci, xgi_dev_table);
 -
 -/*
 - * struct pci_driver {
 - *  struct list_head node;
 - *  char *name;
 - *  const struct pci_device_id *id_table;   // NULL if wants all devices
 - *  int  (*probe)(struct pci_dev *dev, const struct pci_device_id *id); // New device inserted
 - *  void (*remove)(struct pci_dev *dev);    // Device removed (NULL if not a hot-plug capable driver)
 - *  int  (*save_state)(struct pci_dev *dev, u32 state);     // Save Device Context
 - *  int  (*suspend)(struct pci_dev *dev, u32 state);        // Device suspended
 - *  int  (*resume)(struct pci_dev *dev);                    // Device woken up
 - *  int  (*enable_wake)(struct pci_dev *dev, u32 state, int enable);   // Enable wake event
 - * };
 - */
 -static struct pci_driver xgi_pci_driver = {
 -    .name     = "xgi",
 -    .id_table = xgi_dev_table,
 -    .probe    = xgi_kern_probe,
 -#if defined(XGI_SUPPORT_ACPI)
 -    .suspend  = xgi_kern_acpi_standby,
 -    .resume   = xgi_kern_acpi_resume,
 -#endif
 -};
 -
 -/*
 - * find xgi devices and set initial state
 - */
 -int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table)
 -{
 -    xgi_info_t *info;
 -
 -    if ((dev->vendor != PCI_VENDOR_ID_XGI)
 -      || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8)))
 -    {
 -        return -1;
 -    }
 -
 -    if (xgi_num_devices == XGI_MAX_DEVICES)
 -    {
 -        XGI_INFO("maximum device number (%d) reached!\n", xgi_num_devices);
 -        return -1;
 -    }
 -
 -    /* enable io, mem, and bus-mastering in pci config space */
 -    if (pci_enable_device(dev) != 0)
 -    {
 -        XGI_INFO("pci_enable_device failed, aborting\n");
 -        return -1;
 -    }
 -
 -    XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices);
 -
 -    pci_set_master(dev);
 -
 -    info                = &xgi_devices[xgi_num_devices];
 -    info->dev           = dev;
 -    info->vendor_id     = dev->vendor;
 -    info->device_id     = dev->device;
 -    info->bus           = dev->bus->number;
 -    info->slot          = PCI_SLOT((dev)->devfn);
 -
 -    xgi_lock_init(info);
 -
 -    info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1);
 -    info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1);
 -
 -    /* check IO region */
 -    if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi"))
 -    {
 -        XGI_ERROR("cannot reserve MMIO memory\n");
 -        goto error_disable_dev;
 -    }
 -
 -    XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base);
 -    XGI_INFO("info->mmio.size: 0x%lx \n", info->mmio.size);
 -
 -    info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base,
 -                                                        info->mmio.size);
 -    if (!info->mmio.vbase)
 -    {
 -        release_mem_region(info->mmio.base, info->mmio.size);
 -        XGI_ERROR("info->mmio.vbase failed\n");
 -        goto error_disable_dev;
 -    }
 -    xgi_enable_mmio(info);
 -
 -    //xgi_enable_ge(info);
 -
 -    XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase);
 -
 -    info->fb.base   = XGI_PCI_RESOURCE_START(dev, 0);
 -    info->fb.size   = XGI_PCI_RESOURCE_SIZE(dev, 0);
 -
 -    XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base);
 -    XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);
 -
 -    info->fb.size   = bIn3cf(0x54) * 8 * 1024 * 1024;
 -    XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size);
 -
 -    /* check frame buffer region
 -    if (!request_mem_region(info->fb.base, info->fb.size, "xgi"))
 -    {
 -        release_mem_region(info->mmio.base, info->mmio.size);
 -        XGI_ERROR("cannot reserve frame buffer memory\n");
 -        goto error_disable_dev;
 -    }
 -
 -
 -    info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base,
 -                                                      info->fb.size);
 -
 -    if (!info->fb.vbase)
 -    {
 -        release_mem_region(info->mmio.base, info->mmio.size);
 -        release_mem_region(info->fb.base, info->fb.size);
 -        XGI_ERROR("info->fb.vbase failed\n");
 -        goto error_disable_dev;
 -    }
 -    */
 -    info->fb.vbase = NULL;
 -    XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase);
 -
 -    info->irq = dev->irq;
 -
 -    /* check common error condition */
 -    if (info->irq == 0)
 -    {
 -        XGI_ERROR("Can't find an IRQ for your XGI card!  \n");
 -        goto error_zero_dev;
 -    }
 -    XGI_INFO("info->irq: %lx \n", info->irq);
 -
 -    //xgi_enable_dvi_interrupt(info);
 -
 -    /* sanity check the IO apertures */
 -    if ((info->mmio.base == 0) || (info->mmio.size == 0)
 -         || (info->fb.base == 0) || (info->fb.size == 0))
 -    {
 -        XGI_ERROR("The IO regions for your XGI card are invalid.\n");
 -
 -        if ((info->mmio.base == 0) || (info->mmio.size == 0))
 -        {
 -            XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n",
 -                       info->mmio.base,
 -                       info->mmio.size);
 -        }
 -
 -        if ((info->fb.base == 0) || (info->fb.size == 0))
 -        {
 -            XGI_ERROR("frame buffer appears to be wrong: 0x%lx 0x%lx\n",
 -                      info->fb.base,
 -                      info->fb.size);
 -        }
 -
 -        goto error_zero_dev;
 -    }
 -
 -    //xgi_num_devices++;
 -
 -    return 0;
 -
 -error_zero_dev:
 -    release_mem_region(info->fb.base, info->fb.size);
 -    release_mem_region(info->mmio.base, info->mmio.size);
 -
 -error_disable_dev:
 -    pci_disable_device(dev);
 -    return -1;
 -
 -}
 -
 -/*
 - * vma operations...
 - * this is only called when the vmas are duplicated. this
 - * appears to only happen when the process is cloned to create
 - * a new process, and not when the process is threaded.
 - *
 - * increment the usage count for the physical pages, so when
 - * this clone unmaps the mappings, the pages are not
 - * deallocated under the original process.
 - */
 -struct vm_operations_struct xgi_vm_ops = {
 -    .open   = xgi_kern_vma_open,
 -    .close  = xgi_kern_vma_release,
 -    .nopage = xgi_kern_vma_nopage,
 -};
 -
 -void xgi_kern_vma_open(struct vm_area_struct *vma)
 -{
 -    XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n",
 -             vma->vm_start,
 -             vma->vm_end,
 -             XGI_VMA_OFFSET(vma));
 -
 -    if (XGI_VMA_PRIVATE(vma))
 -    {
 -        xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);
 -        XGI_ATOMIC_INC(block->use_count);
 -    }
 -}
 -
 -void xgi_kern_vma_release(struct vm_area_struct *vma)
 -{
 -    XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n",
 -             vma->vm_start,
 -             vma->vm_end,
 -             XGI_VMA_OFFSET(vma));
 -
 -    if (XGI_VMA_PRIVATE(vma))
 -    {
 -        xgi_pcie_block_t *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);
 -        XGI_ATOMIC_DEC(block->use_count);
 -
 -        /*
 -         * if use_count is down to 0, the kernel virtual mapping was freed
 -         * but the underlying physical pages were not, we need to clear the
 -         * bit and free the physical pages.
 -         */
 -        if (XGI_ATOMIC_READ(block->use_count) == 0)
 -        {
 -            // Need TO Finish
 -            XGI_VMA_PRIVATE(vma) = NULL;
 -        }
 -    }
 -}
 -
 -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1))
 -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
 -                                 unsigned long address, int *type)
 -{
 -    xgi_pcie_block_t    *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);
 -    struct page         *page = NOPAGE_SIGBUS;
 -    unsigned long       offset = 0;
 -    unsigned long       page_addr = 0;
 -/*
 -    XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",
 -              vma->vm_start,
 -              vma->vm_end,
 -              XGI_VMA_OFFSET(vma),
 -              address);
 -*/
 -    offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);
 -
 -    offset = offset - block->bus_addr;
 -
 -    offset >>= PAGE_SHIFT;
 -
 -    page_addr = block->page_table[offset].virt_addr;
 -
 -    if (xgi_temp)
 -    {
 -        XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"
 -                 "block->page_count: 0x%lx block->page_order: 0x%lx"
 -                 "block->page_table[0x%lx].virt_addr: 0x%lx\n",
 -                  block->bus_addr, block->hw_addr,
 -                  block->page_count, block->page_order,
 -                  offset,
 -                  block->page_table[offset].virt_addr);
 -        xgi_temp = 0;
 -    }
 -
 -    if (!page_addr)     goto out; /* hole or end-of-file */
 -    page = virt_to_page(page_addr);
 -
 -    /* got it, now increment the count */
 -    get_page(page);
 -out:
 -    return page;
 -
 -}
 -#else
 -struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma,
 -                                 unsigned long address, int write_access)
 -{
 -    xgi_pcie_block_t    *block = (xgi_pcie_block_t *)XGI_VMA_PRIVATE(vma);
 -    struct page         *page = NOPAGE_SIGBUS;
 -    unsigned long       offset = 0;
 -    unsigned long       page_addr = 0;
 -/*
 -    XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n",
 -              vma->vm_start,
 -              vma->vm_end,
 -              XGI_VMA_OFFSET(vma),
 -              address);
 -*/
 -    offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma);
 -
 -    offset = offset - block->bus_addr;
 -
 -    offset >>= PAGE_SHIFT;
 -
 -    page_addr = block->page_table[offset].virt_addr;
 -
 -    if (xgi_temp)
 -    {
 -        XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx"
 -                 "block->page_count: 0x%lx block->page_order: 0x%lx"
 -                 "block->page_table[0x%lx].virt_addr: 0x%lx\n",
 -                  block->bus_addr, block->hw_addr,
 -                  block->page_count, block->page_order,
 -                  offset,
 -                  block->page_table[offset].virt_addr);
 -        xgi_temp = 0;
 -    }
 -
 -    if (!page_addr)     goto out; /* hole or end-of-file */
 -    page = virt_to_page(page_addr);
 -
 -    /* got it, now increment the count */
 -    get_page(page);
 -out:
 -    return page;
 -}
 -#endif
 -
 -#if 0
 -static struct file_operations xgi_fops = {
 -    /* owner:      THIS_MODULE, */
 -    poll:       xgi_kern_poll,
 -    ioctl:      xgi_kern_ioctl,
 -    mmap:       xgi_kern_mmap,
 -    open:       xgi_kern_open,
 -    release:    xgi_kern_release,
 -};
 -#endif
 -
 -static struct file_operations xgi_fops = {
 -    .owner      = THIS_MODULE,
 -    .poll       = xgi_kern_poll,
 -    .ioctl      = xgi_kern_ioctl,
 -    .mmap       = xgi_kern_mmap,
 -    .open       = xgi_kern_open,
 -    .release    = xgi_kern_release,
 -};
 -
 -static xgi_file_private_t * xgi_alloc_file_private(void)
 -{
 -    xgi_file_private_t *fp;
 -
 -    XGI_KMALLOC(fp, sizeof(xgi_file_private_t));
 -    if (!fp)
 -        return NULL;
 -
 -    memset(fp, 0, sizeof(xgi_file_private_t));
 -
 -    /* initialize this file's event queue */
 -    init_waitqueue_head(&fp->wait_queue);
 -
 -    xgi_init_lock(fp->fp_lock);
 -
 -    return fp;
 -}
 -
 -static void xgi_free_file_private(xgi_file_private_t *fp)
 -{
 -    if (fp == NULL)
 -        return;
 -
 -    XGI_KFREE(fp, sizeof(xgi_file_private_t));
 -}
 -
 -int xgi_kern_open(struct inode *inode, struct file *filp)
 -{
 -    xgi_info_t  *info = NULL;
 -    int         dev_num;
 -    int         result = 0, status;
 -
 -    /*
 -     * the type and num values are only valid if we are not using devfs.
 -     * However, since we use them to retrieve the device pointer, we
 -     * don't need them with devfs as filp->private_data is already
 -     * initialized
 -     */
 -    filp->private_data = xgi_alloc_file_private();
 -    if (filp->private_data == NULL)
 -        return -ENOMEM;
 -
 -    XGI_INFO("filp->private_data %p\n", filp->private_data);
 -    /*
 -     * for control device, just jump to its open routine
 -     * after setting up the private data
 -     */
 -    if (XGI_IS_CONTROL_DEVICE(inode))
 -        return xgi_kern_ctl_open(inode, filp);
 -
 -    /* what device are we talking about? */
 -    dev_num = XGI_DEVICE_NUMBER(inode);
 -    if (dev_num >= XGI_MAX_DEVICES)
 -    {
 -        xgi_free_file_private(filp->private_data);
 -        filp->private_data = NULL;
 -        return -ENODEV;
 -    }
 -
 -    info = &xgi_devices[dev_num];
 -
 -    XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num);
 -
 -    xgi_down(info->info_sem);
 -    XGI_CHECK_PCI_CONFIG(info);
 -
 -    XGI_INFO_FROM_FP(filp) = info;
 -
 -    /*
 -     * map the memory and allocate isr on first open
 -     */
 -
 -    if (!(info->flags & XGI_FLAG_OPEN))
 -    {
 -        XGI_INFO("info->flags & XGI_FLAG_OPEN \n");
 -
 -        if (info->device_id == 0)
 -        {
 -            XGI_INFO("open of nonexistent device %d\n", dev_num);
 -            result = -ENXIO;
 -            goto failed;
 -        }
 -
 -        /* initialize struct irqaction */
 -        status = request_irq(info->irq, xgi_kern_isr,
 -                             SA_INTERRUPT | SA_SHIRQ, "xgi",
 -                             (void *) info);
 -        if (status != 0)
 -        {
 -            if (info->irq && (status == -EBUSY))
 -            {
 -                XGI_ERROR("Tried to get irq %d, but another driver",
 -                          (unsigned int) info->irq);
 -                XGI_ERROR("has it and is not sharing it.\n");
 -            }
 -            XGI_ERROR("isr request failed 0x%x\n", status);
 -            result = -EIO;
 -            goto failed;
 -        }
 -
 -        /*
 -         * #define DECLARE_TASKLET(name, func, data) \
 -         * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
 -         */
 -        info->tasklet.func = xgi_kern_isr_bh;
 -        info->tasklet.data = (unsigned long) info;
 -        tasklet_enable(&info->tasklet);
 -
 -        /* Alloc 1M bytes for cmdbuffer which is flush2D batch array */
 -        xgi_cmdlist_initialize(info, 0x100000);
 -
 -        info->flags |= XGI_FLAG_OPEN;
 -    }
 -
 -    XGI_ATOMIC_INC(info->use_count);
 -
 -failed:
 -    xgi_up(info->info_sem);
 -
 -    if ((result) && filp->private_data)
 -    {
 -        xgi_free_file_private(filp->private_data);
 -        filp->private_data = NULL;
 -    }
 -
 -    return result;
 -}
 -
 -int xgi_kern_release(struct inode *inode, struct file *filp)
 -{
 -    xgi_info_t  *info = XGI_INFO_FROM_FP(filp);
 -
 -    XGI_CHECK_PCI_CONFIG(info);
 -
 -    /*
 -     * for control device, just jump to its open routine
 -     * after setting up the private data
 -     */
 -    if (XGI_IS_CONTROL_DEVICE(inode))
 -        return xgi_kern_ctl_close(inode, filp);
 -
 -    XGI_INFO("Jong-xgi_kern_release on device %d\n", XGI_DEVICE_NUMBER(inode));
 -
 -    xgi_down(info->info_sem);
 -    if (XGI_ATOMIC_DEC_AND_TEST(info->use_count))
 -    {
 -
 -        /*
 -         * The usage count for this device has dropped to zero, it can be shut
 -         * down safely; disable its interrupts.
 -         */
 -
 -        /*
 -         * Disable this device's tasklet to make sure that no bottom half will
 -         * run with undefined device state.
 -         */
 -        tasklet_disable(&info->tasklet);
 -
 -        /*
 -         * Free the IRQ, which may block until all pending interrupt processing
 -         * has completed.
 -         */
 -        free_irq(info->irq, (void *)info);
 -
 -        xgi_cmdlist_cleanup(info);
 -
 -        /* leave INIT flag alone so we don't reinit every time */
 -        info->flags &= ~XGI_FLAG_OPEN;
 -    }
 -
 -    xgi_up(info->info_sem);
 -
 -    if (FILE_PRIVATE(filp))
 -    {
 -        xgi_free_file_private(FILE_PRIVATE(filp));
 -        FILE_PRIVATE(filp) = NULL;
 -    }
 -
 -    return 0;
 -}
 -
 -int xgi_kern_mmap(struct file  *filp, struct vm_area_struct *vma)
 -{
 -    //struct inode        *inode = INODE_FROM_FP(filp);
 -    xgi_info_t          *info  = XGI_INFO_FROM_FP(filp);
 -    xgi_pcie_block_t    *block;
 -    int                 pages = 0;
 -    unsigned long       prot;
 -
 -    XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n",
 -             vma->vm_start,
 -             vma->vm_end,
 -             XGI_VMA_OFFSET(vma));
 -
 -    XGI_CHECK_PCI_CONFIG(info);
 -
 -    if (XGI_MASK_OFFSET(vma->vm_start)
 -        || XGI_MASK_OFFSET(vma->vm_end))
 -    {
 -        XGI_ERROR("VM: bad mmap range: %lx - %lx\n",
 -                  vma->vm_start, vma->vm_end);
 -        return -ENXIO;
 -    }
 -
 -    pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 -
 -    vma->vm_ops = &xgi_vm_ops;
 -
 -    /* XGI IO(reg) space */
 -    if (IS_IO_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
 -    {
 -        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 -        if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
 -                                 XGI_VMA_OFFSET(vma),
 -                                 vma->vm_end - vma->vm_start,
 -                                 vma->vm_page_prot))
 -            return -EAGAIN;
 -
 -        /* mark it as IO so that we don't dump it on core dump */
 -        vma->vm_flags |= VM_IO;
 -        XGI_INFO("VM: mmap io space \n");
 -    }
 -    /* XGI fb space */
 -	/* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */
 -    else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
 -    {
 -        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 -        if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
 -                                 XGI_VMA_OFFSET(vma),
 -                                 vma->vm_end - vma->vm_start,
 -                                 vma->vm_page_prot))
 -            return -EAGAIN;
 -
 -        // mark it as IO so that we don't dump it on core dump
 -        vma->vm_flags |= VM_IO;
 -        XGI_INFO("VM: mmap fb space \n");
 -    }
 -    /* PCIE allocator */
 -	/* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */
 -    else if (IS_PCIE_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
 -    {
 -        xgi_down(info->pcie_sem);
 -
 -        block = (xgi_pcie_block_t *)xgi_find_pcie_block(info, XGI_VMA_OFFSET(vma));
 -
 -        if (block == NULL)
 -        {
 -            XGI_ERROR("couldn't find pre-allocated PCIE memory!\n");
 -            xgi_up(info->pcie_sem);
 -            return -EAGAIN;
 -        }
 -
 -        if (block->page_count != pages)
 -        {
 -            XGI_ERROR("pre-allocated PCIE memory has wrong number of pages!\n");
 -            xgi_up(info->pcie_sem);
 -            return -EAGAIN;
 -        }
 -
 -        vma->vm_private_data = block;
 -        XGI_ATOMIC_INC(block->use_count);
 -        xgi_up(info->pcie_sem);
 -
 -        /*
 -         * prevent the swapper from swapping it out
 -         * mark the memory i/o so the buffers aren't
 -         * dumped on core dumps */
 -        vma->vm_flags |= (VM_LOCKED | VM_IO);
 -
 -        /* un-cached */
 -        prot = pgprot_val(vma->vm_page_prot);
 -        /* 
 -        if (boot_cpu_data.x86 > 3)
 -		    prot |= _PAGE_PCD | _PAGE_PWT;
 -        */
 -	    vma->vm_page_prot = __pgprot(prot);
 -
 -        XGI_INFO("VM: mmap pcie space \n");
 -    }
 -#if 0
 -    else if (IS_FB_OFFSET(info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start))
 -    {
 -        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 -        if (XGI_REMAP_PAGE_RANGE(vma->vm_start,
 -                                 XGI_VMA_OFFSET(vma),
 -                                 vma->vm_end - vma->vm_start,
 -                                 vma->vm_page_prot))
 -            return -EAGAIN;
 -
 -        // mark it as IO so that we don't dump it on core dump
 -        vma->vm_flags |= VM_IO;
 -        XGI_INFO("VM: mmap fb space \n");
 -    }
 -#endif
 -    else
 -    {
 -        vma->vm_flags |= (VM_IO | VM_LOCKED);
 -        XGI_ERROR("VM: mmap wrong range \n");
 -    }
 -
 -    vma->vm_file = filp;
 -
 -    return 0;
 -}
 -
 -unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait)
 -{
 -    xgi_file_private_t  *fp;
 -    xgi_info_t          *info;
 -    unsigned int        mask = 0;
 -    unsigned long       eflags;
 -
 -    info = XGI_INFO_FROM_FP(filp);
 -
 -    if (info->device_number == XGI_CONTROL_DEVICE_NUMBER)
 -        return xgi_kern_ctl_poll(filp, wait);
 -
 -    fp = XGI_GET_FP(filp);
 -
 -    if (!(filp->f_flags & O_NONBLOCK))
 -    {
 -        /* add us to the list */
 -        poll_wait(filp, &fp->wait_queue, wait);
 -    }
 -
 -    xgi_lock_irqsave(fp->fp_lock, eflags);
 -
 -    /* wake the user on any event */
 -    if (fp->num_events)
 -    {
 -        XGI_INFO("Hey, an event occured!\n");
 -        /*
 -         * trigger the client, when they grab the event,
 -         * we'll decrement the event count
 -         */
 -        mask |= (POLLPRI|POLLIN);
 -    }
 -    xgi_unlock_irqsave(fp->fp_lock, eflags);
 -
 -    return mask;
 -}
 -
 -int xgi_kern_ioctl(struct inode *inode, struct file *filp,
 -                   unsigned int cmd, unsigned long arg)
 -{
 -    xgi_info_t      *info;
 -    xgi_mem_alloc_t *alloc = NULL;
 -
 -    int             status = 0;
 -    void            *arg_copy;
 -    int             arg_size;
 -    int             err = 0;
 -
 -    info = XGI_INFO_FROM_FP(filp);
 -
 -    XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), _IOC_NR(cmd), arg, _IOC_SIZE(cmd));
 -    /*
 -     * extract the type and number bitfields, and don't decode
 -     * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok()
 -     */
 -    if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) return -ENOTTY;
 -    if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) return -ENOTTY;
 -
 -    /*
 -     * the direction is a bitmask, and VERIFY_WRITE catches R/W
 -     * transfers. `Type' is user-oriented, while
 -     * access_ok is kernel-oriented, so the concept of "read" and
 -     * "write" is reversed
 -     */
 -    if (_IOC_DIR(cmd) & _IOC_READ)
 -    {
 -        err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
 -    }
 -    else if (_IOC_DIR(cmd) & _IOC_WRITE)
 -    {
 -        err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
 -    }
 -    if (err) return -EFAULT;
 -
 -    XGI_CHECK_PCI_CONFIG(info);
 -
 -    arg_size = _IOC_SIZE(cmd);
 -    XGI_KMALLOC(arg_copy, arg_size);
 -    if (arg_copy == NULL)
 -    {
 -        XGI_ERROR("failed to allocate ioctl memory\n");
 -        return -ENOMEM;
 -    }
 -
 -	/* Jong 05/25/2006 */
 -    /* copy_from_user(arg_copy, (void *)arg, arg_size); */
 -    if(copy_from_user(arg_copy, (void *)arg, arg_size))
 -	{
 -        XGI_ERROR("failed to copyin ioctl data\n");
 -        XGI_INFO("Jong-copy_from_user-fail! \n");
 -	}
 -	else
 -        XGI_INFO("Jong-copy_from_user-OK! \n");
 -
 -    alloc = (xgi_mem_alloc_t *)arg_copy;
 -    XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, arg_size);
 -
 -    switch (_IOC_NR(cmd))
 -    {
 -    case XGI_ESC_DEVICE_INFO:
 -        XGI_INFO("Jong-xgi_ioctl_get_device_info \n");
 -        xgi_get_device_info(info, (struct xgi_chip_info_s *) arg_copy);
 -        break;
 -    case XGI_ESC_POST_VBIOS:
 -        XGI_INFO("Jong-xgi_ioctl_post_vbios \n");
 -        break;
 -    case XGI_ESC_FB_ALLOC:
 -        XGI_INFO("Jong-xgi_ioctl_fb_alloc \n");
 -        xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc);
 -        break;
 -    case XGI_ESC_FB_FREE:
 -        XGI_INFO("Jong-xgi_ioctl_fb_free \n");
 -        xgi_fb_free(info, *(unsigned long *) arg_copy);
 -        break;
 -    case XGI_ESC_MEM_COLLECT:
 -        XGI_INFO("Jong-xgi_ioctl_mem_collect \n");
 -        xgi_mem_collect(info, (unsigned int *) arg_copy);
 -        break;
 -    case XGI_ESC_PCIE_ALLOC:
 -        XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n");
 -        xgi_pcie_alloc(info, ((xgi_mem_req_t *)arg_copy)->size,
 -                       ((xgi_mem_req_t *)arg_copy)->owner, alloc);
 -        break;
 -    case XGI_ESC_PCIE_FREE:
 -        XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", *((unsigned long *) arg_copy));
 -        xgi_pcie_free(info, *((unsigned long *) arg_copy));
 -        break;
 -    case XGI_ESC_PCIE_CHECK:
 -        XGI_INFO("Jong-xgi_pcie_heap_check \n");
 -        xgi_pcie_heap_check();
 -        break;
 -    case XGI_ESC_GET_SCREEN_INFO:
 -        XGI_INFO("Jong-xgi_get_screen_info \n");
 -        xgi_get_screen_info(info, (struct xgi_screen_info_s *) arg_copy);
 -        break;
 -    case XGI_ESC_PUT_SCREEN_INFO:
 -        XGI_INFO("Jong-xgi_put_screen_info \n");
 -        xgi_put_screen_info(info, (struct xgi_screen_info_s *) arg_copy);
 -        break;
 -    case XGI_ESC_MMIO_INFO:
 -        XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n");
 -        xgi_get_mmio_info(info, (struct xgi_mmio_info_s *) arg_copy);
 -        break;
 -    case XGI_ESC_GE_RESET:
 -        XGI_INFO("Jong-xgi_ioctl_ge_reset \n");
 -        xgi_ge_reset(info);
 -        break;
 -    case XGI_ESC_SAREA_INFO:
 -        XGI_INFO("Jong-xgi_ioctl_sarea_info \n");
 -        xgi_sarea_info(info, (struct xgi_sarea_info_s *) arg_copy);
 -        break;
 -    case XGI_ESC_DUMP_REGISTER:
 -        XGI_INFO("Jong-xgi_ioctl_dump_register \n");
 -        xgi_dump_register(info);
 -        break;
 -    case XGI_ESC_DEBUG_INFO:
 -        XGI_INFO("Jong-xgi_ioctl_restore_registers \n");
 -        xgi_restore_registers(info);
 -        //xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy);
 -        //xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy);
 -        break;
 -    case XGI_ESC_SUBMIT_CMDLIST:
 -        XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n");
 -        xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy);
 -        break;
 -    case XGI_ESC_TEST_RWINKERNEL:
 -        XGI_INFO("Jong-xgi_test_rwinkernel \n");
 -        xgi_test_rwinkernel(info, *(unsigned long*) arg_copy);
 -        break;
 -    case XGI_ESC_STATE_CHANGE:
 -        XGI_INFO("Jong-xgi_state_change \n");
 -        xgi_state_change(info, (xgi_state_info_t *) arg_copy);
 -        break;
 -    case XGI_ESC_CPUID:
 -        XGI_INFO("Jong-XGI_ESC_CPUID \n");
 -        xgi_get_cpu_id((struct cpu_info_s*) arg_copy);
 -        break;
 -    default:
 -        XGI_INFO("Jong-xgi_ioctl_default \n");
 -        status = -EINVAL;
 -        break;
 -    }
 -
 -    if (copy_to_user((void *)arg, arg_copy, arg_size))
 -	{
 -        XGI_ERROR("failed to copyout ioctl data\n");
 -        XGI_INFO("Jong-copy_to_user-fail! \n");
 -	}
 -	else
 -        XGI_INFO("Jong-copy_to_user-OK! \n");
 -
 -    XGI_KFREE(arg_copy, arg_size);
 -    return status;
 -}
 -
 -
 -/*
 - * xgi control driver operations defined here
 - */
 -int xgi_kern_ctl_open(struct inode *inode, struct file *filp)
 -{
 -    xgi_info_t  *info = &xgi_ctl_device;
 -
 -    int rc = 0;
 -
 -    XGI_INFO("Jong-xgi_kern_ctl_open\n");
 -
 -    xgi_down(info->info_sem);
 -    info->device_number = XGI_CONTROL_DEVICE_NUMBER;
 -
 -    /* save the xgi info in file->private_data */
 -    filp->private_data = info;
 -
 -    if (XGI_ATOMIC_READ(info->use_count) == 0)
 -    {
 -        init_waitqueue_head(&xgi_ctl_waitqueue);
 -    }
 -
 -    info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL;
 -
 -    XGI_ATOMIC_INC(info->use_count);
 -    xgi_up(info->info_sem);
 -
 -    return rc;
 -}
 -
 -int xgi_kern_ctl_close(struct inode *inode, struct file *filp)
 -{
 -    xgi_info_t  *info = XGI_INFO_FROM_FP(filp);
 -
 -    XGI_INFO("Jong-xgi_kern_ctl_close\n");
 -
 -    xgi_down(info->info_sem);
 -    if (XGI_ATOMIC_DEC_AND_TEST(info->use_count))
 -    {
 -        info->flags = 0;
 -    }
 -    xgi_up(info->info_sem);
 -
 -    if (FILE_PRIVATE(filp))
 -    {
 -        xgi_free_file_private(FILE_PRIVATE(filp));
 -        FILE_PRIVATE(filp) = NULL;
 -    }
 -
 -    return 0;
 -}
 -
 -unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table *wait)
 -{
 -    //xgi_info_t  *info = XGI_INFO_FROM_FP(filp);;
 -    unsigned int ret = 0;
 -
 -    if (!(filp->f_flags & O_NONBLOCK))
 -    {
 -        poll_wait(filp, &xgi_ctl_waitqueue, wait);
 -    }
 -
 -    return ret;
 -}
 -
 -/*
 - * xgi proc system
 - */
 -static u8 xgi_find_pcie_capability(struct pci_dev *dev)
 -{
 -    u16 status;
 -    u8  cap_ptr, cap_id;
 -
 -    pci_read_config_word(dev, PCI_STATUS, &status);
 -    status &= PCI_STATUS_CAP_LIST;
 -    if (!status)
 -        return 0;
 -
 -    switch (dev->hdr_type)
 -    {
 -    case PCI_HEADER_TYPE_NORMAL:
 -    case PCI_HEADER_TYPE_BRIDGE:
 -        pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr);
 -        break;
 -    default:
 -        return 0;
 -    }
 -
 -    do
 -    {
 -        cap_ptr &= 0xFC;
 -        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id);
 -        pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, &cap_ptr);
 -    } while (cap_ptr && cap_id != 0xFF);
 -
 -    return 0;
 -}
 -
 -static struct pci_dev* xgi_get_pci_device(xgi_info_t *info)
 -{
 -    struct pci_dev *dev;
 -
 -    dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL);
 -    while (dev)
 -    {
 -        if (XGI_PCI_SLOT_NUMBER(dev) == info->slot
 -            && XGI_PCI_BUS_NUMBER(dev) == info->bus)
 -            return dev;
 -        dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev);
 -    }
 -
 -    return NULL;
 -}
 -
 -int xgi_kern_read_card_info(char *page, char **start, off_t off,
 -                           int count, int *eof, void *data)
 -{
 -    struct pci_dev  *dev;
 -    char    *type;
 -    int    len = 0;
 -
 -    xgi_info_t  *info;
 -    info = (xgi_info_t *) data;
 -
 -    dev = xgi_get_pci_device(info);
 -    if (!dev)
 -        return 0;
 -
 -    type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI";
 -    len += sprintf(page+len, "Card Type: \t %s\n", type);
 -
 -    XGI_PCI_DEV_PUT(dev);
 -    return len;
 -}
 -
 -int xgi_kern_read_version(char *page, char **start, off_t off,
 -                          int count, int *eof, void *data)
 -{
 -    int len = 0;
 -
 -    len += sprintf(page+len, "XGI version: %s\n", "1.0");
 -    len += sprintf(page+len, "GCC version:  %s\n", "3.0");
 -
 -    return len;
 -}
 -
 -int xgi_kern_read_pcie_info(char *page, char **start, off_t off,
 -                          int count, int *eof, void *data)
 -{
 -    return 0;
 -}
 -
 -int xgi_kern_read_status(char *page, char **start, off_t off,
 -                         int count, int *eof, void *data)
 -{
 -    return 0;
 -}
 -
 -
 -static void xgi_proc_create(void)
 -{
 -#ifdef CONFIG_PROC_FS
 -
 -    struct pci_dev *dev;
 -    int     i = 0;
 -    char    name[6];
 -
 -    struct proc_dir_entry *entry;
 -    struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards;
 -
 -    xgi_info_t  *info;
 -    xgi_info_t  *xgi_max_devices;
 -
 -    /* world readable directory */
 -    int flags = S_IFDIR | S_IRUGO | S_IXUGO;
 -
 -    proc_xgi = create_proc_entry("xgi", flags, proc_root_driver);
 -    if (!proc_xgi)
 -        goto failed;
 -
 -    proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi);
 -    if (!proc_xgi_cards)
 -        goto failed;
 -
 -    proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi);
 -    if (!proc_xgi_pcie)
 -        goto failed;
 -
 -    /*
 -     * Set the module owner to ensure that the reference
 -     * count reflects accesses to the proc files.
 -     */
 -    proc_xgi->owner       = THIS_MODULE;
 -    proc_xgi_cards->owner = THIS_MODULE;
 -    proc_xgi_pcie->owner   = THIS_MODULE;
 -
 -    xgi_max_devices = xgi_devices + XGI_MAX_DEVICES;
 -    for (info = xgi_devices; info < xgi_max_devices; info++)
 -    {
 -        if (info->device_id == 0)
 -            break;
 -
 -        /* world readable file */
 -        flags = S_IFREG | S_IRUGO;
 -
 -        dev = xgi_get_pci_device(info);
 -        if (!dev)
 -            break;
 -
 -        sprintf(name, "%d", i++);
 -        entry = create_proc_entry(name, flags, proc_xgi_cards);
 -        if (!entry)
 -        {
 -            XGI_PCI_DEV_PUT(dev);
 -            goto failed;
 -        }
 -
 -        entry->data = info;
 -        entry->read_proc = xgi_kern_read_card_info;
 -        entry->owner = THIS_MODULE;
 -
 -        if (xgi_find_pcie_capability(dev))
 -        {
 -            entry = create_proc_entry("status", flags, proc_xgi_pcie);
 -            if (!entry)
 -            {
 -                XGI_PCI_DEV_PUT(dev);
 -                goto failed;
 -            }
 -
 -            entry->data = info;
 -            entry->read_proc = xgi_kern_read_status;
 -            entry->owner = THIS_MODULE;
 -
 -            entry = create_proc_entry("card", flags, proc_xgi_pcie);
 -            if (!entry)
 -            {
 -                XGI_PCI_DEV_PUT(dev);
 -                goto failed;
 -            }
 -
 -            entry->data = info;
 -            entry->read_proc = xgi_kern_read_pcie_info;
 -            entry->owner = THIS_MODULE;
 -        }
 -
 -        XGI_PCI_DEV_PUT(dev);
 -    }
 -
 -    entry = create_proc_entry("version", flags, proc_xgi);
 -    if (!entry)
 -        goto failed;
 -
 -    entry->read_proc = xgi_kern_read_version;
 -    entry->owner = THIS_MODULE;
 -
 -    entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie);
 -    if (!entry)
 -        goto failed;
 -
 -    entry->data = NULL;
 -    entry->read_proc = xgi_kern_read_pcie_info;
 -    entry->owner = THIS_MODULE;
 -
 -    return;
 -
 -failed:
 -    XGI_ERROR("failed to create /proc entries!\n");
 -    xgi_proc_remove_all(proc_xgi);
 -#endif
 -}
 -
 -#ifdef CONFIG_PROC_FS
 -static void xgi_proc_remove_all(struct proc_dir_entry *entry)
 -{
 -    while (entry)
 -    {
 -        struct proc_dir_entry *next = entry->next;
 -        if (entry->subdir)
 -            xgi_proc_remove_all(entry->subdir);
 -        remove_proc_entry(entry->name, entry->parent);
 -        if (entry == proc_xgi)
 -            break;
 -        entry = next;
 -    }
 -}
 -#endif
 -
 -static void xgi_proc_remove(void)
 -{
 -#ifdef CONFIG_PROC_FS
 -    xgi_proc_remove_all(proc_xgi);
 -#endif
 -}
 -
 -/*
 - * driver receives an interrupt if someone waiting, then hand it off.
 - */
 -irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs)
 -{
 -    xgi_info_t *info = (xgi_info_t *) dev_id;
 -    u32 need_to_run_bottom_half = 0;
 -
 -    //XGI_INFO("xgi_kern_isr \n");
 -
 -    //XGI_CHECK_PCI_CONFIG(info);
 -
 -    //xgi_dvi_irq_handler(info);
 -
 -    if (need_to_run_bottom_half)
 -    {
 -        tasklet_schedule(&info->tasklet);
 -    }
 -
 -    return IRQ_HANDLED;
 -}
 -
 -void xgi_kern_isr_bh(unsigned long data)
 -{
 -    xgi_info_t *info = (xgi_info_t *) data;
 -
 -    XGI_INFO("xgi_kern_isr_bh \n");
 -
 -    //xgi_dvi_irq_handler(info);
 -
 -    XGI_CHECK_PCI_CONFIG(info);
 -}
 -
 -static void xgi_lock_init(xgi_info_t *info)
 -{
 -    if (info == NULL) return;
 -
 -    spin_lock_init(&info->info_lock);
 -
 -    sema_init(&info->info_sem, 1);
 -    sema_init(&info->fb_sem, 1);
 -    sema_init(&info->pcie_sem, 1);
 -
 -    XGI_ATOMIC_SET(info->use_count, 0);
 -}
 -
 -static void xgi_dev_init(xgi_info_t *info)
 -{
 -    struct pci_dev *pdev = NULL;
 -    struct xgi_dev *dev;
 -    int    found = 0;
 -    u16    pci_cmd;
 -
 -    XGI_INFO("Enter xgi_dev_init \n");
 -
 -    //XGI_PCI_FOR_EACH_DEV(pdev)
 -    {
 -        for (dev = xgidev_list; dev->vendor; dev++)
 -        {
 -            if ((dev->vendor == pdev->vendor) && (dev->device == pdev->device))
 -            {
 -                XGI_INFO("dev->vendor = pdev->vendor= %x \n", dev->vendor);
 -                XGI_INFO("dev->device = pdev->device= %x \n", dev->device);
 -
 -                xgi_devices[found].device_id = pdev->device;
 -
 -                pci_read_config_byte(pdev, PCI_REVISION_ID, &xgi_devices[found].revision_id);
 -
 -                XGI_INFO("PCI_REVISION_ID= %x \n", xgi_devices[found].revision_id);
 -
 -                pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
 -
 -                XGI_INFO("PCI_COMMAND = %x \n", pci_cmd);
 -
 -                break;
 -            }
 -        }
 -    }
 -}
 -/*
 - * Export to Linux Kernel
 - */
 -
 -static int __init xgi_init_module(void)
 -{
 -    xgi_info_t  *info = &xgi_devices[xgi_num_devices];
 -    int         i, result;
 -
 -    XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION);
 -    //SET_MODULE_OWNER(&xgi_fops);
 -
 -    memset(xgi_devices, 0, sizeof(xgi_devices));
 -
 -    if (pci_register_driver(&xgi_pci_driver) < 0)
 -    {
 -        pci_unregister_driver(&xgi_pci_driver);
 -        XGI_ERROR("no XGI graphics adapter found\n");
 -        return -ENODEV;
 -    }
 -
 -    XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.base);
 -    XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, xgi_devices[xgi_num_devices].fb.size);
 -
 -/* Jong 07/27/2006; test for ubuntu */
 -/*
 -#ifdef CONFIG_DEVFS_FS
 -
 -    XGI_INFO("Jong-Use devfs \n");
 -    do
 -    {
 -        xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0);
 -        if (xgi_devfs_handles[0] == NULL)
 -        {
 -            result = -ENOMEM;
 -            XGI_ERROR("devfs register failed\n");
 -            goto failed;
 -        }
 -    } while(0);
 -#else */ /* no devfs, do it the "classic" way  */
 -
 -
 -    XGI_INFO("Jong-Use non-devfs \n");
 -    /*
 -     * Register your major, and accept a dynamic number. This is the
 -     * first thing to do, in order to avoid releasing other module's
 -     * fops in scull_cleanup_module()
 -     */
 -    result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops);
 -    if (result < 0)
 -    {
 -        XGI_ERROR("register chrdev failed\n");
 -        pci_unregister_driver(&xgi_pci_driver);
 -        return result;
 -    }
 -    if (xgi_major == 0) xgi_major = result; /* dynamic */
 -
 -/* #endif */ /* CONFIG_DEVFS_FS */
 -
 -    XGI_INFO("Jong-major number %d\n", xgi_major);
 -
 -    /* instantiate tasklets */
 -    for (i = 0; i < XGI_MAX_DEVICES; i++)
 -    {
 -        /*
 -         * We keep one tasklet per card to avoid latency issues with more
 -         * than one device; no two instances of a single tasklet are ever
 -         * executed concurrently.
 -         */
 -        XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1);
 -    }
 -
 -    /* init the xgi control device */
 -    {
 -        xgi_info_t *info_ctl = &xgi_ctl_device;
 -        xgi_lock_init(info_ctl);
 -    }
 -
 -    /* Init the resource manager */
 -    INIT_LIST_HEAD(&xgi_mempid_list);
 -    if (!xgi_fb_heap_init(info))
 -    {
 -        XGI_ERROR("xgi_fb_heap_init() failed\n");
 -        result = -EIO;
 -        goto failed;
 -    }
 -
 -    /* Init the resource manager */
 -    if (!xgi_pcie_heap_init(info))
 -    {
 -        XGI_ERROR("xgi_pcie_heap_init() failed\n");
 -        result = -EIO;
 -        goto failed;
 -    }
 -
 -    /* create /proc/driver/xgi */
 -    xgi_proc_create();
 -
 -#if defined(DEBUG)
 -    inter_module_register("xgi_devices", THIS_MODULE, xgi_devices);
 -#endif
 -
 -    return 0;
 -
 -failed:
 -#ifdef CONFIG_DEVFS_FS
 -    XGI_DEVFS_REMOVE_CONTROL();
 -    XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);
 -#endif
 -
 -    if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)
 -        XGI_ERROR("unregister xgi chrdev failed\n");
 -
 -    for (i = 0; i < xgi_num_devices; i++)
 -    {
 -        if (xgi_devices[i].dev)
 -        {
 -            release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size);
 -            release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size);
 -        }
 -    }
 -
 -    pci_unregister_driver(&xgi_pci_driver);
 -    return result;
 -
 -    return 1;
 -}
 -
 -void __exit xgi_exit_module(void)
 -{
 -    int i;
 -    xgi_info_t *info, *max_devices;
 -
 -#ifdef CONFIG_DEVFS_FS
 -    /*
 -    XGI_DEVFS_REMOVE_CONTROL();
 -    for (i = 0; i < XGI_MAX_DEVICES; i++)
 -        XGI_DEVFS_REMOVE_DEVICE(i);
 -    */
 -    XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices);
 -#endif
 -
 -    if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0)
 -        XGI_ERROR("unregister xgi chrdev failed\n");
 -
 -    XGI_INFO("Jong-unregister xgi chrdev scceeded\n");
 -    for (i = 0; i < XGI_MAX_DEVICES; i++)
 -    {
 -        if (xgi_devices[i].dev)
 -        {
 -        	/* clean up the flush2D batch array */
 -		    xgi_cmdlist_cleanup(&xgi_devices[i]);
 -
 -            if(xgi_devices[i].fb.vbase != NULL)
 -            {
 -                iounmap((void *)xgi_devices[i].fb.vbase);
 -                xgi_devices[i].fb.vbase = NULL;
 -            }
 -            if(xgi_devices[i].mmio.vbase != NULL)
 -            {
 -                iounmap((void *)xgi_devices[i].mmio.vbase);
 -                xgi_devices[i].mmio.vbase = NULL;
 -            }
 -
 -            //release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size);
 -            //XGI_INFO("release frame buffer mem region scceeded\n");
 -
 -            release_mem_region(xgi_devices[i].mmio.base, xgi_devices[i].mmio.size);
 -            XGI_INFO("release MMIO mem region scceeded\n");
 -
 -            xgi_fb_heap_cleanup(&xgi_devices[i]);
 -            XGI_INFO("xgi_fb_heap_cleanup scceeded\n");
 -
 -            xgi_pcie_heap_cleanup(&xgi_devices[i]);
 -            XGI_INFO("xgi_pcie_heap_cleanup scceeded\n");
 -
 -            XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev);
 -        }
 -    }
 -
 -    pci_unregister_driver(&xgi_pci_driver);
 -
 -    /* remove /proc/driver/xgi */
 -    xgi_proc_remove();
 -
 -#if defined(DEBUG)
 -    inter_module_unregister("xgi_devices");
 -#endif
 -}
 -
 -module_init(xgi_init_module);
 -module_exit(xgi_exit_module);
 -
 -#if defined(XGI_PM_SUPPORT_ACPI)
 -int xgi_acpi_event(struct pci_dev *dev, u32 state)
 -{
 -    return 1;
 -}
 -
 -int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state)
 -{
 -    return 1;
 -}
 -
 -int xgi_kern_acpi_resume(struct pci_dev *dev)
 -{
 -    return 1;
 -}
 -#endif
 -
 -MODULE_AUTHOR("Andrea Zhang <andrea_zhang@macrosynergy.com>");
 -MODULE_DESCRIPTION("xgi kernel driver for xgi cards");
 -MODULE_LICENSE("GPL");
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" +#include "xgi_cmdlist.h" + +/* for debug */ +static int xgi_temp = 1; +/* + * global parameters + */ +static struct xgi_dev { +	u16 vendor; +	u16 device; +	const char *name; +} xgidev_list[] = { +	{ +	PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XP5, "XP5"}, { +	PCI_VENDOR_ID_XGI, PCI_DEVICE_ID_XG47, "XG47"}, { +	0, 0, NULL} +}; + +int xgi_major = XGI_DEV_MAJOR;	/* xgi reserved major device number. */ + +static int xgi_num_devices = 0; + +xgi_info_t xgi_devices[XGI_MAX_DEVICES]; + +#if defined(XGI_PM_SUPPORT_APM) +static struct pm_dev *apm_xgi_dev[XGI_MAX_DEVICES] = { 0 }; +#endif + +/* add one for the control device */ +xgi_info_t xgi_ctl_device; +wait_queue_head_t xgi_ctl_waitqueue; + +#ifdef CONFIG_PROC_FS +struct proc_dir_entry *proc_xgi; +#endif + +#ifdef CONFIG_DEVFS_FS +devfs_handle_t xgi_devfs_handles[XGI_MAX_DEVICES]; +#endif + +struct list_head xgi_mempid_list; + +/* xgi_ functions.. 
do not take a state device parameter  */ +static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info); +static void xgi_proc_create(void); +static void xgi_proc_remove_all(struct proc_dir_entry *); +static void xgi_proc_remove(void); + +/* xgi_kern_ functions, interfaces used by linux kernel */ +int xgi_kern_probe(struct pci_dev *, const struct pci_device_id *); + +unsigned int xgi_kern_poll(struct file *, poll_table *); +int xgi_kern_ioctl(struct inode *, struct file *, unsigned int, unsigned long); +int xgi_kern_mmap(struct file *, struct vm_area_struct *); +int xgi_kern_open(struct inode *, struct file *); +int xgi_kern_release(struct inode *inode, struct file *filp); + +void xgi_kern_vma_open(struct vm_area_struct *vma); +void xgi_kern_vma_release(struct vm_area_struct *vma); +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, +				 unsigned long address, int *type); +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, +				 unsigned long address, int write_access); +#endif + +int xgi_kern_read_card_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_status(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_pcie_info(char *, char **, off_t off, int, int *, void *); +int xgi_kern_read_version(char *, char **, off_t off, int, int *, void *); + +int xgi_kern_ctl_open(struct inode *, struct file *); +int xgi_kern_ctl_close(struct inode *, struct file *); +unsigned int xgi_kern_ctl_poll(struct file *, poll_table *); + +void xgi_kern_isr_bh(unsigned long); +irqreturn_t xgi_kern_isr(int, void *, struct pt_regs *); + +static void xgi_lock_init(xgi_info_t * info); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_kern_acpi_standby(struct pci_dev *, u32); +int xgi_kern_acpi_resume(struct pci_dev *); +#endif + +/* + * verify access to pci config space wasn't disabled behind our back + * unfortunately, XFree86 enables/disables memory access in pci config space at + 
* various times (such as restoring initial pci config space settings during vt + * switches or when doing mulicard). As a result, all of our register accesses + * are garbage at this point. add a check to see if access was disabled and + * reenable any such access. + */ +#define XGI_CHECK_PCI_CONFIG(xgi) \ +    xgi_check_pci_config(xgi, __LINE__) + +static inline void xgi_check_pci_config(xgi_info_t * info, int line) +{ +	unsigned short cmd, flag = 0; + +	// don't do this on the control device, only the actual devices +	if (info->flags & XGI_FLAG_CONTROL) +		return; + +	pci_read_config_word(info->dev, PCI_COMMAND, &cmd); +	if (!(cmd & PCI_COMMAND_MASTER)) { +		XGI_INFO("restoring bus mastering! (%d)\n", line); +		cmd |= PCI_COMMAND_MASTER; +		flag = 1; +	} + +	if (!(cmd & PCI_COMMAND_MEMORY)) { +		XGI_INFO("restoring MEM access! (%d)\n", line); +		cmd |= PCI_COMMAND_MEMORY; +		flag = 1; +	} + +	if (flag) +		pci_write_config_word(info->dev, PCI_COMMAND, cmd); +} + +static int xgi_post_vbios(xgi_ioctl_post_vbios_t * info) +{ +	return 1; +} + +/* + * struct pci_device_id { + *  unsigned int vendor, device;        // Vendor and device ID or PCI_ANY_ID + *  unsigned int subvendor, subdevice;  // Subsystem ID's or PCI_ANY_ID + *  unsigned int class, class_mask;     // (class,subclass,prog-if) triplet + *  unsigned long driver_data;          // Data private to the driver + * }; + */ + +static struct pci_device_id xgi_dev_table[] = { +	{ +	 .vendor = PCI_VENDOR_ID_XGI, +	 .device = PCI_ANY_ID, +	 .subvendor = PCI_ANY_ID, +	 .subdevice = PCI_ANY_ID, +	 .class = (PCI_CLASS_DISPLAY_VGA << 8), +	 .class_mask = ~0, +	 }, +	{} +}; + +/* + *  #define MODULE_DEVICE_TABLE(type,name) \ + *      MODULE_GENERIC_TABLE(type##_device,name) + */ +MODULE_DEVICE_TABLE(pci, xgi_dev_table); + +/* + * struct pci_driver { + *  struct list_head node; + *  char *name; + *  const struct pci_device_id *id_table;   // NULL if wants all devices + *  int  (*probe)(struct pci_dev *dev, const struct 
pci_device_id *id); // New device inserted + *  void (*remove)(struct pci_dev *dev);    // Device removed (NULL if not a hot-plug capable driver) + *  int  (*save_state)(struct pci_dev *dev, u32 state);     // Save Device Context + *  int  (*suspend)(struct pci_dev *dev, u32 state);        // Device suspended + *  int  (*resume)(struct pci_dev *dev);                    // Device woken up + *  int  (*enable_wake)(struct pci_dev *dev, u32 state, int enable);   // Enable wake event + * }; + */ +static struct pci_driver xgi_pci_driver = { +	.name = "xgi", +	.id_table = xgi_dev_table, +	.probe = xgi_kern_probe, +#if defined(XGI_SUPPORT_ACPI) +	.suspend = xgi_kern_acpi_standby, +	.resume = xgi_kern_acpi_resume, +#endif +}; + +/* + * find xgi devices and set initial state + */ +int xgi_kern_probe(struct pci_dev *dev, const struct pci_device_id *id_table) +{ +	xgi_info_t *info; + +	if ((dev->vendor != PCI_VENDOR_ID_XGI) +	    || (dev->class != (PCI_CLASS_DISPLAY_VGA << 8))) { +		return -1; +	} + +	if (xgi_num_devices == XGI_MAX_DEVICES) { +		XGI_INFO("maximum device number (%d) reached!\n", +			 xgi_num_devices); +		return -1; +	} + +	/* enable io, mem, and bus-mastering in pci config space */ +	if (pci_enable_device(dev) != 0) { +		XGI_INFO("pci_enable_device failed, aborting\n"); +		return -1; +	} + +	XGI_INFO("maximum device number (%d) reached \n", xgi_num_devices); + +	pci_set_master(dev); + +	info = &xgi_devices[xgi_num_devices]; +	info->dev = dev; +	info->vendor_id = dev->vendor; +	info->device_id = dev->device; +	info->bus = dev->bus->number; +	info->slot = PCI_SLOT((dev)->devfn); + +	xgi_lock_init(info); + +	info->mmio.base = XGI_PCI_RESOURCE_START(dev, 1); +	info->mmio.size = XGI_PCI_RESOURCE_SIZE(dev, 1); + +	/* check IO region */ +	if (!request_mem_region(info->mmio.base, info->mmio.size, "xgi")) { +		XGI_ERROR("cannot reserve MMIO memory\n"); +		goto error_disable_dev; +	} + +	XGI_INFO("info->mmio.base: 0x%lx \n", info->mmio.base); +	XGI_INFO("info->mmio.size: 
0x%lx \n", info->mmio.size); + +	info->mmio.vbase = (unsigned char *)ioremap_nocache(info->mmio.base, +							    info->mmio.size); +	if (!info->mmio.vbase) { +		release_mem_region(info->mmio.base, info->mmio.size); +		XGI_ERROR("info->mmio.vbase failed\n"); +		goto error_disable_dev; +	} +	xgi_enable_mmio(info); + +	//xgi_enable_ge(info); + +	XGI_INFO("info->mmio.vbase: 0x%p \n", info->mmio.vbase); + +	info->fb.base = XGI_PCI_RESOURCE_START(dev, 0); +	info->fb.size = XGI_PCI_RESOURCE_SIZE(dev, 0); + +	XGI_INFO("info->fb.base: 0x%lx \n", info->fb.base); +	XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + +	info->fb.size = bIn3cf(0x54) * 8 * 1024 * 1024; +	XGI_INFO("info->fb.size: 0x%lx \n", info->fb.size); + +	/* check frame buffer region +	   if (!request_mem_region(info->fb.base, info->fb.size, "xgi")) +	   { +	   release_mem_region(info->mmio.base, info->mmio.size); +	   XGI_ERROR("cannot reserve frame buffer memory\n"); +	   goto error_disable_dev; +	   } + +	   info->fb.vbase = (unsigned char *)ioremap_nocache(info->fb.base, +	   info->fb.size); + +	   if (!info->fb.vbase) +	   { +	   release_mem_region(info->mmio.base, info->mmio.size); +	   release_mem_region(info->fb.base, info->fb.size); +	   XGI_ERROR("info->fb.vbase failed\n"); +	   goto error_disable_dev; +	   } +	 */ +	info->fb.vbase = NULL; +	XGI_INFO("info->fb.vbase: 0x%p \n", info->fb.vbase); + +	info->irq = dev->irq; + +	/* check common error condition */ +	if (info->irq == 0) { +		XGI_ERROR("Can't find an IRQ for your XGI card!  
\n"); +		goto error_zero_dev; +	} +	XGI_INFO("info->irq: %lx \n", info->irq); + +	//xgi_enable_dvi_interrupt(info); + +	/* sanity check the IO apertures */ +	if ((info->mmio.base == 0) || (info->mmio.size == 0) +	    || (info->fb.base == 0) || (info->fb.size == 0)) { +		XGI_ERROR("The IO regions for your XGI card are invalid.\n"); + +		if ((info->mmio.base == 0) || (info->mmio.size == 0)) { +			XGI_ERROR("mmio appears to be wrong: 0x%lx 0x%lx\n", +				  info->mmio.base, info->mmio.size); +		} + +		if ((info->fb.base == 0) || (info->fb.size == 0)) { +			XGI_ERROR +			    ("frame buffer appears to be wrong: 0x%lx 0x%lx\n", +			     info->fb.base, info->fb.size); +		} + +		goto error_zero_dev; +	} +	//xgi_num_devices++; + +	return 0; + +      error_zero_dev: +	release_mem_region(info->fb.base, info->fb.size); +	release_mem_region(info->mmio.base, info->mmio.size); + +      error_disable_dev: +	pci_disable_device(dev); +	return -1; + +} + +/* + * vma operations... + * this is only called when the vmas are duplicated. this + * appears to only happen when the process is cloned to create + * a new process, and not when the process is threaded. + * + * increment the usage count for the physical pages, so when + * this clone unmaps the mappings, the pages are not + * deallocated under the original process. 
+ */ +struct vm_operations_struct xgi_vm_ops = { +	.open = xgi_kern_vma_open, +	.close = xgi_kern_vma_release, +	.nopage = xgi_kern_vma_nopage, +}; + +void xgi_kern_vma_open(struct vm_area_struct *vma) +{ +	XGI_INFO("VM: vma_open for 0x%lx - 0x%lx, offset 0x%lx\n", +		 vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + +	if (XGI_VMA_PRIVATE(vma)) { +		xgi_pcie_block_t *block = +		    (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); +		XGI_ATOMIC_INC(block->use_count); +	} +} + +void xgi_kern_vma_release(struct vm_area_struct *vma) +{ +	XGI_INFO("VM: vma_release for 0x%lx - 0x%lx, offset 0x%lx\n", +		 vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + +	if (XGI_VMA_PRIVATE(vma)) { +		xgi_pcie_block_t *block = +		    (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); +		XGI_ATOMIC_DEC(block->use_count); + +		/* +		 * if use_count is down to 0, the kernel virtual mapping was freed +		 * but the underlying physical pages were not, we need to clear the +		 * bit and free the physical pages. +		 */ +		if (XGI_ATOMIC_READ(block->use_count) == 0) { +			// Need TO Finish +			XGI_VMA_PRIVATE(vma) = NULL; +		} +	} +} + +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 1)) +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, +				 unsigned long address, int *type) +{ +	xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); +	struct page *page = NOPAGE_SIGBUS; +	unsigned long offset = 0; +	unsigned long page_addr = 0; +/* +    XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", +              vma->vm_start, +              vma->vm_end, +              XGI_VMA_OFFSET(vma), +              address); +*/ +	offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + +	offset = offset - block->bus_addr; + +	offset >>= PAGE_SHIFT; + +	page_addr = block->page_table[offset].virt_addr; + +	if (xgi_temp) { +		XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" +			 "block->page_count: 0x%lx block->page_order: 0x%lx" +			 "block->page_table[0x%lx].virt_addr: 
0x%lx\n", +			 block->bus_addr, block->hw_addr, +			 block->page_count, block->page_order, +			 offset, block->page_table[offset].virt_addr); +		xgi_temp = 0; +	} + +	if (!page_addr) +		goto out;	/* hole or end-of-file */ +	page = virt_to_page(page_addr); + +	/* got it, now increment the count */ +	get_page(page); +      out: +	return page; + +} +#else +struct page *xgi_kern_vma_nopage(struct vm_area_struct *vma, +				 unsigned long address, int write_access) +{ +	xgi_pcie_block_t *block = (xgi_pcie_block_t *) XGI_VMA_PRIVATE(vma); +	struct page *page = NOPAGE_SIGBUS; +	unsigned long offset = 0; +	unsigned long page_addr = 0; +/* +    XGI_INFO("VM: mmap([0x%lx-0x%lx] off=0x%lx) address: 0x%lx \n", +              vma->vm_start, +              vma->vm_end, +              XGI_VMA_OFFSET(vma), +              address); +*/ +	offset = (address - vma->vm_start) + XGI_VMA_OFFSET(vma); + +	offset = offset - block->bus_addr; + +	offset >>= PAGE_SHIFT; + +	page_addr = block->page_table[offset].virt_addr; + +	if (xgi_temp) { +		XGI_INFO("block->bus_addr: 0x%lx block->hw_addr: 0x%lx" +			 "block->page_count: 0x%lx block->page_order: 0x%lx" +			 "block->page_table[0x%lx].virt_addr: 0x%lx\n", +			 block->bus_addr, block->hw_addr, +			 block->page_count, block->page_order, +			 offset, block->page_table[offset].virt_addr); +		xgi_temp = 0; +	} + +	if (!page_addr) +		goto out;	/* hole or end-of-file */ +	page = virt_to_page(page_addr); + +	/* got it, now increment the count */ +	get_page(page); +      out: +	return page; +} +#endif + +#if 0 +static struct file_operations xgi_fops = { +	/* owner:      THIS_MODULE, */ +      poll:xgi_kern_poll, +      ioctl:xgi_kern_ioctl, +      mmap:xgi_kern_mmap, +      open:xgi_kern_open, +      release:xgi_kern_release, +}; +#endif + +static struct file_operations xgi_fops = { +	.owner = THIS_MODULE, +	.poll = xgi_kern_poll, +	.ioctl = xgi_kern_ioctl, +	.mmap = xgi_kern_mmap, +	.open = xgi_kern_open, +	.release = xgi_kern_release, +}; + +static 
xgi_file_private_t *xgi_alloc_file_private(void) +{ +	xgi_file_private_t *fp; + +	XGI_KMALLOC(fp, sizeof(xgi_file_private_t)); +	if (!fp) +		return NULL; + +	memset(fp, 0, sizeof(xgi_file_private_t)); + +	/* initialize this file's event queue */ +	init_waitqueue_head(&fp->wait_queue); + +	xgi_init_lock(fp->fp_lock); + +	return fp; +} + +static void xgi_free_file_private(xgi_file_private_t * fp) +{ +	if (fp == NULL) +		return; + +	XGI_KFREE(fp, sizeof(xgi_file_private_t)); +} + +int xgi_kern_open(struct inode *inode, struct file *filp) +{ +	xgi_info_t *info = NULL; +	int dev_num; +	int result = 0, status; + +	/* +	 * the type and num values are only valid if we are not using devfs. +	 * However, since we use them to retrieve the device pointer, we +	 * don't need them with devfs as filp->private_data is already +	 * initialized +	 */ +	filp->private_data = xgi_alloc_file_private(); +	if (filp->private_data == NULL) +		return -ENOMEM; + +	XGI_INFO("filp->private_data %p\n", filp->private_data); +	/* +	 * for control device, just jump to its open routine +	 * after setting up the private data +	 */ +	if (XGI_IS_CONTROL_DEVICE(inode)) +		return xgi_kern_ctl_open(inode, filp); + +	/* what device are we talking about? 
*/ +	dev_num = XGI_DEVICE_NUMBER(inode); +	if (dev_num >= XGI_MAX_DEVICES) { +		xgi_free_file_private(filp->private_data); +		filp->private_data = NULL; +		return -ENODEV; +	} + +	info = &xgi_devices[dev_num]; + +	XGI_INFO("Jong-xgi_kern_open on device %d\n", dev_num); + +	xgi_down(info->info_sem); +	XGI_CHECK_PCI_CONFIG(info); + +	XGI_INFO_FROM_FP(filp) = info; + +	/* +	 * map the memory and allocate isr on first open +	 */ + +	if (!(info->flags & XGI_FLAG_OPEN)) { +		XGI_INFO("info->flags & XGI_FLAG_OPEN \n"); + +		if (info->device_id == 0) { +			XGI_INFO("open of nonexistent device %d\n", dev_num); +			result = -ENXIO; +			goto failed; +		} + +		/* initialize struct irqaction */ +		status = request_irq(info->irq, xgi_kern_isr, +				     SA_INTERRUPT | SA_SHIRQ, "xgi", +				     (void *)info); +		if (status != 0) { +			if (info->irq && (status == -EBUSY)) { +				XGI_ERROR +				    ("Tried to get irq %d, but another driver", +				     (unsigned int)info->irq); +				XGI_ERROR("has it and is not sharing it.\n"); +			} +			XGI_ERROR("isr request failed 0x%x\n", status); +			result = -EIO; +			goto failed; +		} + +		/* +		 * #define DECLARE_TASKLET(name, func, data) \ +		 * struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data } +		 */ +		info->tasklet.func = xgi_kern_isr_bh; +		info->tasklet.data = (unsigned long)info; +		tasklet_enable(&info->tasklet); + +		/* Alloc 1M bytes for cmdbuffer which is flush2D batch array */ +		xgi_cmdlist_initialize(info, 0x100000); + +		info->flags |= XGI_FLAG_OPEN; +	} + +	XGI_ATOMIC_INC(info->use_count); + +      failed: +	xgi_up(info->info_sem); + +	if ((result) && filp->private_data) { +		xgi_free_file_private(filp->private_data); +		filp->private_data = NULL; +	} + +	return result; +} + +int xgi_kern_release(struct inode *inode, struct file *filp) +{ +	xgi_info_t *info = XGI_INFO_FROM_FP(filp); + +	XGI_CHECK_PCI_CONFIG(info); + +	/* +	 * for control device, just jump to its open routine +	 * after setting up the 
private data +	 */ +	if (XGI_IS_CONTROL_DEVICE(inode)) +		return xgi_kern_ctl_close(inode, filp); + +	XGI_INFO("Jong-xgi_kern_release on device %d\n", +		 XGI_DEVICE_NUMBER(inode)); + +	xgi_down(info->info_sem); +	if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { + +		/* +		 * The usage count for this device has dropped to zero, it can be shut +		 * down safely; disable its interrupts. +		 */ + +		/* +		 * Disable this device's tasklet to make sure that no bottom half will +		 * run with undefined device state. +		 */ +		tasklet_disable(&info->tasklet); + +		/* +		 * Free the IRQ, which may block until all pending interrupt processing +		 * has completed. +		 */ +		free_irq(info->irq, (void *)info); + +		xgi_cmdlist_cleanup(info); + +		/* leave INIT flag alone so we don't reinit every time */ +		info->flags &= ~XGI_FLAG_OPEN; +	} + +	xgi_up(info->info_sem); + +	if (FILE_PRIVATE(filp)) { +		xgi_free_file_private(FILE_PRIVATE(filp)); +		FILE_PRIVATE(filp) = NULL; +	} + +	return 0; +} + +int xgi_kern_mmap(struct file *filp, struct vm_area_struct *vma) +{ +	//struct inode        *inode = INODE_FROM_FP(filp); +	xgi_info_t *info = XGI_INFO_FROM_FP(filp); +	xgi_pcie_block_t *block; +	int pages = 0; +	unsigned long prot; + +	XGI_INFO("Jong-VM: mmap([0x%lx-0x%lx] off=0x%lx)\n", +		 vma->vm_start, vma->vm_end, XGI_VMA_OFFSET(vma)); + +	XGI_CHECK_PCI_CONFIG(info); + +	if (XGI_MASK_OFFSET(vma->vm_start) +	    || XGI_MASK_OFFSET(vma->vm_end)) { +		XGI_ERROR("VM: bad mmap range: %lx - %lx\n", +			  vma->vm_start, vma->vm_end); +		return -ENXIO; +	} + +	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + +	vma->vm_ops = &xgi_vm_ops; + +	/* XGI IO(reg) space */ +	if (IS_IO_OFFSET +	    (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { +		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +		if (XGI_REMAP_PAGE_RANGE(vma->vm_start, +					 XGI_VMA_OFFSET(vma), +					 vma->vm_end - vma->vm_start, +					 vma->vm_page_prot)) +			return -EAGAIN; + +		/* mark it as IO so 
that we don't dump it on core dump */ +		vma->vm_flags |= VM_IO; +		XGI_INFO("VM: mmap io space \n"); +	} +	/* XGI fb space */ +	/* Jong 06/14/2006; moved behind PCIE or modify IS_FB_OFFSET */ +	else if (IS_FB_OFFSET +		 (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { +		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +		if (XGI_REMAP_PAGE_RANGE(vma->vm_start, +					 XGI_VMA_OFFSET(vma), +					 vma->vm_end - vma->vm_start, +					 vma->vm_page_prot)) +			return -EAGAIN; + +		// mark it as IO so that we don't dump it on core dump +		vma->vm_flags |= VM_IO; +		XGI_INFO("VM: mmap fb space \n"); +	} +	/* PCIE allocator */ +	/* XGI_VMA_OFFSET(vma) is offset based on pcie.base (HW address space) */ +	else if (IS_PCIE_OFFSET +		 (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { +		xgi_down(info->pcie_sem); + +		block = +		    (xgi_pcie_block_t *) xgi_find_pcie_block(info, +							     XGI_VMA_OFFSET +							     (vma)); + +		if (block == NULL) { +			XGI_ERROR("couldn't find pre-allocated PCIE memory!\n"); +			xgi_up(info->pcie_sem); +			return -EAGAIN; +		} + +		if (block->page_count != pages) { +			XGI_ERROR +			    ("pre-allocated PCIE memory has wrong number of pages!\n"); +			xgi_up(info->pcie_sem); +			return -EAGAIN; +		} + +		vma->vm_private_data = block; +		XGI_ATOMIC_INC(block->use_count); +		xgi_up(info->pcie_sem); + +		/* +		 * prevent the swapper from swapping it out +		 * mark the memory i/o so the buffers aren't +		 * dumped on core dumps */ +		vma->vm_flags |= (VM_LOCKED | VM_IO); + +		/* un-cached */ +		prot = pgprot_val(vma->vm_page_prot); +		/*  +		   if (boot_cpu_data.x86 > 3) +		   prot |= _PAGE_PCD | _PAGE_PWT; +		 */ +		vma->vm_page_prot = __pgprot(prot); + +		XGI_INFO("VM: mmap pcie space \n"); +	} +#if 0 +	else if (IS_FB_OFFSET +		 (info, XGI_VMA_OFFSET(vma), vma->vm_end - vma->vm_start)) { +		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); +		if (XGI_REMAP_PAGE_RANGE(vma->vm_start, +					 XGI_VMA_OFFSET(vma), 
+					 vma->vm_end - vma->vm_start, +					 vma->vm_page_prot)) +			return -EAGAIN; + +		// mark it as IO so that we don't dump it on core dump +		vma->vm_flags |= VM_IO; +		XGI_INFO("VM: mmap fb space \n"); +	} +#endif +	else { +		vma->vm_flags |= (VM_IO | VM_LOCKED); +		XGI_ERROR("VM: mmap wrong range \n"); +	} + +	vma->vm_file = filp; + +	return 0; +} + +unsigned int xgi_kern_poll(struct file *filp, struct poll_table_struct *wait) +{ +	xgi_file_private_t *fp; +	xgi_info_t *info; +	unsigned int mask = 0; +	unsigned long eflags; + +	info = XGI_INFO_FROM_FP(filp); + +	if (info->device_number == XGI_CONTROL_DEVICE_NUMBER) +		return xgi_kern_ctl_poll(filp, wait); + +	fp = XGI_GET_FP(filp); + +	if (!(filp->f_flags & O_NONBLOCK)) { +		/* add us to the list */ +		poll_wait(filp, &fp->wait_queue, wait); +	} + +	xgi_lock_irqsave(fp->fp_lock, eflags); + +	/* wake the user on any event */ +	if (fp->num_events) { +		XGI_INFO("Hey, an event occured!\n"); +		/* +		 * trigger the client, when they grab the event, +		 * we'll decrement the event count +		 */ +		mask |= (POLLPRI | POLLIN); +	} +	xgi_unlock_irqsave(fp->fp_lock, eflags); + +	return mask; +} + +int xgi_kern_ioctl(struct inode *inode, struct file *filp, +		   unsigned int cmd, unsigned long arg) +{ +	xgi_info_t *info; +	xgi_mem_alloc_t *alloc = NULL; + +	int status = 0; +	void *arg_copy; +	int arg_size; +	int err = 0; + +	info = XGI_INFO_FROM_FP(filp); + +	XGI_INFO("Jong-ioctl(0x%x, 0x%x, 0x%lx, 0x%x)\n", _IOC_TYPE(cmd), +		 _IOC_NR(cmd), arg, _IOC_SIZE(cmd)); +	/* +	 * extract the type and number bitfields, and don't decode +	 * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() +	 */ +	if (_IOC_TYPE(cmd) != XGI_IOCTL_MAGIC) +		return -ENOTTY; +	if (_IOC_NR(cmd) > XGI_IOCTL_MAXNR) +		return -ENOTTY; + +	/* +	 * the direction is a bitmask, and VERIFY_WRITE catches R/W +	 * transfers. 
`Type' is user-oriented, while +	 * access_ok is kernel-oriented, so the concept of "read" and +	 * "write" is reversed +	 */ +	if (_IOC_DIR(cmd) & _IOC_READ) { +		err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd)); +	} else if (_IOC_DIR(cmd) & _IOC_WRITE) { +		err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd)); +	} +	if (err) +		return -EFAULT; + +	XGI_CHECK_PCI_CONFIG(info); + +	arg_size = _IOC_SIZE(cmd); +	XGI_KMALLOC(arg_copy, arg_size); +	if (arg_copy == NULL) { +		XGI_ERROR("failed to allocate ioctl memory\n"); +		return -ENOMEM; +	} + +	/* Jong 05/25/2006 */ +	/* copy_from_user(arg_copy, (void *)arg, arg_size); */ +	if (copy_from_user(arg_copy, (void *)arg, arg_size)) { +		XGI_ERROR("failed to copyin ioctl data\n"); +		XGI_INFO("Jong-copy_from_user-fail! \n"); +	} else +		XGI_INFO("Jong-copy_from_user-OK! \n"); + +	alloc = (xgi_mem_alloc_t *) arg_copy; +	XGI_INFO("Jong-succeeded in copy_from_user 0x%lx, 0x%x bytes.\n", arg, +		 arg_size); + +	switch (_IOC_NR(cmd)) { +	case XGI_ESC_DEVICE_INFO: +		XGI_INFO("Jong-xgi_ioctl_get_device_info \n"); +		xgi_get_device_info(info, (struct xgi_chip_info_s *)arg_copy); +		break; +	case XGI_ESC_POST_VBIOS: +		XGI_INFO("Jong-xgi_ioctl_post_vbios \n"); +		break; +	case XGI_ESC_FB_ALLOC: +		XGI_INFO("Jong-xgi_ioctl_fb_alloc \n"); +		xgi_fb_alloc(info, (struct xgi_mem_req_s *)arg_copy, alloc); +		break; +	case XGI_ESC_FB_FREE: +		XGI_INFO("Jong-xgi_ioctl_fb_free \n"); +		xgi_fb_free(info, *(unsigned long *)arg_copy); +		break; +	case XGI_ESC_MEM_COLLECT: +		XGI_INFO("Jong-xgi_ioctl_mem_collect \n"); +		xgi_mem_collect(info, (unsigned int *)arg_copy); +		break; +	case XGI_ESC_PCIE_ALLOC: +		XGI_INFO("Jong-xgi_ioctl_pcie_alloc \n"); +		xgi_pcie_alloc(info, ((xgi_mem_req_t *) arg_copy)->size, +			       ((xgi_mem_req_t *) arg_copy)->owner, alloc); +		break; +	case XGI_ESC_PCIE_FREE: +		XGI_INFO("Jong-xgi_ioctl_pcie_free: bus_addr = 0x%lx \n", +			 *((unsigned long *)arg_copy)); +		xgi_pcie_free(info, 
*((unsigned long *)arg_copy)); +		break; +	case XGI_ESC_PCIE_CHECK: +		XGI_INFO("Jong-xgi_pcie_heap_check \n"); +		xgi_pcie_heap_check(); +		break; +	case XGI_ESC_GET_SCREEN_INFO: +		XGI_INFO("Jong-xgi_get_screen_info \n"); +		xgi_get_screen_info(info, (struct xgi_screen_info_s *)arg_copy); +		break; +	case XGI_ESC_PUT_SCREEN_INFO: +		XGI_INFO("Jong-xgi_put_screen_info \n"); +		xgi_put_screen_info(info, (struct xgi_screen_info_s *)arg_copy); +		break; +	case XGI_ESC_MMIO_INFO: +		XGI_INFO("Jong-xgi_ioctl_get_mmio_info \n"); +		xgi_get_mmio_info(info, (struct xgi_mmio_info_s *)arg_copy); +		break; +	case XGI_ESC_GE_RESET: +		XGI_INFO("Jong-xgi_ioctl_ge_reset \n"); +		xgi_ge_reset(info); +		break; +	case XGI_ESC_SAREA_INFO: +		XGI_INFO("Jong-xgi_ioctl_sarea_info \n"); +		xgi_sarea_info(info, (struct xgi_sarea_info_s *)arg_copy); +		break; +	case XGI_ESC_DUMP_REGISTER: +		XGI_INFO("Jong-xgi_ioctl_dump_register \n"); +		xgi_dump_register(info); +		break; +	case XGI_ESC_DEBUG_INFO: +		XGI_INFO("Jong-xgi_ioctl_restore_registers \n"); +		xgi_restore_registers(info); +		//xgi_write_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); +		//xgi_read_pcie_mem(info, (struct xgi_mem_req_s *) arg_copy); +		break; +	case XGI_ESC_SUBMIT_CMDLIST: +		XGI_INFO("Jong-xgi_ioctl_submit_cmdlist \n"); +		xgi_submit_cmdlist(info, (xgi_cmd_info_t *) arg_copy); +		break; +	case XGI_ESC_TEST_RWINKERNEL: +		XGI_INFO("Jong-xgi_test_rwinkernel \n"); +		xgi_test_rwinkernel(info, *(unsigned long *)arg_copy); +		break; +	case XGI_ESC_STATE_CHANGE: +		XGI_INFO("Jong-xgi_state_change \n"); +		xgi_state_change(info, (xgi_state_info_t *) arg_copy); +		break; +	case XGI_ESC_CPUID: +		XGI_INFO("Jong-XGI_ESC_CPUID \n"); +		xgi_get_cpu_id((struct cpu_info_s *)arg_copy); +		break; +	default: +		XGI_INFO("Jong-xgi_ioctl_default \n"); +		status = -EINVAL; +		break; +	} + +	if (copy_to_user((void *)arg, arg_copy, arg_size)) { +		XGI_ERROR("failed to copyout ioctl data\n"); +		XGI_INFO("Jong-copy_to_user-fail! 
\n"); +	} else +		XGI_INFO("Jong-copy_to_user-OK! \n"); + +	XGI_KFREE(arg_copy, arg_size); +	return status; +} + +/* + * xgi control driver operations defined here + */ +int xgi_kern_ctl_open(struct inode *inode, struct file *filp) +{ +	xgi_info_t *info = &xgi_ctl_device; + +	int rc = 0; + +	XGI_INFO("Jong-xgi_kern_ctl_open\n"); + +	xgi_down(info->info_sem); +	info->device_number = XGI_CONTROL_DEVICE_NUMBER; + +	/* save the xgi info in file->private_data */ +	filp->private_data = info; + +	if (XGI_ATOMIC_READ(info->use_count) == 0) { +		init_waitqueue_head(&xgi_ctl_waitqueue); +	} + +	info->flags |= XGI_FLAG_OPEN + XGI_FLAG_CONTROL; + +	XGI_ATOMIC_INC(info->use_count); +	xgi_up(info->info_sem); + +	return rc; +} + +int xgi_kern_ctl_close(struct inode *inode, struct file *filp) +{ +	xgi_info_t *info = XGI_INFO_FROM_FP(filp); + +	XGI_INFO("Jong-xgi_kern_ctl_close\n"); + +	xgi_down(info->info_sem); +	if (XGI_ATOMIC_DEC_AND_TEST(info->use_count)) { +		info->flags = 0; +	} +	xgi_up(info->info_sem); + +	if (FILE_PRIVATE(filp)) { +		xgi_free_file_private(FILE_PRIVATE(filp)); +		FILE_PRIVATE(filp) = NULL; +	} + +	return 0; +} + +unsigned int xgi_kern_ctl_poll(struct file *filp, poll_table * wait) +{ +	//xgi_info_t  *info = XGI_INFO_FROM_FP(filp);; +	unsigned int ret = 0; + +	if (!(filp->f_flags & O_NONBLOCK)) { +		poll_wait(filp, &xgi_ctl_waitqueue, wait); +	} + +	return ret; +} + +/* + * xgi proc system + */ +static u8 xgi_find_pcie_capability(struct pci_dev *dev) +{ +	u16 status; +	u8 cap_ptr, cap_id; + +	pci_read_config_word(dev, PCI_STATUS, &status); +	status &= PCI_STATUS_CAP_LIST; +	if (!status) +		return 0; + +	switch (dev->hdr_type) { +	case PCI_HEADER_TYPE_NORMAL: +	case PCI_HEADER_TYPE_BRIDGE: +		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &cap_ptr); +		break; +	default: +		return 0; +	} + +	do { +		cap_ptr &= 0xFC; +		pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_ID, &cap_id); +		pci_read_config_byte(dev, cap_ptr + PCI_CAP_LIST_NEXT, +				     &cap_ptr); +	
} while (cap_ptr && cap_id != 0xFF); + +	return 0; +} + +static struct pci_dev *xgi_get_pci_device(xgi_info_t * info) +{ +	struct pci_dev *dev; + +	dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, NULL); +	while (dev) { +		if (XGI_PCI_SLOT_NUMBER(dev) == info->slot +		    && XGI_PCI_BUS_NUMBER(dev) == info->bus) +			return dev; +		dev = XGI_PCI_GET_DEVICE(info->vendor_id, info->device_id, dev); +	} + +	return NULL; +} + +int xgi_kern_read_card_info(char *page, char **start, off_t off, +			    int count, int *eof, void *data) +{ +	struct pci_dev *dev; +	char *type; +	int len = 0; + +	xgi_info_t *info; +	info = (xgi_info_t *) data; + +	dev = xgi_get_pci_device(info); +	if (!dev) +		return 0; + +	type = xgi_find_pcie_capability(dev) ? "PCIE" : "PCI"; +	len += sprintf(page + len, "Card Type: \t %s\n", type); + +	XGI_PCI_DEV_PUT(dev); +	return len; +} + +int xgi_kern_read_version(char *page, char **start, off_t off, +			  int count, int *eof, void *data) +{ +	int len = 0; + +	len += sprintf(page + len, "XGI version: %s\n", "1.0"); +	len += sprintf(page + len, "GCC version:  %s\n", "3.0"); + +	return len; +} + +int xgi_kern_read_pcie_info(char *page, char **start, off_t off, +			    int count, int *eof, void *data) +{ +	return 0; +} + +int xgi_kern_read_status(char *page, char **start, off_t off, +			 int count, int *eof, void *data) +{ +	return 0; +} + +static void xgi_proc_create(void) +{ +#ifdef CONFIG_PROC_FS + +	struct pci_dev *dev; +	int i = 0; +	char name[6]; + +	struct proc_dir_entry *entry; +	struct proc_dir_entry *proc_xgi_pcie, *proc_xgi_cards; + +	xgi_info_t *info; +	xgi_info_t *xgi_max_devices; + +	/* world readable directory */ +	int flags = S_IFDIR | S_IRUGO | S_IXUGO; + +	proc_xgi = create_proc_entry("xgi", flags, proc_root_driver); +	if (!proc_xgi) +		goto failed; + +	proc_xgi_cards = create_proc_entry("cards", flags, proc_xgi); +	if (!proc_xgi_cards) +		goto failed; + +	proc_xgi_pcie = create_proc_entry("pcie", flags, proc_xgi); +	if 
(!proc_xgi_pcie) +		goto failed; + +	/* +	 * Set the module owner to ensure that the reference +	 * count reflects accesses to the proc files. +	 */ +	proc_xgi->owner = THIS_MODULE; +	proc_xgi_cards->owner = THIS_MODULE; +	proc_xgi_pcie->owner = THIS_MODULE; + +	xgi_max_devices = xgi_devices + XGI_MAX_DEVICES; +	for (info = xgi_devices; info < xgi_max_devices; info++) { +		if (info->device_id == 0) +			break; + +		/* world readable file */ +		flags = S_IFREG | S_IRUGO; + +		dev = xgi_get_pci_device(info); +		if (!dev) +			break; + +		sprintf(name, "%d", i++); +		entry = create_proc_entry(name, flags, proc_xgi_cards); +		if (!entry) { +			XGI_PCI_DEV_PUT(dev); +			goto failed; +		} + +		entry->data = info; +		entry->read_proc = xgi_kern_read_card_info; +		entry->owner = THIS_MODULE; + +		if (xgi_find_pcie_capability(dev)) { +			entry = +			    create_proc_entry("status", flags, proc_xgi_pcie); +			if (!entry) { +				XGI_PCI_DEV_PUT(dev); +				goto failed; +			} + +			entry->data = info; +			entry->read_proc = xgi_kern_read_status; +			entry->owner = THIS_MODULE; + +			entry = create_proc_entry("card", flags, proc_xgi_pcie); +			if (!entry) { +				XGI_PCI_DEV_PUT(dev); +				goto failed; +			} + +			entry->data = info; +			entry->read_proc = xgi_kern_read_pcie_info; +			entry->owner = THIS_MODULE; +		} + +		XGI_PCI_DEV_PUT(dev); +	} + +	entry = create_proc_entry("version", flags, proc_xgi); +	if (!entry) +		goto failed; + +	entry->read_proc = xgi_kern_read_version; +	entry->owner = THIS_MODULE; + +	entry = create_proc_entry("host-bridge", flags, proc_xgi_pcie); +	if (!entry) +		goto failed; + +	entry->data = NULL; +	entry->read_proc = xgi_kern_read_pcie_info; +	entry->owner = THIS_MODULE; + +	return; + +      failed: +	XGI_ERROR("failed to create /proc entries!\n"); +	xgi_proc_remove_all(proc_xgi); +#endif +} + +#ifdef CONFIG_PROC_FS +static void xgi_proc_remove_all(struct proc_dir_entry *entry) +{ +	while (entry) { +		struct proc_dir_entry *next = entry->next; +		if 
(entry->subdir) +			xgi_proc_remove_all(entry->subdir); +		remove_proc_entry(entry->name, entry->parent); +		if (entry == proc_xgi) +			break; +		entry = next; +	} +} +#endif + +static void xgi_proc_remove(void) +{ +#ifdef CONFIG_PROC_FS +	xgi_proc_remove_all(proc_xgi); +#endif +} + +/* + * driver receives an interrupt if someone waiting, then hand it off. + */ +irqreturn_t xgi_kern_isr(int irq, void *dev_id, struct pt_regs *regs) +{ +	xgi_info_t *info = (xgi_info_t *) dev_id; +	u32 need_to_run_bottom_half = 0; + +	//XGI_INFO("xgi_kern_isr \n"); + +	//XGI_CHECK_PCI_CONFIG(info); + +	//xgi_dvi_irq_handler(info); + +	if (need_to_run_bottom_half) { +		tasklet_schedule(&info->tasklet); +	} + +	return IRQ_HANDLED; +} + +void xgi_kern_isr_bh(unsigned long data) +{ +	xgi_info_t *info = (xgi_info_t *) data; + +	XGI_INFO("xgi_kern_isr_bh \n"); + +	//xgi_dvi_irq_handler(info); + +	XGI_CHECK_PCI_CONFIG(info); +} + +static void xgi_lock_init(xgi_info_t * info) +{ +	if (info == NULL) +		return; + +	spin_lock_init(&info->info_lock); + +	sema_init(&info->info_sem, 1); +	sema_init(&info->fb_sem, 1); +	sema_init(&info->pcie_sem, 1); + +	XGI_ATOMIC_SET(info->use_count, 0); +} + +static void xgi_dev_init(xgi_info_t * info) +{ +	struct pci_dev *pdev = NULL; +	struct xgi_dev *dev; +	int found = 0; +	u16 pci_cmd; + +	XGI_INFO("Enter xgi_dev_init \n"); + +	//XGI_PCI_FOR_EACH_DEV(pdev) +	{ +		for (dev = xgidev_list; dev->vendor; dev++) { +			if ((dev->vendor == pdev->vendor) +			    && (dev->device == pdev->device)) { +				XGI_INFO("dev->vendor = pdev->vendor= %x \n", +					 dev->vendor); +				XGI_INFO("dev->device = pdev->device= %x \n", +					 dev->device); + +				xgi_devices[found].device_id = pdev->device; + +				pci_read_config_byte(pdev, PCI_REVISION_ID, +						     &xgi_devices[found]. 
+						     revision_id); + +				XGI_INFO("PCI_REVISION_ID= %x \n", +					 xgi_devices[found].revision_id); + +				pci_read_config_word(pdev, PCI_COMMAND, +						     &pci_cmd); + +				XGI_INFO("PCI_COMMAND = %x \n", pci_cmd); + +				break; +			} +		} +	} +} + +/* + * Export to Linux Kernel + */ + +static int __init xgi_init_module(void) +{ +	xgi_info_t *info = &xgi_devices[xgi_num_devices]; +	int i, result; + +	XGI_INFO("Jong-xgi kernel driver %s initializing\n", XGI_DRV_VERSION); +	//SET_MODULE_OWNER(&xgi_fops); + +	memset(xgi_devices, 0, sizeof(xgi_devices)); + +	if (pci_register_driver(&xgi_pci_driver) < 0) { +		pci_unregister_driver(&xgi_pci_driver); +		XGI_ERROR("no XGI graphics adapter found\n"); +		return -ENODEV; +	} + +	XGI_INFO("Jong-xgi_devices[%d].fb.base.: 0x%lx \n", xgi_num_devices, +		 xgi_devices[xgi_num_devices].fb.base); +	XGI_INFO("Jong-xgi_devices[%d].fb.size.: 0x%lx \n", xgi_num_devices, +		 xgi_devices[xgi_num_devices].fb.size); + +/* Jong 07/27/2006; test for ubuntu */ +/* +#ifdef CONFIG_DEVFS_FS + +    XGI_INFO("Jong-Use devfs \n"); +    do +    { +        xgi_devfs_handles[0] = XGI_DEVFS_REGISTER("xgi", 0); +        if (xgi_devfs_handles[0] == NULL) +        { +            result = -ENOMEM; +            XGI_ERROR("devfs register failed\n"); +            goto failed; +        } +    } while(0); +	#else *//* no devfs, do it the "classic" way  */ + +	XGI_INFO("Jong-Use non-devfs \n"); +	/* +	 * Register your major, and accept a dynamic number. 
This is the +	 * first thing to do, in order to avoid releasing other module's +	 * fops in scull_cleanup_module() +	 */ +	result = XGI_REGISTER_CHRDEV(xgi_major, "xgi", &xgi_fops); +	if (result < 0) { +		XGI_ERROR("register chrdev failed\n"); +		pci_unregister_driver(&xgi_pci_driver); +		return result; +	} +	if (xgi_major == 0) +		xgi_major = result;	/* dynamic */ + +	/* #endif *//* CONFIG_DEVFS_FS */ + +	XGI_INFO("Jong-major number %d\n", xgi_major); + +	/* instantiate tasklets */ +	for (i = 0; i < XGI_MAX_DEVICES; i++) { +		/* +		 * We keep one tasklet per card to avoid latency issues with more +		 * than one device; no two instances of a single tasklet are ever +		 * executed concurrently. +		 */ +		XGI_ATOMIC_SET(xgi_devices[i].tasklet.count, 1); +	} + +	/* init the xgi control device */ +	{ +		xgi_info_t *info_ctl = &xgi_ctl_device; +		xgi_lock_init(info_ctl); +	} + +	/* Init the resource manager */ +	INIT_LIST_HEAD(&xgi_mempid_list); +	if (!xgi_fb_heap_init(info)) { +		XGI_ERROR("xgi_fb_heap_init() failed\n"); +		result = -EIO; +		goto failed; +	} + +	/* Init the resource manager */ +	if (!xgi_pcie_heap_init(info)) { +		XGI_ERROR("xgi_pcie_heap_init() failed\n"); +		result = -EIO; +		goto failed; +	} + +	/* create /proc/driver/xgi */ +	xgi_proc_create(); + +#if defined(DEBUG) +	inter_module_register("xgi_devices", THIS_MODULE, xgi_devices); +#endif + +	return 0; + +      failed: +#ifdef CONFIG_DEVFS_FS +	XGI_DEVFS_REMOVE_CONTROL(); +	XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + +	if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) +		XGI_ERROR("unregister xgi chrdev failed\n"); + +	for (i = 0; i < xgi_num_devices; i++) { +		if (xgi_devices[i].dev) { +			release_mem_region(xgi_devices[i].fb.base, +					   xgi_devices[i].fb.size); +			release_mem_region(xgi_devices[i].mmio.base, +					   xgi_devices[i].mmio.size); +		} +	} + +	pci_unregister_driver(&xgi_pci_driver); +	return result; + +	return 1; +} + +void __exit xgi_exit_module(void) +{ +	int i; +	
xgi_info_t *info, *max_devices; + +#ifdef CONFIG_DEVFS_FS +	/* +	   XGI_DEVFS_REMOVE_CONTROL(); +	   for (i = 0; i < XGI_MAX_DEVICES; i++) +	   XGI_DEVFS_REMOVE_DEVICE(i); +	 */ +	XGI_DEVFS_REMOVE_DEVICE(xgi_num_devices); +#endif + +	if (XGI_UNREGISTER_CHRDEV(xgi_major, "xgi") < 0) +		XGI_ERROR("unregister xgi chrdev failed\n"); + +	XGI_INFO("Jong-unregister xgi chrdev scceeded\n"); +	for (i = 0; i < XGI_MAX_DEVICES; i++) { +		if (xgi_devices[i].dev) { +			/* clean up the flush2D batch array */ +			xgi_cmdlist_cleanup(&xgi_devices[i]); + +			if (xgi_devices[i].fb.vbase != NULL) { +				iounmap((void *)xgi_devices[i].fb.vbase); +				xgi_devices[i].fb.vbase = NULL; +			} +			if (xgi_devices[i].mmio.vbase != NULL) { +				iounmap((void *)xgi_devices[i].mmio.vbase); +				xgi_devices[i].mmio.vbase = NULL; +			} +			//release_mem_region(xgi_devices[i].fb.base, xgi_devices[i].fb.size); +			//XGI_INFO("release frame buffer mem region scceeded\n"); + +			release_mem_region(xgi_devices[i].mmio.base, +					   xgi_devices[i].mmio.size); +			XGI_INFO("release MMIO mem region scceeded\n"); + +			xgi_fb_heap_cleanup(&xgi_devices[i]); +			XGI_INFO("xgi_fb_heap_cleanup scceeded\n"); + +			xgi_pcie_heap_cleanup(&xgi_devices[i]); +			XGI_INFO("xgi_pcie_heap_cleanup scceeded\n"); + +			XGI_PCI_DISABLE_DEVICE(xgi_devices[i].dev); +		} +	} + +	pci_unregister_driver(&xgi_pci_driver); + +	/* remove /proc/driver/xgi */ +	xgi_proc_remove(); + +#if defined(DEBUG) +	inter_module_unregister("xgi_devices"); +#endif +} + +module_init(xgi_init_module); +module_exit(xgi_exit_module); + +#if defined(XGI_PM_SUPPORT_ACPI) +int xgi_acpi_event(struct pci_dev *dev, u32 state) +{ +	return 1; +} + +int xgi_kern_acpi_standby(struct pci_dev *dev, u32 state) +{ +	return 1; +} + +int xgi_kern_acpi_resume(struct pci_dev *dev) +{ +	return 1; +} +#endif + +MODULE_AUTHOR("Andrea Zhang <andrea_zhang@macrosynergy.com>"); +MODULE_DESCRIPTION("xgi kernel driver for xgi cards"); +MODULE_LICENSE("GPL"); diff --git 
a/linux-core/xgi_drv.h b/linux-core/xgi_drv.h index 568a7af1..429719a7 100644 --- a/linux-core/xgi_drv.h +++ b/linux-core/xgi_drv.h @@ -1,364 +1,364 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#ifndef _XGI_DRV_H_
 -#define _XGI_DRV_H_
 -
 -#define XGI_MAJOR_VERSION   0
 -#define XGI_MINOR_VERSION   7
 -#define XGI_PATCHLEVEL      5
 -
 -#define XGI_DRV_VERSION     "0.7.5"
 -
 -#ifndef XGI_DRV_NAME
 -#define XGI_DRV_NAME        "xgi"
 -#endif
 -
 -/*
 - * xgi reserved major device number, Set this to 0 to
 - * request dynamic major number allocation.
 - */
 -#ifndef XGI_DEV_MAJOR
 -#define XGI_DEV_MAJOR   0
 -#endif
 -
 -#ifndef XGI_MAX_DEVICES
 -#define XGI_MAX_DEVICES 1
 -#endif
 -
 -/* Jong 06/06/2006 */
 -/* #define XGI_DEBUG */
 -
 -#ifndef PCI_VENDOR_ID_XGI
 -/*
 -#define PCI_VENDOR_ID_XGI       0x1023
 -*/
 -#define PCI_VENDOR_ID_XGI       0x18CA
 -
 -#endif
 -
 -#ifndef PCI_DEVICE_ID_XP5
 -#define PCI_DEVICE_ID_XP5       0x2200
 -#endif
 -
 -#ifndef PCI_DEVICE_ID_XG47
 -#define PCI_DEVICE_ID_XG47      0x0047
 -#endif
 -
 -/* Macros to make printk easier */
 -#define XGI_ERROR(fmt, arg...) \
 -    printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
 -
 -#define XGI_MEM_ERROR(area, fmt, arg...) \
 -    printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg)
 -
 -/* #define XGI_DEBUG */ 
 -
 -#ifdef XGI_DEBUG
 -#define XGI_INFO(fmt, arg...) \
 -    printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg)
 -/*    printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */
 -#else
 -#define XGI_INFO(fmt, arg...)   do { } while (0)
 -#endif
 -
 -/* device name length; must be atleast 8 */
 -#define XGI_DEVICE_NAME_LENGTH      40
 -
 -/* need a fake device number for control device; just to flag it for msgs */
 -#define XGI_CONTROL_DEVICE_NUMBER   100
 -
 -typedef struct {
 -    U32 base;   // pcie base is different from fb base
 -    U32 size;
 -    U8 *vbase;
 -} xgi_aperture_t;
 -
 -typedef struct xgi_screen_info_s {
 -    U32     scrn_start;
 -    U32     scrn_xres;
 -    U32     scrn_yres;
 -    U32     scrn_bpp;
 -    U32     scrn_pitch;
 -} xgi_screen_info_t;
 -
 -typedef struct xgi_sarea_info_s {
 -    U32                 bus_addr;
 -    U32                 size;
 -} xgi_sarea_info_t;
 -
 -typedef struct xgi_info_s {
 -    struct pci_dev  *dev;
 -    int             flags;
 -    int             device_number;
 -    int             bus;    /* PCI config info */
 -    int             slot;
 -    int             vendor_id;
 -    U32             device_id;
 -    U8              revision_id;
 -
 -    /* physical characteristics */
 -    xgi_aperture_t  mmio;
 -    xgi_aperture_t  fb;
 -    xgi_aperture_t  pcie;
 -    xgi_screen_info_t scrn_info;
 -    xgi_sarea_info_t sarea_info;
 -
 -    /* look up table parameters */
 -    U32             *lut_base;
 -    U32             lutPageSize;
 -    U32             lutPageOrder;
 -    U32             isLUTInLFB;
 -    U32             sdfbPageSize;
 -
 -    U32             pcie_config;
 -    U32             pcie_status;
 -    U32             irq;
 -
 -    atomic_t        use_count;
 -
 -    /* keep track of any pending bottom halfes */
 -    struct tasklet_struct tasklet;
 -
 -    spinlock_t      info_lock;
 -
 -    struct semaphore info_sem;
 -    struct semaphore fb_sem;
 -    struct semaphore pcie_sem;
 -} xgi_info_t;
 -
 -typedef struct xgi_ioctl_post_vbios {
 -    U32 bus;
 -    U32 slot;
 -} xgi_ioctl_post_vbios_t;
 -
 -typedef enum xgi_mem_location_s
 -{
 -    NON_LOCAL = 0,
 -    LOCAL     = 1,
 -    INVALID   = 0x7fffffff
 -} xgi_mem_location_t;
 -
 -enum PcieOwner
 -{
 -    PCIE_2D = 0,
 -    /*
 -    PCIE_3D should not begin with 1,
 -    2D alloc pcie memory will use owner 1.
 -    */
 -    PCIE_3D = 11,/*vetex buf*/
 -    PCIE_3D_CMDLIST = 12,
 -    PCIE_3D_SCRATCHPAD = 13,
 -    PCIE_3D_TEXTURE = 14,
 -    PCIE_INVALID = 0x7fffffff
 -};
 -
 -typedef struct xgi_mem_req_s {
 -    xgi_mem_location_t  location;
 -    unsigned long       size;
 -    unsigned long       is_front;
 -    enum PcieOwner      owner;
 -    unsigned long       pid;
 -} xgi_mem_req_t;
 -
 -typedef struct xgi_mem_alloc_s {
 -    xgi_mem_location_t  location;
 -    unsigned long       size;
 -    unsigned long       bus_addr;
 -    unsigned long       hw_addr;
 -    unsigned long       pid;
 -} xgi_mem_alloc_t;
 -
 -typedef struct xgi_chip_info_s {
 -    U32     device_id;
 -    char    device_name[32];
 -    U32     vendor_id;
 -    U32     curr_display_mode; //Singe, DualView(Contained), MHS
 -    U32     fb_size;
 -    U32     sarea_bus_addr;
 -    U32     sarea_size;
 -} xgi_chip_info_t;
 -
 -typedef struct xgi_opengl_cmd_s {
 -    U32     cmd;
 -} xgi_opengl_cmd_t;
 -
 -typedef struct xgi_mmio_info_s {
 -    xgi_opengl_cmd_t    cmd_head;
 -    void                *mmioBase;
 -    int                 size;
 -} xgi_mmio_info_t;
 -
 -typedef enum {
 -    BTYPE_2D = 0,
 -    BTYPE_3D = 1,
 -    BTYPE_FLIP = 2,
 -    BTYPE_CTRL = 3,
 -    BTYPE_NONE = 0x7fffffff
 -}BATCH_TYPE;
 -
 -typedef struct xgi_cmd_info_s {
 -    BATCH_TYPE  _firstBeginType;
 -    U32         _firstBeginAddr;
 -    U32         _firstSize;
 -    U32         _curDebugID;
 -    U32         _lastBeginAddr;
 -    U32         _beginCount;
 -} xgi_cmd_info_t;
 -
 -typedef struct xgi_state_info_s {
 -    U32         _fromState;
 -    U32         _toState;
 -} xgi_state_info_t;
 -
 -typedef struct cpu_info_s {
 -    U32 _eax;
 -    U32 _ebx;
 -    U32 _ecx;
 -    U32 _edx;
 -} cpu_info_t;
 -
 -typedef struct xgi_mem_pid_s {
 -    struct list_head    list;
 -    xgi_mem_location_t  location;
 -    unsigned long       bus_addr;
 -    unsigned long       pid;
 -} xgi_mem_pid_t;
 -
 -/*
 - * Ioctl definitions
 - */
 -
 -#define XGI_IOCTL_MAGIC             'x'     /* use 'x' as magic number */
 -
 -#define XGI_IOCTL_BASE              0
 -#define XGI_ESC_DEVICE_INFO         (XGI_IOCTL_BASE + 0)
 -#define XGI_ESC_POST_VBIOS          (XGI_IOCTL_BASE + 1)
 -
 -#define XGI_ESC_FB_INIT             (XGI_IOCTL_BASE + 2)
 -#define XGI_ESC_FB_ALLOC            (XGI_IOCTL_BASE + 3)
 -#define XGI_ESC_FB_FREE             (XGI_IOCTL_BASE + 4)
 -#define XGI_ESC_PCIE_INIT           (XGI_IOCTL_BASE + 5)
 -#define XGI_ESC_PCIE_ALLOC          (XGI_IOCTL_BASE + 6)
 -#define XGI_ESC_PCIE_FREE           (XGI_IOCTL_BASE + 7)
 -#define XGI_ESC_SUBMIT_CMDLIST      (XGI_IOCTL_BASE + 8)
 -#define XGI_ESC_PUT_SCREEN_INFO     (XGI_IOCTL_BASE + 9)
 -#define XGI_ESC_GET_SCREEN_INFO     (XGI_IOCTL_BASE + 10)
 -#define XGI_ESC_GE_RESET            (XGI_IOCTL_BASE + 11)
 -#define XGI_ESC_SAREA_INFO          (XGI_IOCTL_BASE + 12)
 -#define XGI_ESC_DUMP_REGISTER       (XGI_IOCTL_BASE + 13)
 -#define XGI_ESC_DEBUG_INFO          (XGI_IOCTL_BASE + 14)
 -#define XGI_ESC_TEST_RWINKERNEL     (XGI_IOCTL_BASE + 16)
 -#define XGI_ESC_STATE_CHANGE        (XGI_IOCTL_BASE + 17)
 -#define XGI_ESC_MMIO_INFO           (XGI_IOCTL_BASE + 18)
 -#define XGI_ESC_PCIE_CHECK          (XGI_IOCTL_BASE + 19)
 -#define XGI_ESC_CPUID               (XGI_IOCTL_BASE + 20)
 -#define XGI_ESC_MEM_COLLECT          (XGI_IOCTL_BASE + 21)
 -
 -#define XGI_IOCTL_DEVICE_INFO       _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t)
 -#define XGI_IOCTL_POST_VBIOS        _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS)
 -
 -#define XGI_IOCTL_FB_INIT           _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT)
 -#define XGI_IOCTL_FB_ALLOC          _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t)
 -#define XGI_IOCTL_FB_FREE           _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long)
 -
 -#define XGI_IOCTL_PCIE_INIT         _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT)
 -#define XGI_IOCTL_PCIE_ALLOC        _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t)
 -#define XGI_IOCTL_PCIE_FREE         _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long)
 -
 -#define XGI_IOCTL_PUT_SCREEN_INFO   _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t)
 -#define XGI_IOCTL_GET_SCREEN_INFO   _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t)
 -
 -#define XGI_IOCTL_GE_RESET          _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET)
 -#define XGI_IOCTL_SAREA_INFO        _IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t)
 -#define XGI_IOCTL_DUMP_REGISTER     _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER)
 -#define XGI_IOCTL_DEBUG_INFO        _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO)
 -#define XGI_IOCTL_MMIO_INFO         _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t)
 -
 -#define XGI_IOCTL_SUBMIT_CMDLIST	_IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t)
 -#define XGI_IOCTL_TEST_RWINKERNEL	_IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long)
 -#define XGI_IOCTL_STATE_CHANGE      _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t)
 -
 -#define XGI_IOCTL_PCIE_CHECK        _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK)
 -#define XGI_IOCTL_CPUID             _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t)
 -#define XGI_IOCTL_MAXNR          30
 -
 -/*
 - * flags
 - */
 -#define XGI_FLAG_OPEN            0x0001
 -#define XGI_FLAG_NEEDS_POSTING   0x0002
 -#define XGI_FLAG_WAS_POSTED      0x0004
 -#define XGI_FLAG_CONTROL         0x0010
 -#define XGI_FLAG_MAP_REGS_EARLY  0x0200
 -
 -/* mmap(2) offsets */
 -
 -#define IS_IO_OFFSET(info, offset, length) \
 -            (((offset) >= (info)->mmio.base) \
 -            && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size))
 -
 -/* Jong 06/14/2006 */
 -/* (info)->fb.base is a base address for physical (bus) address space */
 -/* what's the definition of offest? on  physical (bus) address space or HW address space */
 -/* Jong 06/15/2006; use HW address space */
 -#define IS_FB_OFFSET(info, offset, length) \
 -            (((offset) >= 0) \
 -            && (((offset) + (length)) <= (info)->fb.size))
 -#if 0
 -#define IS_FB_OFFSET(info, offset, length) \
 -            (((offset) >= (info)->fb.base) \
 -            && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size))
 -#endif
 -
 -#define IS_PCIE_OFFSET(info, offset, length) \
 -            (((offset) >= (info)->pcie.base) \
 -            && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size))
 -
 -extern int  xgi_fb_heap_init(xgi_info_t *info);
 -extern void xgi_fb_heap_cleanup(xgi_info_t *info);
 -
 -extern void xgi_fb_alloc(xgi_info_t *info, xgi_mem_req_t *req, xgi_mem_alloc_t *alloc);
 -extern void xgi_fb_free(xgi_info_t *info, unsigned long offset);
 -extern void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt);
 -
 -extern int  xgi_pcie_heap_init(xgi_info_t *info);
 -extern void xgi_pcie_heap_cleanup(xgi_info_t *info);
 -
 -extern void xgi_pcie_alloc(xgi_info_t *info, unsigned long size, enum PcieOwner owner, xgi_mem_alloc_t *alloc);
 -extern void xgi_pcie_free(xgi_info_t *info, unsigned long offset);
 -extern void xgi_pcie_heap_check(void);
 -extern void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address);
 -extern void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address);
 -
 -extern void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req);
 -extern void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req);
 -
 -extern void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address);
 -
 -#endif
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 + ***************************************************************************/ + +#ifndef _XGI_DRV_H_ +#define _XGI_DRV_H_ + +#define XGI_MAJOR_VERSION   0 +#define XGI_MINOR_VERSION   7 +#define XGI_PATCHLEVEL      5 + +#define XGI_DRV_VERSION     "0.7.5" + +#ifndef XGI_DRV_NAME +#define XGI_DRV_NAME        "xgi" +#endif + +/* + * xgi reserved major device number, Set this to 0 to + * request dynamic major number allocation. 
+ */ +#ifndef XGI_DEV_MAJOR +#define XGI_DEV_MAJOR   0 +#endif + +#ifndef XGI_MAX_DEVICES +#define XGI_MAX_DEVICES 1 +#endif + +/* Jong 06/06/2006 */ +/* #define XGI_DEBUG */ + +#ifndef PCI_VENDOR_ID_XGI +/* +#define PCI_VENDOR_ID_XGI       0x1023 +*/ +#define PCI_VENDOR_ID_XGI       0x18CA + +#endif + +#ifndef PCI_DEVICE_ID_XP5 +#define PCI_DEVICE_ID_XP5       0x2200 +#endif + +#ifndef PCI_DEVICE_ID_XG47 +#define PCI_DEVICE_ID_XG47      0x0047 +#endif + +/* Macros to make printk easier */ +#define XGI_ERROR(fmt, arg...) \ +    printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +#define XGI_MEM_ERROR(area, fmt, arg...) \ +    printk(KERN_ERR "[" XGI_DRV_NAME ":%s] *ERROR* " fmt, __FUNCTION__, ##arg) + +/* #define XGI_DEBUG */ + +#ifdef XGI_DEBUG +#define XGI_INFO(fmt, arg...) \ +    printk(KERN_ALERT "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) +/*    printk(KERN_INFO "[" XGI_DRV_NAME ":%s] " fmt, __FUNCTION__, ##arg) */ +#else +#define XGI_INFO(fmt, arg...)   
do { } while (0) +#endif + +/* device name length; must be atleast 8 */ +#define XGI_DEVICE_NAME_LENGTH      40 + +/* need a fake device number for control device; just to flag it for msgs */ +#define XGI_CONTROL_DEVICE_NUMBER   100 + +typedef struct { +	U32 base;		// pcie base is different from fb base +	U32 size; +	U8 *vbase; +} xgi_aperture_t; + +typedef struct xgi_screen_info_s { +	U32 scrn_start; +	U32 scrn_xres; +	U32 scrn_yres; +	U32 scrn_bpp; +	U32 scrn_pitch; +} xgi_screen_info_t; + +typedef struct xgi_sarea_info_s { +	U32 bus_addr; +	U32 size; +} xgi_sarea_info_t; + +typedef struct xgi_info_s { +	struct pci_dev *dev; +	int flags; +	int device_number; +	int bus;		/* PCI config info */ +	int slot; +	int vendor_id; +	U32 device_id; +	U8 revision_id; + +	/* physical characteristics */ +	xgi_aperture_t mmio; +	xgi_aperture_t fb; +	xgi_aperture_t pcie; +	xgi_screen_info_t scrn_info; +	xgi_sarea_info_t sarea_info; + +	/* look up table parameters */ +	U32 *lut_base; +	U32 lutPageSize; +	U32 lutPageOrder; +	U32 isLUTInLFB; +	U32 sdfbPageSize; + +	U32 pcie_config; +	U32 pcie_status; +	U32 irq; + +	atomic_t use_count; + +	/* keep track of any pending bottom halfes */ +	struct tasklet_struct tasklet; + +	spinlock_t info_lock; + +	struct semaphore info_sem; +	struct semaphore fb_sem; +	struct semaphore pcie_sem; +} xgi_info_t; + +typedef struct xgi_ioctl_post_vbios { +	U32 bus; +	U32 slot; +} xgi_ioctl_post_vbios_t; + +typedef enum xgi_mem_location_s { +	NON_LOCAL = 0, +	LOCAL = 1, +	INVALID = 0x7fffffff +} xgi_mem_location_t; + +enum PcieOwner { +	PCIE_2D = 0, +	/* +	   PCIE_3D should not begin with 1, +	   2D alloc pcie memory will use owner 1. 
+	 */ +	PCIE_3D = 11,		/*vetex buf */ +	PCIE_3D_CMDLIST = 12, +	PCIE_3D_SCRATCHPAD = 13, +	PCIE_3D_TEXTURE = 14, +	PCIE_INVALID = 0x7fffffff +}; + +typedef struct xgi_mem_req_s { +	xgi_mem_location_t location; +	unsigned long size; +	unsigned long is_front; +	enum PcieOwner owner; +	unsigned long pid; +} xgi_mem_req_t; + +typedef struct xgi_mem_alloc_s { +	xgi_mem_location_t location; +	unsigned long size; +	unsigned long bus_addr; +	unsigned long hw_addr; +	unsigned long pid; +} xgi_mem_alloc_t; + +typedef struct xgi_chip_info_s { +	U32 device_id; +	char device_name[32]; +	U32 vendor_id; +	U32 curr_display_mode;	//Singe, DualView(Contained), MHS +	U32 fb_size; +	U32 sarea_bus_addr; +	U32 sarea_size; +} xgi_chip_info_t; + +typedef struct xgi_opengl_cmd_s { +	U32 cmd; +} xgi_opengl_cmd_t; + +typedef struct xgi_mmio_info_s { +	xgi_opengl_cmd_t cmd_head; +	void *mmioBase; +	int size; +} xgi_mmio_info_t; + +typedef enum { +	BTYPE_2D = 0, +	BTYPE_3D = 1, +	BTYPE_FLIP = 2, +	BTYPE_CTRL = 3, +	BTYPE_NONE = 0x7fffffff +} BATCH_TYPE; + +typedef struct xgi_cmd_info_s { +	BATCH_TYPE _firstBeginType; +	U32 _firstBeginAddr; +	U32 _firstSize; +	U32 _curDebugID; +	U32 _lastBeginAddr; +	U32 _beginCount; +} xgi_cmd_info_t; + +typedef struct xgi_state_info_s { +	U32 _fromState; +	U32 _toState; +} xgi_state_info_t; + +typedef struct cpu_info_s { +	U32 _eax; +	U32 _ebx; +	U32 _ecx; +	U32 _edx; +} cpu_info_t; + +typedef struct xgi_mem_pid_s { +	struct list_head list; +	xgi_mem_location_t location; +	unsigned long bus_addr; +	unsigned long pid; +} xgi_mem_pid_t; + +/* + * Ioctl definitions + */ + +#define XGI_IOCTL_MAGIC             'x'	/* use 'x' as magic number */ + +#define XGI_IOCTL_BASE              0 +#define XGI_ESC_DEVICE_INFO         (XGI_IOCTL_BASE + 0) +#define XGI_ESC_POST_VBIOS          (XGI_IOCTL_BASE + 1) + +#define XGI_ESC_FB_INIT             (XGI_IOCTL_BASE + 2) +#define XGI_ESC_FB_ALLOC            (XGI_IOCTL_BASE + 3) +#define XGI_ESC_FB_FREE             
(XGI_IOCTL_BASE + 4) +#define XGI_ESC_PCIE_INIT           (XGI_IOCTL_BASE + 5) +#define XGI_ESC_PCIE_ALLOC          (XGI_IOCTL_BASE + 6) +#define XGI_ESC_PCIE_FREE           (XGI_IOCTL_BASE + 7) +#define XGI_ESC_SUBMIT_CMDLIST      (XGI_IOCTL_BASE + 8) +#define XGI_ESC_PUT_SCREEN_INFO     (XGI_IOCTL_BASE + 9) +#define XGI_ESC_GET_SCREEN_INFO     (XGI_IOCTL_BASE + 10) +#define XGI_ESC_GE_RESET            (XGI_IOCTL_BASE + 11) +#define XGI_ESC_SAREA_INFO          (XGI_IOCTL_BASE + 12) +#define XGI_ESC_DUMP_REGISTER       (XGI_IOCTL_BASE + 13) +#define XGI_ESC_DEBUG_INFO          (XGI_IOCTL_BASE + 14) +#define XGI_ESC_TEST_RWINKERNEL     (XGI_IOCTL_BASE + 16) +#define XGI_ESC_STATE_CHANGE        (XGI_IOCTL_BASE + 17) +#define XGI_ESC_MMIO_INFO           (XGI_IOCTL_BASE + 18) +#define XGI_ESC_PCIE_CHECK          (XGI_IOCTL_BASE + 19) +#define XGI_ESC_CPUID               (XGI_IOCTL_BASE + 20) +#define XGI_ESC_MEM_COLLECT          (XGI_IOCTL_BASE + 21) + +#define XGI_IOCTL_DEVICE_INFO       _IOR(XGI_IOCTL_MAGIC, XGI_ESC_DEVICE_INFO, xgi_chip_info_t) +#define XGI_IOCTL_POST_VBIOS        _IO(XGI_IOCTL_MAGIC, XGI_ESC_POST_VBIOS) + +#define XGI_IOCTL_FB_INIT           _IO(XGI_IOCTL_MAGIC, XGI_ESC_FB_INIT) +#define XGI_IOCTL_FB_ALLOC          _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_FB_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_FB_FREE           _IOW(XGI_IOCTL_MAGIC, XGI_ESC_FB_FREE, unsigned long) + +#define XGI_IOCTL_PCIE_INIT         _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_INIT) +#define XGI_IOCTL_PCIE_ALLOC        _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_ALLOC, xgi_mem_req_t) +#define XGI_IOCTL_PCIE_FREE         _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_FREE, unsigned long) + +#define XGI_IOCTL_PUT_SCREEN_INFO   _IOW(XGI_IOCTL_MAGIC, XGI_ESC_PUT_SCREEN_INFO, xgi_screen_info_t) +#define XGI_IOCTL_GET_SCREEN_INFO   _IOR(XGI_IOCTL_MAGIC, XGI_ESC_GET_SCREEN_INFO, xgi_screen_info_t) + +#define XGI_IOCTL_GE_RESET          _IO(XGI_IOCTL_MAGIC, XGI_ESC_GE_RESET) +#define XGI_IOCTL_SAREA_INFO        
_IOW(XGI_IOCTL_MAGIC, XGI_ESC_SAREA_INFO, xgi_sarea_info_t) +#define XGI_IOCTL_DUMP_REGISTER     _IO(XGI_IOCTL_MAGIC, XGI_ESC_DUMP_REGISTER) +#define XGI_IOCTL_DEBUG_INFO        _IO(XGI_IOCTL_MAGIC, XGI_ESC_DEBUG_INFO) +#define XGI_IOCTL_MMIO_INFO         _IOR(XGI_IOCTL_MAGIC, XGI_ESC_MMIO_INFO, xgi_mmio_info_t) + +#define XGI_IOCTL_SUBMIT_CMDLIST	_IOWR(XGI_IOCTL_MAGIC, XGI_ESC_SUBMIT_CMDLIST, xgi_cmd_info_t) +#define XGI_IOCTL_TEST_RWINKERNEL	_IOWR(XGI_IOCTL_MAGIC, XGI_ESC_TEST_RWINKERNEL, unsigned long) +#define XGI_IOCTL_STATE_CHANGE      _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_STATE_CHANGE, xgi_state_info_t) + +#define XGI_IOCTL_PCIE_CHECK        _IO(XGI_IOCTL_MAGIC, XGI_ESC_PCIE_CHECK) +#define XGI_IOCTL_CPUID             _IOWR(XGI_IOCTL_MAGIC, XGI_ESC_CPUID, cpu_info_t) +#define XGI_IOCTL_MAXNR          30 + +/* + * flags + */ +#define XGI_FLAG_OPEN            0x0001 +#define XGI_FLAG_NEEDS_POSTING   0x0002 +#define XGI_FLAG_WAS_POSTED      0x0004 +#define XGI_FLAG_CONTROL         0x0010 +#define XGI_FLAG_MAP_REGS_EARLY  0x0200 + +/* mmap(2) offsets */ + +#define IS_IO_OFFSET(info, offset, length) \ +            (((offset) >= (info)->mmio.base) \ +            && (((offset) + (length)) <= (info)->mmio.base + (info)->mmio.size)) + +/* Jong 06/14/2006 */ +/* (info)->fb.base is a base address for physical (bus) address space */ +/* what's the definition of offest? 
on  physical (bus) address space or HW address space */ +/* Jong 06/15/2006; use HW address space */ +#define IS_FB_OFFSET(info, offset, length) \ +            (((offset) >= 0) \ +            && (((offset) + (length)) <= (info)->fb.size)) +#if 0 +#define IS_FB_OFFSET(info, offset, length) \ +            (((offset) >= (info)->fb.base) \ +            && (((offset) + (length)) <= (info)->fb.base + (info)->fb.size)) +#endif + +#define IS_PCIE_OFFSET(info, offset, length) \ +            (((offset) >= (info)->pcie.base) \ +            && (((offset) + (length)) <= (info)->pcie.base + (info)->pcie.size)) + +extern int xgi_fb_heap_init(xgi_info_t * info); +extern void xgi_fb_heap_cleanup(xgi_info_t * info); + +extern void xgi_fb_alloc(xgi_info_t * info, xgi_mem_req_t * req, +			 xgi_mem_alloc_t * alloc); +extern void xgi_fb_free(xgi_info_t * info, unsigned long offset); +extern void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt); + +extern int xgi_pcie_heap_init(xgi_info_t * info); +extern void xgi_pcie_heap_cleanup(xgi_info_t * info); + +extern void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, +			   enum PcieOwner owner, xgi_mem_alloc_t * alloc); +extern void xgi_pcie_free(xgi_info_t * info, unsigned long offset); +extern void xgi_pcie_heap_check(void); +extern void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address); +extern void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address); + +extern void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); +extern void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req); + +extern void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address); + +#endif diff --git a/linux-core/xgi_fb.c b/linux-core/xgi_fb.c index 67fdfe17..fab99ae2 100644 --- a/linux-core/xgi_fb.c +++ b/linux-core/xgi_fb.c @@ -1,528 +1,491 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#include "xgi_types.h"
 -#include "xgi_linux.h"
 -#include "xgi_drv.h"
 -#include "xgi_fb.h"
 -
 -#define XGI_FB_HEAP_START 0x1000000
 -
 -static xgi_mem_heap_t   *xgi_fb_heap;
 -static kmem_cache_t     *xgi_fb_cache_block = NULL;
 -extern struct list_head xgi_mempid_list;
 -
 -static xgi_mem_block_t *xgi_mem_new_node(void);
 -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long size);
 -static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset);
 -
 -void xgi_fb_alloc(xgi_info_t *info,
 -                  xgi_mem_req_t *req,
 -                  xgi_mem_alloc_t *alloc)
 -{
 -    xgi_mem_block_t *block;
 -    xgi_mem_pid_t *mempid_block;
 -
 -    if (req->is_front)
 -    {
 -        alloc->location = LOCAL;
 -        alloc->bus_addr = info->fb.base;
 -        alloc->hw_addr  = 0;
 -        XGI_INFO("Video RAM allocation on front buffer successfully! \n");
 -    }
 -    else
 -    {
 -        xgi_down(info->fb_sem);
 -        block = xgi_mem_alloc(info, req->size);
 -        xgi_up(info->fb_sem);
 -
 -        if (block == NULL)
 -        {
 -            alloc->location = LOCAL;
 -            alloc->size     = 0;
 -            alloc->bus_addr = 0;
 -            alloc->hw_addr  = 0;
 -            XGI_ERROR("Video RAM allocation failed\n");
 -        }
 -        else
 -        {
 -            XGI_INFO("Video RAM allocation succeeded: 0x%p\n",
 -                    (char *) block->offset);
 -            alloc->location = LOCAL;
 -            alloc->size     = block->size;
 -            alloc->bus_addr = info->fb.base + block->offset;
 -            alloc->hw_addr  = block->offset;
 -
 -            /* manage mempid */
 -            mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL);
 -            mempid_block->location = LOCAL;
 -            mempid_block->bus_addr = alloc->bus_addr;
 -            mempid_block->pid = alloc->pid;
 -
 -            if (!mempid_block)
 -                XGI_ERROR("mempid_block alloc failed\n");
 -
 -            XGI_INFO("Memory ProcessID add one fb block pid:%ld successfully! \n", mempid_block->pid);
 -            list_add(&mempid_block->list, &xgi_mempid_list);
 -        }
 -    }
 -}
 -
 -void xgi_fb_free(xgi_info_t *info, unsigned long bus_addr)
 -{
 -    xgi_mem_block_t     *block;
 -    unsigned long       offset = bus_addr - info->fb.base;
 -    xgi_mem_pid_t       *mempid_block;
 -    xgi_mem_pid_t       *mempid_freeblock = NULL;
 -    struct list_head    *mempid_list;
 -
 -    if (offset < 0)
 -    {
 -        XGI_INFO("free onscreen frame buffer successfully !\n");
 -    }
 -    else
 -    {
 -        xgi_down(info->fb_sem);
 -        block = xgi_mem_free(info, offset);
 -        xgi_up(info->fb_sem);
 -
 -        if (block == NULL)
 -        {
 -            XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", offset);
 -        }
 -
 -        /* manage mempid */
 -        mempid_list = xgi_mempid_list.next;
 -        while (mempid_list != &xgi_mempid_list)
 -        {
 -            mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);
 -            if (mempid_block->location == LOCAL && mempid_block->bus_addr == bus_addr)
 -            {
 -                mempid_freeblock = mempid_block;
 -                break;
 -            }
 -            mempid_list = mempid_list->next;
 -        }
 -        if (mempid_freeblock)
 -        {
 -            list_del(&mempid_freeblock->list);
 -            XGI_INFO("Memory ProcessID delete one fb block pid:%ld successfully! \n", mempid_freeblock->pid);
 -            kfree(mempid_freeblock);
 -        }
 -    }
 -}
 -
 -int xgi_fb_heap_init(xgi_info_t *info)
 -{
 -    xgi_mem_block_t *block;
 -
 -    xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL);
 -    if (!xgi_fb_heap)
 -    {
 -        XGI_ERROR("xgi_fb_heap alloc failed\n");
 -        return 0;
 -    }
 -
 -    INIT_LIST_HEAD(&xgi_fb_heap->free_list);
 -    INIT_LIST_HEAD(&xgi_fb_heap->used_list);
 -    INIT_LIST_HEAD(&xgi_fb_heap->sort_list);
 -
 -    xgi_fb_cache_block = kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t),
 -                                           0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 -
 -    if (NULL == xgi_fb_cache_block)
 -    {
 -         XGI_ERROR("Fail to creat xgi_fb_block\n");
 -         goto fail1;
 -    }
 -
 -    block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL);
 -    if (!block)
 -    {
 -        XGI_ERROR("kmem_cache_alloc failed\n");
 -        goto fail2;
 -    }
 -
 -    block->offset = XGI_FB_HEAP_START;
 -    block->size   = info->fb.size - XGI_FB_HEAP_START;
 -
 -    list_add(&block->list, &xgi_fb_heap->free_list);
 -
 -    xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START;
 -
 -    XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, block->size);
 -    XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize);
 -
 -    return 1;
 -
 -fail2:
 -    if (xgi_fb_cache_block)
 -    {
 -        kmem_cache_destroy(xgi_fb_cache_block);
 -        xgi_fb_cache_block = NULL;
 -    }
 -fail1:
 -    if(xgi_fb_heap)
 -    {
 -        kfree(xgi_fb_heap);
 -        xgi_fb_heap = NULL;
 -    }
 -    return 0;
 -}
 -
 -void xgi_fb_heap_cleanup(xgi_info_t *info)
 -{
 -    struct list_head    *free_list, *temp;
 -    xgi_mem_block_t     *block;
 -    int                 i;
 -
 -    if (xgi_fb_heap)
 -    {
 -        free_list = &xgi_fb_heap->free_list;
 -        for (i = 0; i < 3; i++, free_list++)
 -        {
 -            temp = free_list->next;
 -            while (temp != free_list)
 -            {
 -                block = list_entry(temp, struct xgi_mem_block_s, list);
 -                temp = temp->next;
 -
 -                XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
 -                          i, block->offset, block->size);
 -                //XGI_INFO("No. %d free block: 0x%p \n", i, block);
 -                kmem_cache_free(xgi_fb_cache_block, block);
 -                block = NULL;
 -            }
 -        }
 -        XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap);
 -        kfree(xgi_fb_heap);
 -        xgi_fb_heap = NULL;
 -    }
 -
 -    if (xgi_fb_cache_block)
 -    {
 -        kmem_cache_destroy(xgi_fb_cache_block);
 -        xgi_fb_cache_block = NULL;
 -    }
 -}
 -
 -static xgi_mem_block_t * xgi_mem_new_node(void)
 -{
 -    xgi_mem_block_t *block;
 -
 -    block = (xgi_mem_block_t *)kmem_cache_alloc(xgi_fb_cache_block, GFP_KERNEL);
 -    if (!block)
 -    {
 -        XGI_ERROR("kmem_cache_alloc failed\n");
 -        return NULL;
 -    }
 -
 -    return block;
 -}
 -
 -#if 0
 -static void xgi_mem_insert_node_after(xgi_mem_list_t *list,
 -                                      xgi_mem_block_t *current,
 -                                      xgi_mem_block_t *block);
 -static void xgi_mem_insert_node_before(xgi_mem_list_t *list,
 -                                       xgi_mem_block_t *current,
 -                                       xgi_mem_block_t *block);
 -static void xgi_mem_insert_node_head(xgi_mem_list_t *list,
 -                                     xgi_mem_block_t *block);
 -static void xgi_mem_insert_node_tail(xgi_mem_list_t *list,
 -                                     xgi_mem_block_t *block);
 -static void xgi_mem_delete_node(xgi_mem_list_t *list,
 -                                xgi_mem_block_t *block);
 -/*
 - *  insert node:block after node:current
 - */
 -static void xgi_mem_insert_node_after(xgi_mem_list_t *list,
 -                                      xgi_mem_block_t *current,
 -                                      xgi_mem_block_t *block)
 -{
 -    block->prev = current;
 -    block->next = current->next;
 -    current->next = block;
 -
 -    if (current == list->tail)
 -    {
 -        list->tail = block;
 -    }
 -    else
 -    {
 -        block->next->prev = block;
 -    }
 -}
 -
 -/*
 - *  insert node:block before node:current
 - */
 -static void xgi_mem_insert_node_before(xgi_mem_list_t *list,
 -                                       xgi_mem_block_t *current,
 -                                       xgi_mem_block_t *block)
 -{
 -    block->prev = current->prev;
 -    block->next = current;
 -    current->prev = block;
 -    if (current == list->head)
 -    {
 -        list->head = block;
 -    }
 -    else
 -    {
 -        block->prev->next = block;
 -    }
 -}
 -void xgi_mem_insert_node_head(xgi_mem_list_t *list,
 -                              xgi_mem_block_t *block)
 -{
 -    block->next = list->head;
 -    block->prev = NULL;
 -
 -    if (NULL == list->head)
 -    {
 -        list->tail = block;
 -    }
 -    else
 -    {
 -        list->head->prev = block;
 -    }
 -    list->head = block;
 -}
 -
 -static void xgi_mem_insert_node_tail(xgi_mem_list_t *list,
 -                                     xgi_mem_block_t *block)
 -
 -{
 -    block->next = NULL;
 -    block->prev = list->tail;
 -    if (NULL == list->tail)
 -    {
 -        list->head = block;
 -    }
 -    else
 -    {
 -        list->tail->next = block;
 -    }
 -    list->tail = block;
 -}
 -
 -static void xgi_mem_delete_node(xgi_mem_list_t *list,
 -                         xgi_mem_block_t *block)
 -{
 -    if (block == list->head)
 -    {
 -        list->head = block->next;
 -    }
 -    if (block == list->tail)
 -    {
 -        list->tail = block->prev;
 -    }
 -
 -    if (block->prev)
 -    {
 -        block->prev->next = block->next;
 -    }
 -    if (block->next)
 -    {
 -        block->next->prev = block->prev;
 -    }
 -
 -    block->next = block->prev = NULL;
 -}
 -#endif
 -static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t *info, unsigned long originalSize)
 -{
 -    struct list_head    *free_list;
 -    xgi_mem_block_t     *block, *free_block, *used_block;
 -
 -    unsigned long       size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
 -
 -    XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size);
 -
 -    if (size == 0)
 -    {
 -        XGI_ERROR("size == 0\n");
 -        return (NULL);
 -    }
 -    XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize);
 -    if (size > xgi_fb_heap->max_freesize)
 -    {
 -        XGI_ERROR("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n",
 -                  size, xgi_fb_heap->max_freesize);
 -        return (NULL);
 -    }
 -
 -    free_list = xgi_fb_heap->free_list.next;
 -
 -    while (free_list != &xgi_fb_heap->free_list)
 -    {
 -        XGI_INFO("free_list: 0x%px \n", free_list);
 -        block = list_entry(free_list, struct xgi_mem_block_s, list);
 -        if (size <= block->size)
 -        {
 -            break;
 -        }
 -        free_list = free_list->next;
 -    }
 -
 -    if (free_list == &xgi_fb_heap->free_list)
 -    {
 -        XGI_ERROR("Can't allocate %ldk size from frame buffer memory !\n", size/1024);
 -        return (NULL);
 -    }
 -
 -    free_block = block;
 -    XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
 -              size, free_block->offset, free_block->size);
 -
 -    if (size == free_block->size)
 -    {
 -        used_block = free_block;
 -        XGI_INFO("size == free_block->size: free_block = 0x%p\n", free_block);
 -        list_del(&free_block->list);
 -    }
 -    else
 -    {
 -        used_block = xgi_mem_new_node();
 -
 -        if (used_block == NULL)  return (NULL);
 -
 -        if (used_block == free_block)
 -        {
 -            XGI_ERROR("used_block == free_block = 0x%p\n", used_block);
 -        }
 -
 -        used_block->offset = free_block->offset;
 -        used_block->size = size;
 -
 -        free_block->offset += size;
 -        free_block->size -= size;
 -    }
 -
 -    xgi_fb_heap->max_freesize -= size;
 -
 -    list_add(&used_block->list, &xgi_fb_heap->used_list);
 -
 -    return (used_block);
 -}
 -
 -static xgi_mem_block_t *xgi_mem_free(xgi_info_t *info, unsigned long offset)
 -{
 -    struct list_head    *free_list, *used_list;
 -    xgi_mem_block_t     *used_block = NULL, *block = NULL;
 -    xgi_mem_block_t     *prev, *next;
 -
 -    unsigned long       upper;
 -    unsigned long       lower;
 -
 -    used_list = xgi_fb_heap->used_list.next;
 -    while (used_list != &xgi_fb_heap->used_list)
 -    {
 -        block = list_entry(used_list, struct xgi_mem_block_s, list);
 -        if (block->offset == offset)
 -        {
 -            break;
 -        }
 -        used_list = used_list->next;
 -    }
 -
 -    if (used_list == &xgi_fb_heap->used_list)
 -    {
 -        XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
 -        return (NULL);
 -    }
 -
 -    used_block = block;
 -    XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n",
 -              used_block, used_block->offset, used_block->size);
 -
 -    xgi_fb_heap->max_freesize += used_block->size;
 -
 -    prev = next = NULL;
 -    upper = used_block->offset + used_block->size;
 -    lower = used_block->offset;
 -
 -    free_list = xgi_fb_heap->free_list.next;
 -    while (free_list != &xgi_fb_heap->free_list)
 -    {
 -        block = list_entry(free_list, struct xgi_mem_block_s, list);
 -
 -        if (block->offset == upper)
 -        {
 -            next = block;
 -        }
 -        else if ((block->offset + block->size) == lower)
 -        {
 -            prev = block;
 -        }
 -        free_list = free_list->next;
 -    }
 -
 -    XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
 -    list_del(&used_block->list);
 -
 -    if (prev && next)
 -    {
 -        prev->size += (used_block->size + next->size);
 -        list_del(&next->list);
 -        XGI_INFO("free node 0x%p\n", next);
 -        kmem_cache_free(xgi_fb_cache_block, next);
 -        kmem_cache_free(xgi_fb_cache_block, used_block);
 -
 -        next = NULL;
 -        used_block = NULL;
 -        return (prev);
 -    }
 -
 -    if (prev)
 -    {
 -        prev->size += used_block->size;
 -        XGI_INFO("free node 0x%p\n", used_block);
 -        kmem_cache_free(xgi_fb_cache_block, used_block);
 -        used_block = NULL;
 -        return (prev);
 -    }
 -
 -    if (next)
 -    {
 -        next->size += used_block->size;
 -        next->offset = used_block->offset;
 -        XGI_INFO("free node 0x%p\n", used_block);
 -        kmem_cache_free(xgi_fb_cache_block, used_block);
 -        used_block = NULL;
 -        return (next);
 -    }
 -
 -    list_add(&used_block->list, &xgi_fb_heap->free_list);
 -    XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
 -              used_block, used_block->offset, used_block->size);
 -
 -    return (used_block);
 -}
 -
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_fb.h" + +#define XGI_FB_HEAP_START 0x1000000 + +static xgi_mem_heap_t *xgi_fb_heap; +static kmem_cache_t *xgi_fb_cache_block = NULL; +extern struct list_head xgi_mempid_list; + +static xgi_mem_block_t *xgi_mem_new_node(void); +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, unsigned long size); +static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset); + +void xgi_fb_alloc(xgi_info_t * info, +		  xgi_mem_req_t * req, xgi_mem_alloc_t * alloc) +{ +	xgi_mem_block_t *block; +	xgi_mem_pid_t *mempid_block; + +	if (req->is_front) { +		alloc->location = LOCAL; +		alloc->bus_addr = info->fb.base; +		alloc->hw_addr = 0; +		XGI_INFO +		    ("Video RAM allocation on front buffer successfully! \n"); +	} else { +		xgi_down(info->fb_sem); +		block = xgi_mem_alloc(info, req->size); +		xgi_up(info->fb_sem); + +		if (block == NULL) { +			alloc->location = LOCAL; +			alloc->size = 0; +			alloc->bus_addr = 0; +			alloc->hw_addr = 0; +			XGI_ERROR("Video RAM allocation failed\n"); +		} else { +			XGI_INFO("Video RAM allocation succeeded: 0x%p\n", +				 (char *)block->offset); +			alloc->location = LOCAL; +			alloc->size = block->size; +			alloc->bus_addr = info->fb.base + block->offset; +			alloc->hw_addr = block->offset; + +			/* manage mempid */ +			mempid_block = +			    kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); +			mempid_block->location = LOCAL; +			mempid_block->bus_addr = alloc->bus_addr; +			mempid_block->pid = alloc->pid; + +			if (!mempid_block) +				XGI_ERROR("mempid_block alloc failed\n"); + +			XGI_INFO +			    ("Memory ProcessID add one fb block pid:%ld successfully! 
\n", +			     mempid_block->pid); +			list_add(&mempid_block->list, &xgi_mempid_list); +		} +	} +} + +void xgi_fb_free(xgi_info_t * info, unsigned long bus_addr) +{ +	xgi_mem_block_t *block; +	unsigned long offset = bus_addr - info->fb.base; +	xgi_mem_pid_t *mempid_block; +	xgi_mem_pid_t *mempid_freeblock = NULL; +	struct list_head *mempid_list; + +	if (offset < 0) { +		XGI_INFO("free onscreen frame buffer successfully !\n"); +	} else { +		xgi_down(info->fb_sem); +		block = xgi_mem_free(info, offset); +		xgi_up(info->fb_sem); + +		if (block == NULL) { +			XGI_ERROR("xgi_mem_free() failed at base 0x%lx\n", +				  offset); +		} + +		/* manage mempid */ +		mempid_list = xgi_mempid_list.next; +		while (mempid_list != &xgi_mempid_list) { +			mempid_block = +			    list_entry(mempid_list, struct xgi_mem_pid_s, list); +			if (mempid_block->location == LOCAL +			    && mempid_block->bus_addr == bus_addr) { +				mempid_freeblock = mempid_block; +				break; +			} +			mempid_list = mempid_list->next; +		} +		if (mempid_freeblock) { +			list_del(&mempid_freeblock->list); +			XGI_INFO +			    ("Memory ProcessID delete one fb block pid:%ld successfully! 
\n", +			     mempid_freeblock->pid); +			kfree(mempid_freeblock); +		} +	} +} + +int xgi_fb_heap_init(xgi_info_t * info) +{ +	xgi_mem_block_t *block; + +	xgi_fb_heap = kmalloc(sizeof(xgi_mem_heap_t), GFP_KERNEL); +	if (!xgi_fb_heap) { +		XGI_ERROR("xgi_fb_heap alloc failed\n"); +		return 0; +	} + +	INIT_LIST_HEAD(&xgi_fb_heap->free_list); +	INIT_LIST_HEAD(&xgi_fb_heap->used_list); +	INIT_LIST_HEAD(&xgi_fb_heap->sort_list); + +	xgi_fb_cache_block = +	    kmem_cache_create("xgi_fb_block", sizeof(xgi_mem_block_t), 0, +			      SLAB_HWCACHE_ALIGN, NULL, NULL); + +	if (NULL == xgi_fb_cache_block) { +		XGI_ERROR("Fail to creat xgi_fb_block\n"); +		goto fail1; +	} + +	block = +	    (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, +						 GFP_KERNEL); +	if (!block) { +		XGI_ERROR("kmem_cache_alloc failed\n"); +		goto fail2; +	} + +	block->offset = XGI_FB_HEAP_START; +	block->size = info->fb.size - XGI_FB_HEAP_START; + +	list_add(&block->list, &xgi_fb_heap->free_list); + +	xgi_fb_heap->max_freesize = info->fb.size - XGI_FB_HEAP_START; + +	XGI_INFO("fb start offset: 0x%lx, memory size : 0x%lx\n", block->offset, +		 block->size); +	XGI_INFO("xgi_fb_heap->max_freesize: 0x%lx \n", +		 xgi_fb_heap->max_freesize); + +	return 1; + +      fail2: +	if (xgi_fb_cache_block) { +		kmem_cache_destroy(xgi_fb_cache_block); +		xgi_fb_cache_block = NULL; +	} +      fail1: +	if (xgi_fb_heap) { +		kfree(xgi_fb_heap); +		xgi_fb_heap = NULL; +	} +	return 0; +} + +void xgi_fb_heap_cleanup(xgi_info_t * info) +{ +	struct list_head *free_list, *temp; +	xgi_mem_block_t *block; +	int i; + +	if (xgi_fb_heap) { +		free_list = &xgi_fb_heap->free_list; +		for (i = 0; i < 3; i++, free_list++) { +			temp = free_list->next; +			while (temp != free_list) { +				block = +				    list_entry(temp, struct xgi_mem_block_s, +					       list); +				temp = temp->next; + +				XGI_INFO +				    ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", +				     i, block->offset, block->size); +				//XGI_INFO("No. 
%d free block: 0x%p \n", i, block); +				kmem_cache_free(xgi_fb_cache_block, block); +				block = NULL; +			} +		} +		XGI_INFO("xgi_fb_heap: 0x%p \n", xgi_fb_heap); +		kfree(xgi_fb_heap); +		xgi_fb_heap = NULL; +	} + +	if (xgi_fb_cache_block) { +		kmem_cache_destroy(xgi_fb_cache_block); +		xgi_fb_cache_block = NULL; +	} +} + +static xgi_mem_block_t *xgi_mem_new_node(void) +{ +	xgi_mem_block_t *block; + +	block = +	    (xgi_mem_block_t *) kmem_cache_alloc(xgi_fb_cache_block, +						 GFP_KERNEL); +	if (!block) { +		XGI_ERROR("kmem_cache_alloc failed\n"); +		return NULL; +	} + +	return block; +} + +#if 0 +static void xgi_mem_insert_node_after(xgi_mem_list_t * list, +				      xgi_mem_block_t * current, +				      xgi_mem_block_t * block); +static void xgi_mem_insert_node_before(xgi_mem_list_t * list, +				       xgi_mem_block_t * current, +				       xgi_mem_block_t * block); +static void xgi_mem_insert_node_head(xgi_mem_list_t * list, +				     xgi_mem_block_t * block); +static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, +				     xgi_mem_block_t * block); +static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block); +/* + *  insert node:block after node:current + */ +static void xgi_mem_insert_node_after(xgi_mem_list_t * list, +				      xgi_mem_block_t * current, +				      xgi_mem_block_t * block) +{ +	block->prev = current; +	block->next = current->next; +	current->next = block; + +	if (current == list->tail) { +		list->tail = block; +	} else { +		block->next->prev = block; +	} +} + +/* + *  insert node:block before node:current + */ +static void xgi_mem_insert_node_before(xgi_mem_list_t * list, +				       xgi_mem_block_t * current, +				       xgi_mem_block_t * block) +{ +	block->prev = current->prev; +	block->next = current; +	current->prev = block; +	if (current == list->head) { +		list->head = block; +	} else { +		block->prev->next = block; +	} +} +void xgi_mem_insert_node_head(xgi_mem_list_t * list, xgi_mem_block_t * block) +{ +	
block->next = list->head; +	block->prev = NULL; + +	if (NULL == list->head) { +		list->tail = block; +	} else { +		list->head->prev = block; +	} +	list->head = block; +} + +static void xgi_mem_insert_node_tail(xgi_mem_list_t * list, +				     xgi_mem_block_t * block) +{ +	block->next = NULL; +	block->prev = list->tail; +	if (NULL == list->tail) { +		list->head = block; +	} else { +		list->tail->next = block; +	} +	list->tail = block; +} + +static void xgi_mem_delete_node(xgi_mem_list_t * list, xgi_mem_block_t * block) +{ +	if (block == list->head) { +		list->head = block->next; +	} +	if (block == list->tail) { +		list->tail = block->prev; +	} + +	if (block->prev) { +		block->prev->next = block->next; +	} +	if (block->next) { +		block->next->prev = block->prev; +	} + +	block->next = block->prev = NULL; +} +#endif +static xgi_mem_block_t *xgi_mem_alloc(xgi_info_t * info, +				      unsigned long originalSize) +{ +	struct list_head *free_list; +	xgi_mem_block_t *block, *free_block, *used_block; + +	unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; + +	XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", +		 originalSize, size); + +	if (size == 0) { +		XGI_ERROR("size == 0\n"); +		return (NULL); +	} +	XGI_INFO("max_freesize: 0x%lx \n", xgi_fb_heap->max_freesize); +	if (size > xgi_fb_heap->max_freesize) { +		XGI_ERROR +		    ("size: 0x%lx is bigger than frame buffer total free size: 0x%lx !\n", +		     size, xgi_fb_heap->max_freesize); +		return (NULL); +	} + +	free_list = xgi_fb_heap->free_list.next; + +	while (free_list != &xgi_fb_heap->free_list) { +		XGI_INFO("free_list: 0x%px \n", free_list); +		block = list_entry(free_list, struct xgi_mem_block_s, list); +		if (size <= block->size) { +			break; +		} +		free_list = free_list->next; +	} + +	if (free_list == &xgi_fb_heap->free_list) { +		XGI_ERROR +		    ("Can't allocate %ldk size from frame buffer memory !\n", +		     size / 1024); +		return (NULL); +	} + +	free_block = block; +	
XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", +		 size, free_block->offset, free_block->size); + +	if (size == free_block->size) { +		used_block = free_block; +		XGI_INFO("size == free_block->size: free_block = 0x%p\n", +			 free_block); +		list_del(&free_block->list); +	} else { +		used_block = xgi_mem_new_node(); + +		if (used_block == NULL) +			return (NULL); + +		if (used_block == free_block) { +			XGI_ERROR("used_block == free_block = 0x%p\n", +				  used_block); +		} + +		used_block->offset = free_block->offset; +		used_block->size = size; + +		free_block->offset += size; +		free_block->size -= size; +	} + +	xgi_fb_heap->max_freesize -= size; + +	list_add(&used_block->list, &xgi_fb_heap->used_list); + +	return (used_block); +} + +static xgi_mem_block_t *xgi_mem_free(xgi_info_t * info, unsigned long offset) +{ +	struct list_head *free_list, *used_list; +	xgi_mem_block_t *used_block = NULL, *block = NULL; +	xgi_mem_block_t *prev, *next; + +	unsigned long upper; +	unsigned long lower; + +	used_list = xgi_fb_heap->used_list.next; +	while (used_list != &xgi_fb_heap->used_list) { +		block = list_entry(used_list, struct xgi_mem_block_s, list); +		if (block->offset == offset) { +			break; +		} +		used_list = used_list->next; +	} + +	if (used_list == &xgi_fb_heap->used_list) { +		XGI_ERROR("can't find block: 0x%lx to free!\n", offset); +		return (NULL); +	} + +	used_block = block; +	XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx\n", +		 used_block, used_block->offset, used_block->size); + +	xgi_fb_heap->max_freesize += used_block->size; + +	prev = next = NULL; +	upper = used_block->offset + used_block->size; +	lower = used_block->offset; + +	free_list = xgi_fb_heap->free_list.next; +	while (free_list != &xgi_fb_heap->free_list) { +		block = list_entry(free_list, struct xgi_mem_block_s, list); + +		if (block->offset == upper) { +			next = block; +		} else if ((block->offset + block->size) == lower) { +			prev = block; +		} +		free_list = 
free_list->next; +	} + +	XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); +	list_del(&used_block->list); + +	if (prev && next) { +		prev->size += (used_block->size + next->size); +		list_del(&next->list); +		XGI_INFO("free node 0x%p\n", next); +		kmem_cache_free(xgi_fb_cache_block, next); +		kmem_cache_free(xgi_fb_cache_block, used_block); + +		next = NULL; +		used_block = NULL; +		return (prev); +	} + +	if (prev) { +		prev->size += used_block->size; +		XGI_INFO("free node 0x%p\n", used_block); +		kmem_cache_free(xgi_fb_cache_block, used_block); +		used_block = NULL; +		return (prev); +	} + +	if (next) { +		next->size += used_block->size; +		next->offset = used_block->offset; +		XGI_INFO("free node 0x%p\n", used_block); +		kmem_cache_free(xgi_fb_cache_block, used_block); +		used_block = NULL; +		return (next); +	} + +	list_add(&used_block->list, &xgi_fb_heap->free_list); +	XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", +		 used_block, used_block->offset, used_block->size); + +	return (used_block); +} diff --git a/linux-core/xgi_fb.h b/linux-core/xgi_fb.h index 4b7ec2f2..ae078ae0 100644 --- a/linux-core/xgi_fb.h +++ b/linux-core/xgi_fb.h @@ -1,71 +1,70 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#ifndef _XGI_FB_H_
 -#define _XGI_FB_H_
 -
 -typedef struct xgi_mem_block_s {
 -    struct  list_head       list;
 -    unsigned long           offset;
 -    unsigned long           size;
 -    atomic_t                use_count;
 -} xgi_mem_block_t;
 -
 -typedef struct xgi_mem_heap_s {
 -    struct list_head    free_list;
 -    struct list_head    used_list;
 -    struct list_head    sort_list;
 -    unsigned long       max_freesize;
 -    spinlock_t          lock;
 -} xgi_mem_heap_t;
 -
 -#if 0
 -typedef struct xgi_mem_block_s {
 -    struct xgi_mem_block_s  *next;
 -    struct xgi_mem_block_s  *prev;
 -    unsigned long           offset;
 -    unsigned long           size;
 -    atomic_t                use_count;
 -} xgi_mem_block_t;
 -
 -typedef struct xgi_mem_list_s {
 -    xgi_mem_block_t     *head;
 -    xgi_mem_block_t     *tail;
 -} xgi_mem_list_t;
 -
 -typedef struct xgi_mem_heap_s {
 -    xgi_mem_list_t      *free_list;
 -    xgi_mem_list_t      *used_list;
 -    xgi_mem_list_t      *sort_list;
 -    unsigned long       max_freesize;
 -    spinlock_t          lock;
 -} xgi_mem_heap_t;
 -#endif
 -
 -#endif
 -
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#ifndef _XGI_FB_H_ +#define _XGI_FB_H_ + +typedef struct xgi_mem_block_s { +	struct list_head list; +	unsigned long offset; +	unsigned long size; +	atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_heap_s { +	struct list_head free_list; +	struct list_head used_list; +	struct list_head sort_list; +	unsigned long max_freesize; +	spinlock_t lock; +} xgi_mem_heap_t; + +#if 0 +typedef struct xgi_mem_block_s { +	struct xgi_mem_block_s *next; +	struct xgi_mem_block_s *prev; +	unsigned long offset; +	unsigned long size; +	atomic_t use_count; +} xgi_mem_block_t; + +typedef struct xgi_mem_list_s { +	xgi_mem_block_t *head; +	xgi_mem_block_t *tail; +} xgi_mem_list_t; + +typedef struct xgi_mem_heap_s { +	xgi_mem_list_t *free_list; +	xgi_mem_list_t *used_list; +	xgi_mem_list_t *sort_list; +	unsigned long max_freesize; +	spinlock_t lock; +} xgi_mem_heap_t; +#endif + +#endif diff --git a/linux-core/xgi_linux.h b/linux-core/xgi_linux.h index f207a4f6..67c1af82 100644 --- a/linux-core/xgi_linux.h +++ b/linux-core/xgi_linux.h @@ -1,596 +1,591 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -
 -#ifndef _XGI_LINUX_H_
 -#define _XGI_LINUX_H_
 -
 -#include <linux/config.h>
 -
 -#ifndef LINUX_VERSION_CODE
 -#include <linux/version.h>
 -#endif
 -
 -#ifndef KERNEL_VERSION /* pre-2.1.90 didn't have it */
 -#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
 -#endif
 -
 -#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
 -#   error "This driver does not support pre-2.4 kernels!"
 -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
 -#define KERNEL_2_4
 -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
 -#   error "This driver does not support 2.5 kernels!"
 -#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0)
 -#define KERNEL_2_6
 -#else
 -#   error "This driver does not support development kernels!"
 -#endif
 -
 -#if defined (CONFIG_SMP) && !defined (__SMP__)
 -#define __SMP__
 -#endif
 -
 -#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS)
 -#define MODVERSIONS
 -#endif
 -
 -#if defined (MODVERSIONS) && !defined (KERNEL_2_6)
 -#include <linux/modversions.h>
 -#endif
 -
 -#include <linux/kernel.h>           /* printk */
 -#include <linux/module.h>
 -
 -#include <linux/init.h>             /* module_init, module_exit         */
 -#include <linux/types.h>            /* pic_t, size_t, __u32, etc        */
 -#include <linux/errno.h>            /* error codes                      */
 -#include <linux/list.h>             /* circular linked list             */
 -#include <linux/stddef.h>           /* NULL, offsetof                   */
 -#include <linux/wait.h>             /* wait queues                      */
 -
 -#include <linux/slab.h>             /* kmalloc, kfree, etc              */
 -#include <linux/vmalloc.h>          /* vmalloc, vfree, etc              */
 -
 -#include <linux/poll.h>             /* poll_wait                        */
 -#include <linux/delay.h>            /* mdelay, udelay                   */
 -#include <asm/msr.h>                /* rdtsc rdtscl                     */
 -
 -#include <linux/sched.h>            /* suser(), capable() replacement
 -                                       for_each_task, for_each_process  */
 -#ifdef for_each_process
 -#define XGI_SCAN_PROCESS(p) for_each_process(p)
 -#else
 -#define XGI_SCAN_PROCESS(p) for_each_task(p)
 -#endif
 -
 -#ifdef KERNEL_2_6
 -#include <linux/moduleparam.h>      /* module_param()                   */
 -#include <linux/smp_lock.h>         /* kernel_locked                    */
 -#include <asm/tlbflush.h>           /* flush_tlb(), flush_tlb_all()     */
 -#include <asm/kmap_types.h>         /* page table entry lookup          */
 -#endif
 -
 -#include <linux/pci.h>              /* pci_find_class, etc              */
 -#include <linux/interrupt.h>        /* tasklets, interrupt helpers      */
 -#include <linux/timer.h>
 -
 -#include <asm/system.h>             /* cli, sli, save_flags             */
 -#include <asm/io.h>                 /* ioremap, virt_to_phys            */
 -#include <asm/uaccess.h>            /* access_ok                        */
 -#include <asm/page.h>               /* PAGE_OFFSET                      */
 -#include <asm/pgtable.h>            /* pte bit definitions              */
 -
 -#include <linux/spinlock.h>
 -#include <asm/semaphore.h>
 -#include <linux/highmem.h>
 -
 -#ifdef CONFIG_PROC_FS
 -#include <linux/proc_fs.h>
 -#endif
 -
 -#ifdef CONFIG_DEVFS_FS
 -#include <linux/devfs_fs_kernel.h>
 -#endif
 -
 -#ifdef CONFIG_KMOD
 -#include <linux/kmod.h>
 -#endif
 -
 -#ifdef CONFIG_PM
 -#include <linux/pm.h>
 -#endif
 -
 -#ifdef CONFIG_MTRR
 -#include <asm/mtrr.h>
 -#endif
 -
 -#ifdef CONFIG_KDB
 -#include <linux/kdb.h>
 -#include <asm/kdb.h>
 -#endif
 -
 -#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE)
 -#define AGPGART
 -#include <linux/agp_backend.h>
 -#include <linux/agpgart.h>
 -#endif
 -
 -#ifndef MAX_ORDER
 -#ifdef KERNEL_2_4
 -#define MAX_ORDER 10
 -#endif
 -#ifdef KERNEL_2_6
 -#define MAX_ORDER 11
 -#endif
 -#endif
 -
 -#ifndef module_init
 -#define module_init(x)  int init_module(void) { return x(); }
 -#define module_exit(x)  void cleanup_module(void) { x(); }
 -#endif
 -
 -#ifndef minor
 -#define minor(x) MINOR(x)
 -#endif
 -
 -#ifndef IRQ_HANDLED
 -typedef void irqreturn_t;
 -#define IRQ_NONE
 -#define IRQ_HANDLED
 -#define IRQ_RETVAL(x)
 -#endif
 -
 -#if !defined (list_for_each)
 -#define list_for_each(pos, head) \
 -    for (pos = (head)->next, prefetch(pos->next); pos != (head); \
 -         pos = pos->next, prefetch(pos->next))
 -#endif
 -
 -#ifdef KERNEL_2_4
 -#define XGI_PCI_FOR_EACH_DEV(dev)   pci_for_each_dev(dev)
 -#endif
 -#ifdef KERNEL_2_6
 -extern struct list_head pci_devices;	/* list of all devices */
 -#define XGI_PCI_FOR_EACH_DEV(dev) \
 -    for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next))
 -#endif
 -
 -/*
 - * the following macro causes problems when used in the same module
 - * as module_param(); undef it so we don't accidentally mix the two
 - */
 -#if defined (KERNEL_2_6)
 -#undef  MODULE_PARM
 -#endif
 -
 -#ifdef EXPORT_NO_SYMBOLS
 -EXPORT_NO_SYMBOLS;
 -#endif
 -
 -#if defined (KERNEL_2_4)
 -#define XGI_IS_SUSER()                 suser()
 -#define XGI_PCI_DEVICE_NAME(dev)       ((dev)->name)
 -#define XGI_NUM_CPUS()                 smp_num_cpus
 -#define XGI_CLI()                      __cli()
 -#define XGI_SAVE_FLAGS(eflags)         __save_flags(eflags)
 -#define XGI_RESTORE_FLAGS(eflags)      __restore_flags(eflags)
 -#define XGI_MAY_SLEEP()                (!in_interrupt())
 -#define XGI_MODULE_PARAMETER(x)        MODULE_PARM(x, "i")
 -#endif
 -
 -#if defined (KERNEL_2_6)
 -#define XGI_IS_SUSER()                 capable(CAP_SYS_ADMIN)
 -#define XGI_PCI_DEVICE_NAME(dev)       ((dev)->pretty_name)
 -#define XGI_NUM_CPUS()                 num_online_cpus()
 -#define XGI_CLI()                      local_irq_disable()
 -#define XGI_SAVE_FLAGS(eflags)         local_save_flags(eflags)
 -#define XGI_RESTORE_FLAGS(eflags)      local_irq_restore(eflags)
 -#define XGI_MAY_SLEEP()                (!in_interrupt() && !in_atomic())
 -#define XGI_MODULE_PARAMETER(x)        module_param(x, int, 0)
 -#endif
 -
 -/* Earlier 2.4.x kernels don't have pci_disable_device() */
 -#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT
 -#define XGI_PCI_DISABLE_DEVICE(dev)      pci_disable_device(dev)
 -#else
 -#define XGI_PCI_DISABLE_DEVICE(dev)
 -#endif
 -
 -/* common defines */
 -#define GET_MODULE_SYMBOL(mod,sym)    (const void *) inter_module_get(sym)
 -#define PUT_MODULE_SYMBOL(sym)        inter_module_put((char *) sym)
 -
 -#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page))
 -#define XGI_VMA_OFFSET(vma)            (((vma)->vm_pgoff) << PAGE_SHIFT)
 -#define XGI_VMA_PRIVATE(vma)           ((vma)->vm_private_data)
 -
 -#define XGI_DEVICE_NUMBER(x)           minor((x)->i_rdev)
 -#define XGI_IS_CONTROL_DEVICE(x)       (minor((x)->i_rdev) == 255)
 -
 -#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start)
 -#define XGI_PCI_RESOURCE_SIZE(dev, bar)  ((dev)->resource[bar].end - (dev)->resource[bar].start + 1)
 -
 -#define XGI_PCI_BUS_NUMBER(dev)        (dev)->bus->number
 -#define XGI_PCI_SLOT_NUMBER(dev)       PCI_SLOT((dev)->devfn)
 -
 -#ifdef XGI_PCI_GET_CLASS_PRESENT
 -#define XGI_PCI_DEV_PUT(dev)                    pci_dev_put(dev)
 -#define XGI_PCI_GET_DEVICE(vendor,device,from)  pci_get_device(vendor,device,from)
 -#define XGI_PCI_GET_SLOT(bus,devfn)             pci_get_slot(pci_find_bus(0,bus),devfn)
 -#define XGI_PCI_GET_CLASS(class,from)           pci_get_class(class,from)
 -#else
 -#define XGI_PCI_DEV_PUT(dev)
 -#define XGI_PCI_GET_DEVICE(vendor,device,from)  pci_find_device(vendor,device,from)
 -#define XGI_PCI_GET_SLOT(bus,devfn)             pci_find_slot(bus,devfn)
 -#define XGI_PCI_GET_CLASS(class,from)           pci_find_class(class,from)
 -#endif
 -
 -/*
 - * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver
 - * model is not sufficient for full acpi support. it may work in some cases,
 - * but not enough for us to officially support this configuration.
 - */
 -#if defined(CONFIG_ACPI) && defined(KERNEL_2_6)
 -#define XGI_PM_SUPPORT_ACPI
 -#endif
 -
 -#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE)
 -#define XGI_PM_SUPPORT_APM
 -#endif
 -
 -
 -#if defined(CONFIG_DEVFS_FS)
 -#if defined(KERNEL_2_6)
 -typedef void* devfs_handle_t;
 -#define XGI_DEVFS_REGISTER(_name, _minor) \
 -    ({ \
 -        devfs_handle_t __handle = NULL; \
 -        if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \
 -                          S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \
 -        { \
 -            __handle = (void *) 1; /* XXX Fix me! (boolean) */ \
 -        } \
 -        __handle; \
 -    })
 -/*
 -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i)
 -*/
 -#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl")
 -#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi")
 -#else // defined(KERNEL_2_4)
 -#define XGI_DEVFS_REGISTER(_name, _minor) \
 -    ({ \
 -        devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \
 -                                                 XGI_DEV_MAJOR, _minor, \
 -                                                 S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \
 -        __handle; \
 -    })
 -
 -#define XGI_DEVFS_REMOVE_DEVICE(i)                                    \
 -    ({ \
 -        if (xgi_devfs_handles[i] != NULL) \
 -        { \
 -            devfs_unregister(xgi_devfs_handles[i]); \
 -        } \
 -    })
 -#define XGI_DEVFS_REMOVE_CONTROL()                                    \
 -    ({ \
 -        if (xgi_devfs_handles[0] != NULL) \
 -        { \
 -            devfs_unregister(xgi_devfs_handles[0]); \
 -        } \
 -    })
 -#endif /* defined(KERNEL_2_4) */
 -#endif /* defined(CONFIG_DEVFS_FS) */
 -
 -#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6)
 -#define XGI_REGISTER_CHRDEV(x...)    devfs_register_chrdev(x)
 -#define XGI_UNREGISTER_CHRDEV(x...)  devfs_unregister_chrdev(x)
 -#else
 -#define XGI_REGISTER_CHRDEV(x...)    register_chrdev(x)
 -#define XGI_UNREGISTER_CHRDEV(x...)  unregister_chrdev(x)
 -#endif
 -
 -#if defined(XGI_REMAP_PFN_RANGE_PRESENT)
 -#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \
 -    remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x)
 -#elif defined(XGI_REMAP_PAGE_RANGE_5)
 -#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(vma, x)
 -#elif defined(XGI_REMAP_PAGE_RANGE_4)
 -#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(x)
 -#else
 -#warning "xgi_configure.sh failed, assuming remap_page_range(5)!"
 -#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(vma, x)
 -#endif
 -
 -#if defined(pmd_offset_map)
 -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
 -    { \
 -        pg_mid_dir = pmd_offset_map(pg_dir, address); \
 -    }
 -#define XGI_PMD_UNMAP(pg_mid_dir) \
 -    { \
 -        pmd_unmap(pg_mid_dir); \
 -    }
 -#else
 -#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \
 -    { \
 -        pg_mid_dir = pmd_offset(pg_dir, address); \
 -    }
 -#define XGI_PMD_UNMAP(pg_mid_dir)
 -#endif
 -
 -#define XGI_PMD_PRESENT(pg_mid_dir) \
 -    ({ \
 -        if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \
 -        { \
 -            XGI_PMD_UNMAP(pg_mid_dir); \
 -            pg_mid_dir = NULL; \
 -        } \
 -        pg_mid_dir != NULL; \
 -    })
 -
 -#if defined(pte_offset_atomic)
 -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
 -    { \
 -        pte = pte_offset_atomic(pg_mid_dir, address); \
 -        XGI_PMD_UNMAP(pg_mid_dir); \
 -    }
 -#define XGI_PTE_UNMAP(pte) \
 -    { \
 -        pte_kunmap(pte); \
 -    }
 -#elif defined(pte_offset)
 -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
 -    { \
 -        pte = pte_offset(pg_mid_dir, address); \
 -        XGI_PMD_UNMAP(pg_mid_dir); \
 -    }
 -#define XGI_PTE_UNMAP(pte)
 -#else
 -#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \
 -    { \
 -        pte = pte_offset_map(pg_mid_dir, address); \
 -        XGI_PMD_UNMAP(pg_mid_dir); \
 -    }
 -#define XGI_PTE_UNMAP(pte) \
 -    { \
 -        pte_unmap(pte); \
 -    }
 -#endif
 -
 -#define XGI_PTE_PRESENT(pte) \
 -    ({ \
 -        if (pte) \
 -        { \
 -            if (!pte_present(*pte)) \
 -            { \
 -                XGI_PTE_UNMAP(pte); pte = NULL; \
 -            } \
 -        } \
 -        pte != NULL; \
 -    })
 -
 -#define XGI_PTE_VALUE(pte) \
 -    ({ \
 -        unsigned long __pte_value = pte_val(*pte); \
 -        XGI_PTE_UNMAP(pte); \
 -        __pte_value; \
 -    })
 -
 -#define XGI_PAGE_ALIGN(addr)             (((addr) + PAGE_SIZE - 1) / PAGE_SIZE)
 -#define XGI_MASK_OFFSET(addr)            ((addr) & (PAGE_SIZE - 1))
 -
 -#if !defined (pgprot_noncached)
 -static inline pgprot_t pgprot_noncached(pgprot_t old_prot)
 -    {
 -        pgprot_t new_prot = old_prot;
 -        if (boot_cpu_data.x86 > 3)
 -            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD);
 -        return new_prot;
 -    }
 -#endif
 -
 -#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined)
 -/* Added define for write combining page, only valid if pat enabled. */
 -#define _PAGE_WRTCOMB _PAGE_PWT
 -#define __PAGE_KERNEL_WRTCOMB \
 -    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED)
 -#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB)
 -
 -static inline pgprot_t pgprot_writecombined(pgprot_t old_prot)
 -    {
 -        pgprot_t new_prot = old_prot;
 -        if (boot_cpu_data.x86 > 3)
 -        {
 -            pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT);
 -            new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB);
 -        }
 -        return new_prot;
 -    }
 -#endif
 -
 -#if !defined(page_to_pfn)
 -#define page_to_pfn(page)  ((page) - mem_map)
 -#endif
 -
 -#define XGI_VMALLOC(ptr, size) \
 -    { \
 -        (ptr) = vmalloc_32(size); \
 -    }
 -
 -#define XGI_VFREE(ptr, size) \
 -    { \
 -        vfree((void *) (ptr)); \
 -    }
 -
 -#define XGI_IOREMAP(ptr, physaddr, size) \
 -    { \
 -        (ptr) = ioremap(physaddr, size); \
 -    }
 -
 -#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \
 -    { \
 -        (ptr) = ioremap_nocache(physaddr, size); \
 -    }
 -
 -#define XGI_IOUNMAP(ptr, size) \
 -    { \
 -        iounmap(ptr); \
 -    }
 -
 -/*
 - * only use this because GFP_KERNEL may sleep..
 - * GFP_ATOMIC is ok, it won't sleep
 - */
 -#define XGI_KMALLOC(ptr, size) \
 -    { \
 -        (ptr) = kmalloc(size, GFP_KERNEL); \
 -    }
 -
 -#define XGI_KMALLOC_ATOMIC(ptr, size) \
 -    { \
 -        (ptr) = kmalloc(size, GFP_ATOMIC); \
 -    }
 -
 -#define XGI_KFREE(ptr, size) \
 -    { \
 -        kfree((void *) (ptr)); \
 -    }
 -
 -#define XGI_GET_FREE_PAGES(ptr, order) \
 -    { \
 -        (ptr) = __get_free_pages(GFP_KERNEL, order); \
 -    }
 -
 -#define XGI_FREE_PAGES(ptr, order) \
 -    { \
 -        free_pages(ptr, order); \
 -    }
 -
 -typedef struct xgi_pte_s  {
 -    unsigned long       phys_addr;
 -    unsigned long       virt_addr;
 -} xgi_pte_t;
 -
 -/*
 - * AMD Athlon processors expose a subtle bug in the Linux
 - * kernel, that may lead to AGP memory corruption. Recent
 - * kernel versions had a workaround for this problem, but
 - * 2.4.20 is the first kernel to address it properly. The
 - * page_attr API provides the means to solve the problem.
 - */
 -#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT)
 -static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t *page_ptr)
 -    {
 -        struct page *page = virt_to_page(__va(page_ptr->phys_addr));
 -        change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 -    }
 -static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t *page_ptr)
 -    {
 -        struct page *page = virt_to_page(__va(page_ptr->phys_addr));
 -        change_page_attr(page, 1, PAGE_KERNEL);
 -    }
 -#else
 -#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list)
 -#define XGI_SET_PAGE_ATTRIB_CACHED(page_list)
 -#endif
 -
 -#ifdef KERNEL_2_4
 -#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->count)
 -#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->count)
 -#define XGI_PAGE_COUNT(page)		atomic_read(&(page)->count)
 -#define XGI_SET_PAGE_COUNT(page,v) 	atomic_set(&(page)->count, v)
 -
 -#define XGILockPage(page)           set_bit(PG_locked, &(page)->flags)
 -#define XGIUnlockPage(page)         clear_bit(PG_locked, &(page)->flags)
 -#endif
 -
 -#ifdef KERNEL_2_6
 -/* add for SUSE 9, Jill*/
 -#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4)
 -#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->count)
 -#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->count)
 -#define XGI_PAGE_COUNT(page)		atomic_read(&(page)->count)
 -#define XGI_SET_PAGE_COUNT(page,v) 	atomic_set(&(page)->count, v)
 -#else
 -#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->_count)
 -#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->_count)
 -#define XGI_PAGE_COUNT(page)		atomic_read(&(page)->_count)
 -#define XGI_SET_PAGE_COUNT(page,v) 	atomic_set(&(page)->_count, v)
 -#endif
 -#define XGILockPage(page)           SetPageLocked(page)
 -#define XGIUnlockPage(page)         ClearPageLocked(page)
 -#endif
 -
 -
 -/*
 - * hide a pointer to struct xgi_info_t in a file-private info
 - */
 -
 -typedef struct
 -{
 -    void                *info;
 -    U32                 num_events;
 -    spinlock_t          fp_lock;
 -    wait_queue_head_t   wait_queue;
 -} xgi_file_private_t;
 -
 -#define FILE_PRIVATE(filp)      ((filp)->private_data)
 -
 -#define XGI_GET_FP(filp)        ((xgi_file_private_t *) FILE_PRIVATE(filp))
 -
 -/* for the card devices */
 -#define XGI_INFO_FROM_FP(filp)  (XGI_GET_FP(filp)->info)
 -
 -#ifdef KERNEL_2_0
 -#define INODE_FROM_FP(filp) ((filp)->f_inode)
 -#else
 -#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode)
 -#endif
 -
 -#define XGI_ATOMIC_SET(data,val)         atomic_set(&(data), (val))
 -#define XGI_ATOMIC_INC(data)             atomic_inc(&(data))
 -#define XGI_ATOMIC_DEC(data)             atomic_dec(&(data))
 -#define XGI_ATOMIC_DEC_AND_TEST(data)    atomic_dec_and_test(&(data))
 -#define XGI_ATOMIC_READ(data)            atomic_read(&(data))
 -
 -/*
 - * lock-related functions that should only be called from this file
 - */
 -#define xgi_init_lock(lock)             spin_lock_init(&lock)
 -#define xgi_lock(lock)                  spin_lock(&lock)
 -#define xgi_unlock(lock)                spin_unlock(&lock)
 -#define xgi_down(lock)                  down(&lock)
 -#define xgi_up(lock)                    up(&lock)
 -
 -#define xgi_lock_irqsave(lock,flags)    spin_lock_irqsave(&lock,flags)
 -#define xgi_unlock_irqsave(lock,flags)  spin_unlock_irqrestore(&lock,flags)
 -
 -#endif
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 + ***************************************************************************/ + +#ifndef _XGI_LINUX_H_ +#define _XGI_LINUX_H_ + +#include <linux/config.h> + +#ifndef LINUX_VERSION_CODE +#include <linux/version.h> +#endif + +#ifndef KERNEL_VERSION		/* pre-2.1.90 didn't have it */ +#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) +#endif + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) +#   error "This driver does not support pre-2.4 kernels!" 
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) +#define KERNEL_2_4 +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) +#   error "This driver does not support 2.5 kernels!" +#elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 7, 0) +#define KERNEL_2_6 +#else +#   error "This driver does not support development kernels!" +#endif + +#if defined (CONFIG_SMP) && !defined (__SMP__) +#define __SMP__ +#endif + +#if defined (CONFIG_MODVERSIONS) && !defined (MODVERSIONS) +#define MODVERSIONS +#endif + +#if defined (MODVERSIONS) && !defined (KERNEL_2_6) +#include <linux/modversions.h> +#endif + +#include <linux/kernel.h>	/* printk */ +#include <linux/module.h> + +#include <linux/init.h>		/* module_init, module_exit         */ +#include <linux/types.h>	/* pic_t, size_t, __u32, etc        */ +#include <linux/errno.h>	/* error codes                      */ +#include <linux/list.h>		/* circular linked list             */ +#include <linux/stddef.h>	/* NULL, offsetof                   */ +#include <linux/wait.h>		/* wait queues                      */ + +#include <linux/slab.h>		/* kmalloc, kfree, etc              */ +#include <linux/vmalloc.h>	/* vmalloc, vfree, etc              */ + +#include <linux/poll.h>		/* poll_wait                        */ +#include <linux/delay.h>	/* mdelay, udelay                   */ +#include <asm/msr.h>		/* rdtsc rdtscl                     */ + +#include <linux/sched.h>	/* suser(), capable() replacement +				   for_each_task, for_each_process  */ +#ifdef for_each_process +#define XGI_SCAN_PROCESS(p) for_each_process(p) +#else +#define XGI_SCAN_PROCESS(p) for_each_task(p) +#endif + +#ifdef KERNEL_2_6 +#include <linux/moduleparam.h>	/* module_param()                   */ +#include <linux/smp_lock.h>	/* kernel_locked                    */ +#include <asm/tlbflush.h>	/* flush_tlb(), flush_tlb_all()     */ +#include <asm/kmap_types.h>	/* page table entry lookup          */ +#endif + +#include <linux/pci.h>		/* pci_find_class, etc              */ +#include 
<linux/interrupt.h>	/* tasklets, interrupt helpers      */ +#include <linux/timer.h> + +#include <asm/system.h>		/* cli, sli, save_flags             */ +#include <asm/io.h>		/* ioremap, virt_to_phys            */ +#include <asm/uaccess.h>	/* access_ok                        */ +#include <asm/page.h>		/* PAGE_OFFSET                      */ +#include <asm/pgtable.h>	/* pte bit definitions              */ + +#include <linux/spinlock.h> +#include <asm/semaphore.h> +#include <linux/highmem.h> + +#ifdef CONFIG_PROC_FS +#include <linux/proc_fs.h> +#endif + +#ifdef CONFIG_DEVFS_FS +#include <linux/devfs_fs_kernel.h> +#endif + +#ifdef CONFIG_KMOD +#include <linux/kmod.h> +#endif + +#ifdef CONFIG_PM +#include <linux/pm.h> +#endif + +#ifdef CONFIG_MTRR +#include <asm/mtrr.h> +#endif + +#ifdef CONFIG_KDB +#include <linux/kdb.h> +#include <asm/kdb.h> +#endif + +#if defined (CONFIG_AGP) || defined (CONFIG_AGP_MODULE) +#define AGPGART +#include <linux/agp_backend.h> +#include <linux/agpgart.h> +#endif + +#ifndef MAX_ORDER +#ifdef KERNEL_2_4 +#define MAX_ORDER 10 +#endif +#ifdef KERNEL_2_6 +#define MAX_ORDER 11 +#endif +#endif + +#ifndef module_init +#define module_init(x)  int init_module(void) { return x(); } +#define module_exit(x)  void cleanup_module(void) { x(); } +#endif + +#ifndef minor +#define minor(x) MINOR(x) +#endif + +#ifndef IRQ_HANDLED +typedef void irqreturn_t; +#define IRQ_NONE +#define IRQ_HANDLED +#define IRQ_RETVAL(x) +#endif + +#if !defined (list_for_each) +#define list_for_each(pos, head) \ +    for (pos = (head)->next, prefetch(pos->next); pos != (head); \ +         pos = pos->next, prefetch(pos->next)) +#endif + +#ifdef KERNEL_2_4 +#define XGI_PCI_FOR_EACH_DEV(dev)   pci_for_each_dev(dev) +#endif +#ifdef KERNEL_2_6 +extern struct list_head pci_devices;	/* list of all devices */ +#define XGI_PCI_FOR_EACH_DEV(dev) \ +    for(dev = pci_dev_g(pci_devices.next); dev != pci_dev_g(&pci_devices); dev = pci_dev_g(dev->global_list.next)) +#endif + +/* + * the 
following macro causes problems when used in the same module + * as module_param(); undef it so we don't accidentally mix the two + */ +#if defined (KERNEL_2_6) +#undef  MODULE_PARM +#endif + +#ifdef EXPORT_NO_SYMBOLS +EXPORT_NO_SYMBOLS; +#endif + +#if defined (KERNEL_2_4) +#define XGI_IS_SUSER()                 suser() +#define XGI_PCI_DEVICE_NAME(dev)       ((dev)->name) +#define XGI_NUM_CPUS()                 smp_num_cpus +#define XGI_CLI()                      __cli() +#define XGI_SAVE_FLAGS(eflags)         __save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags)      __restore_flags(eflags) +#define XGI_MAY_SLEEP()                (!in_interrupt()) +#define XGI_MODULE_PARAMETER(x)        MODULE_PARM(x, "i") +#endif + +#if defined (KERNEL_2_6) +#define XGI_IS_SUSER()                 capable(CAP_SYS_ADMIN) +#define XGI_PCI_DEVICE_NAME(dev)       ((dev)->pretty_name) +#define XGI_NUM_CPUS()                 num_online_cpus() +#define XGI_CLI()                      local_irq_disable() +#define XGI_SAVE_FLAGS(eflags)         local_save_flags(eflags) +#define XGI_RESTORE_FLAGS(eflags)      local_irq_restore(eflags) +#define XGI_MAY_SLEEP()                (!in_interrupt() && !in_atomic()) +#define XGI_MODULE_PARAMETER(x)        module_param(x, int, 0) +#endif + +/* Earlier 2.4.x kernels don't have pci_disable_device() */ +#ifdef XGI_PCI_DISABLE_DEVICE_PRESENT +#define XGI_PCI_DISABLE_DEVICE(dev)      pci_disable_device(dev) +#else +#define XGI_PCI_DISABLE_DEVICE(dev) +#endif + +/* common defines */ +#define GET_MODULE_SYMBOL(mod,sym)    (const void *) inter_module_get(sym) +#define PUT_MODULE_SYMBOL(sym)        inter_module_put((char *) sym) + +#define XGI_GET_PAGE_STRUCT(phys_page) virt_to_page(__va(phys_page)) +#define XGI_VMA_OFFSET(vma)            (((vma)->vm_pgoff) << PAGE_SHIFT) +#define XGI_VMA_PRIVATE(vma)           ((vma)->vm_private_data) + +#define XGI_DEVICE_NUMBER(x)           minor((x)->i_rdev) +#define XGI_IS_CONTROL_DEVICE(x)       (minor((x)->i_rdev) == 
255) + +#define XGI_PCI_RESOURCE_START(dev, bar) ((dev)->resource[bar].start) +#define XGI_PCI_RESOURCE_SIZE(dev, bar)  ((dev)->resource[bar].end - (dev)->resource[bar].start + 1) + +#define XGI_PCI_BUS_NUMBER(dev)        (dev)->bus->number +#define XGI_PCI_SLOT_NUMBER(dev)       PCI_SLOT((dev)->devfn) + +#ifdef XGI_PCI_GET_CLASS_PRESENT +#define XGI_PCI_DEV_PUT(dev)                    pci_dev_put(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from)  pci_get_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn)             pci_get_slot(pci_find_bus(0,bus),devfn) +#define XGI_PCI_GET_CLASS(class,from)           pci_get_class(class,from) +#else +#define XGI_PCI_DEV_PUT(dev) +#define XGI_PCI_GET_DEVICE(vendor,device,from)  pci_find_device(vendor,device,from) +#define XGI_PCI_GET_SLOT(bus,devfn)             pci_find_slot(bus,devfn) +#define XGI_PCI_GET_CLASS(class,from)           pci_find_class(class,from) +#endif + +/* + * acpi support has been back-ported to the 2.4 kernel, but the 2.4 driver + * model is not sufficient for full acpi support. it may work in some cases, + * but not enough for us to officially support this configuration. + */ +#if defined(CONFIG_ACPI) && defined(KERNEL_2_6) +#define XGI_PM_SUPPORT_ACPI +#endif + +#if defined(CONFIG_APM) || defined(CONFIG_APM_MODULE) +#define XGI_PM_SUPPORT_APM +#endif + +#if defined(CONFIG_DEVFS_FS) +#if defined(KERNEL_2_6) +typedef void *devfs_handle_t; +#define XGI_DEVFS_REGISTER(_name, _minor) \ +    ({ \ +        devfs_handle_t __handle = NULL; \ +        if (devfs_mk_cdev(MKDEV(XGI_DEV_MAJOR, _minor), \ +                          S_IFCHR | S_IRUGO | S_IWUGO, _name) == 0) \ +        { \ +            __handle = (void *) 1; /* XXX Fix me! 
(boolean) */ \ +        } \ +        __handle; \ +    }) +/* +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi%d", i) +*/ +#define XGI_DEVFS_REMOVE_CONTROL() devfs_remove("xgi_ctl") +#define XGI_DEVFS_REMOVE_DEVICE(i) devfs_remove("xgi") +#else				// defined(KERNEL_2_4) +#define XGI_DEVFS_REGISTER(_name, _minor) \ +    ({ \ +        devfs_handle_t __handle = devfs_register(NULL, _name, DEVFS_FL_AUTO_DEVNUM, \ +                                                 XGI_DEV_MAJOR, _minor, \ +                                                 S_IFCHR | S_IRUGO | S_IWUGO, &xgi_fops, NULL); \ +        __handle; \ +    }) + +#define XGI_DEVFS_REMOVE_DEVICE(i)                                    \ +    ({ \ +        if (xgi_devfs_handles[i] != NULL) \ +        { \ +            devfs_unregister(xgi_devfs_handles[i]); \ +        } \ +    }) +#define XGI_DEVFS_REMOVE_CONTROL()                                    \ +    ({ \ +        if (xgi_devfs_handles[0] != NULL) \ +        { \ +            devfs_unregister(xgi_devfs_handles[0]); \ +        } \ +    }) +#endif				/* defined(KERNEL_2_4) */ +#endif				/* defined(CONFIG_DEVFS_FS) */ + +#if defined(CONFIG_DEVFS_FS) && !defined(KERNEL_2_6) +#define XGI_REGISTER_CHRDEV(x...)    devfs_register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...)  devfs_unregister_chrdev(x) +#else +#define XGI_REGISTER_CHRDEV(x...)    register_chrdev(x) +#define XGI_UNREGISTER_CHRDEV(x...)  unregister_chrdev(x) +#endif + +#if defined(XGI_REMAP_PFN_RANGE_PRESENT) +#define XGI_REMAP_PAGE_RANGE(from, offset, x...) \ +    remap_pfn_range(vma, from, ((offset) >> PAGE_SHIFT), x) +#elif defined(XGI_REMAP_PAGE_RANGE_5) +#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(vma, x) +#elif defined(XGI_REMAP_PAGE_RANGE_4) +#define XGI_REMAP_PAGE_RANGE(x...)      remap_page_range(x) +#else +#warning "xgi_configure.sh failed, assuming remap_page_range(5)!" +#define XGI_REMAP_PAGE_RANGE(x...)      
remap_page_range(vma, x) +#endif + +#if defined(pmd_offset_map) +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ +    { \ +        pg_mid_dir = pmd_offset_map(pg_dir, address); \ +    } +#define XGI_PMD_UNMAP(pg_mid_dir) \ +    { \ +        pmd_unmap(pg_mid_dir); \ +    } +#else +#define XGI_PMD_OFFSET(addres, pg_dir, pg_mid_dir) \ +    { \ +        pg_mid_dir = pmd_offset(pg_dir, address); \ +    } +#define XGI_PMD_UNMAP(pg_mid_dir) +#endif + +#define XGI_PMD_PRESENT(pg_mid_dir) \ +    ({ \ +        if ((pg_mid_dir) && (pmd_none(*pg_mid_dir))) \ +        { \ +            XGI_PMD_UNMAP(pg_mid_dir); \ +            pg_mid_dir = NULL; \ +        } \ +        pg_mid_dir != NULL; \ +    }) + +#if defined(pte_offset_atomic) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ +    { \ +        pte = pte_offset_atomic(pg_mid_dir, address); \ +        XGI_PMD_UNMAP(pg_mid_dir); \ +    } +#define XGI_PTE_UNMAP(pte) \ +    { \ +        pte_kunmap(pte); \ +    } +#elif defined(pte_offset) +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ +    { \ +        pte = pte_offset(pg_mid_dir, address); \ +        XGI_PMD_UNMAP(pg_mid_dir); \ +    } +#define XGI_PTE_UNMAP(pte) +#else +#define XGI_PTE_OFFSET(addres, pg_mid_dir, pte) \ +    { \ +        pte = pte_offset_map(pg_mid_dir, address); \ +        XGI_PMD_UNMAP(pg_mid_dir); \ +    } +#define XGI_PTE_UNMAP(pte) \ +    { \ +        pte_unmap(pte); \ +    } +#endif + +#define XGI_PTE_PRESENT(pte) \ +    ({ \ +        if (pte) \ +        { \ +            if (!pte_present(*pte)) \ +            { \ +                XGI_PTE_UNMAP(pte); pte = NULL; \ +            } \ +        } \ +        pte != NULL; \ +    }) + +#define XGI_PTE_VALUE(pte) \ +    ({ \ +        unsigned long __pte_value = pte_val(*pte); \ +        XGI_PTE_UNMAP(pte); \ +        __pte_value; \ +    }) + +#define XGI_PAGE_ALIGN(addr)             (((addr) + PAGE_SIZE - 1) / PAGE_SIZE) +#define XGI_MASK_OFFSET(addr)            ((addr) & (PAGE_SIZE - 1)) + +#if !defined 
(pgprot_noncached) +static inline pgprot_t pgprot_noncached(pgprot_t old_prot) +{ +	pgprot_t new_prot = old_prot; +	if (boot_cpu_data.x86 > 3) +		new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_PCD); +	return new_prot; +} +#endif + +#if defined(XGI_BUILD_XGI_PAT_SUPPORT) && !defined (pgprot_writecombined) +/* Added define for write combining page, only valid if pat enabled. */ +#define _PAGE_WRTCOMB _PAGE_PWT +#define __PAGE_KERNEL_WRTCOMB \ +    (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_WRTCOMB | _PAGE_ACCESSED) +#define PAGE_KERNEL_WRTCOMB MAKE_GLOBAL(__PAGE_KERNEL_WRTCOMB) + +static inline pgprot_t pgprot_writecombined(pgprot_t old_prot) +{ +	pgprot_t new_prot = old_prot; +	if (boot_cpu_data.x86 > 3) { +		pgprot_val(old_prot) &= ~(_PAGE_PCD | _PAGE_PWT); +		new_prot = __pgprot(pgprot_val(old_prot) | _PAGE_WRTCOMB); +	} +	return new_prot; +} +#endif + +#if !defined(page_to_pfn) +#define page_to_pfn(page)  ((page) - mem_map) +#endif + +#define XGI_VMALLOC(ptr, size) \ +    { \ +        (ptr) = vmalloc_32(size); \ +    } + +#define XGI_VFREE(ptr, size) \ +    { \ +        vfree((void *) (ptr)); \ +    } + +#define XGI_IOREMAP(ptr, physaddr, size) \ +    { \ +        (ptr) = ioremap(physaddr, size); \ +    } + +#define XGI_IOREMAP_NOCACHE(ptr, physaddr, size) \ +    { \ +        (ptr) = ioremap_nocache(physaddr, size); \ +    } + +#define XGI_IOUNMAP(ptr, size) \ +    { \ +        iounmap(ptr); \ +    } + +/* + * only use this because GFP_KERNEL may sleep.. 
+ * GFP_ATOMIC is ok, it won't sleep + */ +#define XGI_KMALLOC(ptr, size) \ +    { \ +        (ptr) = kmalloc(size, GFP_KERNEL); \ +    } + +#define XGI_KMALLOC_ATOMIC(ptr, size) \ +    { \ +        (ptr) = kmalloc(size, GFP_ATOMIC); \ +    } + +#define XGI_KFREE(ptr, size) \ +    { \ +        kfree((void *) (ptr)); \ +    } + +#define XGI_GET_FREE_PAGES(ptr, order) \ +    { \ +        (ptr) = __get_free_pages(GFP_KERNEL, order); \ +    } + +#define XGI_FREE_PAGES(ptr, order) \ +    { \ +        free_pages(ptr, order); \ +    } + +typedef struct xgi_pte_s { +	unsigned long phys_addr; +	unsigned long virt_addr; +} xgi_pte_t; + +/* + * AMD Athlon processors expose a subtle bug in the Linux + * kernel, that may lead to AGP memory corruption. Recent + * kernel versions had a workaround for this problem, but + * 2.4.20 is the first kernel to address it properly. The + * page_attr API provides the means to solve the problem. + */ +#if defined(XGI_CHANGE_PAGE_ATTR_PRESENT) +static inline void XGI_SET_PAGE_ATTRIB_UNCACHED(xgi_pte_t * page_ptr) +{ +	struct page *page = virt_to_page(__va(page_ptr->phys_addr)); +	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE); +} +static inline void XGI_SET_PAGE_ATTRIB_CACHED(xgi_pte_t * page_ptr) +{ +	struct page *page = virt_to_page(__va(page_ptr->phys_addr)); +	change_page_attr(page, 1, PAGE_KERNEL); +} +#else +#define XGI_SET_PAGE_ATTRIB_UNCACHED(page_list) +#define XGI_SET_PAGE_ATTRIB_CACHED(page_list) +#endif + +#ifdef KERNEL_2_4 +#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page)		atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) 	atomic_set(&(page)->count, v) + +#define XGILockPage(page)           set_bit(PG_locked, &(page)->flags) +#define XGIUnlockPage(page)         clear_bit(PG_locked, &(page)->flags) +#endif + +#ifdef KERNEL_2_6 +/* add for SUSE 9, Jill*/ +#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 4) 
+#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->count) +#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->count) +#define XGI_PAGE_COUNT(page)		atomic_read(&(page)->count) +#define XGI_SET_PAGE_COUNT(page,v) 	atomic_set(&(page)->count, v) +#else +#define XGI_INC_PAGE_COUNT(page)    atomic_inc(&(page)->_count) +#define XGI_DEC_PAGE_COUNT(page)    atomic_dec(&(page)->_count) +#define XGI_PAGE_COUNT(page)		atomic_read(&(page)->_count) +#define XGI_SET_PAGE_COUNT(page,v) 	atomic_set(&(page)->_count, v) +#endif +#define XGILockPage(page)           SetPageLocked(page) +#define XGIUnlockPage(page)         ClearPageLocked(page) +#endif + +/* + * hide a pointer to struct xgi_info_t in a file-private info + */ + +typedef struct { +	void *info; +	U32 num_events; +	spinlock_t fp_lock; +	wait_queue_head_t wait_queue; +} xgi_file_private_t; + +#define FILE_PRIVATE(filp)      ((filp)->private_data) + +#define XGI_GET_FP(filp)        ((xgi_file_private_t *) FILE_PRIVATE(filp)) + +/* for the card devices */ +#define XGI_INFO_FROM_FP(filp)  (XGI_GET_FP(filp)->info) + +#ifdef KERNEL_2_0 +#define INODE_FROM_FP(filp) ((filp)->f_inode) +#else +#define INODE_FROM_FP(filp) ((filp)->f_dentry->d_inode) +#endif + +#define XGI_ATOMIC_SET(data,val)         atomic_set(&(data), (val)) +#define XGI_ATOMIC_INC(data)             atomic_inc(&(data)) +#define XGI_ATOMIC_DEC(data)             atomic_dec(&(data)) +#define XGI_ATOMIC_DEC_AND_TEST(data)    atomic_dec_and_test(&(data)) +#define XGI_ATOMIC_READ(data)            atomic_read(&(data)) + +/* + * lock-related functions that should only be called from this file + */ +#define xgi_init_lock(lock)             spin_lock_init(&lock) +#define xgi_lock(lock)                  spin_lock(&lock) +#define xgi_unlock(lock)                spin_unlock(&lock) +#define xgi_down(lock)                  down(&lock) +#define xgi_up(lock)                    up(&lock) + +#define xgi_lock_irqsave(lock,flags)    spin_lock_irqsave(&lock,flags) +#define 
xgi_unlock_irqsave(lock,flags)  spin_unlock_irqrestore(&lock,flags) + +#endif diff --git a/linux-core/xgi_misc.c b/linux-core/xgi_misc.c index b15c7ecf..61e40594 100644 --- a/linux-core/xgi_misc.c +++ b/linux-core/xgi_misc.c @@ -1,657 +1,630 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#include "xgi_types.h"
 -#include "xgi_linux.h"
 -#include "xgi_drv.h"
 -#include "xgi_regs.h"
 -#include "xgi_pcie.h"
 -
 -void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t *req)
 -{
 -    req->device_id      = info->device_id;
 -    req->device_name[0] = 'x';
 -    req->device_name[1] = 'g';
 -    req->device_name[2] = '4';
 -    req->device_name[3] = '7';
 -    req->vendor_id      = info->vendor_id;
 -    req->curr_display_mode  = 0;
 -    req->fb_size            = info->fb.size;
 -    req->sarea_bus_addr     = info->sarea_info.bus_addr;
 -    req->sarea_size         = info->sarea_info.size;
 -}
 -
 -void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req)
 -{
 -    req->mmioBase = (void *)info->mmio.base;
 -    req->size     = info->mmio.size;
 -}
 -
 -void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req)
 -{
 -    info->scrn_info.scrn_start = req->scrn_start;
 -    info->scrn_info.scrn_xres  = req->scrn_xres;
 -    info->scrn_info.scrn_yres  = req->scrn_yres;
 -    info->scrn_info.scrn_bpp   = req->scrn_bpp;
 -    info->scrn_info.scrn_pitch = req->scrn_pitch;
 -
 -    XGI_INFO("info->scrn_info.scrn_start: 0x%lx"
 -             "info->scrn_info.scrn_xres: 0x%lx"
 -             "info->scrn_info.scrn_yres: 0x%lx"
 -             "info->scrn_info.scrn_bpp: 0x%lx"
 -             "info->scrn_info.scrn_pitch: 0x%lx\n",
 -              info->scrn_info.scrn_start,
 -              info->scrn_info.scrn_xres,
 -              info->scrn_info.scrn_yres,
 -              info->scrn_info.scrn_bpp,
 -              info->scrn_info.scrn_pitch);
 -}
 -
 -void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req)
 -{
 -    req->scrn_start = info->scrn_info.scrn_start;
 -    req->scrn_xres  = info->scrn_info.scrn_xres;
 -    req->scrn_yres  = info->scrn_info.scrn_yres;
 -    req->scrn_bpp   = info->scrn_info.scrn_bpp;
 -    req->scrn_pitch = info->scrn_info.scrn_pitch;
 -
 -    XGI_INFO("req->scrn_start: 0x%lx"
 -             "req->scrn_xres: 0x%lx"
 -             "req->scrn_yres: 0x%lx"
 -             "req->scrn_bpp: 0x%lx"
 -             "req->scrn_pitch: 0x%lx\n",
 -              req->scrn_start,
 -              req->scrn_xres,
 -              req->scrn_yres,
 -              req->scrn_bpp,
 -              req->scrn_pitch);
 -}
 -
 -void xgi_ge_reset(xgi_info_t *info)
 -{
 -    xgi_disable_ge(info);
 -    xgi_enable_ge(info);
 -}
 -
 -void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req)
 -{
 -    info->sarea_info.bus_addr = req->bus_addr;
 -    info->sarea_info.size     = req->size;
 -    XGI_INFO("info->sarea_info.bus_addr: 0x%lx"
 -             "info->sarea_info.size: 0x%lx\n",
 -              info->sarea_info.bus_addr,
 -              info->sarea_info.size);
 -}
 -
 -/*
 - * irq functions
 - */
 -#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff
 -
 -static U32  s_invalid_begin = 0;
 -
 -BOOL xgi_ge_irq_handler(xgi_info_t *info)
 -{
 -    volatile U8     *mmio_vbase = info->mmio.vbase;
 -    volatile U32    *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800);
 -    U32             int_status = ge_3d_status[4];   // interrupt status
 -    U32             auto_reset_count = 0;
 -    BOOL            is_support_auto_reset = FALSE;
 -
 -    // Check GE on/off
 -    if (0 == (0xffffc0f0 & int_status))
 -    {
 -        U32 old_ge_status           = ge_3d_status[0x00];
 -        U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a];
 -        if (0 != (0x1000 & int_status))
 -        {
 -            // We got GE stall interrupt.
 -            ge_3d_status[0x04] = int_status | 0x04000000;
 -
 -            if (TRUE == is_support_auto_reset)
 -            {
 -                BOOL    is_wrong_signal = FALSE;
 -                static  U32 last_int_tick_low, last_int_tick_high;
 -                static  U32 new_int_tick_low,  new_int_tick_high;
 -                static  U32 continoue_int_count = 0;
 -                // OE II is busy.
 -                while (old_ge_status & 0x001c0000)
 -                {
 -                    U16 check;
 -                    // Check Read back status
 -                    *(mmio_vbase + 0x235c) = 0x80;
 -                    check = *((volatile U16*)(mmio_vbase + 0x2360));
 -                    if ((check & 0x3f) != ((check & 0x3f00) >> 8))
 -                    {
 -                        is_wrong_signal = TRUE;
 -                        break;
 -                    }
 -                    // Check RO channel
 -                    *(mmio_vbase + 0x235c) = 0x83;
 -                    check = *((volatile U16*)(mmio_vbase + 0x2360));
 -                    if ((check & 0x0f) != ((check & 0xf0) >> 4))
 -                    {
 -                        is_wrong_signal = TRUE;
 -                        break;
 -                    }
 -                    // Check RW channel
 -                    *(mmio_vbase + 0x235c) = 0x88;
 -                    check = *((volatile U16*)(mmio_vbase + 0x2360));
 -                    if ((check & 0x0f) != ((check & 0xf0) >> 4))
 -                    {
 -                        is_wrong_signal = TRUE;
 -                        break;
 -                    }
 -                    // Check RO channel outstanding
 -                    *(mmio_vbase + 0x235c) = 0x8f;
 -                    check = *((volatile U16*)(mmio_vbase + 0x2360));
 -                    if (0 != (check & 0x3ff))
 -                    {
 -                        is_wrong_signal = TRUE;
 -                        break;
 -                    }
 -                    // Check RW channel outstanding
 -                    *(mmio_vbase + 0x235c) = 0x90;
 -                    check = *((volatile U16*)(mmio_vbase + 0x2360));
 -                    if (0 != (check & 0x3ff))
 -                    {
 -                        is_wrong_signal = TRUE;
 -                        break;
 -                    }
 -                    // No pending PCIE request. GE stall.
 -                    break;
 -                }
 -
 -                if (is_wrong_signal)
 -                {
 -                    // Nothing but skip.
 -                }
 -                else if (0 == continoue_int_count++)
 -                {
 -                    rdtsc(last_int_tick_low, last_int_tick_high);
 -                }
 -                else
 -                {
 -                    rdtscl(new_int_tick_low);
 -                    if ((new_int_tick_low - last_int_tick_low) > STALL_INTERRUPT_RESET_THRESHOLD)
 -                    {
 -                        continoue_int_count = 0;
 -                    }
 -                    else if (continoue_int_count >= 3)
 -                    {
 -                        continoue_int_count = 0;
 -
 -                        // GE Hung up, need reset.
 -                        XGI_INFO("Reset GE!\n");
 -
 -                        *(mmio_vbase + 0xb057) = 8;
 -                        int time_out = 0xffff;
 -                        while (0 != (ge_3d_status[0x00] & 0xf0000000))
 -                        {
 -                            while (0 != ((--time_out) & 0xfff));
 -                            if (0 == time_out)
 -                            {
 -                                XGI_INFO("Can not reset back 0x%lx!\n", ge_3d_status[0x00]);
 -                                *(mmio_vbase + 0xb057) = 0;
 -                                // Have to use 3x5.36 to reset.
 -                                // Save and close dynamic gating
 -                                U8 old_3ce = *(mmio_vbase + 0x3ce);
 -                                *(mmio_vbase + 0x3ce)  = 0x2a;
 -                                U8 old_3cf = *(mmio_vbase + 0x3cf);
 -                                *(mmio_vbase + 0x3cf)  = old_3cf & 0xfe;
 -                                // Reset GE
 -                                U8 old_index = *(mmio_vbase + 0x3d4);
 -                                *(mmio_vbase + 0x3d4)  = 0x36;
 -                                U8 old_36  = *(mmio_vbase + 0x3d5);
 -                                *(mmio_vbase + 0x3d5)  = old_36 | 0x10;
 -                                while (0 != ((--time_out) & 0xfff));
 -                                *(mmio_vbase + 0x3d5)  = old_36;
 -                                *(mmio_vbase + 0x3d4)  = old_index;
 -                                // Restore dynamic gating
 -                                *(mmio_vbase + 0x3cf)  = old_3cf;
 -                                *(mmio_vbase + 0x3ce)  = old_3ce;
 -                                break;
 -                            }
 -                        }
 -                        *(mmio_vbase + 0xb057) = 0;
 -
 -                        // Increase Reset counter
 -                        auto_reset_count++;
 -                    }
 -                }
 -            }
 -            return TRUE;
 -        }
 -        else if (0 != (0x1 & int_status))
 -        {
 -            s_invalid_begin++;
 -            ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000;
 -            return TRUE;
 -        }
 -    }
 -    return FALSE;
 -}
 -
 -BOOL xgi_crt_irq_handler(xgi_info_t *info)
 -{
 -    BOOL    ret = FALSE;
 -    U8      *mmio_vbase = info->mmio.vbase;
 -    U32     device_status = 0;
 -    U32     hw_status = 0;
 -    U8      save_3ce = bReadReg(0x3ce);
 -
 -
 -    if (bIn3cf(0x37) & 0x01) // CRT1 interrupt just happened
 -    {
 -        U8  op3cf_3d;
 -        U8  op3cf_37;
 -
 -        // What happened?
 -        op3cf_37 =  bIn3cf(0x37);
 -
 -#if 0
 -        if (op3cf_37 & 0x04)
 -            device_status |= GDEVST_CONNECT;
 -        else
 -            device_status &= ~GDEVST_CONNECT;
 -
 -        device_status |= GDEVST_DEVICE_CHANGED;
 -        hw_status |= HWST_DEVICE_CHANGED;
 -#endif
 -        // Clear CRT interrupt
 -        op3cf_3d =  bIn3cf(0x3d);
 -        bOut3cf(0x3d, (op3cf_3d | 0x04));
 -        bOut3cf(0x3d, (op3cf_3d & ~0x04));
 -        ret = TRUE;
 -    }
 -    bWriteReg(0x3ce, save_3ce);
 -
 -    return (ret);
 -}
 -
 -BOOL xgi_dvi_irq_handler(xgi_info_t *info)
 -{
 -    BOOL    ret = FALSE;
 -    U8      *mmio_vbase = info->mmio.vbase;
 -    U32     device_status = 0;
 -    U32     hw_status = 0;
 -    U8      save_3ce = bReadReg(0x3ce);
 -
 -    if (bIn3cf(0x38) & 0x20) // DVI interrupt just happened
 -    {
 -        U8  op3cf_39;
 -        U8  op3cf_37;
 -        U8  op3x5_5a;
 -        U8  save_3x4 = bReadReg(0x3d4);;
 -
 -        // What happened?
 -        op3cf_37 =  bIn3cf(0x37);
 -#if 0
 -        //Also update our internal flag
 -        if (op3cf_37 & 0x10) // Second Monitor plugged In
 -        {
 -            device_status |= GDEVST_CONNECT;
 -            //Because currenly we cannot determine if DVI digital
 -            //or DVI analog is connected according to DVI interrupt
 -            //We should still call BIOS to check it when utility ask us
 -            device_status &= ~GDEVST_CHECKED;
 -        }
 -        else
 -        {
 -            device_status &= ~GDEVST_CONNECT;
 -        }
 -#endif
 -        //Notify BIOS that DVI plug/unplug happened
 -        op3x5_5a =  bIn3x5(0x5a);
 -        bOut3x5(0x5a, op3x5_5a & 0xf7);
 -
 -        bWriteReg(0x3d4, save_3x4);
 -
 -        //device_status |= GDEVST_DEVICE_CHANGED;
 -        //hw_status |= HWST_DEVICE_CHANGED;
 -
 -        // Clear DVI interrupt
 -        op3cf_39 =  bIn3cf(0x39);
 -        bOut3c5(0x39, (op3cf_39 & ~0x01));  //Set 3cf.39 bit 0 to 0
 -        bOut3c5(0x39, (op3cf_39 | 0x01 ));  //Set 3cf.39 bit 0 to 1
 -
 -        ret = TRUE;
 -    }
 -    bWriteReg(0x3ce, save_3ce);
 -
 -    return (ret);
 -}
 -
 -void xgi_dump_register(xgi_info_t *info)
 -{
 -    int             i, j;
 -    unsigned char   temp;
 -
 -    // 0x3C5
 -    printk("\r\n=====xgi_dump_register========0x%x===============\r\n", 0x3C5);
 -
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0x10; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bIn3c5(i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -
 -    // 0x3D5
 -    printk("\r\n====xgi_dump_register=========0x%x===============\r\n", 0x3D5);
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0x10; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bIn3x5(i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -
 -    // 0x3CF
 -    printk("\r\n=========xgi_dump_register====0x%x===============\r\n", 0x3CF);
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0x10; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bIn3cf(i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -
 -    printk("\r\n=====xgi_dump_register======0x%x===============\r\n", 0xB000);
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0x5; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bReadReg(0xB000 + i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -
 -    printk("\r\n==================0x%x===============\r\n", 0x2200);
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0xB; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bReadReg(0x2200 + i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -
 -    printk("\r\n==================0x%x===============\r\n", 0x2300);
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0x7; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bReadReg(0x2300 + i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -
 -    printk("\r\n==================0x%x===============\r\n", 0x2400);
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0x10; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bReadReg(0x2400 + i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -
 -    printk("\r\n==================0x%x===============\r\n", 0x2800);
 -    for(i=0; i<0x10; i++)
 -    {
 -        if(i == 0)
 -        {
 -            printk("%5x", i);
 -        }
 -        else
 -        {
 -            printk("%3x", i);
 -        }
 -    }
 -    printk("\r\n");
 -
 -    for(i=0; i<0x10; i++)
 -    {
 -        printk("%1x ", i);
 -
 -        for(j=0; j<0x10; j++)
 -        {
 -            temp = bReadReg(0x2800 + i*0x10 + j);
 -            printk("%3x", temp);
 -        }
 -        printk("\r\n");
 -    }
 -}
 -
 -void xgi_restore_registers(xgi_info_t *info)
 -{
 -    bOut3x5(0x13, 0);
 -    bOut3x5(0x8b, 2);
 -}
 -
 -void xgi_waitfor_pci_idle(xgi_info_t *info)
 -{
 -#define WHOLD_GE_STATUS             0x2800
 -#define IDLE_MASK                   ~0x90200000
 -
 -    int idleCount = 0;
 -    while(idleCount < 5)
 -    {
 -        if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK)
 -        {
 -            idleCount = 0;
 -        }
 -        else
 -        {
 -            idleCount ++;
 -        }
 -    }
 -}
 -
 -int xgi_get_cpu_id(struct cpu_info_s *arg)
 -{
 -    int op = arg->_eax;
 -     __asm__("cpuid"
 -        : "=a" (arg->_eax),
 -          "=b" (arg->_ebx),
 -          "=c" (arg->_ecx),
 -          "=d" (arg->_edx)
 -        : "0" (op));
 -
 -    XGI_INFO("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n",
 -              op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx);
 -}
 -
 -/*memory collect function*/
 -extern struct list_head xgi_mempid_list;
 -void xgi_mem_collect(xgi_info_t *info, unsigned int *pcnt)
 -{
 -    xgi_mem_pid_t       *mempid_block;
 -    struct list_head    *mempid_list;
 -    struct task_struct  *p,*find;
 -    unsigned int cnt = 0;
 -
 -    mempid_list = xgi_mempid_list.next;
 -
 -    while (mempid_list != &xgi_mempid_list)
 -    {
 -        mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);
 -        mempid_list = mempid_list->next;
 -
 -        find = NULL;
 -        XGI_SCAN_PROCESS(p)
 -        {
 -            if (p->pid == mempid_block->pid)
 -            {
 -                XGI_INFO("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", mempid_block->pid, p->state, mempid_block->location, mempid_block->bus_addr);
 -                find = p;
 -                if (mempid_block->bus_addr == 0xFFFFFFFF)
 -                    ++cnt;
 -                break;
 -            }
 -        }
 -        if (!find)
 -        {
 -            if (mempid_block->location == LOCAL)
 -            {
 -                XGI_INFO("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr);
 -                xgi_fb_free(info, mempid_block->bus_addr);
 -            }
 -            else if (mempid_block->bus_addr != 0xFFFFFFFF)
 -            {
 -                XGI_INFO("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", mempid_block->pid, mempid_block->bus_addr);
 -                xgi_pcie_free(info, mempid_block->bus_addr);
 -            }
 -            else
 -            {
 -                /*only delete the memory block*/
 -                list_del(&mempid_block->list);
 -                XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_block->pid);
 -                kfree(mempid_block);
 -            }
 -        }
 -    }
 -    *pcnt = cnt;
 -}
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" + +void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req) +{ +	req->device_id = info->device_id; +	req->device_name[0] = 'x'; +	req->device_name[1] = 'g'; +	req->device_name[2] = '4'; +	req->device_name[3] = '7'; +	req->vendor_id = info->vendor_id; +	req->curr_display_mode = 0; +	req->fb_size = info->fb.size; +	req->sarea_bus_addr = info->sarea_info.bus_addr; +	req->sarea_size = info->sarea_info.size; +} + +void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req) +{ +	req->mmioBase = (void *)info->mmio.base; +	req->size = info->mmio.size; +} + +void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +{ +	info->scrn_info.scrn_start = req->scrn_start; +	info->scrn_info.scrn_xres = req->scrn_xres; +	info->scrn_info.scrn_yres = req->scrn_yres; +	info->scrn_info.scrn_bpp = req->scrn_bpp; +	info->scrn_info.scrn_pitch = req->scrn_pitch; + +	XGI_INFO("info->scrn_info.scrn_start: 0x%lx" +		 "info->scrn_info.scrn_xres: 0x%lx" +		 "info->scrn_info.scrn_yres: 0x%lx" +		 "info->scrn_info.scrn_bpp: 0x%lx" +		 "info->scrn_info.scrn_pitch: 0x%lx\n", +		 info->scrn_info.scrn_start, +		 info->scrn_info.scrn_xres, +		 info->scrn_info.scrn_yres, +		 info->scrn_info.scrn_bpp, info->scrn_info.scrn_pitch); +} + +void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req) +{ +	req->scrn_start = info->scrn_info.scrn_start; +	req->scrn_xres = info->scrn_info.scrn_xres; +	req->scrn_yres = info->scrn_info.scrn_yres; +	req->scrn_bpp = info->scrn_info.scrn_bpp; +	req->scrn_pitch = info->scrn_info.scrn_pitch; + +	XGI_INFO("req->scrn_start: 0x%lx" +		 "req->scrn_xres: 0x%lx" +		 "req->scrn_yres: 0x%lx" +		 "req->scrn_bpp: 0x%lx" +		 "req->scrn_pitch: 0x%lx\n", +		 req->scrn_start, +		 req->scrn_xres, +		 req->scrn_yres, req->scrn_bpp, req->scrn_pitch); +} + 
+void xgi_ge_reset(xgi_info_t * info) +{ +	xgi_disable_ge(info); +	xgi_enable_ge(info); +} + +void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req) +{ +	info->sarea_info.bus_addr = req->bus_addr; +	info->sarea_info.size = req->size; +	XGI_INFO("info->sarea_info.bus_addr: 0x%lx" +		 "info->sarea_info.size: 0x%lx\n", +		 info->sarea_info.bus_addr, info->sarea_info.size); +} + +/* + * irq functions + */ +#define STALL_INTERRUPT_RESET_THRESHOLD 0xffff + +static U32 s_invalid_begin = 0; + +BOOL xgi_ge_irq_handler(xgi_info_t * info) +{ +	volatile U8 *mmio_vbase = info->mmio.vbase; +	volatile U32 *ge_3d_status = (volatile U32 *)(mmio_vbase + 0x2800); +	U32 int_status = ge_3d_status[4];	// interrupt status +	U32 auto_reset_count = 0; +	BOOL is_support_auto_reset = FALSE; + +	// Check GE on/off +	if (0 == (0xffffc0f0 & int_status)) { +		U32 old_ge_status = ge_3d_status[0x00]; +		U32 old_pcie_cmd_fetch_Addr = ge_3d_status[0x0a]; +		if (0 != (0x1000 & int_status)) { +			// We got GE stall interrupt. +			ge_3d_status[0x04] = int_status | 0x04000000; + +			if (TRUE == is_support_auto_reset) { +				BOOL is_wrong_signal = FALSE; +				static U32 last_int_tick_low, +				    last_int_tick_high; +				static U32 new_int_tick_low, new_int_tick_high; +				static U32 continoue_int_count = 0; +				// OE II is busy. 
+				while (old_ge_status & 0x001c0000) { +					U16 check; +					// Check Read back status +					*(mmio_vbase + 0x235c) = 0x80; +					check = +					    *((volatile U16 *)(mmio_vbase + +							       0x2360)); +					if ((check & 0x3f) != +					    ((check & 0x3f00) >> 8)) { +						is_wrong_signal = TRUE; +						break; +					} +					// Check RO channel +					*(mmio_vbase + 0x235c) = 0x83; +					check = +					    *((volatile U16 *)(mmio_vbase + +							       0x2360)); +					if ((check & 0x0f) != +					    ((check & 0xf0) >> 4)) { +						is_wrong_signal = TRUE; +						break; +					} +					// Check RW channel +					*(mmio_vbase + 0x235c) = 0x88; +					check = +					    *((volatile U16 *)(mmio_vbase + +							       0x2360)); +					if ((check & 0x0f) != +					    ((check & 0xf0) >> 4)) { +						is_wrong_signal = TRUE; +						break; +					} +					// Check RO channel outstanding +					*(mmio_vbase + 0x235c) = 0x8f; +					check = +					    *((volatile U16 *)(mmio_vbase + +							       0x2360)); +					if (0 != (check & 0x3ff)) { +						is_wrong_signal = TRUE; +						break; +					} +					// Check RW channel outstanding +					*(mmio_vbase + 0x235c) = 0x90; +					check = +					    *((volatile U16 *)(mmio_vbase + +							       0x2360)); +					if (0 != (check & 0x3ff)) { +						is_wrong_signal = TRUE; +						break; +					} +					// No pending PCIE request. GE stall. +					break; +				} + +				if (is_wrong_signal) { +					// Nothing but skip. +				} else if (0 == continoue_int_count++) { +					rdtsc(last_int_tick_low, +					      last_int_tick_high); +				} else { +					rdtscl(new_int_tick_low); +					if ((new_int_tick_low - +					     last_int_tick_low) > +					    STALL_INTERRUPT_RESET_THRESHOLD) { +						continoue_int_count = 0; +					} else if (continoue_int_count >= 3) { +						continoue_int_count = 0; + +						// GE Hung up, need reset. 
+						XGI_INFO("Reset GE!\n"); + +						*(mmio_vbase + 0xb057) = 8; +						int time_out = 0xffff; +						while (0 != +						       (ge_3d_status[0x00] & +							0xf0000000)) { +							while (0 != +							       ((--time_out) & +								0xfff)) ; +							if (0 == time_out) { +								XGI_INFO +								    ("Can not reset back 0x%lx!\n", +								     ge_3d_status +								     [0x00]); +								*(mmio_vbase + +								  0xb057) = 0; +								// Have to use 3x5.36 to reset. +								// Save and close dynamic gating +								U8 old_3ce = +								    *(mmio_vbase +								      + 0x3ce); +								*(mmio_vbase + +								  0x3ce) = 0x2a; +								U8 old_3cf = +								    *(mmio_vbase +								      + 0x3cf); +								*(mmio_vbase + +								  0x3cf) = +						       old_3cf & 0xfe; +								// Reset GE +								U8 old_index = +								    *(mmio_vbase +								      + 0x3d4); +								*(mmio_vbase + +								  0x3d4) = 0x36; +								U8 old_36 = +								    *(mmio_vbase +								      + 0x3d5); +								*(mmio_vbase + +								  0x3d5) = +						       old_36 | 0x10; +								while (0 != +								       ((--time_out) & 0xfff)) ; +								*(mmio_vbase + +								  0x3d5) = +						       old_36; +								*(mmio_vbase + +								  0x3d4) = +						       old_index; +								// Restore dynamic gating +								*(mmio_vbase + +								  0x3cf) = +						       old_3cf; +								*(mmio_vbase + +								  0x3ce) = +						       old_3ce; +								break; +							} +						} +						*(mmio_vbase + 0xb057) = 0; + +						// Increase Reset counter +						auto_reset_count++; +					} +				} +			} +			return TRUE; +		} else if (0 != (0x1 & int_status)) { +			s_invalid_begin++; +			ge_3d_status[0x04] = (int_status & ~0x01) | 0x04000000; +			return TRUE; +		} +	} +	return FALSE; +} + +BOOL xgi_crt_irq_handler(xgi_info_t * info) +{ +	BOOL ret = FALSE; +	U8 *mmio_vbase = info->mmio.vbase; +	U32 device_status = 0; +	U32 hw_status = 0; +	U8 save_3ce = bReadReg(0x3ce); + +	if (bIn3cf(0x37) & 
0x01)	// CRT1 interrupt just happened +	{ +		U8 op3cf_3d; +		U8 op3cf_37; + +		// What happened? +		op3cf_37 = bIn3cf(0x37); + +#if 0 +		if (op3cf_37 & 0x04) +			device_status |= GDEVST_CONNECT; +		else +			device_status &= ~GDEVST_CONNECT; + +		device_status |= GDEVST_DEVICE_CHANGED; +		hw_status |= HWST_DEVICE_CHANGED; +#endif +		// Clear CRT interrupt +		op3cf_3d = bIn3cf(0x3d); +		bOut3cf(0x3d, (op3cf_3d | 0x04)); +		bOut3cf(0x3d, (op3cf_3d & ~0x04)); +		ret = TRUE; +	} +	bWriteReg(0x3ce, save_3ce); + +	return (ret); +} + +BOOL xgi_dvi_irq_handler(xgi_info_t * info) +{ +	BOOL ret = FALSE; +	U8 *mmio_vbase = info->mmio.vbase; +	U32 device_status = 0; +	U32 hw_status = 0; +	U8 save_3ce = bReadReg(0x3ce); + +	if (bIn3cf(0x38) & 0x20)	// DVI interrupt just happened +	{ +		U8 op3cf_39; +		U8 op3cf_37; +		U8 op3x5_5a; +		U8 save_3x4 = bReadReg(0x3d4);; + +		// What happened? +		op3cf_37 = bIn3cf(0x37); +#if 0 +		//Also update our internal flag +		if (op3cf_37 & 0x10)	// Second Monitor plugged In +		{ +			device_status |= GDEVST_CONNECT; +			//Because currenly we cannot determine if DVI digital +			//or DVI analog is connected according to DVI interrupt +			//We should still call BIOS to check it when utility ask us +			device_status &= ~GDEVST_CHECKED; +		} else { +			device_status &= ~GDEVST_CONNECT; +		} +#endif +		//Notify BIOS that DVI plug/unplug happened +		op3x5_5a = bIn3x5(0x5a); +		bOut3x5(0x5a, op3x5_5a & 0xf7); + +		bWriteReg(0x3d4, save_3x4); + +		//device_status |= GDEVST_DEVICE_CHANGED; +		//hw_status |= HWST_DEVICE_CHANGED; + +		// Clear DVI interrupt +		op3cf_39 = bIn3cf(0x39); +		bOut3c5(0x39, (op3cf_39 & ~0x01));	//Set 3cf.39 bit 0 to 0 +		bOut3c5(0x39, (op3cf_39 | 0x01));	//Set 3cf.39 bit 0 to 1 + +		ret = TRUE; +	} +	bWriteReg(0x3ce, save_3ce); + +	return (ret); +} + +void xgi_dump_register(xgi_info_t * info) +{ +	int i, j; +	unsigned char temp; + +	// 0x3C5 +	printk("\r\n=====xgi_dump_register========0x%x===============\r\n", +	       0x3C5); + +	
for (i = 0; i < 0x10; i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0x10; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bIn3c5(i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} + +	// 0x3D5 +	printk("\r\n====xgi_dump_register=========0x%x===============\r\n", +	       0x3D5); +	for (i = 0; i < 0x10; i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0x10; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bIn3x5(i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} + +	// 0x3CF +	printk("\r\n=========xgi_dump_register====0x%x===============\r\n", +	       0x3CF); +	for (i = 0; i < 0x10; i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0x10; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bIn3cf(i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} + +	printk("\r\n=====xgi_dump_register======0x%x===============\r\n", +	       0xB000); +	for (i = 0; i < 0x10; i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0x5; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bReadReg(0xB000 + i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} + +	printk("\r\n==================0x%x===============\r\n", 0x2200); +	for (i = 0; i < 0x10; i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0xB; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bReadReg(0x2200 + i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} + +	printk("\r\n==================0x%x===============\r\n", 0x2300); +	for (i = 0; i < 0x10; 
i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0x7; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bReadReg(0x2300 + i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} + +	printk("\r\n==================0x%x===============\r\n", 0x2400); +	for (i = 0; i < 0x10; i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0x10; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bReadReg(0x2400 + i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} + +	printk("\r\n==================0x%x===============\r\n", 0x2800); +	for (i = 0; i < 0x10; i++) { +		if (i == 0) { +			printk("%5x", i); +		} else { +			printk("%3x", i); +		} +	} +	printk("\r\n"); + +	for (i = 0; i < 0x10; i++) { +		printk("%1x ", i); + +		for (j = 0; j < 0x10; j++) { +			temp = bReadReg(0x2800 + i * 0x10 + j); +			printk("%3x", temp); +		} +		printk("\r\n"); +	} +} + +void xgi_restore_registers(xgi_info_t * info) +{ +	bOut3x5(0x13, 0); +	bOut3x5(0x8b, 2); +} + +void xgi_waitfor_pci_idle(xgi_info_t * info) +{ +#define WHOLD_GE_STATUS             0x2800 +#define IDLE_MASK                   ~0x90200000 + +	int idleCount = 0; +	while (idleCount < 5) { +		if (dwReadReg(WHOLD_GE_STATUS) & IDLE_MASK) { +			idleCount = 0; +		} else { +			idleCount++; +		} +	} +} + +int xgi_get_cpu_id(struct cpu_info_s *arg) +{ +	int op = arg->_eax; +      __asm__("cpuid":"=a"(arg->_eax), +		"=b"(arg->_ebx), +		"=c"(arg->_ecx), "=d"(arg->_edx) +      :	"0"(op)); + +	XGI_INFO +	    ("opCode = 0x%x, eax = 0x%x, ebx = 0x%x, ecx = 0x%x, edx = 0x%x \n", +	     op, arg->_eax, arg->_ebx, arg->_ecx, arg->_edx); +} + +/*memory collect function*/ +extern struct list_head xgi_mempid_list; +void xgi_mem_collect(xgi_info_t * info, unsigned int *pcnt) +{ +	xgi_mem_pid_t *mempid_block; +	struct list_head *mempid_list; +	
struct task_struct *p, *find; +	unsigned int cnt = 0; + +	mempid_list = xgi_mempid_list.next; + +	while (mempid_list != &xgi_mempid_list) { +		mempid_block = +		    list_entry(mempid_list, struct xgi_mem_pid_s, list); +		mempid_list = mempid_list->next; + +		find = NULL; +		XGI_SCAN_PROCESS(p) { +			if (p->pid == mempid_block->pid) { +				XGI_INFO +				    ("[!]Find active pid:%ld state:%ld location:%d addr:0x%lx! \n", +				     mempid_block->pid, p->state, +				     mempid_block->location, +				     mempid_block->bus_addr); +				find = p; +				if (mempid_block->bus_addr == 0xFFFFFFFF) +					++cnt; +				break; +			} +		} +		if (!find) { +			if (mempid_block->location == LOCAL) { +				XGI_INFO +				    ("Memory ProcessID free fb and delete one block pid:%ld addr:0x%lx successfully! \n", +				     mempid_block->pid, mempid_block->bus_addr); +				xgi_fb_free(info, mempid_block->bus_addr); +			} else if (mempid_block->bus_addr != 0xFFFFFFFF) { +				XGI_INFO +				    ("Memory ProcessID free pcie and delete one block pid:%ld addr:0x%lx successfully! \n", +				     mempid_block->pid, mempid_block->bus_addr); +				xgi_pcie_free(info, mempid_block->bus_addr); +			} else { +				/*only delete the memory block */ +				list_del(&mempid_block->list); +				XGI_INFO +				    ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", +				     mempid_block->pid); +				kfree(mempid_block); +			} +		} +	} +	*pcnt = cnt; +} diff --git a/linux-core/xgi_misc.h b/linux-core/xgi_misc.h index ac4daaa1..37120aaa 100644 --- a/linux-core/xgi_misc.h +++ b/linux-core/xgi_misc.h @@ -1,49 +1,47 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -
 -#ifndef _XGI_MISC_H_
 -#define _XGI_MISC_H_
 -
 -extern void xgi_dump_register(xgi_info_t *info);
 -extern void xgi_get_device_info(xgi_info_t *info, xgi_chip_info_t * req);
 -extern void xgi_get_mmio_info(xgi_info_t *info, xgi_mmio_info_t *req);
 -extern void xgi_get_screen_info(xgi_info_t *info, xgi_screen_info_t *req);
 -extern void xgi_put_screen_info(xgi_info_t *info, xgi_screen_info_t *req);
 -extern void xgi_ge_reset(xgi_info_t *info);
 -extern void xgi_sarea_info(xgi_info_t *info, xgi_sarea_info_t *req);
 -extern int  xgi_get_cpu_id(struct cpu_info_s *arg);
 -
 -extern void xgi_restore_registers(xgi_info_t *info);
 -extern BOOL xgi_ge_irq_handler(xgi_info_t *info);
 -extern BOOL xgi_crt_irq_handler(xgi_info_t *info);
 -extern BOOL xgi_dvi_irq_handler(xgi_info_t *info);
 -extern void xgi_waitfor_pci_idle(xgi_info_t *info);
 -
 -
 -#endif
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#ifndef _XGI_MISC_H_ +#define _XGI_MISC_H_ + +extern void xgi_dump_register(xgi_info_t * info); +extern void xgi_get_device_info(xgi_info_t * info, xgi_chip_info_t * req); +extern void xgi_get_mmio_info(xgi_info_t * info, xgi_mmio_info_t * req); +extern void xgi_get_screen_info(xgi_info_t * info, xgi_screen_info_t * req); +extern void xgi_put_screen_info(xgi_info_t * info, xgi_screen_info_t * req); +extern void xgi_ge_reset(xgi_info_t * info); +extern void xgi_sarea_info(xgi_info_t * info, xgi_sarea_info_t * req); +extern int xgi_get_cpu_id(struct cpu_info_s *arg); + +extern void xgi_restore_registers(xgi_info_t * info); +extern BOOL xgi_ge_irq_handler(xgi_info_t * info); +extern BOOL xgi_crt_irq_handler(xgi_info_t * info); +extern BOOL xgi_dvi_irq_handler(xgi_info_t * info); +extern void xgi_waitfor_pci_idle(xgi_info_t * info); + +#endif diff --git a/linux-core/xgi_pcie.c b/linux-core/xgi_pcie.c index 62e2323f..9457770a 100644 --- a/linux-core/xgi_pcie.c +++ b/linux-core/xgi_pcie.c @@ -1,1060 +1,1031 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#include "xgi_types.h"
 -#include "xgi_linux.h"
 -#include "xgi_drv.h"
 -#include "xgi_regs.h"
 -#include "xgi_pcie.h"
 -#include "xgi_misc.h"
 -
 -static xgi_pcie_heap_t *xgi_pcie_heap = NULL;
 -static kmem_cache_t *xgi_pcie_cache_block = NULL;
 -static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL;
 -static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL;
 -static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL;
 -extern struct list_head xgi_mempid_list;
 -
 -static unsigned long xgi_pcie_lut_alloc(unsigned long page_order)
 -{
 -    struct page     *page;
 -    unsigned long   page_addr = 0;
 -    unsigned long   page_count = 0;
 -    int             i;
 -
 -    page_count = (1 << page_order);
 -    page_addr = __get_free_pages(GFP_KERNEL, page_order);
 -
 -    if (page_addr == 0UL)
 -    {
 -        XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n",
 -                   page_count);
 -        return 0;
 -    }
 -
 -    page = virt_to_page(page_addr);
 -
 -    for (i = 0; i < page_count; i++, page++)
 -    {
 -        XGI_INC_PAGE_COUNT(page);
 -        XGILockPage(page);
 -    }
 -
 -    XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n",
 -              page_count, page_order, page_addr);
 -    return page_addr;
 -}
 -
 -static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order)
 -{
 -    struct page     *page;
 -    unsigned long   page_count = 0;
 -    int             i;
 -
 -    page_count = (1 << page_order);
 -    page = virt_to_page(page_addr);
 -
 -    for (i = 0; i < page_count; i++, page++)
 -    {
 -        XGI_DEC_PAGE_COUNT(page);
 -        XGIUnlockPage(page);
 -    }
 -
 -    free_pages(page_addr, page_order);
 -}
 -
 -static int xgi_pcie_lut_init(xgi_info_t *info)
 -{
 -    unsigned char   *page_addr = NULL;
 -    unsigned long   pciePageCount, lutEntryNum, lutPageCount, lutPageOrder;
 -    unsigned long   count = 0;
 -    u8              temp = 0;
 -
 -	/* Jong 06/06/2006 */
 -	unsigned long	pcie_aperture_size;
 -
 -    info->pcie.size = 128 * 1024 * 1024;
 -
 -    /* Get current FB aperture size */
 -    temp = In3x5(0x27);
 -    XGI_INFO("In3x5(0x27): 0x%x \n", temp);
 -
 -    if (temp & 0x01)    /* 256MB; Jong 06/05/2006; 0x10000000 */
 -    {
 -		/* Jong 06/06/2006; allocate memory */
 -		pcie_aperture_size=256 * 1024 * 1024;
 -        /* info->pcie.base = 256 * 1024 * 1024; */ /* pcie base is different from fb base */
 -    }
 -    else                /* 128MB; Jong 06/05/2006; 0x08000000 */
 -    {
 -		/* Jong 06/06/2006; allocate memory */
 -		pcie_aperture_size=128 * 1024 * 1024;
 -        /* info->pcie.base = 128 * 1024 * 1024; */
 -    }
 -
 -	/* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */
 -	/* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */
 -	/* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */
 -	/* info->pcie.base=ioremap(0x0F000000, 0x10000000); */ /* Cause system hang */
 -	info->pcie.base=pcie_aperture_size; /* works */
 -	/* info->pcie.base=info->fb.base + info->fb.size; */ /* System hang */
 -	/* info->pcie.base=128 * 1024 * 1024;*/ /* System hang */
 -
 -    XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base);
 -
 -
 -    /* Get current lookup table page size */
 -    temp = bReadReg(0xB00C);
 -    if (temp & 0x04)    /* 8KB */
 -    {
 -        info->lutPageSize = 8 * 1024;
 -    }
 -    else                /* 4KB */
 -    {
 -        info->lutPageSize = 4 * 1024;
 -    }
 -
 -    XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
 -
 -#if 0
 -    /* Get current lookup table location */
 -    temp = bReadReg(0xB00C);
 -    if (temp & 0x02)    /* LFB */
 -    {
 -        info->isLUTInLFB = TRUE;
 -        /* Current we only support lookup table in LFB */
 -        temp &= 0xFD;
 -        bWriteReg(0xB00C, temp);
 -        info->isLUTInLFB = FALSE;
 -    }
 -    else                /* SFB */
 -    {
 -        info->isLUTInLFB = FALSE;
 -    }
 -
 -    XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize);
 -
 -    /* Get current SDFB page size */
 -    temp = bReadReg(0xB00C);
 -    if (temp & 0x08)    /* 8MB */
 -    {
 -        info->sdfbPageSize = 8 * 1024 * 1024;
 -    }
 -    else                /* 4MB */
 -    {
 -        info->sdfbPageSize = 4 * 1024 * 1024;
 -    }
 -#endif
 -    pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE;
 -
 -    /*
 -     * Allocate memory for PCIE GART table;
 -     */
 -    lutEntryNum = pciePageCount;
 -    lutPageCount = (lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE;
 -
 -    /* get page_order base on page_count */
 -    count = lutPageCount;
 -    for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder);
 -
 -    if ((lutPageCount << 1) ==  (1 << lutPageOrder))
 -    {
 -        lutPageOrder -= 1;
 -    }
 -
 -    XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n",
 -              lutEntryNum, lutPageCount, lutPageOrder);
 -
 -    info->lutPageOrder = lutPageOrder;
 -    page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder);
 -
 -    if (!page_addr)
 -    {
 -        XGI_ERROR("cannot allocate PCIE lut page!\n");
 -        goto fail;
 -    }
 -    info->lut_base = (unsigned long *)page_addr;
 -
 -    XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n",
 -              page_addr, virt_to_phys(page_addr));
 -
 -    XGI_INFO("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n",
 -              info->lut_base, __pa(info->lut_base), info->lutPageOrder);
 -
 -    /*
 -     * clean all PCIE GART Entry
 -     */
 -    memset(page_addr, 0, PAGE_SIZE << lutPageOrder);
 -
 -#if defined(__i386__) || defined(__x86_64__)
 -    asm volatile ( "wbinvd" ::: "memory" );
 -#else
 -    mb();
 -#endif
 -
 -    /* Set GART in SFB */
 -    bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02);
 -    /* Set GART base address to HW */
 -    dwWriteReg(0xB034, __pa(info->lut_base));
 -
 -    return 1;
 -fail:
 -    return 0;
 -}
 -
 -static void xgi_pcie_lut_cleanup(xgi_info_t *info)
 -{
 -    if (info->lut_base)
 -    {
 -        XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n",
 -                  info->lut_base, info->lutPageOrder);
 -        xgi_pcie_lut_free((unsigned long)info->lut_base, info->lutPageOrder);
 -        info->lut_base = NULL;
 -    }
 -}
 -
 -static xgi_pcie_block_t *xgi_pcie_new_node(void)
 -{
 -    xgi_pcie_block_t *block = (xgi_pcie_block_t *)kmem_cache_alloc(xgi_pcie_cache_block, GFP_KERNEL);
 -    if (block == NULL)
 -    {
 -        return NULL;
 -    }
 -
 -    block->offset = 0;          /* block's offset in pcie memory, begin from 0 */
 -    block->size   = 0;          /* The block size.              */
 -    block->bus_addr = 0;        /* CPU access address/bus address */
 -    block->hw_addr  = 0;        /* GE access address            */
 -    block->page_count = 0;
 -    block->page_order = 0;
 -    block->page_block = NULL;
 -    block->page_table = NULL;
 -    block->owner = PCIE_INVALID;
 -
 -    return block;
 -}
 -
 -static void xgi_pcie_block_stuff_free(xgi_pcie_block_t *block)
 -{
 -    struct page         *page;
 -    xgi_page_block_t    *page_block = block->page_block;
 -    xgi_page_block_t    *free_block;
 -    unsigned long       page_count = 0;
 -    int                 i;
 -
 -    //XGI_INFO("block->page_block: 0x%p \n", block->page_block);
 -    while (page_block)
 -    {
 -        page_count = page_block->page_count;
 -
 -        page = virt_to_page(page_block->virt_addr);
 -        for (i = 0; i < page_count; i++, page++)
 -        {
 -            XGI_DEC_PAGE_COUNT(page);
 -            XGIUnlockPage(page);
 -        }
 -        free_pages(page_block->virt_addr, page_block->page_order);
 -
 -        page_block->phys_addr = 0;
 -        page_block->virt_addr = 0;
 -        page_block->page_count = 0;
 -        page_block->page_order = 0;
 -
 -        free_block = page_block;
 -        page_block = page_block->next;
 -        //XGI_INFO("free free_block: 0x%p \n", free_block);
 -        kfree(free_block);
 -        free_block = NULL;
 -    }
 -
 -    if (block->page_table)
 -    {
 -        //XGI_INFO("free block->page_table: 0x%p \n", block->page_table);
 -        kfree(block->page_table);
 -        block->page_table = NULL;
 -    }
 -}
 -
 -int xgi_pcie_heap_init(xgi_info_t *info)
 -{
 -    xgi_pcie_block_t    *block;
 -
 -    if (!xgi_pcie_lut_init(info))
 -    {
 -        XGI_ERROR("xgi_pcie_lut_init failed\n");
 -        return 0;
 -    }
 -
 -    xgi_pcie_heap = (xgi_pcie_heap_t *)kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL);
 -    if(!xgi_pcie_heap)
 -    {
 -        XGI_ERROR("xgi_pcie_heap alloc failed\n");
 -        goto fail1;
 -    }
 -    INIT_LIST_HEAD(&xgi_pcie_heap->free_list);
 -    INIT_LIST_HEAD(&xgi_pcie_heap->used_list);
 -    INIT_LIST_HEAD(&xgi_pcie_heap->sort_list);
 -
 -    xgi_pcie_heap->max_freesize = info->pcie.size;
 -
 -    xgi_pcie_cache_block = kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t),
 -                                             0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 -
 -    if (NULL == xgi_pcie_cache_block)
 -    {
 -         XGI_ERROR("Fail to creat xgi_pcie_block\n");
 -         goto fail2;
 -    }
 -
 -    block = (xgi_pcie_block_t *)xgi_pcie_new_node();
 -    if (!block)
 -    {
 -        XGI_ERROR("xgi_pcie_new_node failed\n");
 -        goto fail3;
 -    }
 -
 -    block->offset = 0;  /* block's offset in pcie memory, begin from 0 */
 -    block->size = info->pcie.size;
 -
 -    list_add(&block->list, &xgi_pcie_heap->free_list);
 -
 -    XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", block->offset, block->size);
 -    return 1;
 -fail3:
 -    if (xgi_pcie_cache_block)
 -    {
 -        kmem_cache_destroy(xgi_pcie_cache_block);
 -        xgi_pcie_cache_block = NULL;
 -    }
 -
 -fail2:
 -    if(xgi_pcie_heap)
 -    {
 -        kfree(xgi_pcie_heap);
 -        xgi_pcie_heap = NULL;
 -    }
 -fail1:
 -    xgi_pcie_lut_cleanup(info);
 -    return 0;
 -}
 -
 -void xgi_pcie_heap_check(void)
 -{
 -    struct list_head *useList, *temp;
 -    xgi_pcie_block_t *block;
 -    unsigned int ownerIndex;
 -    char *ownerStr[6] = {"2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE"};
 -
 -    if (xgi_pcie_heap)
 -    {
 -        useList = &xgi_pcie_heap->used_list;
 -        temp = useList->next;
 -        XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize);
 -        while (temp != useList)
 -        {
 -            block = list_entry(temp, struct xgi_pcie_block_s, list);
 -            if (block->owner == PCIE_2D)
 -                ownerIndex = 0;
 -            else if (block->owner > PCIE_3D_TEXTURE || block->owner < PCIE_2D || block->owner < PCIE_3D)
 -                ownerIndex = 5;
 -            else
 -                ownerIndex = block->owner - PCIE_3D + 1;
 -            XGI_INFO("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n",
 -                      ownerStr[ownerIndex], block->offset, block->size);
 -            temp = temp->next;
 -        }
 -
 -    }
 -}
 -
 -
 -void xgi_pcie_heap_cleanup(xgi_info_t *info)
 -{
 -    struct list_head    *free_list, *temp;
 -    xgi_pcie_block_t    *block;
 -    int                 j;
 -
 -    xgi_pcie_lut_cleanup(info);
 -    XGI_INFO("xgi_pcie_lut_cleanup scceeded\n");
 -
 -    if (xgi_pcie_heap)
 -    {
 -        free_list = &xgi_pcie_heap->free_list;
 -        for (j = 0; j < 3; j++, free_list++)
 -        {
 -            temp = free_list->next;
 -
 -            while (temp != free_list)
 -            {
 -                block = list_entry(temp, struct xgi_pcie_block_s, list);
 -                XGI_INFO("No. %d block->offset: 0x%lx block->size: 0x%lx \n",
 -                          j, block->offset, block->size);
 -                xgi_pcie_block_stuff_free(block);
 -                block->bus_addr = 0;
 -                block->hw_addr = 0;
 -
 -                temp = temp->next;
 -                //XGI_INFO("No. %d free block: 0x%p \n", j, block);
 -                kmem_cache_free(xgi_pcie_cache_block, block);
 -                block = NULL;
 -            }
 -        }
 -
 -        XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap);
 -        kfree(xgi_pcie_heap);
 -        xgi_pcie_heap = NULL;
 -    }
 -
 -    if (xgi_pcie_cache_block)
 -    {
 -        kmem_cache_destroy(xgi_pcie_cache_block);
 -        xgi_pcie_cache_block = NULL;
 -    }
 -}
 -
 -
 -static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t *info,
 -                                            unsigned long originalSize,
 -                                            enum PcieOwner owner)
 -{
 -    struct list_head    *free_list;
 -    xgi_pcie_block_t    *block, *used_block, *free_block;
 -    xgi_page_block_t    *page_block, *prev_page_block;
 -    struct page         *page;
 -    unsigned long       page_order = 0, count = 0, index =0;
 -    unsigned long       page_addr = 0;
 -    unsigned long       *lut_addr = NULL;
 -    unsigned long       lut_id = 0;
 -    unsigned long       size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK;
 -    int                 i, j, page_count = 0;
 -    int                 temp = 0;
 -
 -    XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n");
 -    XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", originalSize, size);
 -
 -    if (owner == PCIE_3D)
 -    {
 -        if (xgi_pcie_vertex_block)
 -        {
 -            XGI_INFO("PCIE Vertex has been created, return directly.\n");
 -            return xgi_pcie_vertex_block;
 -        }
 -    }
 -
 -    if (owner == PCIE_3D_CMDLIST)
 -    {
 -        if (xgi_pcie_cmdlist_block)
 -        {
 -            XGI_INFO("PCIE Cmdlist has been created, return directly.\n");
 -            return xgi_pcie_cmdlist_block;
 -        }
 -    }
 -
 -    if (owner == PCIE_3D_SCRATCHPAD)
 -    {
 -        if (xgi_pcie_scratchpad_block)
 -        {
 -            XGI_INFO("PCIE Scratchpad has been created, return directly.\n");
 -            return xgi_pcie_scratchpad_block;
 -        }
 -    }
 -
 -    if (size == 0)
 -    {
 -        XGI_ERROR("size == 0 \n");
 -        return (NULL);
 -    }
 -
 -    XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize);
 -    if (size > xgi_pcie_heap->max_freesize)
 -    {
 -        XGI_ERROR("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n",
 -                   size, xgi_pcie_heap->max_freesize);
 -        return (NULL);
 -    }
 -
 -	/* Jong 05/30/2006; find next free list which has enough space*/
 -    free_list = xgi_pcie_heap->free_list.next;
 -    while (free_list != &xgi_pcie_heap->free_list)
 -    {
 -        //XGI_INFO("free_list: 0x%px \n", free_list);
 -        block = list_entry(free_list, struct xgi_pcie_block_s, list);
 -        if (size <= block->size)
 -        {
 -            break;
 -        }
 -        free_list = free_list->next;
 -    }
 -
 -    if (free_list == &xgi_pcie_heap->free_list)
 -    {
 -        XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", size/1024);
 -        return (NULL);
 -    }
 -
 -    free_block = block;
 -    XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n",
 -              size, free_block->offset, free_block->size);
 -
 -    if (size == free_block->size)
 -    {
 -        used_block = free_block;
 -        XGI_INFO("size==free_block->size: free_block = 0x%p\n", free_block);
 -        list_del(&free_block->list);
 -    }
 -    else
 -    {
 -        used_block = xgi_pcie_new_node();
 -        if (used_block == NULL)
 -        {
 -            return NULL;
 -        }
 -
 -        if (used_block == free_block)
 -        {
 -            XGI_ERROR("used_block == free_block = 0x%p\n", used_block);
 -        }
 -
 -        used_block->offset = free_block->offset;
 -        used_block->size = size;
 -
 -        free_block->offset += size;
 -        free_block->size -= size;
 -    }
 -
 -    xgi_pcie_heap->max_freesize -= size;
 -
 -    used_block->bus_addr = info->pcie.base + used_block->offset;
 -    used_block->hw_addr    = info->pcie.base + used_block->offset;
 -    used_block->page_count = page_count = size / PAGE_SIZE;
 -
 -    /* get page_order base on page_count */
 -    for (used_block->page_order = 0; page_count; page_count >>= 1)
 -    {
 -        ++used_block->page_order;
 -    }
 -
 -    if ((used_block->page_count << 1) ==  (1 << used_block->page_order))
 -    {
 -        used_block->page_order--;
 -    }
 -    XGI_INFO("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n",
 -              used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr, used_block->page_count, used_block->page_order);
 -
 -    used_block->page_block = NULL;
 -    //used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL);
 -    //if (!used_block->page_block) return NULL;
 -    //used_block->page_block->next = NULL;
 -
 -    used_block->page_table = (xgi_pte_t *)kmalloc(sizeof(xgi_pte_t) * used_block->page_count, GFP_KERNEL);
 -    if (used_block->page_table == NULL)
 -    {
 -        goto fail;
 -    }
 -
 -    lut_id = (used_block->offset >> PAGE_SHIFT);
 -    lut_addr = info->lut_base;
 -    lut_addr += lut_id;
 -    XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id);
 -
 -    /* alloc free pages from system */
 -    page_count = used_block->page_count;
 -    page_block = used_block->page_block;
 -    prev_page_block = used_block->page_block;
 -    for (i = 0; page_count > 0; i++)
 -    {
 -        /* if size is bigger than 2M bytes, it should be split */
 -        if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER))
 -        {
 -            page_order = XGI_PCIE_ALLOC_MAX_ORDER;
 -        }
 -        else
 -        {
 -            count = page_count;
 -            for (page_order = 0; count; count >>= 1, ++page_order);
 -
 -            if ((page_count << 1) ==  (1 << page_order))
 -            {
 -                page_order -= 1;
 -            }
 -        }
 -
 -        count = (1 << page_order);
 -        page_addr = __get_free_pages(GFP_KERNEL, page_order);
 -	    XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", page_addr);
 -
 -        if (!page_addr)
 -        {
 -            XGI_ERROR("No: %d :Can't get free pages: 0x%lx from system memory !\n",
 -                       i, count);
 -            goto fail;
 -        }
 -
 -		/* Jong 05/30/2006; test */
 -        memset((unsigned char *)page_addr, 0xFF, PAGE_SIZE << page_order);
 -        /* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */
 -
 -        if (page_block == NULL)
 -        {
 -            page_block = (xgi_page_block_t *)kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL);
 -            if (!page_block)
 -            {
 -                XGI_ERROR("Can't get memory for page_block! \n");
 -                goto fail;
 -            }
 -        }
 -
 -        if (prev_page_block == NULL)
 -        {
 -            used_block->page_block = page_block;
 -            prev_page_block = page_block;
 -        }
 -        else
 -        {
 -            prev_page_block->next = page_block;
 -            prev_page_block = page_block;
 -        }
 -
 -        page_block->next = NULL;
 -        page_block->phys_addr = __pa(page_addr);
 -        page_block->virt_addr = page_addr;
 -        page_block->page_count = count;
 -        page_block->page_order = page_order;
 -
 -	    XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", page_block->phys_addr);
 -	    XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", page_block->virt_addr);
 -
 -        page = virt_to_page(page_addr);
 -
 -        //XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p"
 -        //         "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n",
 -        //          i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr);
 -
 -        for (j = 0 ; j < count; j++, page++, lut_addr++)
 -        {
 -            used_block->page_table[index + j].phys_addr = __pa(page_address(page));
 -            used_block->page_table[index + j].virt_addr = (unsigned long)page_address(page);
 -
 -		    XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", used_block->page_table[index + j].phys_addr);
 -		    XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", used_block->page_table[index + j].virt_addr);
 -
 -            *lut_addr = __pa(page_address(page));
 -            XGI_INC_PAGE_COUNT(page);
 -            XGILockPage(page);
 -
 -            if (temp)
 -            {
 -                XGI_INFO("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n",
 -                          __pa(page_address(page)), lut_addr, j, *lut_addr);
 -                temp--;
 -            }
 -        }
 -
 -        page_block = page_block->next;
 -        page_count -= count;
 -        index += count;
 -        temp = 0;
 -    }
 -
 -    used_block->owner = owner;
 -    list_add(&used_block->list, &xgi_pcie_heap->used_list);
 -
 -#if defined(__i386__) || defined(__x86_64__)
 -    asm volatile ( "wbinvd" ::: "memory" );
 -#else
 -    mb();
 -#endif
 -
 -    /* Flush GART Table */
 -    bWriteReg(0xB03F, 0x40);
 -    bWriteReg(0xB03F, 0x00);
 -
 -    if (owner == PCIE_3D)
 -    {
 -        xgi_pcie_vertex_block = used_block;
 -    }
 -
 -    if (owner == PCIE_3D_CMDLIST)
 -    {
 -        xgi_pcie_cmdlist_block = used_block;
 -    }
 -
 -    if (owner == PCIE_3D_SCRATCHPAD)
 -    {
 -        xgi_pcie_scratchpad_block = used_block;
 -    }
 -
 -    XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n");
 -    return (used_block);
 -
 -fail:
 -    xgi_pcie_block_stuff_free(used_block);
 -    kmem_cache_free(xgi_pcie_cache_block, used_block);
 -    return NULL;
 -}
 -
 -static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t *info, unsigned long offset)
 -{
 -    struct list_head    *free_list, *used_list;
 -    xgi_pcie_block_t    *used_block, *block = NULL;
 -    xgi_pcie_block_t    *prev, *next;
 -    unsigned long       upper, lower;
 -
 -    used_list = xgi_pcie_heap->used_list.next;
 -    while (used_list != &xgi_pcie_heap->used_list)
 -    {
 -        block = list_entry(used_list, struct xgi_pcie_block_s, list);
 -        if (block->offset == offset)
 -        {
 -            break;
 -        }
 -        used_list = used_list->next;
 -    }
 -
 -    if (used_list == &xgi_pcie_heap->used_list)
 -    {
 -        XGI_ERROR("can't find block: 0x%lx to free!\n", offset);
 -        return (NULL);
 -    }
 -
 -    used_block = block;
 -    XGI_INFO("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n",
 -              used_block, used_block->offset, used_block->size, used_block->bus_addr, used_block->hw_addr);
 -
 -    xgi_pcie_block_stuff_free(used_block);
 -
 -    /* update xgi_pcie_heap */
 -    xgi_pcie_heap->max_freesize += used_block->size;
 -
 -    prev = next = NULL;
 -    upper = used_block->offset + used_block->size;
 -    lower = used_block->offset;
 -
 -    free_list = xgi_pcie_heap->free_list.next;
 -
 -    while (free_list != &xgi_pcie_heap->free_list)
 -    {
 -        block = list_entry(free_list, struct xgi_pcie_block_s, list);
 -        if (block->offset == upper)
 -        {
 -            next = block;
 -        }
 -        else if ((block->offset + block->size) == lower)
 -        {
 -            prev = block;
 -        }
 -        free_list = free_list->next;
 -    }
 -
 -    XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev);
 -    list_del(&used_block->list);
 -
 -    if (prev && next)
 -    {
 -        prev->size += (used_block->size + next->size);
 -        list_del(&next->list);
 -        XGI_INFO("free node 0x%p\n", next);
 -        kmem_cache_free(xgi_pcie_cache_block, next);
 -        kmem_cache_free(xgi_pcie_cache_block, used_block);
 -        next = NULL;
 -        used_block = NULL;
 -        return (prev);
 -    }
 -
 -    if (prev)
 -    {
 -        prev->size += used_block->size;
 -        XGI_INFO("free node 0x%p\n", used_block);
 -        kmem_cache_free(xgi_pcie_cache_block, used_block);
 -        used_block = NULL;
 -        return (prev);
 -    }
 -
 -    if (next)
 -    {
 -        next->size += used_block->size;
 -        next->offset = used_block->offset;
 -        XGI_INFO("free node 0x%p\n", used_block);
 -        kmem_cache_free(xgi_pcie_cache_block, used_block);
 -        used_block = NULL;
 -        return (next);
 -    }
 -
 -    used_block->bus_addr = 0;
 -    used_block->hw_addr = 0;
 -    used_block->page_count = 0;
 -    used_block->page_order = 0;
 -    list_add(&used_block->list, &xgi_pcie_heap->free_list);
 -    XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n",
 -              used_block, used_block->offset, used_block->size);
 -    return (used_block);
 -}
 -
 -void xgi_pcie_alloc(xgi_info_t *info, unsigned long size,
 -                    enum PcieOwner owner, xgi_mem_alloc_t *alloc)
 -{
 -    xgi_pcie_block_t *block;
 -    xgi_mem_pid_t *mempid_block;
 -
 -    xgi_down(info->pcie_sem);
 -    block = xgi_pcie_mem_alloc(info, size, owner);
 -    xgi_up(info->pcie_sem);
 -
 -    if (block == NULL)
 -    {
 -        alloc->location = INVALID;
 -        alloc->size     = 0;
 -        alloc->bus_addr = 0;
 -        alloc->hw_addr  = 0;
 -        XGI_ERROR("PCIE RAM allocation failed\n");
 -    }
 -    else
 -    {
 -        XGI_INFO("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n",
 -                 block->offset, block->bus_addr);
 -        alloc->location = NON_LOCAL;
 -        alloc->size     = block->size;
 -        alloc->bus_addr = block->bus_addr;
 -        alloc->hw_addr  = block->hw_addr;
 -
 -        /*
 -        manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE.
 -        PCIE_3D request means a opengl process created.
 -        PCIE_3D_TEXTURE request means texture cannot alloc from fb.
 -        */
 -        if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE)
 -        {
 -            mempid_block = kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL);
 -            if (!mempid_block)
 -                XGI_ERROR("mempid_block alloc failed\n");
 -            mempid_block->location = NON_LOCAL;
 -            if (owner == PCIE_3D)
 -                mempid_block->bus_addr = 0xFFFFFFFF;/*xgi_pcie_vertex_block has the address*/
 -            else
 -                mempid_block->bus_addr = alloc->bus_addr;
 -            mempid_block->pid = alloc->pid;
 -
 -            XGI_INFO("Memory ProcessID add one pcie block pid:%ld successfully! \n", mempid_block->pid);
 -            list_add(&mempid_block->list, &xgi_mempid_list);
 -        }
 -    }
 -}
 -
 -void xgi_pcie_free(xgi_info_t *info, unsigned long bus_addr)
 -{
 -    xgi_pcie_block_t    *block;
 -    unsigned long       offset = bus_addr - info->pcie.base;
 -    xgi_mem_pid_t       *mempid_block;
 -    xgi_mem_pid_t       *mempid_freeblock = NULL;
 -    struct list_head    *mempid_list;
 -    char                isvertex = 0;
 -    int                 processcnt;
 -
 -    if (xgi_pcie_vertex_block && xgi_pcie_vertex_block->bus_addr == bus_addr)
 -        isvertex = 1;
 -
 -    if (isvertex)
 -    {
 -        /*check is there any other process using vertex*/
 -        processcnt = 0;
 -        mempid_list = xgi_mempid_list.next;
 -        while (mempid_list != &xgi_mempid_list)
 -        {
 -            mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);
 -            if (mempid_block->location == NON_LOCAL && mempid_block->bus_addr == 0xFFFFFFFF)
 -            {
 -                ++processcnt;
 -            }
 -            mempid_list = mempid_list->next;
 -        }
 -        if (processcnt > 1)
 -        {
 -            return;
 -        }
 -    }
 -
 -    xgi_down(info->pcie_sem);
 -    block = xgi_pcie_mem_free(info, offset);
 -    xgi_up(info->pcie_sem);
 -
 -    if (block == NULL)
 -    {
 -        XGI_ERROR("xgi_pcie_free() failed at base 0x%lx\n", offset);
 -    }
 -
 -    if (isvertex)
 -        xgi_pcie_vertex_block = NULL;
 -
 -    /* manage mempid */
 -    mempid_list = xgi_mempid_list.next;
 -    while (mempid_list != &xgi_mempid_list)
 -    {
 -        mempid_block = list_entry(mempid_list, struct xgi_mem_pid_s, list);
 -        if (mempid_block->location == NON_LOCAL && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) || (!isvertex && mempid_block->bus_addr == bus_addr)))
 -        {
 -            mempid_freeblock = mempid_block;
 -            break;
 -        }
 -        mempid_list = mempid_list->next;
 -    }
 -    if (mempid_freeblock)
 -    {
 -        list_del(&mempid_freeblock->list);
 -        XGI_INFO("Memory ProcessID delete one pcie block pid:%ld successfully! \n", mempid_freeblock->pid);
 -        kfree(mempid_freeblock);
 -    }
 -}
 -
 -/*
 - * given a bus address, fid the pcie mem block
 - * uses the bus address as the key.
 - */
 -void *xgi_find_pcie_block(xgi_info_t *info, unsigned long address)
 -{
 -    struct list_head    *used_list;
 -    xgi_pcie_block_t    *block;
 -    int                 i;
 -
 -    used_list = xgi_pcie_heap->used_list.next;
 -
 -    while (used_list != &xgi_pcie_heap->used_list)
 -    {
 -        block = list_entry(used_list, struct xgi_pcie_block_s, list);
 -
 -        if (block->bus_addr == address)
 -        {
 -            return block;
 -        }
 -
 -        if (block->page_table)
 -        {
 -            for (i = 0; i < block->page_count; i++)
 -            {
 -                unsigned long offset = block->bus_addr;
 -                if ( (address >= offset) && (address < (offset + PAGE_SIZE)))
 -                {
 -                    return block;
 -                }
 -            }
 -        }
 -        used_list = used_list->next;
 -    }
 -
 -    XGI_ERROR("could not find map for vm 0x%lx\n", address);
 -
 -    return NULL;
 -}
 -
 -/*
 -	address -- GE HW address
 -	return  -- CPU virtual address
 -
 -    assume the CPU VAddr is continuous in not the same block
 -*/
 -void *xgi_find_pcie_virt(xgi_info_t *info, unsigned long address)
 -{
 -    struct list_head    *used_list;
 -    xgi_pcie_block_t    *block;
 -    unsigned long       offset_in_page;
 -    unsigned long       loc_in_pagetable;
 -    void * ret;
 -
 -    XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n");
 -
 -    used_list = xgi_pcie_heap->used_list.next;
 -    XGI_INFO("Jong_05292006-used_list=%ul\n", used_list);
 -
 -    offset_in_page = address & (PAGE_SIZE-1);
 -    XGI_INFO("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", address, PAGE_SIZE-1, offset_in_page);
 -
 -    while (used_list != &xgi_pcie_heap->used_list)
 -    {
 -        block = list_entry(used_list, struct xgi_pcie_block_s, list);
 -	    XGI_INFO("Jong_05292006-block=0x%px\n", block);
 -	    XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", block->hw_addr);
 -	    XGI_INFO("Jong_05292006- block->size=%ul\n",  block->size);
 -
 -        if ((address >= block->hw_addr) && (address < (block->hw_addr + block->size)))
 -        {
 -            loc_in_pagetable = (address - block->hw_addr) >> PAGE_SHIFT;
 -            ret = (void*)(block->page_table[loc_in_pagetable].virt_addr + offset_in_page);
 -
 -		    XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT);
 -		    XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", loc_in_pagetable);
 -		    XGI_INFO("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", block->page_table[loc_in_pagetable].virt_addr);
 -		    XGI_INFO("Jong_05292006-offset_in_page=%d\n", offset_in_page);
 -		    XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", ret);
 -
 -            return ret ;
 -        }
 -        else
 -        {
 -		    XGI_INFO("Jong_05292006-used_list = used_list->next;\n");
 -            used_list = used_list->next;
 -        }
 -    }
 -
 -    XGI_ERROR("could not find map for vm 0x%lx\n", address);
 -    return NULL;
 -}
 -
 -
/* Read from PCIE memory as described by 'req'.
 * NOTE(review): empty placeholder — no read is performed. */
void xgi_read_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req)
{

}
 -
/* Write to PCIE memory as described by 'req'.
 * NOTE(review): empty placeholder — no write is performed. */
void xgi_write_pcie_mem(xgi_info_t *info, xgi_mem_req_t *req)
{
}
 -
 -/*
 -    address -- GE hw address
 -*/
 -void xgi_test_rwinkernel(xgi_info_t *info, unsigned long address)
 -{
 -    unsigned long * virtaddr = 0;
 -    if (address == 0)
 -    {
 -        XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n");
 -        return;
 -    }
 -
 -    virtaddr = (unsigned long *) xgi_find_pcie_virt(info, address);
 -
 -    XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address);
 -    XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr);
 -    XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr);
 -    if (virtaddr != NULL)
 -    {
 -        *virtaddr = 0x00f00fff;
 -    }
 -
 -    XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr);
 -}
 -
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#include "xgi_types.h" +#include "xgi_linux.h" +#include "xgi_drv.h" +#include "xgi_regs.h" +#include "xgi_pcie.h" +#include "xgi_misc.h" + +static xgi_pcie_heap_t *xgi_pcie_heap = NULL; +static kmem_cache_t *xgi_pcie_cache_block = NULL; +static xgi_pcie_block_t *xgi_pcie_vertex_block = NULL; +static xgi_pcie_block_t *xgi_pcie_cmdlist_block = NULL; +static xgi_pcie_block_t *xgi_pcie_scratchpad_block = NULL; +extern struct list_head xgi_mempid_list; + +static unsigned long xgi_pcie_lut_alloc(unsigned long page_order) +{ +	struct page *page; +	unsigned long page_addr = 0; +	unsigned long page_count = 0; +	int i; + +	page_count = (1 << page_order); +	page_addr = __get_free_pages(GFP_KERNEL, page_order); + +	if (page_addr == 0UL) { +		XGI_ERROR("Can't get free pages: 0x%lx from system memory !\n", +			  page_count); +		return 0; +	} + +	page = virt_to_page(page_addr); + +	for (i = 0; i < page_count; i++, page++) { +		XGI_INC_PAGE_COUNT(page); +		XGILockPage(page); +	} + +	XGI_INFO("page_count: 0x%lx page_order: 0x%lx page_addr: 0x%lx \n", +		 page_count, page_order, page_addr); +	return page_addr; +} + +static void xgi_pcie_lut_free(unsigned long page_addr, unsigned long page_order) +{ +	struct page *page; +	unsigned long page_count = 0; +	int i; + +	page_count = (1 << page_order); +	page = virt_to_page(page_addr); + +	for (i = 0; i < page_count; i++, page++) { +		XGI_DEC_PAGE_COUNT(page); +		XGIUnlockPage(page); +	} + +	free_pages(page_addr, page_order); +} + +static int xgi_pcie_lut_init(xgi_info_t * info) +{ +	unsigned char *page_addr = NULL; +	unsigned long pciePageCount, lutEntryNum, lutPageCount, lutPageOrder; +	unsigned long count = 0; +	u8 temp = 0; + +	/* Jong 06/06/2006 */ +	unsigned long pcie_aperture_size; + +	info->pcie.size = 128 * 1024 * 1024; + +	/* Get current FB aperture size */ +	temp = In3x5(0x27); +	XGI_INFO("In3x5(0x27): 0x%x \n", temp); + +	if (temp & 0x01) {	/* 
256MB; Jong 06/05/2006; 0x10000000 */ +		/* Jong 06/06/2006; allocate memory */ +		pcie_aperture_size = 256 * 1024 * 1024; +		/* info->pcie.base = 256 * 1024 * 1024; *//* pcie base is different from fb base */ +	} else {		/* 128MB; Jong 06/05/2006; 0x08000000 */ + +		/* Jong 06/06/2006; allocate memory */ +		pcie_aperture_size = 128 * 1024 * 1024; +		/* info->pcie.base = 128 * 1024 * 1024; */ +	} + +	/* Jong 06/06/2006; allocate memory; it can be used for build-in kernel modules */ +	/* info->pcie.base=(unsigned long)alloc_bootmem(pcie_mem_size); */ +	/* total 496 MB; need 256 MB (0x10000000); start from 240 MB (0x0F000000) */ +	/* info->pcie.base=ioremap(0x0F000000, 0x10000000); *//* Cause system hang */ +	info->pcie.base = pcie_aperture_size;	/* works */ +	/* info->pcie.base=info->fb.base + info->fb.size; *//* System hang */ +	/* info->pcie.base=128 * 1024 * 1024; *//* System hang */ + +	XGI_INFO("Jong06062006-info->pcie.base: 0x%lx \n", info->pcie.base); + +	/* Get current lookup table page size */ +	temp = bReadReg(0xB00C); +	if (temp & 0x04) {	/* 8KB */ +		info->lutPageSize = 8 * 1024; +	} else {		/* 4KB */ + +		info->lutPageSize = 4 * 1024; +	} + +	XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + +#if 0 +	/* Get current lookup table location */ +	temp = bReadReg(0xB00C); +	if (temp & 0x02) {	/* LFB */ +		info->isLUTInLFB = TRUE; +		/* Current we only support lookup table in LFB */ +		temp &= 0xFD; +		bWriteReg(0xB00C, temp); +		info->isLUTInLFB = FALSE; +	} else {		/* SFB */ + +		info->isLUTInLFB = FALSE; +	} + +	XGI_INFO("info->lutPageSize: 0x%lx \n", info->lutPageSize); + +	/* Get current SDFB page size */ +	temp = bReadReg(0xB00C); +	if (temp & 0x08) {	/* 8MB */ +		info->sdfbPageSize = 8 * 1024 * 1024; +	} else {		/* 4MB */ + +		info->sdfbPageSize = 4 * 1024 * 1024; +	} +#endif +	pciePageCount = (info->pcie.size + PAGE_SIZE - 1) / PAGE_SIZE; + +	/* +	 * Allocate memory for PCIE GART table; +	 */ +	lutEntryNum = pciePageCount; +	lutPageCount = 
(lutEntryNum * 4 + PAGE_SIZE - 1) / PAGE_SIZE; + +	/* get page_order base on page_count */ +	count = lutPageCount; +	for (lutPageOrder = 0; count; count >>= 1, ++lutPageOrder) ; + +	if ((lutPageCount << 1) == (1 << lutPageOrder)) { +		lutPageOrder -= 1; +	} + +	XGI_INFO("lutEntryNum: 0x%lx lutPageCount: 0x%lx lutPageOrder 0x%lx\n", +		 lutEntryNum, lutPageCount, lutPageOrder); + +	info->lutPageOrder = lutPageOrder; +	page_addr = (unsigned char *)xgi_pcie_lut_alloc(lutPageOrder); + +	if (!page_addr) { +		XGI_ERROR("cannot allocate PCIE lut page!\n"); +		goto fail; +	} +	info->lut_base = (unsigned long *)page_addr; + +	XGI_INFO("page_addr: 0x%p virt_to_phys(page_virtual): 0x%lx \n", +		 page_addr, virt_to_phys(page_addr)); + +	XGI_INFO +	    ("info->lut_base: 0x%p __pa(info->lut_base): 0x%lx info->lutPageOrder 0x%lx\n", +	     info->lut_base, __pa(info->lut_base), info->lutPageOrder); + +	/* +	 * clean all PCIE GART Entry +	 */ +	memset(page_addr, 0, PAGE_SIZE << lutPageOrder); + +#if defined(__i386__) || defined(__x86_64__) +	asm volatile ("wbinvd":::"memory"); +#else +	mb(); +#endif + +	/* Set GART in SFB */ +	bWriteReg(0xB00C, bReadReg(0xB00C) & ~0x02); +	/* Set GART base address to HW */ +	dwWriteReg(0xB034, __pa(info->lut_base)); + +	return 1; +      fail: +	return 0; +} + +static void xgi_pcie_lut_cleanup(xgi_info_t * info) +{ +	if (info->lut_base) { +		XGI_INFO("info->lut_base: 0x%p info->lutPageOrder: 0x%lx \n", +			 info->lut_base, info->lutPageOrder); +		xgi_pcie_lut_free((unsigned long)info->lut_base, +				  info->lutPageOrder); +		info->lut_base = NULL; +	} +} + +static xgi_pcie_block_t *xgi_pcie_new_node(void) +{ +	xgi_pcie_block_t *block = +	    (xgi_pcie_block_t *) kmem_cache_alloc(xgi_pcie_cache_block, +						  GFP_KERNEL); +	if (block == NULL) { +		return NULL; +	} + +	block->offset = 0;	/* block's offset in pcie memory, begin from 0 */ +	block->size = 0;	/* The block size.              
*/ +	block->bus_addr = 0;	/* CPU access address/bus address */ +	block->hw_addr = 0;	/* GE access address            */ +	block->page_count = 0; +	block->page_order = 0; +	block->page_block = NULL; +	block->page_table = NULL; +	block->owner = PCIE_INVALID; + +	return block; +} + +static void xgi_pcie_block_stuff_free(xgi_pcie_block_t * block) +{ +	struct page *page; +	xgi_page_block_t *page_block = block->page_block; +	xgi_page_block_t *free_block; +	unsigned long page_count = 0; +	int i; + +	//XGI_INFO("block->page_block: 0x%p \n", block->page_block); +	while (page_block) { +		page_count = page_block->page_count; + +		page = virt_to_page(page_block->virt_addr); +		for (i = 0; i < page_count; i++, page++) { +			XGI_DEC_PAGE_COUNT(page); +			XGIUnlockPage(page); +		} +		free_pages(page_block->virt_addr, page_block->page_order); + +		page_block->phys_addr = 0; +		page_block->virt_addr = 0; +		page_block->page_count = 0; +		page_block->page_order = 0; + +		free_block = page_block; +		page_block = page_block->next; +		//XGI_INFO("free free_block: 0x%p \n", free_block); +		kfree(free_block); +		free_block = NULL; +	} + +	if (block->page_table) { +		//XGI_INFO("free block->page_table: 0x%p \n", block->page_table); +		kfree(block->page_table); +		block->page_table = NULL; +	} +} + +int xgi_pcie_heap_init(xgi_info_t * info) +{ +	xgi_pcie_block_t *block; + +	if (!xgi_pcie_lut_init(info)) { +		XGI_ERROR("xgi_pcie_lut_init failed\n"); +		return 0; +	} + +	xgi_pcie_heap = +	    (xgi_pcie_heap_t *) kmalloc(sizeof(xgi_pcie_heap_t), GFP_KERNEL); +	if (!xgi_pcie_heap) { +		XGI_ERROR("xgi_pcie_heap alloc failed\n"); +		goto fail1; +	} +	INIT_LIST_HEAD(&xgi_pcie_heap->free_list); +	INIT_LIST_HEAD(&xgi_pcie_heap->used_list); +	INIT_LIST_HEAD(&xgi_pcie_heap->sort_list); + +	xgi_pcie_heap->max_freesize = info->pcie.size; + +	xgi_pcie_cache_block = +	    kmem_cache_create("xgi_pcie_block", sizeof(xgi_pcie_block_t), 0, +			      SLAB_HWCACHE_ALIGN, NULL, NULL); + +	if (NULL == 
xgi_pcie_cache_block) { +		XGI_ERROR("Fail to creat xgi_pcie_block\n"); +		goto fail2; +	} + +	block = (xgi_pcie_block_t *) xgi_pcie_new_node(); +	if (!block) { +		XGI_ERROR("xgi_pcie_new_node failed\n"); +		goto fail3; +	} + +	block->offset = 0;	/* block's offset in pcie memory, begin from 0 */ +	block->size = info->pcie.size; + +	list_add(&block->list, &xgi_pcie_heap->free_list); + +	XGI_INFO("PCIE start address: 0x%lx, memory size : 0x%lx\n", +		 block->offset, block->size); +	return 1; +      fail3: +	if (xgi_pcie_cache_block) { +		kmem_cache_destroy(xgi_pcie_cache_block); +		xgi_pcie_cache_block = NULL; +	} + +      fail2: +	if (xgi_pcie_heap) { +		kfree(xgi_pcie_heap); +		xgi_pcie_heap = NULL; +	} +      fail1: +	xgi_pcie_lut_cleanup(info); +	return 0; +} + +void xgi_pcie_heap_check(void) +{ +	struct list_head *useList, *temp; +	xgi_pcie_block_t *block; +	unsigned int ownerIndex; +	char *ownerStr[6] = +	    { "2D", "3D", "3D_CMD", "3D_SCR", "3D_TEX", "ELSE" }; + +	if (xgi_pcie_heap) { +		useList = &xgi_pcie_heap->used_list; +		temp = useList->next; +		XGI_INFO("pcie freemax = 0x%lx\n", xgi_pcie_heap->max_freesize); +		while (temp != useList) { +			block = list_entry(temp, struct xgi_pcie_block_s, list); +			if (block->owner == PCIE_2D) +				ownerIndex = 0; +			else if (block->owner > PCIE_3D_TEXTURE +				 || block->owner < PCIE_2D +				 || block->owner < PCIE_3D) +				ownerIndex = 5; +			else +				ownerIndex = block->owner - PCIE_3D + 1; +			XGI_INFO +			    ("Allocated by %s, block->offset: 0x%lx block->size: 0x%lx \n", +			     ownerStr[ownerIndex], block->offset, block->size); +			temp = temp->next; +		} + +	} +} + +void xgi_pcie_heap_cleanup(xgi_info_t * info) +{ +	struct list_head *free_list, *temp; +	xgi_pcie_block_t *block; +	int j; + +	xgi_pcie_lut_cleanup(info); +	XGI_INFO("xgi_pcie_lut_cleanup scceeded\n"); + +	if (xgi_pcie_heap) { +		free_list = &xgi_pcie_heap->free_list; +		for (j = 0; j < 3; j++, free_list++) { +			temp = free_list->next; + +			
while (temp != free_list) { +				block = +				    list_entry(temp, struct xgi_pcie_block_s, +					       list); +				XGI_INFO +				    ("No. %d block->offset: 0x%lx block->size: 0x%lx \n", +				     j, block->offset, block->size); +				xgi_pcie_block_stuff_free(block); +				block->bus_addr = 0; +				block->hw_addr = 0; + +				temp = temp->next; +				//XGI_INFO("No. %d free block: 0x%p \n", j, block); +				kmem_cache_free(xgi_pcie_cache_block, block); +				block = NULL; +			} +		} + +		XGI_INFO("free xgi_pcie_heap: 0x%p \n", xgi_pcie_heap); +		kfree(xgi_pcie_heap); +		xgi_pcie_heap = NULL; +	} + +	if (xgi_pcie_cache_block) { +		kmem_cache_destroy(xgi_pcie_cache_block); +		xgi_pcie_cache_block = NULL; +	} +} + +static xgi_pcie_block_t *xgi_pcie_mem_alloc(xgi_info_t * info, +					    unsigned long originalSize, +					    enum PcieOwner owner) +{ +	struct list_head *free_list; +	xgi_pcie_block_t *block, *used_block, *free_block; +	xgi_page_block_t *page_block, *prev_page_block; +	struct page *page; +	unsigned long page_order = 0, count = 0, index = 0; +	unsigned long page_addr = 0; +	unsigned long *lut_addr = NULL; +	unsigned long lut_id = 0; +	unsigned long size = (originalSize + PAGE_SIZE - 1) & PAGE_MASK; +	int i, j, page_count = 0; +	int temp = 0; + +	XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-Begin\n"); +	XGI_INFO("Original 0x%lx bytes requested, really 0x%lx allocated\n", +		 originalSize, size); + +	if (owner == PCIE_3D) { +		if (xgi_pcie_vertex_block) { +			XGI_INFO +			    ("PCIE Vertex has been created, return directly.\n"); +			return xgi_pcie_vertex_block; +		} +	} + +	if (owner == PCIE_3D_CMDLIST) { +		if (xgi_pcie_cmdlist_block) { +			XGI_INFO +			    ("PCIE Cmdlist has been created, return directly.\n"); +			return xgi_pcie_cmdlist_block; +		} +	} + +	if (owner == PCIE_3D_SCRATCHPAD) { +		if (xgi_pcie_scratchpad_block) { +			XGI_INFO +			    ("PCIE Scratchpad has been created, return directly.\n"); +			return xgi_pcie_scratchpad_block; +		} +	} + +	if 
(size == 0) { +		XGI_ERROR("size == 0 \n"); +		return (NULL); +	} + +	XGI_INFO("max_freesize: 0x%lx \n", xgi_pcie_heap->max_freesize); +	if (size > xgi_pcie_heap->max_freesize) { +		XGI_ERROR +		    ("size: 0x%lx bigger than PCIE total free size: 0x%lx.\n", +		     size, xgi_pcie_heap->max_freesize); +		return (NULL); +	} + +	/* Jong 05/30/2006; find next free list which has enough space */ +	free_list = xgi_pcie_heap->free_list.next; +	while (free_list != &xgi_pcie_heap->free_list) { +		//XGI_INFO("free_list: 0x%px \n", free_list); +		block = list_entry(free_list, struct xgi_pcie_block_s, list); +		if (size <= block->size) { +			break; +		} +		free_list = free_list->next; +	} + +	if (free_list == &xgi_pcie_heap->free_list) { +		XGI_ERROR("Can't allocate %ldk size from PCIE memory !\n", +			  size / 1024); +		return (NULL); +	} + +	free_block = block; +	XGI_INFO("alloc size: 0x%lx from offset: 0x%lx size: 0x%lx \n", +		 size, free_block->offset, free_block->size); + +	if (size == free_block->size) { +		used_block = free_block; +		XGI_INFO("size==free_block->size: free_block = 0x%p\n", +			 free_block); +		list_del(&free_block->list); +	} else { +		used_block = xgi_pcie_new_node(); +		if (used_block == NULL) { +			return NULL; +		} + +		if (used_block == free_block) { +			XGI_ERROR("used_block == free_block = 0x%p\n", +				  used_block); +		} + +		used_block->offset = free_block->offset; +		used_block->size = size; + +		free_block->offset += size; +		free_block->size -= size; +	} + +	xgi_pcie_heap->max_freesize -= size; + +	used_block->bus_addr = info->pcie.base + used_block->offset; +	used_block->hw_addr = info->pcie.base + used_block->offset; +	used_block->page_count = page_count = size / PAGE_SIZE; + +	/* get page_order base on page_count */ +	for (used_block->page_order = 0; page_count; page_count >>= 1) { +		++used_block->page_order; +	} + +	if ((used_block->page_count << 1) == (1 << used_block->page_order)) { +		used_block->page_order--; +	} +	XGI_INFO +	    
("used_block->offset: 0x%lx, used_block->size: 0x%lx, used_block->bus_addr: 0x%lx, used_block->hw_addr: 0x%lx, used_block->page_count: 0x%lx used_block->page_order: 0x%lx\n", +	     used_block->offset, used_block->size, used_block->bus_addr, +	     used_block->hw_addr, used_block->page_count, +	     used_block->page_order); + +	used_block->page_block = NULL; +	//used_block->page_block = (xgi_pages_block_t *)kmalloc(sizeof(xgi_pages_block_t), GFP_KERNEL); +	//if (!used_block->page_block) return NULL; +	//used_block->page_block->next = NULL; + +	used_block->page_table = +	    (xgi_pte_t *) kmalloc(sizeof(xgi_pte_t) * used_block->page_count, +				  GFP_KERNEL); +	if (used_block->page_table == NULL) { +		goto fail; +	} + +	lut_id = (used_block->offset >> PAGE_SHIFT); +	lut_addr = info->lut_base; +	lut_addr += lut_id; +	XGI_INFO("lutAddr: 0x%p lutID: 0x%lx \n", lut_addr, lut_id); + +	/* alloc free pages from system */ +	page_count = used_block->page_count; +	page_block = used_block->page_block; +	prev_page_block = used_block->page_block; +	for (i = 0; page_count > 0; i++) { +		/* if size is bigger than 2M bytes, it should be split */ +		if (page_count > (1 << XGI_PCIE_ALLOC_MAX_ORDER)) { +			page_order = XGI_PCIE_ALLOC_MAX_ORDER; +		} else { +			count = page_count; +			for (page_order = 0; count; count >>= 1, ++page_order) ; + +			if ((page_count << 1) == (1 << page_order)) { +				page_order -= 1; +			} +		} + +		count = (1 << page_order); +		page_addr = __get_free_pages(GFP_KERNEL, page_order); +		XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-page_addr=0x%lx \n", +			 page_addr); + +		if (!page_addr) { +			XGI_ERROR +			    ("No: %d :Can't get free pages: 0x%lx from system memory !\n", +			     i, count); +			goto fail; +		} + +		/* Jong 05/30/2006; test */ +		memset((unsigned char *)page_addr, 0xFF, +		       PAGE_SIZE << page_order); +		/* memset((unsigned char *)page_addr, 0, PAGE_SIZE << page_order); */ + +		if (page_block == NULL) { +			page_block = +			    
(xgi_page_block_t *) +			    kmalloc(sizeof(xgi_page_block_t), GFP_KERNEL); +			if (!page_block) { +				XGI_ERROR +				    ("Can't get memory for page_block! \n"); +				goto fail; +			} +		} + +		if (prev_page_block == NULL) { +			used_block->page_block = page_block; +			prev_page_block = page_block; +		} else { +			prev_page_block->next = page_block; +			prev_page_block = page_block; +		} + +		page_block->next = NULL; +		page_block->phys_addr = __pa(page_addr); +		page_block->virt_addr = page_addr; +		page_block->page_count = count; +		page_block->page_order = page_order; + +		XGI_INFO +		    ("Jong05302006-xgi_pcie_mem_alloc-page_block->phys_addr=0x%lx \n", +		     page_block->phys_addr); +		XGI_INFO +		    ("Jong05302006-xgi_pcie_mem_alloc-page_block->virt_addr=0x%lx \n", +		     page_block->virt_addr); + +		page = virt_to_page(page_addr); + +		//XGI_INFO("No: %d page_order: 0x%lx page_count: 0x%x count: 0x%lx index: 0x%lx lut_addr: 0x%p" +		//         "page_block->phys_addr: 0x%lx page_block->virt_addr: 0x%lx \n", +		//          i, page_order, page_count, count, index, lut_addr, page_block->phys_addr, page_block->virt_addr); + +		for (j = 0; j < count; j++, page++, lut_addr++) { +			used_block->page_table[index + j].phys_addr = +			    __pa(page_address(page)); +			used_block->page_table[index + j].virt_addr = +			    (unsigned long)page_address(page); + +			XGI_INFO +			    ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].phys_addr=0x%lx \n", +			     used_block->page_table[index + j].phys_addr); +			XGI_INFO +			    ("Jong05302006-xgi_pcie_mem_alloc-used_block->page_table[index + j].virt_addr=0x%lx \n", +			     used_block->page_table[index + j].virt_addr); + +			*lut_addr = __pa(page_address(page)); +			XGI_INC_PAGE_COUNT(page); +			XGILockPage(page); + +			if (temp) { +				XGI_INFO +				    ("__pa(page_address(page)): 0x%lx lutAddr: 0x%p lutAddr No: 0x%x = 0x%lx \n", +				     __pa(page_address(page)), lut_addr, j, +				     *lut_addr); +	
			temp--; +			} +		} + +		page_block = page_block->next; +		page_count -= count; +		index += count; +		temp = 0; +	} + +	used_block->owner = owner; +	list_add(&used_block->list, &xgi_pcie_heap->used_list); + +#if defined(__i386__) || defined(__x86_64__) +	asm volatile ("wbinvd":::"memory"); +#else +	mb(); +#endif + +	/* Flush GART Table */ +	bWriteReg(0xB03F, 0x40); +	bWriteReg(0xB03F, 0x00); + +	if (owner == PCIE_3D) { +		xgi_pcie_vertex_block = used_block; +	} + +	if (owner == PCIE_3D_CMDLIST) { +		xgi_pcie_cmdlist_block = used_block; +	} + +	if (owner == PCIE_3D_SCRATCHPAD) { +		xgi_pcie_scratchpad_block = used_block; +	} + +	XGI_INFO("Jong05302006-xgi_pcie_mem_alloc-End \n"); +	return (used_block); + +      fail: +	xgi_pcie_block_stuff_free(used_block); +	kmem_cache_free(xgi_pcie_cache_block, used_block); +	return NULL; +} + +static xgi_pcie_block_t *xgi_pcie_mem_free(xgi_info_t * info, +					   unsigned long offset) +{ +	struct list_head *free_list, *used_list; +	xgi_pcie_block_t *used_block, *block = NULL; +	xgi_pcie_block_t *prev, *next; +	unsigned long upper, lower; + +	used_list = xgi_pcie_heap->used_list.next; +	while (used_list != &xgi_pcie_heap->used_list) { +		block = list_entry(used_list, struct xgi_pcie_block_s, list); +		if (block->offset == offset) { +			break; +		} +		used_list = used_list->next; +	} + +	if (used_list == &xgi_pcie_heap->used_list) { +		XGI_ERROR("can't find block: 0x%lx to free!\n", offset); +		return (NULL); +	} + +	used_block = block; +	XGI_INFO +	    ("used_block: 0x%p, offset = 0x%lx, size = 0x%lx, bus_addr = 0x%lx, hw_addr = 0x%lx\n", +	     used_block, used_block->offset, used_block->size, +	     used_block->bus_addr, used_block->hw_addr); + +	xgi_pcie_block_stuff_free(used_block); + +	/* update xgi_pcie_heap */ +	xgi_pcie_heap->max_freesize += used_block->size; + +	prev = next = NULL; +	upper = used_block->offset + used_block->size; +	lower = used_block->offset; + +	free_list = xgi_pcie_heap->free_list.next; + +	while 
(free_list != &xgi_pcie_heap->free_list) { +		block = list_entry(free_list, struct xgi_pcie_block_s, list); +		if (block->offset == upper) { +			next = block; +		} else if ((block->offset + block->size) == lower) { +			prev = block; +		} +		free_list = free_list->next; +	} + +	XGI_INFO("next = 0x%p, prev = 0x%p\n", next, prev); +	list_del(&used_block->list); + +	if (prev && next) { +		prev->size += (used_block->size + next->size); +		list_del(&next->list); +		XGI_INFO("free node 0x%p\n", next); +		kmem_cache_free(xgi_pcie_cache_block, next); +		kmem_cache_free(xgi_pcie_cache_block, used_block); +		next = NULL; +		used_block = NULL; +		return (prev); +	} + +	if (prev) { +		prev->size += used_block->size; +		XGI_INFO("free node 0x%p\n", used_block); +		kmem_cache_free(xgi_pcie_cache_block, used_block); +		used_block = NULL; +		return (prev); +	} + +	if (next) { +		next->size += used_block->size; +		next->offset = used_block->offset; +		XGI_INFO("free node 0x%p\n", used_block); +		kmem_cache_free(xgi_pcie_cache_block, used_block); +		used_block = NULL; +		return (next); +	} + +	used_block->bus_addr = 0; +	used_block->hw_addr = 0; +	used_block->page_count = 0; +	used_block->page_order = 0; +	list_add(&used_block->list, &xgi_pcie_heap->free_list); +	XGI_INFO("Recycled free node %p, offset = 0x%lx, size = 0x%lx\n", +		 used_block, used_block->offset, used_block->size); +	return (used_block); +} + +void xgi_pcie_alloc(xgi_info_t * info, unsigned long size, +		    enum PcieOwner owner, xgi_mem_alloc_t * alloc) +{ +	xgi_pcie_block_t *block; +	xgi_mem_pid_t *mempid_block; + +	xgi_down(info->pcie_sem); +	block = xgi_pcie_mem_alloc(info, size, owner); +	xgi_up(info->pcie_sem); + +	if (block == NULL) { +		alloc->location = INVALID; +		alloc->size = 0; +		alloc->bus_addr = 0; +		alloc->hw_addr = 0; +		XGI_ERROR("PCIE RAM allocation failed\n"); +	} else { +		XGI_INFO +		    ("PCIE RAM allocation succeeded: offset = 0x%lx, bus_addr = 0x%lx\n", +		     block->offset, 
block->bus_addr); +		alloc->location = NON_LOCAL; +		alloc->size = block->size; +		alloc->bus_addr = block->bus_addr; +		alloc->hw_addr = block->hw_addr; + +		/* +		   manage mempid, handle PCIE_3D, PCIE_3D_TEXTURE. +		   PCIE_3D request means a opengl process created. +		   PCIE_3D_TEXTURE request means texture cannot alloc from fb. +		 */ +		if (owner == PCIE_3D || owner == PCIE_3D_TEXTURE) { +			mempid_block = +			    kmalloc(sizeof(xgi_mem_pid_t), GFP_KERNEL); +			if (!mempid_block) +				XGI_ERROR("mempid_block alloc failed\n"); +			mempid_block->location = NON_LOCAL; +			if (owner == PCIE_3D) +				mempid_block->bus_addr = 0xFFFFFFFF;	/*xgi_pcie_vertex_block has the address */ +			else +				mempid_block->bus_addr = alloc->bus_addr; +			mempid_block->pid = alloc->pid; + +			XGI_INFO +			    ("Memory ProcessID add one pcie block pid:%ld successfully! \n", +			     mempid_block->pid); +			list_add(&mempid_block->list, &xgi_mempid_list); +		} +	} +} + +void xgi_pcie_free(xgi_info_t * info, unsigned long bus_addr) +{ +	xgi_pcie_block_t *block; +	unsigned long offset = bus_addr - info->pcie.base; +	xgi_mem_pid_t *mempid_block; +	xgi_mem_pid_t *mempid_freeblock = NULL; +	struct list_head *mempid_list; +	char isvertex = 0; +	int processcnt; + +	if (xgi_pcie_vertex_block +	    && xgi_pcie_vertex_block->bus_addr == bus_addr) +		isvertex = 1; + +	if (isvertex) { +		/*check is there any other process using vertex */ +		processcnt = 0; +		mempid_list = xgi_mempid_list.next; +		while (mempid_list != &xgi_mempid_list) { +			mempid_block = +			    list_entry(mempid_list, struct xgi_mem_pid_s, list); +			if (mempid_block->location == NON_LOCAL +			    && mempid_block->bus_addr == 0xFFFFFFFF) { +				++processcnt; +			} +			mempid_list = mempid_list->next; +		} +		if (processcnt > 1) { +			return; +		} +	} + +	xgi_down(info->pcie_sem); +	block = xgi_pcie_mem_free(info, offset); +	xgi_up(info->pcie_sem); + +	if (block == NULL) { +		XGI_ERROR("xgi_pcie_free() failed at base 
0x%lx\n", offset); +	} + +	if (isvertex) +		xgi_pcie_vertex_block = NULL; + +	/* manage mempid */ +	mempid_list = xgi_mempid_list.next; +	while (mempid_list != &xgi_mempid_list) { +		mempid_block = +		    list_entry(mempid_list, struct xgi_mem_pid_s, list); +		if (mempid_block->location == NON_LOCAL +		    && ((isvertex && mempid_block->bus_addr == 0xFFFFFFFF) +			|| (!isvertex && mempid_block->bus_addr == bus_addr))) { +			mempid_freeblock = mempid_block; +			break; +		} +		mempid_list = mempid_list->next; +	} +	if (mempid_freeblock) { +		list_del(&mempid_freeblock->list); +		XGI_INFO +		    ("Memory ProcessID delete one pcie block pid:%ld successfully! \n", +		     mempid_freeblock->pid); +		kfree(mempid_freeblock); +	} +} + +/* + * given a bus address, fid the pcie mem block + * uses the bus address as the key. + */ +void *xgi_find_pcie_block(xgi_info_t * info, unsigned long address) +{ +	struct list_head *used_list; +	xgi_pcie_block_t *block; +	int i; + +	used_list = xgi_pcie_heap->used_list.next; + +	while (used_list != &xgi_pcie_heap->used_list) { +		block = list_entry(used_list, struct xgi_pcie_block_s, list); + +		if (block->bus_addr == address) { +			return block; +		} + +		if (block->page_table) { +			for (i = 0; i < block->page_count; i++) { +				unsigned long offset = block->bus_addr; +				if ((address >= offset) +				    && (address < (offset + PAGE_SIZE))) { +					return block; +				} +			} +		} +		used_list = used_list->next; +	} + +	XGI_ERROR("could not find map for vm 0x%lx\n", address); + +	return NULL; +} + +/* +	address -- GE HW address +	return  -- CPU virtual address + +    assume the CPU VAddr is continuous in not the same block +*/ +void *xgi_find_pcie_virt(xgi_info_t * info, unsigned long address) +{ +	struct list_head *used_list; +	xgi_pcie_block_t *block; +	unsigned long offset_in_page; +	unsigned long loc_in_pagetable; +	void *ret; + +	XGI_INFO("Jong_05292006-xgi_find_pcie_virt-Begin\n"); + +	used_list = xgi_pcie_heap->used_list.next; +	
XGI_INFO("Jong_05292006-used_list=%ul\n", used_list); + +	offset_in_page = address & (PAGE_SIZE - 1); +	XGI_INFO +	    ("Jong_05292006-address=0x%px, PAGE_SIZE-1=%ul, offset_in_page=%ul\n", +	     address, PAGE_SIZE - 1, offset_in_page); + +	while (used_list != &xgi_pcie_heap->used_list) { +		block = list_entry(used_list, struct xgi_pcie_block_s, list); +		XGI_INFO("Jong_05292006-block=0x%px\n", block); +		XGI_INFO("Jong_05292006-block->hw_addr=0x%px\n", +			 block->hw_addr); +		XGI_INFO("Jong_05292006- block->size=%ul\n", block->size); + +		if ((address >= block->hw_addr) +		    && (address < (block->hw_addr + block->size))) { +			loc_in_pagetable = +			    (address - block->hw_addr) >> PAGE_SHIFT; +			ret = +			    (void *)(block->page_table[loc_in_pagetable]. +				     virt_addr + offset_in_page); + +			XGI_INFO("Jong_05292006-PAGE_SHIFT=%d\n", PAGE_SHIFT); +			XGI_INFO("Jong_05292006-loc_in_pagetable=0x%px\n", +				 loc_in_pagetable); +			XGI_INFO +			    ("Jong_05292006-block->page_table[loc_in_pagetable].virt_addr=0x%px\n", +			     block->page_table[loc_in_pagetable].virt_addr); +			XGI_INFO("Jong_05292006-offset_in_page=%d\n", +				 offset_in_page); +			XGI_INFO("Jong_05292006-return(virt_addr)=0x%px\n", +				 ret); + +			return ret; +		} else { +			XGI_INFO +			    ("Jong_05292006-used_list = used_list->next;\n"); +			used_list = used_list->next; +		} +	} + +	XGI_ERROR("could not find map for vm 0x%lx\n", address); +	return NULL; +} + +void xgi_read_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +{ + +} + +void xgi_write_pcie_mem(xgi_info_t * info, xgi_mem_req_t * req) +{ +} + +/* +    address -- GE hw address +*/ +void xgi_test_rwinkernel(xgi_info_t * info, unsigned long address) +{ +	unsigned long *virtaddr = 0; +	if (address == 0) { +		XGI_INFO("[Jong-kd] input GE HW addr is 0x00000000\n"); +		return; +	} + +	virtaddr = (unsigned long *)xgi_find_pcie_virt(info, address); + +	XGI_INFO("[Jong-kd] input GE HW addr is 0x%lx\n", address); +	
XGI_INFO("[Jong-kd] convert to CPU virt addr 0x%px\n", virtaddr); +	XGI_INFO("[Jong-kd] origin [virtaddr] = 0x%lx\n", *virtaddr); +	if (virtaddr != NULL) { +		*virtaddr = 0x00f00fff; +	} + +	XGI_INFO("[Jong-kd] modified [virtaddr] = 0x%lx\n", *virtaddr); +} diff --git a/linux-core/xgi_pcie.h b/linux-core/xgi_pcie.h index cd5f85b8..32c2b584 100644 --- a/linux-core/xgi_pcie.h +++ b/linux-core/xgi_pcie.h @@ -1,73 +1,73 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#ifndef _XGI_PCIE_H_
 -#define _XGI_PCIE_H_
 -
 -#ifndef XGI_PCIE_ALLOC_MAX_ORDER
 -#define XGI_PCIE_ALLOC_MAX_ORDER    1  /* 8K in Kernel 2.4.* */
 -#endif
 -
 -typedef struct xgi_page_block_s {
 -    struct xgi_page_block_s *next;
 -    unsigned long       phys_addr;
 -    unsigned long       virt_addr;
 -    unsigned long       page_count;
 -    unsigned long       page_order;
 -} xgi_page_block_t;
 -
 -typedef struct xgi_pcie_block_s {
 -    struct list_head    list;
 -    unsigned long       offset;     /* block's offset in pcie memory, begin from 0 */
 -    unsigned long       size;       /* The block size.              */
 -    unsigned long       bus_addr;   /* CPU access address/bus address */
 -    unsigned long       hw_addr;    /* GE access address            */
 -
 -    unsigned long       page_count;
 -    unsigned long       page_order;
 -    xgi_page_block_t    *page_block;
 -    xgi_pte_t           *page_table; /* list of physical pages allocated */
 -
 -    atomic_t            use_count;
 -    enum PcieOwner      owner;
 -    unsigned long       processID;
 -} xgi_pcie_block_t;
 -
 -typedef struct xgi_pcie_list_s {
 -    xgi_pcie_block_t    *head;
 -    xgi_pcie_block_t    *tail;
 -} xgi_pcie_list_t;
 -
 -typedef struct xgi_pcie_heap_s {
 -    struct list_head    free_list;
 -    struct list_head    used_list;
 -    struct list_head    sort_list;
 -    unsigned long       max_freesize;
 -} xgi_pcie_heap_t;
 -
 -#endif
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#ifndef _XGI_PCIE_H_ +#define _XGI_PCIE_H_ + +#ifndef XGI_PCIE_ALLOC_MAX_ORDER +#define XGI_PCIE_ALLOC_MAX_ORDER    1	/* 8K in Kernel 2.4.* */ +#endif + +typedef struct xgi_page_block_s { +	struct xgi_page_block_s *next; +	unsigned long phys_addr; +	unsigned long virt_addr; +	unsigned long page_count; +	unsigned long page_order; +} xgi_page_block_t; + +typedef struct xgi_pcie_block_s { +	struct list_head list; +	unsigned long offset;	/* block's offset in pcie memory, begin from 0 */ +	unsigned long size;	/* The block size.              */ +	unsigned long bus_addr;	/* CPU access address/bus address */ +	unsigned long hw_addr;	/* GE access address            */ + +	unsigned long page_count; +	unsigned long page_order; +	xgi_page_block_t *page_block; +	xgi_pte_t *page_table;	/* list of physical pages allocated */ + +	atomic_t use_count; +	enum PcieOwner owner; +	unsigned long processID; +} xgi_pcie_block_t; + +typedef struct xgi_pcie_list_s { +	xgi_pcie_block_t *head; +	xgi_pcie_block_t *tail; +} xgi_pcie_list_t; + +typedef struct xgi_pcie_heap_s { +	struct list_head free_list; +	struct list_head used_list; +	struct list_head sort_list; +	unsigned long max_freesize; +} xgi_pcie_heap_t; + +#endif diff --git a/linux-core/xgi_regs.h b/linux-core/xgi_regs.h index 18448139..487a7e15 100644 --- a/linux-core/xgi_regs.h +++ b/linux-core/xgi_regs.h @@ -1,410 +1,404 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -
 -#ifndef _XGI_REGS_H_
 -#define _XGI_REGS_H_
 -
 -#ifndef XGI_MMIO
 -    #define XGI_MMIO 1
 -#endif
 -
 -#if XGI_MMIO
 -#define OUTB(port, value)   writeb(value, info->mmio.vbase + port)
 -#define INB(port)           readb(info->mmio.vbase + port)
 -#define OUTW(port, value)   writew(value, info->mmio.vbase + port)
 -#define INW(port)           readw(info->mmio.vbase + port)
 -#define OUTDW(port, value)  writel(value, info->mmio.vbase + port)
 -#define INDW(port)          readl(info->mmio.vbase + port)
 -#else
 -#define OUTB(port, value)   outb(value, port)
 -#define INB(port)           inb(port)
 -#define OUTW(port, value)   outw(value, port)
 -#define INW(port)           inw(port)
 -#define OUTDW(port, value)  outl(value, port)
 -#define INDW(port)          inl(port)
 -#endif
 -
 -/* Hardware access functions */
 -static inline void OUT3C5B(xgi_info_t *info, u8 index, u8 data)
 -{
 -    OUTB(0x3C4, index);
 -    OUTB(0x3C5, data);
 -}
 -
 -static inline void OUT3X5B(xgi_info_t *info, u8 index, u8 data)
 -{
 -    OUTB(0x3D4, index);
 -    OUTB(0x3D5, data);
 -}
 -
 -static inline void OUT3CFB(xgi_info_t *info, u8 index, u8 data)
 -{
 -    OUTB(0x3CE, index);
 -    OUTB(0x3CF, data);
 -}
 -
 -static inline u8 IN3C5B(xgi_info_t *info, u8 index)
 -{
 -    volatile u8 data=0;
 -    OUTB(0x3C4, index);
 -    data = INB(0x3C5);
 -    return data;
 -}
 -
 -static inline u8 IN3X5B(xgi_info_t *info, u8 index)
 -{
 -    volatile u8 data=0;
 -    OUTB(0x3D4, index);
 -    data = INB(0x3D5);
 -    return data;
 -}
 -
 -static inline u8 IN3CFB(xgi_info_t *info, u8 index)
 -{
 -    volatile u8 data=0;
 -    OUTB(0x3CE, index);
 -    data = INB(0x3CF);
 -    return data;
 -}
 -
 -static inline void OUT3C5W(xgi_info_t *info, u8 index, u16 data)
 -{
 -    OUTB(0x3C4, index);
 -    OUTB(0x3C5, data);
 -}
 -
 -static inline void OUT3X5W(xgi_info_t *info, u8 index, u16 data)
 -{
 -    OUTB(0x3D4, index);
 -    OUTB(0x3D5, data);
 -}
 -
 -static inline void OUT3CFW(xgi_info_t *info, u8 index, u8 data)
 -{
 -    OUTB(0x3CE, index);
 -    OUTB(0x3CF, data);
 -}
 -
 -static inline u8 IN3C5W(xgi_info_t *info, u8 index)
 -{
 -    volatile u8 data=0;
 -    OUTB(0x3C4, index);
 -    data = INB(0x3C5);
 -    return data;
 -}
 -
 -static inline u8 IN3X5W(xgi_info_t *info, u8 index)
 -{
 -    volatile u8 data=0;
 -    OUTB(0x3D4, index);
 -    data = INB(0x3D5);
 -    return data;
 -}
 -
 -static inline u8 IN3CFW(xgi_info_t *info, u8 index)
 -{
 -    volatile u8 data=0;
 -    OUTB(0x3CE, index);
 -    data = INB(0x3CF);
 -    return data;
 -}
 -
 -static inline u8 readAttr(xgi_info_t *info, u8 index)
 -{
 -    INB(0x3DA); /* flip-flop to index */
 -    OUTB(0x3C0, index);
 -    return INB(0x3C1);
 -}
 -
 -static inline void writeAttr(xgi_info_t *info, u8 index, u8 value)
 -{
 -    INB(0x3DA); /* flip-flop to index */
 -    OUTB(0x3C0, index);
 -    OUTB(0x3C0, value);
 -}
 -
 -/*
 - * Graphic engine register (2d/3d) acessing interface
 - */
 -static inline void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data)
 -{
 -	/* Jong 05/25/2006 */
 -    XGI_INFO("Jong-WriteRegDWord()-Begin \n");
 -    XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", info->mmio.vbase);
 -    XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr);
 -    XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); 
 -	/* return; */
 -
 -    *(volatile u32*)(info->mmio.vbase + addr) = (data);
 -    XGI_INFO("Jong-WriteRegDWord()-End \n"); 
 -}
 -
 -static inline void WriteRegWord(xgi_info_t *info, u32 addr, u16 data)
 -{
 -    *(volatile u16*)(info->mmio.vbase + addr) = (data);
 -}
 -
 -static inline void WriteRegByte(xgi_info_t *info, u32 addr, u8 data)
 -{
 -    *(volatile u8*)(info->mmio.vbase + addr) = (data);
 -}
 -
 -static inline u32 ReadRegDWord(xgi_info_t *info, u32 addr)
 -{
 -    volatile u32 data;
 -    data = *(volatile u32*)(info->mmio.vbase + addr);
 -    return data;
 -}
 -
 -static inline u16 ReadRegWord(xgi_info_t *info, u32 addr)
 -{
 -    volatile u16 data;
 -    data = *(volatile u16*)(info->mmio.vbase + addr);
 -    return data;
 -}
 -
 -static inline u8 ReadRegByte(xgi_info_t *info, u32 addr)
 -{
 -    volatile u8 data;
 -    data = *(volatile u8*)(info->mmio.vbase + addr);
 -    return data;
 -}
 -#if 0
 -extern void OUT3C5B(xgi_info_t *info, u8 index, u8 data);
 -extern void OUT3X5B(xgi_info_t *info, u8 index, u8 data);
 -extern void OUT3CFB(xgi_info_t *info, u8 index, u8 data);
 -extern u8 IN3C5B(xgi_info_t *info, u8 index);
 -extern u8 IN3X5B(xgi_info_t *info, u8 index);
 -extern u8 IN3CFB(xgi_info_t *info, u8 index);
 -extern void OUT3C5W(xgi_info_t *info, u8 index, u8 data);
 -extern void OUT3X5W(xgi_info_t *info, u8 index, u8 data);
 -extern void OUT3CFW(xgi_info_t *info, u8 index, u8 data);
 -extern u8 IN3C5W(xgi_info_t *info, u8 index);
 -extern u8 IN3X5W(xgi_info_t *info, u8 index);
 -extern u8 IN3CFW(xgi_info_t *info, u8 index);
 -
 -extern void WriteRegDWord(xgi_info_t *info, u32 addr, u32 data);
 -extern void WriteRegWord(xgi_info_t *info, u32 addr, u16 data);
 -extern void WriteRegByte(xgi_info_t *info, u32 addr, u8 data);
 -extern u32 ReadRegDWord(xgi_info_t *info, u32 addr);
 -extern u16 ReadRegWord(xgi_info_t *info, u32 addr);
 -extern u8 ReadRegByte(xgi_info_t *info, u32 addr);
 -
 -extern void EnableProtect();
 -extern void DisableProtect();
 -#endif
 -
 -#define Out(port, data)         OUTB(port, data)
 -#define bOut(port, data)        OUTB(port, data)
 -#define wOut(port, data)        OUTW(port, data)
 -#define dwOut(port, data)       OUTDW(port, data)
 -
 -#define Out3x5(index, data)     OUT3X5B(info, index, data)
 -#define bOut3x5(index, data)    OUT3X5B(info, index, data)
 -#define wOut3x5(index, data)    OUT3X5W(info, index, data)
 -
 -#define Out3c5(index, data)     OUT3C5B(info, index, data)
 -#define bOut3c5(index, data)    OUT3C5B(info, index, data)
 -#define wOut3c5(index, data)    OUT3C5W(info, index, data)
 -
 -#define Out3cf(index, data)     OUT3CFB(info, index, data)
 -#define bOut3cf(index, data)    OUT3CFB(info, index, data)
 -#define wOut3cf(index, data)    OUT3CFW(info, index, data)
 -
 -#define In(port)                INB(port)
 -#define bIn(port)               INB(port)
 -#define wIn(port)               INW(port)
 -#define dwIn(port)              INDW(port)
 -
 -#define In3x5(index)            IN3X5B(info, index)
 -#define bIn3x5(index)           IN3X5B(info, index)
 -#define wIn3x5(index)           IN3X5W(info, index)
 -
 -#define In3c5(index)            IN3C5B(info, index)
 -#define bIn3c5(index)           IN3C5B(info, index)
 -#define wIn3c5(index)           IN3C5W(info, index)
 -
 -#define In3cf(index)            IN3CFB(info, index)
 -#define bIn3cf(index)           IN3CFB(info, index)
 -#define wIn3cf(index)           IN3CFW(info, index)
 -
 -#define dwWriteReg(addr, data)  WriteRegDWord(info, addr, data)
 -#define wWriteReg(addr, data)   WriteRegWord(info, addr, data)
 -#define bWriteReg(addr, data)   WriteRegByte(info, addr, data)
 -#define dwReadReg(addr)         ReadRegDWord(info, addr)
 -#define wReadReg(addr)          ReadRegWord(info, addr)
 -#define bReadReg(addr)          ReadRegByte(info, addr)
 -
 -static inline void xgi_protect_all(xgi_info_t *info)
 -{
 -    OUTB(0x3C4, 0x11);
 -    OUTB(0x3C5, 0x92);
 -}
 -
 -static inline void xgi_unprotect_all(xgi_info_t *info)
 -{
 -    OUTB(0x3C4, 0x11);
 -    OUTB(0x3C5, 0x92);
 -}
 -
 -static inline void xgi_enable_mmio(xgi_info_t *info)
 -{
 -    u8 protect = 0;
 -
 -    /* Unprotect registers */
 -    outb(0x11, 0x3C4);
 -    protect = inb(0x3C5);
 -    outb(0x92, 0x3C5);
 -
 -    outb(0x3A, 0x3D4);
 -    outb(inb(0x3D5) | 0x20, 0x3D5);
 -
 -    /* Enable MMIO */
 -    outb(0x39, 0x3D4);
 -    outb(inb(0x3D5) | 0x01, 0x3D5);
 -
 -    OUTB(0x3C4, 0x11);
 -    OUTB(0x3C5, protect);
 -}
 -
 -static inline void xgi_disable_mmio(xgi_info_t *info)
 -{
 -    u8   protect = 0;
 -
 -    /* unprotect registers */
 -    OUTB(0x3C4, 0x11);
 -    protect = INB(0x3C5);
 -    OUTB(0x3C5, 0x92);
 -
 -    /* Disable MMIO access */
 -    OUTB(0x3D4, 0x39);
 -    OUTB(0x3D5, INB(0x3D5) & 0xFE);
 -
 -    /* Protect registers */
 -    outb(0x11, 0x3C4);
 -    outb(protect, 0x3C5);
 -}
 -
 -static inline void xgi_enable_ge(xgi_info_t *info)
 -{
 -    unsigned char   bOld3cf2a = 0;
 -    int             wait = 0;
 -
 -    // Enable GE
 -    OUTW(0x3C4, 0x9211);
 -
 -    // Save and close dynamic gating
 -    bOld3cf2a = bIn3cf(0x2a);
 -    bOut3cf(0x2a, bOld3cf2a & 0xfe);
 -
 -    // Reset both 3D and 2D engine
 -    bOut3x5(0x36, 0x84);
 -    wait = 10;
 -    while (wait--)
 -    {
 -        bIn(0x36);
 -    }
 -    bOut3x5(0x36, 0x94);
 -    wait = 10;
 -    while (wait--)
 -    {
 -        bIn(0x36);
 -    }
 -    bOut3x5(0x36, 0x84);
 -    wait = 10;
 -    while (wait--)
 -    {
 -        bIn(0x36);
 -    }
 -    // Enable 2D engine only
 -    bOut3x5(0x36, 0x80);
 -
 -	// Enable 2D+3D engine
 -    bOut3x5(0x36, 0x84);
 -
 -    // Restore dynamic gating
 -    bOut3cf(0x2a, bOld3cf2a);
 -}
 -
 -static inline void xgi_disable_ge(xgi_info_t *info)
 -{
 -    int     wait = 0;
 -
 -    // Reset both 3D and 2D engine
 -    bOut3x5(0x36, 0x84);
 -
 -    wait = 10;
 -    while (wait--)
 -    {
 -        bIn(0x36);
 -    }
 -    bOut3x5(0x36, 0x94);
 -
 -    wait = 10;
 -    while (wait--)
 -    {
 -        bIn(0x36);
 -    }
 -    bOut3x5(0x36, 0x84);
 -
 -    wait = 10;
 -    while (wait--)
 -    {
 -        bIn(0x36);
 -    }
 -
 -    // Disable 2D engine only
 -    bOut3x5(0x36, 0);
 -}
 -
 -static inline void xgi_enable_dvi_interrupt(xgi_info_t *info)
 -{
 -    Out3cf(0x39, In3cf(0x39) & ~0x01);  //Set 3cf.39 bit 0 to 0
 -    Out3cf(0x39, In3cf(0x39) | 0x01);   //Set 3cf.39 bit 0 to 1
 -    Out3cf(0x39, In3cf(0x39) | 0x02);
 -}
 -static inline void xgi_disable_dvi_interrupt(xgi_info_t *info)
 -{
 -    Out3cf(0x39,In3cf(0x39) & ~0x02);
 -}
 -
 -static inline void xgi_enable_crt1_interrupt(xgi_info_t *info)
 -{
 -    Out3cf(0x3d,In3cf(0x3d) | 0x04);
 -    Out3cf(0x3d,In3cf(0x3d) & ~0x04);
 -    Out3cf(0x3d,In3cf(0x3d) | 0x08);
 -}
 -
 -static inline void xgi_disable_crt1_interrupt(xgi_info_t *info)
 -{
 -    Out3cf(0x3d,In3cf(0x3d) & ~0x08);
 -}
 -
 -#endif
 -
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#ifndef _XGI_REGS_H_ +#define _XGI_REGS_H_ + +#ifndef XGI_MMIO +#define XGI_MMIO 1 +#endif + +#if XGI_MMIO +#define OUTB(port, value)   writeb(value, info->mmio.vbase + port) +#define INB(port)           readb(info->mmio.vbase + port) +#define OUTW(port, value)   writew(value, info->mmio.vbase + port) +#define INW(port)           readw(info->mmio.vbase + port) +#define OUTDW(port, value)  writel(value, info->mmio.vbase + port) +#define INDW(port)          readl(info->mmio.vbase + port) +#else +#define OUTB(port, value)   outb(value, port) +#define INB(port)           inb(port) +#define OUTW(port, value)   outw(value, port) +#define INW(port)           inw(port) +#define OUTDW(port, value)  outl(value, port) +#define INDW(port)          inl(port) +#endif + +/* Hardware access functions */ +static inline void OUT3C5B(xgi_info_t * info, u8 index, u8 data) +{ +	OUTB(0x3C4, index); +	OUTB(0x3C5, data); +} + +static inline void OUT3X5B(xgi_info_t * info, u8 index, u8 data) +{ +	OUTB(0x3D4, index); +	OUTB(0x3D5, data); +} + +static inline void OUT3CFB(xgi_info_t * info, u8 index, u8 data) +{ +	OUTB(0x3CE, index); +	OUTB(0x3CF, data); +} + +static inline u8 IN3C5B(xgi_info_t * info, u8 index) +{ +	volatile u8 data = 0; +	OUTB(0x3C4, index); +	data = INB(0x3C5); +	return data; +} + +static inline u8 IN3X5B(xgi_info_t * info, u8 index) +{ +	volatile u8 data = 0; +	OUTB(0x3D4, index); +	data = INB(0x3D5); +	return data; +} + +static inline u8 IN3CFB(xgi_info_t * info, u8 index) +{ +	volatile u8 data = 0; +	OUTB(0x3CE, index); +	data = INB(0x3CF); +	return data; +} + +static inline void OUT3C5W(xgi_info_t * info, u8 index, u16 data) +{ +	OUTB(0x3C4, index); +	OUTB(0x3C5, data); +} + +static inline void OUT3X5W(xgi_info_t * info, u8 index, u16 data) +{ +	OUTB(0x3D4, index); +	OUTB(0x3D5, data); +} + +static inline void OUT3CFW(xgi_info_t * info, u8 index, u8 data) +{ +	OUTB(0x3CE, index); +	
OUTB(0x3CF, data); +} + +static inline u8 IN3C5W(xgi_info_t * info, u8 index) +{ +	volatile u8 data = 0; +	OUTB(0x3C4, index); +	data = INB(0x3C5); +	return data; +} + +static inline u8 IN3X5W(xgi_info_t * info, u8 index) +{ +	volatile u8 data = 0; +	OUTB(0x3D4, index); +	data = INB(0x3D5); +	return data; +} + +static inline u8 IN3CFW(xgi_info_t * info, u8 index) +{ +	volatile u8 data = 0; +	OUTB(0x3CE, index); +	data = INB(0x3CF); +	return data; +} + +static inline u8 readAttr(xgi_info_t * info, u8 index) +{ +	INB(0x3DA);		/* flip-flop to index */ +	OUTB(0x3C0, index); +	return INB(0x3C1); +} + +static inline void writeAttr(xgi_info_t * info, u8 index, u8 value) +{ +	INB(0x3DA);		/* flip-flop to index */ +	OUTB(0x3C0, index); +	OUTB(0x3C0, value); +} + +/* + * Graphic engine register (2d/3d) acessing interface + */ +static inline void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data) +{ +	/* Jong 05/25/2006 */ +	XGI_INFO("Jong-WriteRegDWord()-Begin \n"); +	XGI_INFO("Jong-WriteRegDWord()-info->mmio.vbase=0x%lx \n", +		 info->mmio.vbase); +	XGI_INFO("Jong-WriteRegDWord()-addr=0x%lx \n", addr); +	XGI_INFO("Jong-WriteRegDWord()-data=0x%lx \n", data); +	/* return; */ + +	*(volatile u32 *)(info->mmio.vbase + addr) = (data); +	XGI_INFO("Jong-WriteRegDWord()-End \n"); +} + +static inline void WriteRegWord(xgi_info_t * info, u32 addr, u16 data) +{ +	*(volatile u16 *)(info->mmio.vbase + addr) = (data); +} + +static inline void WriteRegByte(xgi_info_t * info, u32 addr, u8 data) +{ +	*(volatile u8 *)(info->mmio.vbase + addr) = (data); +} + +static inline u32 ReadRegDWord(xgi_info_t * info, u32 addr) +{ +	volatile u32 data; +	data = *(volatile u32 *)(info->mmio.vbase + addr); +	return data; +} + +static inline u16 ReadRegWord(xgi_info_t * info, u32 addr) +{ +	volatile u16 data; +	data = *(volatile u16 *)(info->mmio.vbase + addr); +	return data; +} + +static inline u8 ReadRegByte(xgi_info_t * info, u32 addr) +{ +	volatile u8 data; +	data = *(volatile u8 *)(info->mmio.vbase + 
addr); +	return data; +} + +#if 0 +extern void OUT3C5B(xgi_info_t * info, u8 index, u8 data); +extern void OUT3X5B(xgi_info_t * info, u8 index, u8 data); +extern void OUT3CFB(xgi_info_t * info, u8 index, u8 data); +extern u8 IN3C5B(xgi_info_t * info, u8 index); +extern u8 IN3X5B(xgi_info_t * info, u8 index); +extern u8 IN3CFB(xgi_info_t * info, u8 index); +extern void OUT3C5W(xgi_info_t * info, u8 index, u8 data); +extern void OUT3X5W(xgi_info_t * info, u8 index, u8 data); +extern void OUT3CFW(xgi_info_t * info, u8 index, u8 data); +extern u8 IN3C5W(xgi_info_t * info, u8 index); +extern u8 IN3X5W(xgi_info_t * info, u8 index); +extern u8 IN3CFW(xgi_info_t * info, u8 index); + +extern void WriteRegDWord(xgi_info_t * info, u32 addr, u32 data); +extern void WriteRegWord(xgi_info_t * info, u32 addr, u16 data); +extern void WriteRegByte(xgi_info_t * info, u32 addr, u8 data); +extern u32 ReadRegDWord(xgi_info_t * info, u32 addr); +extern u16 ReadRegWord(xgi_info_t * info, u32 addr); +extern u8 ReadRegByte(xgi_info_t * info, u32 addr); + +extern void EnableProtect(); +extern void DisableProtect(); +#endif + +#define Out(port, data)         OUTB(port, data) +#define bOut(port, data)        OUTB(port, data) +#define wOut(port, data)        OUTW(port, data) +#define dwOut(port, data)       OUTDW(port, data) + +#define Out3x5(index, data)     OUT3X5B(info, index, data) +#define bOut3x5(index, data)    OUT3X5B(info, index, data) +#define wOut3x5(index, data)    OUT3X5W(info, index, data) + +#define Out3c5(index, data)     OUT3C5B(info, index, data) +#define bOut3c5(index, data)    OUT3C5B(info, index, data) +#define wOut3c5(index, data)    OUT3C5W(info, index, data) + +#define Out3cf(index, data)     OUT3CFB(info, index, data) +#define bOut3cf(index, data)    OUT3CFB(info, index, data) +#define wOut3cf(index, data)    OUT3CFW(info, index, data) + +#define In(port)                INB(port) +#define bIn(port)               INB(port) +#define wIn(port)               INW(port) 
+#define dwIn(port)              INDW(port) + +#define In3x5(index)            IN3X5B(info, index) +#define bIn3x5(index)           IN3X5B(info, index) +#define wIn3x5(index)           IN3X5W(info, index) + +#define In3c5(index)            IN3C5B(info, index) +#define bIn3c5(index)           IN3C5B(info, index) +#define wIn3c5(index)           IN3C5W(info, index) + +#define In3cf(index)            IN3CFB(info, index) +#define bIn3cf(index)           IN3CFB(info, index) +#define wIn3cf(index)           IN3CFW(info, index) + +#define dwWriteReg(addr, data)  WriteRegDWord(info, addr, data) +#define wWriteReg(addr, data)   WriteRegWord(info, addr, data) +#define bWriteReg(addr, data)   WriteRegByte(info, addr, data) +#define dwReadReg(addr)         ReadRegDWord(info, addr) +#define wReadReg(addr)          ReadRegWord(info, addr) +#define bReadReg(addr)          ReadRegByte(info, addr) + +static inline void xgi_protect_all(xgi_info_t * info) +{ +	OUTB(0x3C4, 0x11); +	OUTB(0x3C5, 0x92); +} + +static inline void xgi_unprotect_all(xgi_info_t * info) +{ +	OUTB(0x3C4, 0x11); +	OUTB(0x3C5, 0x92); +} + +static inline void xgi_enable_mmio(xgi_info_t * info) +{ +	u8 protect = 0; + +	/* Unprotect registers */ +	outb(0x11, 0x3C4); +	protect = inb(0x3C5); +	outb(0x92, 0x3C5); + +	outb(0x3A, 0x3D4); +	outb(inb(0x3D5) | 0x20, 0x3D5); + +	/* Enable MMIO */ +	outb(0x39, 0x3D4); +	outb(inb(0x3D5) | 0x01, 0x3D5); + +	OUTB(0x3C4, 0x11); +	OUTB(0x3C5, protect); +} + +static inline void xgi_disable_mmio(xgi_info_t * info) +{ +	u8 protect = 0; + +	/* unprotect registers */ +	OUTB(0x3C4, 0x11); +	protect = INB(0x3C5); +	OUTB(0x3C5, 0x92); + +	/* Disable MMIO access */ +	OUTB(0x3D4, 0x39); +	OUTB(0x3D5, INB(0x3D5) & 0xFE); + +	/* Protect registers */ +	outb(0x11, 0x3C4); +	outb(protect, 0x3C5); +} + +static inline void xgi_enable_ge(xgi_info_t * info) +{ +	unsigned char bOld3cf2a = 0; +	int wait = 0; + +	// Enable GE +	OUTW(0x3C4, 0x9211); + +	// Save and close dynamic gating +	bOld3cf2a = 
bIn3cf(0x2a); +	bOut3cf(0x2a, bOld3cf2a & 0xfe); + +	// Reset both 3D and 2D engine +	bOut3x5(0x36, 0x84); +	wait = 10; +	while (wait--) { +		bIn(0x36); +	} +	bOut3x5(0x36, 0x94); +	wait = 10; +	while (wait--) { +		bIn(0x36); +	} +	bOut3x5(0x36, 0x84); +	wait = 10; +	while (wait--) { +		bIn(0x36); +	} +	// Enable 2D engine only +	bOut3x5(0x36, 0x80); + +	// Enable 2D+3D engine +	bOut3x5(0x36, 0x84); + +	// Restore dynamic gating +	bOut3cf(0x2a, bOld3cf2a); +} + +static inline void xgi_disable_ge(xgi_info_t * info) +{ +	int wait = 0; + +	// Reset both 3D and 2D engine +	bOut3x5(0x36, 0x84); + +	wait = 10; +	while (wait--) { +		bIn(0x36); +	} +	bOut3x5(0x36, 0x94); + +	wait = 10; +	while (wait--) { +		bIn(0x36); +	} +	bOut3x5(0x36, 0x84); + +	wait = 10; +	while (wait--) { +		bIn(0x36); +	} + +	// Disable 2D engine only +	bOut3x5(0x36, 0); +} + +static inline void xgi_enable_dvi_interrupt(xgi_info_t * info) +{ +	Out3cf(0x39, In3cf(0x39) & ~0x01);	//Set 3cf.39 bit 0 to 0 +	Out3cf(0x39, In3cf(0x39) | 0x01);	//Set 3cf.39 bit 0 to 1 +	Out3cf(0x39, In3cf(0x39) | 0x02); +} +static inline void xgi_disable_dvi_interrupt(xgi_info_t * info) +{ +	Out3cf(0x39, In3cf(0x39) & ~0x02); +} + +static inline void xgi_enable_crt1_interrupt(xgi_info_t * info) +{ +	Out3cf(0x3d, In3cf(0x3d) | 0x04); +	Out3cf(0x3d, In3cf(0x3d) & ~0x04); +	Out3cf(0x3d, In3cf(0x3d) | 0x08); +} + +static inline void xgi_disable_crt1_interrupt(xgi_info_t * info) +{ +	Out3cf(0x3d, In3cf(0x3d) & ~0x08); +} + +#endif diff --git a/linux-core/xgi_types.h b/linux-core/xgi_types.h index 24cb8f3c..65ec498b 100644 --- a/linux-core/xgi_types.h +++ b/linux-core/xgi_types.h @@ -1,68 +1,67 @@ -
 -/****************************************************************************
 - * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			
 - *																			*
 - * All Rights Reserved.														*
 - *																			*
 - * Permission is hereby granted, free of charge, to any person obtaining
 - * a copy of this software and associated documentation files (the	
 - * "Software"), to deal in the Software without restriction, including	
 - * without limitation on the rights to use, copy, modify, merge,	
 - * publish, distribute, sublicense, and/or sell copies of the Software,	
 - * and to permit persons to whom the Software is furnished to do so,	
 - * subject to the following conditions:					
 - *																			*
 - * The above copyright notice and this permission notice (including the	
 - * next paragraph) shall be included in all copies or substantial	
 - * portions of the Software.						
 - *																			*
 - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	
 - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	
 - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		
 - * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			
 - * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		
 - * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		
 - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			
 - * DEALINGS IN THE SOFTWARE.												
 - ***************************************************************************/
 -
 -#ifndef _XGI_TYPES_H_
 -#define _XGI_TYPES_H_
 -
 -/****************************************************************************
 - *                                 Typedefs                                 *
 - ***************************************************************************/
 -
 -typedef unsigned char       V8;  /* "void": enumerated or multiple fields   */
 -typedef unsigned short      V16; /* "void": enumerated or multiple fields   */
 -typedef unsigned char       U8;  /* 0 to 255                                */
 -typedef unsigned short      U16; /* 0 to 65535                              */
 -typedef signed char         S8;  /* -128 to 127                             */
 -typedef signed short        S16; /* -32768 to 32767                         */
 -typedef float               F32; /* IEEE Single Precision (S1E8M23)         */
 -typedef double              F64; /* IEEE Double Precision (S1E11M52)        */
 -typedef unsigned long       BOOL;
 -/*
 - * mainly for 64-bit linux, where long is 64 bits
 - * and win9x, where int is 16 bit.
 - */
 -#if defined(vxworks)
 -typedef unsigned int       V32; /* "void": enumerated or multiple fields   */
 -typedef unsigned int       U32; /* 0 to 4294967295                         */
 -typedef signed int         S32; /* -2147483648 to 2147483647               */
 -#else
 -typedef unsigned long      V32; /* "void": enumerated or multiple fields   */
 -typedef unsigned long      U32; /* 0 to 4294967295                         */
 -typedef signed long        S32; /* -2147483648 to 2147483647               */
 -#endif
 -
 -#ifndef TRUE
 -#define TRUE    1UL
 -#endif
 -
 -#ifndef FALSE
 -#define FALSE   0UL
 -#endif
 -
 -#endif
 -
 + +/**************************************************************************** + * Copyright (C) 2003-2006 by XGI Technology, Taiwan.			 + *																			* + * All Rights Reserved.														* + *																			* + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the	 + * "Software"), to deal in the Software without restriction, including	 + * without limitation on the rights to use, copy, modify, merge,	 + * publish, distribute, sublicense, and/or sell copies of the Software,	 + * and to permit persons to whom the Software is furnished to do so,	 + * subject to the following conditions:					 + *																			* + * The above copyright notice and this permission notice (including the	 + * next paragraph) shall be included in all copies or substantial	 + * portions of the Software.						 + *																			* + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,	 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF	 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND		 + * NON-INFRINGEMENT.  IN NO EVENT SHALL XGI AND/OR			 + * ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,		 + * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,		 + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER			 + * DEALINGS IN THE SOFTWARE.												 
+ ***************************************************************************/ + +#ifndef _XGI_TYPES_H_ +#define _XGI_TYPES_H_ + +/**************************************************************************** + *                                 Typedefs                                 * + ***************************************************************************/ + +typedef unsigned char V8;	/* "void": enumerated or multiple fields   */ +typedef unsigned short V16;	/* "void": enumerated or multiple fields   */ +typedef unsigned char U8;	/* 0 to 255                                */ +typedef unsigned short U16;	/* 0 to 65535                              */ +typedef signed char S8;		/* -128 to 127                             */ +typedef signed short S16;	/* -32768 to 32767                         */ +typedef float F32;		/* IEEE Single Precision (S1E8M23)         */ +typedef double F64;		/* IEEE Double Precision (S1E11M52)        */ +typedef unsigned long BOOL; +/* + * mainly for 64-bit linux, where long is 64 bits + * and win9x, where int is 16 bit. + */ +#if defined(vxworks) +typedef unsigned int V32;	/* "void": enumerated or multiple fields   */ +typedef unsigned int U32;	/* 0 to 4294967295                         */ +typedef signed int S32;		/* -2147483648 to 2147483647               */ +#else +typedef unsigned long V32;	/* "void": enumerated or multiple fields   */ +typedef unsigned long U32;	/* 0 to 4294967295                         */ +typedef signed long S32;	/* -2147483648 to 2147483647               */ +#endif + +#ifndef TRUE +#define TRUE    1UL +#endif + +#ifndef FALSE +#define FALSE   0UL +#endif + +#endif | 
