Example #1
int32 swTable_readEntry(uint32 tableType, uint32 eidx, void *entryContent_P)
{
    uint32 *    entryAddr;

    REG32(SWTCR0) = REG32(SWTCR0) | EN_STOP_TLU;
    while ((REG32(SWTCR0) & STOP_TLU_READY) == 0);

    ASSERT_CSP(entryContent_P);
    
    entryAddr = (uint32 *) (table_access_addr_base(tableType) + eidx * TABLE_ENTRY_DISTANCE);
    
    /* Wait for any previous table-access command to complete */
    while ( (REG32(SWTACR) & ACTION_MASK) != ACTION_DONE );
    
    /* Read registers according to entry width of each table */
    *((uint32 *)entryContent_P + 7) = *(entryAddr + 7);
    *((uint32 *)entryContent_P + 6) = *(entryAddr + 6);
    *((uint32 *)entryContent_P + 5) = *(entryAddr + 5);
    *((uint32 *)entryContent_P + 4) = *(entryAddr + 4);
    *((uint32 *)entryContent_P + 3) = *(entryAddr + 3);
    *((uint32 *)entryContent_P + 2) = *(entryAddr + 2);
    *((uint32 *)entryContent_P + 1) = *(entryAddr + 1);
    *((uint32 *)entryContent_P + 0) = *(entryAddr + 0);

    REG32(SWTCR0) = REG32(SWTCR0) & ~EN_STOP_TLU;

    return 0;
}
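
The sequence above (set EN_STOP_TLU, poll STOP_TLU_READY, read the eight 32-bit words mapped at the entry address, then re-enable lookups) can be driven from a small caller. Below is a minimal sketch, not part of the original SDK: the table-type constant TYPE_L2_SWITCH_TABLE and the use of dprintf() for output are assumptions, and the destination buffer simply matches the widest 8-word entry read above.

/* Hedged usage sketch (not in the SDK): dump one table entry.
 * TYPE_L2_SWITCH_TABLE is a hypothetical table-type constant; any value
 * accepted by table_access_addr_base() would do. */
static void dump_table_entry_example(void)
{
    uint32 entry[8];    /* swTable_readEntry() copies up to eight 32-bit words */
    uint32 w;

    if (swTable_readEntry(TYPE_L2_SWITCH_TABLE, 5, entry) == 0)
    {
        for (w = 0; w < 8; w++)
            dprintf("word[%u] = 0x%08x\n", w, entry[w]);
    }
}
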
int32 swNic_send_portmbr(void * output, uint32 len, uint32 portmbr)
{
    struct pktHdr * pPkthdr;
    uint8 pktbuf[2048];
    /* Mask the low two bits for 4-byte alignment and map the buffer into uncached KSEG1 */
    uint8* pktbuf_alligned = (uint8*) (((uint32) pktbuf & 0xfffffffc) | 0xa0000000);

    /* Copy Packet Content */
    memcpy(pktbuf_alligned, output, len);

    ASSERT_CSP( ((int32) txPkthdrRing[0][currTxPkthdrDescIndex] & DESC_OWNED_BIT) == DESC_RISC_OWNED );

    /* Fetch packet header from Tx ring */
    pPkthdr = (struct pktHdr *) ((int32) txPkthdrRing[0][currTxPkthdrDescIndex] 
                                                & ~(DESC_OWNED_BIT | DESC_WRAP));

    /* Pad small packets and add CRC */
    if ( len < 60 )
        pPkthdr->ph_len = 64;
    else
        pPkthdr->ph_len = len + 4;

    pPkthdr->ph_mbuf->m_len = pPkthdr->ph_len;
    pPkthdr->ph_mbuf->m_extsize = pPkthdr->ph_len;

    /* Set cluster pointer to buffer */
    pPkthdr->ph_mbuf->m_data = pktbuf_alligned;
    pPkthdr->ph_mbuf->m_extbuf = pktbuf_alligned;

    /* Set destination port */
    pPkthdr->ph_portlist = portmbr;

    /* Give descriptor to switch core */
    txPkthdrRing[0][currTxPkthdrDescIndex] |= DESC_SWCORE_OWNED;

    /* Set TXFD bit to start send */
    REG32(CPUICR) |= TXFD;
    
    /* Wait until packet is successfully sent */
    while ( (*(volatile uint32 *)&txPkthdrRing[0][currTxPkthdrDescIndex] 
                    & DESC_OWNED_BIT) == DESC_SWCORE_OWNED )
    {
#ifdef CONFIG_RTL8196C_SWITCH_PATCH
#ifdef CONFIG_AUTO_IDENTIFY
        if (REG32(REVR) == RTL8196C_REVISION_A)
            txPkthdrRing[0][currTxPkthdrDescIndex] = txPkthdrRing_BAK[0][currTxPkthdrDescIndex];
#endif
#endif
    }
    txPktCounter++;
    
    if ( ++currTxPkthdrDescIndex == txPkthdrRingCnt[0] )
        currTxPkthdrDescIndex = 0;

    return 0;
}
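
A minimal caller sketch for the routine above, assuming the frame layout and the one-bit-per-port encoding of portmbr (the encoding is inferred from how ph_portlist is used, not confirmed by the SDK headers):

/* Hedged usage sketch (not in the SDK): send a 60-byte broadcast frame out
 * of switch port 0.  The 1 << port encoding of the member mask is an
 * assumption. */
static void send_test_frame_example(void)
{
    uint8 frame[60];

    memset(frame, 0, sizeof(frame));
    memset(frame, 0xff, 6);                 /* destination MAC: broadcast   */
    /* bytes 6..11 (source MAC) and 12..13 (EtherType) are left zero here   */

    swNic_send_portmbr(frame, sizeof(frame), 1 << 0);
}
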
Example #3
//RTL_STATIC_INLINE void tableAccessForeword(uint32 tableType, uint32 eidx,     void *entryContent_P)
void tableAccessForeword(uint32 tableType, uint32 eidx,     void *entryContent_P)
{
    ASSERT_CSP(entryContent_P);

    /* Wait for command done */
    while ( (REG32(SWTACR) & ACTION_MASK) != ACTION_DONE );
    
    /* Write registers according to entry width of each table */
    REG32(TCR7) = *((uint32 *)entryContent_P + 7);
    REG32(TCR6) = *((uint32 *)entryContent_P + 6);
    REG32(TCR5) = *((uint32 *)entryContent_P + 5);
    REG32(TCR4) = *((uint32 *)entryContent_P + 4);
    REG32(TCR3) = *((uint32 *)entryContent_P + 3);
    REG32(TCR2) = *((uint32 *)entryContent_P + 2);
    REG32(TCR1) = *((uint32 *)entryContent_P + 1);
    REG32(TCR0) = *(uint32 *)entryContent_P;
    
    /* Fill address */
    REG32(SWTAA) = table_access_addr_base(tableType) + eidx * TABLE_ENTRY_DISTANCE;
}
Example #4
int32 swTable_forceAddEntry(uint32 tableType, uint32 eidx, void *entryContent_P)
{
    REG32(SWTCR0) = REG32(SWTCR0) | EN_STOP_TLU;
    while ((REG32(SWTCR0) & STOP_TLU_READY) == 0);

    tableAccessForeword(tableType, eidx, entryContent_P);
        
    /* Activate add command */
    REG32(SWTACR) = ACTION_START | CMD_FORCE;
    
    /* Wait for command done */
    while ( (REG32(SWTACR) & ACTION_MASK) != ACTION_DONE );

    REG32(SWTCR0) = REG32(SWTCR0) & ~EN_STOP_TLU;

    /* Check status */
    if ( (REG32(SWTASR) & TABSTS_MASK) == TABSTS_SUCCESS )
        return 0;
        
    /* There might be something wrong */
    ASSERT_CSP( 0 );

    return -1;  /* not reached when ASSERT_CSP() halts; keeps every path of this int32 function returning a value */
}
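
The force-add path pairs naturally with swTable_readEntry() from Example #1: read an entry, patch it, and write it back. A minimal read-modify-write sketch follows; treating bit 0 of the first word as a valid bit is purely illustrative.

/* Hedged read-modify-write sketch (not in the SDK).  The meaning of bit 0
 * in word 0 is hypothetical and depends on the table layout. */
static int32 rewrite_entry_example(uint32 tableType, uint32 eidx)
{
    uint32 entry[8];

    if (swTable_readEntry(tableType, eidx, entry) != 0)
        return -1;

    entry[0] |= 0x1;                        /* illustrative only */

    return swTable_forceAddEntry(tableType, eidx, entry);
}
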
void free(void *ap)
/*
        Return memory to free list.  Where possible makes
        contiguous blocks of free memory.

        NOTE: assumes that 0 is not a valid address for
            allocation.
        NOTE2: i_alloc() must be called prior to using either
            free() or malloc(), otherwise free list will be null.
*/
{
    HEADER  *nxt, *prev, *f;

    ASSERT_CSP(ap);
    ASSERT_CSP(frhd);

    f = (HEADER *)ap - 1;   /* point to the header of the block being returned */
    memleft += f->size;
    /*
            Note: frhd is never NULL unless i_alloc() was
                never called to initialize package.
    */
    if (frhd > f)
    {
        /*
                free queue head is higher up in memory
                than returnee
        */
        nxt = frhd;                     /* old head */
        frhd = f;                       /* new head */
        prev = f + f->size;     /* right after new head */

        if (prev==nxt) /* old and new are contiguous */
        {
            f->size += nxt->size;
            f->ptr = nxt->ptr;      /* contiguate */
        }
        else f->ptr = nxt;
        return;
    }
    /*
            Otherwise current free space head is lower in
            memory. Walk down free space list looking for
            the block being returned. If the next pointer
            points past the block, make a new entry and
            link it.  If next pointer + its size points to the
            block form one contiguous block.
    */
    for (nxt = frhd; nxt && nxt < f; prev = nxt, nxt = nxt->ptr)
    {
        if (nxt+nxt->size == f)
        {
            /* they are contiguous */
            nxt->size += f->size;
            /* form one block */
            f = nxt + nxt->size;
            if (f==nxt->ptr)
            {
                /*
                        The new larger block is contiguous
                        with the next free block, so form a
                        larger block. There is no need to
                        continue this checking since if the
                        block following this free one was
                        free, the two would have been
                        made one already.
                */
                nxt->size += f->size;
                nxt->ptr = f->ptr;
            }
            return;
        }
    }
    /*
            Otherwise, the addr of the block being returned
            is greater than one in the free queue ('nxt') or
            the end of the queue was reached. If at end, just
            link to the end of the queue.  Therefore, 'nxt' is
            either NULL or points to a block higher up in
            memory than the one being returned.
    */
    prev->ptr = f;  /* link to queue */
    prev = f + f->size;     /* right after space to free */
    if (prev == nxt)        /* f and nxt are contiguous */
    {
        f->size += nxt->size;
        /* form a larger contiguous block */
        f->ptr = nxt->ptr;
    }
    else f->ptr = nxt;
    return;
}
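
free() above relies on declarations that live elsewhere in this package. The sketch below reconstructs them from the pointer arithmetic only (sizes are counted in HEADER-sized units and include the header itself); the field names, their order, and the i_alloc() body are assumptions, not the original definitions.

/* Hedged sketch of the allocator state assumed by free() above. */
typedef struct header {
    struct header *ptr;          /* next block on the free list, NULL at end   */
    unsigned int   size;         /* block length in HEADER units, header incl. */
} HEADER;

static HEADER       *frhd;       /* head of the free list, set by i_alloc()    */
static unsigned int  memleft;    /* running total of free HEADER units         */

static HEADER arena[1024];       /* illustrative arena handed to the allocator */

/* i_alloc(): publish the whole arena as a single free block, so that
 * malloc()/free() have a non-NULL list to work with. */
void i_alloc(void)
{
    frhd = arena;
    frhd->ptr  = NULL;
    frhd->size = sizeof(arena) / sizeof(HEADER);
    memleft    = frhd->size;
}
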
Example #6
int32 swNic_init(uint32 userNeedRxPkthdrRingCnt[RTL865X_SWNIC_RXRING_HW_PKTDESC],
                 uint32 userNeedRxMbufRingCnt,
                 uint32 userNeedTxPkthdrRingCnt[RTL865X_SWNIC_TXRING_HW_PKTDESC],
                 uint32 clusterSize)
{
	uint32 i, j, k;
	static uint32 totalRxPkthdrRingCnt = 0, totalTxPkthdrRingCnt = 0;
	static struct rtl_pktHdr *pPkthdrList_start;
	static struct rtl_mBuf *pMbufList_start;
	struct rtl_pktHdr *pPkthdrList;
	struct rtl_mBuf *pMbufList;
	struct rtl_pktHdr * pPkthdr;
	struct rtl_mBuf * pMbuf;
	unsigned long flags=0;
	int	ret;

#if defined(CONFIG_RTL_8198C) && defined(_PKTHDR_CACHEABLE)
	int cpu_dcache_line = cpu_dcache_line_size(); // see arch/mips/include/asm/cpu-features.h
#endif

	/* init const array for rx pre-process	*/
	extPortMaskToPortNum[0] = 5;
	extPortMaskToPortNum[1] = 6;
	extPortMaskToPortNum[2] = 7;
	extPortMaskToPortNum[3] = 5;
	extPortMaskToPortNum[4] = 8;
	extPortMaskToPortNum[5] = 5;
	extPortMaskToPortNum[6] = 5;
	extPortMaskToPortNum[7] = 5;

#if	defined(DELAY_REFILL_ETH_RX_BUF)
	rxPkthdrRefillThreshold[0] = ETH_REFILL_THRESHOLD;
	rxPkthdrRefillThreshold[1] = ETH_REFILL_THRESHOLD1;
	rxPkthdrRefillThreshold[2] = ETH_REFILL_THRESHOLD2;
	rxPkthdrRefillThreshold[3] = ETH_REFILL_THRESHOLD3;
	rxPkthdrRefillThreshold[4] = ETH_REFILL_THRESHOLD4;
	rxPkthdrRefillThreshold[5] = ETH_REFILL_THRESHOLD5;
#endif

	#if defined(CONFIG_RTL8196C_REVISION_B)
	rtl_chip_version = REG32(REVR);
	#endif

	ret = SUCCESS;
	SMP_LOCK_ETH_RECV(flags);
	if (rxMbufRing == NULL)
	{
		size_of_cluster = clusterSize;

		/* Allocate Rx descriptors of rings */
		for (i = 0; i < RTL865X_SWNIC_RXRING_HW_PKTDESC; i++) {
			rxPkthdrRingCnt[i] = userNeedRxPkthdrRingCnt[i];
			if (rxPkthdrRingCnt[i] == 0)
			{
				rxPkthdrRing[i] = NULL;
				continue;
			}

			rxPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(rxPkthdrRingCnt[i] * sizeof(uint32*));
			ASSERT_CSP( (uint32) rxPkthdrRing[i] & 0x0fffffff );

			totalRxPkthdrRingCnt += rxPkthdrRingCnt[i];
		}

		if (totalRxPkthdrRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		/* Allocate Tx descriptors of rings */
		for (i = 0; i < RTL865X_SWNIC_TXRING_HW_PKTDESC; i++) {
			txPkthdrRingCnt[i] = userNeedTxPkthdrRingCnt[i];

			if (txPkthdrRingCnt[i] == 0)
			{
				txPkthdrRing[i] = NULL;
				continue;
			}

			txPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32*));
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i]=(uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32));
			#endif

			ASSERT_CSP( (uint32) txPkthdrRing[i] & 0x0fffffff );

			totalTxPkthdrRingCnt += txPkthdrRingCnt[i];
		}

		if (totalTxPkthdrRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		/* Allocate MBuf descriptors of rings */
		rxMbufRingCnt = userNeedRxMbufRingCnt;

		if (userNeedRxMbufRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		rxMbufRing = (uint32 *) UNCACHED_MALLOC((rxMbufRingCnt+RESERVERD_MBUF_RING_NUM) * sizeof(uint32*));
		ASSERT_CSP( (uint32) rxMbufRing & 0x0fffffff );

		/* Allocate pkthdr */
#ifdef _PKTHDR_CACHEABLE

#if 0 //defined(CONFIG_RTL_8198C)
		pPkthdrList_start = (struct rtl_pktHdr *) kmalloc(
		(totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) kmalloc(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );

#else
		pPkthdrList_start = (struct rtl_pktHdr *) kmalloc(
		(totalRxPkthdrRingCnt+totalTxPkthdrRingCnt+1) * sizeof(struct rtl_pktHdr), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		pPkthdrList_start = (struct rtl_pktHdr *)(((uint32) pPkthdrList_start + (cpu_dcache_line - 1))& ~(cpu_dcache_line - 1));
	
		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) kmalloc(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+totalTxPkthdrRingCnt+1) * sizeof(struct rtl_mBuf), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );
		
		pMbufList_start = (struct rtl_mBuf *)(((uint32) pMbufList_start + (cpu_dcache_line - 1))& ~(cpu_dcache_line - 1));
#endif

#else
		pPkthdrList_start = (struct rtl_pktHdr *) UNCACHED_MALLOC(
		(totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr));
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) UNCACHED_MALLOC(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf));
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );
#endif
	}

	/* Initialize interrupt statistics counter */
	//rxPktCounter = txPktCounter = 0;

	/* Initialize index of Tx pkthdr descriptor */
	for (i=0;i<RTL865X_SWNIC_TXRING_HW_PKTDESC;i++)
	{
		currTxPkthdrDescIndex[i] = 0;
		txPktDoneDescIndex[i]=0;
	}

	pPkthdrList = pPkthdrList_start;
	pMbufList = pMbufList_start;

	/* Initialize Tx packet header descriptors */
	for (i = 0; i < RTL865X_SWNIC_TXRING_HW_PKTDESC; i++)
	{
		for (j = 0; j < txPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct rtl_pktHdr));
			bzero((void *) pMbuf, sizeof(struct rtl_mBuf));

			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_OUTGOING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;

			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_data = NULL;
			pMbuf->m_extbuf = NULL;
			pMbuf->m_extsize = 0;

			txPkthdrRing[i][j] = (int32) pPkthdr | DESC_RISC_OWNED;
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i][j]=(int32) pPkthdr | DESC_RISC_OWNED;
			#endif
		}

#ifdef CONFIG_RTL_ENHANCE_RELIABILITY
		txPkthdrRing_base[i] = txPkthdrRing[i][0];
#endif

		if(txPkthdrRingCnt[i] > 0)
		{
			/* Set wrap bit of the last descriptor */
			txPkthdrRing[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
			#endif
		}

	}

	/* Fill Tx packet header FDP */
	REG32(CPUTPDCR0) = (uint32) txPkthdrRing[0];
	REG32(CPUTPDCR1) = (uint32) txPkthdrRing[1];

#if defined(CONFIG_RTL_819XD) || defined(CONFIG_RTL_8196E) || defined(CONFIG_RTL_8198C)
	REG32(CPUTPDCR2) = (uint32) txPkthdrRing[2];
	REG32(CPUTPDCR3) = (uint32) txPkthdrRing[3];
#endif

	/* Initialize Rx packet header descriptors */
	k = 0;

	for (i = 0; i < RTL865X_SWNIC_RXRING_HW_PKTDESC; i++)
	{
		for (j = 0; j < rxPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct rtl_pktHdr));
			bzero((void *) pMbuf, sizeof(struct rtl_mBuf));

			/* Setup pkthdr and mbuf */
			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_INCOMING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;
			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_len = 0;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_extsize = size_of_cluster;
			pMbuf->m_data = pMbuf->m_extbuf = alloc_rx_buf(&pPkthdr->ph_mbuf->skb, size_of_cluster);

			/* Setup descriptors */
			rxPkthdrRing[i][j] = (int32) pPkthdr | DESC_SWCORE_OWNED;
			rxMbufRing[k++] = (int32) pMbuf | DESC_SWCORE_OWNED;
		}

#ifdef CONFIG_RTL_ENHANCE_RELIABILITY
		rxPkthdrRing_base[i] = rxPkthdrRing[i][0] & ~DESC_OWNED_BIT;
#endif

		/* Initialize index of current Rx pkthdr descriptor */
		currRxPkthdrDescIndex[i] = 0;

		/* Initialize index of current Rx Mbuf descriptor */
		currRxMbufDescIndex = 0;

		/* Set wrap bit of the last descriptor */
		if(rxPkthdrRingCnt[i] > 0)
			rxPkthdrRing[i][rxPkthdrRingCnt[i] - 1] |= DESC_WRAP;

		#if	defined(DELAY_REFILL_ETH_RX_BUF)
		rxDescReadyForHwIndex[i] = 0;
		rxDescCrossBoundFlag[i] = 0;
		#endif
	}

#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && defined(CONFIG_RTL_8198C)
	rxMbufRing_base = rxMbufRing[0] & ~DESC_OWNED_BIT;
#endif

	rxMbufRing[rxMbufRingCnt - 1] |= DESC_WRAP;

	/* Fill Rx packet header FDP */
	REG32(CPURPDCR0) = (uint32) rxPkthdrRing[0];
	REG32(CPURPDCR1) = (uint32) rxPkthdrRing[1];
	REG32(CPURPDCR2) = (uint32) rxPkthdrRing[2];
	REG32(CPURPDCR3) = (uint32) rxPkthdrRing[3];
	REG32(CPURPDCR4) = (uint32) rxPkthdrRing[4];
	REG32(CPURPDCR5) = (uint32) rxPkthdrRing[5];

	REG32(CPURMDCR0) = (uint32) rxMbufRing;

out:
	//SMP_UNLOCK_ETH_RECV(flags);

#ifdef _PKTHDR_CACHEABLE
	_dma_cache_wback_inv((unsigned long)pPkthdrList_start, (totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr));
	_dma_cache_wback_inv((unsigned long)pMbufList_start, (rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf));
#endif
	SMP_UNLOCK_ETH_RECV(flags);

	return ret;
}
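
A minimal bring-up sketch for the initializer above: one Rx ring, one Tx ring, 2 KB clusters. Only array element 0 is populated so the sketch stays valid whatever the ring-count macros expand to; the descriptor counts themselves are arbitrary example values.

/* Hedged bring-up sketch (not in the SDK): descriptor counts are examples. */
static int init_swnic_example(void)
{
    uint32 rxRing[RTL865X_SWNIC_RXRING_HW_PKTDESC] = { 0 };
    uint32 txRing[RTL865X_SWNIC_TXRING_HW_PKTDESC] = { 0 };

    rxRing[0] = 32;     /* 32 Rx packet-header descriptors on ring 0 */
    txRing[0] = 16;     /* 16 Tx packet-header descriptors on ring 0 */

    return (swNic_init(rxRing, 32 /* Rx mbufs */, txRing, 2048) == SUCCESS) ? 0 : -1;
}
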
int32 swNic_init(uint32 userNeedRxPkthdrRingCnt[RTL865X_SWNIC_RXRING_MAX_PKTDESC],
                 uint32 userNeedRxMbufRingCnt,
                 uint32 userNeedTxPkthdrRingCnt[RTL865X_SWNIC_TXRING_MAX_PKTDESC],
                 uint32 clusterSize)
{
    uint32 i, j, k;
	uint32 totalRxPkthdrRingCnt = 0, totalTxPkthdrRingCnt = 0;
    struct pktHdr *pPkthdrList;
    struct mBuf *pMbufList;
    uint8 * pClusterList;
    struct pktHdr * pPkthdr;
    struct mBuf * pMbuf;

    /* Cluster size is always 2048 */
    size_of_cluster = 2048;

    /* Allocate Rx descriptors of rings */
    for (i = 0; i < RTL865X_SWNIC_RXRING_MAX_PKTDESC; i++) {
		rxPkthdrRingCnt[i] = userNeedRxPkthdrRingCnt[i];
		if (rxPkthdrRingCnt[i] == 0)
			continue;

		rxPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(rxPkthdrRingCnt[i] * sizeof(uint32));
		ASSERT_CSP( (uint32) rxPkthdrRing[i] & 0x0fffffff );
		memset(rxPkthdrRing[i],0,rxPkthdrRingCnt[i] * sizeof(uint32));
		totalRxPkthdrRingCnt += rxPkthdrRingCnt[i];
    }
	
	if (totalRxPkthdrRingCnt == 0)
		return EINVAL;

    /* Allocate Tx descriptors of rings */
    for (i = 0; i < RTL865X_SWNIC_TXRING_MAX_PKTDESC; i++) {
		txPkthdrRingCnt[i] = userNeedTxPkthdrRingCnt[i];

		if (txPkthdrRingCnt[i] == 0)
			continue;

		txPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32));
#ifdef CONFIG_RTL8196C_SWITCH_PATCH
#ifdef CONFIG_AUTO_IDENTIFY
		if (REG32(REVR) == RTL8196C_REVISION_A)
        		txPkthdrRing_BAK[i] = (uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32));
#endif		
#endif		
		ASSERT_CSP( (uint32) txPkthdrRing[i] & 0x0fffffff );
		memset(txPkthdrRing[i],0,(txPkthdrRingCnt[i] * sizeof(uint32)));
		totalTxPkthdrRingCnt += txPkthdrRingCnt[i];
    }

	if (totalTxPkthdrRingCnt == 0)
		return EINVAL;

    /* Allocate MBuf descriptors of rings */
	rxMbufRingCnt = userNeedRxMbufRingCnt;

	if (userNeedRxMbufRingCnt == 0)
		return EINVAL;

	rxMbufRing = (uint32 *) UNCACHED_MALLOC(userNeedRxMbufRingCnt * sizeof(uint32));
    ASSERT_CSP( (uint32) rxMbufRing & 0x0fffffff );
	memset(rxMbufRing,0,userNeedRxMbufRingCnt * sizeof(uint32));
    /* Allocate pkthdr */
    pPkthdrList = (struct pktHdr *) UNCACHED_MALLOC(
                    (totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct pktHdr));
    ASSERT_CSP( (uint32) pPkthdrList & 0x0fffffff );
   memset(pPkthdrList,0, (totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct pktHdr));                 
    /* Allocate mbufs */
    pMbufList = (struct mBuf *) UNCACHED_MALLOC(
                    (rxMbufRingCnt + totalTxPkthdrRingCnt) * sizeof(struct mBuf));
    ASSERT_CSP( (uint32) pMbufList & 0x0fffffff );
    memset(pMbufList,0,((rxMbufRingCnt + totalTxPkthdrRingCnt) * sizeof(struct mBuf)));                
    /* Allocate clusters */
    pClusterList = (uint8 *) UNCACHED_MALLOC(rxMbufRingCnt * size_of_cluster + 8 - 1+2*rxMbufRingCnt);
    ASSERT_CSP( (uint32) pClusterList & 0x0fffffff );
    memset(pClusterList,0,(rxMbufRingCnt * size_of_cluster + 8 - 1+2*rxMbufRingCnt));
    pClusterList = (uint8*)(((uint32) pClusterList + 8 - 1) & ~(8 - 1));

    /* Initialize interrupt statistics counter */
    rxPktCounter = txPktCounter = 0;

    /* Initialize index of Tx pkthdr descriptor */
    currTxPkthdrDescIndex = 0;
    txPktDoneDescIndex=0;

    /* Initialize Tx packet header descriptors */
    for (i = 0; i < RTL865X_SWNIC_TXRING_MAX_PKTDESC; i++)
	{
		for (j = 0; j < txPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct pktHdr));
			bzero((void *) pMbuf, sizeof(struct mBuf));

			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_OUTGOING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;

			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_data = NULL;
			pMbuf->m_extbuf = NULL;
			pMbuf->m_extsize = 0;

			txPkthdrRing[i][j] = (int32) pPkthdr | DESC_RISC_OWNED;
#ifdef CONFIG_RTL8196C_SWITCH_PATCH
#ifdef CONFIG_AUTO_IDENTIFY
			if (REG32(REVR) == RTL8196C_REVISION_A)
           			 txPkthdrRing_BAK[i][j]=(int32) pPkthdr | DESC_RISC_OWNED;
#endif
#endif
		}

		/* Set wrap bit of the last descriptor */
		if (txPkthdrRingCnt[i] != 0)
		{
			txPkthdrRing[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
#ifdef CONFIG_RTL8196C_SWITCH_PATCH
#ifdef CONFIG_AUTO_IDENTIFY
			if (REG32(REVR) == RTL8196C_REVISION_A)
				txPkthdrRing_BAK[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
#endif
#endif
		}

	}

    /* Fill Tx packet header FDP */
    REG32(CPUTPDCR0) = (uint32) txPkthdrRing[0];
    REG32(CPUTPDCR1) = (uint32) txPkthdrRing[1];

    /* Initialize index of current Rx pkthdr descriptor */
    currRxPkthdrDescIndex = 0;

    /* Initialize index of current Rx Mbuf descriptor */
    currRxMbufDescIndex = 0;

    /* Initialize Rx packet header descriptors */
	k = 0;

    for (i = 0; i < RTL865X_SWNIC_RXRING_MAX_PKTDESC; i++)
	{
		for (j = 0; j < rxPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct pktHdr));
			bzero((void *) pMbuf, sizeof(struct mBuf));

			/* Setup pkthdr and mbuf */
			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_INCOMING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;
			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_len = 0;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_data = NULL;
			pMbuf->m_extsize = size_of_cluster;
			/*offset 2 bytes for 4 bytes align of ip packet*/
			pMbuf->m_data = pMbuf->m_extbuf = (pClusterList+2);
			pClusterList += size_of_cluster;
			
			/* Setup descriptors */
			rxPkthdrRing[i][j] = (int32) pPkthdr | DESC_SWCORE_OWNED;
			rxMbufRing[k++] = (int32) pMbuf | DESC_SWCORE_OWNED;
		}

		/* Set wrap bit of the last descriptor */
        if (rxPkthdrRingCnt[i] != 0)
		    rxPkthdrRing[i][rxPkthdrRingCnt[i] - 1] |= DESC_WRAP;
	}

	rxMbufRing[rxMbufRingCnt - 1] |= DESC_WRAP;

    /* Fill Rx packet header FDP */
    REG32(CPURPDCR0) = (uint32) rxPkthdrRing[0];
    REG32(CPURPDCR1) = (uint32) rxPkthdrRing[1];
    REG32(CPURPDCR2) = (uint32) rxPkthdrRing[2];
    REG32(CPURPDCR3) = (uint32) rxPkthdrRing[3];
    REG32(CPURPDCR4) = (uint32) rxPkthdrRing[4];
    REG32(CPURPDCR5) = (uint32) rxPkthdrRing[5];

    REG32(CPURMDCR0) = (uint32) rxMbufRing;

// for debug
#if 0
    /* Initialize ARP table */
    bzero((void *) arptab, ARPTAB_SIZ * sizeof(struct arptab_s));
    arptab_next_available = 0;
#endif

	//dprintf("addr=%x, val=%x\r\n",(CPUIIMR),REG32(CPUIIMR));
    /* Enable runout interrupts */
    //REG32(CPUIIMR) |= RX_ERR_IE_ALL | TX_ERR_IE_ALL | PKTHDR_DESC_RUNOUT_IE_ALL;  //8651c
	//REG32(CPUIIMR) = 0xffffffff; //RX_DONE_IE_ALL;  //   0xffffffff;  //wei test irq
	
	//*(volatile unsigned int*)(0xb8010028)=0xffffffff;	
	//dprintf("eth0 CPUIIMR status=%x\r\n", *(volatile unsigned int*)(0xb8010028));   //ISR	
	
    /* Enable Rx & Tx. Config bus burst size and mbuf size. */
    //REG32(CPUICR) = TXCMD | RXCMD | BUSBURST_256WORDS | icr_mbufsize;
    //REG32(CPUICR) = TXCMD | RXCMD | BUSBURST_32WORDS | MBUF_2048BYTES;	//8651c
	REG32(CPUICR) = TXCMD | RXCMD | BUSBURST_32WORDS | MBUF_2048BYTES; //wei test irq
#ifdef CONFIG_RTL8196C_ETH_IOT 
	REG32(CPUIIMR) = RX_DONE_IE_ALL | TX_DONE_IE_ALL | LINK_CHANGE_IE; 
#else 
	REG32(CPUIIMR) = RX_DONE_IE_ALL | TX_DONE_IE_ALL; 
#endif

	REG32(MDCIOCR)=0x96181441;      // enable Giga port 8211B LED
	//dprintf("eth0 CPUIIMR status=%x\r\n", *(volatile unsigned int*)(0xb8010028));   //ISR
	
    return SUCCESS;
}
/*************************************************************************
*   FUNCTION
*       swNic_send
*
*   DESCRIPTION
*       This function writes one packet to the Tx descriptor ring and
*       kicks the switch core to send it.
*
*   INPUTS
*       output - pointer to the packet content to transmit
*       len    - packet length in bytes
*
*   OUTPUTS
*       0 on success, -1 if the Tx descriptor ring is full.
*************************************************************************/
int32 swNic_send(void * output, uint32 len)
{
    struct pktHdr * pPkthdr;
    //uint8 pktbuf[2048];  /* the local buffer is disabled; pktbuf is expected to be defined at file scope */
    /* Mask the low two bits for 4-byte alignment and map the buffer into uncached KSEG1 */
    uint8* pktbuf_alligned = (uint8*) (((uint32) pktbuf & 0xfffffffc) | 0xa0000000);

	int next_index;

	if ((currTxPkthdrDescIndex + 1) == txPkthdrRingCnt[0])
		next_index = 0;
	else
		next_index = currTxPkthdrDescIndex + 1;

	if (next_index == txPktDoneDescIndex) {
		dprintf("Tx Desc full!\n");
		return -1;
	}

    /* Copy Packet Content */
    memcpy(pktbuf_alligned, output, len);

    ASSERT_CSP( ((int32) txPkthdrRing[0][currTxPkthdrDescIndex] & DESC_OWNED_BIT) == DESC_RISC_OWNED );

    /* Fetch packet header from Tx ring */
    pPkthdr = (struct pktHdr *) ((int32) txPkthdrRing[0][currTxPkthdrDescIndex] 
                                                & ~(DESC_OWNED_BIT | DESC_WRAP));

    /* Pad small packets and add CRC */
    if ( len < 60 )
        pPkthdr->ph_len = 64;
    else
        pPkthdr->ph_len = len + 4;
    pPkthdr->ph_mbuf->m_len       = pPkthdr->ph_len;
    pPkthdr->ph_mbuf->m_extsize = pPkthdr->ph_len;

    /* Set cluster pointer to buffer */
    pPkthdr->ph_mbuf->m_data    = pktbuf_alligned;
    pPkthdr->ph_mbuf->m_extbuf = pktbuf_alligned;


    /* Set destination port */
#if defined(CONFIG_RTL8198)
    pPkthdr->ph_portlist = ALL_PORT_MASK;
#else
    #define HW_STRAT_ROUTER_MODE 0x00100000
    if ((REG32(HW_STRAP) & HW_STRAT_ROUTER_MODE) == HW_STRAT_ROUTER_MODE)
    {
        pPkthdr->ph_portlist = ALL_PORT_MASK;
    }
    else
    {
        pPkthdr->ph_portlist = AP_MODE_PORT_MASK;   /* Port 4 only in AP mode */
    }
#endif
    /* Give descriptor to switch core */
    txPkthdrRing[0][currTxPkthdrDescIndex] |= DESC_SWCORE_OWNED;

    /* Set TXFD bit to start send */
    REG32(CPUICR) |= TXFD;
    txPktCounter++;

	currTxPkthdrDescIndex = next_index;
    return 0;
}
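
Since this version of swNic_send() returns -1 when the Tx descriptor ring is full rather than blocking, callers may want a bounded retry. A minimal sketch; the retry count is arbitrary.

/* Hedged usage sketch (not in the SDK): retry a bounded number of times when
 * the Tx ring is full.  100 attempts is an arbitrary example limit. */
static int32 send_with_retry_example(void *frame, uint32 len)
{
    int tries;

    for (tries = 0; tries < 100; tries++)
    {
        if (swNic_send(frame, len) == 0)
            return 0;           /* queued successfully */
    }
    return -1;                  /* Tx ring stayed full: give up */
}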