Example #1
void __ref play_dead(void)
{
	idle_task_exit();

	/* flush data cache */
	_dma_cache_wback_inv(0, ~0);

	/*
	 * Wakeup is on SW0 or SW1; disable everything else
	 * Use BEV !IV (BRCM_WARM_RESTART_VEC) to avoid the regular Linux
	 * IRQ handlers; this clears ST0_IE and returns immediately.
	 */
	clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
	change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
		IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
	irq_disable_hazard();

	/*
	 * wait for SW interrupt from brcmstb_boot_secondary(), then jump
	 * back to start_secondary()
	 */
	__asm__ __volatile__(
	"	wait\n"
	"	j	brcmstb_tp1_reentry\n"
	: : : "memory");
}
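
The mask pair passed to change_c0_status() above is easy to misread. Below is a minimal sketch of the read-modify-write that call performs, assuming the standard change_c0_status() generated by __BUILD_SET_C0 in arch/mips/include/asm/mipsregs.h:

/*
 * Hedged sketch: change_c0_status(change, val) computes
 *     status = (status & ~change) | (val & change)
 * so bits named only in 'change' are cleared, and bits named in both
 * 'change' and 'val' are set. Applied to the call in play_dead():
 */
static unsigned int sketch_new_status(unsigned int status)
{
	unsigned int change = IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV;
	unsigned int val    = IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV;

	/* IE_IRQ5/IE_IRQ1: in 'change' only, so cleared.
	 * IE_SW0/IE_SW1/ST0_IE/ST0_BEV: in both, so set.
	 * All other Status bits are left untouched. */
	return (status & ~change) | (val & change);
}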
Example #2
__MIPS16
__IRAM_FWD
static void release_pkthdr(struct sk_buff *skb, int idx)
{
	struct rtl_pktHdr *pReadyForHw;
	uint32 mbufIndex;
	//unsigned long flags=0;

#if 0 //defined(CONFIG_RTL_8198C)
	_dma_cache_inv((unsigned long)skb->head, skb->truesize);
#else
	_dma_cache_wback_inv((unsigned long)skb->head, skb->truesize);
#endif

	//local_irq_save(flags);
	pReadyForHw = (struct rtl_pktHdr *)(rxPkthdrRing[idx][rxDescReadyForHwIndex[idx]] &
						~(DESC_OWNED_BIT | DESC_WRAP));
	mbufIndex = ((uint32)(pReadyForHw->ph_mbuf) - (rxMbufRing[0] & ~(DESC_OWNED_BIT | DESC_WRAP))) /
					(sizeof(struct rtl_mBuf));

	pReadyForHw->ph_mbuf->m_data = skb->data;
	pReadyForHw->ph_mbuf->m_extbuf = skb->data;
	pReadyForHw->ph_mbuf->skb = skb;

	rxMbufRing[mbufIndex] |= DESC_SWCORE_OWNED;
	set_RxPkthdrRing_OwnBit(idx);
	//local_irq_restore(flags);
#ifdef _PKTHDR_CACHEABLE
	#if 0 //defined(CONFIG_RTL_8198C)
	_dma_cache_inv((unsigned long)pReadyForHw, sizeof(struct rtl_pktHdr));
	_dma_cache_inv((unsigned long)(pReadyForHw->ph_mbuf), sizeof(struct rtl_mBuf));
	#else
	_dma_cache_wback_inv((unsigned long)pReadyForHw, sizeof(struct rtl_pktHdr));
	_dma_cache_wback_inv((unsigned long)(pReadyForHw->ph_mbuf), sizeof(struct rtl_mBuf));
	#endif
#endif
}
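
Each ring slot above packs a descriptor pointer and the ownership/wrap control bits into a single 32-bit word, which is why the pointer is recovered by masking and the mbuf ring index by pointer arithmetic. A minimal sketch of that convention, using only the macros and types already visible in this code:

/* Hedged sketch of the descriptor-word encoding assumed above. */
static inline struct rtl_pktHdr *desc_to_pkthdr(uint32 desc)
{
	/* strip the control bits to recover the raw pkthdr pointer */
	return (struct rtl_pktHdr *)(desc & ~(DESC_OWNED_BIT | DESC_WRAP));
}

static inline uint32 desc_to_mbuf_index(struct rtl_mBuf *mbuf, uint32 first_desc)
{
	/* the mbufs form one contiguous array, so the ring index is the
	 * element distance from the mbuf behind the first ring slot */
	struct rtl_mBuf *base =
		(struct rtl_mBuf *)(first_desc & ~(DESC_OWNED_BIT | DESC_WRAP));
	return ((uint32)mbuf - (uint32)base) / sizeof(struct rtl_mBuf);
}

Here desc_to_mbuf_index(pReadyForHw->ph_mbuf, rxMbufRing[0]) mirrors the mbufIndex computation in release_pkthdr().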
Example #3
/*************************************************************************
*   FUNCTION
*       _swNic_send
*
*   DESCRIPTION
*       Writes one packet to the Tx pkthdr descriptors, hands the
*       descriptor to the switch core, and kicks TXFD to start the
*       send; it does not wait for completion.
*
*   INPUTS
*       skb    - socket buffer that owns the packet
*       output - pointer to the frame data to transmit
*       len    - frame length in bytes, before padding and CRC
*       nicTx  - Tx parameters (ring index, VLAN id, port list, ...)
*
*   OUTPUTS
*       Tx descriptor index used, or -1 if the Tx ring is full.
*************************************************************************/
__MIPS16
__IRAM_FWD inline int32 _swNic_send(void *skb, void *output, uint32 len, rtl_nicTx_info *nicTx)
{
	struct rtl_pktHdr * pPkthdr;
	int next_index, ret;

	/* compute the next Tx descriptor index, wrapping at the ring end */
	if ((currTxPkthdrDescIndex[nicTx->txIdx] + 1) == txPkthdrRingCnt[nicTx->txIdx])
		next_index = 0;
	else
		next_index = currTxPkthdrDescIndex[nicTx->txIdx] + 1;

	if (next_index == txPktDoneDescIndex[nicTx->txIdx]) {
		/* Tx ring full */
		return -1;
	}

#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && defined(CONFIG_RTL_8198C)
	pPkthdr = (struct rtl_pktHdr *) ((int32) txPkthdrRing_base[nicTx->txIdx] + 
		(sizeof(struct rtl_pktHdr) * currTxPkthdrDescIndex[nicTx->txIdx]));

#else
	/* Fetch packet header from Tx ring */
	pPkthdr = (struct rtl_pktHdr *) ((int32) txPkthdrRing[nicTx->txIdx][currTxPkthdrDescIndex[nicTx->txIdx]]
                                                & ~(DESC_OWNED_BIT | DESC_WRAP));

#endif

	/* Pad runts to the 64-byte Ethernet minimum (60 bytes of frame plus
	 * 4-byte CRC); otherwise just account for the 4-byte CRC */
	if ( len < 60 )
		len = 64;
	else
		len += 4;

	pPkthdr->ph_mbuf->m_len  = len;
	pPkthdr->ph_mbuf->m_extsize = len;
	pPkthdr->ph_mbuf->skb = skb;
	pPkthdr->ph_len = len;

	pPkthdr->ph_vlanId = nicTx->vid;
	#if defined(CONFIG_8198_PORT5_GMII) || defined(CONFIG_8198_PORT5_RGMII) || defined(CONFIG_RTL_8198C_8367RB)
	pPkthdr->ph_portlist = nicTx->portlist&0x3f;
	#else
	pPkthdr->ph_portlist = nicTx->portlist&0x1f;
	#endif
	pPkthdr->ph_srcExtPortNum = nicTx->srcExtPort;
	pPkthdr->ph_flags = nicTx->flags;
#if	defined(CONFIG_RTL_HW_QOS_SUPPORT) || defined(CONFIG_RTK_VOIP_QOS)
	pPkthdr->ph_txPriority = nicTx->priority;
#endif
#ifdef CONFIG_RTK_VLAN_WAN_TAG_SUPPORT
	if (*((unsigned short *)((unsigned char *)output + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q))
		pPkthdr->ph_txCVlanTagAutoAdd = nicTx->tagport;
	else
		pPkthdr->ph_txCVlanTagAutoAdd = 0;
#endif

#if defined(CONFIG_RTL_VLAN_8021Q) || defined(CONFIG_SWCONFIG)
	if (*((unsigned short *)((unsigned char *)output + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q))
		pPkthdr->ph_txCVlanTagAutoAdd = 0x3f & rtk_get_vlan_tagmask(pPkthdr->ph_vlanId);
	else
		pPkthdr->ph_txCVlanTagAutoAdd = 0;
	#if 0
	panic_printk("%s %d pPkthdr->ph_txCVlanTagAutoAdd=0x%x pPkthdr->ph_portlist=0x%x pPkthdr->ph_vlanId=%d\n",
		__FUNCTION__, __LINE__, pPkthdr->ph_txCVlanTagAutoAdd, pPkthdr->ph_portlist, pPkthdr->ph_vlanId);
	#endif
#elif defined(CONFIG_RTL_HW_VLAN_SUPPORT)
	if (*((unsigned short *)((unsigned char *)output + ETH_ALEN * 2)) != __constant_htons(ETH_P_8021Q))
		pPkthdr->ph_txCVlanTagAutoAdd = auto_set_tag_portmask;
	else
		pPkthdr->ph_txCVlanTagAutoAdd = 0;
#endif


	/* Set cluster pointer to buffer */
	pPkthdr->ph_mbuf->m_data   = output;
	pPkthdr->ph_mbuf->m_extbuf = output;

#if defined(CONFIG_RTL_819XD) || defined(CONFIG_RTL_8196E) || defined(CONFIG_RTL_8198C)
	pPkthdr->ph_ptpPkt = 0;
#endif

#ifdef _PKTHDR_CACHEABLE
	#if defined(CONFIG_RTL_8198C)
	_dma_cache_wback((unsigned long)pPkthdr, sizeof(struct rtl_pktHdr));
	_dma_cache_wback((unsigned long)(pPkthdr->ph_mbuf), sizeof(struct rtl_mBuf));
	#else
	_dma_cache_wback_inv((unsigned long)pPkthdr, sizeof(struct rtl_pktHdr));
	_dma_cache_wback_inv((unsigned long)(pPkthdr->ph_mbuf), sizeof(struct rtl_mBuf));
	#endif
#endif

	ret = currTxPkthdrDescIndex[nicTx->txIdx];
	currTxPkthdrDescIndex[nicTx->txIdx] = next_index;
	/* Give descriptor to switch core */
	txPkthdrRing[nicTx->txIdx][ret] |= DESC_SWCORE_OWNED;

#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && !defined(CONFIG_RTL_8198C)
	{
	uint32 pkthdr2 = (uint32)txPkthdrRing[nicTx->txIdx][ret];
	if ((pkthdr2 & DESC_OWNED_BIT) == 0)
		#ifndef CONFIG_OPENWRT_SDK
		panic_printk("_swNic_send: idx= %d, read back pkthdr= 0x%x.\n", ret, pkthdr2);
		#else
		printk("_swNic_send: idx= %d, read back pkthdr= 0x%x.\n", ret, pkthdr2); 
		#endif
	}
#endif

#if 0
	memDump((void*)output, 64, "TX");
	printk("index %d address 0x%p, 0x%x 0x%p.\n", ret, &txPkthdrRing[nicTx->txIdx][ret], (*(volatile uint32 *)&txPkthdrRing[nicTx->txIdx][ret]), pPkthdr);
	printk("Flags 0x%x proto 0x%x portlist 0x%x vid %d extPort %d srcExtPort %d len %d.\n",
		pPkthdr->ph_flags, pPkthdr->ph_proto, pPkthdr->ph_portlist, pPkthdr->ph_vlanId,
		pPkthdr->ph_extPortList, pPkthdr->ph_srcExtPortNum, pPkthdr->ph_len);
#endif

	/* Set TXFD bit to start send */
	REG32(CPUICR) |= TXFD;

	return ret;
}
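
Because _swNic_send() only queues the descriptor and returns immediately, a caller has to handle the -1 ring-full case itself. A minimal usage sketch; swNic_txDone() stands in for whatever routine reclaims completed Tx descriptors and is an assumption, not part of this source:

/* Hedged usage sketch for _swNic_send(). */
static int sketch_xmit(struct sk_buff *skb, rtl_nicTx_info *nicTx)
{
	int ret = _swNic_send((void *)skb, skb->data, skb->len, nicTx);

	if (ret < 0) {
		/* ring full: reclaim finished descriptors (placeholder
		 * helper name) and retry once before giving up */
		swNic_txDone(nicTx->txIdx);
		ret = _swNic_send((void *)skb, skb->data, skb->len, nicTx);
	}
	return ret;	/* descriptor index, or -1 if still full */
}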
Example #4
__IRAM_FWD
static void increase_rx_idx_release_pkthdr(struct sk_buff *skb, int idx)
{
	struct rtl_pktHdr *pReadyForHw;
	uint32 mbufIndex;
#if !defined(CONFIG_SMP)
    unsigned long flags=0;
#endif

#if 0 //defined(CONFIG_RTL_8198C)
	_dma_cache_inv((unsigned long)skb->head, skb->truesize);
#else
	//_dma_cache_wback_inv((unsigned long)skb->head, skb->truesize);
#endif

#if !defined(CONFIG_SMP)
	SMP_LOCK_ETH_RECV(flags);
#endif

#if	defined(DELAY_REFILL_ETH_RX_BUF)
	pReadyForHw = (struct rtl_pktHdr *)(rxPkthdrRing[idx][rxDescReadyForHwIndex[idx]] &
						~(DESC_OWNED_BIT | DESC_WRAP));
#else

	#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && defined(CONFIG_RTL_8198C)
	pReadyForHw = (struct rtl_pktHdr *)(rxPkthdrRing_base[idx] +
						(sizeof(struct rtl_pktHdr) * currRxPkthdrDescIndex[idx]));
	#else
	pReadyForHw = (struct rtl_pktHdr *)(rxPkthdrRing[idx][currRxPkthdrDescIndex[idx]] &
						~(DESC_OWNED_BIT | DESC_WRAP));
	#endif

#endif


#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && defined(CONFIG_RTL_8198C)
	mbufIndex = ((uint32)(pReadyForHw->ph_mbuf) - rxMbufRing_base) /(sizeof(struct rtl_mBuf));
#else
	mbufIndex = ((uint32)(pReadyForHw->ph_mbuf) - (rxMbufRing[0] & ~(DESC_OWNED_BIT | DESC_WRAP))) /
					(sizeof(struct rtl_mBuf));
#endif

	pReadyForHw->ph_mbuf->m_data = skb->data;
	pReadyForHw->ph_mbuf->m_extbuf = skb->data;
	pReadyForHw->ph_mbuf->skb = skb;

	rxMbufRing[mbufIndex] |= DESC_SWCORE_OWNED;

#if !defined(DELAY_REFILL_ETH_RX_BUF)
	/* set own bit back to the switch core in the final step */
	rxPkthdrRing[idx][currRxPkthdrDescIndex[idx]] |= DESC_SWCORE_OWNED;
#endif
	if ( ++currRxPkthdrDescIndex[idx] == rxPkthdrRingCnt[idx] ) {
		currRxPkthdrDescIndex[idx] = 0;
		#if	defined(DELAY_REFILL_ETH_RX_BUF)
		rxDescCrossBoundFlag[idx]++;
		#endif
	}

#if	defined(DELAY_REFILL_ETH_RX_BUF)
	set_RxPkthdrRing_OwnBit(idx);
#endif
#if !defined(CONFIG_SMP)
	SMP_UNLOCK_ETH_RECV(flags);
#endif
#ifdef _PKTHDR_CACHEABLE
	#if 0 //defined(CONFIG_RTL_8198C)
	_dma_cache_inv((unsigned long)pReadyForHw, sizeof(struct rtl_pktHdr));
	_dma_cache_inv((unsigned long)(pReadyForHw->ph_mbuf), sizeof(struct rtl_mBuf));
	#else
	_dma_cache_wback_inv((unsigned long)pReadyForHw, sizeof(struct rtl_pktHdr));
	_dma_cache_wback_inv((unsigned long)(pReadyForHw->ph_mbuf), sizeof(struct rtl_mBuf));
	#endif
#endif
	_dma_cache_wback_inv((unsigned long)skb, sizeof(struct sk_buff));
}
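
Under DELAY_REFILL_ETH_RX_BUF this ring keeps two cursors: currRxPkthdrDescIndex, advanced here as packets are consumed, and rxDescReadyForHwIndex, advanced by set_RxPkthdrRing_OwnBit() as refilled buffers are handed back to the switch core. rxDescCrossBoundFlag records that the consume cursor has wrapped while the refill cursor has not yet, so the two can still be ordered. A minimal sketch of the wrap-aware advance used above; the matching decrement when the refill cursor wraps is assumed to live in set_RxPkthdrRing_OwnBit():

/* Hedged sketch of the wrap-aware consume-cursor advance. */
static inline void rx_consume_advance(int idx)
{
	if (++currRxPkthdrDescIndex[idx] == rxPkthdrRingCnt[idx]) {
		currRxPkthdrDescIndex[idx] = 0;
#if defined(DELAY_REFILL_ETH_RX_BUF)
		rxDescCrossBoundFlag[idx]++;	/* lapped the refill cursor */
#endif
	}
}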
Example #5
int32 swNic_init(uint32 userNeedRxPkthdrRingCnt[RTL865X_SWNIC_RXRING_HW_PKTDESC],
                 uint32 userNeedRxMbufRingCnt,
                 uint32 userNeedTxPkthdrRingCnt[RTL865X_SWNIC_TXRING_HW_PKTDESC],
                 uint32 clusterSize)
{
	uint32 i, j, k;
	static uint32 totalRxPkthdrRingCnt = 0, totalTxPkthdrRingCnt = 0;
	static struct rtl_pktHdr *pPkthdrList_start;
	static struct rtl_mBuf *pMbufList_start;
	struct rtl_pktHdr *pPkthdrList;
	struct rtl_mBuf *pMbufList;
	struct rtl_pktHdr * pPkthdr;
	struct rtl_mBuf * pMbuf;
	unsigned long flags=0;
	int	ret;

#if defined(CONFIG_RTL_8198C) && defined(_PKTHDR_CACHEABLE)
	int cpu_dcache_line = cpu_dcache_line_size(); /* see arch/mips/include/asm/cpu-features.h */
#endif

	/* init const array for rx pre-process	*/
	extPortMaskToPortNum[0] = 5;
	extPortMaskToPortNum[1] = 6;
	extPortMaskToPortNum[2] = 7;
	extPortMaskToPortNum[3] = 5;
	extPortMaskToPortNum[4] = 8;
	extPortMaskToPortNum[5] = 5;
	extPortMaskToPortNum[6] = 5;
	extPortMaskToPortNum[7] = 5;

#if	defined(DELAY_REFILL_ETH_RX_BUF)
	rxPkthdrRefillThreshold[0] = ETH_REFILL_THRESHOLD;
	rxPkthdrRefillThreshold[1] = ETH_REFILL_THRESHOLD1;
	rxPkthdrRefillThreshold[2] = ETH_REFILL_THRESHOLD2;
	rxPkthdrRefillThreshold[3] = ETH_REFILL_THRESHOLD3;
	rxPkthdrRefillThreshold[4] = ETH_REFILL_THRESHOLD4;
	rxPkthdrRefillThreshold[5] = ETH_REFILL_THRESHOLD5;
#endif

	#if defined(CONFIG_RTL8196C_REVISION_B)
	rtl_chip_version = REG32(REVR);
	#endif

	ret = SUCCESS;
	SMP_LOCK_ETH_RECV(flags);
	if (rxMbufRing == NULL)
	{
		size_of_cluster = clusterSize;

		/* Allocate Rx descriptors of rings */
		for (i = 0; i < RTL865X_SWNIC_RXRING_HW_PKTDESC; i++) {
			rxPkthdrRingCnt[i] = userNeedRxPkthdrRingCnt[i];
			if (rxPkthdrRingCnt[i] == 0)
			{
				rxPkthdrRing[i] = NULL;
				continue;
			}

			rxPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(rxPkthdrRingCnt[i] * sizeof(uint32*));
			ASSERT_CSP( (uint32) rxPkthdrRing[i] & 0x0fffffff );

			totalRxPkthdrRingCnt += rxPkthdrRingCnt[i];
		}

		if (totalRxPkthdrRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		/* Allocate Tx descriptors of rings */
		for (i = 0; i < RTL865X_SWNIC_TXRING_HW_PKTDESC; i++) {
			txPkthdrRingCnt[i] = userNeedTxPkthdrRingCnt[i];

			if (txPkthdrRingCnt[i] == 0)
			{
				txPkthdrRing[i] = NULL;
				continue;
			}

			txPkthdrRing[i] = (uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32*));
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i]=(uint32 *) UNCACHED_MALLOC(txPkthdrRingCnt[i] * sizeof(uint32));
			#endif

			ASSERT_CSP( (uint32) txPkthdrRing[i] & 0x0fffffff );

			totalTxPkthdrRingCnt += txPkthdrRingCnt[i];
		}

		if (totalTxPkthdrRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		/* Allocate MBuf descriptors of rings */
		rxMbufRingCnt = userNeedRxMbufRingCnt;

		if (userNeedRxMbufRingCnt == 0) {
			ret = EINVAL;
			goto out;
		}

		rxMbufRing = (uint32 *) UNCACHED_MALLOC((rxMbufRingCnt+RESERVERD_MBUF_RING_NUM) * sizeof(uint32*));
		ASSERT_CSP( (uint32) rxMbufRing & 0x0fffffff );

		/* Allocate pkthdr */
#ifdef _PKTHDR_CACHEABLE

#if 0 //defined(CONFIG_RTL_8198C)
		pPkthdrList_start = (struct rtl_pktHdr *) kmalloc(
		(totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) kmalloc(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );

#else
		pPkthdrList_start = (struct rtl_pktHdr *) kmalloc(
		(totalRxPkthdrRingCnt+totalTxPkthdrRingCnt+1) * sizeof(struct rtl_pktHdr), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

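		/*
		 * One extra element was allocated above so the pointer can be
		 * rounded up to the next cache-line boundary without overrunning
		 * the buffer (assumes cpu_dcache_line is a power of two).
		 */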
		pPkthdrList_start = (struct rtl_pktHdr *)(((uint32) pPkthdrList_start + (cpu_dcache_line - 1))& ~(cpu_dcache_line - 1));
	
		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) kmalloc(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+totalTxPkthdrRingCnt+1) * sizeof(struct rtl_mBuf), GFP_ATOMIC);
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );
		
		pMbufList_start = (struct rtl_mBuf *)(((uint32) pMbufList_start + (cpu_dcache_line - 1))& ~(cpu_dcache_line - 1));
#endif

#else
		pPkthdrList_start = (struct rtl_pktHdr *) UNCACHED_MALLOC(
		(totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr));
		ASSERT_CSP( (uint32) pPkthdrList_start & 0x0fffffff );

		/* Allocate mbufs */
		pMbufList_start = (struct rtl_mBuf *) UNCACHED_MALLOC(
		(rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf));
		ASSERT_CSP( (uint32) pMbufList_start & 0x0fffffff );
#endif
	}

	/* Initialize interrupt statistics counter */
	//rxPktCounter = txPktCounter = 0;

	/* Initialize index of Tx pkthdr descriptor */
	for (i = 0; i < RTL865X_SWNIC_TXRING_HW_PKTDESC; i++) {
		currTxPkthdrDescIndex[i] = 0;
		txPktDoneDescIndex[i] = 0;
	}

	pPkthdrList = pPkthdrList_start;
	pMbufList = pMbufList_start;

	/* Initialize Tx packet header descriptors */
	for (i = 0; i < RTL865X_SWNIC_TXRING_HW_PKTDESC; i++)
	{
		for (j = 0; j < txPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct rtl_pktHdr));
			bzero((void *) pMbuf, sizeof(struct rtl_mBuf));

			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_OUTGOING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;

			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_data = NULL;
			pMbuf->m_extbuf = NULL;
			pMbuf->m_extsize = 0;

			txPkthdrRing[i][j] = (int32) pPkthdr | DESC_RISC_OWNED;
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i][j]=(int32) pPkthdr | DESC_RISC_OWNED;
			#endif
		}

#ifdef CONFIG_RTL_ENHANCE_RELIABILITY
		txPkthdrRing_base[i] = txPkthdrRing[i][0];
#endif

		if(txPkthdrRingCnt[i] > 0)
		{
			/* Set wrap bit of the last descriptor */
			txPkthdrRing[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
			#ifdef CONFIG_RTL8196C_REVISION_B
			if (rtl_chip_version == RTL8196C_REVISION_A)
				txPkthdrRing_backup[i][txPkthdrRingCnt[i] - 1] |= DESC_WRAP;
			#endif
		}

	}

	/* Fill Tx packet header FDP */
	REG32(CPUTPDCR0) = (uint32) txPkthdrRing[0];
	REG32(CPUTPDCR1) = (uint32) txPkthdrRing[1];

#if defined(CONFIG_RTL_819XD) || defined(CONFIG_RTL_8196E) || defined(CONFIG_RTL_8198C)
	REG32(CPUTPDCR2) = (uint32) txPkthdrRing[2];
	REG32(CPUTPDCR3) = (uint32) txPkthdrRing[3];
#endif

	/* Initialize Rx packet header descriptors */
	k = 0;

	for (i = 0; i < RTL865X_SWNIC_RXRING_HW_PKTDESC; i++)
	{
		for (j = 0; j < rxPkthdrRingCnt[i]; j++)
		{
			/* Dequeue pkthdr and mbuf */
			pPkthdr = pPkthdrList++;
			pMbuf = pMbufList++;

			bzero((void *) pPkthdr, sizeof(struct rtl_pktHdr));
			bzero((void *) pMbuf, sizeof(struct rtl_mBuf));

			/* Setup pkthdr and mbuf */
			pPkthdr->ph_mbuf = pMbuf;
			pPkthdr->ph_len = 0;
			pPkthdr->ph_flags = PKTHDR_USED | PKT_INCOMING;
			pPkthdr->ph_type = PKTHDR_ETHERNET;
			pPkthdr->ph_portlist = 0;
			pMbuf->m_next = NULL;
			pMbuf->m_pkthdr = pPkthdr;
			pMbuf->m_len = 0;
			pMbuf->m_flags = MBUF_USED | MBUF_EXT | MBUF_PKTHDR | MBUF_EOR;
			pMbuf->m_extsize = size_of_cluster;
			pMbuf->m_data = pMbuf->m_extbuf = alloc_rx_buf(&pPkthdr->ph_mbuf->skb, size_of_cluster);

			/* Setup descriptors */
			rxPkthdrRing[i][j] = (int32) pPkthdr | DESC_SWCORE_OWNED;
			rxMbufRing[k++] = (int32) pMbuf | DESC_SWCORE_OWNED;
		}

#ifdef CONFIG_RTL_ENHANCE_RELIABILITY
		rxPkthdrRing_base[i] = rxPkthdrRing[i][0] & ~DESC_OWNED_BIT;
#endif

		/* Initialize index of current Rx pkthdr descriptor */
		currRxPkthdrDescIndex[i] = 0;

		/* Initialize index of current Rx Mbuf descriptor */
		currRxMbufDescIndex = 0;

		/* Set wrap bit of the last descriptor */
		if(rxPkthdrRingCnt[i] > 0)
			rxPkthdrRing[i][rxPkthdrRingCnt[i] - 1] |= DESC_WRAP;

		#if	defined(DELAY_REFILL_ETH_RX_BUF)
		rxDescReadyForHwIndex[i] = 0;
		rxDescCrossBoundFlag[i] = 0;
		#endif
	}

#if defined(CONFIG_RTL_ENHANCE_RELIABILITY) && defined(CONFIG_RTL_8198C)
	rxMbufRing_base = rxMbufRing[0] & ~DESC_OWNED_BIT;
#endif

	rxMbufRing[rxMbufRingCnt - 1] |= DESC_WRAP;

	/* Fill Rx packet header FDP */
	REG32(CPURPDCR0) = (uint32) rxPkthdrRing[0];
	REG32(CPURPDCR1) = (uint32) rxPkthdrRing[1];
	REG32(CPURPDCR2) = (uint32) rxPkthdrRing[2];
	REG32(CPURPDCR3) = (uint32) rxPkthdrRing[3];
	REG32(CPURPDCR4) = (uint32) rxPkthdrRing[4];
	REG32(CPURPDCR5) = (uint32) rxPkthdrRing[5];

	REG32(CPURMDCR0) = (uint32) rxMbufRing;

out:
	//SMP_UNLOCK_ETH_RECV(flags);

#ifdef _PKTHDR_CACHEABLE
	_dma_cache_wback_inv((unsigned long)pPkthdrList_start, (totalRxPkthdrRingCnt + totalTxPkthdrRingCnt) * sizeof(struct rtl_pktHdr));
	_dma_cache_wback_inv((unsigned long)pMbufList_start, (rxMbufRingCnt+RESERVERD_MBUF_RING_NUM+ totalTxPkthdrRingCnt) * sizeof(struct rtl_mBuf));
#endif
	SMP_UNLOCK_ETH_RECV(flags);

	return ret;
}
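
A minimal initialization sketch for swNic_init(); the ring counts and cluster size below are illustrative assumptions, not values taken from this driver, and the array sizes assume six Rx and four Tx hardware rings as the CPURPDCR0..5 / CPUTPDCR0..3 register writes above suggest:

/* Hedged usage sketch: per-ring descriptor counts are tuning choices. */
static int sketch_nic_init(void)
{
	uint32 rxRingCnt[RTL865X_SWNIC_RXRING_HW_PKTDESC] = { 256, 0, 0, 0, 0, 32 };
	uint32 txRingCnt[RTL865X_SWNIC_TXRING_HW_PKTDESC] = { 256, 64, 0, 0 };

	/* one Rx mbuf per Rx pkthdr and 2 KB clusters: both assumptions */
	return swNic_init(rxRingCnt, 256 + 32, txRingCnt, 2048);
}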