Example #1
static inline int
pdma_recv(struct net_device* dev, END_DEVICE* ei_local, int work_todo)
{
	struct PDMA_rxdesc *rxd_ring;
	struct sk_buff *new_skb, *rx_skb;
	int gmac_no = PSE_PORT_GMAC1;
	int work_done = 0;
	u32 rxd_dma_owner_idx;
	u32 rxd_info2, rxd_info4;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
	u32 rxd_info3;
#endif
#if defined (CONFIG_RAETH_SPECIAL_TAG)
	struct vlan_ethhdr *veth;
#endif

	rxd_dma_owner_idx = le32_to_cpu(sysRegRead(RX_CALC_IDX0));

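	/* walk the RX ring starting from the last index handed back to DMA;
	 * each iteration advances one descriptor and stops at the first one
	 * the hardware has not completed yet (RX2_DMA_DONE not set) */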
	while (work_done < work_todo) {
		rxd_dma_owner_idx = (rxd_dma_owner_idx + 1) % NUM_RX_DESC;
		rxd_ring = &ei_local->rxd_ring[rxd_dma_owner_idx];
		
		if (!(rxd_ring->rxd_info2 & RX2_DMA_DONE))
			break;
		
		/* load completed skb pointer */
		rx_skb = ei_local->rxd_buff[rxd_dma_owner_idx];
		
		/* copy RX desc to CPU */
		rxd_info2 = rxd_ring->rxd_info2;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
		rxd_info3 = rxd_ring->rxd_info3;
#endif
		rxd_info4 = rxd_ring->rxd_info4;
		
#if defined (CONFIG_PSEUDO_SUPPORT)
		gmac_no = RX4_DMA_SP(rxd_info4);
#endif
		/* make sure a replacement RX buffer can be allocated
		 * before passing the completed packet up to the CPU */
		new_skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
		if (unlikely(new_skb == NULL)) {
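			/* no replacement buffer: re-arm the descriptor with the
			 * old skb still attached and drop this packet */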
#if defined (RAETH_PDMA_V2)
			rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
			rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
			/* move CPU pointer to next RXD */
			sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
			
			inc_rx_drop(ei_local, gmac_no);
#if !defined (CONFIG_RAETH_NAPI)
			/* signal that a reschedule is needed */
			work_done = work_todo;
#endif
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: Failed to alloc new RX skb! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
			break;
		}
#if !defined (RAETH_PDMA_V2)
		skb_reserve(new_skb, NET_IP_ALIGN);
#endif
		/* store new empty skb pointer */
		ei_local->rxd_buff[rxd_dma_owner_idx] = new_skb;
		
		/* map new skb to ring (unmap is not required on generic mips mm) */
		rxd_ring->rxd_info1 = (u32)dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH, DMA_FROM_DEVICE);
#if defined (RAETH_PDMA_V2)
		rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
		rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
		wmb();
		
		/* move CPU pointer to next RXD */
		sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
		
		/* skb processing */
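		/* set length/tail directly on the completed skb
		 * (equivalent to skb_put() on an empty buffer) */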
		rx_skb->len = RX2_DMA_SDL0_GET(rxd_info2);
#if defined (RAETH_PDMA_V2)
		rx_skb->data += NET_IP_ALIGN;
#endif
		rx_skb->tail = rx_skb->data + rx_skb->len;

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		FOE_MAGIC_TAG(rx_skb) = FOE_MAGIC_GE;
		DO_FILL_FOE_DESC(rx_skb, (rxd_info4 & ~(RX4_DMA_ALG_SET)));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
		if (rxd_info4 & RX4_DMA_L4FVLD)
			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif

#if defined (CONFIG_RAETH_HW_VLAN_RX)
		if ((rxd_info2 & RX2_DMA_TAG) && rxd_info3) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
			__vlan_hwaccel_put_tag(rx_skb, __constant_htons(ETH_P_8021Q), RX3_DMA_VID(rxd_info3));
#else
			__vlan_hwaccel_put_tag(rx_skb, RX3_DMA_VID(rxd_info3));
#endif
		}
#endif

#if defined (CONFIG_PSEUDO_SUPPORT)
		if (gmac_no == PSE_PORT_GMAC2)
			rx_skb->protocol = eth_type_trans(rx_skb, ei_local->PseudoDev);
		else
#endif
			rx_skb->protocol = eth_type_trans(rx_skb, dev);

#if defined (CONFIG_RAETH_SPECIAL_TAG)
#if defined (CONFIG_MT7530_GSW)
#define ESW_TAG_ID	0x00
#else
#define ESW_TAG_ID	0x81
#endif
		// port0: 0x8100 => 0x8100 0001
		// port1: 0x8101 => 0x8100 0002
		// port2: 0x8102 => 0x8100 0003
		// port3: 0x8103 => 0x8100 0004
		// port4: 0x8104 => 0x8100 0005
		// port5: 0x8105 => 0x8100 0006
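		// the embedded switch encodes the ingress port in the outer
		// ethertype (see the mapping above); rewrite it back into a
		// standard 802.1Q header with VID = port number + 1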
		veth = vlan_eth_hdr(rx_skb);
		if ((veth->h_vlan_proto & 0xFF) == ESW_TAG_ID) {
			veth->h_vlan_TCI = htons((((veth->h_vlan_proto >> 8) & 0xF) + 1));
			veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);
			rx_skb->protocol = veth->h_vlan_proto;
		}
#endif

/* ra_sw_nat_hook_rx returns 1 --> pass the packet to the network stack as usual
 * ra_sw_nat_hook_rx returns 0 --> already forwarded by HW NAT, skip netif_rx
 */
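/* the hook pointer is registered by the HW NAT module when it is loaded;
 * when it is absent, packets always take the normal receive path below */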
#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		if ((ra_sw_nat_hook_rx == NULL) || ra_sw_nat_hook_rx(rx_skb))
#endif
		{
#if defined (CONFIG_RAETH_NAPI)
#if defined (CONFIG_RAETH_NAPI_GRO)
			if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&ei_local->napi, rx_skb);
			else
#endif
			netif_receive_skb(rx_skb);
#else
			netif_rx(rx_skb);
#endif
		}
		
		work_done++;
	}

	return work_done;
}
Example #2
/* must be spinlock protected */
static void
fe_dma_init(END_DEVICE *ei_local)
{
	u32 i, txd_idx, regVal;
	dma_addr_t rxd_buf_phy, fq_tail_phy, txd_free_phy;

	/* init QDMA HW TX pool */
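	/* the HW forwarding pool is a circular list of NUM_QDMA_PAGE descriptors,
	 * each owning one QDMA_PAGE_SIZE buffer; the last descriptor's next
	 * pointer links back to the head (see fq_ndp_phy below) */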
	for (i = 0; i < NUM_QDMA_PAGE; i++) {
		struct QDMA_txdesc *txd = &ei_local->fq_head[i];
		dma_addr_t fq_buf_phy, fq_ndp_phy;
		
		fq_buf_phy = ei_local->fq_head_page_phy + (i * QDMA_PAGE_SIZE);
		if (i < (NUM_QDMA_PAGE-1))
			fq_ndp_phy = ei_local->fq_head_phy + ((i+1) * sizeof(struct QDMA_txdesc));
		else
			fq_ndp_phy = ei_local->fq_head_phy;
		
		ACCESS_ONCE(txd->txd_info1) = (u32)fq_buf_phy;
		ACCESS_ONCE(txd->txd_info2) = (u32)fq_ndp_phy;
		ACCESS_ONCE(txd->txd_info3) = TX3_QDMA_SDL(QDMA_PAGE_SIZE);
		ACCESS_ONCE(txd->txd_info4) = 0;
	}

	fq_tail_phy = ei_local->fq_head_phy + ((NUM_QDMA_PAGE-1) * sizeof(struct QDMA_txdesc));

	/* init QDMA SW TX pool */
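	/* txd_pool_info[] chains free descriptors by index, forming the free
	 * list tracked by txd_pool_free_head/tail/num below */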
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct QDMA_txdesc *txd = &ei_local->txd_pool[i];
		
		ei_local->txd_buff[i] = NULL;
		ei_local->txd_pool_info[i] = i + 1;
		
		ACCESS_ONCE(txd->txd_info1) = 0;
		ACCESS_ONCE(txd->txd_info2) = 0;
		ACCESS_ONCE(txd->txd_info3) = TX3_QDMA_LS | TX3_QDMA_OWN;
		ACCESS_ONCE(txd->txd_info4) = 0;
	}

	ei_local->txd_pool_free_head = 0;
	ei_local->txd_pool_free_tail = NUM_TX_DESC - 1;
	ei_local->txd_pool_free_num = NUM_TX_DESC;

	/* init PDMA (or QDMA) RX ring */
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct PDMA_rxdesc *rxd = &ei_local->rxd_ring[i];
		
		rxd_buf_phy = dma_map_single(NULL, ei_local->rxd_buff[i]->data, MAX_RX_LENGTH + NET_IP_ALIGN, DMA_FROM_DEVICE);
		
		ACCESS_ONCE(rxd->rxd_info1) = (u32)rxd_buf_phy;
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
		ACCESS_ONCE(rxd->rxd_info3) = 0;
		ACCESS_ONCE(rxd->rxd_info4) = 0;
	}

#if !defined (CONFIG_RAETH_QDMATX_QDMARX)
	/* init QDMA RX stub ring (map one buffer to all RXD) */
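	/* in QDMA-TX/PDMA-RX mode this ring never carries real traffic, so one
	 * dummy buffer is shared by all descriptors just to let the QDMA RX
	 * engine start (see the note at QRX_BASE_PTR0 below) */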
	rxd_buf_phy = dma_map_single(NULL, ei_local->qrx_buff->data, MAX_RX_LENGTH + NET_IP_ALIGN, DMA_FROM_DEVICE);

	for (i = 0; i < NUM_QRX_DESC; i++) {
		struct PDMA_rxdesc *rxd = &ei_local->qrx_ring[i];
		
		ACCESS_ONCE(rxd->rxd_info1) = (u32)rxd_buf_phy;
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
		ACCESS_ONCE(rxd->rxd_info3) = 0;
		ACCESS_ONCE(rxd->rxd_info4) = 0;
	}
#endif

	wmb();

	/* clear QDMA */
	regVal = sysRegRead(QDMA_GLO_CFG);
	regVal &= ~(CSR_CLKGATE | RX_DMA_EN | TX_DMA_EN);
	sysRegWrite(QDMA_GLO_CFG, regVal);

	/* clear PDMA */
	regVal = sysRegRead(PDMA_GLO_CFG);
	regVal &= ~(CSR_CLKGATE | RX_DMA_EN | TX_DMA_EN);
	sysRegWrite(PDMA_GLO_CFG, regVal);

	/* PPE QoS -> QDMA HW TX pool */
	sysRegWrite(QDMA_FQ_HEAD, (u32)ei_local->fq_head_phy);
	sysRegWrite(QDMA_FQ_TAIL, (u32)fq_tail_phy);
	sysRegWrite(QDMA_FQ_CNT,  cpu_to_le32((NUM_TX_DESC << 16) | NUM_QDMA_PAGE));
	sysRegWrite(QDMA_FQ_BLEN, cpu_to_le32(QDMA_PAGE_SIZE << 16));

#if defined (CONFIG_RAETH_QDMATX_QDMARX)
	/* GDMA1/2 -> QDMA RX ring #0 */
	sysRegWrite(QRX_BASE_PTR0, phys_to_bus((u32)ei_local->rxd_ring_phy));
	sysRegWrite(QRX_MAX_CNT0, cpu_to_le32(NUM_RX_DESC));
	sysRegWrite(QRX_CRX_IDX0, cpu_to_le32(NUM_RX_DESC - 1));
#else
	/* GDMA1/2 -> PDMA RX ring #0 */
	sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32)ei_local->rxd_ring_phy));
	sysRegWrite(RX_MAX_CNT0,  cpu_to_le32(NUM_RX_DESC));
	sysRegWrite(RX_CALC_IDX0, cpu_to_le32(NUM_RX_DESC - 1));
	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);

	/* GDMA1/2 -> QDMA RX stub ring #0 (not used for traffic, but required so QDMA RX DMA can be started) */
	sysRegWrite(QRX_BASE_PTR0, phys_to_bus((u32)ei_local->qrx_ring_phy));
	sysRegWrite(QRX_MAX_CNT0, cpu_to_le32(NUM_QRX_DESC));
	sysRegWrite(QRX_CRX_IDX0, cpu_to_le32(NUM_QRX_DESC-1));
#endif
	sysRegWrite(QDMA_RST_CFG, PST_DRX_IDX0);

	/* Reserve 4 TXD for each physical queue */
	for (i = 0; i < 16; i++)
		sysRegWrite(QTX_CFG_0 + 0x10*i, ((NUM_PQ_RESV << 8) | NUM_PQ_RESV));

	/* get free txd from pool for RLS (release) */
	txd_idx = get_free_txd(ei_local);
	txd_free_phy = get_txd_ptr_phy(ei_local, txd_idx);
	sysRegWrite(QTX_CRX_PTR, (u32)txd_free_phy);
	sysRegWrite(QTX_DRX_PTR, (u32)txd_free_phy);

	/* get free txd from pool for FWD (forward) */
	txd_idx = get_free_txd(ei_local);
	txd_free_phy = get_txd_ptr_phy(ei_local, txd_idx);
	ei_local->txd_last_idx = txd_idx;
	sysRegWrite(QTX_CTX_PTR, (u32)txd_free_phy);
	sysRegWrite(QTX_DTX_PTR, (u32)txd_free_phy);

	/* reset TX indexes for queues 0~15 */
	sysRegWrite(QDMA_RST_CFG, 0xffff);

	/* enable random early drop and set drop threshold automatically */
	sysRegWrite(QDMA_FC_THRES, 0x174444);
	sysRegWrite(QDMA_HRED2, 0x0);

	/* config DLY interrupt */
	sysRegWrite(DLY_INT_CFG, FE_DLY_INIT_VALUE);
	sysRegWrite(QDMA_DELAY_INT, FE_DLY_INIT_VALUE);
}
Example #3
int mtk_aes_process_sg(struct scatterlist* sg_src,
		struct scatterlist* sg_dst,
		struct mcrypto_ctx *ctx,
		unsigned int nbytes,
		unsigned int mode)
{
	struct scatterlist *next_dst, *next_src;
	struct AES_txdesc* txdesc;
	struct AES_rxdesc* rxdesc;
	u32 aes_txd_info4;
	u32 aes_size_total, aes_size_chunk, aes_free_desc;
	u32 aes_tx_scatter = 0;
	u32 aes_rx_gather = 0;
	u32 i = 1, j = 1;
	unsigned long flags = 0;

	next_src = sg_src;
	next_dst = sg_dst;

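	/* skip any leading zero-length scatterlist entries; bail out if the
	 * list contains no data at all */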
	while (sg_dma_len(next_src) == 0) {
		if (sg_is_last(next_src))
			return -EINVAL;
		next_src = sg_next(next_src);
	}

	while (sg_dma_len(next_dst) == 0) {
		if (sg_is_last(next_dst))
			return -EINVAL;
		next_dst = sg_next(next_dst);
	}

	if (ctx->keylen == AES_KEYSIZE_256)
		aes_txd_info4 = TX4_DMA_AES_256;
	else if (ctx->keylen == AES_KEYSIZE_192)
		aes_txd_info4 = TX4_DMA_AES_192;
	else
		aes_txd_info4 = TX4_DMA_AES_128;

	if (mode & MCRYPTO_MODE_ENC)
		aes_txd_info4 |= TX4_DMA_ENC;

	if (mode & MCRYPTO_MODE_CBC)
		aes_txd_info4 |= TX4_DMA_CBC | TX4_DMA_IVR;

	spin_lock_irqsave(&AES_Entry.page_lock, flags);

	DBGPRINT(DBG_HIGH, "\nStart new scatter, TX [front=%u rear=%u]; RX [front=%u rear=%u]\n",
			AES_Entry.aes_tx_front_idx, AES_Entry.aes_tx_rear_idx,
			AES_Entry.aes_rx_front_idx, AES_Entry.aes_rx_rear_idx);

	aes_size_total = nbytes;

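	/* compute the number of free TX descriptors from the front/rear ring
	 * indices (the first branch handles ring wrap-around) */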
	if (AES_Entry.aes_tx_front_idx > AES_Entry.aes_tx_rear_idx)
		aes_free_desc = NUM_AES_TX_DESC - (AES_Entry.aes_tx_front_idx - AES_Entry.aes_tx_rear_idx);
	else
		aes_free_desc = AES_Entry.aes_tx_rear_idx - AES_Entry.aes_tx_front_idx;

	/* TX descriptor */
	while (1) {
		if (i > aes_free_desc) {
			spin_unlock_irqrestore(&AES_Entry.page_lock, flags);
			return -EAGAIN;
		}
		
		aes_tx_scatter = (AES_Entry.aes_tx_rear_idx + i) % NUM_AES_TX_DESC;
		txdesc = &AES_Entry.AES_tx_ring0[aes_tx_scatter];
		
		if (sg_dma_len(next_src) == 0)
			goto next_desc_tx;
		
		aes_size_chunk = min(aes_size_total, sg_dma_len(next_src));
		
		DBGPRINT(DBG_HIGH, "AES set TX Desc[%u] Src=%08X, len=%d, Key=%08X, klen=%d\n",
			aes_tx_scatter, (u32)sg_virt(next_src), aes_size_chunk, (u32)ctx->key, ctx->keylen);
		
		if ((mode & MCRYPTO_MODE_CBC) && (i == 1)) {
			if (!ctx->iv)
				memset((void*)txdesc->IV, 0xFF, sizeof(uint32_t)*4);
			else
				memcpy((void*)txdesc->IV, ctx->iv, sizeof(uint32_t)*4);
			txdesc->txd_info4 = aes_txd_info4 | TX4_DMA_KIU;
		} else {
			txdesc->txd_info4 = aes_txd_info4;
		}
		
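		/* the first descriptor also carries the AES key in segment 0
		 * (SDP0); the payload chunk always goes into segment 1 (SDP1) */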
		if (i == 1) {
			txdesc->SDP0 = (u32)dma_map_single(NULL, ctx->key, ctx->keylen, DMA_TO_DEVICE);
			txdesc->txd_info2 = TX2_DMA_SDL0_SET(ctx->keylen);
		} else {
			txdesc->txd_info2 = 0;
		}
		
		txdesc->SDP1 = (u32)dma_map_single(NULL, sg_virt(next_src), aes_size_chunk, DMA_TO_DEVICE);
		txdesc->txd_info2 |= TX2_DMA_SDL1_SET(aes_size_chunk);
		
		i++;
		aes_size_total -= aes_size_chunk;
next_desc_tx:
		if (!aes_size_total || sg_is_last(next_src)) {
			txdesc->txd_info2 |= TX2_DMA_LS1;
			break;
		}
		
		next_src = sg_next(next_src);
	}

	aes_size_total = nbytes;

	if (AES_Entry.aes_rx_front_idx > AES_Entry.aes_rx_rear_idx)
		aes_free_desc = NUM_AES_RX_DESC - (AES_Entry.aes_rx_front_idx - AES_Entry.aes_rx_rear_idx);
	else
		aes_free_desc = AES_Entry.aes_rx_rear_idx - AES_Entry.aes_rx_front_idx;

	/* RX descriptor */
	while (1) {
		if (j > aes_free_desc) {
			spin_unlock_irqrestore(&AES_Entry.page_lock, flags);
			return -EAGAIN;
		}
		
		aes_rx_gather = (AES_Entry.aes_rx_rear_idx + j) % NUM_AES_RX_DESC;
		rxdesc = &AES_Entry.AES_rx_ring0[aes_rx_gather];
		
		if (sg_dma_len(next_dst) == 0)
			goto next_desc_rx;
		
		aes_size_chunk = min(aes_size_total, sg_dma_len(next_dst));
		
		DBGPRINT(DBG_HIGH, "AES set RX Desc[%u] Dst=%08X, len=%d\n",
			aes_rx_gather, (u32)sg_virt(next_dst), aes_size_chunk);
		
		rxdesc->SDP0 = dma_map_single(NULL, sg_virt(next_dst), aes_size_chunk, DMA_FROM_DEVICE);
		rxdesc->rxd_info2 = RX2_DMA_SDL0_SET(aes_size_chunk);
		
		j++;
		aes_size_total -= aes_size_chunk;
next_desc_rx:
		if (!aes_size_total || sg_is_last(next_dst)) {
			rxdesc->rxd_info2 |= RX2_DMA_LS0;
			break;
		}
		
		next_dst = sg_next(next_dst);
	}

	AES_Entry.aes_tx_rear_idx = aes_tx_scatter;
	AES_Entry.aes_rx_rear_idx = aes_rx_gather;

	DBGPRINT(DBG_MID, "TX [front=%u rear=%u]; RX [front=%u rear=%u]\n",
		AES_Entry.aes_tx_front_idx, AES_Entry.aes_tx_rear_idx,
		AES_Entry.aes_rx_front_idx, AES_Entry.aes_rx_rear_idx);

#if defined (CONFIG_CRYPTO_DEV_MTK_AES_INT)
	INIT_COMPLETION(AES_Entry.op_complete);
#endif

	wmb();

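	/* move the CPU TX index past the last filled descriptor; writing it to
	 * the ring register hands the new chain to the AES DMA engine */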
	aes_tx_scatter = (aes_tx_scatter + 1) % NUM_AES_TX_DESC;
	sysRegWrite(AES_TX_CTX_IDX0, cpu_to_le32(aes_tx_scatter));

	spin_unlock_irqrestore(&AES_Entry.page_lock, flags);

#if defined (CONFIG_CRYPTO_DEV_MTK_AES_INT)
	if (wait_for_completion_timeout(&AES_Entry.op_complete, msecs_to_jiffies(200)) == 0) {
		printk("\n%s: PDMA timeout!\n", AES_MODNAME);
		return -ETIMEDOUT;
	}
#endif

	return mtk_aes_poll_done();
}
Example #4
/* must be spinlock protected */
static void
fe_dma_init(END_DEVICE *ei_local)
{
	int i;
	u32 regVal;

	/* init PDMA TX ring */
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct PDMA_txdesc *txd = &ei_local->txd_ring[i];
		
		ei_local->txd_buff[i] = NULL;
		
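		/* TX2_DMA_DONE marks the descriptor as completed (CPU-owned),
		 * i.e. free for the transmit path to reuse */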
		ACCESS_ONCE(txd->txd_info1) = 0;
		ACCESS_ONCE(txd->txd_info2) = TX2_DMA_DONE;
#if defined (RAETH_PDMA_V2)
		ACCESS_ONCE(txd->txd_info4) = 0;
#else
		ACCESS_ONCE(txd->txd_info4) = TX4_DMA_QN(3);
#endif
		ACCESS_ONCE(txd->txd_info3) = 0;
	}

	/* init PDMA RX ring */
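	/* PDMA v2 writes packet data at a NET_IP_ALIGN offset inside the buffer
	 * (pdma_recv above advances rx_skb->data accordingly), so the mapping
	 * includes the headroom; older PDMA expects a pre-aligned buffer */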
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct PDMA_rxdesc *rxd = &ei_local->rxd_ring[i];
#if defined (RAETH_PDMA_V2)
		ACCESS_ONCE(rxd->rxd_info1) = (u32)dma_map_single(NULL, ei_local->rxd_buff[i]->data, MAX_RX_LENGTH + NET_IP_ALIGN, DMA_FROM_DEVICE);
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
		ACCESS_ONCE(rxd->rxd_info1) = (u32)dma_map_single(NULL, ei_local->rxd_buff[i]->data, MAX_RX_LENGTH, DMA_FROM_DEVICE);
		ACCESS_ONCE(rxd->rxd_info2) = RX2_DMA_LS0;
#endif
		ACCESS_ONCE(rxd->rxd_info3) = 0;
		ACCESS_ONCE(rxd->rxd_info4) = 0;
	}

	wmb();

	/* clear PDMA */
	regVal = sysRegRead(PDMA_GLO_CFG);
	regVal &= ~(CSR_CLKGATE | RX_DMA_EN | TX_DMA_EN);
	sysRegWrite(PDMA_GLO_CFG, regVal);

	/* GDMA1/2 <- TX Ring #0 */
	sysRegWrite(TX_BASE_PTR0, phys_to_bus((u32)ei_local->txd_ring_phy));
	sysRegWrite(TX_MAX_CNT0, cpu_to_le32(NUM_TX_DESC));
	sysRegWrite(TX_CTX_IDX0, 0);
	sysRegWrite(PDMA_RST_CFG, PST_DTX_IDX0);
	ei_local->txd_last_idx = le32_to_cpu(sysRegRead(TX_CTX_IDX0));
	ei_local->txd_free_idx = ei_local->txd_last_idx;

	/* GDMA1/2 -> RX Ring #0 */
	sysRegWrite(RX_BASE_PTR0, phys_to_bus((u32)ei_local->rxd_ring_phy));
	sysRegWrite(RX_MAX_CNT0, cpu_to_le32(NUM_RX_DESC));
	sysRegWrite(RX_CALC_IDX0, cpu_to_le32(NUM_RX_DESC - 1));
	sysRegWrite(PDMA_RST_CFG, PST_DRX_IDX0);

	/* only the following chipsets need this setting */
#if defined (CONFIG_RALINK_RT3052) || defined (CONFIG_RALINK_RT3883)
	// set the 1us timer count in units of clock cycles
	regVal = sysRegRead(FE_GLO_CFG);
	regVal &= ~(0xff << 8);	// clear bits 8-15
	regVal |= ((get_surfboard_sysclk() / 1000000) << 8);
	sysRegWrite(FE_GLO_CFG, regVal);
#endif

	/* config DLY interrupt */
	sysRegWrite(DLY_INT_CFG, FE_DLY_INIT_VALUE);
}