Example #1
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	/* ra_sw_nat_hook_rx return 1 --> continue
	 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
	 */
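	/*
	 * Tag the skb's FoE (flow-offload) control block: the packet came in
	 * over the extension (PCI/USB) interface and has not yet matched a
	 * hardware flow entry.
	 */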
	FOE_MAGIC_TAG(skb) = FOE_MAGIC_PCI;
	FOE_AI(skb) = UN_HIT;
	if (ra_sw_nat_hook_rx != NULL) {
		if (ra_sw_nat_hook_rx(skb)) {
			status = netif_rx (skb);
			if (status != NET_RX_SUCCESS)
				netif_dbg(dev, rx_err, dev->net, "netif_rx status %d\n", status);
		}
	} else {
		status = netif_rx (skb);
		if (status != NET_RX_SUCCESS)
			netif_dbg(dev, rx_err, dev->net, "netif_rx status %d\n", status);
	}

#else
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
#endif

}
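These call sites all repeat the same dispatch: if the HW-NAT hook is absent or returns 1, the packet goes up the stack through netif_rx(); if the hook returns 0, the hardware path has already forwarded it and the driver must not touch the skb again. A minimal sketch of that contract factored into one helper (the helper name hw_nat_rx_or_netif_rx is hypothetical; only ra_sw_nat_hook_rx and the 1/0 convention come from the examples):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Exported by the Ralink HW-NAT module when it is loaded, NULL otherwise. */
extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);

/* Hypothetical helper: returns the netif_rx() status, or NET_RX_SUCCESS
 * when the HW-NAT engine has consumed (forwarded) the packet itself. */
static int hw_nat_rx_or_netif_rx(struct sk_buff *skb)
{
	if (ra_sw_nat_hook_rx && !ra_sw_nat_hook_rx(skb))
		return NET_RX_SUCCESS;	/* hook returned 0: FWD without netif_rx */
	return netif_rx(skb);		/* no hook, or hook returned 1: continue */
}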
Example #2
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	/* ra_sw_nat_hook_rx return 1 --> continue
	 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
	 */
	FOE_MAGIC_TAG(skb) = FOE_MAGIC_EXTIF;
	FOE_AI_UNHIT(skb);
	if (ra_sw_nat_hook_rx != NULL) {
		if (ra_sw_nat_hook_rx(skb)) {
			status = netif_rx (skb);
			if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
				devdbg (dev, "netif_rx status %d", status);
		}
	} else {
		status = netif_rx (skb);
		if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
			devdbg (dev, "netif_rx status %d", status);
	}

#else
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
#endif
}
Example #3
static inline int
pdma_recv(struct net_device* dev, END_DEVICE* ei_local, int work_todo)
{
	struct PDMA_rxdesc *rxd_ring;
	struct sk_buff *new_skb, *rx_skb;
	int gmac_no = PSE_PORT_GMAC1;
	int work_done = 0;
	u32 rxd_dma_owner_idx;
	u32 rxd_info2, rxd_info4;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
	u32 rxd_info3;
#endif
#if defined (CONFIG_RAETH_SPECIAL_TAG)
	struct vlan_ethhdr *veth;
#endif

	/* resume after the last RX descriptor index the CPU handed back to DMA */
	rxd_dma_owner_idx = le32_to_cpu(sysRegRead(RX_CALC_IDX0));

	while (work_done < work_todo) {
		rxd_dma_owner_idx = (rxd_dma_owner_idx + 1) % NUM_RX_DESC;
		rxd_ring = &ei_local->rxd_ring[rxd_dma_owner_idx];
		
		if (!(rxd_ring->rxd_info2 & RX2_DMA_DONE))
			break;
		
		/* load completed skb pointer */
		rx_skb = ei_local->rxd_buff[rxd_dma_owner_idx];
		
		/* copy RX desc to CPU */
		rxd_info2 = rxd_ring->rxd_info2;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
		rxd_info3 = rxd_ring->rxd_info3;
#endif
		rxd_info4 = rxd_ring->rxd_info4;
		
#if defined (CONFIG_PSEUDO_SUPPORT)
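		/* which PSE source port (GMAC1 or GMAC2) received the frame */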
		gmac_no = RX4_DMA_SP(rxd_info4);
#endif
		/* Allocate a replacement buffer before handing the packet to the
		 * CPU; if allocation fails, recycle the old buffer and drop. */
		new_skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
		if (unlikely(new_skb == NULL)) {
#if defined (RAETH_PDMA_V2)
			rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
			rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
			/* move CPU pointer to next RXD */
			sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
			
			inc_rx_drop(ei_local, gmac_no);
#if !defined (CONFIG_RAETH_NAPI)
			/* mean need reschedule */
			work_done = work_todo;
#endif
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: Failed to alloc new RX skb! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
			break;
		}
#if !defined (RAETH_PDMA_V2)
		skb_reserve(new_skb, NET_IP_ALIGN);
#endif
		/* store new empty skb pointer */
		ei_local->rxd_buff[rxd_dma_owner_idx] = new_skb;
		
		/* map new skb to ring (unmap is not required on generic mips mm) */
		rxd_ring->rxd_info1 = (u32)dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH, DMA_FROM_DEVICE);
#if defined (RAETH_PDMA_V2)
		rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
		rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
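		/* publish the refilled descriptor before advancing the CPU index */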
		wmb();
		
		/* move CPU pointer to next RXD */
		sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
		
		/* skb processing */
		rx_skb->len = RX2_DMA_SDL0_GET(rxd_info2);
#if defined (RAETH_PDMA_V2)
		rx_skb->data += NET_IP_ALIGN;
#endif
		rx_skb->tail = rx_skb->data + rx_skb->len;

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
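		/* Tag the skb as coming from the GE (GMAC) path and copy the RX
		 * descriptor's flow info, minus the ALG bit, into the FoE header
		 * for the HW-NAT engine. */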
		FOE_MAGIC_TAG(rx_skb) = FOE_MAGIC_GE;
		DO_FILL_FOE_DESC(rx_skb, (rxd_info4 & ~(RX4_DMA_ALG_SET)));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
		if (rxd_info4 & RX4_DMA_L4FVLD)
			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif

#if defined (CONFIG_RAETH_HW_VLAN_RX)
		if ((rxd_info2 & RX2_DMA_TAG) && rxd_info3) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
			__vlan_hwaccel_put_tag(rx_skb, __constant_htons(ETH_P_8021Q), RX3_DMA_VID(rxd_info3));
#else
			__vlan_hwaccel_put_tag(rx_skb, RX3_DMA_VID(rxd_info3));
#endif
		}
#endif

#if defined (CONFIG_PSEUDO_SUPPORT)
		if (gmac_no == PSE_PORT_GMAC2)
			rx_skb->protocol = eth_type_trans(rx_skb, ei_local->PseudoDev);
		else
#endif
			rx_skb->protocol = eth_type_trans(rx_skb, dev);

#if defined (CONFIG_RAETH_SPECIAL_TAG)
#if defined (CONFIG_MT7530_GSW)
#define ESW_TAG_ID	0x00
#else
#define ESW_TAG_ID	0x81
#endif
		// port0: 0x8100 => 0x8100 0001
		// port1: 0x8101 => 0x8100 0002
		// port2: 0x8102 => 0x8100 0003
		// port3: 0x8103 => 0x8100 0004
		// port4: 0x8104 => 0x8100 0005
		// port5: 0x8105 => 0x8100 0006
		veth = vlan_eth_hdr(rx_skb);
		if ((veth->h_vlan_proto & 0xFF) == ESW_TAG_ID) {
			veth->h_vlan_TCI = htons((((veth->h_vlan_proto >> 8) & 0xF) + 1));
			veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);
			rx_skb->protocol = veth->h_vlan_proto;
		}
#endif

		/* ra_sw_nat_hook_rx return 1 --> continue
		 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
		 */
#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		if ((ra_sw_nat_hook_rx == NULL) || ra_sw_nat_hook_rx(rx_skb))
#endif
		{
#if defined (CONFIG_RAETH_NAPI)
#if defined (CONFIG_RAETH_NAPI_GRO)
			if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&ei_local->napi, rx_skb);
			else
#endif
			netif_receive_skb(rx_skb);
#else
			netif_rx(rx_skb);
#endif
		}
		
		work_done++;
	}

	return work_done;
}
Example #4
void announce_802_3_packet(
	IN	VOID			*pAdSrc,
	IN	PNDIS_PACKET	pPacket,
	IN	UCHAR			OpMode)
{
	PRTMP_ADAPTER	pAd = (PRTMP_ADAPTER)pAdSrc;
/*	struct sk_buff	*pRxPkt; */
	PNDIS_PACKET pRxPkt;
#ifdef INF_PPA_SUPPORT
	int ret = 0;
	unsigned int ppa_flags = 0; /* reserved for now */
#endif /* INF_PPA_SUPPORT */

	pAd = pAd; /* avoid compile warning */

	MEM_DBG_PKT_FREE_INC(pPacket);


	ASSERT(pPacket);

/*	pRxPkt = RTPKT_TO_OSPKT(pPacket); */
	pRxPkt = pPacket;

#ifdef CONFIG_STA_SUPPORT
#endif /* CONFIG_STA_SUPPORT */

    /* Push up the protocol stack */

#ifdef IKANOS_VX_1X0
	IKANOS_DataFrameRx(pAd, pRxPkt);
	return;
#endif /* IKANOS_VX_1X0 */


	/* mark for bridge fast path, 2009/06/22 */
	/* pRxPkt->protocol = eth_type_trans(pRxPkt, pRxPkt->dev); */

#ifdef INF_PPA_SUPPORT
	if (ppa_hook_directpath_send_fn && pAd->PPAEnable == TRUE) {
		RtmpOsPktInfPpaSend(pRxPkt);
		pRxPkt = NULL;
		return;
	}
#endif /* INF_PPA_SUPPORT */

	{
#ifdef CONFIG_RT2880_BRIDGING_ONLY
/*	pRxPkt->cb[22]=0xa8; */
		PACKET_CB_ASSIGN(pRxPkt, 22) = 0xa8;
#endif

#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
		if (ra_classifier_hook_rx != NULL) {
			unsigned int flags;
			
			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			ra_classifier_hook_rx(pRxPkt, classifier_cur_cycle);
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
		}
#endif /* CONFIG_RA_CLASSIFIER */

#if !defined(CONFIG_RA_NAT_NONE)
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
		RtmpOsPktNatMagicTag(pRxPkt);
#endif

		/* bruce+
		 * ra_sw_nat_hook_rx return 1 --> continue
		 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
		 */
		if (ra_sw_nat_hook_rx != NULL) {
			unsigned int flags;

			/* pRxPkt->protocol = eth_type_trans(pRxPkt, pRxPkt->dev); */
			RtmpOsPktProtocolAssign(pRxPkt);

			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			if (ra_sw_nat_hook_rx(pRxPkt))
				RtmpOsPktRcvHandle(pRxPkt);
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
			/* either way the hook path has handled the packet;
			 * returning avoids delivering it a second time below */
			return;
		}
#else
		{
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
		RtmpOsPktNatNone(pRxPkt);
#endif /* CONFIG_RA_HW_NAT */
		}
#endif /* CONFIG_RA_NAT_NONE */
	}

	/* pRxPkt->protocol = eth_type_trans(pRxPkt, pRxPkt->dev); */
	RtmpOsPktProtocolAssign(pRxPkt);
	RtmpOsPktRcvHandle(pRxPkt);
}
Example #5
void announce_802_3_packet(
	IN VOID *pAdSrc,
	IN PNDIS_PACKET pPacket,
	IN UCHAR OpMode)
{
	RTMP_ADAPTER *pAd;
	PNDIS_PACKET pRxPkt = pPacket;

	pAd = (RTMP_ADAPTER *)pAdSrc;

	ASSERT(pPacket);
	MEM_DBG_PKT_FREE_INC(pPacket);


#ifdef CONFIG_AP_SUPPORT
#ifdef APCLI_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_AP(pAd)
	{
		if (RTMP_MATPktRxNeedConvert(pAd, RtmpOsPktNetDevGet(pRxPkt)))
			RTMP_MATEngineRxHandle(pAd, pRxPkt, 0);
	}
#endif /* APCLI_SUPPORT */
#endif /* CONFIG_AP_SUPPORT */


    /* Push up the protocol stack */
#ifdef CONFIG_AP_SUPPORT
#ifdef PLATFORM_BL2348
{
	extern int (*pToUpperLayerPktSent)(PNDIS_PACKET *pSkb);
	RtmpOsPktProtocolAssign(pRxPkt);
	pToUpperLayerPktSent(pRxPkt);
	return;
}
#endif /* PLATFORM_BL2348 */
#endif /* CONFIG_AP_SUPPORT */

#ifdef IKANOS_VX_1X0
{
	IKANOS_DataFrameRx(pAd, pRxPkt);
	return;
}
#endif /* IKANOS_VX_1X0 */

#ifdef INF_PPA_SUPPORT
	if (ppa_hook_directpath_send_fn && pAd->PPAEnable == TRUE) {
		RtmpOsPktInfPpaSend(pRxPkt);
		pRxPkt = NULL;
		return;
	}
#endif /* INF_PPA_SUPPORT */

	{
#ifdef CONFIG_RT2880_BRIDGING_ONLY
		PACKET_CB_ASSIGN(pRxPkt, 22) = 0xa8;
#endif

#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
		if (ra_classifier_hook_rx != NULL) {
			unsigned int flags;
			
			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			ra_classifier_hook_rx(pRxPkt, classifier_cur_cycle);
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
		}
#endif /* CONFIG_RA_CLASSIFIER */

#if !defined(CONFIG_RA_NAT_NONE)
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
		{
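			/* mark the packet as arriving from the WLAN side so the
			 * HW-NAT engine can bind a flow entry to it */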
			struct sk_buff *pRxPktb = RTPKT_TO_OSPKT(pRxPkt);
			FOE_MAGIC_TAG(pRxPktb) = FOE_MAGIC_WLAN;
		}
#endif

#ifdef RA_NAT_SUPPORT
#if !defined(CONFIG_RA_NAT_NONE)
		/* bruce+
		 * ra_sw_nat_hook_rx return 1 --> continue
		 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
		 */
		if (ra_sw_nat_hook_rx != NULL) {
			unsigned int flags;
			
			RtmpOsPktProtocolAssign(pRxPkt);

			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			if (ra_sw_nat_hook_rx(pRxPkt)) {
				netif_rx(pRxPkt);
			}
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
			return;
		}
#endif /* !CONFIG_RA_NAT_NONE */
#endif /* RA_NAT_SUPPORT */
#else
		{
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
			FOE_AI(((struct sk_buff *)pRxPkt)) = UN_HIT;
#endif /* CONFIG_RA_HW_NAT */
		}
#endif /* CONFIG_RA_NAT_NONE */
	}
	
#ifdef CONFIG_AP_SUPPORT
#ifdef BG_FT_SUPPORT
	if (BG_FTPH_PacketFromApHandle(pRxPkt) == 0)
		return;
#endif /* BG_FT_SUPPORT */
#endif /* CONFIG_AP_SUPPORT */

	RtmpOsPktProtocolAssign(pRxPkt);
	RtmpOsPktRcvHandle(pRxPkt);
}
Example #6
void announce_802_3_packet(void *pAdSrc, PNDIS_PACKET pPacket, unsigned char OpMode)
{
	RTMP_ADAPTER *pAd = (RTMP_ADAPTER *)pAdSrc;
	PNDIS_PACKET pRxPkt = pPacket;

	ASSERT(pPacket);
	MEM_DBG_PKT_FREE_INC(pPacket);

#ifdef CONFIG_STA_SUPPORT
#endif /* CONFIG_STA_SUPPORT */

    /* Push up the protocol stack */

#ifdef IKANOS_VX_1X0
{
	IKANOS_DataFrameRx(pAd, pRxPkt);
	return;
}
#endif /* IKANOS_VX_1X0 */

#ifdef INF_PPA_SUPPORT
	if (ppa_hook_directpath_send_fn && pAd->PPAEnable == TRUE) {
		RtmpOsPktInfPpaSend(pRxPkt);
		pRxPkt = NULL;
		return;
	}
#endif /* INF_PPA_SUPPORT */

	{
#ifdef CONFIG_RT2880_BRIDGING_ONLY
		PACKET_CB_ASSIGN(pRxPkt, 22) = 0xa8;
#endif

#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
		if (ra_classifier_hook_rx != NULL) {
			unsigned int flags;
			
			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			ra_classifier_hook_rx(pRxPkt, classifier_cur_cycle);
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
		}
#endif /* CONFIG_RA_CLASSIFIER */

#if !defined(CONFIG_RA_NAT_NONE)
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
		RtmpOsPktNatMagicTag(pRxPkt);
#endif

#ifdef RA_NAT_SUPPORT
		/* bruce+
		 * ra_sw_nat_hook_rx return 1 --> continue
		 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
		 */
		if (ra_sw_nat_hook_rx != NULL) {
			unsigned int flags;

			RtmpOsPktProtocolAssign(pRxPkt);

			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			if (ra_sw_nat_hook_rx(pRxPkt))
				RtmpOsPktRcvHandle(pRxPkt);
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
			/* the hook path has handled the packet either way;
			 * return to avoid delivering it a second time below */
			return;
		}
#endif /* RA_NAT_SUPPORT */
#else
		{
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
			RtmpOsPktNatNone(pRxPkt);
#endif /* CONFIG_RA_HW_NAT */
		}
#endif /* CONFIG_RA_NAT_NONE */
	}

	RtmpOsPktProtocolAssign(pRxPkt);
	RtmpOsPktRcvHandle(pRxPkt);
}
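Common to every example is the NULL test before the call, which implies the usual Linux pattern of an exported hook pointer that the HW-NAT module installs at load time and clears on unload. A sketch of that registration side, assuming the pointer is defined in the ethernet driver and hw_nat_rx_handler is a hypothetical name for the module's handler:

#include <linux/module.h>
#include <linux/skbuff.h>

/* In the ethernet driver: the hook slot, NULL until the module loads. */
int (*ra_sw_nat_hook_rx)(struct sk_buff *skb) = NULL;
EXPORT_SYMBOL(ra_sw_nat_hook_rx);

/* In the HW-NAT module: hypothetical handler obeying the 1/0 contract. */
static int hw_nat_rx_handler(struct sk_buff *skb)
{
	/* ... try to bind/offload the flow; return 0 if forwarded in HW ... */
	return 1;	/* 1: let the caller continue with netif_rx() */
}

static int __init hw_nat_init(void)
{
	ra_sw_nat_hook_rx = hw_nat_rx_handler;
	return 0;
}

static void __exit hw_nat_exit(void)
{
	ra_sw_nat_hook_rx = NULL;
}

module_init(hw_nat_init);
module_exit(hw_nat_exit);
MODULE_LICENSE("GPL");

A real module would also have to synchronize the unload against callers that may already have loaded the pointer, for example with RCU or by holding a module reference while the hook runs.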