static unsigned int ipv4_conntrack_help(unsigned int hooknum,
				      struct sk_buff **pskb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_help *help;

	/* This is where we call the helper: as the packet goes out. */
	ct = nf_ct_get(*pskb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
		return NF_ACCEPT;

	help = nfct_help(ct);
	if (!help || !help->helper)
		return NF_ACCEPT;

#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	if ((skb_headroom(*pskb) >= 4) &&
	    ((FOE_MAGIC_TAG(*pskb) == FOE_MAGIC_PCI) ||
	     (FOE_MAGIC_TAG(*pskb) == FOE_MAGIC_WLAN) ||
	     (FOE_MAGIC_TAG(*pskb) == FOE_MAGIC_GE))) {
		FOE_ALG_RXIF(*pskb) = 1;
	}
#endif

	return help->helper->help(pskb,
			       (*pskb)->nh.raw - (*pskb)->data
					       + (*pskb)->nh.iph->ihl*4,
			       ct, ctinfo);
}
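The hook above never drops packets: it only gives an attached ALG helper a chance to inspect them, and (in the hwnat build) marks helper-handled flows so the hardware fast path leaves them alone. As a rough sketch of how such a function would be wired in with the 2.6-era netfilter API (the hook point and priority here are assumptions, not taken from this source):

static struct nf_hook_ops ipv4_helper_ops = {
	.hook		= ipv4_conntrack_help,
	.owner		= THIS_MODULE,
	.pf		= PF_INET,
	.hooknum	= NF_IP_POST_ROUTING,		/* assumed hook point */
	.priority	= NF_IP_PRI_CONNTRACK_HELPER,	/* assumed priority */
};

/* in module init: nf_register_hook(&ipv4_helper_ops); */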
static unsigned int ipv6_confirm(unsigned int hooknum,
				 struct sk_buff **pskb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	struct nf_conn_help *help;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;
	int protoff;	/* signed: nf_ct_ipv6_skip_exthdr() can return -1 */
	unsigned int extoff = (u8 *)((*pskb)->nh.ipv6h + 1)
			      - (*pskb)->data;
	unsigned char pnum = (*pskb)->nh.ipv6h->nexthdr;

	/* This is where we call the helper: as the packet goes out. */
	ct = nf_ct_get(*pskb, &ctinfo);
	if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
		goto out;

	help = nfct_help(ct);
	if (!help || !help->helper)
		goto out;

	protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
					 (*pskb)->len - extoff);
	if (protoff < 0 || protoff > (*pskb)->len ||
	    pnum == NEXTHDR_FRAGMENT) {
		DEBUGP("proto header not found\n");
		return NF_ACCEPT;
	}

#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	if ((skb_headroom(*pskb) >= 4) &&
	    ((FOE_MAGIC_TAG(*pskb) == FOE_MAGIC_PCI) ||
	     (FOE_MAGIC_TAG(*pskb) == FOE_MAGIC_WLAN) ||
	     (FOE_MAGIC_TAG(*pskb) == FOE_MAGIC_GE))) {
		FOE_ALG_RXIF(*pskb) = 1;
	}
#endif

	ret = help->helper->help(pskb, protoff, ct, ctinfo);
	if (ret != NF_ACCEPT)
		return ret;
out:
	/* We've seen it coming out the other side: confirm it */
	return nf_conntrack_confirm(pskb);
}
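Both hooks end by dispatching to help->helper->help() with the offset of the transport header. For reference, a minimal helper object of that era's nf_conntrack API would look roughly like this (a sketch; the name and body are illustrative, not from this source):

static int example_help(struct sk_buff **pskb, unsigned int protoff,
			struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
	/* parse the payload starting at protoff, set up expectations, etc. */
	return NF_ACCEPT;
}

static struct nf_conntrack_helper example_helper = {
	.name	= "example",
	.me	= THIS_MODULE,
	.help	= example_help,
};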
Example #3
int ip6_input(struct sk_buff *skb)
{

#if defined(CONFIG_RA_SW_NAT) || defined(CONFIG_RA_SW_NAT_MODULE)
	if ((skb_headroom(skb) >= 4) && (FOE_MAGIC_TAG(skb) == FOE_MAGIC_NUM)) {
		FOE_HASH_NUM(skb) |= FOE_ALG_FLAGS;
	}
#elif defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	if (IS_SPACE_AVAILABLED(skb) &&
	    ((FOE_MAGIC_TAG(skb) == FOE_MAGIC_PCI) ||
	     (FOE_MAGIC_TAG(skb) == FOE_MAGIC_WLAN) ||
	     (FOE_MAGIC_TAG(skb) == FOE_MAGIC_GE))) {
		FOE_ALG(skb) = 1;
	}
#endif

	return NF_HOOK(PF_INET6, NF_IP6_LOCAL_IN, skb, skb->dev, NULL, ip6_input_finish);
}
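Every call site guards the FOE macros with a headroom check (skb_headroom(skb) >= 4, or the vendor's IS_SPACE_AVAILABLED(skb)) because the hwnat patch stashes its tag in the skb's headroom rather than in a dedicated skb field. A plausible shape for these accessors, purely illustrative since the real definitions live in the vendor's ra_nat.h:

/* illustrative only -- assumes the tag occupies the first bytes of headroom */
#define FOE_INFO_START_ADDR(skb)	((skb)->head)
#define FOE_MAGIC_TAG(skb)		(*(u16 *)FOE_INFO_START_ADDR(skb))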
Example #4
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	if (test_bit(EVENT_RX_PAUSED, &dev->flags)) {
		skb_queue_tail(&dev->rxq_pause, skb);
		return;
	}

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	 /* ra_sw_nat_hook_rx return 1 --> continue
	  * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
	  */
	FOE_MAGIC_TAG(skb) = FOE_MAGIC_PCI;
	FOE_AI(skb) = UN_HIT;
	if (ra_sw_nat_hook_rx != NULL) {
		if (ra_sw_nat_hook_rx(skb)) {
			status = netif_rx (skb);
			if (status != NET_RX_SUCCESS)
				netif_dbg(dev, rx_err, dev->net,
					  "netif_rx status %d\n", status);
		}
	} else {
		status = netif_rx (skb);
		if (status != NET_RX_SUCCESS)
			netif_dbg(dev, rx_err, dev->net,
				  "netif_rx status %d\n", status);
	}

#else
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS)
		netif_dbg(dev, rx_err, dev->net,
			  "netif_rx status %d\n", status);
#endif

}
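ra_sw_nat_hook_rx is an exported function pointer that stays NULL until the hwnat module loads and fills it in, which is why every caller tests it before use. A stub honoring the documented contract (the return semantics come from the comment in the source; the body is a sketch):

extern int (*ra_sw_nat_hook_rx)(struct sk_buff *skb);

static int example_hook_rx(struct sk_buff *skb)
{
	/* return 1: caller should continue and hand skb to netif_rx();
	 * return 0: hwnat consumed/forwarded skb, caller must not touch it */
	return 1;
}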
Example #5
/* Passes this packet up the stack, updating its accounting.
 * Some link protocols batch packets, so their rx_fixup paths
 * can return clones as well as just modify the original skb.
 */
void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
{
	int	status;

	skb->protocol = eth_type_trans (skb, dev->net);
	dev->stats.rx_packets++;
	dev->stats.rx_bytes += skb->len;

	if (netif_msg_rx_status (dev))
		devdbg (dev, "< rx, len %zu, type 0x%x",
			skb->len + sizeof (struct ethhdr), skb->protocol);
	memset (skb->cb, 0, sizeof (struct skb_data));
#if defined(CONFIG_RA_HW_NAT_PCI) && (defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE))
	 /* ra_sw_nat_hook_rx return 1 --> continue
	  * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
	  */
	FOE_MAGIC_TAG(skb) = FOE_MAGIC_EXTIF;
	FOE_AI_UNHIT(skb);
	if (ra_sw_nat_hook_rx != NULL) {
		if (ra_sw_nat_hook_rx(skb)) {
			status = netif_rx (skb);
			if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
				devdbg (dev, "netif_rx status %d", status);
		}
	} else {
		status = netif_rx (skb);
		if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
			devdbg (dev, "netif_rx status %d", status);
	}

#else
	status = netif_rx (skb);
	if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
		devdbg (dev, "netif_rx status %d", status);
#endif
}
Example #6
static inline int
pdma_recv(struct net_device* dev, END_DEVICE* ei_local, int work_todo)
{
	struct PDMA_rxdesc *rxd_ring;
	struct sk_buff *new_skb, *rx_skb;
	int gmac_no = PSE_PORT_GMAC1;
	int work_done = 0;
	u32 rxd_dma_owner_idx;
	u32 rxd_info2, rxd_info4;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
	u32 rxd_info3;
#endif
#if defined (CONFIG_RAETH_SPECIAL_TAG)
	struct vlan_ethhdr *veth;
#endif

	rxd_dma_owner_idx = le32_to_cpu(sysRegRead(RX_CALC_IDX0));

	while (work_done < work_todo) {
		rxd_dma_owner_idx = (rxd_dma_owner_idx + 1) % NUM_RX_DESC;
		rxd_ring = &ei_local->rxd_ring[rxd_dma_owner_idx];
		
		if (!(rxd_ring->rxd_info2 & RX2_DMA_DONE))
			break;
		
		/* load completed skb pointer */
		rx_skb = ei_local->rxd_buff[rxd_dma_owner_idx];
		
		/* copy RX desc to CPU */
		rxd_info2 = rxd_ring->rxd_info2;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
		rxd_info3 = rxd_ring->rxd_info3;
#endif
		rxd_info4 = rxd_ring->rxd_info4;
		
#if defined (CONFIG_PSEUDO_SUPPORT)
		gmac_no = RX4_DMA_SP(rxd_info4);
#endif
		/* make sure a replacement buffer can be allocated
		 * before passing this packet up to the CPU */
		new_skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
		if (unlikely(new_skb == NULL)) {
#if defined (RAETH_PDMA_V2)
			rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
			rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
			/* move CPU pointer to next RXD */
			sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
			
			inc_rx_drop(ei_local, gmac_no);
#if !defined (CONFIG_RAETH_NAPI)
			/* signal that a reschedule is needed */
			work_done = work_todo;
#endif
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: Failed to alloc new RX skb! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
			break;
		}
#if !defined (RAETH_PDMA_V2)
		skb_reserve(new_skb, NET_IP_ALIGN);
#endif
		/* store new empty skb pointer */
		ei_local->rxd_buff[rxd_dma_owner_idx] = new_skb;
		
		/* map new skb to ring (unmap is not required on generic mips mm) */
		rxd_ring->rxd_info1 = (u32)dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH, DMA_FROM_DEVICE);
#if defined (RAETH_PDMA_V2)
		rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
		rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
		wmb();
		
		/* move CPU pointer to next RXD */
		sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
		
		/* skb processing */
		rx_skb->len = RX2_DMA_SDL0_GET(rxd_info2);
#if defined (RAETH_PDMA_V2)
		rx_skb->data += NET_IP_ALIGN;
#endif
		rx_skb->tail = rx_skb->data + rx_skb->len;

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		FOE_MAGIC_TAG(rx_skb) = FOE_MAGIC_GE;
		DO_FILL_FOE_DESC(rx_skb, (rxd_info4 & ~(RX4_DMA_ALG_SET)));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
		if (rxd_info4 & RX4_DMA_L4FVLD)
			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif

#if defined (CONFIG_RAETH_HW_VLAN_RX)
		if ((rxd_info2 & RX2_DMA_TAG) && rxd_info3) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
			__vlan_hwaccel_put_tag(rx_skb, __constant_htons(ETH_P_8021Q), RX3_DMA_VID(rxd_info3));
#else
			__vlan_hwaccel_put_tag(rx_skb, RX3_DMA_VID(rxd_info3));
#endif
		}
#endif

#if defined (CONFIG_PSEUDO_SUPPORT)
		if (gmac_no == PSE_PORT_GMAC2)
			rx_skb->protocol = eth_type_trans(rx_skb, ei_local->PseudoDev);
		else
#endif
			rx_skb->protocol = eth_type_trans(rx_skb, dev);

#if defined (CONFIG_RAETH_SPECIAL_TAG)
#if defined (CONFIG_MT7530_GSW)
#define ESW_TAG_ID	0x00
#else
#define ESW_TAG_ID	0x81
#endif
		// port0: 0x8100 => 0x8100 0001
		// port1: 0x8101 => 0x8100 0002
		// port2: 0x8102 => 0x8100 0003
		// port3: 0x8103 => 0x8100 0004
		// port4: 0x8104 => 0x8100 0005
		// port5: 0x8105 => 0x8100 0006
		veth = vlan_eth_hdr(rx_skb);
		if ((veth->h_vlan_proto & 0xFF) == ESW_TAG_ID) {
			veth->h_vlan_TCI = htons((((veth->h_vlan_proto >> 8) & 0xF) + 1));
			veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);
			rx_skb->protocol = veth->h_vlan_proto;
		}
#endif

/* ra_sw_nat_hook_rx return 1 --> continue
 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
 */
#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		if ((ra_sw_nat_hook_rx == NULL) || ra_sw_nat_hook_rx(rx_skb))
#endif
		{
#if defined (CONFIG_RAETH_NAPI)
#if defined (CONFIG_RAETH_NAPI_GRO)
			if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&ei_local->napi, rx_skb);
			else
#endif
			netif_receive_skb(rx_skb);
#else
			netif_rx(rx_skb);
#endif
		}
		
		work_done++;
	}

	return work_done;
}
Example #7
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit, tclass;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(ip6_dst_idev(skb->dst),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	tclass = -1;
	if (np)
		tclass = np->tclass;
	if (tclass < 0)
		tclass = 0;

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;

	mtu = ip6_skb_dst_mtu(skb);
	if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
		IP6_INC_STATS(ip6_dst_idev(skb->dst),
			      IPSTATS_MIB_OUTREQUESTS);
#if defined (CONFIG_RA_HW_NAT_IPV6)
#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
		FOE_MAGIC_TAG(skb) = 0;
		FOE_AI_UNHIT(skb);
#endif
#endif
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev,
				dst_output);
	}

	skb->dev = dst->dev;
	ipv6_local_error(sk, EMSGSIZE, fl, mtu);
	IP6_INC_STATS(ip6_dst_idev(skb->dst), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
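The line `*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;` packs the first 32-bit word of the IPv6 header. A worked breakdown of the RFC 2460 layout:

/* bits 31..28: version (6)   -> 0x60000000
 * bits 27..20: traffic class -> tclass << 20
 * bits 19..0 : flow label    -> fl6_flowlabel, which is already stored in
 *                               network byte order, hence OR'd after htonl() */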
Example #8
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = np->cork.rt;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb->h.raw - skb->nh.raw);
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb->nh.ipv6h = hdr = (struct ipv6hdr*) skb_push(skb, sizeof(struct ipv6hdr));

	*(__be32*)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	if (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN)
		hdr->payload_len = htons(skb->len - sizeof(struct ipv6hdr));
	else
		hdr->payload_len = 0;
	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;

	skb->dst = dst_clone(&rt->u.dst);

#if defined (CONFIG_RA_HW_NAT_IPV6)
#if defined(CONFIG_RA_HW_NAT) || defined(CONFIG_RA_HW_NAT_MODULE)
	FOE_MAGIC_TAG(skb) = 0;
	FOE_AI_UNHIT(skb);
#endif
#endif

	IP6_INC_STATS(rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS);
	err = NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(np->cork.opt);
	np->cork.opt = NULL;
	if (np->cork.rt) {
		dst_release(&np->cork.rt->u.dst);
		np->cork.rt = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
	return err;
error:
	goto out;
}
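Note the payload_len branch: when the coalesced packet exceeds IPV6_MAXPLEN (65535 bytes), payload_len is set to 0, which per RFC 2675 marks a jumbogram whose real length travels in a Hop-by-Hop Jumbo Payload option. A condensed restatement of that logic:

/* skb->len counts the IPv6 header too, so the payload only fits in the
 * 16-bit field while skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN */
hdr->payload_len = (skb->len <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) ?
		   htons(skb->len - sizeof(struct ipv6hdr)) : 0;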
Example #9
int fe_qos_packet_send(struct net_device *dev, struct sk_buff* skb, unsigned int ring_no, unsigned int qn, unsigned pn)
{
	END_DEVICE* ei_local = netdev_priv(dev);
	struct PDMA_txdesc* tx_desc;
	unsigned int tx_cpu_owner_idx, tx_dtx_idx;

	unsigned int length = skb->len;
	int ret;
	unsigned long flags;

	//printk("fe_qos_packet_send: ring_no=%d qn=%d pn=%d\n", ring_no, qn, pn);

	switch ( ring_no ) {
		case 0:
			tx_desc = ei_local->tx_ring0;
			tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX0;
			tx_dtx_idx 	 = *(unsigned long*)TX_DTX_IDX0;
			break;
		case 1:
			tx_desc = ei_local->tx_ring1;
			tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX1;
			tx_dtx_idx 	 = *(unsigned long*)TX_DTX_IDX1;
			break;
		case 2:
			tx_desc = ei_local->tx_ring2;
			tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX2;
			tx_dtx_idx 	 = *(unsigned long*)TX_DTX_IDX2;
			break;
		case 3:
			tx_desc = ei_local->tx_ring3;
			tx_cpu_owner_idx = *(unsigned long*)TX_CTX_IDX3;
			tx_dtx_idx 	 = *(unsigned long*)TX_DTX_IDX3;
			break;
		default:
			printk("ring_no input error... %d\n", ring_no);
			return -1;
	}

	//printk("tx_cpu_owner_idx=%d tx_dtx_idx=%d\n", tx_cpu_owner_idx, tx_dtx_idx);

	if(tx_desc == NULL) {
		printk("%s : txdesc is NULL\n", dev->name);
		return -1;
	}

	tx_desc[tx_cpu_owner_idx].txd_info1.SDP0 = virt_to_phys(skb->data);
	tx_desc[tx_cpu_owner_idx].txd_info2.SDL0 = length;
	tx_desc[tx_cpu_owner_idx].txd_info2.DDONE_bit = 0;
	tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
	tx_desc[tx_cpu_owner_idx].txd_info4.QN = qn;

#ifdef CONFIG_RAETH_CHECKSUM_OFFLOAD
	/* enable TCP/UDP/IP checksum generation on this ring's descriptor */
	tx_desc[tx_cpu_owner_idx].txd_info4.TCO = 1;
	tx_desc[tx_cpu_owner_idx].txd_info4.UCO = 1;
	tx_desc[tx_cpu_owner_idx].txd_info4.ICO = 1;
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (FOE_MAGIC_TAG(skb) == FOE_MAGIC_PPE) {
		tx_desc[tx_cpu_owner_idx].txd_info4.PN = 6; /* PPE */
	} else {
		tx_desc[tx_cpu_owner_idx].txd_info4.PN = pn;
	}

	/* tell the hwnat module which interface this packet came in on */
#if defined (CONFIG_RALINK_RT2880) || defined (CONFIG_RALINK_RT3052)
	tx_desc[tx_cpu_owner_idx].txd_info4.RXIF = FOE_ALG_RXIF(skb); /* 0: WLAN, 1: PCI */
#else
	tx_desc[tx_cpu_owner_idx].txd_info4.UDF = FOE_UDF(skb);
#endif

#endif

	spin_lock_irqsave(&ei_local->page_lock, flags);
	ei_local->skb_free[ring_no][tx_cpu_owner_idx] = skb;
	tx_cpu_owner_idx = (tx_cpu_owner_idx +1) % NUM_TX_DESC;
	ret = set_tx_ctx_idx(ring_no, tx_cpu_owner_idx);
	spin_unlock_irqrestore(&ei_local->page_lock, flags);

	ei_local->stat.tx_packets++;
	ei_local->stat.tx_bytes += length;

#ifdef CONFIG_RAETH_NAPI
	switch ( ring_no ) {
		case 0:
			if ( ei_local->tx0_full == 1) {
				ei_local->tx0_full = 0;
				netif_wake_queue(dev);
			}
			break;
		case 1:
			if ( ei_local->tx1_full == 1) {
				ei_local->tx1_full = 0;
				netif_wake_queue(dev);
			}
			break;
		case 2:
			if ( ei_local->tx2_full == 1) {
				ei_local->tx2_full = 0;
				netif_wake_queue(dev);
			}
			break;
		case 3:
			if ( ei_local->tx3_full == 1) {
				ei_local->tx3_full = 0;
				netif_wake_queue(dev);
			}
			break;
		default :
			printk("ring_no input error %d\n", ring_no);
	}
#endif
	return length;
}
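A hypothetical call site for the function above, to show the parameter order and the return convention visible in the source (the ring/queue/port values are illustrative only):

static int example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	/* ring 0, queue 3, PSE port 1 -- illustrative values */
	if (fe_qos_packet_send(dev, skb, 0, 3, 1) < 0) {
		dev_kfree_skb(skb);	/* send failed: bad ring or NULL descriptor */
		return NETDEV_TX_OK;
	}
	return NETDEV_TX_OK;	/* on success the ring owns the skb */
}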
Example #10
void announce_802_3_packet(
	IN VOID *pAdSrc,
	IN PNDIS_PACKET pPacket,
	IN UCHAR OpMode)
{
	RTMP_ADAPTER *pAd;
	PNDIS_PACKET pRxPkt = pPacket;

	pAd = (RTMP_ADAPTER *)pAdSrc;

	ASSERT(pPacket);
	MEM_DBG_PKT_FREE_INC(pPacket);


#ifdef CONFIG_AP_SUPPORT
#ifdef APCLI_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_AP(pAd)
	{
		if (RTMP_MATPktRxNeedConvert(pAd, RtmpOsPktNetDevGet(pRxPkt)))
			RTMP_MATEngineRxHandle(pAd, pRxPkt, 0);
	}
#endif /* APCLI_SUPPORT */
#endif /* CONFIG_AP_SUPPORT */


    /* Push up the protocol stack */
#ifdef CONFIG_AP_SUPPORT
#ifdef PLATFORM_BL2348
{
	extern int (*pToUpperLayerPktSent)(PNDIS_PACKET *pSkb);
	RtmpOsPktProtocolAssign(pRxPkt);
	pToUpperLayerPktSent(pRxPkt);
	return;
}
#endif /* PLATFORM_BL2348 */
#endif /* CONFIG_AP_SUPPORT */

#ifdef IKANOS_VX_1X0
{
	IKANOS_DataFrameRx(pAd, pRxPkt);
	return;
}
#endif /* IKANOS_VX_1X0 */

#ifdef INF_PPA_SUPPORT
	if (ppa_hook_directpath_send_fn && pAd->PPAEnable == TRUE)
	{
		RtmpOsPktInfPpaSend(pRxPkt);
		pRxPkt = NULL;
		return;
	}
#endif /* INF_PPA_SUPPORT */

	{
#ifdef CONFIG_RT2880_BRIDGING_ONLY
		PACKET_CB_ASSIGN(pRxPkt, 22) = 0xa8;
#endif

#if defined(CONFIG_RA_CLASSIFIER)||defined(CONFIG_RA_CLASSIFIER_MODULE)
		if (ra_classifier_hook_rx != NULL)
		{
			unsigned long flags;

			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			ra_classifier_hook_rx(pRxPkt, classifier_cur_cycle);
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
		}
#endif /* CONFIG_RA_CLASSIFIER */

#if !defined(CONFIG_RA_NAT_NONE)
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
		{
			struct sk_buff *pRxPktb = RTPKT_TO_OSPKT(pRxPkt);
			FOE_MAGIC_TAG(pRxPktb) = FOE_MAGIC_WLAN;
		}
#endif

#ifdef RA_NAT_SUPPORT
#if !defined(CONFIG_RA_NAT_NONE)
		/* bruce+
		 * ra_sw_nat_hook_rx return 1 --> continue
		 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
		 */
		if (ra_sw_nat_hook_rx != NULL)
		{
			unsigned long flags;

			RtmpOsPktProtocolAssign(pRxPkt);

			RTMP_IRQ_LOCK(&pAd->page_lock, flags);
			if (ra_sw_nat_hook_rx(pRxPkt))
			{
				netif_rx(pRxPkt);
			}
			RTMP_IRQ_UNLOCK(&pAd->page_lock, flags);
			return;
		}
#endif /* !CONFIG_RA_NAT_NONE */
#endif /* RA_NAT_SUPPORT */
#else
		{
#if defined (CONFIG_RA_HW_NAT)  || defined (CONFIG_RA_HW_NAT_MODULE)
			FOE_AI(((struct sk_buff *)pRxPkt)) = UN_HIT;
#endif /* CONFIG_RA_HW_NAT */
		}
#endif /* CONFIG_RA_NAT_NONE */
	}
	
#ifdef CONFIG_AP_SUPPORT
#ifdef BG_FT_SUPPORT
	if (BG_FTPH_PacketFromApHandle(pRxPkt) == 0)
		return;
#endif /* BG_FT_SUPPORT */
#endif /* CONFIG_AP_SUPPORT */

	RtmpOsPktProtocolAssign(pRxPkt);
	RtmpOsPktRcvHandle(pRxPkt);
}