Example no. 1
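/* Return the encapsulated ethertype of a VLAN-tagged frame: with an
 * accelerated (out-of-band) tag the inner protocol is already in
 * skb->protocol, with an in-band 802.1Q tag it is read from the VLAN
 * header, otherwise 0. */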
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (skb_vlan_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}
Example no. 2
/*
 *  ebt_ip
 *
 *	Authors:
 *	Bart De Schuymer <*****@*****.**>
 *
 *  April, 2002
 *
 *  Changes:
 *    added ip-sport and ip-dport
 *    Innominate Security Technologies AG <*****@*****.**>
 *    September, 2002
 */
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/in.h>
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
#include <linux/netfilter_bridge/ebt_ip.h>

struct tcpudphdr {
	__be16 src;
	__be16 dst;
};
#if 1  /*Rodney_20090724*/
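/* vendor helper: unconditionally read the encapsulated ethertype from the
 * VLAN header; the caller must ensure the frame actually carries an
 * in-band 802.1Q tag */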
static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
}
Example no. 3
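/* RX poll handler: harvest up to work_todo completed descriptors from the
 * PDMA RX ring, refill each slot with a fresh skb and pass the received
 * frames up to the network stack. */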
static inline int
pdma_recv(struct net_device* dev, END_DEVICE* ei_local, int work_todo)
{
	struct PDMA_rxdesc *rxd_ring;
	struct sk_buff *new_skb, *rx_skb;
	int gmac_no = PSE_PORT_GMAC1;
	int work_done = 0;
	u32 rxd_dma_owner_idx;
	u32 rxd_info2, rxd_info4;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
	u32 rxd_info3;
#endif
#if defined (CONFIG_RAETH_SPECIAL_TAG)
	struct vlan_ethhdr *veth;
#endif

	rxd_dma_owner_idx = le32_to_cpu(sysRegRead(RX_CALC_IDX0));

	while (work_done < work_todo) {
		rxd_dma_owner_idx = (rxd_dma_owner_idx + 1) % NUM_RX_DESC;
		rxd_ring = &ei_local->rxd_ring[rxd_dma_owner_idx];
		
		if (!(rxd_ring->rxd_info2 & RX2_DMA_DONE))
			break;
		
		/* load completed skb pointer */
		rx_skb = ei_local->rxd_buff[rxd_dma_owner_idx];
		
		/* copy RX desc to CPU */
		rxd_info2 = rxd_ring->rxd_info2;
#if defined (CONFIG_RAETH_HW_VLAN_RX)
		rxd_info3 = rxd_ring->rxd_info3;
#endif
		rxd_info4 = rxd_ring->rxd_info4;
		
#if defined (CONFIG_PSEUDO_SUPPORT)
		gmac_no = RX4_DMA_SP(rxd_info4);
#endif
		/* make sure a replacement RX buffer can be allocated
		 * before passing the packet to the CPU */
		new_skb = __dev_alloc_skb(MAX_RX_LENGTH + NET_IP_ALIGN, GFP_ATOMIC);
		if (unlikely(new_skb == NULL)) {
#if defined (RAETH_PDMA_V2)
			rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
			rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
			/* move CPU pointer to next RXD */
			sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
			
			inc_rx_drop(ei_local, gmac_no);
#if !defined (CONFIG_RAETH_NAPI)
			/* means we need to be rescheduled */
			work_done = work_todo;
#endif
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: Failed to alloc new RX skb! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
			break;
		}
#if !defined (RAETH_PDMA_V2)
		skb_reserve(new_skb, NET_IP_ALIGN);
#endif
		/* store new empty skb pointer */
		ei_local->rxd_buff[rxd_dma_owner_idx] = new_skb;
		
		/* map new skb to ring (unmap is not required on generic mips mm) */
		rxd_ring->rxd_info1 = (u32)dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH, DMA_FROM_DEVICE);
#if defined (RAETH_PDMA_V2)
		rxd_ring->rxd_info2 = RX2_DMA_SDL0_SET(MAX_RX_LENGTH);
#else
		rxd_ring->rxd_info2 = RX2_DMA_LS0;
#endif
		wmb();
		
		/* move CPU pointer to next RXD */
		sysRegWrite(RX_CALC_IDX0, cpu_to_le32(rxd_dma_owner_idx));
		
		/* skb processing */
		rx_skb->len = RX2_DMA_SDL0_GET(rxd_info2);
#if defined (RAETH_PDMA_V2)
		rx_skb->data += NET_IP_ALIGN;
#endif
		rx_skb->tail = rx_skb->data + rx_skb->len;

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		FOE_MAGIC_TAG(rx_skb) = FOE_MAGIC_GE;
		DO_FILL_FOE_DESC(rx_skb, (rxd_info4 & ~(RX4_DMA_ALG_SET)));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
		if (rxd_info4 & RX4_DMA_L4FVLD)
			rx_skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif

#if defined (CONFIG_RAETH_HW_VLAN_RX)
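		/* hand the hardware-extracted VLAN tag to the stack via
		 * VLAN RX acceleration */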
		if ((rxd_info2 & RX2_DMA_TAG) && rxd_info3) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
			__vlan_hwaccel_put_tag(rx_skb, __constant_htons(ETH_P_8021Q), RX3_DMA_VID(rxd_info3));
#else
			__vlan_hwaccel_put_tag(rx_skb, RX3_DMA_VID(rxd_info3));
#endif
		}
#endif

#if defined (CONFIG_PSEUDO_SUPPORT)
		if (gmac_no == PSE_PORT_GMAC2)
			rx_skb->protocol = eth_type_trans(rx_skb, ei_local->PseudoDev);
		else
#endif
			rx_skb->protocol = eth_type_trans(rx_skb, dev);

#if defined (CONFIG_RAETH_SPECIAL_TAG)
#if defined (CONFIG_MT7530_GSW)
#define ESW_TAG_ID	0x00
#else
#define ESW_TAG_ID	0x81
#endif
		// port0: 0x8100 => 0x8100 0001
		// port1: 0x8101 => 0x8100 0002
		// port2: 0x8102 => 0x8100 0003
		// port3: 0x8103 => 0x8100 0004
		// port4: 0x8104 => 0x8100 0005
		// port5: 0x8105 => 0x8100 0006
		veth = vlan_eth_hdr(rx_skb);
		if ((veth->h_vlan_proto & 0xFF) == ESW_TAG_ID) {
			veth->h_vlan_TCI = htons((((veth->h_vlan_proto >> 8) & 0xF) + 1));
			veth->h_vlan_proto = __constant_htons(ETH_P_8021Q);
			rx_skb->protocol = veth->h_vlan_proto;
		}
#endif

/* ra_sw_nat_hook_rx return 1 --> continue
 * ra_sw_nat_hook_rx return 0 --> FWD & without netif_rx
 */
#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
		if ((ra_sw_nat_hook_rx == NULL) ||
		    (ra_sw_nat_hook_rx != NULL && ra_sw_nat_hook_rx(rx_skb)))
#endif
		{
#if defined (CONFIG_RAETH_NAPI)
#if defined (CONFIG_RAETH_NAPI_GRO)
			if (rx_skb->ip_summed == CHECKSUM_UNNECESSARY)
				napi_gro_receive(&ei_local->napi, rx_skb);
			else
#endif
			netif_receive_skb(rx_skb);
#else
			netif_rx(rx_skb);
#endif
		}
		
		work_done++;
	}
Example no. 4
/* note: already called with rcu_read_lock (preempt_disabled) */
int br_handle_frame_finish(struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
	struct net_bridge *br;
	struct net_bridge_fdb_entry *dst;
	struct sk_buff *skb2;
#if defined(CONFIG_MIPS_BRCM)
	struct iphdr *pip = NULL;
	__u8 igmpTypeOffset = 0;
#endif

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

#if defined(CONFIG_MIPS_BRCM)
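	/* locate the IP header and the offset of the encapsulated IGMP
	 * message, allowing for an optional in-band 802.1Q tag */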
	if ( vlan_eth_hdr(skb)->h_vlan_proto == ETH_P_IP )
	{
		pip = ip_hdr(skb);
		igmpTypeOffset = (pip->ihl << 2);
	}
	else if ( vlan_eth_hdr(skb)->h_vlan_proto == ETH_P_8021Q )
	{
		if ( vlan_eth_hdr(skb)->h_vlan_encapsulated_proto == ETH_P_IP )
		{
			pip = (struct iphdr *)(skb_network_header(skb) + sizeof(struct vlan_hdr));
			igmpTypeOffset = (pip->ihl << 2) + sizeof(struct vlan_hdr);
		}
	}

	if ((pip) && (pip->protocol == IPPROTO_IGMP))
	{
#if defined(CONFIG_BCM_GPON_MODULE)
		struct igmphdr *ih = (struct igmphdr *)&skb->data[igmpTypeOffset];

		/* drop IGMP v1 report packets */
		if (ih->type == IGMP_HOST_MEMBERSHIP_REPORT)
		{
			goto drop;
		}

		/* drop IGMP v1 query packets */
		if ((ih->type == IGMP_HOST_MEMBERSHIP_QUERY) &&
		    (ih->code == 0))
		{
			goto drop;
		}

		/* drop IGMP leave packets for group 0.0.0.0 */
		if ((ih->type == IGMP_HOST_LEAVE_MESSAGE) &&
		    (0 == ih->group))
		{
			goto drop;
		}
#endif
		/* rate limit IGMP */
		br = p->br;
		if ( br->igmp_rate_limit )
		{
			ktime_t      curTime;
			u64          diffUs;
			unsigned int usPerPacket;
			unsigned int temp32;
			unsigned int burstLimit;

			/* add tokens to the bucket - compute in microseconds */
			curTime     = ktime_get();
			usPerPacket = (1000000 / br->igmp_rate_limit);
			diffUs      = ktime_to_us(ktime_sub(curTime, br->igmp_rate_last_packet));
			diffUs     += br->igmp_rate_rem_time;

			/* allow 25% burst */
			burstLimit = br->igmp_rate_limit >> 2;
			if ( 0 == burstLimit)
			{
				burstLimit = 1;
			}

			if ( diffUs > 1000000 )
			{
				br->igmp_rate_bucket   = burstLimit;
				br->igmp_rate_rem_time = 0;
			}
			else
			{
				temp32 = (unsigned int)diffUs / usPerPacket;
				br->igmp_rate_bucket += temp32;
				if (temp32)
				{
					br->igmp_rate_rem_time = diffUs - (temp32 * usPerPacket);
				}
			}
			if (br->igmp_rate_bucket > burstLimit)
			{
				br->igmp_rate_bucket   = burstLimit;
				br->igmp_rate_rem_time = 0;
			}

			/* if bucket is empty drop the packet */
			if (0 == br->igmp_rate_bucket)
			{
				goto drop;
			}
			br->igmp_rate_bucket--;
			br->igmp_rate_last_packet.tv64 = curTime.tv64;
		}
	}
Example no. 5
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb_out;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u16 tci = 0;
	bool is_ip;
	u8 *c;

	if (!ctx)
		goto error;

	if (skb) {
		if (skb->len <= ETH_HLEN)
			goto error;

		/* Some applications using e.g. packet sockets will
		 * bypass the VLAN acceleration and create tagged
		 * ethernet frames directly.  We primarily look for
		 * the accelerated out-of-band tag, but fall back if
		 * required
		 */
		skb_reset_mac_header(skb);
		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
		    __vlan_get_tag(skb, &tci) == 0) {
			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
			skb_pull(skb, VLAN_ETH_HLEN);
		} else {
			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
			skb_pull(skb, ETH_HLEN);
		}

		/* Is IP session <0> tagged too? */
		if (info->flags & FLAG_IPS0_VLAN) {
			/* drop all untagged packets */
			if (!tci)
				goto error;
			/* map MBIM_IPS0_VID to IPS<0> */
			if (tci == MBIM_IPS0_VID)
				tci = 0;
		}

		/* mapping VLANs to MBIM sessions:
		 *   no tag     => IPS session <0> if !FLAG_IPS0_VLAN
		 *   1 - 255    => IPS session <vlanid>
		 *   256 - 511  => DSS session <vlanid - 256>
		 *   512 - 4093 => unsupported, drop
		 *   4094       => IPS session <0> if FLAG_IPS0_VLAN
		 */

		switch (tci & 0x0f00) {
		case 0x0000: /* VLAN ID 0 - 255 */
			if (!is_ip)
				goto error;
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		case 0x0100: /* VLAN ID 256 - 511 */
			if (is_ip)
				goto error;
			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		default:
			netif_err(dev, tx_err, dev->net,
				  "unsupported tci=0x%04x\n", tci);
			goto error;
		}
	}

	spin_lock_bh(&ctx->mtx);
	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
	spin_unlock_bh(&ctx->mtx);
	return skb_out;

error:
	if (skb)
		dev_kfree_skb_any(skb);

	return NULL;
}
Example no. 6
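/* Pick a TX queue for the skb: walk past any 802.1Q/PPPoE encapsulation,
 * hash the IP addresses and L4 ports with jhash, then restore the pulled
 * headers before returning the queue index. */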
static u16 imq_hash(struct net_device *dev, struct sk_buff *skb)
{
	unsigned int pull_len;
	u16 protocol = skb->protocol;
	u32 addr1, addr2;
	u32 hash, ihl = 0;
	union {
		u16 in16[2];
		u32 in32;
	} ports;
	u8 ip_proto;

	pull_len = 0;

recheck:
	switch (protocol) {
	case htons(ETH_P_8021Q): {
		if (unlikely(skb_pull(skb, VLAN_HLEN) == NULL))
			goto other;

		pull_len += VLAN_HLEN;
		skb->network_header += VLAN_HLEN;

		protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
		goto recheck;
	}

	case htons(ETH_P_PPP_SES): {
		if (unlikely(skb_pull(skb, PPPOE_SES_HLEN) == NULL))
			goto other;

		pull_len += PPPOE_SES_HLEN;
		skb->network_header += PPPOE_SES_HLEN;

		protocol = pppoe_proto(skb);
		goto recheck;
	}

	case htons(ETH_P_IP): {
		const struct iphdr *iph = ip_hdr(skb);

		if (unlikely(!pskb_may_pull(skb, sizeof(struct iphdr))))
			goto other;

		addr1 = iph->daddr;
		addr2 = iph->saddr;

		ip_proto = !(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)) ?
				 iph->protocol : 0;
		ihl = ip_hdrlen(skb);

		break;
	}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	case htons(ETH_P_IPV6): {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		if (unlikely(!pskb_may_pull(skb, sizeof(struct ipv6hdr))))
			goto other;

		addr1 = iph->daddr.s6_addr32[3];
		addr2 = iph->saddr.s6_addr32[3];
		ihl = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &ip_proto);
		/* ihl is unsigned; cast so a negative error return is caught */
		if (unlikely((int)ihl < 0))
			goto other;

		break;
	}
#endif
	default:
other:
		if (pull_len != 0) {
			skb_push(skb, pull_len);
			skb->network_header -= pull_len;
		}

		return (u16)(ntohs(protocol) % dev->real_num_tx_queues);
	}

	if (addr1 > addr2)
		swap(addr1, addr2);

	switch (ip_proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
	case IPPROTO_AH:
	case IPPROTO_SCTP:
	case IPPROTO_UDPLITE: {
		if (likely(skb_copy_bits(skb, ihl, &ports.in32, 4) >= 0)) {
			if (ports.in16[0] > ports.in16[1])
				swap(ports.in16[0], ports.in16[1]);
			break;
		}
		/* fall-through */
	}
	default:
		ports.in32 = 0;
		break;
	}

	if (pull_len != 0) {
		skb_push(skb, pull_len);
		skb->network_header -= pull_len;
	}

	hash = jhash_3words(addr1, addr2, ports.in32, imq_hashrnd ^ ip_proto);

	return (u16)(((u64)hash * dev->real_num_tx_queues) >> 32);
}