int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);
	} else {
		skb->protocol = inner_proto;
	}

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_hash_if_not_l4(skb);
	skb_dst_drop(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
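/* iptunnel_pull_header() above decides whether the inner frame carries a
 * real EtherType or an 802.3 length field.  A minimal sketch of the
 * eth_proto_is_802_3() test it relies on (assumption: this matches the
 * upstream helper in include/linux/etherdevice.h; ETH_P_802_3_MIN is
 * 0x0600, so values below 1536 are frame lengths, not protocol IDs).
 */
static inline bool eth_proto_is_802_3(__be16 proto)
{
#ifndef __BIG_ENDIAN
	/* if CPU is little endian mask off bits representing LSB */
	proto &= htons(0xFF00);
#endif
	/* cmp to ETH_P_802_3_MIN sanitized by masking */
	return (__force u16)proto >= (__force u16)htons(ETH_P_802_3_MIN);
}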
int rpl___iptunnel_pull_header(struct sk_buff *skb, int hdr_len,
			       __be16 inner_proto, bool raw_proto, bool xnet)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (!raw_proto && inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(eth_proto_is_802_3(eh->h_proto)))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);
	} else {
		skb->protocol = inner_proto;
	}

	skb_clear_hash_if_not_l4(skb);
	skb->vlan_tci = 0;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, xnet);

	return iptunnel_pull_offloads(skb);
}
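/* The compat variant above finishes by clearing tunnel offload state.
 * A minimal sketch of iptunnel_pull_offloads() (assumption: the compat
 * copy matches the upstream helper in include/net/ip_tunnels.h).
 */
static inline int iptunnel_pull_offloads(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		int err;

		/* GSO state lives in the shared info, so unclone first. */
		err = skb_unclone(skb, GFP_ATOMIC);
		if (unlikely(err))
			return err;
		skb_shinfo(skb)->gso_type &= ~(NETIF_F_GSO_ENCAP_ALL >>
					       NETIF_F_GSO_SHIFT);
	}

	skb->encapsulation = 0;
	return 0;
}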
static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}
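/* Every snippet in this collection leans on skb_pull_rcsum() to keep
 * skb->csum consistent while header bytes are pulled.  A minimal sketch
 * of the helper as found in upstream net/core/skbuff.c; treat it as
 * illustrative, since the exact body varies between kernel versions.
 */
void *skb_pull_rcsum(struct sk_buff *skb, unsigned int len)
{
	unsigned char *data = skb->data;

	BUG_ON(len > skb->len);
	__skb_pull(skb, len);
	/* Subtract the pulled bytes from a CHECKSUM_COMPLETE csum. */
	skb_postpull_rcsum(skb, data, len);
	return skb->data;
}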
static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
				   struct packet_type *pt)
{
	int port;
	__be16 *phdr, hdr;

	if (unlikely(!pskb_may_pull(skb, MTK_HDR_LEN)))
		return NULL;

	/* The MTK header is added by the switch between src addr
	 * and ethertype at this point, skb->data points to 2 bytes
	 * after src addr so header should be 2 bytes right before.
	 */
	phdr = (__be16 *)(skb->data - 2);
	hdr = ntohs(*phdr);

	/* Remove MTK tag and recalculate checksum. */
	skb_pull_rcsum(skb, MTK_HDR_LEN);

	memmove(skb->data - ETH_HLEN,
		skb->data - ETH_HLEN - MTK_HDR_LEN,
		2 * ETH_ALEN);

	/* Get source port information */
	port = (hdr & MTK_HDR_RECV_SOURCE_PORT_MASK);

	skb->dev = dsa_master_find_slave(dev, 0, port);
	if (!skb->dev)
		return NULL;

	return skb;
}
struct sk_buff *vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	u16 vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
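/* vlan_untag() defers the post-pull protocol fixup to
 * vlan_set_encap_proto().  A condensed sketch of that helper as it looked
 * in kernels of this vintage (assumption: matches the net/8021q code of
 * the time); it applies the same "EtherType vs. 802.3 length" rule that
 * the vlan_skb_recv() header comments further below describe.
 */
static void vlan_set_encap_proto(struct sk_buff *skb, struct vlan_hdr *vhdr)
{
	__be16 proto;
	unsigned char *rawp;

	/* Values >= 1536 (0x0600) are real EtherTypes. */
	proto = vhdr->h_vlan_encapsulated_proto;
	if (ntohs(proto) >= 1536) {
		skb->protocol = proto;
		return;
	}

	rawp = skb->data;
	if (*(unsigned short *)rawp == 0xFFFF)
		/* Raw IPX over 802.3: 0xFFFF is not a used 802.2 SSAP/DSAP. */
		skb->protocol = htons(ETH_P_802_3);
	else
		/* Real 802.2 LLC */
		skb->protocol = htons(ETH_P_802_2);
}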
struct sk_buff * pfq_vlan_untag(struct sk_buff *skb)
{
	struct vlan_hdr *vhdr;
	uint16_t vlan_tci;

	if (unlikely(vlan_tx_tag_present(skb))) {
		/* vlan_tci is already set-up so leave this for another time */
		return skb;
	}

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *) skb->data;
	vlan_tci = be16_to_cpu(vhdr->h_vlan_TCI);
	__vlan_hwaccel_put_tag(skb, vlan_tci);

	skb_pull_rcsum(skb, VLAN_HLEN);
	pfq_vlan_set_encap_proto(skb, vhdr);

	skb = pfq_vlan_reorder_header(skb);
	if (unlikely(!skb))
		goto err_free;

	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	return skb;

err_free:
	kfree_skb(skb);
	return NULL;
}
/* pop_eth does not support VLAN packets as this action is never called
 * for them.
 */
static int pop_eth(struct sk_buff *skb, struct sw_flow_key *key)
{
	skb_pull_rcsum(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb_reset_mac_len(skb);

	/* safe right before invalidate_flow_key */
	key->mac_proto = MAC_PROTO_NONE;
	invalidate_flow_key(key);
	return 0;
}
static int qinq_rcv(struct sk_buff *skb, struct net_device *dev,
		    struct packet_type *pt, struct net_device *orig_dev)
{
	struct dsa_switch_tree *dst;
	struct dsa_switch *ds;
	struct vlan_hdr *vhdr;
	int source_port;

	dst = dev->dsa_ptr;
	if (unlikely(dst == NULL))
		goto out_drop;
	ds = dst->ds[0];

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto out;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto out_drop;

	vhdr = (struct vlan_hdr *)skb->data;
	source_port = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	if (source_port >= DSA_MAX_PORTS || ds->ports[source_port] == NULL)
		goto out_drop;

	/* Remove the outermost VLAN tag and update checksum. */
	skb_pull_rcsum(skb, VLAN_HLEN);
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - VLAN_HLEN,
		2 * ETH_ALEN);

	skb->dev = ds->ports[source_port];
	skb_push(skb, ETH_HLEN);
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, skb->dev);

	skb->dev->stats.rx_packets++;
	skb->dev->stats.rx_bytes += skb->len;

	netif_receive_skb(skb);

	return 0;

out_drop:
	kfree_skb(skb);
out:
	return 0;
}
static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
				   struct packet_type *pt)
{
	u16 *lan9303_tag;
	u16 lan9303_tag1;
	unsigned int source_port;

	if (unlikely(!pskb_may_pull(skb, LAN9303_TAG_LEN))) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet, cannot pull\n");
		return NULL;
	}

	/* '->data' points into the middle of our special VLAN tag information:
	 *
	 * ~ MAC src | 0x81 | 0x00 | 0xyy | 0xzz | ether type
	 *                          ^
	 *                       ->data
	 */
	lan9303_tag = (u16 *)(skb->data - 2);

	if (lan9303_tag[0] != htons(ETH_P_8021Q)) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet due to invalid VLAN marker\n");
		return NULL;
	}

	lan9303_tag1 = ntohs(lan9303_tag[1]);
	source_port = lan9303_tag1 & 0x3;

	skb->dev = dsa_master_find_slave(dev, 0, source_port);
	if (!skb->dev) {
		dev_warn_ratelimited(&dev->dev,
				     "Dropping packet due to invalid source port\n");
		return NULL;
	}

	/* remove the special VLAN tag between the MAC addresses
	 * and the current ethertype field.
	 */
	skb_pull_rcsum(skb, 2 + 2);
	memmove(skb->data - ETH_HLEN, skb->data - (ETH_HLEN + LAN9303_TAG_LEN),
		2 * ETH_ALEN);
	skb->offload_fwd_mark = !(lan9303_tag1 & LAN9303_TAG_RX_TRAPPED_TO_CPU);

	return skb;
}
void interface_rx(struct sk_buff *skb, int hdr_size)
{
	struct net_device *dev = soft_device;
	struct bat_priv *priv = netdev_priv(dev);

	/* check if enough space is available for pulling, and pull */
	if (!pskb_may_pull(skb, hdr_size)) {
		kfree_skb(skb);
		return;
	}
	skb_pull_rcsum(skb, hdr_size);
	/* skb_set_mac_header(skb, -sizeof(struct ethhdr)); */

	if (unlikely(!pskb_may_pull(skb, ETH_HLEN))) {
		kfree_skb(skb);
		return;
	}

	skb->dev = dev;
	skb->protocol = eth_type_trans(skb, dev);

	/* should not be necessary anymore as we use skb_pull_rcsum()
	 * TODO: please verify this and remove this TODO
	 * -- Dec 21st 2009, Simon Wunderlich
	 */
	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */

	/* TODO: set skb->pkt_type to PACKET_BROADCAST, PACKET_MULTICAST,
	 * PACKET_OTHERHOST or PACKET_HOST
	 */

	priv->stats.rx_packets++;
	priv->stats.rx_bytes += skb->len;

	dev->last_rx = jiffies;

	netif_rx(skb);
}
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
	if (unlikely(!pskb_may_pull(skb, hdr_len)))
		return -ENOMEM;

	skb_pull_rcsum(skb, hdr_len);

	if (inner_proto == htons(ETH_P_TEB)) {
		struct ethhdr *eh;

		if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
			return -ENOMEM;

		eh = (struct ethhdr *)skb->data;
		if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
			skb->protocol = eh->h_proto;
		else
			skb->protocol = htons(ETH_P_802_2);
	} else {
		skb->protocol = inner_proto;
	}

	if (unlikely(compute_ip_summed(skb, false)))
		return -EPROTO;

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_rxhash(skb);
	skb_dst_drop(skb);
	vlan_set_tci(skb, 0);
	skb_set_queue_mapping(skb, 0);
	skb->pkt_type = PACKET_HOST;
	return 0;
}
static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
				   struct packet_type *pt)
{
	u8 ver;
	int port;
	__be16 *phdr, hdr;

	if (unlikely(!pskb_may_pull(skb, QCA_HDR_LEN)))
		return NULL;

	/* The QCA header is added by the switch between src addr and Ethertype
	 * At this point, skb->data points to ethertype so header should be
	 * right before
	 */
	phdr = (__be16 *)(skb->data - 2);
	hdr = ntohs(*phdr);

	/* Make sure the version is correct */
	ver = (hdr & QCA_HDR_RECV_VERSION_MASK) >> QCA_HDR_RECV_VERSION_S;
	if (unlikely(ver != QCA_HDR_VERSION))
		return NULL;

	/* Remove QCA tag and recalculate checksum */
	skb_pull_rcsum(skb, QCA_HDR_LEN);
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - QCA_HDR_LEN,
		ETH_HLEN - QCA_HDR_LEN);

	/* Get source port information */
	port = (hdr & QCA_HDR_RECV_SOURCE_PORT_MASK);

	skb->dev = dsa_master_find_slave(dev, 0, port);
	if (!skb->dev)
		return NULL;

	return skb;
}
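/* The DSA rcv hooks above (mtk_tag_rcv, lan9303_rcv, qca_tag_rcv) all strip
 * a switch tag that the hardware inserted between the source MAC and the
 * EtherType.  Illustrative layout of the tagged frame:
 *
 *   | dst MAC (6) | src MAC (6) | switch tag | real EtherType | payload
 *
 * eth_type_trans() has already pulled ETH_HLEN bytes, so skb->data lands
 * inside (or just past) the tag, which is why the tag is read back via a
 * small negative offset such as skb->data - 2.  skb_pull_rcsum() then
 * pulls the remaining tag bytes while fixing up skb->csum, and the
 * memmove() shifts the two MAC addresses (2 * ETH_ALEN bytes) up so they
 * again sit directly in front of the real EtherType.
 */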
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland.  To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory.  RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                 aligned, so there doesn't need to be any of the unaligned
 *                 stuff.  It has been commented out now...  --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	struct vlan_pcpu_stats *rx_stats;
	struct net_device *vlan_dev;
	u16 vlan_id;
	u16 vlan_tci;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	vlan_id = vlan_tci & VLAN_VID_MASK;

	rcu_read_lock();
	vlan_dev = vlan_find_dev(dev, vlan_id);

	/* If the VLAN device is defined, we use it.
	 * If not, and the VID is 0, it is an 802.1p packet (not
	 * really a VLAN), so we will just netif_rx it later to the
	 * original interface, but with the skb->proto set to the
	 * wrapped proto: we do nothing here.
	 */
	if (!vlan_dev) {
		if (vlan_id) {
			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
				 __func__, vlan_id, dev->name);
			goto err_unlock;
		}
		rx_stats = NULL;
	} else {
		skb->dev = vlan_dev;

		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;

		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);

		pr_debug("%s: priority: %u for TCI: %hu\n",
			 __func__, skb->priority, vlan_tci);

		switch (skb->pkt_type) {
		case PACKET_BROADCAST:
			/* Yeah, stats collect these together.. */
			/* stats->broadcast ++; // no such counter :-( */
			break;

		case PACKET_MULTICAST:
			rx_stats->rx_multicast++;
			break;

		case PACKET_OTHERHOST:
			/* Our lower layer thinks this is not local, let's make
			 * sure.
			 * This allows the VLAN to have a different MAC than the
			 * underlying device, and still route correctly.
			 */
			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
						skb->dev->dev_addr))
				skb->pkt_type = PACKET_HOST;
			break;
		default:
			break;
		}

		u64_stats_update_end(&rx_stats->syncp);
	}

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	if (vlan_dev) {
		skb = vlan_check_reorder_header(skb);
		if (!skb) {
			rx_stats->rx_errors++;
			goto err_unlock;
		}
	}

	netif_rx(skb);

	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	atomic_long_inc(&dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland.  To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory.  RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                 aligned, so there doesn't need to be any of the unaligned
 *                 stuff.  It has been commented out now...  --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	struct vlan_rx_stats *rx_stats;
	struct net_device *vlan_dev;
	u16 vlan_id;
	u16 vlan_tci;
#if defined(CONFIG_TCSUPPORT_VLAN_TAG)
	__be16 proto;
#endif
#if defined(CONFIG_TCSUPPORT_PON_VLAN)
	int ret = 0;
#endif

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	vlan_id = vlan_tci & VLAN_VID_MASK;

#ifdef CONFIG_TCSUPPORT_PON_VLAN
#if 0
	if (orig_dev->name[0] == 'e')
		skb->pon_vlan_flag |= PON_PKT_FROM_LAN;
	else if (orig_dev->name[0] == 'n') {
		skb->pon_vlan_flag |= PON_PKT_FROM_WAN;
		skb->pon_vlan_flag |= PON_PKT_ROUTING_FLAG;
	}
#endif
	if (ptype->type != ETH_P_8021Q)
		goto Pon_Handle;
#endif
	rcu_read_lock();
	vlan_dev = __find_vlan_dev(dev, vlan_id);

	/* If the VLAN device is defined, we use it.
	 * If not, and the VID is 0, it is an 802.1p packet (not
	 * really a VLAN), so we will just netif_rx it later to the
	 * original interface, but with the skb->proto set to the
	 * wrapped proto: we do nothing here.
	 */
	if (!vlan_dev) {
#ifdef CONFIG_TCSUPPORT_PON_VLAN
Pon_Handle:
		if (pon_store_tag_hook) {
			ret = pon_store_tag_hook(skb, orig_dev);
			if (ret == 0) {
				netif_rx(skb);
				return 0;
			} else if (ret == -1) {
				kfree_skb(skb);
				return -1;
			} else {
				/* HGU mode, do nothing */
			}
		}
#endif
#ifdef CONFIG_TCSUPPORT_VLAN_TAG
		if (check_vtag_hook && (check_vtag_hook() == 1)) {
			if (remove_vtag_hook) {
				if (remove_vtag_hook(skb, orig_dev) == -1) {
					/* must free skb !! */
					kfree_skb(skb);
					rcu_read_unlock();
					return -1;
				} else {
					netif_rx(skb);
					rcu_read_unlock();
					return 0;
				}
			} else {
				goto Normal_Handle;
			}
		} else {
Normal_Handle:
#if !defined(CONFIG_TCSUPPORT_CT)
			if ((orig_dev != NULL) &&
			    ((orig_dev->name[0] == 'b') ||
			     (orig_dev->name[0] == 'n')))
#endif
			{
				proto = vhdr->h_vlan_encapsulated_proto;
				skb->protocol = proto;
				/* Take off the VLAN header (4 bytes currently) */
				skb_pull_rcsum(skb, VLAN_HLEN);
				skb->dev = orig_dev;
				netif_rx(skb);
				rcu_read_unlock();
				return 0;
			}
#if !defined(CONFIG_TCSUPPORT_CT)
			else {
				pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
					 __func__, vlan_id, dev->name);
				kfree_skb(skb);
				rcu_read_unlock();
				return -1;
			}
#endif
		}
#else
		if (vlan_id) {
			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
				 __func__, vlan_id, dev->name);
			goto err_unlock;
		}
		rx_stats = NULL;
#endif
	} else {
		skb->dev = vlan_dev;
#if !defined(CONFIG_TCSUPPORT_CT)
#ifdef CONFIG_PORT_BINDING
		if (skb->dev->name[0] == 'e') {
			/* skb->mark |= MASK_ORIGIN_DEV; */
			skb->portbind_mark |= MASK_ORIGIN_DEV;
			memcpy(skb->orig_dev_name, skb->dev->name, IFNAMSIZ);
			/* printk("vlan_skb_recv: begin orig_dev name is [%s], orig_dev name is [%s]\n", skb->orig_dev_name, orig_dev->name); */
		}
#endif
#endif
		rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
				       smp_processor_id());

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;

		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);

		pr_debug("%s: priority: %u for TCI: %hu\n",
			 __func__, skb->priority, vlan_tci);

		switch (skb->pkt_type) {
		case PACKET_BROADCAST:
			/* Yeah, stats collect these together.. */
			/* stats->broadcast ++; // no such counter :-( */
			break;

		case PACKET_MULTICAST:
			rx_stats->rx_multicast++;
			break;

		case PACKET_OTHERHOST:
			/* Our lower layer thinks this is not local, let's make
			 * sure.
			 * This allows the VLAN to have a different MAC than the
			 * underlying device, and still route correctly.
			 */
			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
						skb->dev->dev_addr))
				skb->pkt_type = PACKET_HOST;
			break;
		default:
			break;
		}

		u64_stats_update_end(&rx_stats->syncp);
	}

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	if (vlan_dev) {
		skb = vlan_check_reorder_header(skb);
		if (!skb) {
			rx_stats->rx_errors++;
			goto err_unlock;
		}
	}

	netif_rx(skb);

	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland.  To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory.  RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be aligned,
 *                 so there doesn't need to be any of the unaligned stuff.  It
 *                 has been commented out now...  --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type* ptype, struct net_device *orig_dev)
{
	unsigned char *rawp = NULL;
	struct vlan_hdr *vhdr;
	unsigned short vid;
	struct net_device_stats *stats;
	unsigned short vlan_TCI;
	__be16 proto;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		return -1;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN))) {
		kfree_skb(skb);
		return -1;
	}

	vhdr = (struct vlan_hdr *)(skb->data);

	/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
	vlan_TCI = ntohs(vhdr->h_vlan_TCI);
	vid = (vlan_TCI & VLAN_VID_MASK);

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p vlan_id: %hx\n",
	       __FUNCTION__, skb, vid);
#endif

	/* Ok, we will find the correct VLAN device, strip the header,
	 * and then go on as usual.
	 */

	/* We have 12 bits of vlan ID.
	 *
	 * We must not allow preemption until we hold a
	 * reference to the device (netif_rx does that) or we
	 * fail.
	 */
	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vid);
	if (!skb->dev) {
		rcu_read_unlock();
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
		       __FUNCTION__, (unsigned int)(vid), dev->name, dev->ifindex);
#endif
		kfree_skb(skb);
		return -1;
	}

	skb->dev->last_rx = jiffies;

	/* Bump the rx counters for the VLAN device. */
	stats = vlan_dev_get_stats(skb->dev);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	/* Take off the VLAN header (4 bytes currently) */
	skb_pull_rcsum(skb, VLAN_HLEN);

	/* Ok, lets check to make sure the device (dev) we
	 * came in on is what this VLAN is attached to.
	 */
	if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
		rcu_read_unlock();
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
		       __FUNCTION__, skb, dev->name,
		       VLAN_DEV_INFO(skb->dev)->real_dev->name,
		       skb->dev->name);
#endif
		kfree_skb(skb);
		stats->rx_errors++;
		return -1;
	}

	/*
	 * Deal with ingress priority mapping.
	 */
	skb->priority = vlan_get_ingress_priority(skb->dev,
						  ntohs(vhdr->h_vlan_TCI));

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: priority: %lu for TCI: %hu (hbo)\n",
	       __FUNCTION__, (unsigned long)(skb->priority),
	       ntohs(vhdr->h_vlan_TCI));
#endif

	/* The ethernet driver already did the pkt_type calculations
	 * for us...
	 */
	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		/* Yeah, stats collect these together.. */
		/* stats->broadcast ++; // no such counter :-( */
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					skb->dev->dev_addr)) {
			/* It is for our (changed) MAC-address! */
			skb->pkt_type = PACKET_HOST;
		}
		break;
	default:
		break;
	}

	/* Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */
	/* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */
	proto = vhdr->h_vlan_encapsulated_proto;
	skb->protocol = proto;
	if (ntohs(proto) >= 1536) {
		/* place it back on the queue to be handled by
		 * true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	rawp = skb->data;

	/*
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF) {
		skb->protocol = htons(ETH_P_802_3);

		/* place it back on the queue to be handled by true layer 3
		 * protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	/*
	 * Real 802.2 LLC
	 */
	skb->protocol = htons(ETH_P_802_2);

	/* place it back on the queue to be handled by upper layer protocols. */

	/* See if we are configured to re-write the VLAN header
	 * to make it look like ethernet...
	 */
	skb = vlan_check_reorder_header(skb);

	/* Can be null if skb-clone fails when re-ordering */
	if (skb) {
		netif_rx(skb);
	} else {
		/* TODO: Add a more specific counter here. */
		stats->rx_errors++;
	}
	rcu_read_unlock();
	return 0;
}
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}
}
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland.  To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory.  RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                 aligned, so there doesn't need to be any of the unaligned
 *                 stuff.  It has been commented out now...  --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	struct net_device_stats *stats;
	u16 vlan_id;
	u16 vlan_tci;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	vlan_id = vlan_tci & VLAN_VID_MASK;

	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vlan_id);
	if (!skb->dev) {
#if defined(CONFIG_BCM_SMUX)
		/* start: bridge mode vlan pkt is discarded, A36D08034 by f00110348 */
		if (orig_dev->priv_flags & IFF_RSMUX) {
			skb->dev = NULL;
			rcu_read_unlock(); /* add unlock, or crash 20111225 by f00110348 */
			return 0;
		}
		/* end: bridge mode vlan pkt is discarded, A36D08034 by f00110348 */
#endif
		pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
			 __func__, vlan_id, dev->name);
		goto err_unlock;
	}

	stats = &skb->dev->stats;
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	skb_pull_rcsum(skb, VLAN_HLEN);

	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);

	/* Start of modified by f00120964 for qos function 2012-4-2 */
#ifdef CONFIG_DT_QOS
	skb->mark |= s_dtQos8021PtoMark[((ntohs(vhdr->h_vlan_TCI) >> 13) & 0x7)];
#endif
	/* End of modified by f00120964 for qos function 2012-4-2 */

	pr_debug("%s: priority: %u for TCI: %hu\n",
		 __func__, skb->priority, vlan_tci);

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		/* Yeah, stats collect these together.. */
		/* stats->broadcast ++; // no such counter :-( */
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					skb->dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	default:
		break;
	}

	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_check_reorder_header(skb);
	if (!skb) {
		stats->rx_errors++;
		goto err_unlock;
	}

	netif_rx(skb);

	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland.  To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory.  RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                 aligned, so there doesn't need to be any of the unaligned
 *                 stuff.  It has been commented out now...  --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	struct vlan_rx_stats *rx_stats;
	u16 vlan_id;
	u16 vlan_tci;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	vlan_id = vlan_tci & VLAN_VID_MASK;

	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vlan_id);
	if (!skb->dev) {
		pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
			 __func__, vlan_id, dev->name);
		goto err_unlock;
	}

	rx_stats = per_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats,
			       smp_processor_id());
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	skb_pull_rcsum(skb, VLAN_HLEN);

	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);

	pr_debug("%s: priority: %u for TCI: %hu\n",
		 __func__, skb->priority, vlan_tci);

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		/* Yeah, stats collect these together.. */
		/* stats->broadcast ++; // no such counter :-( */
		break;

	case PACKET_MULTICAST:
		rx_stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					skb->dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	default:
		break;
	}

	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_check_reorder_header(skb);
	if (!skb) {
		rx_stats->rx_errors++;
		goto err_unlock;
	}

	netif_rx(skb);

	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	kfree_skb(skb);
	return NET_RX_DROP;
}
void batadv_interface_rx(struct net_device *soft_iface,
			 struct sk_buff *skb,
			 struct batadv_hard_iface *recv_if,
			 int hdr_size, struct batadv_orig_node *orig_node)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct ethhdr *ethhdr;
	struct vlan_ethhdr *vhdr;
	struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
	short vid __maybe_unused = -1;
	__be16 ethertype = __constant_htons(ETH_P_BATMAN);
	bool is_bcast;

	is_bcast = (batadv_header->packet_type == BATADV_BCAST);

	/* check if enough space is available for pulling, and pull */
	if (!pskb_may_pull(skb, hdr_size))
		goto dropped;

	skb_pull_rcsum(skb, hdr_size);
	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		vhdr = (struct vlan_ethhdr *)skb->data;
		vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;

		if (vhdr->h_vlan_encapsulated_proto != ethertype)
			break;

		/* fall through */
	case ETH_P_BATMAN:
		goto dropped;
	}

	/* skb->dev & skb->pkt_type are set here */
	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		goto dropped;
	skb->protocol = eth_type_trans(skb, soft_iface);

	/* should not be necessary anymore as we use skb_pull_rcsum()
	 * TODO: please verify this and remove this TODO
	 * -- Dec 21st 2009, Simon Wunderlich
	 */
	/* skb->ip_summed = CHECKSUM_UNNECESSARY; */

	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	soft_iface->last_rx = jiffies;

	/* Let the bridge loop avoidance check the packet. If it will
	 * not handle it, we can safely push it up.
	 */
	if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
		goto out;

	if (orig_node)
		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
						     ethhdr->h_source);

	if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source, ethhdr->h_dest))
		goto dropped;

	netif_rx(skb);
	goto out;

dropped:
	kfree_skb(skb);
out:
	return;
}