static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb_out;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u16 tci = 0;
	u8 *c;

	if (!ctx)
		goto error;

	if (skb) {
		if (skb->len <= ETH_HLEN)
			goto error;

		/* mapping VLANs to MBIM sessions:
		 *   no tag     => IPS session <0>
		 *   1 - 255    => IPS session <vlanid>
		 *   256 - 511  => DSS session <vlanid - 256>
		 *   512 - 4095 => unsupported, drop
		 */
		vlan_get_tag(skb, &tci);

		switch (tci & 0x0f00) {
		case 0x0000: /* VLAN ID 0 - 255 */
			/* verify that datagram is IPv4 or IPv6 */
			skb_reset_mac_header(skb);
			switch (eth_hdr(skb)->h_proto) {
			case htons(ETH_P_IP):
			case htons(ETH_P_IPV6):
				break;
			default:
				goto error;
			}
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		case 0x0100: /* VLAN ID 256 - 511 */
			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		default:
			netif_err(dev, tx_err, dev->net,
				  "unsupported tci=0x%04x\n", tci);
			goto error;
		}
		skb_pull(skb, ETH_HLEN);
	}

	spin_lock_bh(&ctx->mtx);
	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
	spin_unlock_bh(&ctx->mtx);
	return skb_out;

error:
	if (skb)
		dev_kfree_skb_any(skb);

	return NULL;
}
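/* Editor's note: the driver above folds the MBIM session number into the
 * last byte of the little-endian NDP16 signature (c[3] = tci). A minimal
 * user-space sketch of that byte packing follows; the signature constant
 * and helper name are illustrative, not part of the driver's API.
 */
#include <stdint.h>

/* Fold a session id (0-255, i.e. the low byte of the VLAN id) into the
 * most significant byte of a little-endian 32-bit NDP signature. */
static uint32_t ndp16_sign_with_session(uint32_t base_sign_le, uint16_t vlan_id)
{
	return (base_sign_le & 0x00ffffffu) | ((uint32_t)(vlan_id & 0xff) << 24);
}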
static int ipgre_rcv(struct sk_buff *skb)
{
	struct iphdr *iph;
	u8 *h;
	__be16 flags;
	__sum16 csum = 0;
	__be32 key = 0;
	u32 seqno = 0;
	struct ip_tunnel *tunnel;
	int offset = 4;
	__be16 gre_proto;
	unsigned int len;

	if (!pskb_may_pull(skb, 16))
		goto drop_nolock;

	iph = ip_hdr(skb);
	h = skb->data;
	flags = *(__be16 *)h;

	if (flags & (GRE_CSUM | GRE_KEY | GRE_ROUTING | GRE_SEQ | GRE_VERSION)) {
		/* - Version must be 0.
		 * - We do not support routing headers.
		 */
		if (flags & (GRE_VERSION | GRE_ROUTING))
			goto drop_nolock;

		if (flags & GRE_CSUM) {
			switch (skb->ip_summed) {
			case CHECKSUM_COMPLETE:
				csum = csum_fold(skb->csum);
				if (!csum)
					break;
				/* fall through */
			case CHECKSUM_NONE:
				skb->csum = 0;
				csum = __skb_checksum_complete(skb);
				skb->ip_summed = CHECKSUM_COMPLETE;
			}
			offset += 4;
		}
		if (flags & GRE_KEY) {
			key = *(__be32 *)(h + offset);
			offset += 4;
		}
		if (flags & GRE_SEQ) {
			seqno = ntohl(*(__be32 *)(h + offset));
			offset += 4;
		}
	}

	gre_proto = *(__be16 *)(h + 2);

	read_lock(&ipgre_lock);
	if ((tunnel = ipgre_tunnel_lookup(skb->dev,
					  iph->saddr, iph->daddr, key,
					  gre_proto))) {
		struct net_device_stats *stats = &tunnel->dev->stats;

		secpath_reset(skb);

		skb->protocol = gre_proto;
		/* WCCP version 1 and 2 protocol decoding.
		 * - Change protocol to IP
		 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
		 */
		if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
			skb->protocol = htons(ETH_P_IP);
			if ((*(h + offset) & 0xF0) != 0x40)
				offset += 4;
		}

		skb->mac_header = skb->network_header;
		__pskb_pull(skb, offset);
		skb_postpull_rcsum(skb, skb_transport_header(skb), offset);
		skb->pkt_type = PACKET_HOST;
#ifdef CONFIG_NET_IPGRE_BROADCAST
		if (ipv4_is_multicast(iph->daddr)) {
			/* Looped back packet, drop it! */
			if (skb_rtable(skb)->fl.iif == 0)
				goto drop;
			stats->multicast++;
			skb->pkt_type = PACKET_BROADCAST;
		}
#endif

		if (((flags & GRE_CSUM) && csum) ||
		    (!(flags & GRE_CSUM) && tunnel->parms.i_flags & GRE_CSUM)) {
			stats->rx_crc_errors++;
			stats->rx_errors++;
			goto drop;
		}
		if (tunnel->parms.i_flags & GRE_SEQ) {
			if (!(flags & GRE_SEQ) ||
			    (tunnel->i_seqno &&
			     (s32)(seqno - tunnel->i_seqno) < 0)) {
				stats->rx_fifo_errors++;
				stats->rx_errors++;
				goto drop;
			}
			tunnel->i_seqno = seqno + 1;
		}

		len = skb->len;

		/* Warning: All skb pointers will be invalidated! */
		if (tunnel->dev->type == ARPHRD_ETHER) {
			if (!pskb_may_pull(skb, ETH_HLEN)) {
				stats->rx_length_errors++;
				stats->rx_errors++;
				goto drop;
			}

			iph = ip_hdr(skb);
			skb->protocol = eth_type_trans(skb, tunnel->dev);
			skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
		}

		stats->rx_packets++;
		stats->rx_bytes += len;
		skb->dev = tunnel->dev;
		skb_dst_drop(skb);
		nf_reset(skb);

		skb_reset_network_header(skb);
		ipgre_ecn_decapsulate(iph, skb);

		netif_rx(skb);
		read_unlock(&ipgre_lock);
		return 0;
	}
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
	read_unlock(&ipgre_lock);
drop_nolock:
	kfree_skb(skb);
	return 0;
}
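/* Editor's note: the running `offset` above tracks the variable-length GRE
 * header: 4 mandatory bytes (flags + protocol), plus 4 bytes each when the
 * checksum, key, or sequence-number flag is set. A standalone sketch of
 * that size computation, with the flag masks written in host byte order
 * for clarity (the kernel keeps them as big-endian constants):
 */
#include <stdint.h>

#define SKETCH_GRE_CSUM 0x8000u
#define SKETCH_GRE_KEY  0x2000u
#define SKETCH_GRE_SEQ  0x1000u

static unsigned int gre_header_len(uint16_t flags)
{
	unsigned int len = 4;			/* flags + protocol */

	if (flags & SKETCH_GRE_CSUM)
		len += 4;			/* checksum + reserved */
	if (flags & SKETCH_GRE_KEY)
		len += 4;			/* key */
	if (flags & SKETCH_GRE_SEQ)
		len += 4;			/* sequence number */
	return len;
}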
/**
 * Build a packet from TCP data.
 *
 * Generates a reply packet from the given TCP payload and fills in the
 * MAC/IP/TCP headers.
 *
 * @skb:        address of the original sk_buff
 * @names:      array of NIC name entries (terminated by index == -1)
 * @num:        number of NICs
 * @tcpdata:    address of the TCP payload
 * @tcpdatalen: length of the TCP payload
 *
 * Returns the new packet on success, NULL on failure.
 */
struct sk_buff *pkg_skbuff_generate(struct sk_buff *skb,
				    struct client_nicname *names, int num,
				    char *tcpdata, int tcpdatalen)
{
	struct sk_buff *new_skb = NULL;
	struct net_device *dev = NULL;
	struct iphdr *iph = NULL, *new_iph = NULL;
	struct tcphdr *tcph = NULL, *new_tcph = NULL;
	struct ethhdr *ethdr = NULL;
	char *newpdata = NULL;
	unsigned char *mac_header_addr = NULL;
	int i = 0;

	if (!skb || !names)
		goto out;

	iph = ip_hdr(skb);
	if (iph == NULL)
		goto out;

	tcph = (struct tcphdr *)((char *)iph + iph->ihl * 4);
	if (tcph == NULL)
		goto out;

	ethdr = eth_hdr(skb);
	if (ethdr == NULL)
		goto out;

	for (i = 0; names[i].index != -1; i++) {
/* unsure whether this cutoff should be an even earlier version */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24))
		dev = dev_get_by_name(names[i].name);
#else
		dev = dev_get_by_name(&init_net, names[i].name);
#endif
		if (dev != NULL)
			break;
	}
	if (dev == NULL)
		goto out;

	new_skb = alloc_skb(tcpdatalen + iph->ihl * 4 + tcph->doff * 4 + 14,
			    GFP_ATOMIC);
	if (new_skb == NULL)
		goto out;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0))
	new_skb->mac_header = new_skb->data;
	skb_reserve(new_skb, 14);
	new_skb->transport_header = new_skb->data;
	new_skb->network_header = new_skb->data;

	/* get_route_mac(iph->saddr, iph->daddr); */
	memcpy(&new_skb->mac_header[0], ethdr->h_source, 6);
	memcpy(&new_skb->mac_header[6], ethdr->h_dest, 6);
	new_skb->mac_header[12] = 0x08;
	new_skb->mac_header[13] = 0x00;
#else
	skb_reset_mac_header(new_skb);
	skb_reserve(new_skb, 14);
	skb_reset_transport_header(new_skb);
	skb_reset_network_header(new_skb);
	mac_header_addr = skb_mac_header(new_skb);
	if (mac_header_addr == NULL) {
		printk("Can't get header address!\n");
		goto out;
	}

	/* get_route_mac(iph->saddr, iph->daddr); */
	memcpy(mac_header_addr, ethdr->h_source, 6);
	memcpy(mac_header_addr + 6, ethdr->h_dest, 6);
	mac_header_addr[12] = 0x08;
	mac_header_addr[13] = 0x00;
#endif

	skb_put(new_skb, iph->ihl * 4 + tcph->doff * 4);
	new_skb->mac_len = 14;
	new_skb->dev = dev;
	new_skb->pkt_type = PACKET_OTHERHOST;
	new_skb->protocol = __constant_htons(ETH_P_IP);
	new_skb->ip_summed = CHECKSUM_NONE;
	new_skb->priority = 0;

	/*
	 * IP set
	 */
	new_iph = (struct iphdr *)new_skb->data;
	memset((char *)new_iph, 0, iph->ihl * 4);
	new_iph->version = iph->version;
	new_iph->ihl = iph->ihl;
	new_iph->tos = iph->tos;
	new_iph->id = iph->id;
	new_iph->ttl = iph->ttl;
	new_iph->frag_off = iph->frag_off;
	new_iph->protocol = IPPROTO_TCP;
	/* swap the addresses: the reply goes back to the sender */
	new_iph->saddr = iph->daddr;
	new_iph->daddr = iph->saddr;
	new_iph->tot_len = htons(tcpdatalen + iph->ihl * 4 + tcph->doff * 4);
	new_iph->check = 0;

	/*
	 * TCP set
	 */
	new_tcph = (struct tcphdr *)(new_skb->data + iph->ihl * 4);
	memset((char *)new_tcph, 0, tcph->doff * 4);
	new_tcph->source = tcph->dest;
	new_tcph->dest = tcph->source;
	new_tcph->seq = tcph->ack_seq;
	new_tcph->ack_seq = htonl(ntohl(tcph->seq) +
				  (ntohs(iph->tot_len) - iph->ihl * 4 -
				   tcph->doff * 4));
	new_tcph->doff = tcph->doff;
	new_tcph->fin = tcph->fin;
	new_tcph->ack = tcph->ack;
	new_tcph->psh = tcph->psh;
	new_tcph->window = tcph->window;
	new_tcph->check = 0;

	if (tcpdatalen > 0) {
		newpdata = skb_put(new_skb, tcpdatalen);
		if (newpdata != NULL && tcpdata != NULL)
			memcpy(newpdata, tcpdata, tcpdatalen);
	}

	refresh_skb_checksum(new_skb);
	return new_skb;

out:
	if (dev)
		dev_put(dev);
	if (new_skb)
		kfree_skb(new_skb);
	if (skb)
		kfree_skb(skb);
	return NULL;
}
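/* Editor's note: the seq/ack arithmetic above is the standard pattern for
 * fabricating a reply segment: our sequence number continues where the
 * peer expects (their ack), and we acknowledge everything they sent
 * (their seq plus payload length). A plain-arithmetic sketch:
 */
#include <stdint.h>

struct seqack {
	uint32_t seq;
	uint32_t ack;
};

/* All values in host byte order; rx_payload_len is
 * tot_len - IP header - TCP header of the received segment. */
static struct seqack reply_seqack(uint32_t rx_seq, uint32_t rx_ack,
				  uint32_t rx_payload_len)
{
	struct seqack r;

	r.seq = rx_ack;
	r.ack = rx_seq + rx_payload_len;
	return r;
}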
static void ebt_log_packet(u_int8_t pf, unsigned int hooknum,
			   const struct sk_buff *skb,
			   const struct net_device *in,
			   const struct net_device *out,
			   const struct nf_loginfo *loginfo,
			   const char *prefix)
{
	unsigned int bitmask;

	spin_lock_bh(&ebt_log_lock);
	printk("<%c>%s IN=%s OUT=%s MAC source = %pM MAC dest = %pM proto = 0x%04x",
	       '0' + loginfo->u.log.level, prefix,
	       in ? in->name : "", out ? out->name : "",
	       eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
	       ntohs(eth_hdr(skb)->h_proto));

	if (loginfo->type == NF_LOG_TYPE_LOG)
		bitmask = loginfo->u.log.logflags;
	else
		bitmask = NF_LOG_MASK;

	if ((bitmask & EBT_LOG_IP) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_IP)) {
		const struct iphdr *ih;
		struct iphdr _iph;

		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
		if (ih == NULL) {
			printk(" INCOMPLETE IP header");
			goto out;
		}
		printk(" IP SRC=%pI4 IP DST=%pI4, IP tos=0x%02X, IP proto=%d",
		       &ih->saddr, &ih->daddr, ih->tos, ih->protocol);
		print_ports(skb, ih->protocol, ih->ihl * 4);
		goto out;
	}

#if defined(CONFIG_BRIDGE_EBT_IP6) || defined(CONFIG_BRIDGE_EBT_IP6_MODULE)
	if ((bitmask & EBT_LOG_IP6) &&
	    eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) {
		const struct ipv6hdr *ih;
		struct ipv6hdr _iph;
		uint8_t nexthdr;
		int offset_ph;

		ih = skb_header_pointer(skb, 0, sizeof(_iph), &_iph);
		if (ih == NULL) {
			printk(" INCOMPLETE IPv6 header");
			goto out;
		}
		printk(" IPv6 SRC=%pI6 IPv6 DST=%pI6, IPv6 priority=0x%01X, Next Header=%d",
		       &ih->saddr, &ih->daddr, ih->priority, ih->nexthdr);
		nexthdr = ih->nexthdr;
		offset_ph = ipv6_skip_exthdr(skb, sizeof(_iph), &nexthdr);
		if (offset_ph == -1)
			goto out;
		print_ports(skb, nexthdr, offset_ph);
		goto out;
	}
#endif

	if ((bitmask & EBT_LOG_ARP) &&
	    ((eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) ||
	     (eth_hdr(skb)->h_proto == htons(ETH_P_RARP)))) {
		const struct arphdr *ah;
		struct arphdr _arph;

		ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
		if (ah == NULL) {
			printk(" INCOMPLETE ARP header");
			goto out;
		}
		printk(" ARP HTYPE=%d, PTYPE=0x%04x, OPCODE=%d",
		       ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));

		/* If it's for Ethernet and the lengths are OK,
		 * then log the ARP payload
		 */
		if (ah->ar_hrd == htons(1) &&
		    ah->ar_hln == ETH_ALEN &&
		    ah->ar_pln == sizeof(__be32)) {
			const struct arppayload *ap;
			struct arppayload _arpp;

			ap = skb_header_pointer(skb, sizeof(_arph),
						sizeof(_arpp), &_arpp);
			if (ap == NULL) {
				printk(" INCOMPLETE ARP payload");
				goto out;
			}
			printk(" ARP MAC SRC=%pM ARP IP SRC=%pI4 ARP MAC DST=%pM ARP IP DST=%pI4",
			       ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
		}
	}
out:
	printk("\n");
	spin_unlock_bh(&ebt_log_lock);
}
/**
 * eth_header_parse - extract hardware address from packet
 * @skb: packet to extract header from
 * @haddr: destination buffer
 */
int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
{
	const struct ethhdr *eth = eth_hdr(skb);

	memcpy(haddr, eth->h_source, ETH_ALEN);
	return ETH_ALEN;
}
/* This requires some explaining. If DNAT has taken place, we will need to
 * fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr,
					  iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge.
			 */
			if (err != -EHOSTUNREACH || !in_dev ||
			    IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding.
				 */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}
/*
 * Return RX_HANDLER_CONSUMED if the skb was handled
 * note: already called with rcu_read_lock
 */
rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	br_should_route_hook_t *rhook;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	p = br_port_get_rcu(skb->dev);

	if (unlikely(is_link_local_ether_addr(dest))) {
		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment			Value
		 * Bridge Group Address		01-80-C2-00-00-00
		 * (MAC Control) 802.3		01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address		01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP			01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off,
			 * then must forward to keep loop detection */
			if (p->br->stp_enabled == BR_NO_STP)
				goto forward;
			break;

		case 0x01:	/* IEEE MAC (Pause) */
			goto drop;

		default:
			/* Allow selective forwarding for most other protocols */
			if (p->br->group_fwd_mask & (1u << dest[5]))
				goto forward;
		}

		/* Deliver packet to local host only */
		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, skb->dev,
			    NULL, br_handle_local_finish)) {
			return RX_HANDLER_CONSUMED; /* consumed by filter */
		} else {
			*pskb = skb;
			return RX_HANDLER_PASS;	/* continue processing */
		}
	}

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
		rhook = rcu_dereference(br_should_route_hook);
		if (rhook) {
			if ((*rhook)(skb)) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			dest = eth_hdr(skb)->h_dest;
		}
		/* fall through */
	case BR_STATE_LEARNING:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		NF_HOOK(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
			br_handle_frame_finish);
		break;
	default:
drop:
		kfree_skb(skb);
	}
	return RX_HANDLER_CONSUMED;
}
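/* Editor's note: the link-local block 01-80-C2-00-00-00..0F differs only
 * in the last address byte, so the "selective forwarding" check above
 * reduces to one bit test in a 16-bit mask indexed by dest[5]. A
 * self-contained sketch of that policy test (mask layout assumed):
 */
#include <stdbool.h>
#include <stdint.h>

/* Bit N of group_fwd_mask set => the reserved group address ending in N
 * may be forwarded instead of being trapped to the local host. */
static bool may_forward_link_local(uint16_t group_fwd_mask,
				   const uint8_t *dest_mac)
{
	return group_fwd_mask & (1u << (dest_mac[5] & 0x0f));
}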
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr,
					  iph->tos, dev))) {
			struct rtable *rt;
			struct flowi fl = {
				.nl_u = {
					.ip4_u = {
						.daddr = iph->daddr,
						.saddr = 0,
						.tos = RT_TOS(iph->tos) },
				},
				.proto = 0,
			};
			struct in_device *in_dev = in_dev_get(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge.
			 */
			if (err != -EHOSTUNREACH || !in_dev ||
			    IN_DEV_FORWARD(in_dev))
				goto free_skb;

			if (!ip_route_output_key(dev_net(dev), &rt, &fl)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (((struct dst_entry *)rt)->dev == dev) {
					skb->dst = (struct dst_entry *)rt;
					goto bridged_dnat;
				}
				/* we are sure that forwarding is disabled, so printing
				 * this message is no problem. Note that the packet could
				 * still have a martian destination address, in which case
				 * the packet could be dropped even if forwarding were enabled */
				__br_dnat_complain();
				dst_release((struct dst_entry *)rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb->dst->dev == dev) {
bridged_dnat:
				/* Tell br_nf_local_out this is a
				 * bridged frame */
				nf_bridge->mask |= BRNF_BRIDGED_DNAT;
				skb->dev = nf_bridge->physindev;
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
	     struct packet_type *pt, struct net_device *orig_dev)
{
	struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* added by ZebOS 2015-06-01: upstream unconditionally drops
		 * PACKET_OTHERHOST frames here; instead, accept frames
		 * addressed to one of the device's additional MAC addresses.
		 */
		extern int dev_ma_list_lookup(struct net_device *dev,
					      unsigned char *addr, int alen);

		if (!dev_ma_list_lookup(dev, eth_hdr(skb)->h_dest,
					dev->addr_len)) {
			skb->pkt_type = PACKET_HOST;
		} else {
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len);

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
	    !idev || unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store incoming device index. When the packet will
	 * be queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = loopback_dev.
	 * It, however, should be considered as if it is being
	 * arrived via the sending interface (ethX), because of the
	 * nature of scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb_dst(skb) ?
		ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;

	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		goto err;

	/*
	 * RFC4291 2.5.3
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if (!(dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_loopback(&hdr->daddr))
		goto err;

	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ntohs(hdr->payload_len);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			IP6_INC_STATS_BH(net, idev,
					 IPSTATS_MIB_INTRUNCATEDPKTS);
			goto drop;
		}
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
			goto drop;
		}
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		if (ipv6_parse_hopopts(skb) < 0) {
			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return NET_RX_DROP;
		}
	}

	rcu_read_unlock();

	/* Must drop socket now because of tproxy. */
	skb_orphan(skb);

	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL,
		       ip6_rcv_finish);
err:
	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
	rcu_read_unlock();
	kfree_skb(skb);
	return NET_RX_DROP;
}
static unsigned int add_sl_header(struct sk_buff *skb,
				  struct nf_conn *ct,
				  enum ip_conntrack_info ctinfo,
				  unsigned int host_offset,
				  unsigned int dataoff,
				  unsigned int datalen,
				  unsigned int end_of_host,
				  unsigned char *user_data)
{
	/* first make sure there is room */
	if (skb->len >= (MAX_PACKET_LEN - SL_HEADER_LEN)) {
#ifdef SL_DEBUG
		printk(KERN_DEBUG "\nskb too big, length: %d\n", skb->len);
#endif
		return 0;
	}

	/* next make sure an X-SLR header is not already present in the
	 * http headers */
	if (!strncmp(xslr, (char *)(user_data + end_of_host + 1), XSLR_LEN)) {
#ifdef SL_DEBUG
		printk(KERN_DEBUG "\npkt x-slr already present\n");
#endif
		return 0;
	}

#ifdef SL_DEBUG
	printk(KERN_DEBUG "\nno x-slr header present, adding\n");
#endif

	{
		unsigned int jhashed, slheader_len;
		char slheader[SL_HEADER_LEN];
		char src_string[MACADDR_SIZE];
		char *pSrc_string = src_string;
		struct ethhdr *bigmac = eth_hdr(skb);
		unsigned char *pHsource = bigmac->h_source;
		int i = 0;

		/* convert the six octet mac source address into a hex string
		 * via bitmask and bitshift on each octet */
		while (i < 6) {
			*(pSrc_string++) = int2Hex[(*pHsource) >> 4];
			*(pSrc_string++) = int2Hex[(*pHsource) & 0x0f];
			pHsource++;
			i++;
		}

		/* null terminate it just to be safe */
		*pSrc_string = '\0';

#ifdef SL_DEBUG
		printk(KERN_DEBUG "\nsrc macaddr %s\n", src_string);
#endif

		/********************************************/
		/* create the http header */
		/* jenkins hash obfuscation of source mac */
		jhashed = jhash((void *)src_string, MACADDR_SIZE, JHASH_SALT);

		/* create the X-SLR Header */
		slheader_len = sprintf(slheader, "X-SLR: %08x|%s\r\n",
				       jhashed, sl_device);

		/* handle sprintf failure */
		if (slheader_len != SL_HEADER_LEN) {
			printk(KERN_ERR "exp header %s len %d doesnt match calc len %d\n",
			       slheader, SL_HEADER_LEN, slheader_len);
			return 0;
		}

#ifdef SL_DEBUG
		printk(KERN_DEBUG "xslr %s, len %d\n", slheader, slheader_len);
#endif

		/* insert the slheader into the http headers:
		 * Host: foo.com\r\nX-SLR: ffffffff|ffffffffffff */
		if (!nf_nat_mangle_tcp_packet(skb, ct, ctinfo,
					      end_of_host + search[NEWLINE].len,
					      0, slheader, slheader_len)) {
			printk(KERN_ERR " failed to mangle packet\n");
			return 0;
		}

#ifdef SL_DEBUG
		printk(KERN_DEBUG "packet mangled ok:\n%s\n", user_data);
#endif

		return 1;
	}
}
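/* Editor's note: the mangled header above is just
 * "X-SLR: <8 hex digits of a salted Jenkins hash>|<device id>\r\n".
 * A user-space sketch of the MAC-to-hex step, using the same lookup-table
 * technique as the module (table contents assumed):
 */
#include <stdint.h>

static const char hex_digits[16] = "0123456789abcdef";

/* Render a 6-byte MAC as 12 lowercase hex characters, NUL-terminated. */
static void mac_to_hex(const uint8_t mac[6], char out[13])
{
	int i;

	for (i = 0; i < 6; i++) {
		out[2 * i]     = hex_digits[mac[i] >> 4];
		out[2 * i + 1] = hex_digits[mac[i] & 0x0f];
	}
	out[12] = '\0';
}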
/*
 * Experimental Netfilter Crap
 * Copyright (C) 2006 Jonathan Zarate
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/version.h>
#include <linux/file.h>
#include <net/sock.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_exp.h>
#include "../../bridge/br_private.h"

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
static int
#else
static bool
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
match(const struct sk_buff *skb, const struct net_device *in,
      const struct net_device *out, const struct xt_match *match,
      const void *matchinfo, int offset, unsigned int protoff, int *hotdrop)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
match(const struct sk_buff *skb, const struct xt_match_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
	/* const struct ipt_exp_info *info = matchinfo; */
	const struct net_device *indev = in;
#else
	/* const struct ipt_exp_info *info = par->matchinfo; */
	const struct net_device *indev = par->in;
#endif

	if ((skb_mac_header(skb) >= skb->head) &&
	    ((skb_mac_header(skb) + ETH_HLEN) <= skb->data)) {
		printk(KERN_INFO "exp src=%02X:%02X:%02X:%02X:%02X:%02X dst=%02X:%02X:%02X:%02X:%02X:%02X\n",
		       eth_hdr(skb)->h_source[0], eth_hdr(skb)->h_source[1],
		       eth_hdr(skb)->h_source[2], eth_hdr(skb)->h_source[3],
		       eth_hdr(skb)->h_source[4], eth_hdr(skb)->h_source[5],
		       eth_hdr(skb)->h_dest[0], eth_hdr(skb)->h_dest[1],
		       eth_hdr(skb)->h_dest[2], eth_hdr(skb)->h_dest[3],
		       eth_hdr(skb)->h_dest[4], eth_hdr(skb)->h_dest[5]);
		return 1;
	}
	printk(KERN_INFO "exp mac=%p head=%p in=%p\n",
	       skb_mac_header(skb), skb->head, indev);
	return 0;
}
bool vlan_do_receive(struct sk_buff **skbp, bool last_handler)
{
	struct sk_buff *skb = *skbp;
	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
	struct net_device *vlan_dev;
	struct vlan_pcpu_stats *rx_stats;

	vlan_dev = vlan_find_dev(skb->dev, vlan_id);
	if (!vlan_dev) {
		/* Only the last call to vlan_do_receive() should change
		 * pkt_type to PACKET_OTHERHOST
		 */
		if (vlan_id && last_handler)
			skb->pkt_type = PACKET_OTHERHOST;
		return false;
	}

	skb = *skbp = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return false;

	skb->dev = vlan_dev;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					vlan_dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
	}

	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
		unsigned int offset = skb->data - skb_mac_header(skb);

		/*
		 * vlan_insert_tag expects skb->data to point to the mac
		 * header. So change skb->data before calling it and change
		 * back to the original position later.
		 */
		skb_push(skb, offset);
		skb = *skbp = vlan_insert_tag(skb, skb->vlan_tci);
		if (!skb)
			return false;
		skb_pull(skb, offset + VLAN_HLEN);
		skb_reset_mac_len(skb);
	}

	skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_priv(vlan_dev)->vlan_pcpu_stats);

	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;
	if (skb->pkt_type == PACKET_MULTICAST)
		rx_stats->rx_multicast++;
	u64_stats_update_end(&rx_stats->syncp);

	return true;
}
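/* Editor's note: vlan_do_receive() above works on skb->vlan_tci, the
 * 16-bit 802.1Q tag control information. A tiny sketch of how the TCI
 * decomposes into VLAN id and priority code point:
 */
#include <stdint.h>

#define SKETCH_VLAN_VID_MASK   0x0fff
#define SKETCH_VLAN_PRIO_SHIFT 13

static void tci_split(uint16_t tci, uint16_t *vid, uint8_t *pcp)
{
	*vid = tci & SKETCH_VLAN_VID_MASK;	/* bits 0-11: VLAN id */
	*pcp = tci >> SKETCH_VLAN_PRIO_SHIFT;	/* bits 13-15: priority */
}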
int dhcp_option82_handle(struct sk_buff *skb, struct dhcp_packet *dhcp,
			 dba_result_t *res)
{
	struct ethhdr *ethhdr = NULL;
	struct iphdr *iph = NULL;
	struct udphdr *udph = NULL;
	unsigned char *tail = NULL;
	int udplen;

	if (unlikely(!skb || !dhcp || !res))
		return -1;

	if ((DHCP_CLIENT_REQUEST == dhcp->op) &&
	    (res->module_type & DHCP_OPTION82_KMOD)) {
		if (dba_option82_debug) {
			printk(KERN_DEBUG "skb->dev->name : %s\n",
			       skb->dev->name);
			printPacketBuffer(skb->data, skb->len);
			printk(KERN_DEBUG "res->len : %d\n", res->len);
			printk(KERN_DEBUG "res->data : \n");
			printPacketBuffer((unsigned char *)res->data, res->len);
		}

		/* the option length field is a single byte, so the
		 * suboption payload cannot exceed 255 bytes */
		if (unlikely(res->len > 255)) {
			log_error("dhcp option82 length %d: too large!\n",
				  res->len);
			res->result |= DBA_ERROR;
			return -1;
		}

		/* enlarge the skb; the dhcp pointer may change, so it must
		 * be recalculated afterwards */
		if (dba_enlarge_skb(skb, DBA_ALIGN4(res->len + 2))) {
			log_error("dhcp option82 expand skb failed!\n");
			res->result |= DBA_ERROR;
			return -1;
		}

		/* skb may have changed, so recalculate the pointers */
		ethhdr = eth_hdr(skb);
		iph = (struct iphdr *)(ethhdr + 1);
		udph = (struct udphdr *)IPv4_NXT_HDR(iph);
		dhcp = (struct dhcp_packet *)(udph + 1);

		/* get skb tail */
		tail = skb_tail_pointer(skb);

		/* find the DHCP end option (0xff) */
		tail = dhcp_get_option(dhcp, 0xff, tail);
		if (tail) {
			skb_put(skb, res->len + 2);
			/* append option 82 */
			*tail = 82;			/* option 82 code */
			*(tail + 1) = res->len;		/* option 82 length */
			memcpy(tail + 2, res->data, res->len);
			/* re-add the end option just behind option 82 */
			*(tail + 2 + res->len) = 0xff;
		} else {
			res->result |= DBA_ERROR;
			log_error("dhcp option82 cannot find option 255!\n");
			return -1;
		}

		/* recalculate the IP length and checksum; tot_len is
		 * big-endian, so convert before doing the arithmetic */
		iph->tot_len = htons(ntohs(iph->tot_len) + res->len + 2);
		iph->check = 0;
		iph->check = ip_fast_csum(iph, iph->ihl);

		/* recalculate the UDP length and checksum */
		udplen = ntohs(udph->len) + res->len + 2;
		udph->len = htons(udplen);
		udph->check = 0;
		udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						udplen, IPPROTO_UDP,
						csum_partial(udph, udplen, 0));

		res->result |= DBA_HANDLED;
	}
	return 0;
}
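/* Editor's note: both length fields touched above (iph->tot_len,
 * udph->len) are big-endian on the wire, which is why the fixed code
 * round-trips through ntohs()/htons() before adding the option size.
 * The pattern in isolation:
 */
#include <arpa/inet.h>
#include <stdint.h>

/* Grow a big-endian 16-bit length field by delta bytes. */
static void be16_len_add(uint16_t *len_be, unsigned int delta)
{
	*len_be = htons((uint16_t)(ntohs(*len_be) + delta));
}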
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland. To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory. RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                 aligned, so there doesn't need to be any of the unaligned
 *                 stuff. It has been commented out now... --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	struct net_device_stats *stats;
	u16 vlan_id;
	u16 vlan_tci;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	vlan_id = vlan_tci & VLAN_VID_MASK;

	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vlan_id);
	if (!skb->dev) {
#if defined(CONFIG_BCM_SMUX)
		/* start: bridge-mode vlan pkt is discarded, A36D08034 by f00110348 */
		if (orig_dev->priv_flags & IFF_RSMUX) {
			skb->dev = NULL;
			rcu_read_unlock(); /* add unlock, or crash 20111225 by f00110348 */
			return 0;
		}
		/* end: bridge-mode vlan pkt is discarded, A36D08034 by f00110348 */
#endif
		pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
			 __func__, vlan_id, dev->name);
		goto err_unlock;
	}

	stats = &skb->dev->stats;
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	skb_pull_rcsum(skb, VLAN_HLEN);

	skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);

	/* Start of modification by f00120964 for qos function 2012-4-2 */
#ifdef CONFIG_DT_QOS
	skb->mark |= s_dtQos8021PtoMark[((ntohs(vhdr->h_vlan_TCI) >> 13) & 0x7)];
#endif
	/* End of modification by f00120964 for qos function 2012-4-2 */

	pr_debug("%s: priority: %u for TCI: %hu\n",
		 __func__, skb->priority, vlan_tci);

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		/* Yeah, stats collect these together.. */
		/* stats->broadcast++; // no such counter :-( */
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					skb->dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	default:
		break;
	}

	vlan_set_encap_proto(skb, vhdr);

	skb = vlan_check_reorder_header(skb);
	if (!skb) {
		stats->rx_errors++;
		goto err_unlock;
	}

	netif_rx(skb);
	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	kfree_skb(skb);
	return NET_RX_DROP;
}
/*----------------------------------------------------------------
 * p80211pb_80211_to_ether
 *
 * Uses the contents of a received 802.11 frame and the etherconv
 * setting to build an ether frame.
 *
 * This function extracts the src and dest address from the 802.11
 * frame to use in the construction of the eth frame.
 *
 * Arguments:
 *	ethconv		Conversion type to perform
 *	skb		Packet buffer containing the 802.11 frame
 *
 * Returns:
 *	0 on success, non-zero otherwise
 *
 * Call context:
 *	May be called in interrupt or non-interrupt context
 *----------------------------------------------------------------*/
int skb_p80211_to_ether(wlandevice_t *wlandev, u32 ethconv,
			struct sk_buff *skb)
{
	netdevice_t *netdev = wlandev->netdev;
	u16 fc;
	unsigned int payload_length;
	unsigned int payload_offset;
	u8 daddr[WLAN_ETHADDR_LEN];
	u8 saddr[WLAN_ETHADDR_LEN];
	union p80211_hdr *w_hdr;
	struct wlan_ethhdr *e_hdr;
	struct wlan_llc *e_llc;
	struct wlan_snap *e_snap;
	int foo;

	payload_length = skb->len - WLAN_HDR_A3_LEN - WLAN_CRC_LEN;
	payload_offset = WLAN_HDR_A3_LEN;

	w_hdr = (union p80211_hdr *)skb->data;

	/* setup some vars for convenience */
	fc = le16_to_cpu(w_hdr->a3.fc);
	if ((WLAN_GET_FC_TODS(fc) == 0) && (WLAN_GET_FC_FROMDS(fc) == 0)) {
		memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
	} else if ((WLAN_GET_FC_TODS(fc) == 0) &&
		   (WLAN_GET_FC_FROMDS(fc) == 1)) {
		memcpy(daddr, w_hdr->a3.a1, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
	} else if ((WLAN_GET_FC_TODS(fc) == 1) &&
		   (WLAN_GET_FC_FROMDS(fc) == 0)) {
		memcpy(daddr, w_hdr->a3.a3, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a3.a2, WLAN_ETHADDR_LEN);
	} else {
		payload_offset = WLAN_HDR_A4_LEN;
		if (payload_length < WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN) {
			/* A4 frame too short to hold its own header */
			return 1;
		}
		payload_length -= (WLAN_HDR_A4_LEN - WLAN_HDR_A3_LEN);
		memcpy(daddr, w_hdr->a4.a3, WLAN_ETHADDR_LEN);
		memcpy(saddr, w_hdr->a4.a4, WLAN_ETHADDR_LEN);
	}

	/* perform de-wep if necessary.. */
	if ((wlandev->hostwep & HOSTWEP_PRIVACYINVOKED) &&
	    WLAN_GET_FC_ISWEP(fc) &&
	    (wlandev->hostwep & HOSTWEP_DECRYPT)) {
		if (payload_length <= 8) {
			/* WEP frame too short */
			return 1;
		}
		foo = wep_decrypt(wlandev, skb->data + payload_offset + 4,
				  payload_length - 8, -1,
				  skb->data + payload_offset,
				  skb->data + payload_offset +
					payload_length - 4);
		if (foo) {
			/* de-wep failed, drop skb. */
			pr_debug("Host de-WEP failed, dropping frame (%d).\n",
				 foo);
			wlandev->rx.decrypt_err++;
			return 2;
		}

		/* subtract the IV+ICV length off the payload */
		payload_length -= 8;
		/* chop off the IV */
		skb_pull(skb, 4);
		/* chop off the ICV. */
		skb_trim(skb, skb->len - 4);

		wlandev->rx.decrypt++;
	}

	e_hdr = (struct wlan_ethhdr *)(skb->data + payload_offset);
	e_llc = (struct wlan_llc *)(skb->data + payload_offset);
	e_snap = (struct wlan_snap *)(skb->data + payload_offset +
		sizeof(struct wlan_llc));

	/* Test for the various encodings */
	if ((payload_length >= sizeof(struct wlan_ethhdr)) &&
	    (e_llc->dsap != 0xaa || e_llc->ssap != 0xaa) &&
	    ((memcmp(daddr, e_hdr->daddr, WLAN_ETHADDR_LEN) == 0) ||
	     (memcmp(saddr, e_hdr->saddr, WLAN_ETHADDR_LEN) == 0))) {
		pr_debug("802.3 ENCAP len: %d\n", payload_length);
		/* 802.3 Encapsulated */
		/* Test for an overlength frame */
		if (payload_length > (netdev->mtu + WLAN_ETHHDR_LEN)) {
			/* A bogus length ethfrm has been encap'd. */
			/* Is someone trying an oflow attack? */
			return 1;
		}

		/* Chop off the 802.11 header.  it's already sane. */
		skb_pull(skb, payload_offset);
		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);

	} else if ((payload_length >= sizeof(struct wlan_llc) +
		    sizeof(struct wlan_snap)) &&
		   (e_llc->dsap == 0xaa) &&
		   (e_llc->ssap == 0xaa) &&
		   (e_llc->ctl == 0x03) &&
		   (((memcmp(e_snap->oui, oui_rfc1042,
			     WLAN_IEEE_OUI_LEN) == 0) &&
		     (ethconv == WLAN_ETHCONV_8021h) &&
		     (p80211_stt_findproto(le16_to_cpu(e_snap->type)))) ||
		    (memcmp(e_snap->oui, oui_rfc1042,
			    WLAN_IEEE_OUI_LEN) != 0))) {
		pr_debug("SNAP+RFC1042 len: %d\n", payload_length);
		/* it's a SNAP + RFC1042 frame && protocol is in STT */
		/* build 802.3 + RFC1042 */

		/* Test for an overlength frame */
		if (payload_length > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			return 1;
		}

		/* chop 802.11 header from skb. */
		skb_pull(skb, payload_offset);

		/* create 802.3 header at beginning of skb. */
		e_hdr = (struct wlan_ethhdr *)skb_push(skb, WLAN_ETHHDR_LEN);
		memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
		memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
		e_hdr->type = htons(payload_length);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);

	} else if ((payload_length >= sizeof(struct wlan_llc) +
		    sizeof(struct wlan_snap)) &&
		   (e_llc->dsap == 0xaa) &&
		   (e_llc->ssap == 0xaa) &&
		   (e_llc->ctl == 0x03)) {
		pr_debug("802.1h/RFC1042 len: %d\n", payload_length);
		/* it's an 802.1h frame || (an RFC1042 && protocol not in STT)
		 * build a DIXII + RFC894 */

		/* Test for an overlength frame */
		if ((payload_length - sizeof(struct wlan_llc) -
		     sizeof(struct wlan_snap)) > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			return 1;
		}

		/* chop 802.11 header from skb. */
		skb_pull(skb, payload_offset);

		/* chop llc header from skb. */
		skb_pull(skb, sizeof(struct wlan_llc));

		/* chop snap header from skb. */
		skb_pull(skb, sizeof(struct wlan_snap));

		/* create 802.3 header at beginning of skb. */
		e_hdr = (struct wlan_ethhdr *)skb_push(skb, WLAN_ETHHDR_LEN);
		e_hdr->type = e_snap->type;
		memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
		memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);
	} else {
		pr_debug("NON-ENCAP len: %d\n", payload_length);
		/* any NON-ENCAP */
		/* it's a generic 80211+LLC or IPX 'Raw 802.3' */
		/* build an 802.3 frame */
		/* allocate space and setup hostbuf */

		/* Test for an overlength frame */
		if (payload_length > netdev->mtu) {
			/* A bogus length ethfrm has been sent. */
			/* Is someone trying an oflow attack? */
			return 1;
		}

		/* Chop off the 802.11 header. */
		skb_pull(skb, payload_offset);

		/* create 802.3 header at beginning of skb. */
		e_hdr = (struct wlan_ethhdr *)skb_push(skb, WLAN_ETHHDR_LEN);
		memcpy(e_hdr->daddr, daddr, WLAN_ETHADDR_LEN);
		memcpy(e_hdr->saddr, saddr, WLAN_ETHADDR_LEN);
		e_hdr->type = htons(payload_length);

		/* chop off the 802.11 CRC */
		skb_trim(skb, skb->len - WLAN_CRC_LEN);
	}

	/*
	 * Note that eth_type_trans() expects an skb w/ skb->data pointing
	 * at the MAC header, it then sets the following skb members:
	 * skb->mac_header,
	 * skb->data, and
	 * skb->pkt_type.
	 * It then _returns_ the value that _we're_ supposed to stuff in
	 * skb->protocol.  This is nuts.
	 */
	skb->protocol = eth_type_trans(skb, netdev);

	/* jkriegl: process signal and noise as set in hfa384x_int_rx() */
	/* jkriegl: only process signal/noise if requested by iwspy */
	if (wlandev->spy_number)
		orinoco_spy_gather(wlandev, eth_hdr(skb)->h_source,
				   P80211SKB_RXMETA(skb));

	/* Free the metadata */
	p80211skb_rxmeta_detach(skb);

	return 0;
}
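/* Editor's note: the DA/SA selection at the top of the function follows
 * the 802.11 ToDS/FromDS address table. The same logic, reduced to a
 * standalone helper over the four address slots:
 */
#include <stdint.h>

struct wlan_addrs {
	const uint8_t *da;	/* destination */
	const uint8_t *sa;	/* source */
};

static struct wlan_addrs wlan_da_sa(int to_ds, int from_ds,
				    const uint8_t *a1, const uint8_t *a2,
				    const uint8_t *a3, const uint8_t *a4)
{
	struct wlan_addrs r;

	if (!to_ds && !from_ds) {		/* IBSS / station-to-station */
		r.da = a1; r.sa = a2;
	} else if (!to_ds && from_ds) {		/* from the AP */
		r.da = a1; r.sa = a3;
	} else if (to_ds && !from_ds) {		/* to the AP */
		r.da = a3; r.sa = a2;
	} else {				/* WDS (4-address) */
		r.da = a3; r.sa = a4;
	}
	return r;
}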
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland. To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory. RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                 aligned, so there doesn't need to be any of the unaligned
 *                 stuff. It has been commented out now... --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	unsigned char *rawp = NULL;
	struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data);
	unsigned short vid;
	struct net_device_stats *stats;
	unsigned short vlan_TCI;
	__be16 proto;

	/* vlan_TCI = ntohs(get_unaligned(&vhdr->h_vlan_TCI)); */
	vlan_TCI = ntohs(vhdr->h_vlan_TCI);
	vid = (vlan_TCI & VLAN_VID_MASK);

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p vlan_id: %hx\n",
	       __FUNCTION__, skb, vid);
#endif

	/* Ok, we will find the correct VLAN device, strip the header,
	 * and then go on as usual.
	 */

	/* We have 12 bits of vlan ID.
	 *
	 * We must not allow preemption until we hold a
	 * reference to the device (netif_rx does that) or we
	 * fail.
	 */
	rcu_read_lock();
	skb->dev = __find_vlan_dev(dev, vid);
	if (!skb->dev) {
		rcu_read_unlock();
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: ERROR: No net_device for VID: %i on dev: %s [%i]\n",
		       __FUNCTION__, (unsigned int)vid, dev->name,
		       dev->ifindex);
#endif
		kfree_skb(skb);
		return -1;
	}

	skb->dev->last_rx = jiffies;

	/* Bump the rx counters for the VLAN device. */
	stats = vlan_dev_get_stats(skb->dev);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	/* take off the VLAN header (4 bytes currently) */
	skb_pull(skb, VLAN_HLEN);

	/* Need to correct hardware checksum */
	skb_postpull_rcsum(skb, vhdr, VLAN_HLEN);

	/* Ok, lets check to make sure the device (dev) we
	 * came in on is what this VLAN is attached to.
	 */
	if (dev != VLAN_DEV_INFO(skb->dev)->real_dev) {
		rcu_read_unlock();
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: dropping skb: %p because came in on wrong device, dev: %s real_dev: %s, skb_dev: %s\n",
		       __FUNCTION__, skb, dev->name,
		       VLAN_DEV_INFO(skb->dev)->real_dev->name,
		       skb->dev->name);
#endif
		kfree_skb(skb);
		stats->rx_errors++;
		return -1;
	}

	/*
	 * Deal with ingress priority mapping.
	 */
	skb->priority = vlan_get_ingress_priority(skb->dev,
						  ntohs(vhdr->h_vlan_TCI));

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: priority: %lu for TCI: %hu (hbo)\n",
	       __FUNCTION__, (unsigned long)skb->priority,
	       ntohs(vhdr->h_vlan_TCI));
#endif

	/* The ethernet driver already did the pkt_type calculations
	 * for us...
	 */
	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		/* Yeah, stats collect these together.. */
		/* stats->broadcast++; // no such counter :-( */
		break;

	case PACKET_MULTICAST:
		stats->multicast++;
		break;

	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly.
		 */
		if (memcmp(eth_hdr(skb)->h_dest, skb->dev->dev_addr,
			   ETH_ALEN) == 0) {
			/* It is for our (changed) MAC-address! */
			skb->pkt_type = PACKET_HOST;
		}
		break;
	default:
		break;
	}

	/* Was a VLAN packet, grab the encapsulated protocol, which the layer
	 * three protocols care about.
	 */
	/* proto = get_unaligned(&vhdr->h_vlan_encapsulated_proto); */
	proto = vhdr->h_vlan_encapsulated_proto;
	skb->protocol = proto;
	if (ntohs(proto) >= 1536) {
		/* place it back on the queue to be handled by
		 * true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	rawp = skb->data;

	/*
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF) {
		skb->protocol = __constant_htons(ETH_P_802_3);
		/* place it back on the queue to be handled by
		 * true layer 3 protocols.
		 */

		/* See if we are configured to re-write the VLAN header
		 * to make it look like ethernet...
		 */
		skb = vlan_check_reorder_header(skb);

		/* Can be null if skb-clone fails when re-ordering */
		if (skb) {
			netif_rx(skb);
		} else {
			/* TODO: Add a more specific counter here. */
			stats->rx_errors++;
		}
		rcu_read_unlock();
		return 0;
	}

	/*
	 * Real 802.2 LLC
	 */
	skb->protocol = __constant_htons(ETH_P_802_2);
	/* place it back on the queue to be handled by upper layer protocols. */

	/* See if we are configured to re-write the VLAN header
	 * to make it look like ethernet...
	 */
	skb = vlan_check_reorder_header(skb);

	/* Can be null if skb-clone fails when re-ordering */
	if (skb) {
		netif_rx(skb);
	} else {
		/* TODO: Add a more specific counter here. */
		stats->rx_errors++;
	}
	rcu_read_unlock();
	return 0;
}
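/* Editor's note: the three-way classification at the end of the receive
 * path above mirrors eth_type_trans(): values >= 1536 are DIX Ethertypes,
 * a leading 0xFFFF marks raw-802.3 IPX, anything else is 802.2 LLC. As a
 * standalone helper (0x0001/0x0004 are the kernel's internal ETH_P_802_3
 * and ETH_P_802_2 values):
 */
#include <stdint.h>

#define SKETCH_ETH_P_802_3 0x0001
#define SKETCH_ETH_P_802_2 0x0004

/* encap: the encapsulated protocol field in host byte order;
 * payload: the first bytes after the VLAN header. */
static uint16_t classify_encap(uint16_t encap, const uint8_t *payload)
{
	if (encap >= 1536)
		return encap;			/* DIX Ethertype */
	if (payload[0] == 0xff && payload[1] == 0xff)
		return SKETCH_ETH_P_802_3;	/* raw 802.3 (old IPX) */
	return SKETCH_ETH_P_802_2;		/* real 802.2 LLC */
}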
/* note: already called with rcu_read_lock (preempt_disabled) */
int br_handle_frame_finish(struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
	struct net_bridge *br;
	struct net_bridge_fdb_entry *dst;
	struct sk_buff *skb2;
#if defined(CONFIG_MIPS_BRCM)
	struct iphdr *pip = NULL;
	__u8 igmpTypeOffset = 0;
#endif

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

#if defined(CONFIG_MIPS_BRCM)
	if (vlan_eth_hdr(skb)->h_vlan_proto == htons(ETH_P_IP)) {
		pip = ip_hdr(skb);
		igmpTypeOffset = (pip->ihl << 2);
	} else if (vlan_eth_hdr(skb)->h_vlan_proto == htons(ETH_P_8021Q)) {
		if (vlan_eth_hdr(skb)->h_vlan_encapsulated_proto ==
		    htons(ETH_P_IP)) {
			pip = (struct iphdr *)(skb_network_header(skb) +
					       sizeof(struct vlan_hdr));
			igmpTypeOffset = (pip->ihl << 2) +
					 sizeof(struct vlan_hdr);
		}
	}

	if ((pip) && (pip->protocol == IPPROTO_IGMP)) {
#if defined(CONFIG_BCM_GPON_MODULE)
		struct igmphdr *ih = (struct igmphdr *)&skb->data[igmpTypeOffset];

		/* drop IGMP v1 report packets */
		if (ih->type == IGMP_HOST_MEMBERSHIP_REPORT)
			goto drop;

		/* drop IGMP v1 query packets */
		if ((ih->type == IGMP_HOST_MEMBERSHIP_QUERY) &&
		    (ih->code == 0))
			goto drop;

		/* drop IGMP leave packets for group 0.0.0.0 */
		if ((ih->type == IGMP_HOST_LEAVE_MESSAGE) && (0 == ih->group))
			goto drop;
#endif
		/* rate limit IGMP */
		br = p->br;
		if (br->igmp_rate_limit) {
			ktime_t curTime;
			u64 diffUs;
			unsigned int usPerPacket;
			unsigned int temp32;
			unsigned int burstLimit;

			/* add tokens to the bucket - compute in microseconds */
			curTime = ktime_get();
			usPerPacket = (1000000 / br->igmp_rate_limit);
			diffUs = ktime_to_us(ktime_sub(curTime,
						       br->igmp_rate_last_packet));
			diffUs += br->igmp_rate_rem_time;

			/* allow 25% burst */
			burstLimit = br->igmp_rate_limit >> 2;
			if (0 == burstLimit)
				burstLimit = 1;

			if (diffUs > 1000000) {
				br->igmp_rate_bucket = burstLimit;
				br->igmp_rate_rem_time = 0;
			} else {
				temp32 = (unsigned int)diffUs / usPerPacket;
				br->igmp_rate_bucket += temp32;
				if (temp32)
					br->igmp_rate_rem_time =
						diffUs - (temp32 * usPerPacket);
			}

			if (br->igmp_rate_bucket > burstLimit) {
				br->igmp_rate_bucket = burstLimit;
				br->igmp_rate_rem_time = 0;
			}

			/* if bucket is empty drop the packet */
			if (0 == br->igmp_rate_bucket)
				goto drop;

			br->igmp_rate_bucket--;
			br->igmp_rate_last_packet.tv64 = curTime.tv64;
		}
	}
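/* Editor's note: the IGMP limiter above is a token bucket kept in
 * microseconds with a 25% burst allowance. The refill-then-consume step,
 * extracted into a self-contained form (field names follow the snippet):
 */
#include <stdbool.h>
#include <stdint.h>

struct igmp_bucket {
	unsigned int rate;	/* packets per second (> 0) */
	unsigned int bucket;	/* available tokens */
	uint64_t rem_us;	/* microseconds carried over */
};

static bool igmp_bucket_allow(struct igmp_bucket *b, uint64_t elapsed_us)
{
	unsigned int us_per_pkt = 1000000u / b->rate;
	unsigned int burst = (b->rate >> 2) ? (b->rate >> 2) : 1; /* 25% */
	uint64_t us = elapsed_us + b->rem_us;
	unsigned int n;

	if (us > 1000000u) {
		/* idle for over a second: refill to the burst limit */
		b->bucket = burst;
		b->rem_us = 0;
	} else {
		n = (unsigned int)(us / us_per_pkt);
		b->bucket += n;
		if (n)
			b->rem_us = us - (uint64_t)n * us_per_pkt;
	}
	if (b->bucket > burst) {
		b->bucket = burst;
		b->rem_us = 0;
	}
	if (!b->bucket)
		return false;	/* bucket empty: drop */
	b->bucket--;
	return true;
}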
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = skb->nh.iph;
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if (ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
				   dev)) {
			struct rtable *rt;
			struct flowi fl = {
				.nl_u = {
					.ip4_u = {
						.daddr = iph->daddr,
						.saddr = 0,
						.tos = RT_TOS(iph->tos) },
				},
				.proto = 0,
			};

			if (!ip_route_output_key(&rt, &fl)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding.
				 * - Deal with redirected traffic. */
				if (((struct dst_entry *)rt)->dev == dev ||
				    rt->rt_type == RTN_LOCAL) {
					skb->dst = (struct dst_entry *)rt;
					goto bridged_dnat;
				}
				__br_dnat_complain();
				dst_release((struct dst_entry *)rt);
			}
			kfree_skb(skb);
			return 0;
		} else {
			if (skb->dst->dev == dev) {
bridged_dnat:
				/* Tell br_nf_local_out this is a
				 * bridged frame */
				nf_bridge->mask |= BRNF_BRIDGED_DNAT;
				skb->dev = nf_bridge->physindev;
				if (skb->protocol == htons(ETH_P_8021Q)) {
					skb_push(skb, VLAN_HLEN);
					skb->nh.raw -= VLAN_HLEN;
				}
				NF_HOOK_THRESH(PF_BRIDGE, NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
/*
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 *
 * Also, at this point we assume that we ARE dealing exclusively with
 * VLAN packets, or packets that should be made into VLAN packets based
 * on a default VLAN ID.
 *
 * NOTE: Should be similar to ethernet/eth.c.
 *
 * SANITY NOTE:  This method is called when a packet is moving up the stack
 *               towards userland. To get here, it would have already passed
 *               through the ethernet/eth.c eth_type_trans() method.
 * SANITY NOTE 2: We are referring to the VLAN_HDR fields, which MAY be
 *                stored UNALIGNED in the memory. RISC systems don't like
 *                such cases very much...
 * SANITY NOTE 2a: According to Dave Miller & Alexey, it will always be
 *                 aligned, so there doesn't need to be any of the unaligned
 *                 stuff. It has been commented out now... --Ben
 */
int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
		  struct packet_type *ptype, struct net_device *orig_dev)
{
	struct vlan_hdr *vhdr;
	struct vlan_pcpu_stats *rx_stats;
	struct net_device *vlan_dev;
	u16 vlan_id;
	u16 vlan_tci;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto err_free;

	if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
		goto err_free;

	vhdr = (struct vlan_hdr *)skb->data;
	vlan_tci = ntohs(vhdr->h_vlan_TCI);
	vlan_id = vlan_tci & VLAN_VID_MASK;

	rcu_read_lock();
	vlan_dev = vlan_find_dev(dev, vlan_id);

	/* If the VLAN device is defined, we use it.
	 * If not, and the VID is 0, it is a 802.1p packet (not
	 * really a VLAN), so we will just netif_rx it later to the
	 * original interface, but with the skb->proto set to the
	 * wrapped proto: we do nothing here.
	 */
	if (!vlan_dev) {
		if (vlan_id) {
			pr_debug("%s: ERROR: No net_device for VID: %u on dev: %s\n",
				 __func__, vlan_id, dev->name);
			goto err_unlock;
		}
		rx_stats = NULL;
	} else {
		skb->dev = vlan_dev;

		rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);

		u64_stats_update_begin(&rx_stats->syncp);
		rx_stats->rx_packets++;
		rx_stats->rx_bytes += skb->len;

		skb->priority = vlan_get_ingress_priority(skb->dev, vlan_tci);

		pr_debug("%s: priority: %u for TCI: %hu\n",
			 __func__, skb->priority, vlan_tci);

		switch (skb->pkt_type) {
		case PACKET_BROADCAST:
			/* Yeah, stats collect these together.. */
			/* stats->broadcast++; // no such counter :-( */
			break;

		case PACKET_MULTICAST:
			rx_stats->rx_multicast++;
			break;

		case PACKET_OTHERHOST:
			/* Our lower layer thinks this is not local, let's
			 * make sure.
			 * This allows the VLAN to have a different MAC than
			 * the underlying device, and still route correctly.
			 */
			if (!compare_ether_addr(eth_hdr(skb)->h_dest,
						skb->dev->dev_addr))
				skb->pkt_type = PACKET_HOST;
			break;
		default:
			break;
		}

		u64_stats_update_end(&rx_stats->syncp);
	}

	skb_pull_rcsum(skb, VLAN_HLEN);
	vlan_set_encap_proto(skb, vhdr);

	if (vlan_dev) {
		skb = vlan_check_reorder_header(skb);
		if (!skb) {
			rx_stats->rx_errors++;
			goto err_unlock;
		}
	}

	netif_rx(skb);

	rcu_read_unlock();
	return NET_RX_SUCCESS;

err_unlock:
	rcu_read_unlock();
err_free:
	atomic_long_inc(&dev->rx_dropped);
	kfree_skb(skb);
	return NET_RX_DROP;
}
static int
bitmap_ipmac_kadt(struct ip_set *set, const struct sk_buff *skb,
		  const struct xt_action_param *par,
		  enum ipset_adt adt, struct ip_set_adt_opt *opt)
{
	struct bitmap_ipmac *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
	u32 ip;

	ip = ntohl(ip4addr(skb, opt->flags & IPSET_DIM_ONE_SRC));
	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	/* Backward compatibility: we don't check the second flag */
	if (skb_mac_header(skb) < skb->head ||
	    (skb_mac_header(skb) + ETH_HLEN) > skb->data)
		return -EINVAL;

	e.id = ip_to_id(map, ip);
	if (opt->flags & IPSET_DIM_ONE_SRC)
		ether_addr_copy(e.ether, eth_hdr(skb)->h_source);
	else
		ether_addr_copy(e.ether, eth_hdr(skb)->h_dest);

	if (is_zero_ether_addr(e.ether))
		return -EINVAL;

	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
}

static int
bitmap_ipmac_uadt(struct ip_set *set, struct nlattr *tb[],
		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct bitmap_ipmac *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct bitmap_ipmac_adt_elem e = { .id = 0 };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	u32 ip = 0;
	int ret = 0;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	if (unlikely(!tb[IPSET_ATTR_IP]))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip);
	if (ret)
		return ret;

	ret = ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	if (ip < map->first_ip || ip > map->last_ip)
		return -IPSET_ERR_BITMAP_RANGE;

	e.id = ip_to_id(map, ip);
	if (tb[IPSET_ATTR_ETHER]) {
		if (nla_len(tb[IPSET_ATTR_ETHER]) != ETH_ALEN)
			return -IPSET_ERR_PROTOCOL;
		memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
		e.add_mac = 1;
	}
	ret = adtfn(set, &e, &ext, &ext, flags);

	return ip_set_eexist(ret, flags) ? 0 : ret;
}

static bool
bitmap_ipmac_same_set(const struct ip_set *a, const struct ip_set *b)
{
	const struct bitmap_ipmac *x = a->data;
	const struct bitmap_ipmac *y = b->data;

	return x->first_ip == y->first_ip &&
	       x->last_ip == y->last_ip &&
	       a->timeout == b->timeout &&
	       a->extensions == b->extensions;
}

/* Plain variant */

#include "ip_set_bitmap_gen.h"

/* Create bitmap:ip,mac type of sets */

static bool
init_map_ipmac(struct ip_set *set, struct bitmap_ipmac *map,
	       u32 first_ip, u32 last_ip, u32 elements)
{
	map->members = ip_set_alloc(map->memsize);
	if (!map->members)
		return false;
	map->first_ip = first_ip;
	map->last_ip = last_ip;
	map->elements = elements;
	set->timeout = IPSET_NO_TIMEOUT;

	map->set = set;
	set->data = map;
	set->family = NFPROTO_IPV4;

	return true;
}

static int
bitmap_ipmac_create(struct net *net, struct ip_set *set, struct nlattr *tb[],
		    u32 flags)
{
	u32 first_ip = 0, last_ip = 0;
	u64 elements;
	struct bitmap_ipmac *map;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &first_ip);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &last_ip);
		if (ret)
			return ret;
		if (first_ip > last_ip)
			swap(first_ip, last_ip);
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr >= HOST_MASK)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(first_ip, last_ip, cidr);
	} else {
		return -IPSET_ERR_PROTOCOL;
	}

	elements = (u64)last_ip - first_ip + 1;

	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
		return -IPSET_ERR_BITMAP_RANGE_SIZE;

	set->dsize = ip_set_elem_len(set, tb,
				     sizeof(struct bitmap_ipmac_elem),
				     __alignof__(struct bitmap_ipmac_elem));
	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
	if (!map)
		return -ENOMEM;

	map->memsize = bitmap_bytes(0, elements - 1);
	set->variant = &bitmap_ipmac;
	if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
		kfree(map);
		return -ENOMEM;
	}
	if (tb[IPSET_ATTR_TIMEOUT]) {
		set->timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
		bitmap_ipmac_gc_init(set, bitmap_ipmac_gc);
	}
	return 0;
}
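/* Editor's note: bitmap:ip,mac stores one element per address in the
 * configured range, so membership is a direct offset from first_ip. The
 * id mapping and range check used above, in isolation:
 */
#include <stdint.h>

struct ip_range {
	uint32_t first_ip;	/* host byte order */
	uint32_t last_ip;
};

/* Returns the bitmap slot for ip, or -1 if it is outside the range. */
static long ip_to_slot(const struct ip_range *r, uint32_t ip)
{
	if (ip < r->first_ip || ip > r->last_ip)
		return -1;
	return (long)(ip - r->first_ip);
}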
/**
 * batadv_bla_rx
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 * @is_bcast: the packet came in a broadcast packet type.
 *
 * bla_rx avoidance checks if:
 *  * we have to race for a claim
 *  * if the frame is allowed on the LAN
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 */
int batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid, bool is_bcast)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret;

	ethhdr = eth_hdr(skb);

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto handled;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast)
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;
	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	if (!claim) {
		/* possible optimization: race for a claim */
		/* No claim exists yet, claim it for us! */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}

	/* if it is our own claim ... */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* ... allow it in any case */
		claim->lasttime = jiffies;
		goto allow;
	}

	/* if it is a broadcast ... */
	if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) {
		/* ... drop it. the responsible gateway is in charge.
		 *
		 * We need to check is_bcast because with the gateway
		 * feature, broadcasts (like DHCP requests) may be sent
		 * using a unicast packet type.
		 */
		goto handled;
	} else {
		/* seems the client considers us as its best gateway.
		 * send a claim and update the claim table
		 * immediately.
		 */
		batadv_handle_claim(bat_priv, primary_if,
				    primary_if->net_dev->dev_addr,
				    ethhdr->h_source, vid);
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;

handled:
	kfree_skb(skb);
	ret = 1;

out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
static void nft_reject_bridge_eval(const struct nft_expr *expr,
				   struct nft_regs *regs,
				   const struct nft_pktinfo *pkt)
{
	struct nft_reject *priv = nft_expr_priv(expr);
	const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;

	if (is_broadcast_ether_addr(dest) ||
	    is_multicast_ether_addr(dest))
		goto out;

	switch (eth_hdr(pkt->skb)->h_proto) {
	case htons(ETH_P_IP):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v4_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nft_reject_br_send_v4_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmp_code(priv->icmp_code));
			break;
		}
		break;
	case htons(ETH_P_IPV6):
		switch (priv->type) {
		case NFT_REJECT_ICMP_UNREACH:
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      priv->icmp_code);
			break;
		case NFT_REJECT_TCP_RST:
			nft_reject_br_send_v6_tcp_reset(nft_net(pkt), pkt->skb,
							nft_in(pkt),
							nft_hook(pkt));
			break;
		case NFT_REJECT_ICMPX_UNREACH:
			nft_reject_br_send_v6_unreach(nft_net(pkt), pkt->skb,
						      nft_in(pkt),
						      nft_hook(pkt),
						      nft_reject_icmpv6_code(priv->icmp_code));
			break;
		}
		break;
	default:
		/* No explicit way to reject this protocol, drop it. */
		break;
	}
out:
	regs->verdict.code = NF_DROP;
}
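/* Illustrative userspace sketch (an assumption mirroring the
 * is_broadcast/is_multicast gate above): a MAC address is multicast
 * when the group bit (the least significant bit of the first octet)
 * is set; broadcast is the all-ones address, itself also multicast.
 * Rejects are suppressed for both so no error replies are generated
 * toward group destinations. Function names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool example_mac_is_multicast(const uint8_t addr[6])
{
	return addr[0] & 0x01;
}

static bool example_mac_is_broadcast(const uint8_t addr[6])
{
	static const uint8_t bc[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

	return memcmp(addr, bc, 6) == 0;
}

int main(void)
{
	const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	const uint8_t ucast[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	printf("bcast: mcast=%d bcast=%d\n",
	       example_mac_is_multicast(bcast),
	       example_mac_is_broadcast(bcast));	/* 1 1 */
	printf("ucast: mcast=%d bcast=%d\n",
	       example_mac_is_multicast(ucast),
	       example_mac_is_broadcast(ucast));	/* 0 0 */
	return 0;
}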
/**
 * batadv_bla_tx
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the frame to be checked
 * @vid: the VLAN ID of the frame
 *
 * bla_tx checks if:
 *  * a claim was received which has to be processed
 *  * the frame is allowed on the mesh
 *
 * in these cases, the skb is further handled by this function and
 * returns 1, otherwise it returns 0 and the caller shall further
 * process the skb.
 *
 * This call might reallocate skb data.
 */
int batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb,
		  unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_bla_claim search_claim, *claim = NULL;
	struct batadv_hard_iface *primary_if;
	int ret = 0;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	if (!atomic_read(&bat_priv->bridge_loop_avoidance))
		goto allow;

	if (batadv_bla_process_claim(bat_priv, primary_if, skb))
		goto handled;

	ethhdr = eth_hdr(skb);

	if (unlikely(atomic_read(&bat_priv->bla.num_requests)))
		/* don't allow broadcasts while requests are in flight */
		if (is_multicast_ether_addr(ethhdr->h_dest))
			goto handled;

	ether_addr_copy(search_claim.addr, ethhdr->h_source);
	search_claim.vid = vid;

	claim = batadv_claim_hash_find(bat_priv, &search_claim);

	/* if no claim exists, allow it. */
	if (!claim)
		goto allow;

	/* check if we are responsible. */
	if (batadv_compare_eth(claim->backbone_gw->orig,
			       primary_if->net_dev->dev_addr)) {
		/* if yes, the client has roamed and we have
		 * to unclaim it.
		 */
		batadv_handle_unclaim(bat_priv, primary_if,
				      primary_if->net_dev->dev_addr,
				      ethhdr->h_source, vid);
		goto allow;
	}

	/* check if it is a multicast/broadcast frame */
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* drop it. the responsible gateway has forwarded it into
		 * the backbone network.
		 */
		goto handled;
	} else {
		/* we must allow it. at least if we are
		 * responsible for the DESTINATION.
		 */
		goto allow;
	}
allow:
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);
	ret = 0;
	goto out;
handled:
	ret = 1;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	if (claim)
		batadv_claim_free_ref(claim);
	return ret;
}
/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 * This is also true when SNAT takes place (for the reply direction).
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct net *net, struct sock *sk,
				    struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);
	struct rtable *rt;
	int err;

	nf_bridge->frag_max_size = IPCB(skb)->frag_max_size;

	if (nf_bridge->pkt_otherhost) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->pkt_otherhost = false;
	}
	nf_bridge->in_prerouting = 0;
	if (br_nf_ipv4_daddr_was_changed(skb, nf_bridge)) {
		err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos,
				     dev);
		if (err) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for
			 * 2 types of martian destinations: loopback
			 * destinations and destination 0.0.0.0. In both
			 * cases the packet will be dropped because the
			 * destination is the loopback device and not the
			 * bridge.
			 */
			if (err != -EHOSTUNREACH || !in_dev ||
			    IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(net, iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding.
				 */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				br_nf_hook_thresh(NF_BR_PRE_ROUTING,
						  net, sk, skb, skb->dev,
						  NULL,
						  br_nf_pre_routing_finish_bridge);
				return 0;
			}
			ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	br_nf_hook_thresh(NF_BR_PRE_ROUTING, net, sk, skb, skb->dev, NULL,
			  br_handle_frame_finish);
	return 0;
}
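/* Decision sketch (illustrative only, not the bridge code): the long
 * comment above boils down to this classification. All parameters are
 * hypothetical stand-ins for the routing results computed inside
 * br_nf_pre_routing_finish().
 */
#include <stdio.h>

enum example_nat_path { PATH_BRIDGED_DNAT, PATH_ROUTED, PATH_DROP };

static enum example_nat_path
example_classify(int route_input_ok, int out_dev_is_bridge,
		 int output_key_ok_and_dev_is_bridge)
{
	if (route_input_ok)
		return out_dev_is_bridge ? PATH_BRIDGED_DNAT : PATH_ROUTED;
	/* ip_route_input() failed: fall back to ip_route_output_key()
	 * semantics, accepting only a route back out of the bridge.
	 */
	return output_key_ok_and_dev_is_bridge ? PATH_BRIDGED_DNAT
					       : PATH_DROP;
}

int main(void)
{
	/* DNAT to a host behind the same bridge: */
	printf("%d\n", example_classify(1, 1, 0)); /* PATH_BRIDGED_DNAT */
	/* DNAT to a routed destination: */
	printf("%d\n", example_classify(1, 0, 0)); /* PATH_ROUTED */
	return 0;
}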
/**
 * batadv_bla_process_claim
 * @bat_priv: the bat priv with all the soft interface information
 * @primary_if: the primary hard interface of this batman soft interface
 * @skb: the frame to be checked
 *
 * Check if this is a claim frame, and process it accordingly.
 *
 * returns 1 if it was a claim frame, otherwise return 0 to
 * tell the callee that it can use the frame on its own.
 */
static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
				    struct batadv_hard_iface *primary_if,
				    struct sk_buff *skb)
{
	struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
	uint8_t *hw_src, *hw_dst;
	struct vlan_hdr *vhdr, vhdr_buf;
	struct ethhdr *ethhdr;
	struct arphdr *arphdr;
	unsigned short vid;
	int vlan_depth = 0;
	__be16 proto;
	int headlen;
	int ret;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;
	headlen = ETH_HLEN;
	if (vid & BATADV_VLAN_HAS_TAG) {
		/* Traverse the VLAN/Ethertypes.
		 *
		 * At this point it is known that the first protocol is a VLAN
		 * header, so start checking at the encapsulated protocol.
		 *
		 * The depth of the VLAN headers is recorded to drop BLA claim
		 * frames encapsulated into multiple VLAN headers (QinQ).
		 */
		do {
			vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
						  &vhdr_buf);
			if (!vhdr)
				return 0;

			proto = vhdr->h_vlan_encapsulated_proto;
			headlen += VLAN_HLEN;
			vlan_depth++;
		} while (proto == htons(ETH_P_8021Q));
	}

	if (proto != htons(ETH_P_ARP))
		return 0; /* not a claim frame */

	/* this must be an ARP frame. check if it is a claim. */
	if (unlikely(!pskb_may_pull(skb, headlen + arp_hdr_len(skb->dev))))
		return 0;

	/* pskb_may_pull() may have modified the pointers, get ethhdr again */
	ethhdr = eth_hdr(skb);
	arphdr = (struct arphdr *)((uint8_t *)ethhdr + headlen);

	/* Check whether the ARP frame carries valid IP information */
	if (arphdr->ar_hrd != htons(ARPHRD_ETHER))
		return 0;
	if (arphdr->ar_pro != htons(ETH_P_IP))
		return 0;
	if (arphdr->ar_hln != ETH_ALEN)
		return 0;
	if (arphdr->ar_pln != 4)
		return 0;

	hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
	hw_dst = hw_src + ETH_ALEN + 4;
	bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
	bla_dst_own = &bat_priv->bla.claim_dest;

	/* check if it is a claim frame in general */
	if (memcmp(bla_dst->magic, bla_dst_own->magic,
		   sizeof(bla_dst->magic)) != 0)
		return 0;

	/* check if there is a claim frame encapsulated deeper in (QinQ) and
	 * drop that, as this is not supported by BLA but should also not be
	 * sent via the mesh.
	 */
	if (vlan_depth > 1)
		return 1;

	/* check if it is a claim frame. */
	ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
				       ethhdr);
	if (ret == 1)
		batadv_dbg(BATADV_DBG_BLA, bat_priv,
			   "bla_process_claim(): received a claim frame from another group. From: %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
			   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src,
			   hw_dst);

	if (ret < 2)
		return ret;

	/* become a backbone gw ourselves on this vlan if not happened yet */
	batadv_bla_update_own_backbone_gw(bat_priv, primary_if, vid);

	/* check for the different types of claim frames ... */
	switch (bla_dst->type) {
	case BATADV_CLAIM_TYPE_CLAIM:
		if (batadv_handle_claim(bat_priv, primary_if, hw_src,
					ethhdr->h_source, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_UNCLAIM:
		if (batadv_handle_unclaim(bat_priv, primary_if,
					  ethhdr->h_source, hw_src, vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_ANNOUNCE:
		if (batadv_handle_announce(bat_priv, hw_src, ethhdr->h_source,
					   vid))
			return 1;
		break;
	case BATADV_CLAIM_TYPE_REQUEST:
		if (batadv_handle_request(bat_priv, primary_if, hw_src,
					  ethhdr, vid))
			return 1;
		break;
	}

	batadv_dbg(BATADV_DBG_BLA, bat_priv,
		   "bla_process_claim(): ERROR - this looks like a claim frame, but is useless. eth src %pM on vid %d ...(hw_src %pM, hw_dst %pM)\n",
		   ethhdr->h_source, BATADV_PRINT_VID(vid), hw_src, hw_dst);
	return 1;
}
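/* Illustrative layout (an assumption based on the ar_hln == ETH_ALEN
 * and ar_pln == 4 checks above, i.e. a standard Ethernet/IPv4 ARP
 * body): this is what follows struct arphdr on the wire and why
 * hw_dst = hw_src + ETH_ALEN + 4. The struct and field names are
 * hypothetical, for explanation only.
 */
#include <stdint.h>
#include <stdio.h>

struct example_arp_eth_body {
	uint8_t sender_mac[6];	/* hw_src: the claiming client */
	uint8_t sender_ip[4];
	uint8_t target_mac[6];	/* hw_dst: reused as the claim payload,
				 * cast to struct batadv_bla_claim_dst
				 */
	uint8_t target_ip[4];
} __attribute__((packed));

int main(void)
{
	/* 6 + 4 + 6 + 4 = 20 bytes after the 8-byte struct arphdr */
	printf("%zu\n", sizeof(struct example_arp_eth_body));
	return 0;
}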
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	struct net_bridge *br;
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct sk_buff *skb2;

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	/* insert into forwarding database after filtering to avoid spoofing */
	br = p->br;
	br_fdb_update(br, p, eth_hdr(skb)->h_source);

	if (is_multicast_ether_addr(dest) &&
	    br_multicast_rcv(br, p, skb))
		goto drop;

	if (p->state == BR_STATE_LEARNING)
		goto drop;

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;

	/* The packet skb2 goes to the local host (NULL to skip). */
	skb2 = NULL;

	if (br->dev->flags & IFF_PROMISC)
		skb2 = skb;

	dst = NULL;

	if (is_multicast_ether_addr(dest)) {
		mdst = br_mdb_get(br, skb);
		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {
			if ((mdst && mdst->mglist) ||
			    br_multicast_is_router(br))
				skb2 = skb;
			br_multicast_forward(mdst, skb, skb2);
			skb = NULL;
			if (!skb2)
				goto out;
		} else
			skb2 = skb;

		br->dev->stats.multicast++;
	} else if ((dst = __br_fdb_get(br, dest)) && dst->is_local) {
		skb2 = skb;
		/* Do not forward the packet since it's local. */
		skb = NULL;
	}

	if (skb) {
		if (dst)
			br_forward(dst->dst, skb, skb2);
		else
			br_flood_forward(br, skb, skb2);
	}

	if (skb2)
		return br_pass_frame_up(skb2);

out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
}
/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	skb_reset_mac_header(skb);

	/* Link layer. We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(skb->vlan_tci);
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);

				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);

				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);

				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);

				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		bool arp_available = arphdr_ok(skb);
		struct arp_eth_header *arp;

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {
			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);

			memcpy(&key->ipv4.addr.src, arp->ar_sip,
			       sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip,
			       sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);

				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);

				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);

				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			}
		}
	}

	return 0;
}
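/* Userspace sketch (illustrative): why the 8-bit ICMP type and code
 * fit the flow key's 16-bit transport-port slots. Each value is
 * widened and stored in network byte order, e.g. echo request
 * (type 8, code 0) becomes tp.src = htons(8), tp.dst = htons(0).
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t icmp_type = 8, icmp_code = 0;	/* ICMP echo request */
	uint16_t tp_src = htons(icmp_type);
	uint16_t tp_dst = htons(icmp_code);

	printf("tp.src=0x%04x tp.dst=0x%04x\n", tp_src, tp_dst);
	return 0;
}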
int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
		  const struct tnl_ptk_info *tpi, bool log_ecn_error)
{
	struct pcpu_tstats *tstats;
	const struct iphdr *iph = ip_hdr(skb);
	int err;

#ifdef CONFIG_NET_IPGRE_BROADCAST
	if (ipv4_is_multicast(iph->daddr)) {
		/* Looped back packet, drop it! */
		if (rt_is_output_route(skb_rtable(skb)))
			goto drop;
		tunnel->dev->stats.multicast++;
		skb->pkt_type = PACKET_BROADCAST;
	}
#endif

	if ((!(tpi->flags&TUNNEL_CSUM) && (tunnel->parms.i_flags&TUNNEL_CSUM)) ||
	    ((tpi->flags&TUNNEL_CSUM) && !(tunnel->parms.i_flags&TUNNEL_CSUM))) {
		tunnel->dev->stats.rx_crc_errors++;
		tunnel->dev->stats.rx_errors++;
		goto drop;
	}

	if (tunnel->parms.i_flags&TUNNEL_SEQ) {
		if (!(tpi->flags&TUNNEL_SEQ) ||
		    (tunnel->i_seqno &&
		     (s32)(ntohl(tpi->seq) - tunnel->i_seqno) < 0)) {
			tunnel->dev->stats.rx_fifo_errors++;
			tunnel->dev->stats.rx_errors++;
			goto drop;
		}
		tunnel->i_seqno = ntohl(tpi->seq) + 1;
	}

	err = IP_ECN_decapsulate(iph, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &iph->saddr, iph->tos);
		if (err > 1) {
			++tunnel->dev->stats.rx_frame_errors;
			++tunnel->dev->stats.rx_errors;
			goto drop;
		}
	}

	tstats = this_cpu_ptr(tunnel->dev->tstats);
	u64_stats_update_begin(&tstats->syncp);
	tstats->rx_packets++;
	tstats->rx_bytes += skb->len;
	u64_stats_update_end(&tstats->syncp);

	if (tunnel->net != dev_net(tunnel->dev))
		skb_scrub_packet(skb);

	if (tunnel->dev->type == ARPHRD_ETHER) {
		skb->protocol = eth_type_trans(skb, tunnel->dev);
		skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
	} else {
		skb->dev = tunnel->dev;
	}

	gro_cells_receive(&tunnel->gro_cells, skb);
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
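/* Worked example (illustrative, not kernel code): the
 * (s32)(seq - expected) < 0 test above is serial-number arithmetic,
 * so the anti-replay check keeps working across the 32-bit wrap.
 * A negative signed difference means "older than expected".
 */
#include <stdint.h>
#include <stdio.h>

static int example_seq_is_stale(uint32_t seq, uint32_t expected)
{
	return (int32_t)(seq - expected) < 0;
}

int main(void)
{
	printf("%d\n", example_seq_is_stale(5, 10));	/* 1: stale */
	printf("%d\n", example_seq_is_stale(10, 5));	/* 0: fresh */
	/* just past the wraparound still counts as fresh: */
	printf("%d\n", example_seq_is_stale(2, 0xfffffffeu));	/* 0 */
	return 0;
}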
/* note: already called with rcu_read_lock (preempt_disabled) */
int br_handle_frame_finish(struct sk_buff *skb)
{
	const unsigned char *dest = eth_hdr(skb)->h_dest;
	struct net_bridge_port *p = rcu_dereference(skb->dev->br_port);
	struct net_bridge *br;
	struct net_bridge_fdb_entry *dst;
	struct sk_buff *skb2;

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	/* insert into forwarding database after filtering to avoid spoofing */
	br = p->br;
	br_fdb_update(br, p, eth_hdr(skb)->h_source);

	if (p->state == BR_STATE_LEARNING)
		goto drop;

	/* The packet skb2 goes to the local host (NULL to skip). */
	skb2 = NULL;

	if (br->dev->flags & IFF_PROMISC)
		skb2 = skb;

	dst = NULL;

#if defined(CONFIG_MIPS_BRCM) && defined(CONFIG_BR_MLD_SNOOP)
	if ((0x33 == dest[0]) && (0x33 == dest[1])) {
		br->statistics.multicast++;
		skb2 = skb;
		if (br_mld_mc_forward(br, skb, 1, 0)) {
			skb = NULL;
		}
	} else
#endif
	if (is_multicast_ether_addr(dest)) {
		br->dev->stats.multicast++;
		skb2 = skb;
#if defined(CONFIG_MIPS_BRCM) && defined(CONFIG_BR_IGMP_SNOOP)
		if (br_igmp_mc_forward(br, skb, 1, 0)) {
			skb = NULL;
		}
#endif
	} else {
		dst = __br_fdb_get(br, dest);
#if defined(CONFIG_MIPS_BRCM) && defined(CONFIG_BLOG)
		blog_br_fdb(skb, __br_fdb_get(br, eth_hdr(skb)->h_source),
			    dst);
#endif
		if ((dst != NULL) && dst->is_local) {
			skb2 = skb;
			/* Do not forward the packet since it's local. */
			skb = NULL;
		}
	}

	if (skb2 == skb)
		skb2 = skb_clone(skb, GFP_ATOMIC);

	if (skb2)
		br_pass_frame_up(br, skb2);

	if (skb) {
		if (dst)
			br_forward(dst->dst, skb);
		else
			br_flood_forward(br, skb);
	}

out:
	return 0;

drop:
	kfree_skb(skb);
	goto out;
}
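/* Illustrative sketch (an assumption, per RFC 2464): the 0x33,0x33
 * test above matches the Ethernet MAC prefix reserved for IPv6
 * multicast; the remaining four octets are the low 32 bits of the
 * IPv6 group address. The function name below is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void example_ipv6_mcast_mac(const uint8_t ip6[16], uint8_t mac[6])
{
	mac[0] = 0x33;
	mac[1] = 0x33;
	memcpy(&mac[2], &ip6[12], 4);	/* low 32 bits of group address */
}

int main(void)
{
	/* ff02::1 (all-nodes) maps to 33:33:00:00:00:01 */
	uint8_t ip6[16] = { 0xff, 0x02, [15] = 0x01 };
	uint8_t mac[6];

	example_ipv6_mcast_mac(ip6, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}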