static int __br_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_bridge *br; unsigned char *dest; struct net_bridge_fdb_entry *dst; br = dev->priv; br->statistics.tx_packets++; br->statistics.tx_bytes += skb->len; dest = skb->mac.raw = skb->data; skb_pull(skb, ETH_HLEN); if (dest[0] & 1) { br_flood_deliver(br, skb, 0); return 0; } if ((dst = br_fdb_get(br, dest)) != NULL) { br_deliver(dst->dst, skb); br_fdb_put(dst); return 0; } br_flood_deliver(br, skb, 0); return 0; }
static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook) { struct sk_buff *nskb; struct iphdr *niph; const struct tcphdr *oth; struct tcphdr _oth; if (!nft_reject_iphdr_validate(oldskb)) return; oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook); if (!oth) return; nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + LL_MAX_HEADER, GFP_ATOMIC); if (!nskb) return; skb_reserve(nskb, LL_MAX_HEADER); niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP, sysctl_ip_default_ttl); nf_reject_ip_tcphdr_put(nskb, oldskb, oth); niph->ttl = sysctl_ip_default_ttl; niph->tot_len = htons(nskb->len); ip_send_check(niph); nft_reject_br_push_etherhdr(oldskb, nskb); br_deliver(br_port_get_rcu(oldskb->dev), nskb); }
static void nft_reject_br_send_v6_tcp_reset(struct net *net, struct sk_buff *oldskb, int hook) { struct sk_buff *nskb; const struct tcphdr *oth; struct tcphdr _oth; unsigned int otcplen; struct ipv6hdr *nip6h; if (!nft_reject_ip6hdr_validate(oldskb)) return; oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook); if (!oth) return; nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) + LL_MAX_HEADER, GFP_ATOMIC); if (!nskb) return; skb_reserve(nskb, LL_MAX_HEADER); nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP, net->ipv6.devconf_all->hop_limit); nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen); nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr)); nft_reject_br_push_etherhdr(oldskb, nskb); br_deliver(br_port_get_rcu(oldskb->dev), nskb); }
/* net device transmit always called with no BH (preempt_disabled) */ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); const unsigned char *dest = skb->data; struct net_bridge_fdb_entry *dst; struct net_bridge_mdb_entry *mdst; BR_INPUT_SKB_CB(skb)->brdev = dev; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); if (dest[0] & 1) { if (br_multicast_rcv(br, NULL, skb)) goto out; mdst = br_mdb_get(br, skb); if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) br_multicast_deliver(mdst, skb); else br_flood_deliver(br, skb); } else if ((dst = __br_fdb_get(br, dest)) != NULL) br_deliver(dst->dst, skb); else br_flood_deliver(br, skb); out: return NETDEV_TX_OK; }
/*
 * Build and deliver an ICMPv4 destination-unreachable (subtype @code)
 * in reply to @oldskb, emitted on the bridge port the frame came from.
 * Bails out silently on fragments, bad checksums, or allocation failure.
 */
static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
					  u8 code)
{
	struct icmphdr *ich;
	struct iphdr *iph;
	struct sk_buff *reply;
	unsigned int plen;
	void *data;
	__wsum sum;

	if (!nft_reject_iphdr_validate(oldskb))
		return;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	/* RFC says return as much as we can without exceeding 576 bytes. */
	plen = min_t(unsigned int, 536, oldskb->len);

	if (!pskb_may_pull(oldskb, plen))
		return;

	/* Don't reply to packets whose L4 checksum is already broken. */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
		return;

	reply = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
			  LL_MAX_HEADER + plen, GFP_ATOMIC);
	if (reply == NULL)
		return;

	skb_reserve(reply, LL_MAX_HEADER);

	iph = nf_reject_iphdr_put(reply, oldskb, IPPROTO_ICMP,
				  sysctl_ip_default_ttl);

	skb_reset_transport_header(reply);
	ich = (struct icmphdr *)skb_put(reply, sizeof(struct icmphdr));
	memset(ich, 0, sizeof(*ich));
	ich->type = ICMP_DEST_UNREACH;
	ich->code = code;

	/* Quote the start of the offending packet in the ICMP payload. */
	data = skb_put(reply, plen);
	memcpy(data, skb_network_header(oldskb), plen);

	/* Checksum covers the ICMP header plus the quoted payload. */
	sum = csum_partial((void *)ich, plen + sizeof(struct icmphdr), 0);
	ich->checksum = csum_fold(sum);

	iph->tot_len = htons(reply->len);
	ip_send_check(iph);

	nft_reject_br_push_etherhdr(oldskb, reply);

	br_deliver(br_port_get_rcu(oldskb->dev), reply);
}
/*
 * Build and deliver an ICMPv6 destination-unreachable (subtype @code)
 * in reply to @oldskb, emitted on the bridge port the frame came from.
 * Bails out silently on invalid headers or allocation failure.
 */
static void nft_reject_br_send_v6_unreach(struct net *net,
					  struct sk_buff *oldskb,
					  int hook, u8 code)
{
	struct sk_buff *nskb;
	struct ipv6hdr *nip6h;
	struct icmp6hdr *icmp6h;
	unsigned int len;
	void *payload;

	if (!nft_reject_ip6hdr_validate(oldskb))
		return;

	/* Include "As much of invoking packet as possible without the ICMPv6
	 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
	 * 1220 = 1280 (min MTU) - 40 (ipv6hdr) - 8 (icmp6hdr) - 12 slack.
	 */
	len = min_t(unsigned int, 1220, oldskb->len);

	if (!pskb_may_pull(oldskb, len))
		return;

	/* BUGFIX: size the allocation for an IPv6 header (40 bytes), not an
	 * IPv4 "struct iphdr" (20 bytes) — the old expression under-allocated
	 * the reply skb by 20 bytes.
	 */
	nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) +
			 LL_MAX_HEADER + len, GFP_ATOMIC);
	if (!nskb)
		return;

	/* Headroom for the ethernet header pushed before delivery. */
	skb_reserve(nskb, LL_MAX_HEADER);

	nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
				     net->ipv6.devconf_all->hop_limit);

	skb_reset_transport_header(nskb);
	icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
	memset(icmp6h, 0, sizeof(*icmp6h));
	icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
	icmp6h->icmp6_code = code;

	/* Quote the start of the offending packet in the ICMP payload. */
	payload = skb_put(nskb, len);
	memcpy(payload, skb_network_header(oldskb), len);

	nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
	/* ICMPv6 checksum includes the IPv6 pseudo-header. */
	icmp6h->icmp6_cksum =
		csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
				nskb->len - sizeof(struct ipv6hdr),
				IPPROTO_ICMPV6,
				csum_partial(icmp6h,
					     nskb->len - sizeof(struct ipv6hdr),
					     0));

	nft_reject_br_push_etherhdr(oldskb, nskb);

	br_deliver(br_port_get_rcu(oldskb->dev), nskb);
}
/* net device transmit always called with no BH (preempt_disabled) */ int br_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct net_bridge *br = netdev_priv(dev); const unsigned char *dest = skb->data; struct net_bridge_fdb_entry *dst; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; skb_reset_mac_header(skb); skb_pull(skb, ETH_HLEN); if (dest[0] & 1) br_flood_deliver(br, skb); else if ((dst = __br_fdb_get(br, dest)) != NULL) br_deliver(dst->dst, skb); else br_flood_deliver(br, skb); return 0; }
/* net device transmit always called with no BH (preempt_disabled) */
/*
 * Broadcom (CONFIG_MIPS_BRCM) variant of the bridge device transmit path.
 * Delivers locally-originated frames: learned unicast goes to one port,
 * everything else is flooded unless the vendor IGMP/MLD snooping hooks
 * claim the frame first.  Always returns 0 (frame consumed).
 */
int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	const unsigned char *dest = skb->data;	/* dest MAC, read before pull */
	struct net_bridge_fdb_entry *dst;
#if defined(CONFIG_MIPS_BRCM) && defined(CONFIG_BLOG)
	/* Vendor flow-logging hook for the transmit direction. */
	blog_dev( skb, dev, DIR_TX, skb->len );
#endif
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
#if defined(CONFIG_MIPS_BRCM) && defined(CONFIG_BR_MLD_SNOOP)
	/* 33:33:xx:xx:xx:xx is the IPv6 multicast MAC prefix; give the MLD
	 * snooper first refusal, falling back to a flood if it declines.
	 */
	if ((0x33 == dest[0]) && (0x33 == dest[1])) {
		if (!br_mld_mc_forward(br, skb, 0, 0))
			br_flood_deliver(br, skb);
	} else
#endif
	/* Group bit set: multicast/broadcast. */
	if (dest[0] & 1) {
#if defined(CONFIG_MIPS_BRCM) && defined(CONFIG_BR_IGMP_SNOOP)
		/* IGMP snooper gets first refusal; flood only if it declines.
		 * NOTE(review): without this config the flood below runs
		 * unconditionally for multicast — intentional ifdef shape.
		 */
		if (!br_igmp_mc_forward(br, skb, 0, 0))
#endif
			br_flood_deliver(br, skb);
	} else if ((dst = __br_fdb_get(br, dest)) != NULL)
		br_deliver(dst->dst, skb);	/* known unicast */
	else
		br_flood_deliver(br, skb);	/* unknown unicast */
	return 0;
}
/* net device transmit always called with no BH (preempt_disabled) */
/*
 * Realtek (CONFIG_RTL_*) variant of the bridge device transmit path.
 * Unicast frames use the normal fdb lookup; multicast frames are handed
 * to the vendor IGMP/MLD snooping tables to compute a per-port forward
 * mask, with an optional hardware-multicast fast path.  Falls back to
 * flooding whenever snooping is disabled or the lookup fails.
 * Always returns 0 (frame consumed).
 */
int br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	const unsigned char *dest = skb->data;	/* dest MAC, read before pull */
	struct net_bridge_fdb_entry *dst;
#if defined (CONFIG_RTL_IGMP_SNOOPING)
	struct iphdr *iph=NULL;
	unsigned char proto=0;
	unsigned char reserved=0;	/* set for groups excluded from snooping */
#if defined (CONFIG_RTL_MLD_SNOOPING)
	struct ipv6hdr *ipv6h=NULL;
#endif
	struct rtl_multicastDataInfo multicastDataInfo;
	struct rtl_multicastFwdInfo multicastFwdInfo;
	int ret=FAILED;
#if defined (CONFIG_RTL_HARDWARE_MULTICAST)
	/* NOTE(review): srcPort/srcVlanId are vendor-added skb fields —
	 * presumably the ingress switch port/VLAN; confirm against the
	 * patched skbuff definition.
	 */
	unsigned int srcPort=skb->srcPort;
	unsigned int srcVlanId=skb->srcVlanId;
#endif
#endif
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
	if (dest[0] & 1) {	/* group bit set: multicast/broadcast */
#if defined (CONFIG_RTL_IGMP_SNOOPING)
		if(igmpsnoopenabled) {
			if(MULTICAST_MAC(dest)) {
				/* IPv4 multicast MAC: classify by L3/L4. */
				iph=(struct iphdr *)skb_network_header(skb);
				proto = iph->protocol;
#if 0
				if(( iph->daddr&0xFFFFFF00)==0xE0000000) {
					reserved=1;
				}
#endif
				/* 239.255.255.250 (SSDP) is never snooped. */
				if( iph->daddr == 0xEFFFFFFA) {
					/*for microsoft upnp*/
					reserved=1;
				}
				if(((proto ==IPPROTO_UDP) ||(proto ==IPPROTO_TCP)) && (reserved ==0)) {
					/* Ask the snooping table for the member-port mask. */
					multicastDataInfo.ipVersion=4;
					multicastDataInfo.sourceIp[0]= (uint32)(iph->saddr);
					multicastDataInfo.groupAddr[0]= (uint32)(iph->daddr);
					ret= rtl_getMulticastDataFwdInfo(brIgmpModuleIndex, &multicastDataInfo, &multicastFwdInfo);
					if(ret==SUCCESS) {
						br_multicast_deliver(br, multicastFwdInfo.fwdPortMask, skb, 0);
#if defined (CONFIG_RTL_HARDWARE_MULTICAST)
						/* Offload this flow to the switch ASIC when the
						 * ingress port/VLAN are known-valid.
						 */
						if((srcVlanId!=0) && (srcPort!=0xFFFF)) {
#if defined(CONFIG_RTK_VLAN_SUPPORT)
							if(rtk_vlan_support_enable == 0) {
								rtl865x_ipMulticastHardwareAccelerate(br, multicastFwdInfo.fwdPortMask,srcPort,srcVlanId, multicastDataInfo.sourceIp[0], multicastDataInfo.groupAddr[0]);
							}
#else
							rtl865x_ipMulticastHardwareAccelerate(br, multicastFwdInfo.fwdPortMask,srcPort,srcVlanId, multicastDataInfo.sourceIp[0], multicastDataInfo.groupAddr[0]);
#endif
						}
#endif
					} else {
						/* No snooping entry: flood like plain multicast. */
						br_flood_deliver(br, skb);
					}
				} else {
					/* Non-UDP/TCP or reserved group: flood. */
					br_flood_deliver(br, skb);
				}
			}
#if defined(CONFIG_RTL_MLD_SNOOPING)
			else if(mldSnoopEnabled && IPV6_MULTICAST_MAC(dest)) {
				/* IPv6 multicast: same lookup via the MLD tables. */
				ipv6h=(struct ipv6hdr *)skb_network_header(skb);
				proto=re865x_getIpv6TransportProtocol(ipv6h);
				if ((proto ==IPPROTO_UDP) ||(proto ==IPPROTO_TCP)) {
					multicastDataInfo.ipVersion=6;
					memcpy(&multicastDataInfo.sourceIp, &ipv6h->saddr, sizeof(struct in6_addr));
					memcpy(&multicastDataInfo.groupAddr, &ipv6h->daddr, sizeof(struct in6_addr));
					ret= rtl_getMulticastDataFwdInfo(brIgmpModuleIndex, &multicastDataInfo, &multicastFwdInfo);
					if(ret==SUCCESS) {
						br_multicast_deliver(br, multicastFwdInfo.fwdPortMask, skb, 0);
					} else {
						br_flood_deliver(br, skb);
					}
				} else {
					br_flood_deliver(br, skb);
				}
			}
#endif
			else {
				/* Not an IPv4/IPv6 multicast MAC: flood. */
				br_flood_deliver(br, skb);
			}
		} else {
			/* Snooping globally disabled: flood. */
			br_flood_deliver(br, skb);
		}
#else
		br_flood_deliver(br, skb);
#endif
	} else if ((dst = __br_fdb_get(br, dest)) != NULL)
		br_deliver(dst->dst, skb);	/* known unicast */
	else
		br_flood_deliver(br, skb);	/* unknown unicast */
	return 0;
}
/*
 * Old-kernel bridge device transmit with optional IGMP snooping and a
 * NAT fast-path fdb cache.  Known unicast is delivered to its learned
 * port; multicast is either handed to the snooping delivery path or
 * flooded.  Always returns 0 (frame consumed).
 */
static int __br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br;
	unsigned char *dest;
	struct net_bridge_fdb_entry *dst;
#ifdef IGMP_SNOOPING
	struct iphdr *iph;
	unsigned int ipaddr=0;
	unsigned char proto=0;
	//unsigned char reserved=0;
	extern int igmpsnoopenabled;
	/* NOTE(review): reads the IP header before checking the frame is IP
	 * at all — presumably callers only hand us IP traffic; verify.
	 */
	iph = skb->nh.iph;
	ipaddr = iph->daddr;
	proto = iph->protocol;
	//Brad disable 20080619
//	if ((ipaddr&0xF0FFFF00) == 0xE0000000)
//		reserved=1;
#endif
	br = dev->priv;
	br->statistics.tx_packets++;
	br->statistics.tx_bytes += skb->len;
	/* Record the mac header, then strip it; dest MAC stays readable. */
	dest = skb->mac.raw = skb->data;
	skb_pull(skb, ETH_HLEN);
	if (dest[0] & 1) {	/* group bit set: multicast/broadcast */
#ifdef IGMP_SNOOPING
		/* Snoopable data multicast: skip IGMP control traffic and the
		 * SSDP group 239.255.255.250 (0xEFFFFFFA).
		 */
		if(igmpsnoopenabled && (MULTICAST_MAC(dest) || IPV6_MULTICAST_MAC(dest))&& (proto != IPPROTO_IGMP) && (ipaddr != 0xEFFFFFFA) //&& (reserved == 0)
		) {
			if ((dst = br_fdb_get(br, dest)) != NULL) {
				br_multicast_deliver(br, dst, skb, 0);
				br_fdb_put(dst);	/* drop lookup ref */
			} else {
				br_flood_deliver(br, skb, 0);
			}
		} else {
			// broadcast
			br_flood_deliver(br, skb, 0);
		}
		return 0;
#else
		br_flood_deliver(br, skb, 0);
		return 0;
#endif
	}
	if ((dst = br_fdb_get(br, dest)) != NULL) {
#ifdef NAT_SPEEDUP
		/* Cache the fdb entry on the route's hh cache so later
		 * packets of this flow can skip the lookup.
		 */
		if (skb->dst && skb->dst->hh && skb->dst->hh->fdb_cache==NULL) {
			skb->dst->hh->fdb_cache = (void*)dst;
			dst->hh_ptr = skb->dst->hh;
		}
#endif
		br_deliver(dst->dst, skb);
		br_fdb_put(dst);	/* drop lookup ref */
		return 0;
	}
	/* Unknown unicast: flood to all ports. */
	br_flood_deliver(br, skb, 0);
	return 0;
}