/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct sk_buff *skb)
{
	struct vport *vport;

	vport = ovs_netdev_get_vport(skb->dev);
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_push(skb, ETH_HLEN);
	skb_postpush_rcsum(skb, skb->data, ETH_HLEN);
	ovs_vport_receive(vport, skb, skb_tunnel_info(skb));
	return;
error:
	kfree_skb(skb);
}
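Every snippet in this section gates forwarding or transmit on skb_warn_if_lro(). For reference, a minimal sketch of that helper, roughly as it appears in include/linux/skbuff.h; the exact body varies across kernel versions, so treat this as an approximation rather than the authoritative definition:

static inline bool skb_warn_if_lro(const struct sk_buff *skb)
{
	/* LRO sets gso_size but not gso_type, whereas if GSO is really
	 * wanted then gso_type will be set as well.
	 */
	const struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (skb_is_nonlinear(skb) && shinfo->gso_size != 0 &&
	    unlikely(shinfo->gso_type == 0)) {
		__skb_warn_lro_forwarding(skb);	/* rate-limited warning */
		return true;			/* caller should drop the skb */
	}
	return false;
}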
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	if (unlikely(!vport))
		goto error;

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.)
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_push(skb, ETH_HLEN);

	if (unlikely(compute_ip_summed(skb, false)))
		goto error;

	vlan_copy_skb_tci(skb);

	ovs_vport_receive(vport, skb);
	return;
error:
	kfree_skb(skb);
}
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		net_warn_ratelimited("%s: dropped over-mtu packet: %d > %d\n",
				     ovs_dp_name(vport->dp),
				     packet_length(skb), mtu);
		goto error;
	}

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	skb->dev = netdev_vport->dev;
	len = skb->len;
	dev_queue_xmit(skb);

	return len;

error:
	kfree_skb(skb);
	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	return 0;
}
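netdev_send() above compares packet_length(skb) against the device MTU before transmitting. A sketch of that helper, assuming the usual Open vSwitch datapath definition (the VLAN adjustment keeps a tagged frame from being miscounted as over-MTU):

static unsigned int packet_length(const struct sk_buff *skb)
{
	unsigned int length = skb->len - ETH_HLEN;

	/* An 802.1Q tag is link-layer header, not payload, so it must not
	 * push the packet over the MTU check. */
	if (skb->protocol == htons(ETH_P_8021Q))
		length -= VLAN_HLEN;

	return length;
}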
static void __vnsw_forward(const struct vnsw_if *to, struct sk_buff *skb)
{
	struct net_device *indev;

	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
		vnsw_forward_finish);
}
/* Must be called with rcu_read_lock. */
static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)
{
	/* Make our own copy of the packet.  Otherwise we will mangle the
	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET).
	 * (No one comes after us, since we tell handle_bridge() that we took
	 * the packet.)
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(!skb))
		return;

	skb_warn_if_lro(skb);

	/* Push the Ethernet header back on. */
	skb_push(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	compute_ip_summed(skb, false);

	vport_receive(vport, skb);
}
static int mpls_xmit(struct sk_buff *skb)
{
	struct mpls_iptunnel_encap *tun_encap_info;
	struct mpls_shim_hdr *hdr;
	struct net_device *out_dev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = NULL;
	struct rt6_info *rt6 = NULL;
	int err = 0;
	bool bos;
	int i;
	unsigned int ttl;

	/* Obtain the ttl */
	if (dst->ops->family == AF_INET) {
		ttl = ip_hdr(skb)->ttl;
		rt = (struct rtable *)dst;
	} else if (dst->ops->family == AF_INET6) {
		ttl = ipv6_hdr(skb)->hop_limit;
		rt6 = (struct rt6_info *)dst;
	} else {
		goto drop;
	}

	skb_orphan(skb);

	/* Find the output device */
	out_dev = dst->dev;
	if (!mpls_output_possible(out_dev) ||
	    !dst->lwtstate || skb_warn_if_lro(skb))
		goto drop;

	skb_forward_csum(skb);

	tun_encap_info = mpls_lwtunnel_encap(dst->lwtstate);

	/* Verify the destination can hold the packet */
	new_header_size = mpls_encap_size(tun_encap_info);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto drop;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto drop;

	skb_set_inner_protocol(skb, skb->protocol);
	skb_reset_inner_network_header(skb);

	skb_push(skb, new_header_size);
	skb_reset_network_header(skb);

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	/* Push the new labels */
	hdr = mpls_hdr(skb);
	bos = true;
	for (i = tun_encap_info->labels - 1; i >= 0; i--) {
		hdr[i] = mpls_entry_encode(tun_encap_info->label[i],
					   ttl, 0, bos);
		bos = false;
	}

	if (rt)
		err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
				 skb);
	else if (rt6)
		err = neigh_xmit(NEIGH_ND_TABLE, out_dev, &rt6->rt6i_gateway,
				 skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);

	return LWTUNNEL_XMIT_DONE;

drop:
	kfree_skb(skb);
	return -EINVAL;
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	struct neighbour *n;
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	n = dst_get_neighbour(dst);
	if (skb->dev == dst->dev && n && opt->srcrt == 0 &&
	    !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		if (!rt->rt6i_peer)
			rt6_bind_peer(rt, 1);

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (skb->len > mtu && !skb_is_gso(skb)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
				 IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */
	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
			 IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static int netdev_send(struct vport *vport, struct sk_buff *skb)
{
	struct netdev_vport *netdev_vport = netdev_vport_priv(vport);
	int mtu = netdev_vport->dev->mtu;
	int len;

	if (unlikely(packet_length(skb) > mtu && !skb_is_gso(skb))) {
		if (net_ratelimit())
			pr_warn("%s: dropped over-mtu packet: %d > %d\n",
				ovs_dp_name(vport->dp),
				packet_length(skb), mtu);
		goto error;
	}

	if (unlikely(skb_warn_if_lro(skb)))
		goto error;

	skb->dev = netdev_vport->dev;
	forward_ip_summed(skb, true);

	if (vlan_tx_tag_present(skb) && !dev_supports_vlan_tx(skb->dev)) {
		int features;

		features = netif_skb_features(skb);

		if (!vlan_tso)
			features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				      NETIF_F_UFO | NETIF_F_FSO);

		if (netif_needs_gso(skb, features)) {
			struct sk_buff *nskb;

			nskb = skb_gso_segment(skb, features);
			if (!nskb) {
				if (unlikely(skb_cloned(skb) &&
				    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) {
					kfree_skb(skb);
					return 0;
				}

				skb_shinfo(skb)->gso_type &= ~SKB_GSO_DODGY;
				goto tag;
			}

			if (IS_ERR(nskb)) {
				kfree_skb(skb);
				return 0;
			}
			consume_skb(skb);
			skb = nskb;

			len = 0;
			do {
				nskb = skb->next;
				skb->next = NULL;

				skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
				if (likely(skb)) {
					len += skb->len;
					vlan_set_tci(skb, 0);
					dev_queue_xmit(skb);
				}

				skb = nskb;
			} while (skb);

			return len;
		}

tag:
		skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
		if (unlikely(!skb))
			return 0;
		vlan_set_tci(skb, 0);
	}

	len = skb->len;
	dev_queue_xmit(skb);

	return len;

error:
	kfree_skb(skb);
	ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
	return 0;
}
int ip_forward(struct sk_buff *skb)
{
	struct iphdr *iph;	/* Our header */
	struct rtable *rt;	/* Route we use */
	struct ip_options *opt	= &(IPCB(skb)->opt);

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
		goto drop;

	if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
		return NET_RX_SUCCESS;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	According to the RFC, we must first decrease the TTL field. If
	 *	that reaches zero, we must reply an ICMP control message telling
	 *	that the packet's lifetime expired.
	 */
	if (ip_hdr(skb)->ttl <= 1)
		goto too_many_hops;

	if (!xfrm4_route_forward(skb))
		goto drop;

	rt = skb_rtable(skb);

	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto sr_failed;

	if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
		IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		goto drop;
	}

	/* We are about to mangle packet. Copy it! */
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
		goto drop;
	iph = ip_hdr(skb);

	/* Decrease ttl after skb cow done */
	ip_decrease_ttl(iph);

	/*
	 *	We now generate an ICMP HOST REDIRECT giving the route
	 *	we calculated.
	 */
	if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
		ip_rt_send_redirect(skb);

	skb->priority = rt_tos2priority(iph->tos);

	return NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev,
		       rt->u.dst.dev, ip_forward_finish);

sr_failed:
	/*
	 *	Strict routing permits no gatewaying
	 */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
	goto drop;

too_many_hops:
	/* Tell the sender its packet died... */
	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS);
	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
int ip_forward(struct sk_buff *skb)
{
	struct iphdr *iph;
	struct rtable *rt;
	struct ip_options *opt = &(IPCB(skb)->opt);

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
		goto drop;

	if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
		return NET_RX_SUCCESS;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	if (ip_hdr(skb)->ttl <= 1)
		goto too_many_hops;

	if (!xfrm4_route_forward(skb))
		goto drop;

	rt = skb_rtable(skb);

	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto sr_failed;

	if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
		IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		goto drop;
	}

	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
		goto drop;
	iph = ip_hdr(skb);

	ip_decrease_ttl(iph);

	if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
		ip_rt_send_redirect(skb);

	skb->priority = rt_tos2priority(iph->tos);

	return NF_HOOK(PF_INET, NF_INET_FORWARD, skb, skb->dev,
		       rt->u.dst.dev, ip_forward_finish);

sr_failed:
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
	goto drop;

too_many_hops:
	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS);
	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
			struct packet_type *pt, struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);
	struct mpls_shim_hdr *hdr;
	struct mpls_route *rt;
	struct mpls_entry_decoded dec;
	struct net_device *out_dev;
	struct mpls_dev *mdev;
	unsigned int hh_len;
	unsigned int new_header_size;
	unsigned int mtu;
	int err;

	/* Careful this entire function runs inside of an rcu critical section */

	mdev = mpls_dev_get(dev);
	if (!mdev || !mdev->input_enabled)
		goto drop;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(*hdr)))
		goto drop;

	/* Read and decode the label */
	hdr = mpls_hdr(skb);
	dec = mpls_entry_decode(hdr);

	/* Pop the label */
	skb_pull(skb, sizeof(*hdr));
	skb_reset_network_header(skb);

	skb_orphan(skb);

	rt = mpls_route_input_rcu(net, dec.label);
	if (!rt)
		goto drop;

	/* Find the output device */
	out_dev = rcu_dereference(rt->rt_dev);
	if (!mpls_output_possible(out_dev))
		goto drop;

	if (skb_warn_if_lro(skb))
		goto drop;

	skb_forward_csum(skb);

	/* Verify ttl is valid */
	if (dec.ttl <= 1)
		goto drop;
	dec.ttl -= 1;

	/* Verify the destination can hold the packet */
	new_header_size = mpls_rt_header_size(rt);
	mtu = mpls_dev_mtu(out_dev);
	if (mpls_pkt_too_big(skb, mtu - new_header_size))
		goto drop;

	hh_len = LL_RESERVED_SPACE(out_dev);
	if (!out_dev->header_ops)
		hh_len = 0;

	/* Ensure there is enough space for the headers in the skb */
	if (skb_cow(skb, hh_len + new_header_size))
		goto drop;

	skb->dev = out_dev;
	skb->protocol = htons(ETH_P_MPLS_UC);

	if (unlikely(!new_header_size && dec.bos)) {
		/* Penultimate hop popping */
		if (!mpls_egress(rt, skb, dec))
			goto drop;
	} else {
		bool bos;
		int i;

		skb_push(skb, new_header_size);
		skb_reset_network_header(skb);

		/* Push the new labels */
		hdr = mpls_hdr(skb);
		bos = dec.bos;
		for (i = rt->rt_labels - 1; i >= 0; i--) {
			hdr[i] = mpls_entry_encode(rt->rt_label[i],
						   dec.ttl, 0, bos);
			bos = false;
		}
	}

	err = neigh_xmit(rt->rt_via_table, out_dev, rt->rt_via, skb);
	if (err)
		net_dbg_ratelimited("%s: packet transmission failed: %d\n",
				    __func__, err);
	return 0;

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}
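Both MPLS paths above (mpls_xmit and mpls_forward) defer their MTU decision to mpls_pkt_too_big(). A sketch of that check, assuming the net/mpls/af_mpls.c form from around the time mpls_forward() was introduced; later kernels use a slightly different GSO helper, so this is an approximation:

static bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
	/* Non-GSO packets are judged by their on-wire length. */
	if (skb->len <= mtu)
		return false;

	/* GSO packets fit as long as each resegmented piece fits. */
	if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
		return false;

	return true;
}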
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr = skb->nh.ipv6h;
	struct inet6_skb_parm *opt = IP6CB(skb);

	if (ipv6_devconf.forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb->ip_summed = CHECKSUM_NONE;

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any WARRANTY that application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be mistake, RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb->nh.raw + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
			    0, skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb->dst;

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0, skb->dev);
			goto error;
		}
	}

	if (skb->len > dst_mtu(dst)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst_mtu(dst), skb->dev);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = skb->nh.ipv6h;

	/* Mangling hops number delayed to point after skb COW */
	hdr->hop_limit--;

	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(PF_INET6, NF_IP6_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
int ip_forward(struct sk_buff *skb)
{
	struct iphdr *iph;	/* Our header */
	struct rtable *rt;	/* Route we use */
	struct ip_options *opt	= &(IPCB(skb)->opt);
#ifdef CONFIG_NETFILTER_TABLE_INDEX
	int verdict;
#endif

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm4_policy_check(NULL, XFRM_POLICY_FWD, skb))
		goto drop;

	if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
		return NET_RX_SUCCESS;

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	skb_forward_csum(skb);

	/*
	 *	According to the RFC, we must first decrease the TTL field. If
	 *	that reaches zero, we must reply an ICMP control message telling
	 *	that the packet's lifetime expired.
	 */
	if (ip_hdr(skb)->ttl <= 1)
		goto too_many_hops;

	if (!xfrm4_route_forward(skb))
		goto drop;

	rt = skb_rtable(skb);

	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto sr_failed;

	if (unlikely(skb->len > dst_mtu(&rt->u.dst) && !skb_is_gso(skb) &&
		     (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
		IP_INC_STATS(dev_net(rt->u.dst.dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_mtu(&rt->u.dst)));
		goto drop;
	}

	/* We are about to mangle packet. Copy it! */
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+rt->u.dst.header_len))
		goto drop;
	iph = ip_hdr(skb);

#ifdef CONFIG_AS_FASTPATH
	/* ToS in route table is not actually that is in packet */
	_route_add_hook(rt, iph->tos);
#endif

	/* Decrease ttl after skb cow done */
	ip_decrease_ttl(iph);

	/*
	 *	We now generate an ICMP HOST REDIRECT giving the route
	 *	we calculated.
	 */
	if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
		ip_rt_send_redirect(skb);

	skb->priority = rt_tos2priority(iph->tos);

#ifdef CONFIG_NETFILTER_TABLE_INDEX
	verdict = match_netfilter_table_index(NFPROTO_IPV4, NF_INET_FORWARD,
					      skb, skb->dev, rt->u.dst.dev,
					      p_netfilter_table_index);
	if (-1 == verdict) {
		/* not found, go along the old path */
		return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
			       rt->u.dst.dev, ip_forward_finish);
	} else {
		/* We only recorded 3 kinds of results:
		 * NF_ACCEPT, NF_STOP, NF_DROP.
		 * NF_DROP will be processed.
		 * We don't accelerate NF_QUEUE.
		 */
		if (verdict == NF_ACCEPT || verdict == NF_STOP)
			return ip_forward_finish(skb);
		else
			goto drop;
	}
#endif /* end CONFIG_NETFILTER_TABLE_INDEX */

	return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD, skb, skb->dev,
		       rt->u.dst.dev, ip_forward_finish);

sr_failed:
	/*
	 *	Strict routing permits no gatewaying
	 */
	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_SR_FAILED, 0);
	goto drop;

too_many_hops:
	/* Tell the sender its packet died... */
	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_INHDRERRORS);
	icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}