/*
 * make_writable - ensure the first @write_len bytes of @skb's header are
 * safe to modify in place.
 *
 * A cloned skb shares its data with another clone; if our clone does not
 * own enough writable header space, unshare it with pskb_expand_head().
 *
 * Returns 0 on success, or the negative error from pskb_expand_head().
 */
static int make_writable(struct sk_buff *skb, int write_len)
{
	if (skb_cloned(skb) && !skb_clone_writable(skb, write_len))
		return pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

	return 0;
}
/*
 * tcf_csum_ipv4 - recompute the checksums requested in @update_flags for
 * an IPv4 packet: the L4 checksum (ICMP/IGMP/TCP/UDP/UDP-Lite) and/or the
 * IPv4 header checksum.
 *
 * Returns 1 on success, 0 on failure (short/unpullable/unwritable packet).
 */
static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags)
{
	struct iphdr *iph;
	int ntkoff;

	ntkoff = skb_network_offset(skb);

	if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff))
		goto fail;

	iph = ip_hdr(skb);

	/* Non-first fragments carry no L4 header: dispatch as protocol 0
	 * so no case matches and only the IP header checksum is touched. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_ICMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP)
			if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_IGMP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP)
			if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4,
						ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_TCP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP)
			if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len)))
				goto fail;
		break;
	case IPPROTO_UDP:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 0))
				goto fail;
		break;
	case IPPROTO_UDPLITE:
		if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE)
			if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4,
					       ntohs(iph->tot_len), 1))
				goto fail;
		break;
	}

	if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + ntkoff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto fail;

		/* FIX: the helpers above (and the writable check just done)
		 * may call pskb_expand_head()/pskb_may_pull() and reallocate
		 * the header, leaving the iph pointer computed earlier
		 * dangling.  Reload the header pointer instead of passing
		 * the stale iph to ip_send_check(). */
		ip_send_check(ip_hdr(skb));
	}

	return 1;

fail:
	return 0;
}
/** * tcf_csum_skb_nextlayer - Get next layer pointer * @skb: sk_buff to use * @ihl: previous summed headers length * @ipl: complete packet length * @jhl: next header length * * Check the expected next layer availability in the specified sk_buff. * Return the next layer pointer if pass, NULL otherwise. */ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb, unsigned int ihl, unsigned int ipl, unsigned int jhl) { int ntkoff = skb_network_offset(skb); int hl = ihl + jhl; if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) || (skb_cloned(skb) && !skb_clone_writable(skb, hl + ntkoff) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) return NULL; else return (void *)(skb_network_header(skb) + ihl); }
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len) { if (writable_len > skb->len) return 0; /* Not exclusive use of packet? Must copy. */ if (!skb_cloned(skb)) { if (writable_len <= skb_headlen(skb)) return 1; } else if (skb_clone_writable(skb, writable_len)) return 1; if (writable_len <= skb_headlen(skb)) writable_len = 0; else writable_len -= skb_headlen(skb); return !!__pskb_pull_tail(skb, writable_len); }
static int ipip6_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) { struct ip_tunnel *tunnel = netdev_priv(dev); struct net_device_stats *stats = &tunnel->stat; struct iphdr *tiph = &tunnel->parms.iph; struct ipv6hdr *iph6 = ipv6_hdr(skb); u8 tos = tunnel->parms.iph.tos; struct rtable *rt; /* Route to the other host */ struct net_device *tdev; /* Device to other host */ struct iphdr *iph; /* Our new IP header */ unsigned int max_headroom; /* The extra header space needed */ __be32 dst = tiph->daddr; int mtu; if (tunnel->recursion++) { tunnel->stat.collisions++; goto tx_error; } if (skb->protocol != htons(ETH_P_IPV6)) goto tx_error; if (extract_ipv4_endpoint(&iph6->daddr, &dst) < 0) goto tx_error_icmp; { struct flowi fl = { .nl_u = { .ip4_u = { .daddr = dst, .saddr = tiph->saddr, .tos = RT_TOS(tos) } }, .oif = tunnel->parms.link, .proto = IPPROTO_IPV6 }; if (ip_route_output_key(&rt, &fl)) { tunnel->stat.tx_carrier_errors++; goto tx_error_icmp; } } if (rt->rt_type != RTN_UNICAST) { ip_rt_put(rt); tunnel->stat.tx_carrier_errors++; goto tx_error_icmp; } tdev = rt->u.dst.dev; if (tdev == dev) { ip_rt_put(rt); tunnel->stat.collisions++; goto tx_error; } if (tiph->frag_off) mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr); else mtu = skb->dst ? dst_mtu(skb->dst) : dev->mtu; if (mtu < 68) { tunnel->stat.collisions++; ip_rt_put(rt); goto tx_error; } if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; if (tunnel->parms.iph.daddr && skb->dst) skb->dst->ops->update_pmtu(skb->dst, mtu); if (skb->len > mtu) { icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev); ip_rt_put(rt); goto tx_error; } if (tunnel->err_count > 0) { if (jiffies - tunnel->err_time < IPTUNNEL_ERR_TIMEO) { tunnel->err_count--; dst_link_failure(skb); } else tunnel->err_count = 0; } /* * Okay, now see if we can stuff it in the buffer as-is. 
*/ max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr); if (skb_headroom(skb) < max_headroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom); if (!new_skb) { ip_rt_put(rt); stats->tx_dropped++; dev_kfree_skb(skb); tunnel->recursion--; return 0; } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb(skb); skb = new_skb; iph6 = ipv6_hdr(skb); } skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags = 0; dst_release(skb->dst); skb->dst = &rt->u.dst; /* * Push down and install the IPIP header. */ iph = ip_hdr(skb); iph->version = 4; iph->ihl = sizeof(struct iphdr)>>2; if (mtu > IPV6_MIN_MTU) iph->frag_off = htons(IP_DF); else iph->frag_off = 0; iph->protocol = IPPROTO_IPV6; iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6)); iph->daddr = rt->rt_dst; iph->saddr = rt->rt_src; if ((iph->ttl = tiph->ttl) == 0) iph->ttl = iph6->hop_limit; nf_reset(skb); IPTUNNEL_XMIT(); tunnel->recursion--; return 0; tx_error_icmp: dst_link_failure(skb); tx_error: stats->tx_errors++; dev_kfree_skb(skb); tunnel->recursion--; return 0; }
/*
 * tcf_nat - tc "nat" action: rewrite the IPv4 source (egress) or
 * destination (ingress) address of packets matching old_addr/mask, and
 * fix up the affected checksums (IP header, TCP/UDP pseudo-header, and
 * embedded headers inside relevant ICMP errors).
 *
 * Returns the configured tc action code, or TC_ACT_SHOT when the packet
 * is malformed or cannot be made writable.
 */
static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = a->priv;
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;
	int noff;

	/* Snapshot the action parameters and bump stats under the lock,
	 * then work on local copies so the packet path runs unlocked. */
	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;

	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
	p->tcf_bstats.packets++;

	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	noff = skb_network_offset(skb);
	if (!pskb_may_pull(skb, sizeof(*iph) + noff))
		goto drop;

	iph = ip_hdr(skb);

	/* Egress NAT rewrites the source; ingress rewrites the destination. */
	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

	if (!((old_addr ^ addr) & mask)) {
		/* Address matches the rule: make the header writable
		 * (unsharing a clone if necessary) before modifying it. */
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		/* pskb_expand_head() may have moved the data: reload iph. */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		/* Incrementally patch the IP header checksum. */
		csum_replace4(&iph->check, addr, new_addr);
	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
		   iph->protocol != IPPROTO_ICMP) {
		/* No outer match and nothing embedded to translate. */
		goto out;
	}

	ihl = iph->ihl * 4;

	/* It would be nice to share code with stateful NAT. */
	/* Non-first fragments have no L4 header: dispatch as protocol 0. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*tcph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		/* Address change affects the TCP pseudo-header checksum. */
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph) + noff) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*udph) + noff) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
		/* A zero UDP checksum means "not used" and must stay zero;
		 * only patch when one is present (or offloaded). */
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, 1);
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + noff))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		/* Only ICMP errors embed the offending IP header that may
		 * need to be translated back. */
		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph) +
				   noff))
			goto drop;

		/* pskb_may_pull() may move data: recompute both pointers. */
		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		/* The embedded header is from the reverse direction, so the
		 * roles of saddr/daddr are swapped relative to the outer. */
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, ihl + sizeof(*icmph) +
					sizeof(*iph) + noff) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		/* Reload again after the potential reallocation above. */
		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 0);
		break;
	}
	default:
		break;
	}

out:
	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}
/*
 * ipip6_tunnel_xmit - transmit an IPv6 packet through a sit (IPv6-in-IPv4)
 * tunnel.  The IPv4 endpoint is resolved in priority order: ISATAP
 * neighbour, configured daddr, 6to4 mapping, then an IPv4-compatible
 * nexthop/destination.  The packet is then routed, MTU-checked, given an
 * outer IPv4 header and handed off via IPTUNNEL_XMIT().
 *
 * Always returns NETDEV_TX_OK; the skb is consumed on every path.
 */
static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->dev->stats;
	struct iphdr *tiph = &tunnel->parms.iph;
	struct ipv6hdr *iph6 = ipv6_hdr(skb);
	u8 tos = tunnel->parms.iph.tos;
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	__be32 dst = tiph->daddr;
	int mtu;
	struct in6_addr *addr6;
	int addr_type;

	if (skb->protocol != htons(ETH_P_IPV6))
		goto tx_error;

	/* ISATAP (RFC4214) - must come before 6to4 */
	if (dev->priv_flags & IFF_ISATAP) {
		struct neighbour *neigh = NULL;

		if (skb_dst(skb))
			neigh = skb_dst(skb)->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		/* The IPv4 endpoint is embedded in the ISATAP address. */
		addr6 = (struct in6_addr*)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if ((addr_type & IPV6_ADDR_UNICAST) &&
		     ipv6_addr_is_isatap(addr6))
			dst = addr6->s6_addr32[3];
		else
			goto tx_error;
	}

	/* No fixed endpoint configured: try a 6to4 mapping of the dest. */
	if (!dst)
		dst = try_6to4(&iph6->daddr);

	if (!dst) {
		/* Last resort: an IPv4-compatible nexthop or destination. */
		struct neighbour *neigh = NULL;

		if (skb_dst(skb))
			neigh = skb_dst(skb)->neighbour;

		if (neigh == NULL) {
			if (net_ratelimit())
				printk(KERN_DEBUG "sit: nexthop == NULL\n");
			goto tx_error;
		}

		addr6 = (struct in6_addr*)&neigh->primary_key;
		addr_type = ipv6_addr_type(addr6);

		if (addr_type == IPV6_ADDR_ANY) {
			addr6 = &ipv6_hdr(skb)->daddr;
			addr_type = ipv6_addr_type(addr6);
		}

		if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
			goto tx_error_icmp;

		dst = addr6->s6_addr32[3];
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .oif = tunnel->parms.link,
				    .proto = IPPROTO_IPV6 };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			stats->tx_carrier_errors++;
			goto tx_error_icmp;
		}
	}
	if (rt->rt_type != RTN_UNICAST) {
		ip_rt_put(rt);
		stats->tx_carrier_errors++;
		goto tx_error_icmp;
	}
	tdev = rt->u.dst.dev;

	/* Routing back out of the tunnel device itself would loop. */
	if (tdev == dev) {
		ip_rt_put(rt);
		stats->collisions++;
		goto tx_error;
	}

	if (tiph->frag_off)
		mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (mtu < 68) {
		stats->collisions++;
		ip_rt_put(rt);
		goto tx_error;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (tunnel->parms.iph.daddr && skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->len > mtu) {
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
		ip_rt_put(rt);
		goto tx_error;
	}

	/* Report a pending link failure once per error-window tick. */
	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev)+sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			stats->tx_dropped++;
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		dev_kfree_skb(skb);
		skb = new_skb;
		/* header may have moved: refresh the inner IPv6 pointer */
		iph6 = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags = 0;
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->u.dst);

	/*
	 * Push down and install the IPIP header.
	 */
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr)>>2;
	if (mtu > IPV6_MIN_MTU)
		iph->frag_off = tiph->frag_off;
	else
		iph->frag_off = 0;
	iph->protocol = IPPROTO_IPV6;
	iph->tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;

	/* TTL 0 in the tunnel config means "inherit from inner packet". */
	if ((iph->ttl = tiph->ttl) == 0)
		iph->ttl = iph6->hop_limit;

	nf_reset(skb);
	IPTUNNEL_XMIT();
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	stats->tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/*
 * niit_xmit - NIIT translation transmit path.
 *
 * ETH_P_IP:   prepend an IPv6 header whose source/destination are built
 *             from the configured prefix plus the original IPv4 addresses,
 *             attach a dummy Ethernet header, and re-inject the packet on
 *             tunnel4_dev via netif_rx().
 * ETH_P_IPV6: strip the IPv6 header from a prefix-matching IPIP packet and
 *             re-inject the inner IPv4 packet on tunnel6_dev.
 *
 * Always returns 0; the skb is consumed (sent or freed) on every path.
 */
static int niit_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct niit_tunnel *tunnel = (struct niit_tunnel *) netdev_priv(tunnel4_dev);
	struct ethhdr *ethhead;
	struct iphdr *iph4;
	struct ipv6hdr *iph6;
	struct net_device_stats *stats;
	struct rt6_info *rt6;		/* Route to the other host */
	struct net_device *tdev;	/* Device to other host */
	__u8 nexthdr;			/* IPv6 next header */
	u32 delta;			/* calc space inside skb */
	unsigned int max_headroom;	/* The extra header space needed */
	struct in6_addr s6addr;
	struct in6_addr d6addr;

	/*
	 * all IPv4 (includes icmp) will be encapsulated.
	 * IPv6 ICMPs for IPv4 encapsulated data should be translated
	 *
	 */
	if (skb->protocol == htons(ETH_P_IP)) {
		stats = &tunnel4_dev->stats;
		PDEBUG("niit: skb->proto = iph4 \n");
		iph4 = ip_hdr(skb);

		/* Build v6 addresses: configured /96 prefix + v4 address
		 * in the low 32 bits. */
		s6addr.in6_u.u6_addr32[0] = tunnel->ipv6prefix_1;
		s6addr.in6_u.u6_addr32[1] = tunnel->ipv6prefix_2;
		s6addr.in6_u.u6_addr32[2] = tunnel->ipv6prefix_3;
		s6addr.in6_u.u6_addr32[3] = iph4->saddr;
		d6addr.in6_u.u6_addr32[0] = tunnel->ipv6prefix_1;
		d6addr.in6_u.u6_addr32[1] = tunnel->ipv6prefix_2;
		d6addr.in6_u.u6_addr32[2] = tunnel->ipv6prefix_3;
		d6addr.in6_u.u6_addr32[3] = iph4->daddr;
		PDEBUG("niit: ipv4: saddr: %x%x%x%x \n niit: ipv4: daddr %x%x%x%x \n",
		       s6addr.in6_u.u6_addr32[0], s6addr.in6_u.u6_addr32[1],
		       s6addr.in6_u.u6_addr32[2], s6addr.in6_u.u6_addr32[3],
		       d6addr.in6_u.u6_addr32[0], d6addr.in6_u.u6_addr32[1],
		       d6addr.in6_u.u6_addr32[2], d6addr.in6_u.u6_addr32[3]);

		/* Route lookup is only used to find the egress device and
		 * detect loops; the reference is dropped right away. */
		if ((rt6 = rt6_lookup(dev_net(tunnel4_dev), &d6addr, &s6addr,
				      (tunnel4_dev)->iflink, 0)) == NULL) {
			stats->tx_carrier_errors++;
			goto tx_error_icmp;
		}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37)
		tdev = rt6->u.dst.dev;
		dst_release(&rt6->u.dst);
#else
		tdev = rt6->dst.dev;
		dst_release(&rt6->dst);
#endif
		if (tdev == dev) {
			PDEBUG("niit: recursion detected todev = dev \n");
			stats->collisions++;
			goto tx_error;
		}

		/* old MTU check */

		/*
		 * Resize the buffer to push our ipv6 head into
		 */
		max_headroom = LL_RESERVED_SPACE(tdev) +
			       sizeof(struct ipv6hdr);
		if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
		    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
			struct sk_buff *new_skb = skb_realloc_headroom(skb,
					max_headroom);
			if (!new_skb) {
				stats->tx_dropped++;
				dev_kfree_skb(skb);
				tunnel->recursion--;
				return 0;
			}
			if (skb->sk)
				skb_set_owner_w(new_skb, skb->sk);
			dev_kfree_skb(skb);
			skb = new_skb;
			/* data may have moved: refresh the v4 pointer */
			iph4 = ip_hdr(skb);
		}

		/* Bytes currently in front of the network header. */
		delta = skb_network_header(skb) - skb->data;

		/* make our skb space best fit */
		if (delta < sizeof(struct ipv6hdr)) {
			iph6 = (struct ipv6hdr*) skb_push(skb,
					sizeof(struct ipv6hdr) - delta);
			PDEBUG("niit: iph6 < 0 skb->len %x \n", skb->len);
		} else if (delta > sizeof(struct ipv6hdr)) {
			iph6 = (struct ipv6hdr*) skb_pull(skb,
					delta - sizeof(struct ipv6hdr));
			PDEBUG("niit: iph6 > 0 skb->len %x \n", skb->len);
		} else {
			iph6 = (struct ipv6hdr*) skb->data;
			PDEBUG("niit: iph6 = 0 skb->len %x \n", skb->len);
		}

		/* how the package should look like :
		 * skb->network_header = iph6
		 * skb->transport_header = iph4;
		 */
		skb->transport_header = skb->network_header; /* we say skb->transport_header = iph4; */
		skb_reset_network_header(skb); /* now -> we reset the network header to skb->data which is our ipv6 paket */
		skb_reset_mac_header(skb);
		skb->mac_header = skb->network_header - sizeof(struct ethhdr);
		skb->mac_len = sizeof(struct ethhdr);

		/* add a dummy ethhdr to use correct interface linktype */
		ethhead = eth_hdr(skb);
		memcpy(ethhead->h_dest, tunnel4_dev->dev_addr, ETH_ALEN);
		memcpy(ethhead->h_source, tunnel4_dev->dev_addr, ETH_ALEN);
		ethhead->h_proto = htons(ETH_P_IPV6);

		/* prepare to send it again */
		IPCB(skb)->flags = 0;
		skb->protocol = htons(ETH_P_IPV6);
		skb->pkt_type = PACKET_HOST;
		skb->dev = tunnel4_dev;
		skb_dst_drop(skb);

		/* install v6 header */
		/* NOTE(review): payload_len is taken directly from the v4
		 * tot_len; both are big-endian 16-bit so no byte swap is
		 * done here — confirm this matches the intended framing. */
		memset(iph6, 0, sizeof(struct ipv6hdr));
		iph6->version = 6;
		iph6->payload_len = iph4->tot_len;
		iph6->hop_limit = iph4->ttl;
		iph6->nexthdr = IPPROTO_IPIP;
		memcpy(&(iph6->saddr), &s6addr, sizeof(struct in6_addr));
		memcpy(&(iph6->daddr), &d6addr, sizeof(struct in6_addr));

		nf_reset(skb);
		netif_rx(skb);
		tunnel->recursion--;
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		/* got a ipv6-package and need to translate it back to ipv4 */
		__be32 s4addr;
		__be32 d4addr;
		__u8 hoplimit;

		stats = &tunnel6_dev->stats;
		PDEBUG("niit: skb->proto = iph6 \n");
		iph6 = ipv6_hdr(skb);
		if (!iph6) {
			PDEBUG("niit: cant find iph6 \n");
			goto tx_error;
		}

		/* IPv6 to IPv4 */
		hoplimit = iph6->hop_limit;

		/* check against our prefix which all packages must have */
		if (iph6->daddr.s6_addr32[0] != tunnel->ipv6prefix_1 ||
		    iph6->daddr.s6_addr32[1] != tunnel->ipv6prefix_2 ||
		    iph6->daddr.s6_addr32[2] != tunnel->ipv6prefix_3) {
			PDEBUG("niit: xmit ipv6(): Dst addr haven't our previx addr: %x%x%x%x, packet dropped.\n",
			       iph6->daddr.s6_addr32[0],
			       iph6->daddr.s6_addr32[1],
			       iph6->daddr.s6_addr32[2],
			       iph6->daddr.s6_addr32[3]);
			goto tx_error;
		}

		s4addr = iph6->saddr.s6_addr32[3];
		d4addr = iph6->daddr.s6_addr32[3];
		nexthdr = iph6->nexthdr;

		/* TODO nexthdr handle */
		/* while(nexthdr != IPPROTO_IPIP) { } */
		/* Only plain IPIP payloads are supported; extension headers
		 * are not walked yet (see TODO above). */
		if (nexthdr != IPPROTO_IPIP) {
			PDEBUG("niit: cant handle hdrtype : %x.\n", nexthdr);
			goto tx_error;
		}

		iph4 = ipip_hdr(skb);

		/* TODO: fix the check for a valid route */
		/* {
		struct flowi fl = { .nl_u = { .ip4_u =
				    { .daddr = d4addr,
				      .saddr = s4addr,
				      .tos = RT_TOS(iph4->tos) } },
				    .oif = tunnel_dev->iflink,
				    .proto = iph4->protocol };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			PDEBUG("niit : ip route not found \n");
			stats->tx_carrier_errors++;
			goto tx_error_icmp;
		}
		}
		tdev = rt->u.dst.dev;
		if (tdev == tunnel_dev) {
			PDEBUG("niit : tdev == tunnel_dev \n");
			ip_rt_put(rt);
			stats->collisions++;
			goto tx_error;
		}
		if (iph4->frag_off)
			mtu = dst_mtu(&rt->u.dst) - sizeof(struct iphdr);
		else
			mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;
		if (mtu < 68) {
			PDEBUG("niit : mtu < 68 \n");
			stats->collisions++;
			ip_rt_put(rt);
			goto tx_error;
		}
		if (iph4->daddr && skb_dst(skb))
			skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
		*/
		/* if (skb->len > mtu) {
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
			ip_rt_put(rt);
			goto tx_error;
		} */

		/*
		 * check if we can reuse our skb_buff
		 */
		if (skb_shared(skb) ||
		    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
			struct sk_buff *new_skb = skb_realloc_headroom(skb,
					skb_headroom(skb));
			if (!new_skb) {
				stats->tx_dropped++;
				dev_kfree_skb(skb);
				tunnel->recursion--;
				return 0;
			}
			if (skb->sk)
				skb_set_owner_w(new_skb, skb->sk);
			dev_kfree_skb(skb);
			skb = new_skb;
			/* data may have moved: refresh both pointers */
			iph6 = ipv6_hdr(skb);
			iph4 = ipip_hdr(skb);
		}

		/* Drop everything in front of the inner IPv4 header. */
		delta = skb_transport_header(skb) - skb->data;
		skb_pull(skb, delta);

		/* our paket come with ... */
		/* skb->network_header iph6; */
		/* skb->transport_header iph4; */
		skb->network_header = skb->transport_header; /* we say skb->network_header = iph4; */
		skb_set_transport_header(skb, sizeof(struct iphdr));
		skb->mac_header = skb->network_header - sizeof(struct ethhdr);
		skb->mac_len = sizeof(struct ethhdr);

		/* add a dummy ethhdr to use correct interface linktype */
		ethhead = eth_hdr(skb);
		memcpy(ethhead->h_dest, tunnel6_dev->dev_addr, ETH_ALEN);
		memcpy(ethhead->h_source, tunnel6_dev->dev_addr, ETH_ALEN);
		ethhead->h_proto = htons(ETH_P_IP);

		/* prepare to send it again */
		IPCB(skb)->flags = 0;
		skb->protocol = htons(ETH_P_IP);
		skb->pkt_type = PACKET_HOST;
		skb->dev = tunnel6_dev;
		skb_dst_drop(skb);

		/* TODO: set iph4->ttl = hoplimit and recalc the checksum ! */

		/* sending */
		nf_reset(skb);
		netif_rx(skb);
		tunnel->recursion--;
	} else {
		stats = &tunnel6_dev->stats;
		PDEBUG("niit: unknown direction %x \n", skb->protocol);
		goto tx_error; /* drop */
	}

	return 0;

tx_error_icmp:
	dst_link_failure(skb);
	PDEBUG("niit: tx_error_icmp\n");
tx_error:
	PDEBUG("niit: tx_error\n");
	stats->tx_errors++;
	dev_kfree_skb(skb);
	tunnel->recursion--;
	return 0;
}
/*
 * ip6_tnl_xmit2 - encapsulate @skb in an outer IPv6 header and transmit it
 * through tunnel device @dev.
 * @dsfield:     DS field for the outer header (ECN re-encapsulated here)
 * @fl:          flow describing the outer source/destination/protocol
 * @encap_limit: tunnel encapsulation limit; >= 0 adds the destination
 *               option (8 extra bytes of header, 8 less MTU)
 * @pmtu:        out-parameter set to the path MTU when the packet is too big
 *
 * Returns 0 on success, -EMSGSIZE when skb exceeds the tunnel MTU (with
 * *pmtu filled in), or -1 on link/route failure.
 */
static int ip6_tnl_xmit2(struct sk_buff *skb,
			 struct net_device *dev,
			 __u8 dsfield,
			 struct flowi *fl,
			 int encap_limit,
			 __u32 *pmtu)
{
	struct net *net = dev_net(dev);
	struct ip6_tnl *t = netdev_priv(dev);
	struct net_device_stats *stats = &t->dev->stats;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct ipv6_tel_txoption opt;
	struct dst_entry *dst;
	struct net_device *tdev;
	int mtu;
	unsigned int max_headroom = sizeof(struct ipv6hdr);
	u8 proto;
	int err = -1;
	int pkt_len;

	/* Reuse the cached route when still valid, else look one up and
	 * run it through xfrm. */
	if ((dst = ip6_tnl_dst_check(t)) != NULL)
		dst_hold(dst);
	else {
		dst = ip6_route_output(net, NULL, fl);

		if (dst->error || xfrm_lookup(net, &dst, fl, NULL, 0) < 0)
			goto tx_err_link_failure;
	}

	tdev = dst->dev;

	/* Routing back into the tunnel device itself would loop. */
	if (tdev == dev) {
		stats->collisions++;
		if (net_ratelimit())
			printk(KERN_WARNING
			       "%s: Local routing loop detected!\n",
			       t->parms.name);
		goto tx_err_dst_release;
	}
	mtu = dst_mtu(dst) - sizeof (*ipv6h);
	if (encap_limit >= 0) {
		/* Encapsulation-limit destination option costs 8 bytes. */
		max_headroom += 8;
		mtu -= 8;
	}
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
	if (skb->len > mtu) {
		*pmtu = mtu;
		err = -EMSGSIZE;
		goto tx_err_dst_release;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom += LL_RESERVED_SPACE(tdev);

	if (skb_headroom(skb) < max_headroom || skb_shared(skb) ||
	    (skb_cloned(skb) && !skb_clone_writable(skb, 0))) {
		struct sk_buff *new_skb;

		if (!(new_skb = skb_realloc_headroom(skb, max_headroom)))
			goto tx_err_dst_release;

		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		kfree_skb(skb);
		skb = new_skb;
	}
	skb_dst_drop(skb);
	skb_dst_set(skb, dst_clone(dst));

	skb->transport_header = skb->network_header;

	proto = fl->proto;
	if (encap_limit >= 0) {
		init_tel_txopt(&opt, encap_limit);
		ipv6_push_nfrag_opts(skb, &opt.ops, &proto, NULL);
	}
	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	ipv6h = ipv6_hdr(skb);
	/* First 32 bits: version 6 + traffic class 0 + flow label. */
	*(__be32*)ipv6h = fl->fl6_flowlabel | htonl(0x60000000);
	dsfield = INET_ECN_encapsulate(0, dsfield);
	ipv6_change_dsfield(ipv6h, ~INET_ECN_MASK, dsfield);
	ipv6h->hop_limit = t->parms.hop_limit;
	ipv6h->nexthdr = proto;
	ipv6_addr_copy(&ipv6h->saddr, &fl->fl6_src);
	ipv6_addr_copy(&ipv6h->daddr, &fl->fl6_dst);
	nf_reset(skb);
	pkt_len = skb->len;
	err = ip6_local_out(skb);

	if (net_xmit_eval(err) == 0) {
		stats->tx_bytes += pkt_len;
		stats->tx_packets++;
	} else {
		stats->tx_errors++;
		stats->tx_aborted_errors++;
	}
	/* Cache the route for the next packet through this tunnel. */
	ip6_tnl_dst_store(t, dst);
	return 0;
tx_err_link_failure:
	stats->tx_carrier_errors++;
	dst_link_failure(skb);
tx_err_dst_release:
	dst_release(dst);
	return err;
}
//int encap(struct sk_buff *skb, struct tun_param *tp, unsigned int mark, struct ka_payload *kap){ int encap(struct sk_buff *skb, struct tun_param *tp, unsigned int mark, struct ka_payload *kap){ struct sk_buff *new_skb; struct iphdr *old_iph; //struct iphdr *iph; struct net_device *dev; int ret = 0; struct sock tmpsk; struct udphdr *udph; u32 saddr, daddr; u16 sport, dport; struct rtable *rt; struct flowi4 fl4; unsigned int pushroom, piggyroom; piggyroom = 0; if(kap->tid == 0) piggyroom = 0; else piggyroom = PIGGYROOM; pushroom = HEADROOM + piggyroom; old_iph = get_IP_header(skb); daddr = tp->tr.addr; sport = tp->tl.port; dport = tp->tr.port; // since 2.6.36 rtable no longer has rt_src attribute, now I get source address from the device (Sander) dev = dev_get_by_index(upmtns->net_ns, tp->tl.ifindex); if (!dev) { dmesge("no device found during encap"); return -1; } saddr = get_dev_ip_address(dev, NULL, 0); // since 2.6.39 ip_route_output_key flowi parameter has been replaced with flowi4, rtable no longer has rt_src attribute // ip_route_output_key function now returns an rtable and should be replaced with ip_route_output_ports (Sander) tmpsk.sk_mark = mark; rt = ip_route_output_ports(dev_net(dev), &fl4, &tmpsk, daddr, 0, 0, 0, IPPROTO_IP, RT_TOS(old_iph->tos), tp->tl.ifindex); if (!rt) { dmesge("encap - no rtable found during encap"); ret = -1; goto end; } if (skb_headroom(skb) < pushroom || skb_shared(skb) || (skb_cloned(skb) && !skb_clone_writable(skb, 0))) { new_skb = skb_realloc_headroom(skb, pushroom); printk(" --- socket buffer REALLOCATION\n"); if (!new_skb) { dmesge("encap - can not realloc skb room"); ret = -1; goto end; } if (skb->sk) skb_set_owner_w(new_skb, skb->sk); dev_kfree_skb_any(skb); skb = new_skb; old_iph = ip_hdr(skb); } if(piggyroom > 0){ skb_push(skb, PIGGYROOM); //print_ka_info(kap->info); memcpy(skb->data, kap, sizeof(struct ka_payload)); skb_push(skb, HEADROOM); } else{ //printk("Creating normal packet...\n"); skb_push(skb, HEADROOM); } 
skb_reset_network_header(skb); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_drop(skb); // since 2.6.36 rtable has no 'u' attribute, you can use dst directly (Sander) skb_dst_set(skb, &rt->dst); set_ip_header(skb, old_iph, saddr, daddr, piggyroom); set_udp_header(skb, old_iph, sport, dport, piggyroom); nf_reset(skb); // since 2.6.36 rtable has no 'u' attribute, you can use dst directly (Sander) ip_select_ident(ip_hdr(skb), &rt->dst, NULL); skb->mark = mark; //useless... /*************/ udph = get_UDP_header(skb); //dmesg("ENCAP ---> UDP src: %u dst: %u mark: %u", ntohs(udph->source), ntohs(udph->dest),skb->mark); end: dev_put(dev); return ret; }