/*
 * Re-run the xfrm (IPsec) lookup for a packet whose addressing was changed
 * after the original transform decision, so it leaves via the correct
 * transformed route.  Returns 0 on success, -1 on error (caller drops).
 */
int ip_xfrm_me_harder(struct sk_buff **pskb)
{
	struct flowi fl;
	unsigned int hh_len;
	struct dst_entry *dst;

	/* Packet already went through the transform path - nothing to redo. */
	if (IPCB(*pskb)->flags & IPSKB_XFRM_TRANSFORMED)
		return 0;
	if (xfrm_decode_session(*pskb, &fl, AF_INET) < 0)
		return -1;

	dst = (*pskb)->dst;
	if (dst->xfrm)
		/* Already an xfrm dst: start from the underlying route. */
		dst = ((struct xfrm_dst *)dst)->route;
	dst_hold(dst);

	/* NOTE(review): assumes xfrm_lookup() consumes the held reference on
	 * failure (as in kernels of this era) - verify, otherwise this leaks. */
	if (xfrm_lookup(&dst, &fl, (*pskb)->sk, 0) < 0)
		return -1;

	/* Swap the skb's route for the (possibly transformed) result. */
	dst_release((*pskb)->dst);
	(*pskb)->dst = dst;

	/* Change in oif may mean change in hh_len. */
	hh_len = (*pskb)->dst->dev->hard_header_len;
	if (skb_headroom(*pskb) < hh_len &&
	    pskb_expand_head(*pskb, HH_DATA_ALIGN(hh_len - skb_headroom(*pskb)),
			     0, GFP_ATOMIC))
		return -1;
	return 0;
}
/*
 * Send packets to output.
 *
 * Fast-path transmit: if the dst has a cached hardware header (dst->hh),
 * copy it in front of the payload under the seqlock read side and call the
 * cached output function; otherwise fall back to the neighbour's output.
 * Returns the output function's result, except that 1 is mapped to 0.
 */
static inline int bcm_fast_path_output(struct sk_buff *skb)
{
	int ret = 0;
	struct dst_entry *dst = skb_dst(skb);
	struct hh_cache *hh = dst->hh;

	if (hh) {
		unsigned seq;
		int hh_len;

		/* Lockless read of the cached header: retry the copy if a
		 * writer updated hh_data/hh_len while we were reading. */
		do {
			int hh_alen;

			seq = read_seqbegin(&hh->hh_lock);
			hh_len = hh->hh_len;
			hh_alen = HH_DATA_ALIGN(hh_len);
			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		} while (read_seqretry(&hh->hh_lock, seq));
		skb_push(skb, hh_len);
		ret = hh->hh_output(skb);
		if (ret == 1)
			return 0; /* Don't return 1 */
	} else if (dst->neighbour) {
		ret = dst->neighbour->output(skb);
		if (ret == 1)
			return 0; /* Don't return 1 */
	}
	return ret;
}
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue.
 *
 * Re-look-up the route for a packet whose header fields (addresses, mark,
 * tos) may have been rewritten by netfilter, and attach the new dst.
 * Returns 0 on success, -1 on failure (caller should drop the packet).
 */
int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;
	struct flowi4 fl4 = {};
	__be32 saddr = iph->saddr;
	__u8 flags = skb->sk ? inet_sk_flowi_flags(skb->sk) : 0;
	unsigned int hh_len;

	if (addr_type == RTN_UNSPEC)
		addr_type = inet_addr_type(net, saddr);
	if (addr_type == RTN_LOCAL || addr_type == RTN_UNICAST)
		flags |= FLOWI_FLAG_ANYSRC;
	else
		/* Foreign source: let the lookup pick a source address. */
		saddr = 0;

	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
	 * packets with foreign saddr to appear on the NF_INET_LOCAL_OUT hook.
	 */
	fl4.daddr = iph->daddr;
	fl4.saddr = saddr;
	fl4.flowi4_tos = RT_TOS(iph->tos);
	fl4.flowi4_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_flags = flags;
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return -1;

	/* Drop old route. */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	if (skb_dst(skb)->error)
		return -1;

#ifdef CONFIG_XFRM
	/* Re-run the transform lookup on the fresh route if needed. */
	if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi4_to_flowi(&fl4), AF_INET) == 0) {
		struct dst_entry *dst = skb_dst(skb);

		/* Detach so xfrm_lookup() owns the reference. */
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi4_to_flowi(&fl4), skb->sk, 0);
		if (IS_ERR(dst))
			return -1;
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -1;

	return 0;
}
/*
 * Prepend the cached link-layer header to @skb and transmit it via the
 * cached output function.  Compat equivalent of neigh_hh_output() for
 * kernels where hh->hh_lock is an rwlock (read side, BH disabled while
 * copying so the header cannot change under us).
 */
int xtnu_neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
{
	unsigned int hh_alen;

	read_lock_bh(&hh->hh_lock);
	hh_alen = HH_DATA_ALIGN(hh->hh_len);
	memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
	read_unlock_bh(&hh->hh_lock);
	/* skb_push() after the unlock: hh_len itself is stable enough here
	 * (NOTE(review): assumes hh_len does not change between the locked
	 * read and this push - same assumption as mainline of that era). */
	skb_push(skb, hh->hh_len);
	return hh->hh_output(skb);
}
/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 *
 * Realloc headroom if the device needs more than we have, then emit via
 * the cached hardware header (seqlock read side) or the neighbour output.
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	unsigned seq;

	/* Be paranoid, rather than too clever. */
	// if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
	if (unlikely(skb_headroom(skb) < hh_len &&
		     (dev->header_ops && dev->header_ops->create))) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			/* Keep socket accounting attached to the copy. */
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		/* Lockless cached-header copy; retry on concurrent update. */
		do {
			int hh_alen;

			seq = read_seqbegin(&hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(hh->hh_len);
			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		} while (read_seqretry(&hh->hh_lock, seq));
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		/* No way to build a link-layer header: drop. */
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
/* Stolen from ip_finish_output2 */ static void ip_direct_send(struct sk_buff *skb) { struct dst_entry *dst = skb->dst; struct hh_cache *hh = dst->hh; if (hh) { int hh_alen; read_lock_bh(&hh->hh_lock); hh_alen = HH_DATA_ALIGN(hh->hh_len); memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); read_unlock_bh(&hh->hh_lock); skb_push(skb, hh->hh_len); hh->hh_output(skb); } else if (dst->neighbour) dst->neighbour->output(skb); else { printk(KERN_DEBUG "khm in MIRROR\n"); kfree_skb(skb); } }
/*
 * Final IPv6 output step: push the link-layer header from the dst's
 * hardware-header cache (read lock held while copying) or hand off to the
 * neighbour output function.  Frees the skb and returns -EINVAL when
 * neither path is available.
 */
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	/* No link-layer information at all: drop the packet. */
	kfree_skb(skb);
	return -EINVAL;
}
/* Stolen from ip_finish_output2 * PRE : skb->dev is set to the device we are leaving by * skb->dst is not NULL * POST: the packet is sent with the link layer header pushed * the packet is destroyed */ static void ip_direct_send(struct sk_buff *skb) { struct dst_entry *dst = skb_dst(skb); struct hh_cache *hh = dst->hh; struct net_device *dev = dst->dev; int hh_len = LL_RESERVED_SPACE(dev); /* Be paranoid, rather than too clever. */ if (unlikely(skb_headroom(skb) < hh_len )) { struct sk_buff *skb2; skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev)); if (skb2 == NULL) { kfree_skb(skb); return; } if (skb->sk) skb_set_owner_w(skb2, skb->sk); kfree_skb(skb); skb = skb2; } if (hh) { int hh_alen; write_seqlock_bh(&hh->hh_lock); hh_alen = HH_DATA_ALIGN(hh->hh_len); memcpy(skb->data - hh_alen, hh->hh_data, hh_alen); write_sequnlock_bh(&hh->hh_lock); skb_push(skb, hh->hh_len); hh->hh_output(skb); } else if (dst->neighbour) dst->neighbour->output(skb); else { if (net_ratelimit()) pr_debug(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n"); kfree_skb(skb); } }
/*
 * Re-route an IPv6 packet whose header fields may have been rewritten by
 * netfilter and attach the fresh dst.  Returns 0 or a negative errno.
 */
int ip6_route_me_harder(struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	unsigned int hh_len;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = skb->sk ? skb->sk->sk_bound_dev_if : 0,
		.flowi6_mark = skb->mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
	};
	int err;

	dst = ip6_route_output(net, skb->sk, &fl6);
	err = dst->error;
	if (err) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
		net_dbg_ratelimited("ip6_route_me_harder: No more route\n");
		dst_release(dst);
		return err;
	}

	/* Drop old route. */
	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

#ifdef CONFIG_XFRM
	/* Re-run the transform lookup on the fresh route if needed. */
	if (!(IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, flowi6_to_flowi(&fl6), AF_INET6) == 0) {
		/* Detach so xfrm_lookup() owns the reference. */
		skb_dst_set(skb, NULL);
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), skb->sk, 0);
		if (IS_ERR(dst))
			return PTR_ERR(dst);
		skb_dst_set(skb, dst);
	}
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb_dst(skb)->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(ip6_route_me_harder);

/*
 * Extra routing may needed on local out, as the QUEUE target never
 * returns control to the table.
 */

/* Routing key snapshot saved per queued packet so we can detect rewrites. */
struct ip6_rt_info {
	struct in6_addr daddr;
	struct in6_addr saddr;
	u_int32_t mark;
};

/* Save the routing-relevant header fields of a locally generated packet
 * before it is handed to a userspace queue. */
static void nf_ip6_saveroute(const struct sk_buff *skb,
			     struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		rt_info->daddr = iph->daddr;
		rt_info->saddr = iph->saddr;
		rt_info->mark = skb->mark;
	}
}

/* After reinjection: if userspace changed addresses or mark, re-route. */
static int nf_ip6_reroute(struct sk_buff *skb,
			  const struct nf_queue_entry *entry)
{
	struct ip6_rt_info *rt_info = nf_queue_entry_reroute(entry);

	if (entry->hook == NF_INET_LOCAL_OUT) {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		if (!ipv6_addr_equal(&iph->daddr, &rt_info->daddr) ||
		    !ipv6_addr_equal(&iph->saddr, &rt_info->saddr) ||
		    skb->mark != rt_info->mark)
			return ip6_route_me_harder(skb);
	}
	return 0;
}

/* nf_afinfo route callback: plain IPv6 route lookup.  @strict forces an
 * interface-bound lookup via a fake bound socket. */
static int nf_ip6_route(struct net *net, struct dst_entry **dst,
			struct flowi *fl, bool strict)
{
	static const struct ipv6_pinfo fake_pinfo;
	static const struct inet_sock fake_sk = {
		/* makes ip6_route_output set RT6_LOOKUP_F_IFACE: */
		.sk.sk_bound_dev_if = 1,
		.pinet6 = (struct ipv6_pinfo *) &fake_pinfo,
	};
	const void *sk = strict ? &fake_sk : NULL;
	struct dst_entry *result;
	int err;

	result = ip6_route_output(net, sk, &fl->u.ip6);
	err = result->error;
	if (err)
		dst_release(result);
	else
		*dst = result;
	return err;
}

/* Verify (or compute) the transport checksum of an IPv6 packet for
 * netfilter; returns 0 if the checksum is OK/unnecessary. */
__sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook,
			unsigned int dataoff, u_int8_t protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Hardware sum only trusted on the receive path hooks. */
		if (hook != NF_INET_PRE_ROUTING && hook != NF_INET_LOCAL_IN)
			break;
		if (!csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
				     skb->len - dataoff, protocol,
				     csum_sub(skb->csum,
					      skb_checksum(skb, 0,
							   dataoff, 0)))) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
		/* fall through */
	case CHECKSUM_NONE:
		/* Seed with the pseudo-header and verify the full packet. */
		skb->csum = ~csum_unfold(
				csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					     skb->len - dataoff,
					     protocol,
					     csum_sub(0,
						      skb_checksum(skb, 0,
								   dataoff, 0))));
		csum = __skb_checksum_complete(skb);
	}
	return csum;
}
EXPORT_SYMBOL(nf_ip6_checksum);

/* As nf_ip6_checksum(), but only over @len bytes of the transport data. */
static __sum16 nf_ip6_checksum_partial(struct sk_buff *skb, unsigned int hook,
				       unsigned int dataoff, unsigned int len,
				       u_int8_t protocol)
{
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	__wsum hsum;
	__sum16 csum = 0;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		/* Whole payload requested: defer to the full check. */
		if (len == skb->len - dataoff)
			return nf_ip6_checksum(skb, hook, dataoff, protocol);
		/* fall through */
	case CHECKSUM_NONE:
		hsum = skb_checksum(skb, 0, dataoff, 0);
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ip6h->saddr,
							 &ip6h->daddr,
							 skb->len - dataoff,
							 protocol,
							 csum_sub(0, hsum)));
		skb->ip_summed = CHECKSUM_NONE;
		return __skb_checksum_complete_head(skb, dataoff + len);
	}
	return csum;
};

static const struct nf_ipv6_ops ipv6ops = {
	.chk_addr	= ipv6_chk_addr,
};

/* AF_INET6 netfilter helper operations registered below. */
static const struct nf_afinfo nf_ip6_afinfo = {
	.family			= AF_INET6,
	.checksum		= nf_ip6_checksum,
	.checksum_partial	= nf_ip6_checksum_partial,
	.route			= nf_ip6_route,
	.saveroute		= nf_ip6_saveroute,
	.reroute		= nf_ip6_reroute,
	.route_key_size		= sizeof(struct ip6_rt_info),
};

int __init ipv6_netfilter_init(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, &ipv6ops);
	return nf_register_afinfo(&nf_ip6_afinfo);
}

/* This can be called from inet6_init() on errors, so it cannot
 * be marked __exit. -DaveM
 */
void ipv6_netfilter_fini(void)
{
	RCU_INIT_POINTER(nf_ipv6_ops, NULL);
	nf_unregister_afinfo(&nf_ip6_afinfo);
}
/* route_me_harder function, used by iptable_nat, iptable_mangle + ip_queue.
 *
 * Older (pre-flowi4) variant: re-route a packet whose header fields were
 * rewritten.  Local sources go through an output route lookup; non-local
 * sources are validated through ip_route_input() so rp_filter is honoured.
 * Returns 0 on success, -1 on failure.
 */
int ip_route_me_harder(struct sk_buff *skb, unsigned addr_type)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;
	struct flowi fl = {};
	struct dst_entry *odst;
	unsigned int hh_len;
	unsigned int type;

	type = inet_addr_type(iph->saddr);
	if (addr_type == RTN_UNSPEC)
		addr_type = type;

	/* some non-standard hacks like ipt_REJECT.c:send_reset() can cause
	 * packets with foreign saddr to appear on the NF_IP_LOCAL_OUT hook.
	 */
	if (addr_type == RTN_LOCAL) {
		fl.nl_u.ip4_u.daddr = iph->daddr;
		if (type == RTN_LOCAL)
			fl.nl_u.ip4_u.saddr = iph->saddr;
		fl.nl_u.ip4_u.tos = RT_TOS(iph->tos);
		fl.oif = skb->sk ? skb->sk->sk_bound_dev_if : 0;
		fl.mark = skb->mark;
		if (ip_route_output_key(&rt, &fl) != 0)
			return -1;

		/* Drop old route. */
		dst_release(skb->dst);
		skb->dst = &rt->u.dst;
	} else {
		/* non-local src, find valid iif to satisfy
		 * rp-filter when calling ip_route_input. */
		fl.nl_u.ip4_u.daddr = iph->saddr;
		if (ip_route_output_key(&rt, &fl) != 0)
			return -1;

		odst = skb->dst;
		/* ip_route_input() installs the new dst on success. */
		if (ip_route_input(skb, iph->daddr, iph->saddr,
				   RT_TOS(iph->tos), rt->u.dst.dev) != 0) {
			dst_release(&rt->u.dst);
			return -1;
		}
		dst_release(&rt->u.dst);
		dst_release(odst);
	}

	if (skb->dst->error)
		return -1;

#ifdef CONFIG_XFRM
	/* Re-run the transform lookup on the fresh route if needed. */
	if (!(IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) &&
	    xfrm_decode_session(skb, &fl, AF_INET) == 0)
		if (xfrm_lookup(&skb->dst, &fl, skb->sk, 0))
			return -1;
#endif

	/* Change in oif may mean change in hh_len. */
	hh_len = skb->dst->dev->hard_header_len;
	if (skb_headroom(skb) < hh_len &&
	    pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
			     0, GFP_ATOMIC))
		return -1;

	return 0;
}
/*
 * Build and transmit one UDP notification packet per active logon of @uid,
 * informing the firewall daemon (fwprofd) that @uid bound @port_src/@protocol.
 * The packet (IP + UDP + fwprofd_header + fwprofd_port_ctl, MD5-signed with
 * the logon secret) is crafted by hand and pushed straight to the neighbour
 * layer, bypassing the normal output path.
 * Returns 0 if at least one packet was sent, negative on error.
 */
static int ak_client_inform_port(const struct net_device *dev, aku16 port_src,
				 aku8 protocol, unsigned int uid)
{
	ak_client_logon_array user_logon[AK_CLIENT_MAX_LOGONS_PER_USER];
	struct sk_buff *skb;	// Packet to be sent to notify the firewall
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
	struct flowi flp;
#else
	struct flowi4 flp;
#endif
	struct in_device *idev;
	struct rtable *rt;	// Route used to send the packet
	struct iphdr *ip;	// IP header of the outgoing packet
	struct udphdr *udp;	// UDP header of the outgoing packet
	struct dst_entry *dst;
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
	struct neighbour *neigh;
#endif
	MD5_CTX contexto;	// Context for the MD5 computations
	int pkt_sent = 0;	// Did we send at least one packet?
	fwprofd_header *header;
	fwprofd_port_ctl *port_ctl;
	ak_client_logon_array *logon;
	int size;
	int count;
	int i;

	if (!dev) {
		PRINT("Device de saida NULL\n");
		return -2;
	}
	count = ak_client_get_user_list(uid, user_logon);
	size = sizeof(struct iphdr) + sizeof(struct udphdr) +
	       sizeof(fwprofd_header) + sizeof(fwprofd_port_ctl);

	/* One notification packet per logon entry. */
	for (i = 0, logon = user_logon; i < count; i++, logon++) {
		PRINT("Enviando pacote %d/%d - ", i + 1, count);
		skb = alloc_skb(size + 16, GFP_ATOMIC);
		if (!skb) {
			PRINT("Nao consegui alocar skbuff para enviar pacote\n");
			return -3;
		}
		/* Reserve 16 bytes of headroom for the link-layer header. */
		skb->data += 16;
		skb->len = size;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
		skb->tail = skb->data + size;
		skb->nh.iph = (struct iphdr *) skb->data;
		skb->h.uh = (struct udphdr *) (skb->data + sizeof(struct iphdr));
		ip = skb->nh.iph;
#else
		skb_set_tail_pointer(skb, size);
		skb_reset_network_header(skb);
		skb_set_transport_header(skb, sizeof(struct iphdr));
		ip = ip_hdr(skb);
#endif
		udp = (struct udphdr *) ((char *) ip + sizeof(struct iphdr));
		header = (fwprofd_header *) (udp + 1);
		port_ctl = (fwprofd_port_ctl *) (header + 1);

		// Get the outgoing interface's IP to allocate the output route
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		idev = in_dev_get(dev);
#else
		rcu_read_lock();
		idev = __in_dev_get_rcu(dev);
#endif
		if (!idev) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0))
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Device de saida sem IP (1)\n");
			return -4;
		}
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_lock(&idev->lock);
#endif
		if (!idev->ifa_list) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
			read_unlock(&idev->lock);
			in_dev_put(idev);
#else
			rcu_read_unlock();
#endif
			kfree_skb(skb);
			PRINT("Device de saida sem IP (2)\n");
			return -5;
		}
		/* Use the first address configured on the interface. */
		ip->saddr = idev->ifa_list->ifa_address;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
		read_unlock(&idev->lock);
		in_dev_put(idev);
#else
		rcu_read_unlock();
#endif

		/* Fill the flow key for the route lookup (UDP, fixed port). */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
		flp.oif = 0;
		flp.nl_u.ip4_u.saddr = ip->saddr;
		flp.nl_u.ip4_u.daddr = logon->logon_data.ip.s_addr;
		flp.nl_u.ip4_u.tos = 0;
		flp.uli_u.ports.sport = ntohs(AKER_PROF_PORT);
		flp.uli_u.ports.dport = ntohs(AKER_PROF_PORT);
		flp.proto = IPPROTO_UDP;
#else
		flp.flowi4_oif = 0;
		flp.saddr = ip->saddr;
		flp.daddr = logon->logon_data.ip.s_addr;
		flp.flowi4_tos = 0;
		flp.fl4_sport = ntohs(AKER_PROF_PORT);
		flp.fl4_dport = ntohs(AKER_PROF_PORT);
		flp.flowi4_proto = IPPROTO_UDP;
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25))
		if (ip_route_output_key(&rt, &flp))
#else
		if (ip_route_output_key(&init_net, &rt, &flp))
#endif
		{
			kfree_skb(skb);
			PRINT("Erro ao alocar rota de saida\n");
			continue;
		}
#else
		rt = ip_route_output_key(&init_net, &flp);
		if (IS_ERR(rt)) {
			kfree_skb(skb);
			PRINT("Erro ao alocar rota de saida\n");
			continue;
		}
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		skb->dst = dst_clone(&rt->u.dst);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb_dst_set(skb, dst_clone(&rt->u.dst));
#else
		skb_dst_set(skb, dst_clone(&rt->dst));
#endif
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
		skb->dev = rt->u.dst.dev;
#else
		skb->dev = rt->dst.dev;
#endif
		skb->protocol = __constant_htons(ETH_P_IP);

		// Fill in the user data
		port_ctl->ip_src.s_addr = 0;
		port_ctl->seq = ntohl(logon->seq); // ak_client_get_user_list() already bumped seq
		port_ctl->user_num = ntohl(logon->logon_data.ak_user_num);
		port_ctl->port = port_src;
		port_ctl->protocol = protocol;
		port_ctl->reserved = 0;

		/* Sign the port_ctl fields with the logon secret. */
		MD5Init(&contexto);
		MD5Update(&contexto, (u_char *) logon->logon_data.secret, 16);
		MD5Update(&contexto, (u_char *) &port_ctl->ip_src,
			  sizeof(struct in_addr));
		MD5Update(&contexto, (u_char *) &port_ctl->seq, sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->user_num,
			  sizeof(aku32));
		MD5Update(&contexto, (u_char *) &port_ctl->port, sizeof(aku16));
		MD5Update(&contexto, (u_char *) &port_ctl->protocol,
			  sizeof(aku8));
		MD5Update(&contexto, (u_char *) &port_ctl->reserved,
			  sizeof(aku8));
		MD5Final((u_char *) port_ctl->hash, &contexto);

		// Fill in the remaining packet fields
		header->ip_dst = logon->logon_data.ip;
		header->versao = AKER_PROF_VERSION;
		header->tipo_req = APROF_BIND_PORT;
		memset(header->md5, 0, 16);
		/* Whole-message digest (md5 field zeroed while hashing). */
		MD5Init(&contexto);
		MD5Update(&contexto, (void *) header, sizeof(fwprofd_header));
		MD5Update(&contexto, (void *) port_ctl, sizeof(fwprofd_port_ctl));
		MD5Final(header->md5, &contexto);

		udp->dest = udp->source = ntohs(AKER_PROF_PORT);
		udp->len = ntohs(size - sizeof(struct iphdr));
		udp->check = 0;

		ip->ihl = sizeof(struct iphdr) >> 2;
		ip->version = IPVERSION;
		ip->ttl = IPDEFTTL;
		ip->tos = 0;
		ip->daddr = header->ip_dst.s_addr;
		ip->protocol = IPPROTO_UDP;
		ip->frag_off = 0;
		ip->tot_len = htons(size);
		ip->id = 0;
		ip->check = 0;
		ip->check = ip_fast_csum((u_char *) ip, ip->ihl);

		PRINT("%s -> %s\n", ip2a(ip->saddr), ip2a(ip->daddr));

		// Send the packet
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
		dst = skb->dst;
#else
		dst = skb_dst(skb);
#endif
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)) && \
     LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0))
		rcu_read_lock();
		neigh = dst_get_neighbour_noref(dst);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		rcu_read_lock();
		/* NOTE(review): dst_neigh_lookup_skb() takes a reference;
		 * there is no neigh_release() here - verify for a leak. */
		neigh = dst_neigh_lookup_skb(dst, skb);
		if (neigh) {
			neigh->output(neigh, skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#else
		if (dst->hh) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
			int hh_alen;

			read_lock_bh(dst->hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(dst->hh->hh_len);
			memcpy(skb->data - hh_alen, dst->hh->hh_data, hh_alen);
			read_unlock_bh(dst->hh->hh_lock);
			skb_push(skb, dst->hh->hh_len);
			dst->hh->hh_output(skb);
#else
			neigh_hh_output(dst->hh, skb);
#endif
			ip_rt_put(rt);
			pkt_sent++;
		} else if (dst->neighbour) {
			dst->neighbour->output(skb);
			ip_rt_put(rt);
			pkt_sent++;
		}
#endif
		/* Attaches to whichever if-chain survived preprocessing. */
		else {
			kfree_skb(skb);
			ip_rt_put(rt);
			PRINT("Nao sei como enviar pacote de saida\n");
		}
#if (((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,41)) && \
      (LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0))) || \
     (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0)))
		rcu_read_unlock();
#endif
	}
	if (!pkt_sent)
		return -1;
	return 0;
}
/* Try to route the packet according to the routing keys specified in
 * route_info. Keys are :
 *  - ifindex :
 *      0 if no oif preferred,
 *      otherwise set to the index of the desired oif
 *  - route_info->gw :
 *      0 if no gateway specified,
 *      otherwise set to the next host to which the pkt must be routed
 * If success, skb->dev is the output device to which the packet must
 * be sent and skb->dst is not NULL
 *
 * RETURN: -1 if an error occured
 *          1 if the packet was succesfully routed to the
 *            destination desired
 *          0 if the kernel routing table could not route the packet
 *            according to the keys specified
 */
static int route(struct sk_buff *skb,
		 unsigned int ifindex,
		 const struct ipt_route_target_info *route_info)
{
	int err;
	struct rtable *rt;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
	struct iphdr *iph = ip_hdr(skb);
#else
	struct iphdr *iph = skb->nh.iph;
#endif
	struct flowi fl = {
		.oif = ifindex,
		.nl_u = {
			.ip4_u = {
				.daddr = iph->daddr,
				.saddr = 0,
				.tos = RT_TOS(iph->tos),
				.scope = RT_SCOPE_UNIVERSE,
			}
		}
	};

	/* The destination address may be overloaded by the target */
	if (route_info->gw)
		fl.fl4_dst = route_info->gw;

	/* Trying to route the packet using the standard routing table. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
	if ((err = ip_route_output_key(&init_net, &rt, &fl))) {
#else
	if ((err = ip_route_output_key(&rt, &fl))) {
#endif
		if (net_ratelimit())
			DEBUGP("ipt_ROUTE: couldn't route pkt (err: %i)", err);
		return -1;
	}

	/* Drop old route. */
	dst_release(skb->dst);
	skb->dst = NULL;

	/* Success if no oif specified or if the oif correspond to the
	 * one desired */
	if (!ifindex || rt->u.dst.dev->ifindex == ifindex) {
		skb->dst = &rt->u.dst;
		skb->dev = skb->dst->dev;
		skb->protocol = htons(ETH_P_IP);
		return 1;
	}

	/* The interface selected by the routing table is not the one
	 * specified by the user. This may happen because the dst address
	 * is one of our own addresses.
	 */
	if (net_ratelimit())
		DEBUGP("ipt_ROUTE: failed to route as desired gw=%u.%u.%u.%u oif=%i (got oif=%i)\n",
		       NIPQUAD(route_info->gw), ifindex, rt->u.dst.dev->ifindex);

	return 0;
}

/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	unsigned seq;

	/* Be paranoid, rather than too clever. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
#else
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
#endif
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			/* Keep socket accounting attached to the copy. */
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		/* Lockless cached-header copy; retry on concurrent update. */
		do {
			int hh_alen;

			seq = read_seqbegin(&hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(hh->hh_len);
			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		} while (read_seqretry(&hh->hh_lock, seq));
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}

/* PRE : skb->dev is set to the device we are leaving by
 * POST: - the packet is directly sent to the skb->dev device, without
 *         pushing the link layer header.
 *       - the packet is destroyed
 */
static inline int dev_direct_send(struct sk_buff *skb)
{
	return dev_queue_xmit(skb);
}

/* Route via a user-specified output interface; falls back to forcing a
 * tunnel device when the routing table disagrees with the requested oif. */
static unsigned int route_oif(const struct ipt_route_target_info *route_info,
			      struct sk_buff *skb)
{
	unsigned int ifindex = 0;
	struct net_device *dev_out = NULL;

	/* The user set the interface name to use.
	 * Getting the current interface index.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	if ((dev_out = dev_get_by_name(&init_net, route_info->oif))) {
#else
	if ((dev_out = dev_get_by_name(route_info->oif))) {
#endif
		ifindex = dev_out->ifindex;
	} else {
		/* Unknown interface name : packet dropped */
		if (net_ratelimit())
			DEBUGP("ipt_ROUTE: oif interface %s not found\n",
			       route_info->oif);
		return NF_DROP;
	}

	/* Trying the standard way of routing packets */
	switch (route(skb, ifindex, route_info)) {
	case 1:
		dev_put(dev_out);
		if (route_info->flags & IPT_ROUTE_CONTINUE)
			return IPT_CONTINUE;

		ip_direct_send(skb);
		return NF_STOLEN;

	case 0:
		/* Failed to send to oif. Trying the hard way */
		if (route_info->flags & IPT_ROUTE_CONTINUE)
			return NF_DROP;

		if (net_ratelimit())
			DEBUGP("ipt_ROUTE: forcing the use of %i\n", ifindex);

		/* We have to force the use of an interface.
		 * This interface must be a tunnel interface since
		 * otherwise we can't guess the hw address for
		 * the packet. For a tunnel interface, no hw address
		 * is needed.
		 */
		if ((dev_out->type != ARPHRD_TUNNEL) &&
		    (dev_out->type != ARPHRD_IPGRE)) {
			if (net_ratelimit())
				DEBUGP("ipt_ROUTE: can't guess the hw addr !\n");
			dev_put(dev_out);
			return NF_DROP;
		}

		/* Send the packet. This will also free skb
		 * Do not go through the POST_ROUTING hook because
		 * skb->dst is not set and because it will probably
		 * get confused by the destination IP address.
		 */
		skb->dev = dev_out;
		dev_direct_send(skb);
		dev_put(dev_out);
		return NF_STOLEN;

	default:
		/* Unexpected error */
		dev_put(dev_out);
		return NF_DROP;
	}
}

/* Re-inject the packet on a user-specified input interface via netif_rx(). */
static unsigned int route_iif(const struct ipt_route_target_info *route_info,
			      struct sk_buff *skb)
{
	struct net_device *dev_in = NULL;

	/* Getting the current interface index.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	if (!(dev_in = dev_get_by_name(&init_net, route_info->iif))) {
#else
	if (!(dev_in = dev_get_by_name(route_info->iif))) {
#endif
		if (net_ratelimit())
			DEBUGP("ipt_ROUTE: iif interface %s not found\n",
			       route_info->iif);
		return NF_DROP;
	}

	skb->dev = dev_in;
	dst_release(skb->dst);
	skb->dst = NULL;

	netif_rx(skb);
	dev_put(dev_in);
	return NF_STOLEN;
}

/* Route via a user-specified gateway and transmit directly. */
static unsigned int route_gw(const struct ipt_route_target_info *route_info,
			     struct sk_buff *skb)
{
	if (route(skb, 0, route_info) != 1)
		return NF_DROP;

	if (route_info->flags & IPT_ROUTE_CONTINUE)
		return IPT_CONTINUE;

	ip_direct_send(skb);
	return NF_STOLEN;
}

/* To detect and deter routed packet loopback when using the --tee option,
 * we take a page out of the raw.patch book: on the copied skb, we set up
 * a fake ->nfct entry, pointing to the local &route_tee_track. We skip
 * routing packets when we see they already have that ->nfct.
 */
static struct nf_conn route_tee_track;

/* xt_target entry point; signature varies with kernel version. */
static unsigned int
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
target(struct sk_buff **pskb,
       unsigned int hooknum,
       const struct net_device *in,
       const struct net_device *out,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
target(struct sk_buff *skb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
target(struct sk_buff *skb, const struct xt_target_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	const struct ipt_route_target_info *route_info = targinfo;
#else
	const struct ipt_route_target_info *route_info = par->targinfo;
	unsigned int hooknum = par->hooknum;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	struct sk_buff *skb = *pskb;
#endif
	unsigned int res;

	if (skb->nfct == &route_tee_track.ct_general) {
		/* Loopback - a packet we already routed, is to be
		 * routed another time. Avoid that, now.
		 */
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: loopback - DROP!\n");
		return NF_DROP;
	}

	/* If we are at PREROUTING or INPUT hook
	 * the TTL isn't decreased by the IP stack
	 */
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
		struct iphdr *iph = ip_hdr(skb);
#else
		struct iphdr *iph = skb->nh.iph;
#endif

		if (iph->ttl <= 1) {
			/* TTL expired: answer with ICMP time exceeded if the
			 * packet would route back out its input device. */
			struct rtable *rt;
			struct flowi fl = {
				.oif = 0,
				.nl_u = {
					.ip4_u = {
						.daddr = iph->daddr,
						.saddr = iph->saddr,
						.tos = RT_TOS(iph->tos),
						.scope = ((iph->tos & RTO_ONLINK) ?
							  RT_SCOPE_LINK :
							  RT_SCOPE_UNIVERSE)
					}
				}
			};

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
			if (ip_route_output_key(&init_net, &rt, &fl)) {
#else
			if (ip_route_output_key(&rt, &fl)) {
#endif
				return NF_DROP;
			}

			if (skb->dev == rt->u.dst.dev) {
				/* Drop old route. */
				dst_release(skb->dst);
				skb->dst = &rt->u.dst;

				/* this will traverse normal stack, and
				 * thus call conntrack on the icmp packet */
				icmp_send(skb, ICMP_TIME_EXCEEDED,
					  ICMP_EXC_TTL, 0);
			}

			return NF_DROP;
		}

		/*
		 * If we are at INPUT the checksum must be recalculated since
		 * the length could change as the result of a defragmentation.
		 */
		if (hooknum == NF_INET_LOCAL_IN) {
			iph->ttl = iph->ttl - 1;
			iph->check = 0;
			iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
		} else {
			ip_decrease_ttl(iph);
		}
	}

	if ((route_info->flags & IPT_ROUTE_TEE)) {
		/*
		 * Copy the skb, and route the copy. Will later return
		 * IPT_CONTINUE for the original skb, which should continue
		 * on its way as if nothing happened. The copy should be
		 * independantly delivered to the ROUTE --gw.
		 */
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit())
				DEBUGP(KERN_DEBUG "ipt_ROUTE: copy failed!\n");
			return IPT_CONTINUE;
		}
	}

	/* Tell conntrack to forget this packet since it may get confused
	 * when a packet is leaving with dst address == our address.
	 * Good idea ? Dunno. Need advice.
	 *
	 * NEW: mark the skb with our &route_tee_track, so we avoid looping
	 * on any already routed packet.
	 */
	if (!(route_info->flags & IPT_ROUTE_CONTINUE)) {
		nf_conntrack_put(skb->nfct);
		skb->nfct = &route_tee_track.ct_general;
		skb->nfctinfo = IP_CT_NEW;
		nf_conntrack_get(skb->nfct);
	}

	/* Dispatch on whichever routing key the rule supplied. */
	if (route_info->oif[0] != '\0') {
		res = route_oif(route_info, skb);
	} else if (route_info->iif[0] != '\0') {
		res = route_iif(route_info, skb);
	} else if (route_info->gw) {
		res = route_gw(route_info, skb);
	} else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: no parameter !\n");
		res = IPT_CONTINUE;
	}

	/* With --tee the original packet always continues traversal. */
	if ((route_info->flags & IPT_ROUTE_TEE))
		res = IPT_CONTINUE;

	return res;
}

/* Rule sanity check: only valid in the mangle table, on known hooks;
 * signature varies with kernel version. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
static int checkentry(const char *tablename,
		      const struct ipt_entry *e,
		      void *targinfo,
		      unsigned int targinfosize,
		      unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static int checkentry(const char *tablename,
		      const void *e,
		      void *targinfo,
		      unsigned int targinfosize,
		      unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static int checkentry(const char *tablename,
		      const void *e,
		      const struct xt_target *target,
		      void *targinfo,
		      unsigned int targinfosize,
		      unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
static int checkentry(const char *tablename,
		      const void *e,
		      const struct xt_target *target,
		      void *targinfo,
		      unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static bool checkentry(const char *tablename,
		       const void *e,
		       const struct xt_target *target,
		       void *targinfo,
		       unsigned int hook_mask)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static bool checkentry(const struct xt_tgchk_param *par)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	const char *tablename = par->table;
	unsigned int hook_mask = par->hook_mask;
#endif

	if (strcmp(tablename, "mangle") != 0) {
		printk("ipt_ROUTE: bad table `%s', use the `mangle' table.\n",
		       tablename);
		return 0;
	}

	if (hook_mask & ~((1 << NF_INET_PRE_ROUTING) |
			  (1 << NF_INET_LOCAL_IN) |
			  (1 << NF_INET_FORWARD) |
			  (1 << NF_INET_LOCAL_OUT) |
			  (1 << NF_INET_POST_ROUTING))) {
		printk("ipt_ROUTE: bad hook\n");
		return 0;
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
	if (targinfosize != IPT_ALIGN(sizeof(struct ipt_route_target_info))) {
		printk(KERN_WARNING "ipt_ROUTE: targinfosize %u != %Zu\n",
		       targinfosize,
		       IPT_ALIGN(sizeof(struct ipt_route_target_info)));
		return 0;
	}
#endif

	return 1;
}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
static struct ipt_target xt_route_reg = {
#else
static struct ipt_target ipt_route_reg = {
#endif
	.name = "ROUTE",
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	.family = AF_INET,
#endif
	.target = target,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
	.targetsize = sizeof(struct ipt_route_target_info),
#endif
	.checkentry = checkentry,
	.me = THIS_MODULE,
};

static int __init init(void)
{
	/* Set up fake conntrack (stolen from raw.patch):
	   - to never be deleted, not in any hashes */
	atomic_set(&route_tee_track.ct_general.use, 1);
	/* - and look it like as a confirmed connection */
	set_bit(IPS_CONFIRMED_BIT, &route_tee_track.status);
	/* Initialize fake conntrack so that NAT will skip it */
	route_tee_track.status |= IPS_NAT_DONE_MASK;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	return xt_register_target(&xt_route_reg);
#else
	return ipt_register_target(&ipt_route_reg);
#endif
}

static void __exit fini(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	xt_unregister_target(&xt_route_reg);
#else
	ipt_unregister_target(&ipt_route_reg);
#endif
}

module_init(init);
module_exit(fini);