bool send_packet_ipv4(struct sk_buff *skb_in, struct sk_buff *skb_out)
{
	struct rtable *routing_table;
	int error;

	skb_out->protocol = htons(ETH_P_IP);

	routing_table = route_packet_ipv4(skb_out);
	if (!routing_table) {
		kfree_skb(skb_out);
		return false;
	}

	skb_out->dev = routing_table->dst.dev;
	skb_dst_set(skb_out, (struct dst_entry *) routing_table);

	if (skb_in) {
		ipv4_mtu_hack(skb_in, skb_out);
		if (!ipv4_validate_packet_len(skb_in, skb_out)) {
			kfree_skb(skb_out);
			return false;
		}
	}

	log_debug("Sending packet via device '%s'...", skb_out->dev->name);

	error = ip_local_out(skb_out); /* Send. */
	if (error) {
		log_err(ERR_SEND_FAILED, "ip_local_out() failed. Code: %d. Cannot send packet.", error);
		return false;
	}

	return true;
}
static inline int serval_ip_local_out(struct sk_buff *skb)
{
	int err;

#if defined(OS_LINUX_KERNEL)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25))
	err = ip_local_out(skb);
#else
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
#endif
#else /* OS_USER */
	/* Calculate checksum */
	ip_send_check(ip_hdr(skb));

	err = dev_queue_xmit(skb);
#endif
	if (err < 0) {
		LOG_ERR("packet_xmit failed err=%d\n", err);
	}

	return err;
}
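/*
 * The call sites collected here use two different ip_local_out() prototypes:
 * older kernels take only the skb, while newer ones (see the rxe, synproxy,
 * nf_dup_ipv4, VRF and later iptunnel_xmit examples below) also take a
 * struct net and a struct sock. A minimal version-portable wrapper in the
 * spirit of the Serval one above could look like this sketch; the wrapper
 * name compat_ip_local_out and the 4.4 version cutoff are assumptions, not
 * taken from any of these sources.
 */
static inline int compat_ip_local_out(struct net *net, struct sock *sk,
				      struct sk_buff *skb)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)
	return ip_local_out(net, sk, skb);
#else
	(void)net;
	(void)sk;
	return ip_local_out(skb);	/* old single-argument form */
#endif
}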
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	int err;

	skb->destructor = rxe_skb_tx_dtor;
	skb->sk = pkt->qp->sk->sk;

	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	if (skb->protocol == htons(ETH_P_IP)) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", skb->protocol);
		atomic_dec(&pkt->qp->skb_out);
		rxe_drop_ref(pkt->qp);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	return 0;
}
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_tee_tginfo *info = par->targinfo;
	struct iphdr *iph;

	if (percpu_read(tee_active))
		return XT_CONTINUE;

	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

#ifdef WITH_CONNTRACK
	nf_conntrack_put(skb->nfct);
	skb->nfct     = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif

	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (tee_tg_route4(skb, info)) {
		percpu_write(tee_active, true);
		ip_local_out(skb);
		percpu_write(tee_active, false);
	} else {
		kfree_skb(skb);
	}
	return XT_CONTINUE;
}
int nat64_send_packet_ipv4(struct sk_buff *skb)
{
	// Begin Ecdysis (nat64_output_ipv4)
	struct iphdr *iph = ip_hdr(skb);
	struct flowi fl;
	struct rtable *rt;

	skb->protocol = htons(ETH_P_IP);

	memset(&fl, 0, sizeof(fl));
	fl.fl4_dst = iph->daddr;
	fl.fl4_tos = RT_TOS(iph->tos);
	fl.proto   = skb->protocol;

	if (ip_route_output_key(&init_net, &rt, &fl)) {
		pr_warning("nf_NAT64: ip_route_output_key failed");
		return -EINVAL;
	}
	if (!rt) {
		pr_warning("nf_NAT64: rt null");
		return -EINVAL;
	}

	skb->dev = rt->dst.dev;
	skb_dst_set(skb, (struct dst_entry *)rt);

	if (ip_local_out(skb)) {
		pr_warning("nf_NAT64: ip_local_out failed");
		return -EINVAL;
	}
	return 0;
	// End Ecdysis (nat64_output_ipv4)
}
static int output_ip(struct sk_buff *skb)
{
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

#undef ip_local_out
	return ip_local_out(skb);
}
int nat64_send_packet_ipv4(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	struct flowi fl;
	struct rtable *rt;

	skb->protocol = htons(ETH_P_IP);

	memset(&fl, 0, sizeof(fl));
	fl.u.ip4.daddr = iph->daddr;
	fl.flowi_tos   = RT_TOS(iph->tos);
	fl.flowi_proto = skb->protocol;

	rt = ip_route_output_key(&init_net, &fl.u.ip4);
	if (!rt || IS_ERR(rt)) {
		pr_warning("NAT64: nat64_send_packet - rt is null or an error");
		if (IS_ERR(rt))
			pr_warning("rt -1");
		return -1;
	}

	skb->dev = rt->dst.dev;
	skb_dst_set(skb, (struct dst_entry *)rt);

	if (ip_local_out(skb)) {
		pr_warning("nf_NAT64: ip_local_out failed");
		return -EINVAL;
	}
	return 0;
}
// Method to send the message to the destination VM using ipv4 and ip_local_out() function
static int cse536_sendPacket(char *data, size_t length)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct net *netw = &init_net;
	unsigned char *skbdata;

	// create sk_buff and add the user data to it
	skb = alloc_skb(sizeof(struct iphdr) + 4096, GFP_ATOMIC);
	skb_reserve(skb, sizeof(struct iphdr) + 1500);
	skbdata = skb_put(skb, length);
	memcpy(skbdata, data, length);

	// setup space and then add the ip header
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->daddr    = my_daddr;
	iph->saddr    = my_saddr;
	iph->protocol = IPPROTO_CSE536;
	iph->id       = htons(1);
	iph->tot_len  = htons(skb->len);

	// get the destination route
	rt = ip_route_output(netw, my_daddr, my_saddr, 0, 0);
	skb_dst_set(skb, &rt->dst);

	return ip_local_out(skb);
}
static void
synproxy_send_tcp(struct net *net,
		  const struct sk_buff *skb, struct sk_buff *nskb,
		  struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
		  struct iphdr *niph, struct tcphdr *nth,
		  unsigned int tcp_hdr_size)
{
	nth->check = ~tcp_v4_check(tcp_hdr_size, niph->saddr, niph->daddr, 0);
	nskb->ip_summed   = CHECKSUM_PARTIAL;
	nskb->csum_start  = (unsigned char *)nth - nskb->head;
	nskb->csum_offset = offsetof(struct tcphdr, check);

	skb_dst_set_noref(nskb, skb_dst(skb));
	nskb->protocol = htons(ETH_P_IP);
	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
		goto free_nskb;

	if (nfct) {
		nskb->nfct     = nfct;
		nskb->nfctinfo = ctinfo;
		nf_conntrack_get(nfct);
	}

	ip_local_out(net, nskb->sk, nskb);
	return;

free_nskb:
	kfree_skb(nskb);
}
int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos      = tos;
	iph->daddr    = dst;
	iph->saddr    = src;
	iph->ttl      = ttl;
	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
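/*
 * A caller of the iptunnel_xmit() variant above is expected to have already
 * routed the outer destination and to pass in the resulting rtable. The
 * sketch below shows one plausible shape of such a caller; the function name
 * my_encap_xmit and the fixed TOS/TTL/DF choices are illustrative
 * assumptions, not taken from any of the surrounding sources.
 */
static int my_encap_xmit(struct sk_buff *skb, __be32 outer_src, __be32 outer_dst)
{
	struct flowi4 fl4 = {
		.daddr        = outer_dst,
		.saddr        = outer_src,
		.flowi4_proto = IPPROTO_IPIP,
	};
	struct rtable *rt;

	/* Route the outer destination; ERR_PTR() is returned on failure. */
	rt = ip_route_output_key(&init_net, &fl4);
	if (IS_ERR(rt))
		return PTR_ERR(rt);

	/* iptunnel_xmit() prepends the outer IPv4 header and calls
	 * ip_local_out(); df = htons(IP_DF) asks for no outer fragmentation. */
	return iptunnel_xmit(rt, skb, outer_src, outer_dst, IPPROTO_IPIP,
			     0 /* tos */, 64 /* ttl */, htons(IP_DF), false);
}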
int ip_queue_xmit(struct sk_buff *skb)
{
	//smallboy: We delete all the route here;
	//smallboy: ROUTE ROUTE ROUTE ROUTE !!!!!
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);
	struct net *pnet = sock_net(sk);
	struct netns_ipv4 *n_ipv4 = &pnet->ipv4;
	struct iphdr *iph;
	u32 ihl;
	s32 ret = 0;

	//ihl = sizeof(struct iphdr) + (inet_opt ? inet_opt->optlen : 0);
	ihl = sizeof(struct iphdr);

	skb_push(skb, ihl);
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);
	iph->version  = 4;		//smallboy: Attention for ipv6;
	iph->ihl      = ihl >> 2;
	iph->tos      = inet->tos;
	iph->tot_len  = htons(skb->len);
	iph->frag_off = htons(IP_DF);
	iph->ttl      = ip_select_ttl(sk);
	iph->protocol = sk->sk_protocol;
	iph->daddr    = inet->inet_daddr;
	iph->saddr    = inet->inet_rcv_saddr;	//smallboy: Attention here;

	ip_select_ident_more(iph, sk, (skb->gso_segs ?: 1) - 1);

	th->check = 0;
	//th->check = get_ipv4_udptcp_checksum(iph, th);
	iph->check = 0;
	th->check = get_ipv4_psd_sum(iph);
	//iph->check = ip_fast_csum(iph, iph->ihl);

	if (n_ipv4->aft_route_out) {
		ret = n_ipv4->aft_route_out(skb);
		if (ret < 0) {
			skb_reset_data_header(skb);
			return ret;
		}
	}

	ret = ip_local_out(skb);
	return ret;
}
static unsigned int
tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_tee_tginfo *info = par->targinfo;
	struct iphdr *iph;

	if (percpu_read(tee_active))
		return XT_CONTINUE;
	/*
	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
	 * the original skb, which should continue on its way as if nothing has
	 * happened. The copy should be independently delivered to the TEE
	 * --gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return XT_CONTINUE;

#ifdef WITH_CONNTRACK
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct     = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	/*
	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
	 * since the length could have changed as a result of defragmentation.
	 *
	 * We also decrease the TTL to mitigate potential TEE loops
	 * between two hosts.
	 *
	 * Set %IP_DF so that the original source is notified of a potentially
	 * decreased MTU on the clone route. IPv6 does this too.
	 */
	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (par->hooknum == NF_INET_PRE_ROUTING ||
	    par->hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (tee_tg_route4(skb, info)) {
		percpu_write(tee_active, true);
		ip_local_out(skb);
		percpu_write(tee_active, false);
	} else {
		kfree_skb(skb);
	}
	return XT_CONTINUE;
}
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_L3MDEV_SRC |
				FLOWI_FLAG_SKIP_NH_OIF,
		.daddr = ip4h->daddr,
	};

	if (vrf_send_v4_prep(skb, &fl4, vrf_dev))
		goto err;

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}
/*
 * Add an IP header to an skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr)
{
	int ret = 0;
	struct inet_sock *inet = inet_sk(sk);
	struct tcphdr *th = tcp_hdr(skb);
	struct net *pnet = sock_net(sk);
	struct netns_ipv4 *n_ipv4 = &pnet->ipv4;
	struct iphdr *iph;

	// Build the IP header.
	skb_push(skb, sizeof(struct iphdr));	//// + (opt ? opt->opt.optlen : 0)
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	//iph->tos    = 0; //inet->tos;
	iph->tos      = inet->tos;
	iph->tot_len  = htons(skb->len);
	iph->frag_off = htons(IP_DF);		//smallboy: must be set before ip_select_ident;
	ip_select_ident(skb, sk);
	iph->ttl      = ip_select_ttl(sk);
	iph->protocol = sk->sk_protocol;
	iph->daddr    = daddr;
	iph->saddr    = saddr;
	iph->check    = 0;
	//iph->check  = ip_fast_csum(iph, iph->ihl);

	th->check = 0;
	//th->check = get_ipv4_udptcp_checksum(iph, th);
	th->check = get_ipv4_psd_sum(iph);

	//skb->mark = sk->sk_mark;
	//fprintf(stderr, "TH:%u,src:%s,dst:%s,sport:%u,dport:%u,seq:%-12u,ack:%-12u,ipid:%-12u,len:%-12u,SYN:%u;PSH:%u;ACK:%u;FIN:%u;RST:%u; send!!!\n"
	//	, US_GET_LCORE(), trans_ip(iph->saddr), trans_ip(iph->daddr)
	//	, ntohs(th->source), ntohs(th->dest)
	//	, ntohl(th->seq), ntohl(th->ack_seq)
	//	, iph->id, skb->len, th->syn, th->psh, th->ack, th->fin, th->rst);

	if (n_ipv4->aft_route_out) {
		ret = n_ipv4->aft_route_out(skb);
		if (ret < 0)
			return US_ENETUNREACH;
	}

	// Send it out.
	return ip_local_out(skb);
}
int iptunnel_xmit(struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	/* inlined skb_scrub_packet() */
	if (xnet)
		skb_orphan(skb);
	skb->pkt_type = PACKET_HOST;
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,0,0)
	skb->skb_iif = 0;
#endif
	skb_dst_drop(skb);
	skb->mark = 0;
	secpath_reset(skb);
	nf_reset(skb);
	skb->rxhash = 0;

	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb. It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos      = tos;
	iph->daddr    = dst;
	iph->saddr    = src;
	iph->ttl      = ttl;
	tunnel_ip_select_ident(skb,
			       (const struct iphdr *)skb_inner_network_header(skb),
			       &rt_dst(rt));

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
void nf_dup_ipv4(struct net *net, struct sk_buff *skb, unsigned int hooknum,
		 const struct in_addr *gw, int oif)
{
	struct iphdr *iph;

	if (this_cpu_read(nf_skb_duplicated))
		return;
	/*
	 * Copy the skb, and route the copy. Will later return %XT_CONTINUE for
	 * the original skb, which should continue on its way as if nothing has
	 * happened. The copy should be independently delivered to the gateway.
	 */
	skb = pskb_copy(skb, GFP_ATOMIC);
	if (skb == NULL)
		return;

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	/* Avoid counting cloned packets towards the original connection. */
	nf_conntrack_put(skb->nfct);
	skb->nfct     = &nf_ct_untracked_get()->ct_general;
	skb->nfctinfo = IP_CT_NEW;
	nf_conntrack_get(skb->nfct);
#endif
	/*
	 * If we are in PREROUTING/INPUT, the checksum must be recalculated
	 * since the length could have changed as a result of defragmentation.
	 *
	 * We also decrease the TTL to mitigate potential loops between two
	 * hosts.
	 *
	 * Set %IP_DF so that the original source is notified of a potentially
	 * decreased MTU on the clone route. IPv6 does this too.
	 */
	iph = ip_hdr(skb);
	iph->frag_off |= htons(IP_DF);
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN)
		--iph->ttl;
	ip_send_check(iph);

	if (nf_dup_ipv4_route(net, skb, gw, oif)) {
		__this_cpu_write(nf_skb_duplicated, true);
		ip_local_out(net, skb->sk, skb);
		__this_cpu_write(nf_skb_duplicated, false);
	} else {
		kfree_skb(skb);
	}
}
void rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		       __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
		       __be16 df, bool xnet)
{
	struct net_device *dev = skb->dev;
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, &rt->dst);
#if 0
	/* Do not clear ovs_skb_cb. It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos      = tos;
	iph->daddr    = dst;
	iph->saddr    = src;
	iph->ttl      = ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
	__ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#elif defined(HAVE_IP_SELECT_IDENT_USING_NET)
	__ip_select_ident(dev_net(rt->dst.dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
#else
	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;

	iptunnel_xmit_stats(dev, pkt_len);
}
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_hash(skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb. It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos      = tos;
	iph->daddr    = dst;
	iph->saddr    = src;
	iph->ttl      = ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
	__ip_select_ident(iph, &rt_dst(rt), (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#else
	__ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
int rpl_ip_local_out(struct sk_buff *skb)
{
	int ret = NETDEV_TX_OK;
	int id = -1;

	if (skb_is_gso(skb)) {
		struct iphdr *iph;

		iph = ip_hdr(skb);
		id = ntohs(iph->id);
		skb = tnl_skb_gso_segment(skb, 0, false);
		if (!skb || IS_ERR(skb))
			return 0;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		int err;

		err = skb_checksum_help(skb);
		if (unlikely(err))
			return 0;
	}

	while (skb) {
		struct sk_buff *next_skb = skb->next;
		struct iphdr *iph;
		int err;

		skb->next = NULL;

		iph = ip_hdr(skb);
		if (id >= 0)
			iph->id = htons(id++);

		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

#undef ip_local_out
		err = ip_local_out(skb);
		if (unlikely(net_xmit_eval(err)))
			ret = err;

		skb = next_skb;
	}

	return ret;
}
int iptunnel_xmit(struct net *net, struct rtable *rt, struct sk_buff *skb,
		  __be32 src, __be32 dst, __u8 proto,
		  __u8 tos, __u8 ttl, __be16 df)
{
	int pkt_len = skb->len;
	struct iphdr *iph;
	int err;

	nf_reset(skb);
	secpath_reset(skb);
	skb_clear_rxhash(skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt_dst(rt));
#if 0
	/* Do not clear ovs_skb_cb. It will be done in gso code. */
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

	/* Push down and install the IP header. */
	__skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos      = tos;
	iph->daddr    = dst;
	iph->saddr    = src;
	iph->ttl      = ttl;
	tunnel_ip_select_ident(skb,
			       (const struct iphdr *)skb_inner_network_header(skb),
			       &rt_dst(rt));

	err = ip_local_out(skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	return pkt_len;
}
void iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
		   __be32 src, __be32 dst, __u8 proto,
		   __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	int pkt_len = skb->len - skb_inner_network_offset(skb);
	struct net *net = dev_net(rt->dst.dev);
	struct net_device *dev = skb->dev;
	struct iphdr *iph;
	int err;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash_if_not_l4(skb);
	skb_dst_set(skb, &rt->dst);
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	/* Push down and install the IP header. */
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr) >> 2;
	iph->frag_off = ip_mtu_locked(&rt->dst) ? 0 : df;
	iph->protocol = proto;
	iph->tos      = tos;
	iph->daddr    = dst;
	iph->saddr    = src;
	iph->ttl      = ttl;
	__ip_select_ident(net, iph, skb_shinfo(skb)->gso_segs ?: 1);

	err = ip_local_out(net, sk, skb);
	if (unlikely(net_xmit_eval(err)))
		pkt_len = 0;
	iptunnel_xmit_stats(dev, pkt_len);
}
// this method will send the message to the destination machine using ipv4
static int cse536_sendmsg(char *data, size_t len)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct rtable *rt;
	struct net *net = &init_net;
	unsigned char *skbdata;

	// create and setup an sk_buff
	skb = alloc_skb(sizeof(struct iphdr) + 4096, GFP_ATOMIC);
	skb_reserve(skb, sizeof(struct iphdr) + 1500);
	skbdata = skb_put(skb, len);
	// skb->csum = csum_and_copy_from_user(data, skbdata, len, 0, &err);
	memcpy(skbdata, data, len);

	// setup and add the ip header
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = 0;
	iph->frag_off = 0;
	iph->ttl      = 64;
	iph->daddr    = cse536_daddr;
	iph->saddr    = cse536_saddr;
	iph->protocol = IPPROTO_CSE536;	// my protocol number
	iph->id       = htons(1);
	iph->tot_len  = htons(skb->len);

	// get the route; this seems to be necessary, it does not work without it
	rt = ip_route_output(net, cse536_daddr, cse536_saddr, 0, 0);
	skb_dst_set(skb, &rt->dst);

	//printk("skb data: %s", skbdata);
	return ip_local_out(skb);
}
//-----------------------------------------------------------------------------
static bool
_gtpurh_route_packet(struct sk_buff *skb_pP,
		     const struct xt_gtpurh_target_info *info_pP)
//-----------------------------------------------------------------------------
{
	int err = 0;
	struct rtable *rt = NULL;
	struct iphdr *iph_p = ip_hdr(skb_pP);
	int daddr = iph_p->daddr;
	struct flowi fl = {
		.u = {
			.ip4 = {
				.daddr        = daddr,
				.flowi4_tos   = RT_TOS(iph_p->tos),
				.flowi4_scope = RT_SCOPE_UNIVERSE,
			}
		}
	};

	//skb_pP->pkt_type = PACKET_OTHERHOST;
	skb_pP->pkt_type = PACKET_OUTGOING;

#if 0
	pr_info("GTPURH(%d): Routing packet: %d.%d.%d.%d --> %d.%d.%d.%d Proto: %d, Len: %d Mark: %u Packet type: %u\n",
		info_pP->action,
		iph_p->saddr & 0xFF,
		(iph_p->saddr & 0x0000FF00) >> 8,
		(iph_p->saddr & 0x00FF0000) >> 16,
		iph_p->saddr >> 24,
		iph_p->daddr & 0xFF,
		(iph_p->daddr & 0x0000FF00) >> 8,
		(iph_p->daddr & 0x00FF0000) >> 16,
		iph_p->daddr >> 24,
		iph_p->protocol, ntohs(iph_p->tot_len),
		skb_pP->mark, skb_pP->pkt_type);
#endif

	rt = ip_route_output_key(&init_net, &fl.u.ip4);
	if (rt == NULL) {
		pr_info("GTPURH: Failed to route packet to dst 0x%x. Error: (%d)\n",
			fl.u.ip4.daddr, err);
		return GTPURH_FAILURE;
	}

#if 0
	if (rt->dst.dev) {
		pr_info("GTPURH: dst dev name %s\n", rt->dst.dev->name);
	} else {
		pr_info("GTPURH: dst dev NULL\n");
	}
#endif

	skb_pP->priority = rt_tos2priority(iph_p->tos);
	skb_dst_drop(skb_pP);
	skb_dst_set(skb_pP, &rt->dst);
	skb_pP->dev = skb_dst(skb_pP)->dev;

	// Send the GTPu message out
	ip_local_out(skb_pP);

	if (err == 0) {
		return GTPURH_SUCCESS;
	} else {
		return GTPURH_FAILURE;
	}
}
/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet, its
 *   destination will be set to cp->daddr. Most code of this function
 *   is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing the load balancer. This can greatly increase the
 *   scalability of virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
	struct rtable *rt;			/* Route to the other host */
	__be32 saddr;				/* Source for tunnel */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr *old_iph = ip_hdr(skb);
	u8 tos = old_iph->tos;
	__be16 df;
	struct iphdr *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int ret, local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				   IP_VS_RT_MODE_LOCAL |
				   IP_VS_RT_MODE_NON_LOCAL |
				   IP_VS_RT_MODE_CONNECT |
				   IP_VS_RT_MODE_TUNNEL, &saddr);
	if (local < 0)
		goto tx_error;
	if (local) {
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
	}

	rt = skb_rtable(skb);
	tdev = rt->dst.dev;

	/* Copy DF, reset fragment offset and MF */
	df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);

		if (!new_skb)
			goto tx_error;
		consume_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = skb->network_header;

	/* fix old IP header checksum */
	ip_send_check(old_iph);

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/*
	 *	Push down and install the IPIP header.
	 */
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr)>>2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_IPIP;
	iph->tos      = tos;
	iph->daddr    = cp->daddr.ip;
	iph->saddr    = saddr;
	iph->ttl      = old_iph->ttl;
	ip_select_ident(skb, &rt->dst, NULL);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
	if (ret == NF_ACCEPT)
		ip_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);
	rcu_read_unlock();

	LeaveFunction(10);

	return NF_STOLEN;

  tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet, its
 *   destination will be set to cp->daddr. Most code of this function
 *   is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing the load balancer. This can greatly increase the
 *   scalability of virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct net *net = skb_net(skb);
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct rtable *rt;			/* Route to the other host */
	__be32 saddr;				/* Source for tunnel */
	struct net_device *tdev;		/* Device to other host */
	__u8 next_protocol = 0;
	__u8 dsfield = 0;
	__u8 ttl = 0;
	__be16 df = 0;
	__be16 *dfp = NULL;
	struct iphdr *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int ret, local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt(cp->af, skb, cp->dest, cp->daddr.ip,
				   IP_VS_RT_MODE_LOCAL |
				   IP_VS_RT_MODE_NON_LOCAL |
				   IP_VS_RT_MODE_CONNECT |
				   IP_VS_RT_MODE_TUNNEL, &saddr, ipvsh);
	if (local < 0)
		goto tx_error;
	if (local) {
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
	}

	rt = skb_rtable(skb);
	tdev = rt->dst.dev;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

	/* We only care about the df field if sysctl_pmtu_disc(ipvs) is set */
	dfp = sysctl_pmtu_disc(ipvs) ? &df : NULL;
	skb = ip_vs_prepare_tunneled_skb(skb, cp->af, max_headroom,
					 &next_protocol, NULL, &dsfield,
					 &ttl, dfp);
	if (IS_ERR(skb))
		goto tx_error;

	skb = iptunnel_handle_offloads(
		skb, false, __tun_gso_type_mask(AF_INET, cp->af));
	if (IS_ERR(skb))
		goto tx_error;

	skb->transport_header = skb->network_header;

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/*
	 *	Push down and install the IPIP header.
	 */
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr)>>2;
	iph->frag_off = df;
	iph->protocol = next_protocol;
	iph->tos      = dsfield;
	iph->daddr    = cp->daddr.ip;
	iph->saddr    = saddr;
	iph->ttl      = ttl;
	ip_select_ident(net, skb, NULL);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->ignore_df = 1;

	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
	if (ret == NF_ACCEPT)
		ip_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);
	rcu_read_unlock();

	LeaveFunction(10);

	return NF_STOLEN;

  tx_error:
	if (!IS_ERR(skb))
		kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	const struct iphdr *oiph;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _otcph, *tcph;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
				 sizeof(_otcph), &_otcph);
	if (oth == NULL)
		return;

	/* No RST for RST. */
	if (oth->rst)
		return;

	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return;

	/* Check checksum */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
		return;
	oiph = ip_hdr(oldskb);

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);

	skb_reset_network_header(nskb);
	niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	niph->version  = 4;
	niph->ihl      = sizeof(struct iphdr) / 4;
	niph->tos      = 0;
	niph->id       = 0;
	niph->frag_off = htons(IP_DF);
	niph->protocol = IPPROTO_TCP;
	niph->check    = 0;
	niph->saddr    = oiph->daddr;
	niph->daddr    = oiph->saddr;

	skb_reset_transport_header(nskb);
	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	memset(tcph, 0, sizeof(*tcph));
	tcph->source = oth->dest;
	tcph->dest   = oth->source;
	tcph->doff   = sizeof(struct tcphdr) / 4;

	if (oth->ack)
		tcph->seq = oth->ack_seq;
	else {
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
				      oldskb->len - ip_hdrlen(oldskb) -
				      (oth->doff << 2));
		tcph->ack = 1;
	}

	tcph->rst = 1;
	tcph->check = ~tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
				    niph->daddr, 0);
	nskb->ip_summed   = CHECKSUM_PARTIAL;
	nskb->csum_start  = (unsigned char *)tcph - nskb->head;
	nskb->csum_offset = offsetof(struct tcphdr, check);

	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set_noref(nskb, skb_dst(oldskb));

	nskb->protocol = htons(ETH_P_IP);
	if (ip_route_me_harder(nskb, RTN_UNSPEC))
		goto free_nskb;

	niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

	ip_local_out(nskb);
	return;

 free_nskb:
	kfree_skb(nskb);
}
/* Send RST reply */
void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _oth;

	oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
	if (!oth)
		return;

	if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set_noref(nskb, skb_dst(oldskb));

	nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);

	skb_reserve(nskb, LL_MAX_HEADER);
	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
				   ip4_dst_hoplimit(skb_dst(nskb)));
	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);

	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
		goto free_nskb;

	niph = ip_hdr(nskb);

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* If we use ip_local_out for bridged traffic, the MAC source on
	 * the RST will be ours, instead of the destination's. This confuses
	 * some routers/firewalls, and they drop the packet. So we need to
	 * build the eth header using the original destination's MAC as the
	 * source, and send the RST packet directly.
	 */
	if (oldskb->nf_bridge) {
		struct ethhdr *oeth = eth_hdr(oldskb);

		nskb->dev = nf_bridge_get_physindev(oldskb);
		niph->tot_len = htons(nskb->len);
		ip_send_check(niph);
		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
				    oeth->h_source, oeth->h_dest,
				    nskb->len) < 0)
			goto free_nskb;
		dev_queue_xmit(nskb);
	} else
#endif
		ip_local_out(net, nskb->sk, nskb);

	return;

 free_nskb:
	kfree_skb(nskb);
}
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	const struct iphdr *oiph;
	struct iphdr *niph;
	const struct tcphdr *oth;
	struct tcphdr _otcph, *tcph;
	unsigned int addr_type;

	/* IP header checks: fragment. */
	if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
		return;

	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
				 sizeof(_otcph), &_otcph);
	if (oth == NULL)
		return;

	/* No RST for RST. */
	if (oth->rst)
		return;

	/* Check checksum */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
		return;
	oiph = ip_hdr(oldskb);

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
			 LL_MAX_HEADER, GFP_ATOMIC);
	if (!nskb)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);

	skb_reset_network_header(nskb);
	niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	niph->version  = 4;
	niph->ihl      = sizeof(struct iphdr) / 4;
	niph->tos      = 0;
	niph->id       = 0;
	niph->frag_off = htons(IP_DF);
	niph->protocol = IPPROTO_TCP;
	niph->check    = 0;
	niph->saddr    = oiph->daddr;
	niph->daddr    = oiph->saddr;

	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	memset(tcph, 0, sizeof(*tcph));
	tcph->source = oth->dest;
	tcph->dest   = oth->source;
	tcph->doff   = sizeof(struct tcphdr) / 4;

	if (oth->ack)
		tcph->seq = oth->ack_seq;
	else {
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
				      oldskb->len - ip_hdrlen(oldskb) -
				      (oth->doff << 2));
		tcph->ack = 1;
	}

	tcph->rst = 1;
	tcph->check = tcp_v4_check(sizeof(struct tcphdr),
				   niph->saddr, niph->daddr,
				   csum_partial(tcph,
						sizeof(struct tcphdr), 0));

	addr_type = RTN_UNSPEC;
	if (hook != NF_INET_FORWARD
#ifdef CONFIG_BRIDGE_NETFILTER
	    || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
#endif
	   )
		addr_type = RTN_LOCAL;

	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set(nskb, dst_clone(skb_dst(oldskb)));

	if (ip_route_me_harder(nskb, addr_type))
		goto free_nskb;

	niph->ttl       = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
	nskb->ip_summed = CHECKSUM_NONE;

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

	ip_local_out(nskb);
	return;

 free_nskb:
	kfree_skb(nskb);
}
/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet, its
 *   destination will be set to cp->daddr. Most code of this function
 *   is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing the load balancer. This can greatly increase the
 *   scalability of virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr *old_iph = ip_hdr(skb);
	u8 tos = old_iph->tos;
	__be16 df = old_iph->frag_off;
	struct iphdr *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int mtu;
	int ret;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				      RT_TOS(tos),
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL)))
		goto tx_error_icmp;
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
	}

	tdev = rt->dst.dev;

	mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
	if (mtu < 68) {
		IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
		goto tx_error_put;
	}
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	df |= (old_iph->frag_off & htons(IP_DF));

	if ((old_iph->frag_off & htons(IP_DF) &&
	     mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error_put;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb) ||
	    skb_shared(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			kfree_skb(skb);
			IP_VS_ERR_RL("%s(): no memory\n", __func__);
			return NF_STOLEN;
		}
		kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = skb->network_header;

	/* fix old IP header checksum */
	ip_send_check(old_iph);

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = sizeof(struct iphdr)>>2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_IPIP;
	iph->tos      = tos;
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->ttl      = old_iph->ttl;
	ip_select_ident(iph, &rt->dst, NULL);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ret = IP_VS_XMIT_TUNNEL(skb, cp);
	if (ret == NF_ACCEPT)
		ip_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);

	LeaveFunction(10);

	return NF_STOLEN;

  tx_error_icmp:
	dst_link_failure(skb);
  tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
  tx_error_put:
	ip_rt_put(rt);
	goto tx_error;
}
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	return ip_local_out(net, sk, skb);
}