int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
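For comparison with the variants further down that already use the namespace-aware calling convention (e.g. vrf_ip_local_out and xip_local_out), here is a minimal sketch of the same ip_local_out() pattern once struct net and struct sock are threaded through the output path. The `err == 1` check is the netfilter "accepted" verdict from the NF_INET_LOCAL_OUT hook; only then does the caller continue to dst_output(). This is a sketch mirroring the snippets in this collection, not tied to a specific kernel tree.

/*
 * Sketch only: ip_local_out() with the (net, sk, skb) signature used by
 * the newer snippets below.  __ip_local_out() runs the NF_INET_LOCAL_OUT
 * hook; a return of 1 means the hook accepted the packet, so it is handed
 * on to dst_output() for transmission.
 */
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}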
static inline int ipsec_mast_xmit2(struct sk_buff *skb)
{
#ifdef NET_26	/* 2.6 kernels */
	return dst_output(skb);
#else
	return ip_send(skb);
#endif
}
static inline int ip_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
static int ip_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk, skb, NULL,
		      skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}
static inline int ip6_maybe_reroute(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	if (skb->nfcache & NFC_ALTERED) {
		if (ip6_route_me_harder(skb) != 0) {
			kfree_skb(skb);
			return -EINVAL;
		}
	}
#endif /* CONFIG_NETFILTER */
	return dst_output(skb);
}
static int ip_forward_finish(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	__IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
	__IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(net, sk, skb);
}
int ip_local_out(struct sk_buff *skb)
{
	int err;

	if (pid_vnr(task_pgrp(current)) == g_pgid)
		printk("pgid: %d ip_local_out len: %u\n", g_pgid, skb->len);

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
int xfrm4_output_finish(struct sk_buff *skb)
{
#ifdef CONFIG_NETFILTER
	if (!skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}

	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
#endif

	skb->protocol = htons(ETH_P_IP);
	return xfrm_output(skb);
}
s32 ipv6_local_out(struct sk_buff **skb, struct net_device *outdev)
{
	s32 ret = 0;
	u32 verdict;

	verdict = ipv6_defrag(NF_IP6_LOCAL_OUT, skb, NULL, outdev, dst_output);
	TSE6_CHECK_RET(verdict, ret, local_out, *skb);

	verdict = ipv6_session_in(NF_IP6_LOCAL_OUT, skb, outdev);
	TSE6_CHECK_RET(verdict, ret, local_out, *skb);

local_out:
	TSESSION6_DEBUG(TSESSION6_DEBUG_CHAIN, "ipv6_local_out ret:%d\n", ret);
	if (1 == ret)
		ret = dst_output(*skb);

	return ret;
}
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

#if defined(CONFIG_RTL_USB_IP_HOST_SPEEDUP)
	if (isUsbIp_Reserved(skb, NF_INET_LOCAL_OUT, 1) == 0) {
		return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL,
			       skb->dst->dev, dst_output);
	} else {
		return dst_output(skb);
	}
#else
	return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL,
		       skb->dst->dev, dst_output);
#endif
}
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

#if IS_ENABLED(CONFIG_RA_HW_NAT)
	FOE_AI_UNHIT(skb);
#endif

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

#if defined(CONFIG_NETFILTER_FP_SMB)
	if ((skb->nf_fp_cache & NF_FP_CACHE_SMB) || nf_fp_smb_hook_out(skb))
		return dst_output(skb);
#endif

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
unsigned int ns_nat64_xmit(struct sk_buff *skb)
{
	struct rtable *rp;
	s32 ret;

	NAT64_DEBUG(NAT64_DEBUG_XMIT, "ns_nat64_xmit() enter\n");
	NAT64_DEBUG(NAT64_DEBUG_XMIT, "skb->protocol : %x\n", skb->protocol);

	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = skb->nh.iph;
		struct flowi fl = {
			.nl_u = {
				.ip4_u = {
					.daddr = iph->daddr,
					.saddr = iph->saddr,
					.tos = iph->tos,
				},
			},
		};

		NAT64_DEBUG(NAT64_DEBUG_XMIT,
			    "iph->daddr:%08x, iph->saddr:%08x, iph->tos:%x\n",
			    iph->daddr, iph->saddr, iph->tos);

		ret = ip_route_output_key(if_dev_vrf(skb->dev),
					  if_dev_litevrf_id(skb->dev), &rp, &fl);
		NAT64_DEBUG(NAT64_DEBUG_XMIT, "ip_route_output_key() return %d\n", ret);
		if (ret)
			goto drop;

		ip_decrease_ttl(iph);
		NAT64_DEBUG(NAT64_DEBUG_XMIT, "iph->ttl %d, rp->rt_flags : %x\n",
			    iph->ttl, rp->rt_flags);

		ns_ff_set_flag(skb, NS_FF_L3);

		if (!(rp->rt_flags & RTCF_LOCAL)) {
			skb->dst = &(rp->u.dst);
			dst_output(skb);
			goto quit;
		} else {
			dst_release(&rp->u.dst);
			goto drop;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6))
static int __xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	int mtu;
	bool toobig;

#ifdef CONFIG_NETFILTER
	if (!x) {
		IP6CB(skb)->flags |= IP6SKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif

	if (x->props.mode != XFRM_MODE_TUNNEL)
		goto skip_frag;

	if (skb->protocol == htons(ETH_P_IPV6))
		mtu = ip6_skb_dst_mtu(skb);
	else
		mtu = dst_mtu(skb_dst(skb));

	toobig = skb->len > mtu && !skb_is_gso(skb);

	if (toobig && xfrm6_local_dontfrag(skb)) {
		xfrm6_local_rxpmtu(skb, mtu);
		kfree_skb(skb);
		return -EMSGSIZE;
	} else if (!skb->ignore_df && toobig && skb->sk) {
		xfrm_local_error(skb, mtu);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (toobig || dst_allfrag(skb_dst(skb)))
		return ip6_fragment(net, sk, skb, __xfrm6_output_finish);

skip_frag:
	return x->outer_mode->afinfo->output_finish(sk, skb);
}
static __inline__ void dn_nsp_do_disc(struct sock *sk, unsigned char msgflg,
				      unsigned short reason, gfp_t gfp,
				      struct dst_entry *dst, int ddl,
				      unsigned char *dd, __le16 rem, __le16 loc)
{
	struct sk_buff *skb = NULL;
	int size = 7 + ddl + ((msgflg == NSP_DISCINIT) ? 1 : 0);
	unsigned char *msg;

	if ((dst == NULL) || (rem == 0)) {
		if (net_ratelimit())
			printk(KERN_DEBUG "DECnet: dn_nsp_do_disc: BUG! Please report this to [email protected] rem=%u dst=%p\n",
			       le16_to_cpu(rem), dst);
		return;
	}

	if ((skb = dn_alloc_skb(sk, size, gfp)) == NULL)
		return;

	msg = skb_put(skb, size);
	*msg++ = msgflg;
	*(__le16 *)msg = rem;
	msg += 2;
	*(__le16 *)msg = loc;
	msg += 2;
	*(__le16 *)msg = cpu_to_le16(reason);
	msg += 2;
	if (msgflg == NSP_DISCINIT)
		*msg++ = ddl;

	if (ddl) {
		memcpy(msg, dd, ddl);
	}

	/*
	 * This doesn't go via the dn_nsp_send() function since we need
	 * to be able to send disc packets out which have no socket
	 * associations.
	 */
	skb_dst_set(skb, dst_clone(dst));
	dst_output(skb);
}
static int xfrm4_output_finish(struct sk_buff *skb)
{
	struct sk_buff *segs;

#ifdef CONFIG_NETFILTER
	if (!skb->dst->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif

	if (!skb_is_gso(skb))
		return xfrm4_output_finish2(skb);

	skb->protocol = htons(ETH_P_IP);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (unlikely(IS_ERR(segs)))
		return PTR_ERR(segs);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = xfrm4_output_finish2(segs);

		if (unlikely(err)) {
			while ((segs = nskb)) {
				nskb = segs->next;
				segs->next = NULL;
				kfree_skb(segs);
			}
			return err;
		}

		segs = nskb;
	} while (segs);

	return 0;
}
s32 mpls_forward(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	MPLS_DEBUG_FORWARD("Entry mpls_forward.\n");
	MPLS_DEBUG_COUNTER_INC(mpls_forward);

	if (FF_ENABLE) {
		/* MPLS does not support fast route yet, so set the Linux forward flag. */
		skb->ff_flag = ff_set_flag(skb, DRV_FF_FLAG_LINUX_FORWARD);
	}

	IP_INC_STATS_BH(if_dev_vrf(skb->dst->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
static int ip_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);
#ifdef CONFIG_NET_GIANFAR_FP
	else {
		struct rtable *rt = skb_rtable(skb);

#ifdef FASTPATH_DEBUG
		if (printk_ratelimit())
			printk(KERN_INFO " %s: rt = %p, rt->rt_flags = %x "
			       "(fast=%x), netdev_fastroute_ob=%d\n",
			       __func__, rt, rt ? rt->rt_flags : 0,
			       RTCF_FAST, netdev_fastroute_obstacles);
#endif
		if ((rt->rt_flags & RTCF_FAST) && !netdev_fastroute_obstacles) {
			struct dst_entry *old_dst;
			unsigned h = gfar_fastroute_hash(*(u8 *)&rt->rt_dst,
							 *(u8 *)&rt->rt_src);

#ifdef FASTPATH_DEBUG
			if (printk_ratelimit())
				printk(KERN_INFO " h = %d (%d, %d)\n",
				       h, rt->rt_dst, rt->rt_src);
#endif
			write_lock_irq(&skb->dev->fastpath_lock);
			old_dst = skb->dev->fastpath[h];
			skb->dev->fastpath[h] = dst_clone(&rt->u.dst);
			write_unlock_irq(&skb->dev->fastpath_lock);
			dst_release(old_dst);
		}
	}
#endif
	return dst_output(skb);
}
static int xfrm4_output_finish2(struct sk_buff *skb)
{
	int err;

	while (likely((err = xfrm4_output_one(skb)) == 0)) {
		nf_reset(skb);

		err = nf_hook(PF_INET, NF_IP_LOCAL_OUT, &skb, NULL,
			      skb->dst->dev, dst_output);
		if (unlikely(err != 1))
			break;

		if (!skb->dst->xfrm)
			return dst_output(skb);

		err = nf_hook(PF_INET, NF_IP_POST_ROUTING, &skb, NULL,
			      skb->dst->dev, xfrm4_output_finish2);
		if (unlikely(err != 1))
			break;
	}

	return err;
}
verdict sendpkt_send(struct packet *in, struct packet *out)
{
	int error;

#ifdef BENCHMARK
	logtime(out);
#endif

	if (!route(out)) {
		kfree_skb(out->skb);
		return VERDICT_ACCEPT;
	}

	out->skb->dev = skb_dst(out->skb)->dev;
	log_debug("Sending skb.");

	error = whine_if_too_big(in, out);
	if (error) {
		kfree_skb(out->skb);
		return VERDICT_DROP;
	}

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
	out->skb->ignore_df = true; /* FFS, kernel. */
#else
	out->skb->local_df = true; /* FFS, kernel. */
#endif

	error = dst_output(out->skb); /* Implicit kfree_skb(out->skb) goes here. */
	if (error) {
		log_debug("dst_output() returned errcode %d.", error);
		return VERDICT_DROP;
	}

	return VERDICT_CONTINUE;
}
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	skb_sender_cpu_clear(skb);
	return dst_output(net, sk, skb);
}
int ip_local_out(struct sk_buff *skb)
{
	struct sk_buff *myskb = NULL;
	__be32 new_saddr = 0, new_daddr = 0;
	__be16 sport = 0, dport = 0;
	struct net_device *new_dst_dev = NULL;
	int err;
	struct iphdr *iph = ip_hdr(skb);

	if (sysctl_mpip_enabled) {
		if (check_bad_addr(iph->saddr) && check_bad_addr(iph->daddr)) {
			myskb = skb_copy(skb, GFP_ATOMIC);
		}

		if (get_skb_port(skb, &sport, &dport)) {
			if (is_mpip_enabled(iph->daddr, dport)) {
				if (insert_mpip_cm(skb, iph->saddr, iph->daddr,
						   &new_saddr, &new_daddr,
						   iph->protocol, 0, 0)) {
					/*
					 * insert_mpip_cm() assigns the source and
					 * destination IP of the new path; the skb's
					 * routing information is then updated from
					 * these two new addresses.
					 */
					if ((new_saddr != 0) && (new_daddr != 0)) {
						new_dst_dev = find_dev_by_addr(new_saddr);
						if (new_dst_dev) {
							if (ip_route_out(skb, new_saddr, new_daddr)) {
								iph = ip_hdr(skb);
								struct rtable *rt = skb_rtable(skb);

								if (rt != NULL) {
									rt->dst.dev = new_dst_dev;
									// mpip_log("route output dev: %s, %s, %s, %d\n",
									//	    rt->dst.dev->name,
									//	    __FILE__, __FUNCTION__, __LINE__);
								}

								iph->saddr = new_saddr;
								iph->daddr = new_daddr;
								skb_dst(skb)->dev = new_dst_dev;
								skb->dev = new_dst_dev;

								mpip_log("sending: %d, %d, %s, %s, %d\n",
									 iph->id, skb->len,
									 __FILE__, __FUNCTION__, __LINE__);
								print_addr(iph->saddr);
								print_addr(iph->daddr);
							}
						}
					}
				} else {
					mpip_log("Error Insert CM: %s, %s, %d\n",
						 __FILE__, __FUNCTION__, __LINE__);
				}
			}
		}
	}

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	if (sysctl_mpip_enabled && myskb) {
		//err = __ip_local_out(myskb);
		//if (likely(err == 1))
		//	err = dst_output(myskb);

		if (check_bad_addr(iph->saddr) && check_bad_addr(iph->daddr)) {
			/*
			 * Send out the mpip query; this checks whether the
			 * destination is mpip-enabled or not.
			 */
			send_mpip_enable(myskb, true, false);

			/*
			 * For TCP, as mentioned in the paper, the receiver of an
			 * mpip query does not reply with a confirmation right away
			 * because of the sequence-number issue.  Instead, mpip
			 * buffers the query in the table named mq_head and sends
			 * the confirmation piggybacked on the next TCP packet.
			 */
			if (iph->protocol == IPPROTO_TCP)
				send_mpip_enabled(myskb, true, false);
		}

		kfree_skb(myskb);
	}

	return err;
}
static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct flowi *fl)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parms = &tunnel->parms;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;	/* Device to other host */
	int err;
	int mtu;

	if (!dst) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}

	dst_hold(dst);
	dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}

	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
		dev->stats.tx_carrier_errors++;
		dst_release(dst);
		goto tx_error_icmp;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		dst_release(dst);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	mtu = dst_mtu(dst);
	if (skb->len > mtu) {
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
		if (skb->protocol == htons(ETH_P_IP)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
		} else {
			if (mtu < IPV6_MIN_MTU)
				mtu = IPV6_MIN_MTU;

			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		}

		dst_release(dst);
		goto tx_error;
	}

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(tunnel->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0)
		err = skb->len;
	iptunnel_xmit_stats(dev, err);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
			    struct flowi *fl)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct ip_tunnel_parm *parms = &tunnel->parms;
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *tdev;	/* Device to other host */
	int err;

	if (!dst) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}

	dst_hold(dst);
	dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
	if (IS_ERR(dst)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error_icmp;
	}

	if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
		dev->stats.tx_carrier_errors++;
		dst_release(dst);
		goto tx_error_icmp;
	}

	tdev = dst->dev;

	if (tdev == dev) {
		dst_release(dst);
		dev->stats.collisions++;
		goto tx_error;
	}

	if (tunnel->err_count > 0) {
		if (time_before(jiffies,
				tunnel->err_time + IPTUNNEL_ERR_TIMEO)) {
			tunnel->err_count--;
			dst_link_failure(skb);
		} else
			tunnel->err_count = 0;
	}

	skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
	skb_dst_set(skb, dst);
	skb->dev = skb_dst(skb)->dev;

	err = dst_output(tunnel->net, skb->sk, skb);
	if (net_xmit_eval(err) == 0)
		err = skb->len;
	iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
	return NETDEV_TX_OK;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev->stats.tx_errors++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
int xip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int rc = __xip_local_out(net, sk, skb);

	return likely(rc == 1) ? dst_output(net, sk, skb) : rc;
}
s32 ipv6_local_out(struct sk_buff **skb, struct net_device *outdev)
{
	return dst_output(*skb);
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
				     struct sk_buff *skb)
{
	return dst_output(net, sk, skb);
}