static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
                               void *accel_priv,
                               select_queue_fallback_t fallback)
{
        struct xenvif *vif = netdev_priv(dev);
        unsigned int size = vif->hash.size;

        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE) {
                u16 index = fallback(dev, skb) % dev->real_num_tx_queues;

                /* Make sure there is no hash information in the socket
                 * buffer otherwise it would be incorrectly forwarded
                 * to the frontend.
                 */
                skb_clear_hash(skb);

                return index;
        }

        xenvif_set_skb_hash(vif, skb);

        if (size == 0)
                return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

        return vif->hash.mapping[skb_get_hash_raw(skb) % size];
}
static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
        unsigned int num_queues;
        u16 index;
        struct xenvif_rx_cb *cb;

        BUG_ON(skb->dev != dev);

        /* Drop the packet if queues are not set up.
         * This handler should be called inside an RCU read section
         * so we don't need to enter it here explicitly.
         */
        num_queues = READ_ONCE(vif->num_queues);
        if (num_queues < 1)
                goto drop;

        /* Obtain the queue to be used to transmit this packet */
        index = skb_get_queue_mapping(skb);
        if (index >= num_queues) {
                pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
                                    index, vif->dev->name);
                index %= num_queues;
        }
        queue = &vif->queues[index];

        /* Drop the packet if queue is not ready */
        if (queue->task == NULL ||
            queue->dealloc_task == NULL ||
            !xenvif_schedulable(vif))
                goto drop;

        if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
                struct ethhdr *eth = (struct ethhdr *)skb->data;

                if (!xenvif_mcast_match(vif, eth->h_dest))
                        goto drop;
        }

        cb = XENVIF_RX_CB(skb);
        cb->expires = jiffies + vif->drain_timeout;

        /* If there is no hash algorithm configured then make sure there
         * is no hash information in the socket buffer otherwise it
         * would be incorrectly forwarded to the frontend.
         */
        if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
                skb_clear_hash(skb);

        xenvif_rx_queue_tail(queue, skb);
        xenvif_kick_thread(queue);

        return NETDEV_TX_OK;

drop:
        vif->dev->stats.tx_dropped++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
{
        struct iphdr iph;
        u32 len;

        if (skb->protocol != htons(ETH_P_IP))
                return skb;

        /* skb_copy_bits() returns 0 on success and a negative value on
         * failure, so only bail out when the copy actually failed.
         */
        if (skb_copy_bits(skb, 0, &iph, sizeof(iph)) < 0)
                return skb;

        if (iph.ihl < 5 || iph.version != 4)
                return skb;

        len = ntohs(iph.tot_len);
        if (skb->len < len || len < (iph.ihl * 4))
                return skb;

        if (ip_is_fragment(&iph)) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (skb) {
                        if (!pskb_may_pull(skb, iph.ihl * 4))
                                return skb;
                        if (pskb_trim_rcsum(skb, len))
                                return skb;
                        memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                        if (ip_defrag(skb, user))
                                return NULL;
                        skb_clear_hash(skb);
                }
        }
        return skb;
}
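A hedged usage sketch (not part of the source): callers of ip_check_defrag() must treat a NULL return as "fragment handed to the reassembly queue, skb no longer ours". The snippet below is modelled on the AF_PACKET fanout defrag option; the surrounding context ('f' with a 'flags' field) is assumed.

        /* Sketch of a caller; IP_DEFRAG_AF_PACKET is a real defrag user id,
         * the rest of the context is assumed. */
        if (f->flags & PACKET_FANOUT_FLAG_DEFRAG) {
                skb = ip_check_defrag(skb, IP_DEFRAG_AF_PACKET);
                if (!skb)
                        return 0;       /* fragment absorbed by reassembly */
        }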
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
                        __be32 *addr, __be32 new_addr)
{
        int transport_len = skb->len - skb_transport_offset(skb);

        if (nh->protocol == IPPROTO_TCP) {
                if (likely(transport_len >= sizeof(struct tcphdr)))
                        inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
                                                 *addr, new_addr, 1);
        } else if (nh->protocol == IPPROTO_UDP) {
                if (likely(transport_len >= sizeof(struct udphdr))) {
                        struct udphdr *uh = udp_hdr(skb);

                        if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
                                inet_proto_csum_replace4(&uh->check, skb,
                                                         *addr, new_addr, 1);
                                if (!uh->check)
                                        uh->check = CSUM_MANGLED_0;
                        }
                }
        }

        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}
int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
{
        if (unlikely(!pskb_may_pull(skb, hdr_len)))
                return -ENOMEM;

        skb_pull_rcsum(skb, hdr_len);

        if (inner_proto == htons(ETH_P_TEB)) {
                struct ethhdr *eh;

                if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
                        return -ENOMEM;

                eh = (struct ethhdr *)skb->data;
                if (likely(ntohs(eh->h_proto) >= ETH_P_802_3_MIN))
                        skb->protocol = eh->h_proto;
                else
                        skb->protocol = htons(ETH_P_802_2);
        } else {
                skb->protocol = inner_proto;
        }

        nf_reset(skb);
        secpath_reset(skb);
        skb_clear_hash(skb);
        skb_dst_drop(skb);
        vlan_set_tci(skb, 0);
        skb_set_queue_mapping(skb, 0);
        skb->pkt_type = PACKET_HOST;
        return 0;
}
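A hedged call-site sketch (assumed, not from the source): tunnel receive paths invoke iptunnel_pull_header() after validating the outer encapsulation so the inner packet starts with clean metadata; the hash is cleared because any cached value was computed over the now-stripped outer headers.

        /* Sketch of a decap receive path; 'hdr_len' covering the outer tunnel
         * header and the ETH_P_TEB inner protocol are assumptions for an
         * Ethernet-in-IP style tunnel. */
        if (iptunnel_pull_header(skb, hdr_len, htons(ETH_P_TEB)))
                goto drop;      /* outer header not fully present */
        /* skb now points at the inner frame with scrubbed metadata */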
static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
                             __be32 saddr, __be32 daddr, __u8 proto,
                             __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        struct iphdr *iph;

        skb_scrub_packet(skb, xnet);

        skb_clear_hash(skb);
        skb_dst_set(skb, dst_clone(dst));
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = IPVERSION;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = daddr;
        iph->saddr = saddr;
        iph->ttl = ttl;
        __ip_select_ident(dev_net(dst->dev), iph,
                          skb_shinfo(skb)->gso_segs ?: 1);
        iph->tot_len = htons(skb->len);
        ip_send_check(iph);
}
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto,
                  __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
        int pkt_len = skb->len;
        struct iphdr *iph;
        int err;

        skb_scrub_packet(skb, xnet);

        skb_clear_hash(skb);
        skb_dst_set(skb, &rt->dst);
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

        /* Push down and install the IP header. */
        skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;
        __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);

        err = ip_local_out_sk(sk, skb);
        if (unlikely(net_xmit_eval(err)))
                pkt_len = 0;
        return pkt_len;
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
{
        struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);

        if (!skb->dev) {
                OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
                return -EINVAL;
        }

        if (key->eth.type == htons(ETH_P_IP)) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
                int err;

                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                err = ip_defrag(skb, user);
                if (err)
                        return err;

                ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
                struct sk_buff *reasm;

                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                reasm = nf_ct_frag6_gather(skb, user);
                if (!reasm)
                        return -EINPROGRESS;

                if (skb == reasm) {
                        kfree_skb(skb);
                        return -EINVAL;
                }

                /* Don't free 'skb' even though it is one of the original
                 * fragments, as we're going to morph it into the head.
                 */
                skb_get(skb);
                nf_ct_frag6_consume_orig(reasm);

                key->ip.proto = ipv6_hdr(reasm)->nexthdr;
                skb_morph(skb, reasm);
                skb->next = reasm->next;
                consume_skb(reasm);
                ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
        } else {
                kfree_skb(skb);
                return -EPFNOSUPPORT;
        }

        key->ip.frag = OVS_FRAG_TYPE_NONE;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        *OVS_GSO_CB(skb) = ovs_cb;

        return 0;
}
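A hedged caller sketch (assumed placement, modelled on the OVS conntrack execute path): -EINPROGRESS means the skb has been stolen by the reassembly queue and must not be touched again by the caller, while other errors mean the skb has already been freed by the helper.

        /* Sketch of how a conntrack action might invoke the helper; 'info'
         * with a 'zone.id' field mirrors the OVS conntrack action info and
         * is assumed here. */
        if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
                err = handle_fragments(net, key, info->zone.id, skb);
                if (err)
                        return err;     /* includes -EINPROGRESS: skb owned elsewhere */
        }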
static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, __be32 *addr,
                        __be32 new_addr)
{
        update_ip_l4_checksum(skb, nh, *addr, new_addr);
        csum_replace4(&nh->check, *addr, new_addr);
        skb_clear_hash(skb);
        *addr = new_addr;
}
static void set_ipv6_addr(struct sk_buff *skb, u8 l4_proto,
                          __be32 addr[4], const __be32 new_addr[4],
                          bool recalculate_csum)
{
        if (recalculate_csum)
                update_ipv6_checksum(skb, l4_proto, addr, new_addr);

        skb_clear_hash(skb);
        memcpy(addr, new_addr, sizeof(__be32[4]));
}
void rpl_iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                       __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
                       __be16 df, bool xnet)
{
        struct net_device *dev = skb->dev;
        int pkt_len = skb->len - skb_inner_network_offset(skb);
        struct iphdr *iph;
        int err;

        skb_scrub_packet(skb, xnet);

        skb_clear_hash(skb);
        skb_dst_set(skb, &rt->dst);

#if 0
        /* Do not clear ovs_skb_cb.  It will be done in gso code. */
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

        /* Push down and install the IP header. */
        __skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
        __ip_select_ident(iph, &rt->dst, (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#elif defined(HAVE_IP_SELECT_IDENT_USING_NET)
        __ip_select_ident(dev_net(rt->dst.dev), iph,
                          skb_shinfo(skb)->gso_segs ?: 1);
#else
        __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

        err = ip_local_out(skb);
        if (unlikely(net_xmit_eval(err)))
                pkt_len = 0;

        iptunnel_xmit_stats(dev, pkt_len);
}
static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
{
        struct ovs_skb_cb ovs_cb = *OVS_CB(skb);

        if (key->eth.type == htons(ETH_P_IP)) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
                int err;

                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                err = ip_defrag(net, skb, user);
                if (err)
                        return err;

                ovs_cb.mru = IPCB(skb)->frag_max_size;
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
                struct sk_buff *reasm;

                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                reasm = nf_ct_frag6_gather(net, skb, user);
                if (!reasm)
                        return -EINPROGRESS;

                if (skb == reasm)
                        return -EINVAL;

                key->ip.proto = ipv6_hdr(reasm)->nexthdr;
                skb_morph(skb, reasm);
                consume_skb(reasm);
                ovs_cb.mru = IP6CB(skb)->frag_max_size;
#else
                return -EPFNOSUPPORT;
#endif
        } else {
                return -EPFNOSUPPORT;
        }

        key->ip.frag = OVS_FRAG_TYPE_NONE;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        *OVS_CB(skb) = ovs_cb;

        return 0;
}
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
                            u16 zone, struct sk_buff *skb)
{
        struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);
        int err;

        if (!skb->dev) {
                OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
                return -EINVAL;
        }

        if (key->eth.type == htons(ETH_P_IP)) {
                enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

                memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
                err = ip_defrag(net, skb, user);
                if (err)
                        return err;

                ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
        } else if (key->eth.type == htons(ETH_P_IPV6)) {
                enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

                skb_orphan(skb);
                memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
                err = nf_ct_frag6_gather(net, skb, user);
                if (err)
                        return err;

                key->ip.proto = ipv6_hdr(skb)->nexthdr;
                ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
        } else {
                kfree_skb(skb);
                return -EPFNOSUPPORT;
        }

        key->ip.frag = OVS_FRAG_TYPE_NONE;
        skb_clear_hash(skb);
        skb->ignore_df = 1;
        *OVS_GSO_CB(skb) = ovs_cb;

        return 0;
}
int iptunnel_xmit(struct sock *sk, struct rtable *rt, struct sk_buff *skb,
                  __be32 src, __be32 dst, __u8 proto, __u8 tos, __u8 ttl,
                  __be16 df, bool xnet)
{
        int pkt_len = skb->len;
        struct iphdr *iph;
        int err;

        nf_reset(skb);
        secpath_reset(skb);
        skb_clear_hash(skb);
        skb_dst_drop(skb);
        skb_dst_set(skb, &rt_dst(rt));

#if 0
        /* Do not clear ovs_skb_cb.  It will be done in gso code. */
        memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
#endif

        /* Push down and install the IP header. */
        __skb_push(skb, sizeof(struct iphdr));
        skb_reset_network_header(skb);

        iph = ip_hdr(skb);

        iph->version = 4;
        iph->ihl = sizeof(struct iphdr) >> 2;
        iph->frag_off = df;
        iph->protocol = proto;
        iph->tos = tos;
        iph->daddr = dst;
        iph->saddr = src;
        iph->ttl = ttl;

#ifdef HAVE_IP_SELECT_IDENT_USING_DST_ENTRY
        __ip_select_ident(iph, &rt_dst(rt), (skb_shinfo(skb)->gso_segs ?: 1) - 1);
#else
        __ip_select_ident(iph, skb_shinfo(skb)->gso_segs ?: 1);
#endif

        err = ip_local_out(skb);
        if (unlikely(net_xmit_eval(err)))
                pkt_len = 0;

        return pkt_len;
}
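A hedged usage sketch (assumed, not from the source): an encapsulating transmit path resolves the route first, then hands the packet to iptunnel_xmit(), which scrubs per-flow state (including the hash, now stale relative to the outer headers) and pushes the outer IPv4 header; the returned byte count feeds device statistics, in the style of the rpl_iptunnel_xmit() variant above and its iptunnel_xmit_stats() call.

        /* Sketch of a caller; 'fl4', 'tos', 'ttl', 'df' and 'xnet' are assumed
         * to have been prepared by the surrounding tunnel driver. */
        pkt_len = iptunnel_xmit(skb->sk, rt, skb, fl4.saddr, fl4.daddr,
                                IPPROTO_GRE, tos, ttl, df, xnet);
        iptunnel_xmit_stats(dev, pkt_len);      /* pkt_len == 0 on transmit error */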