static int udp_gro_complete_segment(struct sk_buff *skb)
{
	struct udphdr *uh = udp_hdr(skb);

	skb->csum_start = (unsigned char *)uh - skb->head;
	skb->csum_offset = offsetof(struct udphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
	return 0;
}
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && skb->ip_summed != CHECKSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb->rxhash = 0;
	}
}
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}
static void set_udp_port(struct sk_buff *skb, __be16 *port, __be16 new_port)
{
	struct udphdr *uh = udp_hdr(skb);

	if (uh->check && get_ip_summed(skb) != OVS_CSUM_PARTIAL) {
		set_tp_port(skb, port, new_port, &uh->check);

		if (!uh->check)
			uh->check = CSUM_MANGLED_0;
	} else {
		*port = new_port;
		skb_clear_rxhash(skb);
	}
}
static int udp6_ufo_send_check(struct sk_buff *skb)
{
	const struct ipv6hdr *ipv6h;
	struct udphdr *uh;

	if (!pskb_may_pull(skb, sizeof(*uh)))
		return -EINVAL;

	if (likely(!skb->encapsulation)) {
		ipv6h = ipv6_hdr(skb);
		uh = udp_hdr(skb);

		uh->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
					     skb->len, IPPROTO_UDP, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		skb->ip_summed = CHECKSUM_PARTIAL;
	}

	return 0;
}
/* Function to set UDP checksum for an IPv4 UDP packet. This is intended
 * for the simple case like when setting the checksum for a UDP tunnel.
 */
void rpl_udp_set_csum(bool nocheck, struct sk_buff *skb,
		      __be32 saddr, __be32 daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck)
		uh->check = 0;
	else if (skb_is_gso(skb))
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	else {
		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v4_check(len, saddr, daddr, 0);
	}
}
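/*
 * The kernel helpers leaned on above (udp_v4_check(), csum_tcpudp_magic(),
 * CSUM_MANGLED_0) all implement the RFC 768 checksum over the IPv4
 * pseudo-header.  What follows is only a rough, self-contained userspace
 * illustration of that arithmetic with hypothetical helper names; it is not
 * the kernel API and makes the stated assumptions about byte order.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* One's-complement sum of a buffer treated as big-endian 16-bit words
 * (RFC 1071); an odd trailing byte is padded with zero. */
static uint32_t csum_add_bytes(uint32_t sum, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len > 1) {
		sum += ((uint32_t)p[0] << 8) | p[1];
		p += 2;
		len -= 2;
	}
	if (len)
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Fold the carries back in and complement to get the 16-bit checksum. */
static uint16_t csum_finish(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* UDP checksum over the IPv4 pseudo-header (saddr, daddr, zero, protocol,
 * UDP length) plus the UDP header and payload (RFC 768).  saddr/daddr and
 * the UDP bytes are assumed to be in network byte order with the check
 * field zeroed.  A computed value of zero is transmitted as 0xffff, which
 * is what CSUM_MANGLED_0 expresses in the snippets in this collection. */
static uint16_t udp4_checksum(const uint8_t saddr[4], const uint8_t daddr[4],
			      const uint8_t *udp, uint16_t udp_len)
{
	uint8_t pseudo[12];
	uint32_t sum = 0;
	uint16_t check;

	memcpy(&pseudo[0], saddr, 4);
	memcpy(&pseudo[4], daddr, 4);
	pseudo[8] = 0;
	pseudo[9] = 17;			/* IPPROTO_UDP */
	pseudo[10] = udp_len >> 8;
	pseudo[11] = udp_len & 0xff;

	sum = csum_add_bytes(sum, pseudo, sizeof(pseudo));
	sum = csum_add_bytes(sum, udp, udp_len);

	check = csum_finish(sum);
	return check ? check : 0xffff;
}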
static u16 mlx5e_select_queue_assigned(struct mlx5e_priv *priv,
				       struct sk_buff *skb)
{
	struct mlx5e_sq_flow_map *flow_map;
	int sk_ix = sk_tx_queue_get(skb->sk);
	u32 key_all, key_dip, key_dport;
	u16 dport;
	u32 dip;

	if (sk_ix >= priv->params.num_channels)
		return sk_ix;

	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
		dip = ip_hdr(skb)->daddr;
		if (ip_hdr(skb)->protocol == IPPROTO_UDP ||
		    ip_hdr(skb)->protocol == IPPROTO_TCP)
			/* dest sits at the same offset in TCP and UDP headers */
			dport = udp_hdr(skb)->dest;
		else
			goto fallback;
	} else {
		goto fallback;
	}

	key_all = dip ^ dport;
	hash_for_each_possible_rcu(priv->flow_map_hash, flow_map,
				   hlist, key_all)
		if (flow_map->dst_ip == dip && flow_map->dst_port == dport)
			return flow_map->queue_index;

	key_dip = dip;
	hash_for_each_possible_rcu(priv->flow_map_hash, flow_map,
				   hlist, key_dip)
		if (flow_map->dst_ip == dip)
			return flow_map->queue_index;

	key_dport = dport;
	hash_for_each_possible_rcu(priv->flow_map_hash, flow_map,
				   hlist, key_dport)
		if (flow_map->dst_port == dport)
			return flow_map->queue_index;

fallback:
	return 0;
}
static int set_udp(struct sk_buff *skb, const struct ovs_key_udp *udp_port_key)
{
	struct udphdr *uh;
	int err;

	err = make_writable(skb, skb_transport_offset(skb) +
				 sizeof(struct udphdr));
	if (unlikely(err))
		return err;

	uh = udp_hdr(skb);
	if (udp_port_key->udp_src != uh->source)
		set_udp_port(skb, &uh->source, udp_port_key->udp_src);

	if (udp_port_key->udp_dst != uh->dest)
		set_udp_port(skb, &uh->dest, udp_port_key->udp_dst);

	return 0;
}
static void etherframe_print(u_char *usr, const struct pcap_pkthdr *pkt,
			     const u_char *d, uint16_t ethtype)
{
	struct iphdr *ip;
	struct tcphdr *th;
	struct udphdr *uh;
	uint16_t ethtype_le;

	ethtype_le = ntohs(ethtype);
	switch (ethtype_le) {
	case ETH_P_IP:
		ip = ip_hdr(d);
		sb_append_str(&sb, "IP: ");
		iphdr_print(ip, &sb);
		switch (ip->protocol) {
		case IPPROTO_TCP:
			th = tcp_hdr(d + ip_hdrlen(ip));
			sb_append_str(&sb, "; TCP: ");
			tcp_print(th, &sb);
			break;
		case IPPROTO_UDP:
			uh = udp_hdr(d + ip_hdrlen(ip));
			sb_append_str(&sb, "; UDP: ");
			udp_print(uh, &sb);
			break;
		default:
			sb_append_char(&sb, ' ');
			sb_append_str(&sb, ipproto_str(ip->protocol));
		}
		break;
	default:
		/* FIXME: This code is open to buffer overrun errors */
		sb_append_str(&sb, "ether type: ");
		sb.len += sprintf(sb_curr(&sb), "0x%04x ", ethtype_le);
		sb_append_str(&sb, ethertype_to_str(ethtype_le));
	}
}
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct net_device *rdev = ndev;
	struct rxe_dev *rxe = rxe_get_dev_from_net(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe && is_vlan_dev(rdev)) {
		rdev = vlan_dev_real_dev(ndev);
		rxe = rxe_get_dev_from_net(rdev);
	}
	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		ib_device_put(&rxe->ib_dev);
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	rxe_rcv(skb);

	/*
	 * FIXME: this is in the wrong place, it needs to be done when pkt is
	 * destroyed
	 */
	ib_device_put(&rxe->ib_dev);

	return 0;

drop:
	kfree_skb(skb);

	return 0;
}
static void udp6_set_csum(bool nocheck, struct sk_buff *skb,
			  const struct in6_addr *saddr,
			  const struct in6_addr *daddr, int len)
{
	struct udphdr *uh = udp_hdr(skb);

	if (nocheck)
		uh->check = 0;
	else if (skb_is_gso(skb))
		uh->check = ~udp_v6_check(len, saddr, daddr, 0);
	else if (skb_dst(skb) && skb_dst(skb)->dev &&
		 (skb_dst(skb)->dev->features & NETIF_F_IPV6_CSUM)) {
		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct udphdr, check);
		uh->check = ~udp_v6_check(len, saddr, daddr, 0);
	} else {
		__wsum csum;

		/* No hardware help: compute the checksum in software. */
		BUG_ON(skb->ip_summed == CHECKSUM_PARTIAL);

		uh->check = 0;
		csum = skb_checksum(skb, 0, len, 0);
		uh->check = udp_v6_check(len, saddr, daddr, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
}
int rpl_udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk,
			    struct sk_buff *skb, __be32 src, __be32 dst,
			    __u8 tos, __u8 ttl, __be16 df, __be16 src_port,
			    __be16 dst_port, bool xnet, bool nocheck)
{
	struct udphdr *uh;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;
	uh->len = htons(skb->len);

	udp_set_csum(nocheck, skb, src, dst, skb->len);

	return iptunnel_xmit(sk, rt, skb, src, dst, IPPROTO_UDP, tos, ttl,
			     df, xnet);
}
static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
				 __be32 addr[4], const __be32 new_addr[4])
{
	int transport_len = skb->len - skb_transport_offset(skb);

	if (l4_proto == IPPROTO_TCP) {
		if (likely(transport_len >= sizeof(struct tcphdr)))
			inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
						  addr, new_addr, 1);
	} else if (l4_proto == IPPROTO_UDP) {
		if (likely(transport_len >= sizeof(struct udphdr))) {
			struct udphdr *uh = udp_hdr(skb);

			if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
				inet_proto_csum_replace16(&uh->check, skb,
							  addr, new_addr, 1);
				if (!uh->check)
					uh->check = CSUM_MANGLED_0;
			}
		}
	}
}
int udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			 struct sk_buff *skb, struct net_device *dev,
			 struct in6_addr *saddr, struct in6_addr *daddr,
			 __u8 prio, __u8 ttl, __be16 src_port,
			 __be16 dst_port, bool nocheck)
{
	struct udphdr *uh;
	struct ipv6hdr *ip6h;

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = dst_port;
	uh->source = src_port;
	uh->len = htons(skb->len);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_set(skb, dst);

	udp6_set_csum(nocheck, skb, saddr, daddr, skb->len);

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->payload_len = htons(skb->len);
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	ip6tunnel_xmit(sk, skb, dev);
	return 0;
}
static int udp_rcv(struct sk_buff *skb)
{
	struct sock *sk;

	assert(skb != NULL);
	assert(ip_check_version(ip_hdr(skb)) || ip6_check_version(ip6_hdr(skb)));

	/* Check CRC */
	if (MODOPS_VERIFY_CHKSUM) {
		uint16_t old_check;

		old_check = skb->h.uh->check;
		udp_set_check_field(skb->h.uh, skb->nh.raw);
		if (old_check != skb->h.uh->check) {
			return 0; /* error: bad checksum */
		}
	}

	sk = sock_lookup(NULL, udp_sock_ops,
			 ip_check_version(ip_hdr(skb))
				? udp4_rcv_tester : udp6_rcv_tester,
			 skb);
	if (sk != NULL) {
		if (ip_check_version(ip_hdr(skb))
				? udp4_accept_dst(sk, skb)
				: udp6_accept_dst(sk, skb)) {
			sock_rcv(sk, skb, skb->h.raw + UDP_HEADER_SIZE,
				 udp_data_length(udp_hdr(skb)));
		} else {
			skb_free(skb);
		}
	} else {
		icmp_discard(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH);
	}

	return 0;
}
static bool invert_packet(struct xlation *state, struct sk_buff **skb)
{
	struct iphdr *hdr4;
	struct udphdr *uhdr;

	if (create_skb4_udp("1.1.1.1", 1111, "2.2.2.2", 2222, 100, 32, skb))
		return false;
	if (invert_tuple(state))
		return false;

	hdr4 = ip_hdr(*skb);
	uhdr = udp_hdr(*skb);
	hdr4->saddr = state->in.tuple.src.addr4.l3.s_addr;
	uhdr->source = cpu_to_be16(state->in.tuple.src.addr4.l4);
	hdr4->daddr = state->in.tuple.dst.addr4.l3.s_addr;
	uhdr->dest = cpu_to_be16(state->in.tuple.dst.addr4.l4);

	if (pkt_init_ipv4(state, *skb))
		return false;

	return true;
}
/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct vxlan_sock *vs;
	struct vxlanhdr *vxh;

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, VXLAN_HLEN))
		goto error;

	/* Return packets with reserved bits set */
	vxh = (struct vxlanhdr *)(udp_hdr(skb) + 1);
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		pr_warn("invalid vxlan flags=%#x vni=%#x\n",
			ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
		goto drop;

	vs = rcu_dereference_sk_user_data(sk);
	if (!vs)
		goto drop;

	vs->rcv(vs, skb, vxh->vx_vni);
	return 0;

drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;

error:
	/* Return non vxlan pkt */
	return 1;
}
static void vxlan_gso(struct sk_buff *skb)
{
	int udp_offset = skb_transport_offset(skb);
	struct udphdr *uh;

	uh = udp_hdr(skb);
	uh->len = htons(skb->len - udp_offset);

	/* csum segment if tunnel sets skb with csum. */
	if (unlikely(uh->check)) {
		struct iphdr *iph = ip_hdr(skb);

		uh->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					       skb->len - udp_offset,
					       IPPROTO_UDP, 0);
		uh->check = csum_fold(skb_checksum(skb, udp_offset,
						   skb->len - udp_offset, 0));
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
	skb->ip_summed = CHECKSUM_NONE;
}
/*
 * lisp_input()
 *
 * Packet entry point into LISP processing. Since all packets
 * will come here, we must be efficient at disposing of non-LISP
 * destined datagrams.
 */
unsigned int lisp_input(unsigned int hooknum, struct sk_buff *packet_buf,
			const struct net_device *input_dev,
			const struct net_device *output_dev,
			int (*okfunc)(struct sk_buff *))
{
	struct iphdr *iph;
	struct ipv6hdr *ip6;
	struct udphdr *udh;
	struct lisphdr *lisp_hdr;
	char first_byte;
	int source_locator;

	/*
	 * PN: net_device corresponding to LISP_EID_INTERFACE
	 */
	struct net_device *eid_int = NULL;

	/*
	 * Get the IP header
	 */
	iph = ip_hdr(packet_buf);
	if (!iph) {
		printk(KERN_INFO "Odd, no IP header\n");
		return NF_ACCEPT;
	}

	source_locator = iph->saddr;

#ifdef DEBUG_PACKETS
	printk(KERN_INFO "In LISP Input with packet from %pI4 for %pI4\n",
	       &(source_locator), &(iph->daddr));
#endif

	/*
	 * Certain things should never be LISP examined:
	 * locally loopback sourced.
	 */
	if (packet_buf->pkt_type == PACKET_LOOPBACK) {
		return NF_ACCEPT;
	}

	/*
	 * Check for UDP
	 */
	if (iph->protocol == IPPROTO_UDP) {
		// Move past the ip header
		skb_pull(packet_buf, sizeof(struct iphdr));
		skb_reset_transport_header(packet_buf);

		udh = udp_hdr(packet_buf);
		first_byte = *((char *)udh + sizeof(struct udphdr));

#ifdef DEBUG_PACKETS
		printk(KERN_INFO "  Proto is UDP, src port: %d dest port: %d\n",
		       ntohs(udh->source), ntohs(udh->dest));
		printk(KERN_INFO "  First byte: 0x%x", first_byte);
#endif

		// Detect non-encapsulated lisp control messages
		if (ntohs(udh->dest) == globals.udp_encap_port &&
		    ntohs(udh->source) != LISP_CONTROL_PORT &&
		    (first_byte != echo_signature)) {
			// LISP header
			lisp_hdr = (struct lisphdr *)skb_pull(packet_buf,
						sizeof(struct udphdr));
			skb_reset_transport_header(packet_buf);

#ifdef DEBUG_PACKETS
			printk(KERN_INFO "  LISP packet received: dest %d, len: %d\n",
			       ntohs(udh->dest), ntohs(udh->len));
			printk(KERN_INFO "  rflags: %d, e: %d, l: %d, n: %d, lsb: 0x%x",
			       lisp_hdr->rflags, lisp_hdr->echo_nonce,
			       lisp_hdr->lsb, lisp_hdr->nonce_present,
			       lisp_hdr->lsb_bits);
#endif

			// Decapsulate
			skb_pull(packet_buf, sizeof(struct lisphdr));
			skb_reset_transport_header(packet_buf);
			skb_reset_network_header(packet_buf);

			iph = ip_hdr(packet_buf);
			if (iph->version == 4) {
#ifdef DEBUG_PACKETS
				printk(KERN_INFO "  Inner packet src:%pI4 dst:%pI4, type: %d\n",
				       &(iph->saddr), &(iph->daddr), iph->protocol);
#endif
				// Check the LSB's.
				check_locator_bits(lisp_hdr, iph, AF_INET,
						   source_locator);

				eid_int = dev_get_by_name(&init_net,
							  LISP_EID_INTERFACE);
				if (eid_int) {
					dev_put(eid_int);
					packet_buf->dev = eid_int;
					skb_dst_drop(packet_buf);
					nf_reset(packet_buf);
				} else {
					printk(KERN_INFO "Couldn't get input interface %s\n",
					       LISP_EID_INTERFACE);
				}
				return NF_ACCEPT;
			} else if (iph->version == 6) {
				ip6 = ipv6_hdr(packet_buf);
#ifdef DEBUG_PACKETS
				printk(KERN_INFO "  Inner packet src:%pI6 dst:%pI6, nexthdr: 0x%x\n",
				       ip6->saddr.s6_addr, ip6->daddr.s6_addr,
				       ip6->nexthdr);
#endif
				// Check the LSB's.
				check_locator_bits(lisp_hdr, ip6, AF_INET6,
						   source_locator);

				IPCB(packet_buf)->flags = 0;
				packet_buf->protocol = htons(ETH_P_IPV6);
				packet_buf->pkt_type = PACKET_HOST;
				packet_buf->dev = input_dev;
				nf_reset(packet_buf);
				netif_rx(packet_buf);
				return NF_STOLEN;
			} else {
				return NF_ACCEPT; // Don't know what it is, let ip deal with it.
			}
		}

#ifdef DEBUG_PACKETS
		printk(KERN_INFO "  Non-LISP UDP Packet received\n");
		if (first_byte == echo_signature) {
			printk(KERN_INFO "  LISP-echo reply to data port");
		}
#endif

		// Undo the pull, the next layer expects a pristine skb
		skb_push(packet_buf, sizeof(struct iphdr));
		skb_reset_transport_header(packet_buf);
	}

#ifdef DEBUG_PACKETS
	printk(KERN_INFO "  Punting to IP\n");
#endif

	return NF_ACCEPT;
}
static inline struct lisphdr *lisp_hdr(const struct sk_buff *skb)
{
	return (struct lisphdr *)(udp_hdr(skb) + 1);
}
static bool test_hairpin(l4_protocol l4_proto, skb_creator create_skb_fn)
{
	struct sk_buff *skb_in = NULL;
	struct sk_buff *skb_out = NULL;
	struct sk_buff *skb_tmp = NULL;
	struct bib_entry *static_bib = NULL;
	struct bib_entry *dynamic_bib = NULL;
	struct session_entry *static_session = NULL;
	struct session_entry *dynamic_session = NULL;
	struct tuple tuple6;
	bool success = true;

	static_bib = bib_create_str(SERVER_ADDR6, SERVER_PORT6,
				    NAT64_POOL4, SERVER_PORT6,
				    l4_proto);
	dynamic_bib = bib_create_str(CLIENT_ADDR, CLIENT_PORT,
				     NAT64_POOL4, DYNAMIC_BIB_IPV4_PORT,
				     l4_proto);
	static_session = session_create_str(
			SERVER_ADDR6, SERVER_PORT6,
			SERVER_HAIRPIN_ADDR, DYNAMIC_BIB_IPV4_PORT,
			NAT64_POOL4, SERVER_PORT6,
			NAT64_POOL4, DYNAMIC_BIB_IPV4_PORT,
			l4_proto);
	dynamic_session = session_create_str(CLIENT_ADDR, CLIENT_PORT,
			SERVER_HAIRPIN_ADDR, SERVER_PORT6,
			NAT64_POOL4, DYNAMIC_BIB_IPV4_PORT,
			NAT64_POOL4, SERVER_PORT6,
			l4_proto);

	if (!static_bib || !dynamic_bib || !static_session || !dynamic_session)
		goto fail;

	/* Send the request. */
	if (is_error(init_ipv6_tuple(&tuple6, CLIENT_ADDR, CLIENT_PORT,
				     SERVER_HAIRPIN_ADDR, SERVER_PORT6,
				     l4_proto)))
		goto fail;
	if (is_error(create_skb_fn(&tuple6, &skb_in, 40, 32)))
		goto fail;

	success &= send(skb_in);
	success &= BIB_ASSERT(l4_proto, static_bib, dynamic_bib);
	success &= SESSION_ASSERT(l4_proto, static_session, dynamic_session);

	skb_out = skb_tmp = get_sent_skb();
	success &= assert_not_null(skb_out, "Request packet");
	if (!success)
		goto fail;

	do {
		success &= assert_equals_ipv6_str(SERVER_HAIRPIN_ADDR,
						  &ipv6_hdr(skb_tmp)->saddr,
						  "out src");
		success &= assert_equals_ipv6_str(SERVER_ADDR6,
						  &ipv6_hdr(skb_tmp)->daddr,
						  "out dst");
		skb_tmp = skb_tmp->next;
	} while (skb_tmp);

	switch (l4_proto) {
	case L4PROTO_UDP:
		success &= assert_equals_u16(DYNAMIC_BIB_IPV4_PORT,
					     be16_to_cpu(udp_hdr(skb_out)->source),
					     "out's src port");
		success &= assert_equals_u16(SERVER_PORT6,
					     be16_to_cpu(udp_hdr(skb_out)->dest),
					     "out's dst port");
		break;
	case L4PROTO_TCP:
		success &= assert_equals_u16(DYNAMIC_BIB_IPV4_PORT,
					     be16_to_cpu(tcp_hdr(skb_out)->source),
					     "out's src port");
		success &= assert_equals_u16(SERVER_PORT6,
					     be16_to_cpu(tcp_hdr(skb_out)->dest),
					     "out's dst port");
		break;
	case L4PROTO_ICMP:
	case L4PROTO_OTHER:
		log_err("Test is not designed for protocol %d.", l4_proto);
		success = false;
		break;
	}

	if (!success)
		goto fail;

	kfree_skb(skb_out);

	/* Send the response. */
	if (is_error(init_ipv6_tuple(&tuple6, SERVER_ADDR6, SERVER_PORT6,
				     SERVER_HAIRPIN_ADDR, DYNAMIC_BIB_IPV4_PORT,
				     l4_proto)))
		goto fail;
	if (is_error(create_skb_fn(&tuple6, &skb_in, 100, 32)))
		goto fail;

	success &= send(skb_in);
	/* The module should have reused the entries, so the database
	 * shouldn't have changed.
	 */
	success &= BIB_ASSERT(l4_proto, static_bib, dynamic_bib);
	success &= SESSION_ASSERT(l4_proto, static_session, dynamic_session);

	skb_out = skb_tmp = get_sent_skb();
	success &= assert_not_null(skb_out, "Response packet");
	if (!success)
		goto fail;

	do {
		success &= assert_equals_ipv6_str(SERVER_HAIRPIN_ADDR,
						  &ipv6_hdr(skb_out)->saddr,
						  "out src");
		success &= assert_equals_ipv6_str(CLIENT_ADDR,
						  &ipv6_hdr(skb_out)->daddr,
						  "out dst");
		skb_tmp = skb_tmp->next;
	} while (skb_tmp);

	switch (l4_proto) {
	case L4PROTO_UDP:
		success &= assert_equals_u16(SERVER_PORT6,
					     be16_to_cpu(udp_hdr(skb_out)->source),
					     "out's src port");
		success &= assert_equals_u16(CLIENT_PORT,
					     be16_to_cpu(udp_hdr(skb_out)->dest),
					     "out's dst port");
		break;
	case L4PROTO_TCP:
		success &= assert_equals_u16(SERVER_PORT6,
					     be16_to_cpu(tcp_hdr(skb_out)->source),
					     "out's src port");
		success &= assert_equals_u16(CLIENT_PORT,
					     be16_to_cpu(tcp_hdr(skb_out)->dest),
					     "out's dst port");
		break;
	case L4PROTO_ICMP:
	case L4PROTO_OTHER:
		log_err("Test is not designed for protocol %d.", l4_proto);
		success = false;
		break;
	}

	kfree_skb(skb_out);
	session_return(dynamic_session);
	session_return(static_session);
	bib_kfree(dynamic_bib);
	bib_kfree(static_bib);
	return success;

fail:
	kfree_skb(skb_out);
	if (dynamic_session)
		session_return(dynamic_session);
	if (static_session)
		session_return(static_session);
	if (dynamic_bib)
		bib_kfree(dynamic_bib);
	if (static_bib)
		bib_kfree(static_bib);
	return false;
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	bool isvlan;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	struct ndis_tcp_lso_info *lso_info;
	int hdr_offset;
	u32 net_trans_info;
#if defined(RHEL_RELEASE_VERSION) && (RHEL_RELEASE_CODE > 1291)
	u32 hash;
#endif
	u32 skb_length = skb->len;
	bool kick_q = true;

	/* We will atmost need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet.
	 */
	num_data_pgs = netvsc_get_slots(skb) + 2;
	if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
		netdev_err(net, "Packet too big: %u\n", skb->len);
		dev_kfree_skb(skb);
		net_device_ctx->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Allocate a netvsc packet based on # of frags. */
	packet = kzalloc(sizeof(struct hv_netvsc_packet) +
			 (num_data_pgs * sizeof(struct hv_page_buffer)) +
			 RNDIS_AND_PPI_SIZE, GFP_ATOMIC);
	if (!packet) {
		/* out of memory, drop packet */
		netdev_err(net, "unable to allocate hv_netvsc_packet\n");
		dev_kfree_skb(skb);
		net_device_ctx->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	//KYS packet->vlan_tci = skb->vlan_tci;
	//KYS packet->q_idx = skb_get_queue_mapping(skb);
	packet->is_data_pkt = true;
	packet->total_data_buflen = skb->len;

	packet->rndis_msg = (struct rndis_message *)((unsigned long)packet +
				sizeof(struct hv_netvsc_packet) +
				(num_data_pgs * sizeof(struct hv_page_buffer)));

	/* Set the completion routine */
	packet->send_completion = netvsc_xmit_completion;
	packet->send_completion_ctx = packet;
	packet->send_completion_tid = (unsigned long)skb;

	isvlan = packet->vlan_tci & VLAN_TAG_PRESENT;

	/* Add the rndis header */
	rndis_msg = packet->rndis_msg;
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

#ifdef NOTYET
	hash = skb_get_hash(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}
#endif

	if (isvlan) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = packet->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (packet->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);
	if (net_trans_info == TRANSPORT_INFO_NOT_IP)
		goto do_send;

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if (skb_is_gso(skb))
		goto do_lso;

	if ((skb->ip_summed == CHECKSUM_NONE) ||
	    (skb->ip_summed == CHECKSUM_UNNECESSARY))
		goto do_send;

	rndis_msg_size += NDIS_CSUM_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
			    TCPIP_CHKSUM_PKTINFO);

	csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
			ppi->ppi_offset);

	if (net_trans_info & (INFO_IPV4 << 16))
		csum_info->transmit.is_ipv4 = 1;
	else
		csum_info->transmit.is_ipv6 = 1;

	if (net_trans_info & INFO_TCP) {
		csum_info->transmit.tcp_checksum = 1;
		csum_info->transmit.tcp_header_offset = hdr_offset;
	} else if (net_trans_info & INFO_UDP) {
		/* UDP checksum offload is not supported on ws2008r2.
		 * Furthermore, on ws2012 and ws2012r2, there are some
		 * issues with udp checksum offload from Linux guests.
		 * (these are host issues).
		 * For now compute the checksum here.
		 */
		struct udphdr *uh;
		u16 udp_len;

		ret = skb_cow_head(skb, 0);
		if (ret)
			goto drop;

		uh = udp_hdr(skb);
		udp_len = ntohs(uh->len);
		uh->check = 0;
		uh->check = csum_tcpudp_magic(ip_hdr(skb)->saddr,
					      ip_hdr(skb)->daddr,
					      udp_len, IPPROTO_UDP,
#if defined(RHEL_RELEASE_VERSION) && (RHEL_RELEASE_CODE <= 1291)
					      csum_partial((unsigned char *)uh,
							   udp_len, 0));
#else
					      csum_partial(uh, udp_len, 0));
#endif
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		csum_info->transmit.udp_checksum = 0;
	}
	goto do_send;

do_lso:
	rndis_msg_size += NDIS_LSO_PPI_SIZE;
	ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
			    TCP_LARGESEND_PKTINFO);

	lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
			ppi->ppi_offset);

	lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
	if (net_trans_info & (INFO_IPV4 << 16)) {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
		ip_hdr(skb)->tot_len = 0;
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else {
		lso_info->lso_v2_transmit.ip_version =
			NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
		ipv6_hdr(skb)->payload_len = 0;
		tcp_hdr(skb)->check =
			~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					 &ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}
	lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
	lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;

do_send:
	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, &packet->page_buf[0]);

	ret = netvsc_send(net_device_ctx->device_ctx, packet, kick_q);

drop:
	if (ret == 0) {
		net_device_ctx->stats.tx_bytes += skb_length;
		net_device_ctx->stats.tx_packets++;
	} else {
		kfree(packet);
		if (ret != -EAGAIN) {
			dev_kfree_skb_any(skb);
			net_device_ctx->stats.tx_dropped++;
		}
	}

	return (ret == -EAGAIN) ? NETDEV_TX_BUSY : NETDEV_TX_OK;
}
static bool validate_frag6(struct sk_buff *skb, bool is_first, bool is_last,
			   u16 offset, u16 payload_len, u16 payload_offset,
			   struct tuple *tuple, l4_protocol l4proto,
			   u16 total_payload)
{
	size_t l4hdr_size;
	u16 l4_next_hdr;
	u16 mf = is_last ? 0 : IP6_MF;
	u16 hdr_payload_len;

	switch (l4proto) {
	case (L4PROTO_TCP):
		l4hdr_size = sizeof(struct tcphdr);
		l4_next_hdr = NEXTHDR_TCP;
		break;
	case (L4PROTO_UDP):
		l4hdr_size = sizeof(struct udphdr);
		l4_next_hdr = NEXTHDR_UDP;
		break;
	case (L4PROTO_ICMP):
		l4hdr_size = sizeof(struct icmp6hdr);
		l4_next_hdr = NEXTHDR_ICMP;
		break;
	default:
		log_debug("Invalid l4 protocol: %u", l4proto);
		return false;
	}

	hdr_payload_len = sizeof(struct frag_hdr) +
			  (is_first ? l4hdr_size : 0) + payload_len;

	if (!skb) {
		log_err("The skb is NULL.");
		return false;
	}

	if (!validate_cb_l3(skb, L3PROTO_IPV6,
			    sizeof(struct ipv6hdr) + sizeof(struct frag_hdr)))
		return false;
	if (!validate_cb_l4(skb, l4proto, is_first ? l4hdr_size : 0))
		return false;
	if (!validate_cb_payload(skb, payload_len))
		return false;

	if (!validate_ipv6_hdr(ipv6_hdr(skb), hdr_payload_len,
			       NEXTHDR_FRAGMENT, tuple))
		return false;
	if (!validate_frag_hdr(get_extension_header(ipv6_hdr(skb),
						    NEXTHDR_FRAGMENT),
			       offset, mf, l4_next_hdr))
		return false;

	switch (l4proto) {
	case (L4PROTO_TCP):
		if (is_first && !validate_tcp_hdr(tcp_hdr(skb), tuple))
			return false;
		break;
	case (L4PROTO_UDP):
		if (is_first && !validate_udp_hdr(udp_hdr(skb), total_payload,
						  tuple))
			return false;
		break;
	case (L4PROTO_ICMP):
		/* id field is not used in the validate_icmp6_hdr function. */
		if (is_first && !validate_icmp6_hdr(icmp6_hdr(skb), 1234,
						    tuple))
			return false;
		break;
	}

	if (!validate_payload(skb_payload(skb), payload_len, payload_offset))
		return false;

	return true;
}
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
				  netdev_features_t features)
{
	struct sock *sk = gso_skb->sk;
	unsigned int sum_truesize = 0;
	struct sk_buff *segs, *seg;
	struct udphdr *uh;
	unsigned int mss;
	bool copy_dtor;
	__sum16 check;
	__be16 newlen;

	mss = skb_shinfo(gso_skb)->gso_size;
	if (gso_skb->len <= sizeof(*uh) + mss)
		return ERR_PTR(-EINVAL);

	skb_pull(gso_skb, sizeof(*uh));

	/* clear destructor to avoid skb_segment assigning it to tail */
	copy_dtor = gso_skb->destructor == sock_wfree;
	if (copy_dtor)
		gso_skb->destructor = NULL;

	segs = skb_segment(gso_skb, features);
	if (unlikely(IS_ERR_OR_NULL(segs))) {
		if (copy_dtor)
			gso_skb->destructor = sock_wfree;
		return segs;
	}

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

	seg = segs;
	uh = udp_hdr(seg);

	/* compute checksum adjustment based on old length versus new */
	newlen = htons(sizeof(*uh) + mss);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	for (;;) {
		if (copy_dtor) {
			seg->destructor = sock_wfree;
			seg->sk = sk;
			sum_truesize += seg->truesize;
		}

		if (!seg->next)
			break;

		uh->len = newlen;
		uh->check = check;

		if (seg->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(seg, ~check);
		else
			uh->check = gso_make_checksum(seg, ~check) ? :
				    CSUM_MANGLED_0;

		seg = seg->next;
		uh = udp_hdr(seg);
	}

	/* last packet can be partial gso_size, account for that in checksum */
	newlen = htons(skb_tail_pointer(seg) - skb_transport_header(seg) +
		       seg->data_len);
	check = csum16_add(csum16_sub(uh->check, uh->len), newlen);

	uh->len = newlen;
	uh->check = check;

	if (seg->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(seg, ~check);
	else
		uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0;

	/* update refcount for the packet */
	if (copy_dtor) {
		int delta = sum_truesize - gso_skb->truesize;

		/* In some pathological cases, delta can be negative.
		 * We need to either use refcount_add() or refcount_sub_and_test()
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta,
							   &sk->sk_wmem_alloc));
	}

	return segs;
}
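/*
 * The length fix-up above uses incremental one's-complement arithmetic: on a
 * CHECKSUM_PARTIAL skb, uh->check holds the (non-inverted) pseudo-header sum,
 * so changing the UDP length only requires removing the old 16-bit word and
 * adding the new one with end-around carry.  The helpers below are minimal
 * userspace stand-ins for that arithmetic, not the kernel's csum16_add() and
 * csum16_sub() definitions; treat them as a sketch.
 */
#include <stdint.h>

/* 16-bit one's-complement addition with end-around carry. */
static uint16_t csum16_add(uint16_t csum, uint16_t addend)
{
	uint32_t res = (uint32_t)csum + addend;

	return (uint16_t)((res & 0xffff) + (res >> 16));
}

/* One's-complement subtraction is addition of the bitwise complement. */
static uint16_t csum16_sub(uint16_t csum, uint16_t addend)
{
	return csum16_add(csum, (uint16_t)~addend);
}

/* Replace one 16-bit word (here: the UDP length) in a running sum, the same
 * shape as csum16_add(csum16_sub(uh->check, uh->len), newlen) above. */
static uint16_t csum16_replace(uint16_t sum, uint16_t old_val, uint16_t new_val)
{
	return csum16_add(csum16_sub(sum, old_val), new_val);
}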
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct udp_sock *up = udp_sk(sk);
	struct udphdr *uh;
	struct iphdr *iph;
	int iphlen, len;

	__u8 *udpdata;
	__be32 *udpdata32;
	__u16 encap_type = up->encap_type;

	/* if this is not encapsulated socket, then just return now */
	if (!encap_type)
		return 1;

	/* If this is a paged skb, make sure we pull up
	 * whatever data we need to look at. */
	len = skb->len - sizeof(struct udphdr);
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min(len, 8)))
		return 1;

	/* Now we can get the pointers */
	uh = udp_hdr(skb);
	udpdata = (__u8 *)uh + sizeof(struct udphdr);
	udpdata32 = (__be32 *)udpdata;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		/* Check if this is a keepalive packet. If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			goto drop;
		} else if (len > sizeof(struct ip_esp_hdr) && udpdata32[0] != 0) {
			/* ESP Packet without Non-ESP header */
			len = sizeof(struct udphdr);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		/* Check if this is a keepalive packet. If so, eat it. */
		if (len == 1 && udpdata[0] == 0xff) {
			goto drop;
		} else if (len > 2 * sizeof(u32) + sizeof(struct ip_esp_hdr) &&
			   udpdata32[0] == 0 && udpdata32[1] == 0) {
			/* ESP Packet with Non-IKE marker */
			len = sizeof(struct udphdr) + 2 * sizeof(u32);
		} else
			/* Must be an IKE packet.. pass it through */
			return 1;
		break;
	}

	/* At this point we are sure that this is an ESPinUDP packet,
	 * so we need to remove 'len' bytes from the packet (the UDP
	 * header and optional ESP marker bytes) and then modify the
	 * protocol to ESP, and then call into the transform receiver.
	 */
	if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		goto drop;

	/* Now we can update and verify the packet length... */
	iph = ip_hdr(skb);
	iphlen = iph->ihl << 2;
	iph->tot_len = htons(ntohs(iph->tot_len) - len);
	if (skb->len < iphlen + len) {
		/* packet is too small!?! */
		goto drop;
	}

	/* pull the data buffer up to the ESP header and set the
	 * transport header to point to ESP.  Keep UDP on the stack
	 * for later.
	 */
	__skb_pull(skb, len);
	skb_reset_transport_header(skb);

	/* process ESP */
	return xfrm4_rcv_encap(skb, IPPROTO_ESP, 0, encap_type);

drop:
	kfree_skb(skb);
	return 0;
}
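/*
 * The branches above follow RFC 3948 framing on a UDP-encapsulated ESP port:
 * a single 0xff byte is a NAT-keepalive, a zero first 32-bit word is the
 * non-ESP marker used by IKE, and a non-zero first word is an ESP SPI.  The
 * sketch below restates just that classification in standalone userspace C
 * with hypothetical names; it deliberately omits the 8-byte non-IKE marker
 * handling and the minimum-ESP-header length checks the real code performs.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

enum espinudp_kind {
	ESPINUDP_KEEPALIVE,	/* single 0xff byte: NAT keepalive, discard */
	ESPINUDP_IKE,		/* first word zero: non-ESP marker, IKE traffic */
	ESPINUDP_ESP,		/* non-zero SPI: ESP payload */
	ESPINUDP_TOO_SHORT
};

static enum espinudp_kind classify_espinudp(const uint8_t *data, size_t len)
{
	uint32_t first_word;

	if (len == 1 && data[0] == 0xff)
		return ESPINUDP_KEEPALIVE;

	if (len < sizeof(first_word))
		return ESPINUDP_TOO_SHORT;

	memcpy(&first_word, data, sizeof(first_word));
	return first_word == 0 ? ESPINUDP_IKE : ESPINUDP_ESP;
}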
struct sk_buff *init_skb_for_test(struct tuple *tuple, u_int8_t protocol)
{
	__u32 l3_len;
	__u32 l4_len;
	struct tcphdr *tcp_header;
	struct udphdr *udp_header;
	struct icmphdr *icmp_header;
	struct iphdr *ip_header = NULL;
	struct sk_buff *skb;

	switch (protocol) {
	case IPPROTO_TCP:
		l4_len = sizeof(struct tcphdr);
		break;
	case IPPROTO_UDP:
		l4_len = sizeof(struct udphdr);
		break;
	case IPPROTO_ICMP:
		l4_len = sizeof(struct icmphdr);
		break;
	default:
		log_warning("Invalid protocol 1: %u", protocol);
		return NULL;
	}

	l3_len = sizeof(struct iphdr);
	skb = alloc_skb(LL_MAX_HEADER + l3_len + l4_len + SKB_PAYLOAD,
			GFP_ATOMIC);
	if (!skb) {
		log_warning("New packet allocation failed.");
		return NULL;
	}

	skb_reserve(skb, LL_MAX_HEADER);
	skb_put(skb, l3_len + l4_len + SKB_PAYLOAD);

	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb_set_transport_header(skb, l3_len);

	ip_header = ip_hdr(skb);
	memset(ip_header, 0, sizeof(struct iphdr));

	switch (protocol) {
	case IPPROTO_TCP:
		tcp_header = tcp_hdr(skb);
		memset(tcp_header, 0, l4_len);

		tcp_header->source = tuple->src.l4_id;
		tcp_header->dest = tuple->dst.l4_id;
		break;
	case IPPROTO_UDP:
		udp_header = udp_hdr(skb);
		memset(udp_header, 0, l4_len);

		udp_header->source = tuple->src.l4_id;
		udp_header->dest = tuple->dst.l4_id;
		udp_header->len = htons(sizeof(struct udphdr) + SKB_PAYLOAD);
		udp_header->check = 0;
		break;
	case IPPROTO_ICMP:
		icmp_header = icmp_hdr(skb);
		memset(icmp_header, 0, l4_len);

		icmp_header->type = ICMP_ECHO;
		/* icmp_header->type = ICMP_ECHOREPLY; */
		/* icmp6_header->icmp6_type = ICMPV6_ECHO_REQUEST; */
		/* icmp6_header->icmp6_type = ICMPV6_ECHO_REPLY; */
		break;
	default:
		log_warning("Invalid protocol 2: %u", protocol);
		kfree_skb(skb);
		return NULL;
	}

	ip_header->version = 4;
	ip_header->ihl = sizeof(struct iphdr) / 4;
	ip_header->tos = 0;
	ip_header->tot_len = htons(l3_len + l4_len + SKB_PAYLOAD);
	ip_header->id = htons(111);
	ip_header->frag_off = 0;
	ip_header->ttl = 64;
	ip_header->protocol = protocol;
	ip_header->check = 0;
	/* skb_forward_csum(skb); */
	ip_header->saddr = tuple->src.addr.ipv4.s_addr;
	ip_header->daddr = tuple->dst.addr.ipv4.s_addr;

	skb->protocol = htons(ETH_P_IP);

	return skb;
}
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	unsigned int unfrag_ip6hlen, unfrag_len;
	struct frag_hdr *fptr;
	u8 *packet_start, *prevhdr;
	u8 nexthdr;
	u8 frag_hdr_sz = sizeof(struct frag_hdr);
	__wsum csum;
	int tnl_hlen;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		/* Set the IPv6 fragment id if not set yet */
		if (!skb_shinfo(skb)->ip6_frag_id)
			ipv6_proxy_select_ident(dev_net(skb->dev), skb);

		segs = NULL;
		goto out;
	}

	if (skb->encapsulation && skb_shinfo(skb)->gso_type &
	    (SKB_GSO_UDP_TUNNEL | SKB_GSO_UDP_TUNNEL_CSUM))
		segs = skb_udp_tunnel_segment(skb, features, true);
	else {
		const struct ipv6hdr *ipv6h;
		struct udphdr *uh;

		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;

		/* Do software UFO. Complete and fill in the UDP checksum as HW
		 * cannot do checksum of UDP packets sent as multiple IP fragments.
		 */
		uh = udp_hdr(skb);
		ipv6h = ipv6_hdr(skb);

		uh->check = 0;
		csum = skb_checksum(skb, 0, skb->len, 0);
		uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
					 &ipv6h->daddr, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* If there is no outer header we can fake a checksum offload
		 * due to the fact that we have already done the checksum in
		 * software prior to segmenting the frame.
		 */
		if (!skb->encap_hdr_csum)
			features |= NETIF_F_HW_CSUM;

		/* Check if there is enough headroom to insert fragment header. */
		tnl_hlen = skb_tnl_header_len(skb);
		if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
			if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
				goto out;
		}

		/* Find the unfragmentable header and shift it left by
		 * frag_hdr_sz bytes to insert fragment header.
		 */
		err = ip6_find_1stfragopt(skb, &prevhdr);
		if (err < 0)
			return ERR_PTR(err);
		unfrag_ip6hlen = err;
		nexthdr = *prevhdr;
		*prevhdr = NEXTHDR_FRAGMENT;
		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
			     unfrag_ip6hlen + tnl_hlen;
		packet_start = (u8 *)skb->head + SKB_GSO_CB(skb)->mac_offset;
		memmove(packet_start - frag_hdr_sz, packet_start, unfrag_len);

		SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
		skb->mac_header -= frag_hdr_sz;
		skb->network_header -= frag_hdr_sz;

		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
		fptr->nexthdr = nexthdr;
		fptr->reserved = 0;
		if (!skb_shinfo(skb)->ip6_frag_id)
			ipv6_proxy_select_ident(dev_net(skb->dev), skb);
		fptr->identification = skb_shinfo(skb)->ip6_frag_id;

		/* Fragment the skb. ipv6 header and the remaining fields of the
		 * fragment header are updated in ipv6_gso_segment()
		 */
		segs = skb_segment(skb, features);
	}

out:
	return segs;
}
void rxrpc_reject_packets(struct work_struct *work)
{
	union {
		struct sockaddr sa;
		struct sockaddr_in sin;
	} sa;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_header hdr;
	struct rxrpc_local *local;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iov[2];
	size_t size;
	__be32 code;

	local = container_of(work, struct rxrpc_local, rejecter);
	rxrpc_get_local(local);

	_enter("%d", local->debug_id);

	iov[0].iov_base = &hdr;
	iov[0].iov_len = sizeof(hdr);
	iov[1].iov_base = &code;
	iov[1].iov_len = sizeof(code);
	size = sizeof(hdr) + sizeof(code);

	msg.msg_name = &sa;
	msg.msg_control = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags = 0;

	memset(&sa, 0, sizeof(sa));
	sa.sa.sa_family = local->srx.transport.family;
	switch (sa.sa.sa_family) {
	case AF_INET:
		msg.msg_namelen = sizeof(sa.sin);
		break;
	default:
		msg.msg_namelen = 0;
		break;
	}

	memset(&hdr, 0, sizeof(hdr));
	hdr.type = RXRPC_PACKET_TYPE_ABORT;

	while ((skb = skb_dequeue(&local->reject_queue))) {
		sp = rxrpc_skb(skb);
		switch (sa.sa.sa_family) {
		case AF_INET:
			sa.sin.sin_port = udp_hdr(skb)->source;
			sa.sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
			code = htonl(skb->priority);

			hdr.epoch = sp->hdr.epoch;
			hdr.cid = sp->hdr.cid;
			hdr.callNumber = sp->hdr.callNumber;
			hdr.serviceId = sp->hdr.serviceId;
			hdr.flags = sp->hdr.flags;
			hdr.flags ^= RXRPC_CLIENT_INITIATED;
			hdr.flags &= RXRPC_CLIENT_INITIATED;

			kernel_sendmsg(local->socket, &msg, iov, 2, size);
			break;

		default:
			break;
		}

		rxrpc_free_skb(skb);
		rxrpc_put_local(local);
	}

	rxrpc_put_local(local);
	_leave("");
}
/**
 * key_extract - extracts a flow key from an Ethernet frame.
 * @skb: sk_buff that contains the frame, with skb->data pointing to the
 * Ethernet header
 * @key: output flow key
 *
 * The caller must ensure that skb->len >= ETH_HLEN.
 *
 * Returns 0 if successful, otherwise a negative errno value.
 *
 * Initializes @skb header pointers as follows:
 *
 *    - skb->mac_header: the Ethernet header.
 *
 *    - skb->network_header: just past the Ethernet header, or just past the
 *      VLAN header, to the first byte of the Ethernet payload.
 *
 *    - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
 *      on output, then just past the IP header, if one is present and
 *      of a correct length, otherwise the same as skb->network_header.
 *      For other key->eth.type values it is left untouched.
 */
static int key_extract(struct sk_buff *skb, struct sw_flow_key *key)
{
	int error;
	struct ethhdr *eth;

	/* Flags are always used as part of stats */
	key->tp.flags = 0;

	skb_reset_mac_header(skb);

	/* Link layer. We are guaranteed to have at least the 14 byte Ethernet
	 * header in the linear data area.
	 */
	eth = eth_hdr(skb);
	ether_addr_copy(key->eth.src, eth->h_source);
	ether_addr_copy(key->eth.dst, eth->h_dest);

	__skb_pull(skb, 2 * ETH_ALEN);
	/* We are going to push all headers that we pull, so no need to
	 * update skb->csum here.
	 */

	key->eth.tci = 0;
	if (vlan_tx_tag_present(skb))
		key->eth.tci = htons(vlan_get_tci(skb));
	else if (eth->h_proto == htons(ETH_P_8021Q))
		if (unlikely(parse_vlan(skb, key)))
			return -ENOMEM;

	key->eth.type = parse_ethertype(skb);
	if (unlikely(key->eth.type == htons(0)))
		return -ENOMEM;

	skb_reset_network_header(skb);
	skb_reset_mac_len(skb);
	__skb_push(skb, skb->data - skb_mac_header(skb));

	/* Network layer. */
	if (key->eth.type == htons(ETH_P_IP)) {
		struct iphdr *nh;
		__be16 offset;

		error = check_iphdr(skb);
		if (unlikely(error)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
			if (error == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			}
			return error;
		}

		nh = ip_hdr(skb);
		key->ipv4.addr.src = nh->saddr;
		key->ipv4.addr.dst = nh->daddr;

		key->ip.proto = nh->protocol;
		key->ip.tos = nh->tos;
		key->ip.ttl = nh->ttl;

		offset = nh->frag_off & htons(IP_OFFSET);
		if (offset) {
			key->ip.frag = OVS_FRAG_TYPE_LATER;
			return 0;
		}
		if (nh->frag_off & htons(IP_MF) ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;
		else
			key->ip.frag = OVS_FRAG_TYPE_NONE;

		/* Transport layer. */
		if (key->ip.proto == IPPROTO_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);

				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);

				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);

				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == IPPROTO_ICMP) {
			if (icmphdr_ok(skb)) {
				struct icmphdr *icmp = icmp_hdr(skb);

				/* The ICMP type and code fields use the 16-bit
				 * transport port fields, so we need to store
				 * them in 16-bit network byte order.
				 */
				key->tp.src = htons(icmp->type);
				key->tp.dst = htons(icmp->code);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	} else if (key->eth.type == htons(ETH_P_ARP) ||
		   key->eth.type == htons(ETH_P_RARP)) {
		struct arp_eth_header *arp;
		bool arp_available = arphdr_ok(skb);

		arp = (struct arp_eth_header *)skb_network_header(skb);

		if (arp_available &&
		    arp->ar_hrd == htons(ARPHRD_ETHER) &&
		    arp->ar_pro == htons(ETH_P_IP) &&
		    arp->ar_hln == ETH_ALEN &&
		    arp->ar_pln == 4) {
			/* We only match on the lower 8 bits of the opcode. */
			if (ntohs(arp->ar_op) <= 0xff)
				key->ip.proto = ntohs(arp->ar_op);
			else
				key->ip.proto = 0;

			memcpy(&key->ipv4.addr.src, arp->ar_sip,
			       sizeof(key->ipv4.addr.src));
			memcpy(&key->ipv4.addr.dst, arp->ar_tip,
			       sizeof(key->ipv4.addr.dst));
			ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha);
			ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha);
		} else {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv4, 0, sizeof(key->ipv4));
		}
	} else if (eth_p_mpls(key->eth.type)) {
		size_t stack_len = MPLS_HLEN;

		/* In the presence of an MPLS label stack the end of the L2
		 * header and the beginning of the L3 header differ.
		 *
		 * Advance network_header to the beginning of the L3
		 * header. mac_len corresponds to the end of the L2 header.
		 */
		while (1) {
			__be32 lse;

			error = check_header(skb, skb->mac_len + stack_len);
			if (unlikely(error))
				return 0;

			memcpy(&lse, skb_network_header(skb), MPLS_HLEN);

			if (stack_len == MPLS_HLEN)
				memcpy(&key->mpls.top_lse, &lse, MPLS_HLEN);

			skb_set_network_header(skb, skb->mac_len + stack_len);
			if (lse & htonl(MPLS_LS_S_MASK))
				break;

			stack_len += MPLS_HLEN;
		}
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		int nh_len;             /* IPv6 Header + Extensions */

		nh_len = parse_ipv6hdr(skb, key);
		if (unlikely(nh_len < 0)) {
			memset(&key->ip, 0, sizeof(key->ip));
			memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr));
			if (nh_len == -EINVAL) {
				skb->transport_header = skb->network_header;
				error = 0;
			} else {
				error = nh_len;
			}
			return error;
		}

		if (key->ip.frag == OVS_FRAG_TYPE_LATER)
			return 0;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			key->ip.frag = OVS_FRAG_TYPE_FIRST;

		/* Transport layer. */
		if (key->ip.proto == NEXTHDR_TCP) {
			if (tcphdr_ok(skb)) {
				struct tcphdr *tcp = tcp_hdr(skb);

				key->tp.src = tcp->source;
				key->tp.dst = tcp->dest;
				key->tp.flags = TCP_FLAGS_BE16(tcp);
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_UDP) {
			if (udphdr_ok(skb)) {
				struct udphdr *udp = udp_hdr(skb);

				key->tp.src = udp->source;
				key->tp.dst = udp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_SCTP) {
			if (sctphdr_ok(skb)) {
				struct sctphdr *sctp = sctp_hdr(skb);

				key->tp.src = sctp->source;
				key->tp.dst = sctp->dest;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		} else if (key->ip.proto == NEXTHDR_ICMP) {
			if (icmp6hdr_ok(skb)) {
				error = parse_icmpv6(skb, key, nh_len);
				if (error)
					return error;
			} else {
				memset(&key->tp, 0, sizeof(key->tp));
			}
		}
	}

	return 0;
}
static inline struct genevehdr *geneve_hdr(const struct sk_buff *skb)
{
	return (struct genevehdr *)(udp_hdr(skb) + 1);
}
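/*
 * Both lisp_hdr() and geneve_hdr() above use the same pointer-arithmetic
 * idiom: udp_hdr(skb) + 1 advances by sizeof(struct udphdr) (8 bytes), so
 * the cast lands on the first byte of the tunnel header carried in the UDP
 * payload.  The following is only a standalone userspace illustration of
 * that idiom on a raw buffer; the struct names are hypothetical and not the
 * kernel definitions.
 */
#include <stdint.h>
#include <stddef.h>

struct udp_wire_hdr {		/* 8-byte UDP header as it appears on the wire */
	uint16_t source;
	uint16_t dest;
	uint16_t len;
	uint16_t check;
};

struct tun_wire_hdr {		/* hypothetical 8-byte tunnel header in the payload */
	uint8_t flags;
	uint8_t reserved[3];
	uint8_t vni[3];
	uint8_t proto;
};

/* Return the tunnel header that immediately follows the UDP header, or NULL
 * if the L4 buffer is too short.  (uh + 1) skips exactly sizeof(*uh) bytes,
 * just like (udp_hdr(skb) + 1) in the accessors above. */
static const struct tun_wire_hdr *tun_hdr_of(const void *l4, size_t l4_len)
{
	const struct udp_wire_hdr *uh = l4;

	if (l4_len < sizeof(*uh) + sizeof(struct tun_wire_hdr))
		return NULL;

	return (const struct tun_wire_hdr *)(uh + 1);
}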