/* Push an MPLS label-stack entry onto @skb, implementing the
 * OVS_ACTION_ATTR_PUSH_MPLS action: insert MPLS_HLEN bytes between the L2
 * header and the payload, write the new label stack entry, and rewrite the
 * Ethertype/protocol to @mpls->mpls_ethertype.
 *
 * Returns 0 on success, -ENOTSUPP if the skb already carries tunnel
 * encapsulation (tunnel and MPLS GSO cannot coexist), or -ENOMEM if the
 * required headroom cannot be obtained.
 */
static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
		     const struct ovs_action_push_mpls *mpls)
{
	struct mpls_shim_hdr *new_mpls_lse;

	/* Networking stack do not allow simultaneous Tunnel and MPLS GSO. */
	if (skb->encapsulation)
		return -ENOTSUPP;

	/* Guarantee MPLS_HLEN bytes of writable headroom for the label. */
	if (skb_cow_head(skb, MPLS_HLEN) < 0)
		return -ENOMEM;

	/* On the first push only, snapshot the pre-MPLS protocol and network
	 * header position as the "inner" ones, so segmentation can still find
	 * the encapsulated packet later.
	 */
	if (!skb->inner_protocol) {
		skb_set_inner_network_header(skb, skb->mac_len);
		skb_set_inner_protocol(skb, skb->protocol);
	}

	/* Open a MPLS_HLEN gap after the L2 header: grow the head, slide the
	 * MAC header down into the new space, then re-point the mac/network
	 * header offsets.  Order matters: each step's offsets depend on the
	 * previous one.
	 */
	skb_push(skb, MPLS_HLEN);
	memmove(skb_mac_header(skb) - MPLS_HLEN, skb_mac_header(skb),
		skb->mac_len);
	skb_reset_mac_header(skb);
	skb_set_network_header(skb, skb->mac_len);

	/* Write the new label stack entry into the freed slot. */
	new_mpls_lse = mpls_hdr(skb);
	new_mpls_lse->label_stack_entry = mpls->mpls_lse;

	/* Fold the newly written label bytes into the skb checksum. */
	skb_postpush_rcsum(skb, new_mpls_lse, MPLS_HLEN);

	/* Only Ethernet frames carry an Ethertype field to rewrite in-place;
	 * either way the skb's logical protocol becomes the MPLS one.
	 */
	if (ovs_key_mac_proto(key) == MAC_PROTO_ETHERNET)
		update_ethertype(skb, eth_hdr(skb), mpls->mpls_ethertype);
	skb->protocol = mpls->mpls_ethertype;

	/* The extracted flow key no longer matches the packet contents. */
	invalidate_flow_key(key);
	return 0;
}
/* GRO completion callback for IPv6: patch up the merged super-packet's IPv6
 * header (payload_len grew during merging) and delegate to the upper-layer
 * protocol's gro_complete callback.
 *
 * @skb:   the merged GRO packet.
 * @nhoff: byte offset of the IPv6 header within skb->data.
 *
 * Returns the upper protocol's gro_complete result, or -ENOSYS if no
 * offload handler with a gro_complete callback is found (unexpected, hence
 * the WARN_ON).
 */
static int ipv6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct net_offload *ops;
	struct ipv6hdr *iph = (struct ipv6hdr *)(skb->data + nhoff);
	int err = -ENOSYS;

	/* For encapsulated packets, record where the inner IPv6 header sits
	 * so later segmentation of the merged skb can locate it.
	 */
	if (skb->encapsulation) {
		skb_set_inner_protocol(skb, cpu_to_be16(ETH_P_IPV6));
		skb_set_inner_network_header(skb, nhoff);
	}

	/* Recompute payload_len for the coalesced length; the header still
	 * carries the first segment's (smaller) value.
	 */
	iph->payload_len = htons(skb->len - nhoff - sizeof(*iph));

	/* The offload ops table is RCU-protected. */
	rcu_read_lock();

	/* Advance past the fixed header plus any extension headers;
	 * ipv6_exthdrs_len() also reports the offload ops for the final
	 * next-header protocol via @ops.
	 */
	nhoff += sizeof(*iph) + ipv6_exthdrs_len(iph, &ops);
	if (WARN_ON(!ops || !ops->callbacks.gro_complete))
		goto out_unlock;

	err = ops->callbacks.gro_complete(skb, nhoff);

out_unlock:
	rcu_read_unlock();
	return err;
}