/* Look up the receiving tunnel for a GRE-encapsulated skb and hand the
 * packet to it.  Returns PACKET_RCVD if the skb was consumed, PACKET_REJECT
 * on resource failure, or PACKET_NEXT if no tunnel matched.
 */
static int __ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		       struct ip_tunnel_net *itn, int hdr_len, bool raw_proto)
{
	struct metadata_dst *tun_dst = NULL;
	const struct iphdr *iph;
	struct ip_tunnel *tunnel;

	iph = ip_hdr(skb);
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, tpi->flags,
				  iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		if (__iptunnel_pull_header(skb, hdr_len, tpi->proto,
					   raw_proto, false) < 0)
			goto drop;

		if (tunnel->dev->type != ARPHRD_NONE)
			skb_pop_mac_header(skb);
		else
			skb_reset_mac_header(skb);
		if (tunnel->collect_md) {
			__be16 flags;
			__be64 tun_id;

			flags = tpi->flags & (TUNNEL_CSUM | TUNNEL_KEY);
			tun_id = key32_to_tunnel_id(tpi->key);
			tun_dst = ip_tun_rx_dst(skb, flags, tun_id, 0);
			if (!tun_dst)
				return PACKET_REJECT;
		}

		ip_tunnel_rcv(tunnel, skb, tpi, tun_dst, log_ecn_error);
		return PACKET_RCVD;
	}
	return PACKET_NEXT;

drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
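/* For context: a minimal sketch of how a caller dispatches into
 * __ipgre_rcv(), modelled on mainline ipgre_rcv().  The per-net ids
 * (gre_tap_net_id, ipgre_net_id) and the retry-as-raw-protocol rule are
 * taken from that code and are assumptions relative to this listing.
 */
static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
		     int hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct ip_tunnel_net *itn;
	int res;

	if (tpi->proto == htons(ETH_P_TEB))
		itn = net_generic(net, gre_tap_net_id);
	else
		itn = net_generic(net, ipgre_net_id);

	res = __ipgre_rcv(skb, tpi, itn, hdr_len, false);
	if (res == PACKET_NEXT && tpi->proto == htons(ETH_P_TEB)) {
		/* ipgre tunnels in collect-metadata mode should also
		 * receive ETH_P_TEB traffic; retry against the ipgre
		 * table with raw_proto set.
		 */
		itn = net_generic(net, ipgre_net_id);
		res = __ipgre_rcv(skb, tpi, itn, hdr_len, true);
	}
	return res;
}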
/* tc "tunnel_key" action setup (extack-era variant): parse the netlink
 * request, build the encap metadata_dst for SET, and publish the new
 * parameters under tcf_lock via an RCU swap.
 */
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	int opts_len = 0;
	__be64 key_id;
	__be16 flags;
	u8 tos, ttl;
	int ret = 0;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy,
			       extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key id");
			ret = -EINVAL;
			goto err_out;
		}

		key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));

		flags = TUNNEL_KEY | TUNNEL_CSUM;
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			flags &= ~TUNNEL_CSUM;

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl,
						      dst_port, 0, flags,
						      key_id, 0);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto release_tun_meta;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_tunnel_key_ops, bind, true);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto release_tun_meta;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		ret = -EEXIST;
		goto release_tun_meta;
	}

	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		ret = -ENOMEM;
		exists = true;
		goto release_tun_meta;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	spin_lock_bh(&t->tcf_lock);
	t->tcf_action = parm->action;
	rcu_swap_protected(t->params, params_new,
			   lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	/* after the swap, params_new holds the old parameters (if any) */
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

release_tun_meta:
	/* metadata stays NULL on the RELEASE path, so guard the release */
	if (metadata)
		dst_release(&metadata->dst);

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, parm->index);
	return ret;
}
/* Earlier, RTNL-protected variant of tunnel_key_init for comparison: same
 * netlink layout, but tcf_hash_* lifetime helpers and an
 * rtnl_dereference()/rcu_assign_pointer() update instead of tcf_lock.
 */
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_old;
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	__be64 key_id;
	int ret = 0;
	int err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			ret = -EINVAL;
			goto err_out;
		}

		key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
						    dst_port, TUNNEL_KEY,
						    key_id, 0);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			/* note: tp_dst precedes the flow label in
			 * __ipv6_tun_set_dst()
			 */
			metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0,
						      dst_port, 0, TUNNEL_KEY,
						      key_id, 0);
		}

		if (!metadata) {
			ret = -EINVAL;
			goto err_out;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_tunnel_key_ops, bind, true);
		if (ret)
			return ret;

		ret = ACT_P_CREATED;
	} else {
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	t = to_tunnel_key(*a);

	ASSERT_RTNL();
	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		if (ret == ACT_P_CREATED)
			tcf_hash_release(*a, bind);
		return -ENOMEM;
	}

	params_old = rtnl_dereference(t->params);

	params_new->action = parm->action;
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	rcu_assign_pointer(t->params, params_new);

	if (params_old)
		kfree_rcu(params_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);

	return ret;

err_out:
	if (exists)
		tcf_hash_release(*a, bind);
	return ret;
}
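/* Reader side of the RCU handoff that both init variants perform: a sketch
 * modelled on mainline tunnel_key_act().  The statistics helpers and the
 * READ_ONCE() of tcf_action follow the newer (tcf_lock) variant and are
 * assumptions relative to this listing.
 */
static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;
	int action;

	params = rcu_dereference_bh(t->params);

	tcf_lastuse_update(&t->tcf_tm);
	bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
	action = READ_ONCE(t->tcf_action);

	switch (params->tcft_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		skb_dst_drop(skb);
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		/* attach the prebuilt encap metadata_dst to the skb */
		skb_dst_drop(skb);
		skb_dst_set(skb, dst_clone(&params->tcft_enc_metadata->dst));
		break;
	default:
		WARN_ONCE(1, "Bad tunnel_key action %d.\n",
			  params->tcft_action);
		break;
	}

	return action;
}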
/* Translate a TC flower offload command into the driver's bnxt_tc_flow
 * representation, then parse the attached actions.
 */
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(mask->vlan_id, mask->vlan_priority));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		/* IPv6 tunnel keys are not supported by the hardware */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
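/* GET_KEY/GET_MASK and VLAN_TCI are local helpers in bnxt_tc.c; sketched
 * here for readability.  These are assumed definitions matching how the
 * mainline driver implements them, not part of the listing above.
 */
#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))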
/* ERSPAN receive path (Open vSwitch out-of-tree compat flavor: note the
 * rpl_/ovs_ helpers and the kfree() of tun_dst after the skb is handed off).
 */
static int erspan_rcv(struct sk_buff *skb, struct tnl_ptk_info *tpi,
		      int gre_hdr_len)
{
	struct net *net = dev_net(skb->dev);
	struct metadata_dst *tun_dst = NULL;
	struct erspan_base_hdr *ershdr;
	struct erspan_metadata *pkt_md;
	struct ip_tunnel_net *itn;
	struct ip_tunnel *tunnel;
	const struct iphdr *iph;
	struct erspan_md2 *md2;
	int ver;
	int len;

	itn = net_generic(net, erspan_net_id);
	len = gre_hdr_len + sizeof(*ershdr);

	/* Check the base hdr len */
	if (unlikely(!pskb_may_pull(skb, len)))
		return PACKET_REJECT;

	iph = ip_hdr(skb);
	ershdr = (struct erspan_base_hdr *)(skb->data + gre_hdr_len);
	ver = ershdr->ver;

	/* The original GRE header does not have a key field;
	 * use the ERSPAN 10-bit session ID as the key.
	 */
	tpi->key = cpu_to_be32(get_session_id(ershdr));
	tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex,
				  tpi->flags, iph->saddr, iph->daddr, tpi->key);

	if (tunnel) {
		len = gre_hdr_len + erspan_hdr_len(ver);
		if (unlikely(!pskb_may_pull(skb, len)))
			return PACKET_REJECT;

		ershdr = (struct erspan_base_hdr *)skb->data;
		pkt_md = (struct erspan_metadata *)(ershdr + 1);

		if (__iptunnel_pull_header(skb, len, htons(ETH_P_TEB),
					   false, false) < 0)
			goto drop;

		if (tunnel->collect_md) {
			struct ip_tunnel_info *info;
			struct erspan_metadata *md;
			__be64 tun_id;
			__be16 flags;

			tpi->flags |= TUNNEL_KEY;
			flags = tpi->flags;
			tun_id = key32_to_tunnel_id(tpi->key);

			tun_dst = rpl_ip_tun_rx_dst(skb, flags, tun_id,
						    sizeof(*md));
			if (!tun_dst)
				return PACKET_REJECT;

			md = ip_tunnel_info_opts(&tun_dst->u.tun_info);
			md->version = ver;
			md2 = &md->u.md2;
			memcpy(md2, pkt_md, ver == 1 ? ERSPAN_V1_MDSIZE :
						       ERSPAN_V2_MDSIZE);

			info = &tun_dst->u.tun_info;
			info->key.tun_flags |= TUNNEL_ERSPAN_OPT;
			info->options_len = sizeof(*md);
		}

		skb_reset_mac_header(skb);
		ovs_ip_tunnel_rcv(tunnel->dev, skb, tun_dst);
		kfree(tun_dst);
		return PACKET_RCVD;
	}
	/* no matching tunnel: fall through and drop */
drop:
	kfree_skb(skb);
	return PACKET_RCVD;
}
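/* For reference, the version-dependent lengths used above come from
 * include/net/erspan.h.  A sketch, assumed to match the mainline header:
 *
 *   #define ERSPAN_V1_MDSIZE	4	// __be32 index (type II)
 *   #define ERSPAN_V2_MDSIZE	8	// struct erspan_md2 (type III)
 *
 *   static inline int erspan_hdr_len(int version)
 *   {
 *	return sizeof(struct erspan_base_hdr) +
 *	       (version == 1 ? ERSPAN_V1_MDSIZE : ERSPAN_V2_MDSIZE);
 *   }
 *
 * get_session_id() extracts the 10-bit session ID from the base header's
 * split bitfields, which is why the code above widens it with cpu_to_be32()
 * before using it as the tunnel key.
 */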