Example #1
static int nft_queue_init(const struct nft_ctx *ctx,
			  const struct nft_expr *expr,
			  const struct nlattr * const tb[])
{
	struct nft_queue *priv = nft_expr_priv(expr);
	u32 maxid;

	priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));

	if (tb[NFTA_QUEUE_TOTAL])
		priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL]));
	else
		priv->queues_total = 1;

	if (priv->queues_total == 0)
		return -EINVAL;

	maxid = priv->queues_total - 1 + priv->queuenum;
	if (maxid > U16_MAX)
		return -ERANGE;

	if (tb[NFTA_QUEUE_FLAGS]) {
		priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
		if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
			return -EINVAL;
	}
	return 0;
}
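For context, an nft expression init callback like the one above never parses the netlink message itself: the nf_tables core fills in tb[] from a per-expression attribute policy before calling it. A minimal sketch of what such a policy looks like for these attributes (the exact entries are an assumption for illustration, not a quote of the kernel source):

static const struct nla_policy nft_queue_policy[NFTA_QUEUE_MAX + 1] = {
	/* queue number, total and flags all travel as 16-bit attributes,
	 * which is why the init code above reads them with nla_get_be16() */
	[NFTA_QUEUE_NUM]	= { .type = NLA_U16 },
	[NFTA_QUEUE_TOTAL]	= { .type = NLA_U16 },
	[NFTA_QUEUE_FLAGS]	= { .type = NLA_U16 },
};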
Example #2
/* This function returns true when ENCAP attributes are present in the nl msg */
static bool ipip_netlink_encap_parms(struct nlattr *data[],
				     struct ip_tunnel_encap *ipencap)
{
	bool ret = false;

	memset(ipencap, 0, sizeof(*ipencap));

	if (!data)
		return ret;

	if (data[IFLA_IPTUN_ENCAP_TYPE]) {
		ret = true;
		ipencap->type = nla_get_u16(data[IFLA_IPTUN_ENCAP_TYPE]);
	}

	if (data[IFLA_IPTUN_ENCAP_FLAGS]) {
		ret = true;
		ipencap->flags = nla_get_u16(data[IFLA_IPTUN_ENCAP_FLAGS]);
	}

	if (data[IFLA_IPTUN_ENCAP_SPORT]) {
		ret = true;
		ipencap->sport = nla_get_be16(data[IFLA_IPTUN_ENCAP_SPORT]);
	}

	if (data[IFLA_IPTUN_ENCAP_DPORT]) {
		ret = true;
		ipencap->dport = nla_get_be16(data[IFLA_IPTUN_ENCAP_DPORT]);
	}

	return ret;
}
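The boolean return value lets the caller distinguish "no ENCAP attributes supplied" from "attributes supplied, apply them". A hedged sketch of the usual caller pattern (the name example_newlink is illustrative; ip_tunnel_encap_setup() is the helper used to apply the parsed values):

static int example_newlink(struct net_device *dev, struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct ip_tunnel *t = netdev_priv(dev);
	struct ip_tunnel_encap ipencap;

	/* only touch the encap configuration when at least one
	 * IFLA_IPTUN_ENCAP_* attribute was actually present */
	if (ipip_netlink_encap_parms(data, &ipencap)) {
		int err = ip_tunnel_encap_setup(t, &ipencap);

		if (err < 0)
			return err;
	}
	/* ... rest of the link setup ... */
	return 0;
}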
Example #3
static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
			      struct lwtunnel_state **ts)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[IP_TUN_MAX + 1];
	int err;

	err = nla_parse_nested(tb, IP_TUN_MAX, attr, ip_tun_policy);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

	if (tb[IP_TUN_ID])
		tun_info->key.tun_id = nla_get_u64(tb[IP_TUN_ID]);

	if (tb[IP_TUN_DST])
		tun_info->key.ipv4_dst = nla_get_be32(tb[IP_TUN_DST]);

	if (tb[IP_TUN_SRC])
		tun_info->key.ipv4_src = nla_get_be32(tb[IP_TUN_SRC]);

	if (tb[IP_TUN_TTL])
		tun_info->key.ipv4_ttl = nla_get_u8(tb[IP_TUN_TTL]);

	if (tb[IP_TUN_TOS])
		tun_info->key.ipv4_tos = nla_get_u8(tb[IP_TUN_TOS]);

	if (tb[IP_TUN_SPORT])
		tun_info->key.tp_src = nla_get_be16(tb[IP_TUN_SPORT]);

	if (tb[IP_TUN_DPORT])
		tun_info->key.tp_dst = nla_get_be16(tb[IP_TUN_DPORT]);

	if (tb[IP_TUN_FLAGS])
		tun_info->key.tun_flags = nla_get_u16(tb[IP_TUN_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options = NULL;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}
Example #4
int nf_nat_proto_nlattr_to_range(struct nlattr *tb[],
				 struct nf_nat_ipv4_range *range)
{
	if (tb[CTA_PROTONAT_PORT_MIN]) {
		range->min.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MIN]);
		range->max.all = range->min.tcp.port;
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	if (tb[CTA_PROTONAT_PORT_MAX]) {
		range->max.all = nla_get_be16(tb[CTA_PROTONAT_PORT_MAX]);
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
	}
	return 0;
}
Example #5
static int ipgre_tunnel_validate(struct nlattr *tb[], struct nlattr *data[])
{
	__be16 flags;

	if (!data)
		return 0;

	flags = 0;
	if (data[IFLA_GRE_IFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_IFLAGS]);
	if (data[IFLA_GRE_OFLAGS])
		flags |= nla_get_be16(data[IFLA_GRE_OFLAGS]);
	if (flags & (GRE_VERSION|GRE_ROUTING))
		return -EINVAL;

	return 0;
}
Example #6
static int ip_tun_build_state(struct nlattr *attr,
			      unsigned int family, const void *cfg,
			      struct lwtunnel_state **ts,
			      struct netlink_ext_ack *extack)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP_MAX, attr,
					  ip_tun_policy, extack);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

#ifdef CONFIG_DST_CACHE
	err = dst_cache_init(&tun_info->dst_cache, GFP_KERNEL);
	if (err) {
		lwtstate_free(new_state);
		return err;
	}
#endif

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);

	if (tb[LWTUNNEL_IP_DST])
		tun_info->key.u.ipv4.dst = nla_get_in_addr(tb[LWTUNNEL_IP_DST]);

	if (tb[LWTUNNEL_IP_SRC])
		tun_info->key.u.ipv4.src = nla_get_in_addr(tb[LWTUNNEL_IP_SRC]);

	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

	if (tb[LWTUNNEL_IP_TOS])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

	if (tb[LWTUNNEL_IP_FLAGS])
		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}
Example #7
static int nft_queue_init(const struct nft_ctx *ctx,
			   const struct nft_expr *expr,
			   const struct nlattr * const tb[])
{
	struct nft_queue *priv = nft_expr_priv(expr);

	if (tb[NFTA_QUEUE_NUM] == NULL)
		return -EINVAL;

	init_hashrandom(&jhash_initval);
	priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));

	if (tb[NFTA_QUEUE_TOTAL] != NULL)
		priv->queues_total = ntohs(nla_get_be16(tb[NFTA_QUEUE_TOTAL]));
	if (tb[NFTA_QUEUE_FLAGS] != NULL) {
		priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
		if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
			return -EINVAL;
	}
	return 0;
}
Example #8
static void ipgre_netlink_parms(struct nlattr *data[], struct nlattr *tb[],
			       struct ip_tunnel_parm *parms)
{
	memset(parms, 0, sizeof(*parms));

	parms->iph.protocol = IPPROTO_GRE;

	if (!data)
		return;

	if (data[IFLA_GRE_LINK])
		parms->link = nla_get_u32(data[IFLA_GRE_LINK]);

	if (data[IFLA_GRE_IFLAGS])
		parms->i_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_IFLAGS]));

	if (data[IFLA_GRE_OFLAGS])
		parms->o_flags = gre_flags_to_tnl_flags(nla_get_be16(data[IFLA_GRE_OFLAGS]));

	if (data[IFLA_GRE_IKEY])
		parms->i_key = nla_get_be32(data[IFLA_GRE_IKEY]);

	if (data[IFLA_GRE_OKEY])
		parms->o_key = nla_get_be32(data[IFLA_GRE_OKEY]);

	if (data[IFLA_GRE_LOCAL])
		parms->iph.saddr = nla_get_be32(data[IFLA_GRE_LOCAL]);

	if (data[IFLA_GRE_REMOTE])
		parms->iph.daddr = nla_get_be32(data[IFLA_GRE_REMOTE]);

	if (data[IFLA_GRE_TTL])
		parms->iph.ttl = nla_get_u8(data[IFLA_GRE_TTL]);

	if (data[IFLA_GRE_TOS])
		parms->iph.tos = nla_get_u8(data[IFLA_GRE_TOS]);

	if (!data[IFLA_GRE_PMTUDISC] || nla_get_u8(data[IFLA_GRE_PMTUDISC]))
		parms->iph.frag_off = htons(IP_DF);
}
Example #9
static int
tunnel_key_copy_geneve_opt(const struct nlattr *nla, void *dst, int dst_len,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1];
	int err, data_len, opt_len;
	u8 *data;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
			       nla, geneve_opt_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] ||
	    !tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key geneve option class, type or data");
		return -EINVAL;
	}

	data = nla_data(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	data_len = nla_len(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]);
	if (data_len < 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is less than 4 bytes long");
		return -ERANGE;
	}
	if (data_len % 4) {
		NL_SET_ERR_MSG(extack, "Tunnel key geneve option data is not a multiple of 4 bytes long");
		return -ERANGE;
	}

	opt_len = sizeof(struct geneve_opt) + data_len;
	if (dst) {
		struct geneve_opt *opt = dst;

		WARN_ON(dst_len < opt_len);

		opt->opt_class =
			nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]);
		opt->type = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]);
		opt->length = data_len / 4; /* length is in units of 4 bytes */
		opt->r1 = 0;
		opt->r2 = 0;
		opt->r3 = 0;

		memcpy(opt + 1, data, data_len);
	}

	return opt_len;
}
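The nested parse at the top of this helper is driven by an nla_policy for the GENEVE option attributes; a minimal sketch of its shape (the NLA_BINARY length cap shown is an assumption for illustration):

static const struct nla_policy
geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS]	= { .type = NLA_U16 },
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE]	= { .type = NLA_U8 },
	/* option data is raw bytes, validated by the length checks above */
	[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA]	= { .type = NLA_BINARY, .len = 128 },
};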
Example #10
static int vlan_validate(struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct ifla_vlan_flags *flags;
	u16 id;
	int err;

	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VLAN_PROTOCOL]) {
		switch (nla_get_be16(data[IFLA_VLAN_PROTOCOL])) {
		case htons(ETH_P_8021Q):
		case htons(ETH_P_8021AD):
			break;
		default:
			return -EPROTONOSUPPORT;
		}
	}

	if (data[IFLA_VLAN_ID]) {
		id = nla_get_u16(data[IFLA_VLAN_ID]);
		if (id >= VLAN_VID_MASK)
			return -ERANGE;
	}
	if (data[IFLA_VLAN_FLAGS]) {
		flags = nla_data(data[IFLA_VLAN_FLAGS]);
		if ((flags->flags & flags->mask) &
		    ~(VLAN_FLAG_REORDER_HDR | VLAN_FLAG_GVRP |
		      VLAN_FLAG_LOOSE_BINDING | VLAN_FLAG_MVRP))
			return -EINVAL;
	}

	err = vlan_validate_qos_map(data[IFLA_VLAN_INGRESS_QOS]);
	if (err < 0)
		return err;
	err = vlan_validate_qos_map(data[IFLA_VLAN_EGRESS_QOS]);
	if (err < 0)
		return err;
	return 0;
}
Example #11
static int ip6_tun_build_state(struct nlattr *attr,
			       unsigned int family, const void *cfg,
			       struct lwtunnel_state **ts,
			       struct netlink_ext_ack *extack)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP6_MAX + 1];
	int err;

	err = nla_parse_nested_deprecated(tb, LWTUNNEL_IP6_MAX, attr,
					  ip6_tun_policy, extack);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP6;

	tun_info = lwt_tun_info(new_state);

	if (tb[LWTUNNEL_IP6_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP6_ID]);

	if (tb[LWTUNNEL_IP6_DST])
		tun_info->key.u.ipv6.dst = nla_get_in6_addr(tb[LWTUNNEL_IP6_DST]);

	if (tb[LWTUNNEL_IP6_SRC])
		tun_info->key.u.ipv6.src = nla_get_in6_addr(tb[LWTUNNEL_IP6_SRC]);

	if (tb[LWTUNNEL_IP6_HOPLIMIT])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP6_HOPLIMIT]);

	if (tb[LWTUNNEL_IP6_TC])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP6_TC]);

	if (tb[LWTUNNEL_IP6_FLAGS])
		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP6_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX | IP_TUNNEL_INFO_IPV6;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}
Example #12
static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
			      unsigned int family, const void *cfg,
			      struct lwtunnel_state **ts)
{
	struct ip_tunnel_info *tun_info;
	struct lwtunnel_state *new_state;
	struct nlattr *tb[LWTUNNEL_IP_MAX + 1];
	int err;

	err = nla_parse_nested(tb, LWTUNNEL_IP_MAX, attr, ip_tun_policy);
	if (err < 0)
		return err;

	new_state = lwtunnel_state_alloc(sizeof(*tun_info));
	if (!new_state)
		return -ENOMEM;

	new_state->type = LWTUNNEL_ENCAP_IP;

	tun_info = lwt_tun_info(new_state);

	if (tb[LWTUNNEL_IP_ID])
		tun_info->key.tun_id = nla_get_be64(tb[LWTUNNEL_IP_ID]);

	if (tb[LWTUNNEL_IP_DST])
		tun_info->key.u.ipv4.dst = nla_get_be32(tb[LWTUNNEL_IP_DST]);

	if (tb[LWTUNNEL_IP_SRC])
		tun_info->key.u.ipv4.src = nla_get_be32(tb[LWTUNNEL_IP_SRC]);

	if (tb[LWTUNNEL_IP_TTL])
		tun_info->key.ttl = nla_get_u8(tb[LWTUNNEL_IP_TTL]);

	if (tb[LWTUNNEL_IP_TOS])
		tun_info->key.tos = nla_get_u8(tb[LWTUNNEL_IP_TOS]);

	if (tb[LWTUNNEL_IP_FLAGS])
		tun_info->key.tun_flags = nla_get_be16(tb[LWTUNNEL_IP_FLAGS]);

	tun_info->mode = IP_TUNNEL_INFO_TX;
	tun_info->options_len = 0;

	*ts = new_state;

	return 0;
}
Example #13
static int icmp_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *tuple)
{
	if (!tb[CTA_PROTO_ICMP_TYPE] ||
	    !tb[CTA_PROTO_ICMP_CODE] ||
	    !tb[CTA_PROTO_ICMP_ID])
		return -EINVAL;

	tuple->dst.u.icmp.type = nla_get_u8(tb[CTA_PROTO_ICMP_TYPE]);
	tuple->dst.u.icmp.code = nla_get_u8(tb[CTA_PROTO_ICMP_CODE]);
	tuple->src.u.icmp.id = nla_get_be16(tb[CTA_PROTO_ICMP_ID]);

	if (tuple->dst.u.icmp.type >= sizeof(invmap) ||
	    !invmap[tuple->dst.u.icmp.type])
		return -EINVAL;

	return 0;
}
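The final check rejects ICMP types that have no inverse, relying on a lookup table in which each entry stores the inverse type plus one so that a zero slot means "no mapping". A sketch of such a table (entries are an assumption for illustration):

static const u_int8_t invmap[] = {
	/* value is the inverse ICMP type + 1; 0 means the type is not allowed */
	[ICMP_ECHO]		= ICMP_ECHOREPLY + 1,
	[ICMP_ECHOREPLY]	= ICMP_ECHO + 1,
	[ICMP_TIMESTAMP]	= ICMP_TIMESTAMPREPLY + 1,
	[ICMP_TIMESTAMPREPLY]	= ICMP_TIMESTAMP + 1,
	[ICMP_ADDRESS]		= ICMP_ADDRESSREPLY + 1,
	[ICMP_ADDRESSREPLY]	= ICMP_ADDRESS + 1,
};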
Example #14
static int nft_queue_sreg_init(const struct nft_ctx *ctx,
			       const struct nft_expr *expr,
			       const struct nlattr * const tb[])
{
	struct nft_queue *priv = nft_expr_priv(expr);
	int err;

	priv->sreg_qnum = nft_parse_register(tb[NFTA_QUEUE_SREG_QNUM]);
	err = nft_validate_register_load(priv->sreg_qnum, sizeof(u32));
	if (err < 0)
		return err;

	if (tb[NFTA_QUEUE_FLAGS]) {
		priv->flags = ntohs(nla_get_be16(tb[NFTA_QUEUE_FLAGS]));
		if (priv->flags & ~NFT_QUEUE_FLAG_MASK)
			return -EINVAL;
		if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT)
			return -EOPNOTSUPP;
	}

	return 0;
}
Example #15
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	int opts_len = 0;
	__be64 key_id;
	__be16 flags;
	u8 tos, ttl;
	int ret = 0;
	int err;

	if (!nla) {
		NL_SET_ERR_MSG(extack, "Tunnel requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy,
			       extack);
	if (err < 0) {
		NL_SET_ERR_MSG(extack, "Failed to parse nested tunnel key attributes");
		return err;
	}

	if (!tb[TCA_TUNNEL_KEY_PARMS]) {
		NL_SET_ERR_MSG(extack, "Missing tunnel key parameters");
		return -EINVAL;
	}

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	err = tcf_idr_check_alloc(tn, &parm->index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			NL_SET_ERR_MSG(extack, "Missing tunnel key id");
			ret = -EINVAL;
			goto err_out;
		}

		key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));

		flags = TUNNEL_KEY | TUNNEL_CSUM;
		if (tb[TCA_TUNNEL_KEY_NO_CSUM] &&
		    nla_get_u8(tb[TCA_TUNNEL_KEY_NO_CSUM]))
			flags &= ~TUNNEL_CSUM;

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_OPTS]) {
			opts_len = tunnel_key_get_opts_len(tb[TCA_TUNNEL_KEY_ENC_OPTS],
							   extack);
			if (opts_len < 0) {
				ret = opts_len;
				goto err_out;
			}
		}

		tos = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TOS])
			tos = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TOS]);
		ttl = 0;
		if (tb[TCA_TUNNEL_KEY_ENC_TTL])
			ttl = nla_get_u8(tb[TCA_TUNNEL_KEY_ENC_TTL]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, tos, ttl,
						    dst_port, flags,
						    key_id, opts_len);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, tos, ttl, dst_port,
						      0, flags,
						      key_id, 0);
		} else {
			NL_SET_ERR_MSG(extack, "Missing either ipv4 or ipv6 src and dst");
			ret = -EINVAL;
			goto err_out;
		}

		if (!metadata) {
			NL_SET_ERR_MSG(extack, "Cannot allocate tunnel metadata dst");
			ret = -ENOMEM;
			goto err_out;
		}

		if (opts_len) {
			ret = tunnel_key_opts_set(tb[TCA_TUNNEL_KEY_ENC_OPTS],
						  &metadata->u.tun_info,
						  opts_len, extack);
			if (ret < 0)
				goto release_tun_meta;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		NL_SET_ERR_MSG(extack, "Unknown tunnel key action");
		ret = -EINVAL;
		goto err_out;
	}

	if (!exists) {
		ret = tcf_idr_create(tn, parm->index, est, a,
				     &act_tunnel_key_ops, bind, true);
		if (ret) {
			NL_SET_ERR_MSG(extack, "Cannot create TC IDR");
			goto release_tun_meta;
		}

		ret = ACT_P_CREATED;
	} else if (!ovr) {
		NL_SET_ERR_MSG(extack, "TC IDR already exists");
		ret = -EEXIST;
		goto release_tun_meta;
	}

	t = to_tunnel_key(*a);

	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		NL_SET_ERR_MSG(extack, "Cannot allocate tunnel key parameters");
		ret = -ENOMEM;
		exists = true;
		goto release_tun_meta;
	}
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	spin_lock_bh(&t->tcf_lock);
	t->tcf_action = parm->action;
	rcu_swap_protected(t->params, params_new,
			   lockdep_is_held(&t->tcf_lock));
	spin_unlock_bh(&t->tcf_lock);
	if (params_new)
		kfree_rcu(params_new, rcu);

	if (ret == ACT_P_CREATED)
		tcf_idr_insert(tn, *a);

	return ret;

release_tun_meta:
	dst_release(&metadata->dst);

err_out:
	if (exists)
		tcf_idr_release(*a, bind);
	else
		tcf_idr_cleanup(tn, parm->index);
	return ret;
}
Example #16
static int nft_log_init(const struct nft_ctx *ctx,
			const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_log *priv = nft_expr_priv(expr);
	struct nf_loginfo *li = &priv->loginfo;
	const struct nlattr *nla;
	int err;

	li->type = NF_LOG_TYPE_LOG;
	if (tb[NFTA_LOG_LEVEL] != NULL &&
	    tb[NFTA_LOG_GROUP] != NULL)
		return -EINVAL;
	if (tb[NFTA_LOG_GROUP] != NULL) {
		li->type = NF_LOG_TYPE_ULOG;
		if (tb[NFTA_LOG_FLAGS] != NULL)
			return -EINVAL;
	}

	nla = tb[NFTA_LOG_PREFIX];
	if (nla != NULL) {
		priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
		if (priv->prefix == NULL)
			return -ENOMEM;
		nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
	} else {
		priv->prefix = (char *)nft_log_null_prefix;
	}

	switch (li->type) {
	case NF_LOG_TYPE_LOG:
		if (tb[NFTA_LOG_LEVEL] != NULL) {
			li->u.log.level =
				ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
		} else {
			li->u.log.level = LOGLEVEL_WARNING;
		}
		if (li->u.log.level > LOGLEVEL_DEBUG) {
			err = -EINVAL;
			goto err1;
		}

		if (tb[NFTA_LOG_FLAGS] != NULL) {
			li->u.log.logflags =
				ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
			if (li->u.log.logflags & ~NF_LOG_MASK) {
				err = -EINVAL;
				goto err1;
			}
		}
		break;
	case NF_LOG_TYPE_ULOG:
		li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
		if (tb[NFTA_LOG_SNAPLEN] != NULL) {
			li->u.ulog.flags |= NF_LOG_F_COPY_LEN;
			li->u.ulog.copy_len =
				ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
		}
		if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
			li->u.ulog.qthreshold =
				ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
		}
		break;
	}

	err = nf_logger_find_get(ctx->afi->family, li->type);
	if (err < 0)
		goto err1;

	return 0;

err1:
	if (priv->prefix != nft_log_null_prefix)
		kfree(priv->prefix);
	return err;
}
Example #17
static int
hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct hash_ipport *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipport4_elem e = { };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
	u32 ip, ip_to = 0, p = 0, port, port_to;
	bool with_ports = false;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &e.ip) ||
	      ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_PORT])
		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(e.proto);

		if (e.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	if (!(with_ports || e.proto == IPPROTO_ICMP))
		e.port = 0;

	if (adt == IPSET_TEST ||
	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
	      tb[IPSET_ATTR_PORT_TO])) {
		ret = adtfn(set, &e, &ext, &ext, flags);
		return ip_set_eexist(ret, flags) ? 0 : ret;
	}

	ip_to = ip = ntohl(e.ip);
	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		if (ip > ip_to)
			swap(ip, ip_to);
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (!cidr || cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		ip_set_mask_from_to(ip, ip_to, cidr);
	}

	port_to = port = ntohs(e.port);
	if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
		if (port > port_to)
			swap(port, port_to);
	}

	if (retried)
		ip = ntohl(h->next.ip);
	for (; !before(ip_to, ip); ip++) {
		p = retried && ip == ntohl(h->next.ip) ? ntohs(h->next.port)
						       : port;
		for (; p <= port_to; p++) {
			e.ip = htonl(ip);
			e.port = htons(p);
			ret = adtfn(set, &e, &ext, &ext, flags);

			if (ret && !ip_set_eexist(ret, flags))
				return ret;
			else
				ret = 0;
		}
	}
	return ret;
}
Example #18
static int
hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct ip_set_hash *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipport6_elem data = { };
	u32 port, port_to;
	u32 timeout = h->timeout;
	bool with_ports = false;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     tb[IPSET_ATTR_IP_TO] ||
		     tb[IPSET_ATTR_CIDR]))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &data.ip);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_PORT])
		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(data.proto);

		if (data.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	if (!(with_ports || data.proto == IPPROTO_ICMPV6))
		data.port = 0;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout(h->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
		ret = adtfn(set, &data, timeout, flags);
		return ip_set_eexist(ret, flags) ? 0 : ret;
	}

	port = ntohs(data.port);
	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
	if (port > port_to)
		swap(port, port_to);

	if (retried)
		port = ntohs(h->next.port);
	for (; port <= port_to; port++) {
		data.port = htons(port);
		ret = adtfn(set, &data, timeout, flags);

		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}
Example #19
static int tcf_vlan_init(struct net *net, struct nlattr *nla,
			 struct nlattr *est, struct tc_action *a,
			 int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, vlan_net_id);
	struct nlattr *tb[TCA_VLAN_MAX + 1];
	struct tc_vlan *parm;
	struct tcf_vlan *v;
	int action;
	__be16 push_vid = 0;
	__be16 push_proto = 0;
	int ret = 0, exists = 0;
	int err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_VLAN_MAX, nla, vlan_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_VLAN_PARMS])
		return -EINVAL;
	parm = nla_data(tb[TCA_VLAN_PARMS]);
	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	switch (parm->v_action) {
	case TCA_VLAN_ACT_POP:
		break;
	case TCA_VLAN_ACT_PUSH:
		if (!tb[TCA_VLAN_PUSH_VLAN_ID]) {
			if (exists)
				tcf_hash_release(a, bind);
			return -EINVAL;
		}
		push_vid = nla_get_u16(tb[TCA_VLAN_PUSH_VLAN_ID]);
		if (push_vid >= VLAN_VID_MASK) {
			if (exists)
				tcf_hash_release(a, bind);
			return -ERANGE;
		}

		if (tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]) {
			push_proto = nla_get_be16(tb[TCA_VLAN_PUSH_VLAN_PROTOCOL]);
			switch (push_proto) {
			case htons(ETH_P_8021Q):
			case htons(ETH_P_8021AD):
				break;
			default:
				return -EPROTONOSUPPORT;
			}
		} else {
			push_proto = htons(ETH_P_8021Q);
		}
		break;
	default:
		if (exists)
			tcf_hash_release(a, bind);
		return -EINVAL;
	}
	action = parm->v_action;

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      sizeof(*v), bind, false);
		if (ret)
			return ret;

		ret = ACT_P_CREATED;
	} else {
		tcf_hash_release(a, bind);
		if (!ovr)
			return -EEXIST;
	}

	v = to_vlan(a);

	spin_lock_bh(&v->tcf_lock);

	v->tcfv_action = action;
	v->tcfv_push_vid = push_vid;
	v->tcfv_push_proto = push_proto;

	v->tcf_action = parm->action;

	spin_unlock_bh(&v->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, a);
	return ret;
}
Example #20
static int nft_log_init(const struct nft_ctx *ctx,
			const struct nft_expr *expr,
			const struct nlattr * const tb[])
{
	struct nft_log *priv = nft_expr_priv(expr);
	struct nf_loginfo *li = &priv->loginfo;
	const struct nlattr *nla;
	int ret;

	nla = tb[NFTA_LOG_PREFIX];
	if (nla != NULL) {
		priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
		if (priv->prefix == NULL)
			return -ENOMEM;
		nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
	} else {
		priv->prefix = (char *)nft_log_null_prefix;
	}

	li->type = NF_LOG_TYPE_LOG;
	if (tb[NFTA_LOG_LEVEL] != NULL &&
	    tb[NFTA_LOG_GROUP] != NULL)
		return -EINVAL;
	if (tb[NFTA_LOG_GROUP] != NULL)
		li->type = NF_LOG_TYPE_ULOG;

	switch (li->type) {
	case NF_LOG_TYPE_LOG:
		if (tb[NFTA_LOG_LEVEL] != NULL) {
			li->u.log.level =
				ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
		} else {
			li->u.log.level = 4;
		}
		if (tb[NFTA_LOG_FLAGS] != NULL) {
			li->u.log.logflags =
				ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
		}
		break;
	case NF_LOG_TYPE_ULOG:
		li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
		if (tb[NFTA_LOG_SNAPLEN] != NULL) {
			li->u.ulog.copy_len =
				ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
		}
		if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
			li->u.ulog.qthreshold =
				ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
		}
		break;
	}

	if (ctx->afi->family == NFPROTO_INET) {
		ret = nf_logger_find_get(NFPROTO_IPV4, li->type);
		if (ret < 0)
			return ret;

		ret = nf_logger_find_get(NFPROTO_IPV6, li->type);
		if (ret < 0) {
			nf_logger_put(NFPROTO_IPV4, li->type);
			return ret;
		}
		return 0;
	}

	return nf_logger_find_get(ctx->afi->family, li->type);
}
Example #21
static int
hash_ipport6_uadt(struct ip_set *set, struct nlattr *tb[],
		  enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	const struct hash_ipport *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipport6_elem e = { };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(h);
	u32 port, port_to;
	bool with_ports = false;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     tb[IPSET_ATTR_IP_TO] ||
		     tb[IPSET_ATTR_CIDR]))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip) ||
	      ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_PORT])
		e.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(e.proto);

		if (e.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	if (!(with_ports || e.proto == IPPROTO_ICMPV6))
		e.port = 0;

	if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) {
		ret = adtfn(set, &e, &ext, &ext, flags);
		return ip_set_eexist(ret, flags) ? 0 : ret;
	}

	port = ntohs(e.port);
	port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
	if (port > port_to)
		swap(port, port_to);

	if (retried)
		port = ntohs(h->next.port);
	for (; port <= port_to; port++) {
		e.port = htons(port);
		ret = adtfn(set, &e, &ext, &ext, flags);

		if (ret && !ip_set_eexist(ret, flags))
			return ret;
		else
			ret = 0;
	}
	return ret;
}
Example #22
static int
hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
		  enum ipset_adt adt, u32 *lineno, u32 flags)
{
	const struct ip_set_hash *h = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct hash_ipport4_elem data = { };
	u32 ip, ip_to, p, port, port_to;
	u32 timeout = h->timeout;
	bool with_ports = false;
	int ret;

	if (unlikely(!tb[IPSET_ATTR_IP] ||
		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_ipaddr4(tb[IPSET_ATTR_IP], &data.ip);
	if (ret)
		return ret;

	if (tb[IPSET_ATTR_PORT])
		data.port = nla_get_be16(tb[IPSET_ATTR_PORT]);
	else
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_PROTO]) {
		data.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]);
		with_ports = ip_set_proto_with_ports(data.proto);

		if (data.proto == 0)
			return -IPSET_ERR_INVALID_PROTO;
	} else
		return -IPSET_ERR_MISSING_PROTO;

	if (!(with_ports || data.proto == IPPROTO_ICMP))
		data.port = 0;

	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout(h->timeout))
			return -IPSET_ERR_TIMEOUT;
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}

	if (adt == IPSET_TEST ||
	    !(tb[IPSET_ATTR_IP_TO] || tb[IPSET_ATTR_CIDR] ||
	      tb[IPSET_ATTR_PORT_TO])) {
		ret = adtfn(set, &data, timeout);
		return ip_set_eexist(ret, flags) ? 0 : ret;
	}

	ip = ntohl(data.ip);
	if (tb[IPSET_ATTR_IP_TO]) {
		ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
		if (ret)
			return ret;
		if (ip > ip_to)
			swap(ip, ip_to);
	} else if (tb[IPSET_ATTR_CIDR]) {
		u8 cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]);

		if (cidr > 32)
			return -IPSET_ERR_INVALID_CIDR;
		ip &= ip_set_hostmask(cidr);
		ip_to = ip | ~ip_set_hostmask(cidr);
	} else
		ip_to = ip;

	port_to = port = ntohs(data.port);
	if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
		port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]);
		if (port > port_to)
			swap(port, port_to);
	}

	for (; !before(ip_to, ip); ip++)
		for (p = port; p <= port_to; p++) {
			data.ip = htonl(ip);
			data.port = htons(p);
			ret = adtfn(set, &data, timeout);

			if (ret && !ip_set_eexist(ret, flags))
				return ret;
			else
				ret = 0;
		}
	return ret;
}
Example #23
static int tunnel_key_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind)
{
	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
	struct nlattr *tb[TCA_TUNNEL_KEY_MAX + 1];
	struct tcf_tunnel_key_params *params_old;
	struct tcf_tunnel_key_params *params_new;
	struct metadata_dst *metadata = NULL;
	struct tc_tunnel_key *parm;
	struct tcf_tunnel_key *t;
	bool exists = false;
	__be16 dst_port = 0;
	__be64 key_id;
	int ret = 0;
	int err;

	if (!nla)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_TUNNEL_KEY_MAX, nla, tunnel_key_policy);
	if (err < 0)
		return err;

	if (!tb[TCA_TUNNEL_KEY_PARMS])
		return -EINVAL;

	parm = nla_data(tb[TCA_TUNNEL_KEY_PARMS]);
	exists = tcf_hash_check(tn, parm->index, a, bind);
	if (exists && bind)
		return 0;

	switch (parm->t_action) {
	case TCA_TUNNEL_KEY_ACT_RELEASE:
		break;
	case TCA_TUNNEL_KEY_ACT_SET:
		if (!tb[TCA_TUNNEL_KEY_ENC_KEY_ID]) {
			ret = -EINVAL;
			goto err_out;
		}

		key_id = key32_to_tunnel_id(nla_get_be32(tb[TCA_TUNNEL_KEY_ENC_KEY_ID]));

		if (tb[TCA_TUNNEL_KEY_ENC_DST_PORT])
			dst_port = nla_get_be16(tb[TCA_TUNNEL_KEY_ENC_DST_PORT]);

		if (tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC] &&
		    tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]) {
			__be32 saddr;
			__be32 daddr;

			saddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_SRC]);
			daddr = nla_get_in_addr(tb[TCA_TUNNEL_KEY_ENC_IPV4_DST]);

			metadata = __ip_tun_set_dst(saddr, daddr, 0, 0,
						    dst_port, TUNNEL_KEY,
						    key_id, 0);
		} else if (tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC] &&
			   tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]) {
			struct in6_addr saddr;
			struct in6_addr daddr;

			saddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_SRC]);
			daddr = nla_get_in6_addr(tb[TCA_TUNNEL_KEY_ENC_IPV6_DST]);

			metadata = __ipv6_tun_set_dst(&saddr, &daddr, 0, 0, 0,
						      dst_port, TUNNEL_KEY,
						      key_id, 0);
		}

		if (!metadata) {
			ret = -EINVAL;
			goto err_out;
		}

		metadata->u.tun_info.mode |= IP_TUNNEL_INFO_TX;
		break;
	default:
		goto err_out;
	}

	if (!exists) {
		ret = tcf_hash_create(tn, parm->index, est, a,
				      &act_tunnel_key_ops, bind, true);
		if (ret)
			return ret;

		ret = ACT_P_CREATED;
	} else {
		tcf_hash_release(*a, bind);
		if (!ovr)
			return -EEXIST;
	}

	t = to_tunnel_key(*a);

	ASSERT_RTNL();
	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
	if (unlikely(!params_new)) {
		if (ret == ACT_P_CREATED)
			tcf_hash_release(*a, bind);
		return -ENOMEM;
	}

	params_old = rtnl_dereference(t->params);

	params_new->action = parm->action;
	params_new->tcft_action = parm->t_action;
	params_new->tcft_enc_metadata = metadata;

	rcu_assign_pointer(t->params, params_new);

	if (params_old)
		kfree_rcu(params_old, rcu);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(tn, *a);

	return ret;

err_out:
	if (exists)
		tcf_hash_release(*a, bind);
	return ret;
}
Example #24
nfct_t *nfct_create(ginkgo_ctx *ctx, int grps, nfct_notify cb, void *ud)
{
    nfct_t *ct;
    char name[20];
    ginkgo_src src = {
        .name = name,
        .fd = -1,
        .rd = NULL,
        .wr = NULL,
        .pars = nl_parse,
        .resp = nfct_response,
        .hand = nfct_handler,
        .ud = NULL,
    };
    int fd;

    if(! ctx) return NULL;
    if((fd = nl_open(NETLINK_NETFILTER, grps)) < 0)
        return NULL;
    if(! instantiate(ct))  {
        close(fd);
        return NULL;
    }
    ct->ginkgo = ctx;
    ct->nlfd = fd;
    ct->nlgrps = grps;
    ct->nlseq = 0;
    ct->cb = cb;
    ct->ud = ud;

    src.fd = fd;
    src.ud = ct;
    sprintf(name, "nfct-%d", fd);
    if(ginkgo_src_register(ctx, &src, &ct->id, 0) < 0)  {
        close(fd);
        free(ct);
        return NULL;
    }
    return ct;
}

void nfct_destroy(nfct_t *ct)
{
    if(ct)  {
        ginkgo_src_deregister(ct->ginkgo, ct->id, 1, 1);
        close(ct->nlfd);
        free(ct);
    }
}

int nfct_conn_get_counter(const conn_entry *e, conn_counter *counter)
{
    const struct nlattr *orig = e->nla[CTA_COUNTERS_ORIG];
    const struct nlattr *rep = e->nla[CTA_COUNTERS_REPLY];
    struct nlattr *arr[CTA_COUNTERS_MAX + 1];

    if(orig && rep)  {
        nla_parse_nested(arr, CTA_COUNTERS_MAX, orig);
        if(! arr[CTA_COUNTERS_PACKETS] || ! arr[CTA_COUNTERS_BYTES])
            return -1;
        counter->orig_pkts = be64toh(nla_get_be64(arr[CTA_COUNTERS_PACKETS]));
        counter->orig_bytes = be64toh(nla_get_be64(arr[CTA_COUNTERS_BYTES]));

        nla_parse_nested(arr, CTA_COUNTERS_MAX, rep);
        if(! arr[CTA_COUNTERS_PACKETS] || ! arr[CTA_COUNTERS_BYTES])
            return -1;
        counter->rep_pkts = be64toh(nla_get_be64(arr[CTA_COUNTERS_PACKETS]));
        counter->rep_bytes = be64toh(nla_get_be64(arr[CTA_COUNTERS_BYTES]));
        return 0;
    }
    return -1;
}

int nfct_conn_get_tcpinfo(const conn_entry *e, conn_tcpinfo *info)
{
    const struct nlattr *proto = e->nla[CTA_PROTOINFO];
    const struct nlattr *tcp;
    struct nlattr *arr[CTA_PROTOINFO_TCP_MAX + 1];

    if(proto)  {
        tcp = (const struct nlattr *)nla_data(proto);
        if(nla_type(tcp) != CTA_PROTOINFO_TCP)
            return -1;
        nla_parse_nested(arr, CTA_PROTOINFO_TCP_MAX, tcp);
        /* all five attributes are dereferenced below, so require them all */
        if(arr[CTA_PROTOINFO_TCP_STATE]
           && arr[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]
           && arr[CTA_PROTOINFO_TCP_WSCALE_REPLY]
           && arr[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL]
           && arr[CTA_PROTOINFO_TCP_FLAGS_REPLY])  {
            info->state = nla_get_u8(arr[CTA_PROTOINFO_TCP_STATE]);
            info->wscale_orig = nla_get_u8(arr[CTA_PROTOINFO_TCP_WSCALE_ORIGINAL]);
            info->wscale_rep = nla_get_u8(arr[CTA_PROTOINFO_TCP_WSCALE_REPLY]);
            nla_get_mem(arr[CTA_PROTOINFO_TCP_FLAGS_ORIGINAL], &info->flags_orig, sizeof(info->flags_orig));
            nla_get_mem(arr[CTA_PROTOINFO_TCP_FLAGS_REPLY], &info->flags_rep, sizeof(info->flags_rep));
            return 0;
        }
    }
    return -1;
}

int nfct_conn_get_sctpinfo(const conn_entry *e, conn_sctpinfo *info)
{
    const struct nlattr *proto = e->nla[CTA_PROTOINFO];
    const struct nlattr *sctp;
    struct nlattr *arr[CTA_PROTOINFO_SCTP_MAX + 1];

    if(proto)  {
        sctp = (const struct nlattr *)nla_data(proto);
        if(nla_type(sctp) != CTA_PROTOINFO_SCTP)
            return -1;
        nla_parse_nested(arr, CTA_PROTOINFO_SCTP_MAX, sctp);
        /* proceed only when all three attributes are present */
        if(arr[CTA_PROTOINFO_SCTP_STATE]
           && arr[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]
           && arr[CTA_PROTOINFO_SCTP_VTAG_REPLY])  {
            info->state = nla_get_u8(arr[CTA_PROTOINFO_SCTP_STATE]);
            /* SCTP verification tags are 32-bit network-order attributes */
            info->vtag_orig = ntohl(nla_get_be32(arr[CTA_PROTOINFO_SCTP_VTAG_ORIGINAL]));
            info->vtag_rep = ntohl(nla_get_be32(arr[CTA_PROTOINFO_SCTP_VTAG_REPLY]));
            return 0;
        }
    }
    return -1;
}

int __nfct_conn_get_tuple(const conn_entry *e, int type, conn_tuple *tuple)
{
    struct nlattr *arr[CTA_TUPLE_MAX + 1];
    struct nlattr *ip[CTA_IP_MAX + 1];
    struct nlattr *proto[CTA_PROTO_MAX + 1];

    if(! e->nla[type])
        return -1;

    memset(tuple, 0, sizeof(*tuple));
    nla_parse_nested(arr, CTA_TUPLE_MAX, e->nla[type]);
    if(! arr[CTA_TUPLE_IP] || ! arr[CTA_TUPLE_PROTO])  {
        PR_WARN("unexpected NULL tuple IP or proto");
        return -1;
    }

    nla_parse_nested(ip, CTA_IP_MAX, arr[CTA_TUPLE_IP]);
    tuple->src.l3num = e->l3num;
    if(e->l3num == AF_INET)  {
        if(! ip[CTA_IP_V4_SRC] || ! ip[CTA_IP_V4_DST])
            return -1;
        tuple->src.u3.ip = nla_get_be32(ip[CTA_IP_V4_SRC]);
        tuple->dst.u3.ip = nla_get_be32(ip[CTA_IP_V4_DST]);
    }else if(e->l3num == AF_INET6)  {
        if(! ip[CTA_IP_V6_SRC] || ! ip[CTA_IP_V6_DST])
            return -1;
        nla_get_mem(ip[CTA_IP_V6_SRC], &tuple->src.u3.in6, sizeof(tuple->src.u3.in6));
        nla_get_mem(ip[CTA_IP_V6_DST], &tuple->dst.u3.in6, sizeof(tuple->dst.u3.in6));
    }else  {
        PR_WARN("unexpected l3num while parsing tuple:%d", e->l3num);
        return -1;
    }

    nla_parse_nested(proto, CTA_PROTO_MAX, arr[CTA_TUPLE_PROTO]);
    if(! proto[CTA_PROTO_NUM])  {
        PR_WARN("unexpected NULL proto num while parsing tuple");
        return -1;
    }
    tuple->dst.protonum = nla_get_u8(proto[CTA_PROTO_NUM]);
    switch(tuple->dst.protonum)  {
    case IPPROTO_TCP:
    case IPPROTO_UDP:
    case IPPROTO_UDPLITE:
        if(! proto[CTA_PROTO_SRC_PORT] || ! proto[CTA_PROTO_DST_PORT])  {
            PR_WARN("unexpected NULL port while parsing tuple");
            return -1;
        }
        tuple->src.u.tcp.port = nla_get_be16(proto[CTA_PROTO_SRC_PORT]);
        tuple->dst.u.tcp.port = nla_get_be16(proto[CTA_PROTO_DST_PORT]);
        break;
    case IPPROTO_ICMP:
        if(! proto[CTA_PROTO_ICMP_ID] || ! proto[CTA_PROTO_ICMP_TYPE] || ! proto[CTA_PROTO_ICMP_CODE])  {
            PR_WARN("unexpected NULL icmp attr while parsing tuple");
            return -1;
        }
        tuple->src.u.icmp.id = nla_get_be16(proto[CTA_PROTO_ICMP_ID]);
        tuple->dst.u.icmp.type = nla_get_u8(proto[CTA_PROTO_ICMP_TYPE]);
        tuple->dst.u.icmp.code = nla_get_u8(proto[CTA_PROTO_ICMP_CODE]);
        break;
    case IPPROTO_ICMPV6:
        if(! proto[CTA_PROTO_ICMPV6_ID]
           || ! proto[CTA_PROTO_ICMPV6_TYPE]
           || ! proto[CTA_PROTO_ICMPV6_CODE])  {
            PR_WARN("unexpected NULL icmpv6 attr while parsing tuple");
            return -1;
        }
        tuple->src.u.icmp.id = nla_get_be16(proto[CTA_PROTO_ICMPV6_ID]);
        tuple->dst.u.icmp.type = nla_get_u8(proto[CTA_PROTO_ICMPV6_TYPE]);
        tuple->dst.u.icmp.code = nla_get_u8(proto[CTA_PROTO_ICMPV6_CODE]);
        break;
    default:
        PR_INFO("unsupported proto %d while parsing tuple", tuple->dst.protonum);
        return -1;
    }
    return 0;
}

/* fixme: nat seq adj */

nfct_msg *nfct_msg_new(__u8 l3num, int flags)
{
    ginkgo_msg *msg;
    struct nlmsghdr *nlh;
    struct nfgenmsg *nfmsg;
    nfct_msg_ctl *ctl;
    conn_entry *e;

    if((msg = ginkgo_new_msg(0, NFCT_MSG_GOOD_SIZE)))  {
        if(! instantiate(e))  {
            free(msg);
            return NULL;
        }

        nlh = NL_HEADER(msg);
        BZERO(nlh);
        nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        if(flags & NFCT_F_CREATE)
            nlh->nlmsg_flags |= NLM_F_CREATE;
        if(flags & NFCT_F_EXCL)
            nlh->nlmsg_flags |= NLM_F_EXCL;
        if(flags & NFCT_F_DUMP)
            nlh->nlmsg_flags |= NLM_F_DUMP;

        nfmsg = (struct nfgenmsg *)NLMSG_DATA(nlh);
        nfmsg->nfgen_family = l3num;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = 0;

        BZERO(e);
        e->l3num = l3num;

        ctl = msg_ctl(msg);
        BZERO(ctl);
        ctl->base.entry = e;
        ctl->ctx = (char *)nfmsg + NLMSG_ALIGN(sizeof(struct nfgenmsg));
        ctl->sz = NFCT_MSG_GOOD_SIZE;
        return (nfct_msg *)ctl;
    }
    return NULL;
}