Example No. 1
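/* SET target, revision 1: add the packet to the configured "add" set and/or
 * delete it from the "del" set, then let rule traversal continue (XT_CONTINUE). */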
static unsigned int
set_target_v1(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_set_info_target_v1 *info = par->targinfo;

	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
		info->add_set.flags, 0, UINT_MAX);
	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
		info->del_set.flags, 0, UINT_MAX);

	if (info->add_set.index != IPSET_INVALID_ID)
		ip_set_add(info->add_set.index, skb, par, &add_opt);
	if (info->del_set.index != IPSET_INVALID_ID)
		ip_set_del(info->del_set.index, skb, par, &del_opt);

	return XT_CONTINUE;
}
Example No. 2
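/* pkttype match: for looped-back packets the original packet type is lost, so it
 * is reconstructed from the destination address (multicast vs. broadcast) before
 * being compared against the configured type, with optional inversion. */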
static bool
pkttype_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_pkttype_info *info = par->matchinfo;
	u_int8_t type;

	if (skb->pkt_type != PACKET_LOOPBACK)
		type = skb->pkt_type;
	else if (xt_family(par) == NFPROTO_IPV4 &&
	    ipv4_is_multicast(ip_hdr(skb)->daddr))
		type = PACKET_MULTICAST;
	else if (xt_family(par) == NFPROTO_IPV6 &&
	    ipv6_hdr(skb)->daddr.s6_addr[0] == 0xFF)
		type = PACKET_MULTICAST;
	else
		type = PACKET_BROADCAST;

	return (type == info->pkttype) ^ info->invert;
}
Example No. 3
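/* SET match, revision 0 (compat layout): test whether the packet's addresses are
 * in the configured set; IPSET_INV_MATCH inverts the result. */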
static bool
set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_set_info_match_v0 *info = par->matchinfo;

	ADT_OPT(opt, xt_family(par), info->match_set.u.compat.dim,
		info->match_set.u.compat.flags, 0, UINT_MAX);

	return match_set(info->match_set.index, skb, par, &opt,
			 info->match_set.u.compat.flags & IPSET_INV_MATCH);
}
Example No. 4
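/* SET target, revision 3: in addition to add/del, an optional "map" set can be
 * consulted to rewrite skb metadata (mark, priority, queue mapping). */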
static unsigned int
set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_set_info_target_v3 *info = par->targinfo;
	int ret;

	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
		info->add_set.flags, info->flags, info->timeout);
	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
		info->del_set.flags, 0, UINT_MAX);
	ADT_OPT(map_opt, xt_family(par), info->map_set.dim,
		info->map_set.flags, 0, UINT_MAX);

	/* Normalize to fit into jiffies */
	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
	if (info->add_set.index != IPSET_INVALID_ID)
		ip_set_add(info->add_set.index, skb, par, &add_opt);
	if (info->del_set.index != IPSET_INVALID_ID)
		ip_set_del(info->del_set.index, skb, par, &del_opt);
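	/* Map set: on a match, copy the mark/priority/queue values stored with
	 * the matching entry onto the skb. */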
	if (info->map_set.index != IPSET_INVALID_ID) {
		map_opt.cmdflags |= info->flags & (IPSET_FLAG_MAP_SKBMARK |
						   IPSET_FLAG_MAP_SKBPRIO |
						   IPSET_FLAG_MAP_SKBQUEUE);
		ret = match_set(info->map_set.index, skb, par, &map_opt,
				info->map_set.flags & IPSET_INV_MATCH);
		if (!ret)
			return XT_CONTINUE;
		if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBMARK)
			skb->mark = (skb->mark & ~MOPT(map_opt, skbmarkmask))
				    ^ MOPT(map_opt, skbmark);
		if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBPRIO)
			skb->priority = MOPT(map_opt, skbprio);
		if ((map_opt.cmdflags & IPSET_FLAG_MAP_SKBQUEUE) &&
		    skb->dev &&
		    skb->dev->real_num_tx_queues > MOPT(map_opt, skbqueue))
			skb_set_queue_mapping(skb, MOPT(map_opt, skbqueue));
	}
	return XT_CONTINUE;
}
Example No. 5
File: xt_osf.c Project: Lyude/linux
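/* osf match: passive OS fingerprinting; the packet is compared against the
 * loaded fingerprint list (nf_osf_fingers). */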
static bool
xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
{
	const struct xt_osf_info *info = p->matchinfo;
	struct net *net = xt_net(p);

	if (!info)
		return false;

	return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
			    xt_out(p), info, net, nf_osf_fingers);
}
Example No. 6
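/* SET target, revision 2: like revision 1, but entries added to the "add" set
 * can carry the per-rule timeout and flags. */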
static unsigned int
set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_set_info_target_v2 *info = par->targinfo;

	ADT_OPT(add_opt, xt_family(par), info->add_set.dim,
		info->add_set.flags, info->flags, info->timeout);
	ADT_OPT(del_opt, xt_family(par), info->del_set.dim,
		info->del_set.flags, 0, UINT_MAX);

	/* Normalize to fit into jiffies */
	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
	    add_opt.ext.timeout > UINT_MAX / MSEC_PER_SEC)
		add_opt.ext.timeout = UINT_MAX / MSEC_PER_SEC;
	if (info->add_set.index != IPSET_INVALID_ID)
		ip_set_add(info->add_set.index, skb, par, &add_opt);
	if (info->del_set.index != IPSET_INVALID_ID)
		ip_set_del(info->del_set.index, skb, par, &del_opt);

	return XT_CONTINUE;
}
Example No. 7
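/* SET match, revision 1: test the packet against the configured set; the
 * "return nomatch" flag is translated into the corresponding command flag. */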
static bool
set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_set_info_match_v1 *info = par->matchinfo;

	ADT_OPT(opt, xt_family(par), info->match_set.dim,
		info->match_set.flags, 0, UINT_MAX);

	if (opt.flags & IPSET_RETURN_NOMATCH)
		opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;

	return match_set(info->match_set.index, skb, par, &opt,
			 info->match_set.flags & IPSET_INV_MATCH);
}
Example No. 8
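/* cluster match: hash the conntrack entry (or its master) and accept the packet
 * only if the resulting hash bit falls within this node's mask. */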
static bool
xt_cluster_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct sk_buff *pskb = (struct sk_buff *)skb;
	const struct xt_cluster_match_info *info = par->matchinfo;
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned long hash;

	/* This match assumes that all nodes see the same packets. This can be
	 * achieved if the switch that connects the cluster nodes supports some
	 * sort of 'port mirroring'. However, if your switch does not support
	 * this, your cluster nodes can reply to ARP requests using a multicast
	 * MAC address. Thus, your switch will flood the same packets to the
	 * cluster nodes with the same multicast MAC address. Using a multicast
	 * link address is an RFC 1812 (section 3.3.2) violation, but this works
	 * fine in practice.
	 *
	 * Unfortunately, if you use the multicast MAC address, the link layer
	 * sets skbuff's pkt_type to PACKET_MULTICAST, which is not accepted
	 * by TCP and others for packets coming to this node. For that reason,
	 * this match mangles skbuff's pkt_type if it detects a packet
	 * addressed to a unicast address but using PACKET_MULTICAST. Yes, I
	 * know, matches should not alter packets, but we are doing this here
	 * because we would need to add a PKTTYPE target for this sole purpose.
	 */
	if (!xt_cluster_is_multicast_addr(skb, xt_family(par)) &&
	    skb->pkt_type == PACKET_MULTICAST) {
		pskb->pkt_type = PACKET_HOST;
	}

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return false;

	if (ct->master)
		hash = xt_cluster_hash(ct->master, info);
	else
		hash = xt_cluster_hash(ct, info);

	return !!((1 << hash) & info->node_mask) ^
	       !!(info->flags & XT_CLUSTER_F_INV);
}
Example No. 9
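/* SET match, revision 4: besides the set lookup, the matched entry's packet and
 * byte counters can be compared against the configured thresholds. */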
static bool
set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_set_info_match_v4 *info = par->matchinfo;
	int ret;

	ADT_OPT(opt, xt_family(par), info->match_set.dim,
		info->match_set.flags, info->flags, UINT_MAX);

	if (info->packets.op != IPSET_COUNTER_NONE ||
	    info->bytes.op != IPSET_COUNTER_NONE)
		opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;

	ret = match_set(info->match_set.index, skb, par, &opt,
			info->match_set.flags & IPSET_INV_MATCH);

	if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
		return ret;

	if (!match_counter(opt.ext.packets, &info->packets))
		return false;
	return match_counter(opt.ext.bytes, &info->bytes);
}
Example No. 10
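/* osf match, alternative revision: a thin wrapper around nf_osf_match() that
 * passes the rule's match info and network namespace straight through. */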
static bool
xt_osf_match_packet(const struct sk_buff *skb, struct xt_action_param *p)
{
	return nf_osf_match(skb, xt_family(p), xt_hooknum(p), xt_in(p),
			    xt_out(p), p->matchinfo, xt_net(p), nf_osf_fingers);
}
Example No. 11
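/* ipvs match: find the IPVS connection this packet belongs to and check the
 * requested properties (protocol, virtual address/port, direction, forwarding
 * method); each check can be inverted individually. */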
static bool
ipvs_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_ipvs_mtinfo *data = par->matchinfo;
	struct netns_ipvs *ipvs = net_ipvs(xt_net(par));
	/* ipvs_mt_check ensures that family is only NFPROTO_IPV[46]. */
	const u_int8_t family = xt_family(par);
	struct ip_vs_iphdr iph;
	struct ip_vs_protocol *pp;
	struct ip_vs_conn *cp;
	bool match = true;

	if (data->bitmask == XT_IPVS_IPVS_PROPERTY) {
		match = skb->ipvs_property ^
			!!(data->invert & XT_IPVS_IPVS_PROPERTY);
		goto out;
	}

	/* other flags than XT_IPVS_IPVS_PROPERTY are set */
	if (!skb->ipvs_property) {
		match = false;
		goto out;
	}

	ip_vs_fill_iph_skb(family, skb, true, &iph);

	if (data->bitmask & XT_IPVS_PROTO)
		if ((iph.protocol == data->l4proto) ^
		    !(data->invert & XT_IPVS_PROTO)) {
			match = false;
			goto out;
		}

	pp = ip_vs_proto_get(iph.protocol);
	if (unlikely(!pp)) {
		match = false;
		goto out;
	}

	/*
	 * Check if the packet belongs to an existing entry
	 */
	cp = pp->conn_out_get(ipvs, family, skb, &iph);
	if (unlikely(cp == NULL)) {
		match = false;
		goto out;
	}

	/*
	 * We found a connection, i.e. cp != NULL, make sure to call
	 * __ip_vs_conn_put before returning.  In our case jump to out_put_cp.
	 */

	if (data->bitmask & XT_IPVS_VPORT)
		if ((cp->vport == data->vport) ^
		    !(data->invert & XT_IPVS_VPORT)) {
			match = false;
			goto out_put_cp;
		}

	if (data->bitmask & XT_IPVS_VPORTCTL)
		if ((cp->control != NULL &&
		     cp->control->vport == data->vportctl) ^
		    !(data->invert & XT_IPVS_VPORTCTL)) {
			match = false;
			goto out_put_cp;
		}

	if (data->bitmask & XT_IPVS_DIR) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

		if (ct == NULL) {
			match = false;
			goto out_put_cp;
		}

		if ((ctinfo >= IP_CT_IS_REPLY) ^
		    !!(data->invert & XT_IPVS_DIR)) {
			match = false;
			goto out_put_cp;
		}
	}

	if (data->bitmask & XT_IPVS_METHOD)
		if (((cp->flags & IP_VS_CONN_F_FWD_MASK) == data->fwd_method) ^
		    !(data->invert & XT_IPVS_METHOD)) {
			match = false;
			goto out_put_cp;
		}

	if (data->bitmask & XT_IPVS_VADDR) {
		if (ipvs_mt_addrcmp(&cp->vaddr, &data->vaddr,
				    &data->vmask, family) ^
		    !(data->invert & XT_IPVS_VADDR)) {
			match = false;
			goto out_put_cp;
		}
	}

out_put_cp:
	__ip_vs_conn_put(cp);
out:
	pr_debug("match=%d\n", match);
	return match;
}