Code Example #1
/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (qthreshold > li->u.ulog.qthreshold)
		qthreshold = li->u.ulog.qthreshold;

	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += nla_total_size(data_len);
		break;

	default:
		goto unlock_and_release;
	}

	if (inst->skb &&
	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, li, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
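
The size precomputation above relies on nla_total_size(), which charges each attribute its netlink attribute header plus the payload rounded up to a 4-byte boundary. The standalone sketch below mirrors that arithmetic in userspace so the per-attribute costs can be checked directly; the struct layout and macros are reproduced here for illustration (they follow linux/netlink.h on common configurations) and are not part of this file.

/* userspace re-implementation of the kernel's attribute-size helpers,
 * for illustration only */
#include <stdio.h>

struct nlattr {			/* layout as in linux/netlink.h */
	unsigned short nla_len;
	unsigned short nla_type;
};

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int) NLA_ALIGN(sizeof(struct nlattr)))

static int nla_total_size(int payload)
{
	/* attribute header plus payload, padded to the alignment boundary */
	return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
	/* each u_int32_t attribute above (ifindex, mark, uid, gid) costs 8 bytes */
	printf("u32 attribute: %d bytes\n", nla_total_size(sizeof(unsigned int)));
	/* a 7-byte prefix ("prefix" + NUL) pads up to 12 bytes */
	printf("7-byte prefix: %d bytes\n", nla_total_size(7));
	return 0;
}

This is also why plen counts the terminating NUL: the whole string is shipped as the attribute payload, and only the padding beyond it comes for free.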
Code Example #2
File: nfnetlink_log.c  Project: asmalldev/linux
/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(struct net *net,
		  u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	size_t size;
	unsigned int data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	const struct nfnl_ct_hook *nfnl_ct = NULL;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info uninitialized_var(ctinfo);

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(log, li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
		+ nla_total_size(sizeof(struct nfgenmsg));	/* NLMSG_DONE */

	if (in && skb_mac_header_was_set(skb)) {
		size +=   nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_CONNTRACK) {
		nfnl_ct = rcu_dereference(nfnl_ct_hook);
		if (nfnl_ct != NULL) {
			ct = nfnl_ct->get_ct(skb, &ctinfo);
			if (ct != NULL)
				size += nfnl_ct->build_size(ct);
		}
	}

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;
	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		data_len = inst->copy_range;
		if ((li->u.ulog.flags & NF_LOG_F_COPY_LEN) &&
		    (li->u.ulog.copy_len < data_len))
			data_len = li->u.ulog.copy_len;

		if (data_len > skb->len)
			data_len = skb->len;

		size += nla_total_size(data_len);
		break;

	case NFULNL_COPY_DISABLED:
	default:
		goto unlock_and_release;
	}

	if (inst->skb && size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
					     inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(log, inst, skb, data_len, pf,
				hooknum, in, out, prefix, plen,
				nfnl_ct, ct, ctinfo);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
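
In this newer variant the handler takes a struct net, keeps its instance table in per-net state (nfnl_log_pernet()), and can account for conntrack data through the nfnl_ct_hook. It is published to the netfilter core as a struct nf_logger whose type matches the NF_LOG_TYPE_ULOG test at the top of the function. The registration in nfnetlink_log.c looks roughly like the abridged sketch below; pernet setup, the nfnetlink subsystem registration, and error unwinding are elided, so read it as an outline rather than a verbatim excerpt.

#include <linux/init.h>
#include <linux/module.h>
#include <net/netfilter/nf_log.h>

static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.type	= NF_LOG_TYPE_ULOG,	/* what the li_user->type test selects on */
	.logfn	= nfulnl_log_packet,	/* the function shown above */
	.me	= THIS_MODULE,
};

static int __init nfnetlink_log_init(void)
{
	/* pernet state and the nfnetlink subsystem are brought up first
	 * (elided here); only then is the logger made visible */
	return nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
}

Once registered (and bound for a protocol family), nf_log_packet() calls from anywhere in netfilter can be routed to nfulnl_log_packet() through this logger.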
Code Example #3
/* log handler for internal netfilter logging api */
static void
nfulnl_log_packet(unsigned int pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int nlbufsiz;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG) 
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		inst = instance_lookup_get(0);
	if (!inst) {
		PRINTR("nfnetlink_log: trying to log packet, "
			"but no instance for group %u\n", li->u.ulog.group);
		return;
	}

	/* all macros expand to constant values at compile time */
	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hdr))
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#endif
		+ NFA_SPACE(sizeof(u_int32_t))	/* mark */
		+ NFA_SPACE(sizeof(u_int32_t))	/* uid */
		+ NFA_SPACE(NFULNL_PREFIXLEN)	/* prefix */
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp));

	UDEBUG("initial size=%u\n", size);

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += NFA_SPACE(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += NFA_SPACE(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (qthreshold > li->u.ulog.qthreshold)
		qthreshold = li->u.ulog.qthreshold;
	
	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;
	
	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0 
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;
		
		size += NFA_SPACE(data_len);
		UDEBUG("copy_packet, therefore size now %u\n", size);
		break;
	
	default:
		spin_unlock_bh(&inst->lock);
		instance_put(inst);
		return;
	}

	if (size > inst->nlbufsiz)
		nlbufsiz = size;
	else
		nlbufsiz = inst->nlbufsiz;

	if (!inst->skb) {
		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	} else if (inst->qlen >= qthreshold ||
		   size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		UDEBUG("flushing old skb\n");

		__nfulnl_send(inst);

		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	}

	UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);
	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, li, prefix);

	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}
	spin_unlock_bh(&inst->lock);

	return;

alloc_failure:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	UDEBUG("error allocating skb\n");
	/* FIXME: statistics */
}
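
All three versions arm inst->timer with inst->flushtimeout*HZ/100, i.e. the flush timeout is configured in centiseconds, and take an extra reference via instance_get() so the instance survives until the timer fires. The callback itself is not on this page; the sketch below is a plausible reconstruction based on the pre-timer_setup() timer API these kernel versions used (the instance pointer travels in the timer's data field), not a verbatim excerpt from either project.

/* hypothetical flush-timer callback matching the add_timer() calls above */
static void nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);	/* flush the batched skb to userspace */
	spin_unlock_bh(&inst->lock);
	instance_put(inst);		/* drop the reference instance_get() took */
}

Flushing from a timer is what lets the qthreshold batching above delay messages for throughput without ever holding them indefinitely.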