Example #1
static void
_instance_destroy2(struct nfulnl_instance *inst, int lock)
{
	/* first pull it out of the global list */
	if (lock)
		write_lock_bh(&instances_lock);

	UDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->group_num);

	hlist_del(&inst->hlist);

	if (lock)
		write_unlock_bh(&instances_lock);

	/* then flush all pending packets from skb */

	spin_lock_bh(&inst->lock);
	if (inst->skb) {
		if (inst->qlen)
			__nfulnl_send(inst);
		if (inst->skb) {
			kfree_skb(inst->skb);
			inst->skb = NULL;
		}
	}
	spin_unlock_bh(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);

	module_put(THIS_MODULE);
}
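The lock argument lets _instance_destroy2() serve both callers that already hold instances_lock and callers that do not. Thin wrappers along the lines below would typically provide the two entry points; this is a sketch only, with names chosen to mirror the double-underscore convention used in these examples.

/* Sketch: the two expected entry points around _instance_destroy2(). */
static inline void
__instance_destroy(struct nfulnl_instance *inst)
{
	_instance_destroy2(inst, 0);	/* caller already holds instances_lock */
}

static inline void
instance_destroy(struct nfulnl_instance *inst)
{
	_instance_destroy2(inst, 1);	/* take and release instances_lock here */
}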
Example #2
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	UDEBUG("removing instance %p (queuenum=%u) from hash\n",
		inst, inst->group_num);

	hlist_del(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock_bh(&inst->lock);
	if (inst->skb) {
		/* timer "holds" one reference (we have one more) */
		if (del_timer(&inst->timer))
			instance_put(inst);
		if (inst->qlen)
			__nfulnl_send(inst);
		if (inst->skb) {
			kfree_skb(inst->skb);
			inst->skb = NULL;
		}
	}
	spin_unlock_bh(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}
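The instance_put() call above drops the reference owned by this destroy path, and the del_timer() branch drops the extra reference "held" by a pending timer. Below is a minimal sketch of such refcount helpers, assuming the instance embeds an atomic use counter; the field name and the plain kfree() are assumptions for illustration, not necessarily the exact kernel code.

/* Sketch: refcount helpers as used by the examples above. */
static inline void
instance_get(struct nfulnl_instance *inst)
{
	atomic_inc(&inst->use);		/* another holder keeps inst alive */
}

static void
instance_put(struct nfulnl_instance *inst)
{
	/* free the instance once the last reference is gone */
	if (inst && atomic_dec_and_test(&inst->use))
		kfree(inst);
}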
Example #3
static void
__nfulnl_flush(struct nfulnl_instance *inst)
{
	/* timer holds a reference */
	if (del_timer(&inst->timer))
		instance_put(inst);
	if (inst->skb)
		__nfulnl_send(inst);
}
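Following the naming convention in these examples, the double-underscore __nfulnl_flush() expects inst->lock to already be held. A caller that does not yet hold the lock would wrap it roughly like this (hypothetical helper name, sketch only):

/* Sketch: flushing an instance from a context that does not hold inst->lock. */
static void
nfulnl_flush_instance(struct nfulnl_instance *inst)
{
	spin_lock_bh(&inst->lock);	/* __nfulnl_flush() runs under inst->lock */
	__nfulnl_flush(inst);
	spin_unlock_bh(&inst->lock);
}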
Example #4
static void
nfulnl_timer(struct timer_list *t)
{
	struct nfulnl_instance *inst = from_timer(inst, t, timer);

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}
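Example #4 uses the timer_list callback signature that goes with timer_setup()/from_timer(). For from_timer() to recover the enclosing instance, the timer must be initialized against the embedded field; here is a sketch of the matching setup, assuming it runs from the instance-creation path:

/* Sketch: initialization matching the from_timer() callback in Example #4. */
static void
nfulnl_timer_init(struct nfulnl_instance *inst)
{
	/* bind the embedded timer to nfulnl_timer(); no data cookie is needed,
	 * since the callback derives inst from the timer_list pointer */
	timer_setup(&inst->timer, nfulnl_timer, 0);
}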
Example #5
static void
nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}
Example #6
static void nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data; 

	UDEBUG("timer function called, flushing buffer\n");

	spin_lock_bh(&inst->lock);
	__nfulnl_send(inst);
	instance_put(inst);
	spin_unlock_bh(&inst->lock);
}
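Examples #5 and #6 use the older timer callback signature that receives an unsigned long cookie, so the instance pointer has to be stored in the timer at setup time. A sketch of the corresponding legacy initialization (the helper name is illustrative):

/* Sketch: legacy timer initialization matching Examples #5 and #6. */
static void
nfulnl_timer_init_legacy(struct nfulnl_instance *inst)
{
	/* pass the instance pointer as the timer's data cookie */
	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
}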
Example #7
/* log handler for internal netfilter logging api */
static void
nfulnl_log_packet(unsigned int pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int nlbufsiz;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG) 
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		inst = instance_lookup_get(0);
	if (!inst) {
		PRINTR("nfnetlink_log: trying to log packet, "
			"but no instance for group %u\n", li->u.ulog.group);
		return;
	}

	/* all macros expand to constant values at compile time */
	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hdr))
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#endif
		+ NFA_SPACE(sizeof(u_int32_t))	/* mark */
		+ NFA_SPACE(sizeof(u_int32_t))	/* uid */
		+ NFA_SPACE(NFULNL_PREFIXLEN)	/* prefix */
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp));

	UDEBUG("initial size=%u\n", size);

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += NFA_SPACE(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += NFA_SPACE(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (qthreshold > li->u.ulog.qthreshold)
		qthreshold = li->u.ulog.qthreshold;
	
	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;
	
	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0 
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;
		
		size += NFA_SPACE(data_len);
		UDEBUG("copy_packet, therefore size now %u\n", size);
		break;
	
	default:
		spin_unlock_bh(&inst->lock);
		instance_put(inst);
		return;
	}

	if (size > inst->nlbufsiz)
		nlbufsiz = size;
	else
		nlbufsiz = inst->nlbufsiz;

	if (!inst->skb) {
		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	} else if (inst->qlen >= qthreshold ||
		   size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		UDEBUG("flushing old skb\n");

		__nfulnl_send(inst);

		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	}

	UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);
	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, li, prefix);

	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}
	spin_unlock_bh(&inst->lock);

	return;

alloc_failure:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	UDEBUG("error allocating skb\n");
	/* FIXME: statistics */
}
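nfulnl_log_packet() takes the log group and the per-rule queue threshold from the nf_loginfo it is handed (li->u.ulog.group and li->u.ulog.qthreshold above). The following is a sketch of how a caller could fill in such a per-rule loginfo; the field layout is assumed from the usage in this example and the concrete values are arbitrary.

/* Sketch: a per-rule nf_loginfo as consumed by nfulnl_log_packet() above. */
static const struct nf_loginfo sample_loginfo = {
	.type	= NF_LOG_TYPE_ULOG,
	.u = {
		.ulog = {
			.group		= 1,	/* netlink log group to deliver to */
			.qthreshold	= 10,	/* per-rule threshold, see above */
		},
	},
};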