static void nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}
static void nfulnl_timer(struct timer_list *t)
{
	struct nfulnl_instance *inst = from_timer(inst, t, timer);

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
}
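/*
 * For reference: the timer_list-based callback above is armed with
 * timer_setup() rather than the old setup_timer()/(unsigned long) cast.
 * A minimal sketch of the corresponding setup in instance_create(),
 * assuming the timer field is named 'timer' as from_timer() implies:
 *
 *	timer_setup(&inst->timer, nfulnl_timer, 0);
 *
 * from_timer(inst, t, timer) then recovers the enclosing
 * struct nfulnl_instance via container_of().
 */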
static void nfulnl_timer(unsigned long data)
{
	struct nfulnl_instance *inst = (struct nfulnl_instance *)data;

	UDEBUG("timer function called, flushing buffer\n");

	spin_lock_bh(&inst->lock);
	__nfulnl_send(inst);
	spin_unlock_bh(&inst->lock);
	/* drop the timer's reference only after releasing inst->lock:
	 * a put under the lock could free inst while it is still held */
	instance_put(inst);
}
static struct nfulnl_instance *
instance_create(u_int16_t group_num, int pid)
{
	struct nfulnl_instance *inst;

	UDEBUG("entering (group_num=%u, pid=%d)\n", group_num, pid);

	write_lock_bh(&instances_lock);
	if (__instance_lookup(group_num)) {
		inst = NULL;
		UDEBUG("aborting, instance already exists\n");
		goto out_unlock;
	}

	inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
	if (!inst)
		goto out_unlock;

	INIT_HLIST_NODE(&inst->hlist);
	spin_lock_init(&inst->lock);
	/* needs to be two, since we _put() after creation */
	atomic_set(&inst->use, 2);

	setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);

	inst->peer_pid = pid;
	inst->group_num = group_num;

	inst->qthreshold	= NFULNL_QTHRESH_DEFAULT;
	inst->flushtimeout	= NFULNL_TIMEOUT_DEFAULT;
	inst->nlbufsiz		= NFULNL_NLBUFSIZ_DEFAULT;
	inst->copy_mode		= NFULNL_COPY_PACKET;
	inst->copy_range	= 0xffff;

	if (!try_module_get(THIS_MODULE))
		goto out_free;

	hlist_add_head(&inst->hlist,
		       &instance_table[instance_hashfn(group_num)]);

	UDEBUG("newly added node: %p, next=%p\n", &inst->hlist,
		inst->hlist.next);

	write_unlock_bh(&instances_lock);

	return inst;

out_free:
	instance_put(inst);
out_unlock:
	write_unlock_bh(&instances_lock);
	return NULL;
}
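/*
 * The "needs to be two" comment above refers to the pair of refcount
 * helpers used throughout this file: one reference is owned by the hash
 * table, the other by the caller, who drops it with instance_put() once
 * done.  A sketch of the helpers, assuming 'use' is the atomic_t
 * initialized in instance_create() (not necessarily the exact file
 * contents):
 */
static void instance_get(struct nfulnl_instance *inst)
{
	atomic_inc(&inst->use);
}

static void instance_put(struct nfulnl_instance *inst)
{
	if (inst && atomic_dec_and_test(&inst->use)) {
		kfree(inst);
		module_put(THIS_MODULE);	/* balances try_module_get() */
	}
}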
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock_bh(&inst->lock);
	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock_bh(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}
/* called with BH disabled */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del_rcu(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock(&inst->lock);

	/* lockless readers won't be able to use us */
	inst->copy_mode = NFULNL_COPY_DISABLED;

	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}
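/*
 * The hlist_del_rcu() and NFULNL_COPY_DISABLED store above pair with a
 * lockless, RCU-protected lookup on the packet path.  A sketch of what
 * that read side looks like, reusing instance_table/instance_hashfn
 * from instance_create() above (hedged: helper and field names are
 * assumed from the surrounding code, not quoted verbatim):
 */
static struct nfulnl_instance *
instance_lookup_get(u_int16_t group_num)
{
	struct nfulnl_instance *inst;

	rcu_read_lock_bh();
	hlist_for_each_entry_rcu(inst,
				 &instance_table[instance_hashfn(group_num)],
				 hlist) {
		if (inst->group_num == group_num) {
			/* skip an instance whose last reference is gone */
			if (!atomic_inc_not_zero(&inst->use))
				inst = NULL;
			rcu_read_unlock_bh();
			return inst;
		}
	}
	rcu_read_unlock_bh();
	return NULL;
}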
/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(struct net *net,
		  u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	size_t size;
	unsigned int data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;
	struct nfnl_log_net *log = nfnl_log_pernet(net);
	const struct nfnl_ct_hook *nfnl_ct = NULL;
	struct nf_conn *ct = NULL;
	enum ip_conntrack_info uninitialized_var(ctinfo);

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(log, li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size = nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
		+ nla_total_size(sizeof(struct nfgenmsg));	/* NLMSG_DONE */

	if (in && skb_mac_header_was_set(skb)) {
		size +=   nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_CONNTRACK) {
		nfnl_ct = rcu_dereference(nfnl_ct_hook);
		if (nfnl_ct != NULL) {
			ct = nfnl_ct->get_ct(skb, &ctinfo);
			if (ct != NULL)
				size += nfnl_ct->build_size(ct);
		}
	}

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;

	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		data_len = inst->copy_range;
		if ((li->u.ulog.flags & NF_LOG_F_COPY_LEN) &&
		    (li->u.ulog.copy_len < data_len))
			data_len = li->u.ulog.copy_len;

		if (data_len > skb->len)
			data_len = skb->len;

		size += nla_total_size(data_len);
		break;

	case NFULNL_COPY_DISABLED:
	default:
		goto unlock_and_release;
	}

	if (inst->skb && size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(net, inst->peer_portid,
					     inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(log, inst, skb, data_len, pf,
				hooknum, in, out, prefix, plen,
				nfnl_ct, ct, ctinfo);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
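/*
 * How the function above becomes the kernel's NFLOG handler: the module
 * registers it with the nf_log infrastructure.  A sketch consistent
 * with the signature above (placement and surrounding init code in the
 * real file may differ):
 */
static struct nf_logger nfulnl_logger __read_mostly = {
	.name	= "nfnetlink_log",
	.type	= NF_LOG_TYPE_ULOG,
	.logfn	= nfulnl_log_packet,
	.me	= THIS_MODULE,
};

/* registered at init time, e.g.:
 *	nf_log_register(NFPROTO_UNSPEC, &nfulnl_logger);
 */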
/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (qthreshold > li->u.ulog.qthreshold)
		qthreshold = li->u.ulog.qthreshold;

	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += nla_total_size(data_len);
		break;

	default:
		goto unlock_and_release;
	}

	if (inst->skb &&
	    size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(inst->nlbufsiz, size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, li, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
/* log handler for internal netfilter logging api */
static void
nfulnl_log_packet(unsigned int pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int nlbufsiz;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		inst = instance_lookup_get(0);
	if (!inst) {
		PRINTR("nfnetlink_log: trying to log packet, "
		       "but no instance for group %u\n", li->u.ulog.group);
		return;
	}

	/* all macros expand to constant values at compile time */
	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hdr))
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#endif
		+ NFA_SPACE(sizeof(u_int32_t))	/* mark */
		+ NFA_SPACE(sizeof(u_int32_t))	/* uid */
		+ NFA_SPACE(NFULNL_PREFIXLEN)	/* prefix */
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp));

	UDEBUG("initial size=%u\n", size);

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += NFA_SPACE(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += NFA_SPACE(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (qthreshold > li->u.ulog.qthreshold)
		qthreshold = li->u.ulog.qthreshold;

	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += NFA_SPACE(data_len);
		UDEBUG("copy_packet, therefore size now %u\n", size);
		break;

	default:
		spin_unlock_bh(&inst->lock);
		instance_put(inst);
		return;
	}

	if (size > inst->nlbufsiz)
		nlbufsiz = size;
	else
		nlbufsiz = inst->nlbufsiz;

	if (!inst->skb) {
		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	} else if (inst->qlen >= qthreshold ||
		   size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		UDEBUG("flushing old skb\n");

		__nfulnl_send(inst);

		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	}

	UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);

	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, li, prefix);

	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

	spin_unlock_bh(&inst->lock);

	return;

alloc_failure:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	UDEBUG("error allocating skb\n");
	/* FIXME: statistics */
}
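/*
 * For context, a caller-side sketch of the nf_loginfo that drives the
 * group/threshold/copy decisions above.  This is roughly what the
 * iptables NFLOG target builds; the values are purely illustrative:
 */
static const struct nf_loginfo example_li = {
	.type = NF_LOG_TYPE_ULOG,
	.u.ulog = {
		.group		= 1,	/* NFLOG group to deliver to */
		.qthreshold	= 10,	/* per-rule queue threshold,
					 * overrides the per-instance one */
	},
};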