static int
list_set_uadd(struct ip_set *set, void *value, const struct ip_set_ext *ext,
	      struct ip_set_ext *mext, u32 flags)
{
	struct list_set *map = set->data;
	struct set_adt_elem *d = value;
	struct set_elem *e;
	bool flag_exist = flags & IPSET_FLAG_EXIST;
	u32 i, ret = 0;

	if (SET_WITH_TIMEOUT(set))
		set_cleanup_entries(set);

	/* Check already added element */
	for (i = 0; i < map->size; i++) {
		e = list_set_elem(set, map, i);
		if (e->id == IPSET_INVALID_ID)
			goto insert;
		else if (e->id != d->id)
			continue;

		if ((d->before > 1 && !id_eq(set, i + 1, d->refid)) ||
		    (d->before < 0 &&
		     (i == 0 || !id_eq(set, i - 1, d->refid))))
			/* Before/after doesn't match */
			return -IPSET_ERR_REF_EXIST;
		if (!flag_exist)
			/* Can't re-add */
			return -IPSET_ERR_EXIST;
		/* Update extensions */
		ip_set_ext_destroy(set, e);

		if (SET_WITH_TIMEOUT(set))
			ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
		if (SET_WITH_COUNTER(set))
			ip_set_init_counter(ext_counter(e, set), ext);
		if (SET_WITH_COMMENT(set))
			ip_set_init_comment(ext_comment(e, set), ext);
		/* Set is already added to the list */
		ip_set_put_byindex(map->net, d->id);
		return 0;
	}
insert:
	ret = -IPSET_ERR_LIST_FULL;
	for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) {
		e = list_set_elem(set, map, i);
		if (e->id == IPSET_INVALID_ID)
			ret = d->before != 0 ? -IPSET_ERR_REF_EXIST
				: list_set_add(set, i, d, ext);
		else if (e->id != d->refid)
			continue;
		else if (d->before > 0)
			ret = list_set_add(set, i, d, ext);
		else if (i + 1 < map->size)
			ret = list_set_add(set, i + 1, d, ext);
	}

	return ret;
}
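/*
 * For reference: a sketch of the element layouts assumed by list_set_uadd()
 * above and by list_set_add()/list_set_uadt() later in this listing.  The
 * definitions are not part of the excerpt; they are assumptions reconstructed
 * from how the fields are used.  Each list:set slot stores the id of a member
 * set (the optional timeout/counter/comment extensions live in the remainder
 * of the set->dsize-sized slot).  struct set_adt_elem carries one userspace
 * add/del/test request: before > 0 means "insert before refid", before < 0
 * means "insert after refid", and before == 0 means no positional constraint.
 */
struct set_elem {
	ip_set_id_t id;
};

struct set_adt_elem {
	ip_set_id_t id;
	ip_set_id_t refid;
	int before;
};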
/* Kernel module to match an IP set. */

#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/version.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_set.h>
#include <linux/netfilter_ipv4/ipt_set.h>

static inline int
match_set(const struct ipt_set_info *info,
	  const struct sk_buff *skb,
	  int inv)
{
	if (ip_set_testip_kernel(info->index, skb, info->flags))
		inv = !inv;
	return inv;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static bool
match(const struct sk_buff *skb,
      const struct net_device *in,
      const struct net_device *out,
      const struct xt_match *match,
      const void *matchinfo,
      int offset,
      unsigned int protoff,
      bool *hotdrop)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static bool
match(const struct sk_buff *skb, const struct xt_match_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	const struct ipt_set_info_match *info = matchinfo;
#else
	const struct ipt_set_info_match *info = par->matchinfo;
#endif

	return match_set(&info->match_set,
			 skb,
			 info->match_set.flags[0] & IPSET_MATCH_INV);
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static bool
checkentry(const char *tablename,
	   const void *inf,
	   const struct xt_match *match,
	   void *matchinfo,
	   unsigned int hook_mask)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static bool
checkentry(const struct xt_mtchk_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	struct ipt_set_info_match *info = matchinfo;
#else
	struct ipt_set_info_match *info = par->matchinfo;
#endif
	ip_set_id_t index;

	index = ip_set_get_byindex(info->match_set.index);

	if (index == IP_SET_INVALID_ID) {
		ip_set_printk("Cannot find set identified by id %u to match",
			      info->match_set.index);
		return 0;	/* error */
	}
	if (info->match_set.flags[IP_SET_MAX_BINDINGS] != 0) {
		ip_set_printk("That's nasty!");
		return 0;	/* error */
	}

	return 1;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static void
destroy(const struct xt_match *match, void *matchinfo)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static void
destroy(const struct xt_mtdtor_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	struct ipt_set_info_match *info = matchinfo;
#else
	struct ipt_set_info_match *info = par->matchinfo;
#endif

	ip_set_put_byindex(info->match_set.index);
}
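/*
 * A minimal sketch of the x_tables registration that the match/checkentry/
 * destroy callbacks above plug into.  This is not part of the excerpt: the
 * struct contents and the init/exit pair are assumptions based on the
 * 2.6.2x xt_match API.
 */
static struct xt_match set_match = {
	.name		= "set",
	.family		= AF_INET,
	.match		= match,
	.matchsize	= sizeof(struct ipt_set_info_match),
	.checkentry	= checkentry,
	.destroy	= destroy,
	.me		= THIS_MODULE,
};

static int __init ipt_ipset_init(void)
{
	return xt_register_match(&set_match);
}

static void __exit ipt_ipset_fini(void)
{
	xt_unregister_match(&set_match);
}

module_init(ipt_ipset_init);
module_exit(ipt_ipset_fini);
MODULE_LICENSE("GPL");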
/* ipt_SET.c - netfilter target to manipulate IP sets */

#include <linux/module.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/version.h>

#include <linux/netfilter_ipv4.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
#include <linux/netfilter_ipv4/ip_tables.h>
#define xt_register_target	ipt_register_target
#define xt_unregister_target	ipt_unregister_target
#define xt_target		ipt_target
#define XT_CONTINUE		IPT_CONTINUE
#else
#include <linux/netfilter/x_tables.h>
#endif
#include <linux/netfilter_ipv4/ipt_set.h>

static unsigned int
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
target(struct sk_buff **pskb,
       unsigned int hooknum,
       const struct net_device *in,
       const struct net_device *out,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
target(struct sk_buff *skb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
target(struct sk_buff *skb, const struct xt_target_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	const struct ipt_set_info_target *info = targinfo;
#else
	const struct ipt_set_info_target *info = par->targinfo;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	struct sk_buff *skb = *pskb;
#endif

	if (info->add_set.index != IP_SET_INVALID_ID)
		ip_set_addip_kernel(info->add_set.index, skb,
				    info->add_set.flags);
	if (info->del_set.index != IP_SET_INVALID_ID)
		ip_set_delip_kernel(info->del_set.index, skb,
				    info->del_set.flags);

	return XT_CONTINUE;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
static int
checkentry(const char *tablename,
	   const struct ipt_entry *e,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static int
checkentry(const char *tablename,
	   const void *e,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static int
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
static int
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static bool
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int hook_mask)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static bool
checkentry(const struct xt_tgchk_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	const struct ipt_set_info_target *info = targinfo;
#else
	const struct ipt_set_info_target *info = par->targinfo;
#endif
	ip_set_id_t index;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	if (targinfosize != IPT_ALIGN(sizeof(*info))) {
		DP("bad target info size %u", targinfosize);
		return 0;
	}
#endif

	if (info->add_set.index != IP_SET_INVALID_ID) {
		index = ip_set_get_byindex(info->add_set.index);
		if (index == IP_SET_INVALID_ID) {
			ip_set_printk("cannot find add_set index %u as target",
				      info->add_set.index);
			return 0;	/* error */
		}
	}

	if (info->del_set.index != IP_SET_INVALID_ID) {
		index = ip_set_get_byindex(info->del_set.index);
		if (index == IP_SET_INVALID_ID) {
			ip_set_printk("cannot find del_set index %u as target",
				      info->del_set.index);
			return 0;	/* error */
		}
	}
	if (info->add_set.flags[IP_SET_MAX_BINDINGS] != 0 ||
	    info->del_set.flags[IP_SET_MAX_BINDINGS] != 0) {
		ip_set_printk("That's nasty!");
		return 0;	/* error */
	}

	return 1;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static void
destroy(void *targetinfo, unsigned int targetsize)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static void
destroy(const struct xt_target *target,
	void *targetinfo,
	unsigned int targetsize)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static void
destroy(const struct xt_target *target, void *targetinfo)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static void
destroy(const struct xt_tgdtor_param *par)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	const struct ipt_set_info_target *info = targetinfo;
#else
	const struct ipt_set_info_target *info = par->targinfo;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
	if (targetsize != IPT_ALIGN(sizeof(struct ipt_set_info_target))) {
		ip_set_printk("invalid targetsize %d", targetsize);
		return;
	}
#endif

	if (info->add_set.index != IP_SET_INVALID_ID)
		ip_set_put_byindex(info->add_set.index);
	if (info->del_set.index != IP_SET_INVALID_ID)
		ip_set_put_byindex(info->del_set.index);
}
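/*
 * A minimal sketch of the target registration the callbacks above are
 * expected to plug into.  This is not part of the excerpt: the struct
 * contents and the init/exit pair are assumptions based on the 2.6.2x
 * xt_target API (the pre-2.6.16 #defines above map the names onto
 * ipt_register_target and friends).
 */
static struct xt_target SET_target = {
	.name		= "SET",
	.family		= AF_INET,
	.target		= target,
	.targetsize	= sizeof(struct ipt_set_info_target),
	.checkentry	= checkentry,
	.destroy	= destroy,
	.me		= THIS_MODULE,
};

static int __init ipt_SET_init(void)
{
	return xt_register_target(&SET_target);
}

static void __exit ipt_SET_fini(void)
{
	xt_unregister_target(&SET_target);
}

module_init(ipt_SET_init);
module_exit(ipt_SET_fini);
MODULE_LICENSE("GPL");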
static int
list_set_add(struct ip_set *set, u32 i, struct set_adt_elem *d,
	     const struct ip_set_ext *ext)
{
	struct list_set *map = set->data;
	struct set_elem *e = list_set_elem(set, map, i);

	if (e->id != IPSET_INVALID_ID) {
		if (i == map->size - 1) {
			/* Last element replaced: e.g. add new,before,last */
			ip_set_put_byindex(map->net, e->id);
			ip_set_ext_destroy(set, e);
		} else {
			struct set_elem *x = list_set_elem(set, map,
							   map->size - 1);

			/* Last element pushed off */
			if (x->id != IPSET_INVALID_ID) {
				ip_set_put_byindex(map->net, x->id);
				ip_set_ext_destroy(set, x);
			}
			memmove(list_set_elem(set, map, i + 1), e,
				set->dsize * (map->size - (i + 1)));
			/* Extensions must be initialized to zero */
			memset(e, 0, set->dsize);
		}
	}

	e->id = d->id;
	if (SET_WITH_TIMEOUT(set))
		ip_set_timeout_set(ext_timeout(e, set), ext->timeout);
	if (SET_WITH_COUNTER(set))
		ip_set_init_counter(ext_counter(e, set), ext);
	if (SET_WITH_COMMENT(set))
		ip_set_init_comment(ext_comment(e, set), ext);
	if (SET_WITH_SKBINFO(set))
		ip_set_init_skbinfo(ext_skbinfo(e, set), ext);
	return 0;
}
static void
list_set_flush(struct ip_set *set)
{
	struct list_set *map = set->data;
	struct set_elem *elem;
	u32 i;

	for (i = 0; i < map->size; i++) {
		elem = list_set_elem(map, i);
		if (elem->id != IPSET_INVALID_ID) {
			ip_set_put_byindex(elem->id);
			elem->id = IPSET_INVALID_ID;
		}
	}
}
static int
list_set_add(struct list_set *map, u32 i, ip_set_id_t id,
	     unsigned long timeout)
{
	const struct set_elem *e = list_set_elem(map, i);

	if (i == map->size - 1 && e->id != IPSET_INVALID_ID)
		/* Last element replaced: e.g. add new,before,last */
		ip_set_put_byindex(e->id);
	if (with_timeout(map->timeout))
		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));
	else
		list_elem_add(map, i, id);

	return 0;
}
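/*
 * The two list_set_add() variants in this listing show the API change
 * between the versions being compared: the newer one works on fixed-size
 * slots of set->dsize bytes, shifts the tail with memmove() and initializes
 * the timeout/counter/comment/skbinfo extensions itself, while this older
 * one stores only the id, delegating the slot insertion to list_elem_add()
 * or list_elem_tadd() and handling nothing beyond an optional per-element
 * timeout.
 */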
static void
list_set_flush(struct ip_set *set)
{
	struct list_set *map = set->data;
	struct set_elem *e;
	u32 i;

	for (i = 0; i < map->size; i++) {
		e = list_set_elem(set, map, i);
		if (e->id != IPSET_INVALID_ID) {
			ip_set_put_byindex(map->net, e->id);
			ip_set_ext_destroy(set, e);
			e->id = IPSET_INVALID_ID;
		}
	}
}
static int
list_set_del(struct ip_set *set, u32 i)
{
	struct list_set *map = set->data;
	struct set_elem *e = list_set_elem(set, map, i);

	ip_set_put_byindex(map->net, e->id);
	ip_set_ext_destroy(set, e);

	if (i < map->size - 1)
		memmove(e, list_set_elem(set, map, i + 1),
			set->dsize * (map->size - (i + 1)));

	/* Last element */
	e = list_set_elem(set, map, map->size - 1);
	e->id = IPSET_INVALID_ID;
	return 0;
}
static int
list_set_del(struct list_set *map, u32 i)
{
	struct set_elem *a = list_set_elem(map, i), *b;

	ip_set_put_byindex(a->id);

	for (; i < map->size - 1; i++) {
		b = list_set_elem(map, i + 1);
		a->id = b->id;
		if (with_timeout(map->timeout))
			((struct set_telem *)a)->timeout =
				((struct set_telem *)b)->timeout;
		a = b;
		if (a->id == IPSET_INVALID_ID)
			break;
	}
	/* Last element */
	a->id = IPSET_INVALID_ID;
	return 0;
}
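/*
 * The same contrast holds for deletion: the newer list_set_del() directly
 * above frees the element's extensions and closes the gap with a single
 * memmove() over dsize-sized slots, whereas this older variant walks the
 * array and copies the id and (optional) timeout field by field until it
 * reaches an empty slot.
 */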
static int
list_set_uadt(struct ip_set *set, struct nlattr *tb[],
	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct list_set *map = set->data;
	bool with_timeout = with_timeout(map->timeout);
	bool flag_exist = flags & IPSET_FLAG_EXIST;
	int before = 0;
	u32 timeout = map->timeout;
	ip_set_id_t id, refid = IPSET_INVALID_ID;
	const struct set_elem *elem;
	struct ip_set *s;
	u32 i;
	int ret = 0;

	if (unlikely(!tb[IPSET_ATTR_NAME] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s);
	if (id == IPSET_INVALID_ID)
		return -IPSET_ERR_NAME;
	/* "Loop detection" */
	if (s->type->features & IPSET_TYPE_NAME) {
		ret = -IPSET_ERR_LOOP;
		goto finish;
	}

	if (tb[IPSET_ATTR_CADT_FLAGS]) {
		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
		before = f & IPSET_FLAG_BEFORE;
	}

	if (before && !tb[IPSET_ATTR_NAMEREF]) {
		ret = -IPSET_ERR_BEFORE;
		goto finish;
	}

	if (tb[IPSET_ATTR_NAMEREF]) {
		refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]),
					  &s);
		if (refid == IPSET_INVALID_ID) {
			ret = -IPSET_ERR_NAMEREF;
			goto finish;
		}
		if (!before)
			before = -1;
	}
	if (tb[IPSET_ATTR_TIMEOUT]) {
		if (!with_timeout) {
			ret = -IPSET_ERR_TIMEOUT;
			goto finish;
		}
		timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]);
	}
	if (with_timeout && adt != IPSET_TEST)
		cleanup_entries(map);

	switch (adt) {
	case IPSET_TEST:
		for (i = 0; i < map->size && !ret; i++) {
			elem = list_set_elem(map, i);
			if (elem->id == IPSET_INVALID_ID ||
			    (before != 0 && i + 1 >= map->size))
				break;
			else if (with_timeout && list_set_expired(map, i))
				continue;
			else if (before > 0 && elem->id == id)
				ret = id_eq_timeout(map, i + 1, refid);
			else if (before < 0 && elem->id == refid)
				ret = id_eq_timeout(map, i + 1, id);
			else if (before == 0 && elem->id == id)
				ret = 1;
		}
		break;
	case IPSET_ADD:
		for (i = 0; i < map->size; i++) {
			elem = list_set_elem(map, i);
			if (elem->id != id)
				continue;
			if (!(with_timeout && flag_exist)) {
				ret = -IPSET_ERR_EXIST;
				goto finish;
			} else {
				struct set_telem *e = list_set_telem(map, i);

				if ((before > 1 &&
				     !id_eq(map, i + 1, refid)) ||
				    (before < 0 &&
				     (i == 0 || !id_eq(map, i - 1, refid)))) {
					ret = -IPSET_ERR_EXIST;
					goto finish;
				}

				e->timeout = ip_set_timeout_set(timeout);
				ip_set_put_byindex(id);
				ret = 0;
				goto finish;
			}
		}
		ret = -IPSET_ERR_LIST_FULL;
		for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL;
		     i++) {
			elem = list_set_elem(map, i);
			if (elem->id == IPSET_INVALID_ID)
				ret = before != 0 ? -IPSET_ERR_REF_EXIST
					: list_set_add(map, i, id, timeout);
			else if (elem->id != refid)
				continue;
			else if (before > 0)
				ret = list_set_add(map, i, id, timeout);
			else if (i + 1 < map->size)
				ret = list_set_add(map, i + 1, id, timeout);
		}
		break;
	case IPSET_DEL:
		ret = -IPSET_ERR_EXIST;
		for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) {
			elem = list_set_elem(map, i);
			if (elem->id == IPSET_INVALID_ID) {
				ret = before != 0 ? -IPSET_ERR_REF_EXIST
					: -IPSET_ERR_EXIST;
				break;
			} else if (elem->id == id &&
				   (before == 0 ||
				    (before > 0 &&
				     id_eq(map, i + 1, refid))))
				ret = list_set_del(map, i);
			else if (elem->id == refid &&
				 before < 0 && id_eq(map, i + 1, id))
				ret = list_set_del(map, i + 1);
		}
		break;
	default:
		break;
	}

finish:
	if (refid != IPSET_INVALID_ID)
		ip_set_put_byindex(refid);
	if (adt != IPSET_ADD || ret)
		ip_set_put_byindex(id);

	return ip_set_eexist(ret, flags) ? 0 : ret;
}
static int
list_set_uadt(struct ip_set *set, struct nlattr *tb[],
	      enum ipset_adt adt, u32 *lineno, u32 flags, bool retried)
{
	struct list_set *map = set->data;
	ipset_adtfn adtfn = set->variant->adt[adt];
	struct set_adt_elem e = { .refid = IPSET_INVALID_ID };
	struct ip_set_ext ext = IP_SET_INIT_UEXT(set);
	struct ip_set *s;
	int ret = 0;

	if (unlikely(!tb[IPSET_ATTR_NAME] ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_PACKETS) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_BYTES) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBMARK) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBPRIO) ||
		     !ip_set_optattr_netorder(tb, IPSET_ATTR_SKBQUEUE)))
		return -IPSET_ERR_PROTOCOL;

	if (tb[IPSET_ATTR_LINENO])
		*lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]);

	ret = ip_set_get_extensions(set, tb, &ext);
	if (ret)
		return ret;

	e.id = ip_set_get_byname(map->net, nla_data(tb[IPSET_ATTR_NAME]), &s);
	if (e.id == IPSET_INVALID_ID)
		return -IPSET_ERR_NAME;
	/* "Loop detection" */
	if (s->type->features & IPSET_TYPE_NAME) {
		ret = -IPSET_ERR_LOOP;
		goto finish;
	}

	if (tb[IPSET_ATTR_CADT_FLAGS]) {
		u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
		e.before = f & IPSET_FLAG_BEFORE;
	}

	if (e.before && !tb[IPSET_ATTR_NAMEREF]) {
		ret = -IPSET_ERR_BEFORE;
		goto finish;
	}

	if (tb[IPSET_ATTR_NAMEREF]) {
		e.refid = ip_set_get_byname(map->net,
					    nla_data(tb[IPSET_ATTR_NAMEREF]),
					    &s);
		if (e.refid == IPSET_INVALID_ID) {
			ret = -IPSET_ERR_NAMEREF;
			goto finish;
		}
		if (!e.before)
			e.before = -1;
	}
	if (adt != IPSET_TEST && SET_WITH_TIMEOUT(set))
		set_cleanup_entries(set);

	ret = adtfn(set, &e, &ext, &ext, flags);

finish:
	if (e.refid != IPSET_INVALID_ID)
		ip_set_put_byindex(map->net, e.refid);
	if (adt != IPSET_ADD || ret)
		ip_set_put_byindex(map->net, e.id);

	return ip_set_eexist(ret, flags) ? 0 : ret;
}
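/*
 * Compared with the older list_set_uadt() above, this reworked version keeps
 * only the netlink attribute parsing (set name, name reference, before/after
 * flag, extension values) and then hands the actual work to
 * set->variant->adt[adt], i.e. to list_set_uadd() and the corresponding
 * del/test handlers, instead of inlining the TEST/ADD/DEL loops in a switch
 * statement.
 */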