static unsigned int
set_target_v2(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_set_info_target_v2 *info = par->targinfo;

	ADT_OPT(add_opt, par->family, info->add_set.dim,
		info->add_set.flags, info->flags, info->timeout);
	ADT_OPT(del_opt, par->family, info->del_set.dim,
		info->del_set.flags, 0, UINT_MAX);

	/* Normalize to fit into jiffies */
	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
	    add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
		add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
	if (info->add_set.index != IPSET_INVALID_ID)
		ip_set_add(info->add_set.index, skb, par, &add_opt);
	if (info->del_set.index != IPSET_INVALID_ID)
		ip_set_del(info->del_set.index, skb, par, &del_opt);
	return XT_CONTINUE;
}
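/*
 * For context: a minimal sketch of the ADT_OPT() helper used throughout
 * this file, modeled on the six-argument form from kernels of this era.
 * The exact field layout of struct ip_set_adt_opt is an assumption based
 * on that era's include/linux/netfilter/ipset/ip_set.h.  It declares a
 * per-packet options block on the stack for ip_set_add/del/test().
 */
#define ADT_OPT(n, f, d, fs, cfs, t)	\
struct ip_set_adt_opt n = {		\
	.family	= f,			\
	.dim = d,			\
	.flags = fs,			\
	.cmdflags = cfs,		\
	.ext.timeout = t,		\
}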
static bool
set_match_v0(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_set_info_match_v0 *info = par->matchinfo;

	ADT_OPT(opt, par->family, info->match_set.u.compat.dim,
		info->match_set.u.compat.flags, 0, UINT_MAX);

	return match_set(info->match_set.index, skb, par, &opt,
			 info->match_set.u.compat.flags & IPSET_INV_MATCH);
}
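/*
 * A sketch of the match_set() helper these matchers call, mirroring the
 * one in net/netfilter/xt_set.c of this era: ip_set_test() reports
 * whether the packet is in the set, and inv flips the verdict when the
 * rule uses inverted matching (IPSET_INV_MATCH).
 */
static inline int
match_set(ip_set_id_t index, const struct sk_buff *skb,
	  const struct xt_action_param *par, struct ip_set_adt_opt *opt,
	  int inv)
{
	if (ip_set_test(index, skb, par, opt))
		inv = !inv;
	return inv;
}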
static unsigned int
set_target_v3(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_set_info_target_v3 *info = par->targinfo;

	ADT_OPT(add_opt, par->family, info->add_set.dim,
		info->add_set.flags, info->flags, info->timeout);
	ADT_OPT(del_opt, par->family, info->del_set.dim,
		info->del_set.flags, 0, UINT_MAX);
	ADT_OPT(map_opt, par->family, info->map_set.dim,
		info->map_set.flags, 0, UINT_MAX);
	int ret;

	/* Normalize to fit into jiffies */
	if (add_opt.ext.timeout != IPSET_NO_TIMEOUT &&
	    add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC)
		add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC;
	if (info->add_set.index != IPSET_INVALID_ID)
		ip_set_add(info->add_set.index, skb, par, &add_opt);
	if (info->del_set.index != IPSET_INVALID_ID)
		ip_set_del(info->del_set.index, skb, par, &del_opt);
	if (info->map_set.index != IPSET_INVALID_ID) {
		map_opt.cmdflags |= info->flags & (IPSET_FLAG_MAP_SKBMARK |
						   IPSET_FLAG_MAP_SKBPRIO |
						   IPSET_FLAG_MAP_SKBQUEUE);
		ret = match_set(info->map_set.index, skb, par, &map_opt,
				info->map_set.flags & IPSET_INV_MATCH);
		if (!ret)
			return XT_CONTINUE;
		/* Map the skbinfo (mark/priority/queue) stored with the
		 * matching set element onto the packet.
		 */
		if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBMARK)
			skb->mark = (skb->mark & ~(map_opt.ext.skbmarkmask)) ^
				    (map_opt.ext.skbmark);
		if (map_opt.cmdflags & IPSET_FLAG_MAP_SKBPRIO)
			skb->priority = map_opt.ext.skbprio;
		if ((map_opt.cmdflags & IPSET_FLAG_MAP_SKBQUEUE) &&
		    skb->dev &&
		    skb->dev->real_num_tx_queues > map_opt.ext.skbqueue)
			skb_set_queue_mapping(skb, map_opt.ext.skbqueue);
	}
	return XT_CONTINUE;
}
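/*
 * For reference, the userspace ABI consumed by set_target_v3() above,
 * as an assumption based on include/uapi/linux/netfilter/xt_set.h of
 * this era: three set references plus the shared flags/timeout that
 * drive the add/del/map steps.
 */
struct xt_set_info {
	ip_set_id_t index;
	__u8 dim;
	__u8 flags;
};

struct xt_set_info_target_v3 {
	struct xt_set_info add_set;
	struct xt_set_info del_set;
	struct xt_set_info map_set;
	__u32 flags;
	__u32 timeout;
};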
static bool
set_match_v1(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_set_info_match_v1 *info = par->matchinfo;

	ADT_OPT(opt, par->family, info->match_set.dim,
		info->match_set.flags, 0, UINT_MAX);

	if (opt.flags & IPSET_RETURN_NOMATCH)
		opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH;

	return match_set(info->match_set.index, skb, par, &opt,
			 info->match_set.flags & IPSET_INV_MATCH);
}
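/*
 * Userspace side, for context (hedged example; rule syntax per the
 * iptables-extensions man page): IPSET_RETURN_NOMATCH is set from
 * iptables via --return-nomatch, e.g.
 *
 *   iptables -A INPUT -m set --match-set blocklist src \
 *            --return-nomatch -j DROP
 *
 * which, for set types supporting the nomatch flag, makes elements
 * added with nomatch report a match instead of acting as exceptions.
 */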
static bool
set_match_v4(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_set_info_match_v4 *info = par->matchinfo;

	ADT_OPT(opt, par->family, info->match_set.dim,
		info->match_set.flags, info->flags, UINT_MAX);
	int ret;

	if (info->packets.op != IPSET_COUNTER_NONE ||
	    info->bytes.op != IPSET_COUNTER_NONE)
		opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS;

	ret = match_set(info->match_set.index, skb, par, &opt,
			info->match_set.flags & IPSET_INV_MATCH);

	/* Counter comparisons only apply when the set lookup matched
	 * and counter matching was requested.
	 */
	if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS))
		return ret;

	if (!match_counter(opt.ext.packets, &info->packets))
		return false;
	return match_counter(opt.ext.bytes, &info->bytes);
}
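/*
 * A sketch of the match_counter() helper used by set_match_v4(),
 * assuming the comparison-op encoding (IPSET_COUNTER_NONE/EQ/NE/LT/GT)
 * and the struct ip_set_counter_match layout from this era's
 * include/uapi/linux/netfilter/xt_set.h.
 */
static bool
match_counter(u64 counter, const struct ip_set_counter_match *info)
{
	switch (info->op) {
	case IPSET_COUNTER_NONE:
		return true;
	case IPSET_COUNTER_EQ:
		return counter == info->value;
	case IPSET_COUNTER_NE:
		return counter != info->value;
	case IPSET_COUNTER_LT:
		return counter < info->value;
	case IPSET_COUNTER_GT:
		return counter > info->value;
	}
	return false;
}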