Example #1
/*
 * Destroys nat64 instance.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ]
 *
 * Returns 0 on success
 */
static int
nat64lsn_destroy(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	struct nat64lsn_cfg *cfg;
	ipfw_obj_header *oh;

	if (sd->valsize != sizeof(*oh))
		return (EINVAL);

	oh = (ipfw_obj_header *)op3;

	IPFW_UH_WLOCK(ch);
	cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
	if (cfg == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}

	if (cfg->no.refcnt > 0) {
		IPFW_UH_WUNLOCK(ch);
		return (EBUSY);
	}

	IPFW_WLOCK(ch);
	SRV_OBJECT(ch, cfg->no.kidx) = NULL;
	IPFW_WUNLOCK(ch);

	nat64lsn_detach_config(ch, cfg);
	IPFW_UH_WUNLOCK(ch);

	nat64lsn_destroy_instance(cfg);
	return (0);
}
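
The destroy path above shows a pattern that repeats throughout ipfw: look the object up and check its reference count under the UH write lock, unlink it while the locks are held, and only then tear it down with no locks held. Below is a minimal user-space sketch of the same idea using a pthread rwlock; the registry list, struct obj and obj_destroy() are invented for illustration and are not ipfw APIs.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical named object kept in a simple singly-linked registry. */
struct obj {
	struct obj *next;
	int refcnt;
	char name[16];
};

static struct obj *registry;
static pthread_rwlock_t registry_lock = PTHREAD_RWLOCK_INITIALIZER;

int
obj_destroy(const char *name)
{
	struct obj **pp, *o;

	pthread_rwlock_wrlock(&registry_lock);
	for (pp = &registry; (o = *pp) != NULL; pp = &o->next)
		if (strcmp(o->name, name) == 0)
			break;
	if (o == NULL) {
		pthread_rwlock_unlock(&registry_lock);
		return (ESRCH);			/* not found */
	}
	if (o->refcnt > 0) {
		pthread_rwlock_unlock(&registry_lock);
		return (EBUSY);			/* still referenced */
	}
	*pp = o->next;				/* unlink under the lock */
	pthread_rwlock_unlock(&registry_lock);

	free(o);				/* destroy with no locks held */
	return (0);
}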
Example #2
/*
 * Checks if kernel interface is contained in our tracked
 * interface list and calls attach/detach handler.
 */
static void
ipfw_kifhandler(void *arg, struct ifnet *ifp)
{
	struct ip_fw_chain *ch;
	struct ipfw_iface *iif;
	struct namedobj_instance *ii;
	uintptr_t htype;

	if (V_ipfw_vnet_ready == 0)
		return;

	ch = &V_layer3_chain;
	htype = (uintptr_t)arg;

	IPFW_UH_WLOCK(ch);
	ii = CHAIN_TO_II(ch);
	if (ii == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return;
	}
	iif = (struct ipfw_iface*)ipfw_objhash_lookup_name(ii, 0,
	    if_name(ifp));
	if (iif != NULL) {
		if (htype == 1)
			handle_ifattach(ch, iif, ifp->if_index);
		else
			handle_ifdetach(ch, iif, ifp->if_index);
	}
	IPFW_UH_WUNLOCK(ch);
}
Example #3
static int
dyn_create(struct ip_fw_chain *ch, struct tid_info *ti,
    uint16_t *pkidx)
{
	struct namedobj_instance *ni;
	struct dyn_state_obj *obj;
	struct named_object *no;
	ipfw_obj_ntlv *ntlv;
	char *name;

	DYN_DEBUG("uidx %d", ti->uidx);
	if (ti->uidx != 0) {
		if (ti->tlvs == NULL)
			return (EINVAL);
		ntlv = ipfw_find_name_tlv_type(ti->tlvs, ti->tlen, ti->uidx,
		    IPFW_TLV_STATE_NAME);
		if (ntlv == NULL)
			return (EINVAL);
		name = ntlv->name;
	} else
		name = default_state_name;

	ni = CHAIN_TO_SRV(ch);
	obj = malloc(sizeof(*obj), M_IPFW, M_WAITOK | M_ZERO);
	obj->no.name = obj->name;
	obj->no.etlv = IPFW_TLV_STATE_NAME;
	strlcpy(obj->name, name, sizeof(obj->name));

	IPFW_UH_WLOCK(ch);
	no = ipfw_objhash_lookup_name_type(ni, 0,
	    IPFW_TLV_STATE_NAME, name);
	if (no != NULL) {
		/*
		 * Object is already created.
		 * Just return its kidx and bump refcount.
		 */
		*pkidx = no->kidx;
		no->refcnt++;
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		DYN_DEBUG("\tfound kidx %d", *pkidx);
		return (0);
	}
	if (ipfw_objhash_alloc_idx(ni, &obj->no.kidx) != 0) {
		DYN_DEBUG("\talloc_idx failed for %s", name);
		IPFW_UH_WUNLOCK(ch);
		free(obj, M_IPFW);
		return (ENOSPC);
	}
	ipfw_objhash_add(ni, &obj->no);
	IPFW_WLOCK(ch);
	SRV_OBJECT(ch, obj->no.kidx) = obj;
	IPFW_WUNLOCK(ch);
	obj->no.refcnt++;
	*pkidx = obj->no.kidx;
	IPFW_UH_WUNLOCK(ch);
	DYN_DEBUG("\tcreated kidx %d", *pkidx);
	return (0);
}
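
dyn_create() allocates the object with M_WAITOK before taking the UH lock, so the critical section never sleeps, and it simply frees the spare allocation if another thread registered the same name first. A rough user-space sketch of that create-or-reference pattern follows; struct state_obj, state_tab and state_get() are made-up names, not ipfw interfaces.

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct state_obj {
	struct state_obj *next;
	unsigned refcnt;
	char name[64];
};

static struct state_obj *state_tab;	/* hypothetical name-keyed list */
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Return an existing object for @name with its refcount bumped, or
 * insert a freshly allocated one.  Allocation happens before the lock
 * is taken, like dyn_create() does with M_WAITOK, so the critical
 * section itself never sleeps.
 */
struct state_obj *
state_get(const char *name)
{
	struct state_obj *obj, *o;

	obj = calloc(1, sizeof(*obj));
	if (obj == NULL)
		return (NULL);
	strncpy(obj->name, name, sizeof(obj->name) - 1);
	obj->refcnt = 1;

	pthread_mutex_lock(&state_lock);
	for (o = state_tab; o != NULL; o = o->next)
		if (strcmp(o->name, name) == 0)
			break;
	if (o != NULL) {
		o->refcnt++;			/* already there: just ref it */
		pthread_mutex_unlock(&state_lock);
		free(obj);			/* discard the spare allocation */
		return (o);
	}
	obj->next = state_tab;			/* won the race: link it in */
	state_tab = obj;
	pthread_mutex_unlock(&state_lock);
	return (obj);
}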
Example #4
/*
 * Add a new rule to the list. Copy the rule into a malloc'ed area, then
 * possibly create a rule number and add the rule to the list.
 * Update the rule_number in the input struct so the caller knows it as well.
 * XXX DO NOT USE FOR THE DEFAULT RULE.
 * Must be called without IPFW_UH held
 */
int
ipfw_add_rule(struct ip_fw_chain *chain, struct ip_fw *input_rule)
{
	struct ip_fw *rule;
	int i, l, insert_before;
	struct ip_fw **map;	/* the new array of pointers */

	if (chain->rules == NULL || input_rule->rulenum > IPFW_DEFAULT_RULE-1)
		return (EINVAL);

	l = RULESIZE(input_rule);
	rule = malloc(l, M_IPFW, M_WAITOK | M_ZERO);
	if (rule == NULL)
		return (ENOSPC);
	/* get_map returns with IPFW_UH_WLOCK if successful */
	map = get_map(chain, 1, 0 /* not locked */);
	if (map == NULL) {
		free(rule, M_IPFW);
		return ENOSPC;
	}

	bcopy(input_rule, rule, l);
	/* clear fields not settable from userland */
	rule->x_next = NULL;
	rule->next_rule = NULL;
	rule->pcnt = 0;
	rule->bcnt = 0;
	rule->timestamp = 0;

	if (V_autoinc_step < 1)
		V_autoinc_step = 1;
	else if (V_autoinc_step > 1000)
		V_autoinc_step = 1000;
	/* find the insertion point, we will insert before */
	insert_before = rule->rulenum ? rule->rulenum + 1 : IPFW_DEFAULT_RULE;
	i = ipfw_find_rule(chain, insert_before, 0);
	/* duplicate first part */
	if (i > 0)
		bcopy(chain->map, map, i * sizeof(struct ip_fw *));
	map[i] = rule;
	/* duplicate remaining part, we always have the default rule */
	bcopy(chain->map + i, map + i + 1,
		sizeof(struct ip_fw *) *(chain->n_rules - i));
	if (rule->rulenum == 0) {
		/* write back the number */
		rule->rulenum = i > 0 ? map[i-1]->rulenum : 0;
		if (rule->rulenum < IPFW_DEFAULT_RULE - V_autoinc_step)
			rule->rulenum += V_autoinc_step;
		input_rule->rulenum = rule->rulenum;
	}

	rule->id = chain->id + 1;
	map = swap_map(chain, map, chain->n_rules + 1);
	chain->static_len += l;
	IPFW_UH_WUNLOCK(chain);
	if (map)
		free(map, M_IPFW);
	return (0);
}
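
The interesting part of ipfw_add_rule() is that it never modifies the live rule array: it builds a new one with the rule inserted at position i and then publishes it with swap_map(). The helper below reproduces just that array manipulation in plain C; insert_ptr() is illustrative and not part of ipfw, and @old is assumed to point at n valid entries.

#include <stdlib.h>
#include <string.h>

/*
 * Build a new pointer array of n + 1 entries with @elem inserted at
 * position @pos, mirroring how ipfw_add_rule() prepares the new map
 * before swap_map().  Returns the new array; the caller frees the old
 * one after publishing the new pointer.
 */
void **
insert_ptr(void **old, size_t n, size_t pos, void *elem)
{
	void **map;

	map = calloc(n + 1, sizeof(*map));
	if (map == NULL)
		return (NULL);
	if (pos > 0)
		memcpy(map, old, pos * sizeof(*map));	/* head: [0, pos) */
	map[pos] = elem;				/* new entry */
	memcpy(map + pos + 1, old + pos,		/* tail: [pos, n) */
	    (n - pos) * sizeof(*map));
	return (map);
}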
Example #5
/*
 * Called for the removal of each instance.
 */
static int
vnet_ipfw_uninit(const void *unused)
{
	struct ip_fw *reap, *rule;
	struct ip_fw_chain *chain = &V_layer3_chain;
	int i;

	V_ipfw_vnet_ready = 0; /* tell new callers to go away */
	/*
	 * disconnect from ipv4, ipv6, layer2 and sockopt.
	 * Then grab, release and grab again the WLOCK so we make
	 * sure the update is propagated and nobody will be in.
	 */
	(void)ipfw_attach_hooks(0 /* detach */);
	V_ip_fw_ctl_ptr = NULL;
	IPFW_UH_WLOCK(chain);
	IPFW_UH_WUNLOCK(chain);
	IPFW_UH_WLOCK(chain);

	IPFW_WLOCK(chain);
	ipfw_dyn_uninit(0);	/* run the callout_drain */
	IPFW_WUNLOCK(chain);

	ipfw_destroy_tables(chain);
	reap = NULL;
	IPFW_WLOCK(chain);
	for (i = 0; i < chain->n_rules; i++) {
		rule = chain->map[i];
		rule->x_next = reap;
		reap = rule;
	}
	if (chain->map)
		free(chain->map, M_IPFW);
	IPFW_WUNLOCK(chain);
	IPFW_UH_WUNLOCK(chain);
	if (reap != NULL)
		ipfw_reap_rules(reap);
	IPFW_LOCK_DESTROY(chain);
	ipfw_dyn_uninit(1);	/* free the remaining parts */
	return 0;
}
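
vnet_ipfw_uninit() does not free rules while holding the chain lock: it threads them onto a local reap list via x_next and hands that list to ipfw_reap_rules() only after the locks are dropped. The snippet below shows the same steal-then-free idiom for a generic list; struct node, reap_all() and the pthread mutex are illustrative, not ipfw code.

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	/* payload ... */
};

/*
 * Detach every element under the lock and free them afterwards, the
 * same way vnet_ipfw_uninit() builds a "reap" list and calls
 * ipfw_reap_rules() only after dropping the chain locks.
 */
void
reap_all(struct node **headp, pthread_mutex_t *lock)
{
	struct node *reap, *n;

	pthread_mutex_lock(lock);
	reap = *headp;			/* steal the whole list ... */
	*headp = NULL;
	pthread_mutex_unlock(lock);

	while ((n = reap) != NULL) {	/* ... and tear it down unlocked */
		reap = n->next;
		free(n);
	}
}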
Example #6
/*
 * Unreference interface specified by @ic.
 * Must be called without holding any locks.
 */
void
ipfw_iface_unref(struct ip_fw_chain *ch, struct ipfw_ifc *ic)
{
	struct ipfw_iface *iif;

	iif = ic->iface;
	ic->iface = NULL;

	IPFW_UH_WLOCK(ch);
	iif->no.refcnt--;
	/* TODO: check for references & delete */
	IPFW_UH_WUNLOCK(ch);
}
Example #7
/*
 * Reset nat64lsn statistics.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ]
 *
 * Returns 0 on success
 */
static int
nat64lsn_reset_stats(struct ip_fw_chain *ch, ip_fw3_opheader *op,
    struct sockopt_data *sd)
{
	struct nat64lsn_cfg *cfg;
	ipfw_obj_header *oh;

	if (sd->valsize != sizeof(*oh))
		return (EINVAL);
	oh = (ipfw_obj_header *)sd->kbuf;
	if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
	    oh->ntlv.set >= IPFW_MAX_SETS)
		return (EINVAL);

	IPFW_UH_WLOCK(ch);
	cfg = nat64lsn_find(CHAIN_TO_SRV(ch), oh->ntlv.name, oh->ntlv.set);
	if (cfg == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (ESRCH);
	}
	COUNTER_ARRAY_ZERO(cfg->stats.stats, NAT64STATS);
	IPFW_UH_WUNLOCK(ch);
	return (0);
}
Example #8
/*
 * Per-VNET ipfw detach hook.
 */
void
vnet_ipfw_iface_destroy(struct ip_fw_chain *ch)
{
	struct namedobj_instance *ii;

	IPFW_UH_WLOCK(ch);
	ii = CHAIN_TO_II(ch);
	ch->ifcfg = NULL;
	IPFW_UH_WUNLOCK(ch);

	if (ii != NULL) {
		ipfw_objhash_foreach(ii, destroy_iface, ch);
		ipfw_objhash_destroy(ii);
		iface_khandler_deregister();
	}
}
Example #9
/*
 * Perform actual init on internal request.
 * Inits both namehash and global khandler.
 */
static void
vnet_ipfw_iface_init(struct ip_fw_chain *ch)
{
	struct namedobj_instance *ii;

	ii = ipfw_objhash_create(DEFAULT_IFACES);
	IPFW_UH_WLOCK(ch);
	if (ch->ifcfg == NULL) {
		ch->ifcfg = ii;
		ii = NULL;
	}
	IPFW_UH_WUNLOCK(ch);

	if (ii != NULL) {
		/* Already initialized. Free namehash. */
		ipfw_objhash_destroy(ii);
	} else {
		/* We're the first ones. Init kernel hooks. */
		iface_khandler_register();
	}
}
Example #10
void
nat64lsn_uninit(struct ip_fw_chain *ch, int last)
{

	IPFW_DEL_OBJ_REWRITER(last, opcodes);
	IPFW_DEL_SOPT_HANDLER(last, scodes);
	ipfw_del_eaction(ch, V_nat64lsn_eid);
	/*
	 * Since we have already deregistered the external action,
	 * our named objects become inaccessible via rules, because
	 * all rules were truncated by ipfw_del_eaction().
	 * So, we can unlink and destroy our named objects without holding
	 * IPFW_WLOCK().
	 */
	IPFW_UH_WLOCK(ch);
	ipfw_objhash_foreach_type(CHAIN_TO_SRV(ch), destroy_config_cb, ch,
	    IPFW_TLV_NAT64LSN_NAME);
	V_nat64lsn_eid = 0;
	IPFW_UH_WUNLOCK(ch);
	if (last != 0)
		nat64lsn_uninit_internal();
}
Example #11
static int
resize_dynamic_table(struct ip_fw_chain *chain, int nbuckets)
{
	int i, k, nbuckets_old;
	ipfw_dyn_rule *q;
	struct ipfw_dyn_bucket *dyn_v, *dyn_v_old;

	/* Check if given number is a power of 2 and at most 64k */
	if ((nbuckets > 65536) || (!powerof2(nbuckets)))
		return 1;

	CTR3(KTR_NET, "%s: resize dynamic hash: %d -> %d", __func__,
	    V_curr_dyn_buckets, nbuckets);

	/* Allocate and initialize new hash */
	dyn_v = malloc(nbuckets * sizeof(ipfw_dyn_rule), M_IPFW,
	    M_WAITOK | M_ZERO);

	for (i = 0 ; i < nbuckets; i++)
		IPFW_BUCK_LOCK_INIT(&dyn_v[i]);

	/*
	 * Take the upper half lock, as get_map() does, to ease
	 * read-only access to the dynamic rules hash from sysctl
	 */
	IPFW_UH_WLOCK(chain);

	/*
	 * Acquire chain write lock to permit hash access
	 * for main traffic path without additional locks
	 */
	IPFW_WLOCK(chain);

	/* Save old values */
	nbuckets_old = V_curr_dyn_buckets;
	dyn_v_old = V_ipfw_dyn_v;

	/* Skip relinking if array is not set up */
	if (V_ipfw_dyn_v == NULL)
		V_curr_dyn_buckets = 0;

	/* Re-link all dynamic states */
	for (i = 0 ; i < V_curr_dyn_buckets ; i++) {
		while (V_ipfw_dyn_v[i].head != NULL) {
			/* Remove from current chain */
			q = V_ipfw_dyn_v[i].head;
			V_ipfw_dyn_v[i].head = q->next;

			/* Get new hash value */
			k = hash_packet(&q->id, nbuckets);
			q->bucket = k;
			/* Add to the new head */
			q->next = dyn_v[k].head;
			dyn_v[k].head = q;
		}
	}

	/* Update current pointers/buckets values */
	V_curr_dyn_buckets = nbuckets;
	V_ipfw_dyn_v = dyn_v;

	IPFW_WUNLOCK(chain);

	IPFW_UH_WUNLOCK(chain);

	/* Start periodic callout on initial creation */
	if (dyn_v_old == NULL) {
		callout_reset_on(&V_ipfw_timeout, hz, ipfw_dyn_tick, curvnet, 0);
		return (0);
	}

	/* Destroy all mutexes */
	for (i = 0 ; i < nbuckets_old ; i++)
		IPFW_BUCK_LOCK_DESTROY(&dyn_v_old[i]);

	/* Free old hash */
	free(dyn_v_old, M_IPFW);

	return 0;
}
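
The core of resize_dynamic_table() is the relinking loop: every state is popped off its old bucket, rehashed against the new bucket count, and pushed onto the head of the new bucket. Here is that loop in isolation as plain C; struct entry and rehash() are invented names, and "key & (new_n - 1)" stands in for hash_packet() under the assumption (checked by the function above) that the new size is a power of two.

#include <stddef.h>

struct entry {
	struct entry *next;
	unsigned key;		/* value the bucket index is derived from */
};

/*
 * Relink all entries from @oldb (old_n buckets) into @newb (new_n
 * buckets), the way resize_dynamic_table() moves states into the
 * freshly allocated hash.  @newb is assumed to be zero-initialized.
 */
void
rehash(struct entry **oldb, unsigned old_n, struct entry **newb, unsigned new_n)
{
	struct entry *e;
	unsigned i, k;

	for (i = 0; i < old_n; i++) {
		while ((e = oldb[i]) != NULL) {
			oldb[i] = e->next;	/* unlink from the old chain */
			k = e->key & (new_n - 1);
			e->next = newb[k];	/* push onto the new head */
			newb[k] = e;
		}
	}
}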
Example #12
/*
 * Change existing nat64lsn instance configuration.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_header ipfw_nat64lsn_cfg ]
 * Reply: [ ipfw_obj_header ipfw_nat64lsn_cfg ]
 *
 * Returns 0 on success
 */
static int
nat64lsn_config(struct ip_fw_chain *ch, ip_fw3_opheader *op,
    struct sockopt_data *sd)
{
	ipfw_obj_header *oh;
	ipfw_nat64lsn_cfg *uc;
	struct nat64lsn_cfg *cfg;
	struct namedobj_instance *ni;

	if (sd->valsize != sizeof(*oh) + sizeof(*uc))
		return (EINVAL);

	oh = (ipfw_obj_header *)ipfw_get_sopt_space(sd,
	    sizeof(*oh) + sizeof(*uc));
	uc = (ipfw_nat64lsn_cfg *)(oh + 1);

	if (ipfw_check_object_name_generic(oh->ntlv.name) != 0 ||
	    oh->ntlv.set >= IPFW_MAX_SETS)
		return (EINVAL);

	ni = CHAIN_TO_SRV(ch);
	if (sd->sopt->sopt_dir == SOPT_GET) {
		IPFW_UH_RLOCK(ch);
		cfg = nat64lsn_find(ni, oh->ntlv.name, oh->ntlv.set);
		if (cfg == NULL) {
			IPFW_UH_RUNLOCK(ch);
			return (EEXIST);
		}
		nat64lsn_export_config(ch, cfg, uc);
		IPFW_UH_RUNLOCK(ch);
		return (0);
	}

	nat64lsn_default_config(uc);

	IPFW_UH_WLOCK(ch);
	cfg = nat64lsn_find(ni, oh->ntlv.name, oh->ntlv.set);
	if (cfg == NULL) {
		IPFW_UH_WUNLOCK(ch);
		return (EEXIST);
	}

	/*
	 * For now, allow changing only the following values:
	 *  jmaxlen, nh_del_age, pg_del_age, tcp_syn_age, tcp_close_age,
	 *  tcp_est_age, udp_age, icmp_age, flags, max_ports.
	 */

	cfg->max_chunks = uc->max_ports / NAT64_CHUNK_SIZE;
	cfg->jmaxlen = uc->jmaxlen;
	cfg->nh_delete_delay = uc->nh_delete_delay;
	cfg->pg_delete_delay = uc->pg_delete_delay;
	cfg->st_syn_ttl = uc->st_syn_ttl;
	cfg->st_close_ttl = uc->st_close_ttl;
	cfg->st_estab_ttl = uc->st_estab_ttl;
	cfg->st_udp_ttl = uc->st_udp_ttl;
	cfg->st_icmp_ttl = uc->st_icmp_ttl;
	cfg->flags = uc->flags & NAT64LSN_FLAGSMASK;

	IPFW_UH_WUNLOCK(ch);

	return (0);
}
Example #13
/*
 * Creates new nat64lsn instance.
 * Data layout (v0)(current):
 * Request: [ ipfw_obj_lheader ipfw_nat64lsn_cfg ]
 *
 * Returns 0 on success
 */
static int
nat64lsn_create(struct ip_fw_chain *ch, ip_fw3_opheader *op3,
    struct sockopt_data *sd)
{
	ipfw_obj_lheader *olh;
	ipfw_nat64lsn_cfg *uc;
	struct nat64lsn_cfg *cfg;
	struct namedobj_instance *ni;
	uint32_t addr4, mask4;

	if (sd->valsize != sizeof(*olh) + sizeof(*uc))
		return (EINVAL);

	olh = (ipfw_obj_lheader *)sd->kbuf;
	uc = (ipfw_nat64lsn_cfg *)(olh + 1);

	if (ipfw_check_object_name_generic(uc->name) != 0)
		return (EINVAL);

	if (uc->agg_prefix_len > 127 || uc->set >= IPFW_MAX_SETS)
		return (EINVAL);

	if (uc->plen4 > 32)
		return (EINVAL);
	if (uc->plen6 > 128 || ((uc->plen6 % 8) != 0))
		return (EINVAL);

	/* XXX: Check prefix4 to be global */
	addr4 = ntohl(uc->prefix4.s_addr);
	mask4 = ~((1 << (32 - uc->plen4)) - 1);
	if ((addr4 & mask4) != addr4)
		return (EINVAL);

	/* XXX: Check prefix6 */
	if (uc->min_port == 0)
		uc->min_port = NAT64_MIN_PORT;
	if (uc->max_port == 0)
		uc->max_port = 65535;
	if (uc->min_port > uc->max_port)
		return (EINVAL);
	uc->min_port = roundup(uc->min_port, NAT64_CHUNK_SIZE);
	uc->max_port = roundup(uc->max_port, NAT64_CHUNK_SIZE);

	nat64lsn_default_config(uc);

	ni = CHAIN_TO_SRV(ch);
	IPFW_UH_RLOCK(ch);
	if (nat64lsn_find(ni, uc->name, uc->set) != NULL) {
		IPFW_UH_RUNLOCK(ch);
		return (EEXIST);
	}
	IPFW_UH_RUNLOCK(ch);

	cfg = nat64lsn_init_instance(ch, 1 << (32 - uc->plen4));
	strlcpy(cfg->name, uc->name, sizeof(cfg->name));
	cfg->no.name = cfg->name;
	cfg->no.etlv = IPFW_TLV_NAT64LSN_NAME;
	cfg->no.set = uc->set;

	cfg->prefix4 = addr4;
	cfg->pmask4 = addr4 | ~mask4;
	/* XXX: Copy 96 bits */
	cfg->plen6 = 96;
	memcpy(&cfg->prefix6, &uc->prefix6, cfg->plen6 / 8);
	cfg->plen4 = uc->plen4;
	cfg->flags = uc->flags & NAT64LSN_FLAGSMASK;
	cfg->max_chunks = uc->max_ports / NAT64_CHUNK_SIZE;
	cfg->agg_prefix_len = uc->agg_prefix_len;
	cfg->agg_prefix_max = uc->agg_prefix_max;

	cfg->min_chunk = uc->min_port / NAT64_CHUNK_SIZE;
	cfg->max_chunk = uc->max_port / NAT64_CHUNK_SIZE;

	cfg->jmaxlen = uc->jmaxlen;
	cfg->nh_delete_delay = uc->nh_delete_delay;
	cfg->pg_delete_delay = uc->pg_delete_delay;
	cfg->st_syn_ttl = uc->st_syn_ttl;
	cfg->st_close_ttl = uc->st_close_ttl;
	cfg->st_estab_ttl = uc->st_estab_ttl;
	cfg->st_udp_ttl = uc->st_udp_ttl;
	cfg->st_icmp_ttl = uc->st_icmp_ttl;

	cfg->nomatch_verdict = IP_FW_DENY;
	cfg->nomatch_final = 1;	/* Exit outer loop by default */

	IPFW_UH_WLOCK(ch);

	if (nat64lsn_find(ni, uc->name, uc->set) != NULL) {
		IPFW_UH_WUNLOCK(ch);
		nat64lsn_destroy_instance(cfg);
		return (EEXIST);
	}

	if (ipfw_objhash_alloc_idx(CHAIN_TO_SRV(ch), &cfg->no.kidx) != 0) {
		IPFW_UH_WUNLOCK(ch);
		nat64lsn_destroy_instance(cfg);
		return (ENOSPC);
	}
	ipfw_objhash_add(CHAIN_TO_SRV(ch), &cfg->no);

	/* Okay, let's link data */
	IPFW_WLOCK(ch);
	SRV_OBJECT(ch, cfg->no.kidx) = cfg;
	IPFW_WUNLOCK(ch);

	nat64lsn_start_instance(cfg);

	IPFW_UH_WUNLOCK(ch);
	return (0);
}
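
The prefix4 check in nat64lsn_create() builds a netmask from plen4 and rejects prefixes with host bits set. A tiny stand-alone example of that arithmetic; prefix4_is_aligned() is a made-up helper, and plen is assumed to be in the range 1..32.

#include <stdint.h>
#include <stdio.h>

/*
 * Return non-zero if @addr_host_order has no bits set outside the
 * /plen prefix, i.e. the same "(addr4 & mask4) == addr4" test used
 * by nat64lsn_create().
 */
int
prefix4_is_aligned(uint32_t addr_host_order, int plen)
{
	uint32_t mask;

	mask = ~((1u << (32 - plen)) - 1);
	return ((addr_host_order & mask) == addr_host_order);
}

int
main(void)
{
	/* 192.0.2.0/24 is aligned, 192.0.2.1/24 is not. */
	printf("%d\n", prefix4_is_aligned(0xC0000200u, 24));	/* prints 1 */
	printf("%d\n", prefix4_is_aligned(0xC0000201u, 24));	/* prints 0 */
	return (0);
}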
Example #14
/*
 * Notify the subsystem that we are interested in tracking
 * interface @name. This function has to be called without
 * holding any locks to permit allocating the necessary states
 * for proper interface tracking.
 *
 * Returns 0 on success.
 */
int
ipfw_iface_ref(struct ip_fw_chain *ch, char *name,
    struct ipfw_ifc *ic)
{
	struct namedobj_instance *ii;
	struct ipfw_iface *iif, *tmp;

	if (strlen(name) >= sizeof(iif->ifname))
		return (EINVAL);

	IPFW_UH_WLOCK(ch);

	ii = CHAIN_TO_II(ch);
	if (ii == NULL) {

		/*
		 * First request to subsystem.
		 * Let's perform init.
		 */
		IPFW_UH_WUNLOCK(ch);
		vnet_ipfw_iface_init(ch);
		IPFW_UH_WLOCK(ch);
		ii = CHAIN_TO_II(ch);
	}

	iif = (struct ipfw_iface *)ipfw_objhash_lookup_name(ii, 0, name);

	if (iif != NULL) {
		iif->no.refcnt++;
		ic->iface = iif;
		IPFW_UH_WUNLOCK(ch);
		return (0);
	}

	IPFW_UH_WUNLOCK(ch);

	/* Not found. Let's create one */
	iif = malloc(sizeof(struct ipfw_iface), M_IPFW, M_WAITOK | M_ZERO);
	TAILQ_INIT(&iif->consumers);
	iif->no.name = iif->ifname;
	strlcpy(iif->ifname, name, sizeof(iif->ifname));

	/*
	 * Ref & link to the list.
	 *
	 * We assume  ifnet_arrival_event / ifnet_departure_event
	 * are not holding any locks.
	 */
	iif->no.refcnt = 1;
	IPFW_UH_WLOCK(ch);

	tmp = (struct ipfw_iface *)ipfw_objhash_lookup_name(ii, 0, name);
	if (tmp != NULL) {
		/* Interface has been created since unlock. Ref and return */
		tmp->no.refcnt++;
		ic->iface = tmp;
		IPFW_UH_WUNLOCK(ch);
		free(iif, M_IPFW);
		return (0);
	}

	iif->ifindex = ipfw_kiflookup(name);
	if (iif->ifindex != 0)
		iif->resolved = 1;

	ipfw_objhash_add(ii, &iif->no);
	ic->iface = iif;

	IPFW_UH_WUNLOCK(ch);

	return (0);
}
Example #15
/*
 * Main function used to link the values of entries being added
 * to the index. Since we may drop/reacquire the UH lock many times,
 * handle changes by checking the tablestate "modified" field.
 *
 * Success: return 0.
 */
int
ipfw_link_table_values(struct ip_fw_chain *ch, struct tableop_state *ts)
{
	int error, i, found;
	struct namedobj_instance *vi;
	struct table_config *tc;
	struct tentry_info *tei, *ptei;
	uint32_t count, vlimit;
	uint16_t vidx;
	struct table_val_link *ptv;
	struct table_value tval, *pval;

	/*
	 * Stage 1: reference all existing values and
	 * save their indices.
	 */
	IPFW_UH_WLOCK_ASSERT(ch);
	get_value_ptrs(ch, ts->tc, ts->vshared, &pval, &vi);

	error = 0;
	found = 0;
	vlimit = ts->ta->vlimit;
	vidx = 0;
	tc = ts->tc;
	tei = ts->tei;
	count = ts->count;
	for (i = 0; i < count; i++) {
		ptei = &tei[i];
		ptei->value = 0; /* Ensure value is always 0 in the beginning */
		mask_table_value(ptei->pvalue, &tval, ts->vmask);
		ptv = (struct table_val_link *)ipfw_objhash_lookup_name(vi, 0,
		    (char *)&tval);
		if (ptv == NULL)
			continue;
		/* Deal with vlimit later */
		if (vlimit > 0 && vlimit <= ptv->no.kidx)
			continue;

		/* Value found. Bump refcount */
		ptv->pval->refcnt++;
		ptei->value = ptv->no.kidx;
		found++;
	}

	if (ts->count == found) {
		/* We've found all values, no need to create new ones */
		return (0);
	}

	/*
	 * We have added some state here, let's attach operation
	 * state to the list to be able to rollback if necessary.
	 */
	add_toperation_state(ch, ts);
	/* Ensure table won't disappear */
	tc_ref(tc);
	IPFW_UH_WUNLOCK(ch);

	/*
	 * Stage 2: allocate objects for non-existing values.
	 */
	for (i = 0; i < count; i++) {
		ptei = &tei[i];
		if (ptei->value != 0)
			continue;
		if (ptei->ptv != NULL)
			continue;
		ptei->ptv = malloc(sizeof(struct table_val_link), M_IPFW,
		    M_WAITOK | M_ZERO);
	}

	/*
	 * Stage 3: allocate index numbers for new values
	 * and link them to index.
	 */
	IPFW_UH_WLOCK(ch);
	tc_unref(tc);
	del_toperation_state(ch, ts);
	if (ts->modified != 0) {

		/*
		 * In general, we should free all state/indexes here
		 * and return. However, we keep allocated state instead
		 * to ensure we achieve some progress on each restart.
		 */
		return (0);
	}

	KASSERT(pval == ch->valuestate, ("resize_storage() notify failure"));

	/* Let's try to link values */
	for (i = 0; i < count; i++) {
		ptei = &tei[i];

		/* Check if record has appeared */
		mask_table_value(ptei->pvalue, &tval, ts->vmask);
		ptv = (struct table_val_link *)ipfw_objhash_lookup_name(vi, 0,
		    (char *)&tval);
		if (ptv != NULL) {
			ptv->pval->refcnt++;
			ptei->value = ptv->no.kidx;
			continue;
		}

		/* May perform UH unlock/lock */
		error = alloc_table_vidx(ch, ts, vi, &vidx);
		if (error != 0) {
			ts->opstate.func(ts->tc, &ts->opstate);
			return (error);
		}
		/* value storage resize has happened, return */
		if (ts->modified != 0)
			return (0);

		/* Finally, we have allocated valid index, let's add entry */
		ptei->value = vidx;
		ptv = (struct table_val_link *)ptei->ptv;
		ptei->ptv = NULL;

		ptv->no.kidx = vidx;
		ptv->no.name = (char *)&pval[vidx];
		ptv->pval = &pval[vidx];
		memcpy(ptv->pval, &tval, sizeof(struct table_value));
		pval[vidx].refcnt = 1;
		ipfw_objhash_add(vi, &ptv->no);
	}

	return (0);
}
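
ipfw_link_table_values() is built around one invariant: memory is allocated only while the UH lock is dropped, and after the lock is retaken the tablestate "modified" flag tells the function whether its view of the table is still valid. The skeleton below shows that three-stage shape with a simple generation counter in place of the "modified" flag; link_items(), struct item and the EAGAIN convention are all illustrative, not ipfw's.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

/* Hypothetical per-entry state for a batched "link values" operation. */
struct item {
	void *priv;		/* allocated in stage 2 if still missing */
	int   linked;		/* set in stage 1 or stage 3 */
};

int
link_items(pthread_mutex_t *lock, unsigned *gen, struct item *it, int n)
{
	unsigned start_gen;
	int i;

	pthread_mutex_lock(lock);
	start_gen = *gen;
	/* Stage 1: resolve entries that already exist (omitted here). */
	pthread_mutex_unlock(lock);

	/* Stage 2: allocate objects for unresolved entries, unlocked. */
	for (i = 0; i < n; i++) {
		if (it[i].linked || it[i].priv != NULL)
			continue;
		it[i].priv = calloc(1, 64);
		if (it[i].priv == NULL)
			return (ENOMEM);
	}

	pthread_mutex_lock(lock);
	if (*gen != start_gen) {
		/* Structure changed while unlocked: keep the allocations
		 * so the retry makes progress, and let the caller restart. */
		pthread_mutex_unlock(lock);
		return (EAGAIN);
	}
	/* Stage 3: link the pre-allocated objects into the shared index. */
	for (i = 0; i < n; i++) {
		if (!it[i].linked) {
			/* ... insert it[i].priv here ... */
			it[i].linked = 1;
		}
	}
	pthread_mutex_unlock(lock);
	return (0);
}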
Example #16
/*
 * Grows value storage shared among all tables.
 * Drops/reacquires UH locks.
 * Notifies other running adds on @ch shared storage resize.
 * Note the function does not guarantee that free space
 * will be available after invocation, so the caller needs
 * to run the cycle again itself.
 *
 * Returns 0 in case of no errors.
 */
static int
resize_shared_value_storage(struct ip_fw_chain *ch)
{
	struct tables_config *tcfg;
	struct namedobj_instance *vi;
	struct table_value *pval, *valuestate, *old_valuestate;
	void *new_idx;
	struct vdump_args da;
	int new_blocks;
	int val_size, val_size_old;

	IPFW_UH_WLOCK_ASSERT(ch);

	valuestate = NULL;
	new_idx = NULL;

	pval = (struct table_value *)ch->valuestate;
	vi = CHAIN_TO_VI(ch);
	tcfg = CHAIN_TO_TCFG(ch);

	val_size = tcfg->val_size * 2;

	if (val_size == (1 << 30))
		return (ENOSPC);

	IPFW_UH_WUNLOCK(ch);

	valuestate = malloc(sizeof(struct table_value) * val_size, M_IPFW,
	    M_WAITOK | M_ZERO);
	ipfw_objhash_bitmap_alloc(val_size, (void *)&new_idx,
	    &new_blocks);

	IPFW_UH_WLOCK(ch);

	/*
	 * Check if we still need to resize
	 */
	if (tcfg->val_size >= val_size)
		goto done;

	/* Update pointers and notify everyone we're changing @ch */
	pval = (struct table_value *)ch->valuestate;
	rollback_toperation_state(ch, ch);

	/* Good. Let's merge */
	memcpy(valuestate, pval, sizeof(struct table_value) * tcfg->val_size);
	ipfw_objhash_bitmap_merge(CHAIN_TO_VI(ch), &new_idx, &new_blocks);

	IPFW_WLOCK(ch);
	/* Change pointers */
	old_valuestate = ch->valuestate;
	ch->valuestate = valuestate;
	valuestate = old_valuestate;
	ipfw_objhash_bitmap_swap(CHAIN_TO_VI(ch), &new_idx, &new_blocks);

	val_size_old = tcfg->val_size;
	tcfg->val_size = val_size;
	val_size = val_size_old;
	IPFW_WUNLOCK(ch);
	/* Update pointers to reflect resize */
	memset(&da, 0, sizeof(da));
	da.pval = (struct table_value *)ch->valuestate;
	ipfw_objhash_foreach(vi, update_tvalue, &da);

done:
	free(valuestate, M_IPFW);
	ipfw_objhash_bitmap_free(new_idx, new_blocks);

	return (0);
}
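
resize_shared_value_storage() ends with a small trick: after the new buffer is installed, the local pointer is redirected at the old one, so the single free() on the common exit path releases either the replaced storage or, if the resize turned out to be unnecessary, the buffer that was never installed. A compact user-space sketch of that grow-and-swap idiom; struct store and store_grow() are invented for illustration.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct store {
	pthread_mutex_t lock;
	void  *buf;
	size_t size;
};

/* Grow @s to at least @want bytes, allocating with the lock dropped. */
int
store_grow(struct store *s, size_t want)
{
	void *nbuf, *tmp;

	nbuf = calloc(1, want);			/* allocate unlocked */
	if (nbuf == NULL)
		return (ENOMEM);

	pthread_mutex_lock(&s->lock);
	if (s->size >= want)			/* someone already grew it */
		goto done;
	memcpy(nbuf, s->buf, s->size);		/* merge old contents */
	tmp = s->buf;				/* install new, remember old */
	s->buf = nbuf;
	s->size = want;
	nbuf = tmp;				/* free() below drops the old buffer */
done:
	pthread_mutex_unlock(&s->lock);
	free(nbuf);				/* old buffer, or the unused new one */
	return (0);
}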