Example No. 1
/* Release a class structure and, when CIDR limits are enabled, its per-class limits tree. */
void	free_class(aClass *tmp)
{
#ifdef ENABLE_CIDR_LIMITS
	if(tmp->ip_limits)
		patricia_destroy(tmp->ip_limits);
#endif

	MyFree(tmp);
#ifdef	DEBUGMODE
	classs.inuse--;
#endif
}
Example No. 2
/*
 * Reload the pass-through list into a PATRICIA tree: any existing tree is
 * destroyed first (free() is passed as the cleanup callback), then a fresh
 * 32-bit tree is allocated and every entry is added to it.
 */
void garden_patricia_load_list(patricia_tree_t **pptree,
			       pass_through *ptlist,
			       uint32_t ptcnt) {
  patricia_tree_t *ptree = *pptree;
  int i;
  if (ptree) {
    patricia_destroy (ptree, free);
  }
  *pptree = ptree = patricia_new(32);
  for (i=0; i < ptcnt; i++)
    garden_patricia_add(&ptlist[i], ptree);
}
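
Note that this library's patricia_destroy() takes a second argument, a cleanup callback (here free), unlike the single-argument form used in the other examples. Below is a minimal usage sketch of the function above; the reload_garden() wrapper and the static garden_tree pointer are assumptions made for illustration, not part of the original code.

/* Hypothetical caller (sketch): rebuild the pass-through tree whenever the
 * configured list changes.  Only garden_patricia_load_list() and its types
 * come from the example above; everything else here is assumed. */
static patricia_tree_t *garden_tree;	/* NULL until the first load */

static void
reload_garden(pass_through *ptlist, uint32_t ptcnt)
{
  /* Frees any previous tree (free() used as the cleanup callback),
   * then builds a fresh 32-bit tree holding ptcnt entries. */
  garden_patricia_load_list(&garden_tree, ptlist, ptcnt);
}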
Example No. 3
/**
 * Destroy a map.
 */
void
map_destroy(map_t *m)
{
	map_check(m);

	switch (m->type) {
	case MAP_HASH:
		htable_free_null(&m->u.ht);
		break;
	case MAP_ORDERED_HASH:
		ohash_table_destroy_null(&m->u.ot);
		break;
	case MAP_PATRICIA:
		patricia_destroy(m->u.pt);
		break;
	case MAP_MAXTYPE:
		g_assert_not_reached();
	}

	m->type = MAP_MAXTYPE;
	m->magic = 0;
	WFREE(m);
}
Example No. 4
/*
 * Offload keys to remote node, as appropriate.
 *
 * Firstly we only consider remote nodes whose KUID falls within our k-ball.
 *
 * Secondly, we only consider remote nodes that end up being in our
 * routing table (i.e. ones which are close enough to us to get room in the
 * table, which also means they are neither firewalled nor about to shut down).
 * This is normally ensured by our caller.
 *
 * Thirdly, we are only going to consider keys closer to the node than we are
 * and for which we are the closest among our k-closest nodes, to avoid too
 * many redundant STORE operations.
 */
void
keys_offload(const knode_t *kn)
{
	struct offload_context ctx;
	unsigned n;
	knode_t *kclosest[KDA_K];		/* Our known k-closest nodes */
	bool debug;

	knode_check(kn);

	if (kn->flags & (KNODE_F_FIREWALLED | KNODE_F_SHUTDOWNING))
		return;

	if (
		!dht_bootstrapped() ||			/* Not bootstrapped */
		!keys_within_kball(kn->id) ||	/* Node KUID outside our k-ball */
		0 == hikset_count(keys)			/* No keys held */
	)
		return;

	debug = GNET_PROPERTY(dht_storage_debug) > 1 ||
			GNET_PROPERTY(dht_publish_debug) > 1;

	if (debug)
		g_debug("DHT preparing key offloading to %s", knode_to_string(kn));

	gnet_stats_inc_general(GNR_DHT_KEY_OFFLOADING_CHECKS);

	ctx.our_kuid = get_our_kuid();
	ctx.remote_kuid = kn->id;
	ctx.found = NULL;
	ctx.count = 0;

	/*
	 * We need to have KDA_K closest known alive neighbours in order to
	 * be able to select proper keys to offload.
	 *
	 * Note that we make sure to NOT include the new node in our k-closest set
	 * since it would always be closer than ourselves to keys we wish to
	 * offload to it...
	 */

	n = dht_fill_closest(ctx.our_kuid, kclosest,
			G_N_ELEMENTS(kclosest), ctx.remote_kuid, TRUE);

	if (n < G_N_ELEMENTS(kclosest)) {
		if (debug)
			g_warning("DHT got only %u closest alive nodes, cannot offload", n);
		return;
	}

	/*
	 * Prepare a PATRICIA containing the ID of our k-closest alive nodes
	 * plus ourselves.
	 */

	ctx.kclosest = patricia_create(KUID_RAW_BITSIZE);
	for (n = 0; n < G_N_ELEMENTS(kclosest); n++) {
		patricia_insert(ctx.kclosest, kclosest[n]->id, kclosest[n]->id);
	}
	patricia_insert(ctx.kclosest, ctx.our_kuid, ctx.our_kuid);

	/*
	 * Select offloading candidate keys.
	 */

	hikset_foreach(keys, keys_offload_prepare, &ctx);
	patricia_destroy(ctx.kclosest);

	if (debug) {
		g_debug("DHT found %u/%zu offloading candidate%s",
			ctx.count, hikset_count(keys), plural(ctx.count));
	}

	if (ctx.count)
		publish_offload(kn, ctx.found);

	pslist_free_null(&ctx.found);
}
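
The per-key callback keys_offload_prepare() is not shown here; per the header comment, it retains only keys for which we are the closest among the PATRICIA set built above. A hypothetical predicate illustrating that single check, using only the PATRICIA calls visible in these examples (the function name and logic are a sketch, not the project's actual code):

/* Sketch only: returns TRUE when our KUID is the closest entry of the
 * k-closest-plus-ourselves PATRICIA to the given key.  Pointer comparison
 * works here because ctx.our_kuid itself was inserted as a value above. */
static bool
we_are_closest_to_key(patricia_t *kclosest, const kuid_t *key,
	const kuid_t *our_kuid)
{
	return patricia_closest(kclosest, key) == our_kuid;
}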
Example No. 5
/**
 * Update k-ball information.
 */
void
keys_update_kball(void)
{
	kuid_t *our_kuid = get_our_kuid();
	knode_t **kvec;
	int kcnt;
	patricia_t *pt;
	int i;

	WALLOC_ARRAY(kvec, KDA_K);
	kcnt = dht_fill_closest(our_kuid, kvec, KDA_K, NULL, TRUE);
	kball.seeded = TRUE;

	/*
	 * If we know of no alive nodes yet, request any node we have in the
	 * routing table, even "zombies".  If we get fewer than KDA_K of these,
	 * we definitely do not know enough about the DHT structure yet!
	 */

	if (0 == kcnt) {
		kcnt = dht_fill_closest(our_kuid, kvec, KDA_K, NULL, FALSE);
		if (kcnt < KDA_K)
			kball.seeded = FALSE;
	}

	pt = patricia_create(KUID_RAW_BITSIZE);

	for (i = 0; i < kcnt; i++) {
		knode_t *kn = kvec[i];
		patricia_insert(pt, kn->id, kn);
	}

	if (patricia_count(pt)) {
		knode_t *furthest = patricia_furthest(pt, our_kuid);
		knode_t *closest = patricia_closest(pt, our_kuid);
		size_t fbits;
		size_t cbits;

		kuid_atom_change(&kball.furthest, furthest->id);
		kuid_atom_change(&kball.closest, closest->id);

		fbits = kuid_common_prefix(kball.furthest, our_kuid);
		cbits = kuid_common_prefix(kball.closest, our_kuid);

		g_assert(fbits <= cbits);
		g_assert(cbits <= KUID_RAW_BITSIZE);

		if (GNET_PROPERTY(dht_debug)) {
			uint8 width = cbits - fbits;

			g_debug("DHT %sk-ball %s %u bit%s (was %u-bit wide)",
				kball.seeded ? "" : "(not seeded yet) ",
				width == kball.width ? "remained at" :
				width > kball.width ? "expanded to" : "shrunk to",
				width, plural(width), kball.width);
			g_debug("DHT k-ball closest (%zu common bit%s) is %s",
				cbits, plural(cbits), knode_to_string(closest));
			g_debug("DHT k-ball furthest (%zu common bit%s) is %s",
				fbits, plural(fbits), knode_to_string(furthest));
		}

		STATIC_ASSERT(KUID_RAW_BITSIZE < 256);

		kball.furthest_bits = fbits & 0xff;
		kball.closest_bits = cbits & 0xff;
		kball.width = (cbits - fbits) & 0xff;
		kball.theoretical_bits = dht_get_kball_furthest() & 0xff;

		gnet_stats_set_general(GNR_DHT_KBALL_FURTHEST, kball.furthest_bits);
		gnet_stats_set_general(GNR_DHT_KBALL_CLOSEST, kball.closest_bits);
	}

	WFREE_ARRAY(kvec, KDA_K);
	patricia_destroy(pt);
}
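
Taken together, examples 4 and 5 show the full lifecycle used for these short-lived trees: create, insert, query, destroy. The condensed sketch below reuses only the calls already visible above; the function name, parameters and (void) casts are placeholders for illustration.

/* Lifecycle sketch based on the calls in examples 4 and 5.  As in those
 * examples, the values' memory is managed elsewhere, so patricia_destroy()
 * alone releases the tree. */
static void
patricia_lifecycle_sketch(knode_t **nodes, size_t count, const kuid_t *target)
{
	patricia_t *pt = patricia_create(KUID_RAW_BITSIZE);	/* KUID-sized keys */
	size_t i;

	for (i = 0; i < count; i++)
		patricia_insert(pt, nodes[i]->id, nodes[i]);	/* key, value */

	if (patricia_count(pt)) {
		knode_t *closest = patricia_closest(pt, target);	/* nearest to target */
		knode_t *furthest = patricia_furthest(pt, target);	/* farthest from target */

		(void) closest;
		(void) furthest;
	}

	patricia_destroy(pt);		/* release the tree structure itself */
}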