Example #1
/**
 * Replace a key/value pair in the map.
 */
void
map_replace(const map_t *m, const void *key, const void *value)
{
	map_check(m);

	switch (m->type) {
	case MAP_HASH:
		htable_insert_const(m->u.ht, key, value);
		break;
	case MAP_ORDERED_HASH:
		ohash_table_replace(m->u.ot, key, value);
		break;
	case MAP_PATRICIA:
		patricia_insert(m->u.pt, key, value);		/* Does replace */
		break;
	case MAP_MAXTYPE:
		g_assert_not_reached();
	}
}
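map_replace() above dispatches on the map's backing store through a tagged union, relying on the fact that plain insertion overwrites any existing key in every back-end. Below is a minimal, self-contained sketch of the same pattern; all names (my_map_t, slot_store_t, etc.) are hypothetical and not part of the original codebase.

#include <assert.h>
#include <stdio.h>

/* Trivial single-slot "store" standing in for the real back-ends. */
typedef struct { const void *key, *val; } slot_store_t;

static void
slot_put(slot_store_t *s, const void *k, const void *v)
{
	s->key = k;		/* Overwrites any previous binding: insert == replace */
	s->val = v;
}

typedef enum { MY_HASH, MY_TRIE, MY_MAXTYPE } my_map_type_t;

typedef struct {
	my_map_type_t type;
	union {
		slot_store_t *h;	/* MY_HASH */
		slot_store_t *t;	/* MY_TRIE */
	} u;
} my_map_t;

/* Same shape as map_replace(): switch on the tag, delegate to the back-end. */
static void
my_map_replace(my_map_t *m, const void *key, const void *value)
{
	switch (m->type) {
	case MY_HASH:
		slot_put(m->u.h, key, value);
		break;
	case MY_TRIE:
		slot_put(m->u.t, key, value);
		break;
	case MY_MAXTYPE:
		assert(0);		/* Not a real type: must not be reached */
	}
}

int
main(void)
{
	slot_store_t s = { NULL, NULL };
	my_map_t m = { MY_HASH, { &s } };

	my_map_replace(&m, "key", "first");
	my_map_replace(&m, "key", "second");	/* Replaces the previous value */
	printf("%s -> %s\n", (const char *)s.key, (const char *)s.val);
	return 0;
}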
Example #2
/*
 * Offload keys to remote node, as appropriate.
 *
 * Firstly, we only consider remote nodes whose KUID falls within our k-ball.
 *
 * Secondly, we only consider remote nodes that end up being in our
 * routing table (i.e. ones which are close enough to us to get room in the
 * table, which also means they are neither firewalled nor about to shut down).
 * This is normally ensured by our caller.
 *
 * Thirdly, we only consider keys that are closer to the remote node than
 * they are to us, and for which we are the closest among our k-closest
 * nodes, to avoid too many redundant STORE operations.
 */
void
keys_offload(const knode_t *kn)
{
	struct offload_context ctx;
	unsigned n;
	knode_t *kclosest[KDA_K];		/* Our known k-closest nodes */
	bool debug;

	knode_check(kn);

	if (kn->flags & (KNODE_F_FIREWALLED | KNODE_F_SHUTDOWNING))
		return;

	if (
		!dht_bootstrapped() ||			/* Not bootstrapped */
		!keys_within_kball(kn->id) ||	/* Node KUID outside our k-ball */
		0 == hikset_count(keys)			/* No keys held */
	)
		return;

	debug = GNET_PROPERTY(dht_storage_debug) > 1 ||
			GNET_PROPERTY(dht_publish_debug) > 1;

	if (debug)
		g_debug("DHT preparing key offloading to %s", knode_to_string(kn));

	gnet_stats_inc_general(GNR_DHT_KEY_OFFLOADING_CHECKS);

	ctx.our_kuid = get_our_kuid();
	ctx.remote_kuid = kn->id;
	ctx.found = NULL;
	ctx.count = 0;

	/*
	 * We need to have KDA_K closest known alive neighbours in order to
	 * be able to select proper keys to offload.
	 *
	 * Note that we make sure to NOT include the new node in our k-closest set
	 * since it would always be closer than ourselves to keys we wish to
	 * offload to it...
	 */

	n = dht_fill_closest(ctx.our_kuid, kclosest,
			G_N_ELEMENTS(kclosest), ctx.remote_kuid, TRUE);

	if (n < G_N_ELEMENTS(kclosest)) {
		if (debug)
			g_warning("DHT got only %u closest alive nodes, cannot offload", n);
		return;
	}

	/*
	 * Prepare a PATRICIA containing the ID of our k-closest alive nodes
	 * plus ourselves.
	 */

	ctx.kclosest = patricia_create(KUID_RAW_BITSIZE);
	for (n = 0; n < G_N_ELEMENTS(kclosest); n++) {
		patricia_insert(ctx.kclosest, kclosest[n]->id, kclosest[n]->id);
	}
	patricia_insert(ctx.kclosest, ctx.our_kuid, ctx.our_kuid);

	/*
	 * Select offloading candidate keys.
	 */

	hikset_foreach(keys, keys_offload_prepare, &ctx);
	patricia_destroy(ctx.kclosest);

	if (debug) {
		g_debug("DHT found %u/%zu offloading candidate%s",
			ctx.count, hikset_count(keys), plural(ctx.count));
	}

	if (ctx.count)
		publish_offload(kn, ctx.found);

	pslist_free_null(&ctx.found);
}
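The actual candidate selection happens in keys_offload_prepare(), which is not shown here. Going by the header comment, a key qualifies when the remote node is closer to it (under the Kademlia XOR metric) than we are, and no other node in our k-closest set is closer to it than we are. The sketch below illustrates that test on raw 160-bit IDs; it is an assumption about the criterion rather than the actual implementation, and xor_closer()/should_offload() are hypothetical helpers.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define ID_LEN 20	/* 160-bit Kademlia IDs */

/*
 * Return true if `a' is closer to `target' than `b' is, under the XOR
 * metric: compare (a XOR target) and (b XOR target) as big-endian numbers.
 */
bool
xor_closer(const uint8_t *a, const uint8_t *b, const uint8_t *target)
{
	size_t i;

	for (i = 0; i < ID_LEN; i++) {
		uint8_t da = a[i] ^ target[i];
		uint8_t db = b[i] ^ target[i];

		if (da != db)
			return da < db;
	}
	return false;		/* Equidistant */
}

/*
 * A key is an offloading candidate when the remote node is closer to it
 * than we are, and none of our other k-closest nodes is closer to it
 * than we are (i.e. we are the designated publisher among them).
 */
bool
should_offload(const uint8_t *key, const uint8_t *our_id,
	const uint8_t *remote_id, const uint8_t kclosest[][ID_LEN], size_t n)
{
	size_t i;

	if (!xor_closer(remote_id, our_id, key))
		return false;	/* We are closer (or tied): keep the key */

	for (i = 0; i < n; i++) {
		if (xor_closer(kclosest[i], our_id, key))
			return false;	/* Another close node should handle it */
	}
	return true;
}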
Example #3
/**
 * ccache_read(path):
 * Read the chunkification cache (if present) from the directory ${path};
 * return a Patricia tree mapping absolute paths to cache entries.
 */
CCACHE *
ccache_read(const char * path)
{
	struct ccache_internal * C;
	struct ccache_read_internal R;
	struct ccache_record * ccr;
#ifdef HAVE_MMAP
	struct stat sb;
	off_t fpos;
	long int pagesize;
#endif
	size_t i;
	uint8_t N[4];

	/* The caller must pass a file name to be read. */
	assert(path != NULL);

	/* Allocate memory for the cache. */
	if ((C = malloc(sizeof(struct ccache_internal))) == NULL)
		goto err0;
	memset(C, 0, sizeof(struct ccache_internal));

	/* Create a Patricia tree to store cache entries. */
	if ((C->tree = patricia_init()) == NULL)
		goto err1;

	/* Construct the name of cache file. */
	if (asprintf(&R.s, "%s/cache", path) == -1) {
		warnp("asprintf");
		goto err2;
	}

	/* Open the cache file. */
	if ((R.f = fopen(R.s, "r")) == NULL) {
		/* ENOENT isn't an error. */
		if (errno != ENOENT) {
			warnp("fopen(%s)", R.s);
			goto err3;
		}

		/* No cache exists on disk; return an empty cache. */
		goto emptycache;
	}

	/**
	 * We read the cache file in three steps:
	 * 1. Read a little-endian uint32_t which indicates the number of
	 *    records in the cache file.
	 * 2. Read N (record, path suffix) pairs and insert them into a
	 *    Patricia tree.
	 * 3. Iterate through the tree and read chunk headers and compressed
	 *    entry trailers.
	 */

	/* Read the number of cache entries. */
	if (fread(N, 4, 1, R.f) != 1) {
		if (ferror(R.f))
			warnp("Error reading cache: %s", R.s);
		else
			warn0("Error reading cache: %s", R.s);
		goto err4;
	}
	R.N = le32dec(N);

	/* Read N (record, path suffix) pairs. */
	R.sbuf = NULL;
	R.sbuflen = R.slen = R.datalen = 0;
	for (i = 0; i < R.N; i++) {
		if ((ccr = read_rec(&R)) == NULL)
			goto err5;
		if (patricia_insert(C->tree, R.sbuf, R.slen, ccr))
			goto err5;
		C->chunksusage += ccr->nch * sizeof(struct chunkheader);
		C->trailerusage += ccr->tzlen;
	}

#ifdef HAVE_MMAP
	/* Obtain page size, since mmapped regions must be page-aligned. */
	if ((pagesize = sysconf(_SC_PAGESIZE)) == -1) {
		warnp("sysconf(_SC_PAGESIZE)");
		goto err5;
	}

	/* Map the remainder of the cache into memory. */
	fpos = ftello(R.f);
	if (fpos == -1) {
		warnp("ftello(%s)", R.s);
		goto err5;
	}
	if (fstat(fileno(R.f), &sb)) {
		warnp("fstat(%s)", R.s);
		goto err5;
	}
	if (sb.st_size != (off_t)(fpos + R.datalen)) {
		warn0("Cache has incorrect size (%jd, expected %jd)\n",
		    (intmax_t)(sb.st_size), (intmax_t)(fpos + R.datalen));
		goto err5;
	}
	C->datalen = R.datalen + (fpos % pagesize);
	if ((C->data = mmap(NULL, C->datalen, PROT_READ,
#ifdef MAP_NOCORE
	    MAP_PRIVATE | MAP_NOCORE,
#else
	    MAP_PRIVATE,
#endif
	    fileno(R.f), fpos - (fpos % pagesize))) == MAP_FAILED) {
		warnp("mmap(%s)", R.s);
		goto err5;
	}
	R.data = (uint8_t *)C->data + (fpos % pagesize);
#else
	/* Allocate space. */
	C->datalen = R.datalen;
	if (((C->data = malloc(C->datalen)) == NULL) && (C->datalen > 0))
		goto err5;
	if (fread(C->data, C->datalen, 1, R.f) != 1) {
		warnp("fread(%s)", R.s);
		goto err6;
	}
	R.data = (uint8_t *)C->data;
#endif

	/* Iterate through the tree reading chunk headers and trailers. */
	if (patricia_foreach(C->tree, callback_read_data, &R)) {
		warnp("Error reading cache: %s", R.s);
		goto err6;
	}

	/* Free buffer used for storing paths. */
	free(R.sbuf);

	/* Close the cache file. */
	fclose(R.f);

	/* Free string allocated by asprintf. */
	free(R.s);

	/* Success! */
	return (C);

emptycache:
	/* Nothing went wrong, but there's nothing on disk. */
	free(R.s);
	return (C);

err6:
#ifdef HAVE_MMAP
	if (C->datalen > 0)
		munmap(C->data, C->datalen);
#else
	free(C->data);
#endif
err5:
	free(R.sbuf);
	patricia_foreach(C->tree, callback_free, NULL);
err4:
	fclose(R.f);
err3:
	free(R.s);
err2:
	patricia_free(C->tree);
err1:
	free(C);
err0:
	/* Failure! */
	return (NULL);
}
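ccache_read() unwinds partial initialization with the cascading goto labels err0 .. err6: each acquisition gets its own label, and the labels appear in reverse order so a failure at step N releases exactly the resources acquired in steps 1 .. N-1. Here is a minimal, self-contained sketch of the idiom; load_thing() and its resources are made up for illustration.

#include <stdio.h>
#include <stdlib.h>

int
load_thing(const char *path, char **out)
{
	char *buf;
	FILE *f;
	size_t len;

	/* Step 1: allocate a buffer. */
	if ((buf = malloc(4096)) == NULL)
		goto err0;

	/* Step 2: open the file. */
	if ((f = fopen(path, "r")) == NULL)
		goto err1;

	/* Step 3: read; a read error must release both the file and the buffer. */
	len = fread(buf, 1, 4095, f);
	if (ferror(f))
		goto err2;
	buf[len] = '\0';

	/* Success: release what is no longer needed, hand over the rest. */
	fclose(f);
	*out = buf;
	return (0);

err2:
	fclose(f);
err1:
	free(buf);
err0:
	/* Failure! */
	return (-1);
}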
Example #4
/**
 * Update k-ball information.
 */
void
keys_update_kball(void)
{
	kuid_t *our_kuid = get_our_kuid();
	knode_t **kvec;
	int kcnt;
	patricia_t *pt;
	int i;

	WALLOC_ARRAY(kvec, KDA_K);
	kcnt = dht_fill_closest(our_kuid, kvec, KDA_K, NULL, TRUE);
	kball.seeded = TRUE;

	/*
	 * If we know of no alive nodes yet, request any node we have in the
	 * routing table, even "zombies".  If we get fewer than KDA_K of these,
	 * we definitely do not know enough about the DHT structure yet!
	 */

	if (0 == kcnt) {
		kcnt = dht_fill_closest(our_kuid, kvec, KDA_K, NULL, FALSE);
		if (kcnt < KDA_K)
			kball.seeded = FALSE;
	}

	pt = patricia_create(KUID_RAW_BITSIZE);

	for (i = 0; i < kcnt; i++) {
		knode_t *kn = kvec[i];
		patricia_insert(pt, kn->id, kn);
	}

	if (patricia_count(pt)) {
		knode_t *furthest = patricia_furthest(pt, our_kuid);
		knode_t *closest = patricia_closest(pt, our_kuid);
		size_t fbits;
		size_t cbits;

		kuid_atom_change(&kball.furthest, furthest->id);
		kuid_atom_change(&kball.closest, closest->id);

		fbits = kuid_common_prefix(kball.furthest, our_kuid);
		cbits = kuid_common_prefix(kball.closest, our_kuid);

		g_assert(fbits <= cbits);
		g_assert(cbits <= KUID_RAW_BITSIZE);

		if (GNET_PROPERTY(dht_debug)) {
			uint8 width = cbits - fbits;

			g_debug("DHT %sk-ball %s %u bit%s (was %u-bit wide)",
				kball.seeded ? "" : "(not seeded yet) ",
				width == kball.width ? "remained at" :
				width > kball.width ? "expanded to" : "shrunk to",
				width, plural(width), kball.width);
			g_debug("DHT k-ball closest (%zu common bit%s) is %s",
				cbits, plural(cbits), knode_to_string(closest));
			g_debug("DHT k-ball furthest (%zu common bit%s) is %s",
				fbits, plural(fbits), knode_to_string(furthest));
		}

		STATIC_ASSERT(KUID_RAW_BITSIZE < 256);

		kball.furthest_bits = fbits & 0xff;
		kball.closest_bits = cbits & 0xff;
		kball.width = (cbits - fbits) & 0xff;
		kball.theoretical_bits = dht_get_kball_furthest() & 0xff;

		gnet_stats_set_general(GNR_DHT_KBALL_FURTHEST, kball.furthest_bits);
		gnet_stats_set_general(GNR_DHT_KBALL_CLOSEST, kball.closest_bits);
	}

	WFREE_ARRAY(kvec, KDA_K);
	patricia_destroy(pt);
}
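The k-ball "width" computed above is the difference between the number of leading bits our KUID shares with the closest and with the furthest of our k-closest nodes (kuid_common_prefix()). Below is a self-contained sketch of that common-prefix count, assuming 160-bit big-endian IDs; common_prefix_bits() is a hypothetical stand-in, not the library routine.

#include <stddef.h>
#include <stdint.h>

#define ID_LEN 20	/* 160-bit IDs */

/* Count the leading bits shared by two IDs, most-significant byte first. */
size_t
common_prefix_bits(const uint8_t *a, const uint8_t *b)
{
	size_t bits = 0;
	size_t i;

	for (i = 0; i < ID_LEN; i++) {
		uint8_t x = a[i] ^ b[i];

		if (x == 0) {
			bits += 8;	/* Whole byte identical */
			continue;
		}
		while ((x & 0x80) == 0) {	/* Leading zero bits of the XOR */
			bits++;
			x <<= 1;
		}
		break;
	}
	return bits;
}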