/**
 * Retry publishing after some delay.
 *
 * @param pe		the entry to publish
 * @param delay		delay in seconds
 * @param msg		if non-NULL, logging message explaining the delay
 */
static void
publisher_retry(struct publisher_entry *pe, int delay, const char *msg)
{
	struct pubdata *pd;

	publisher_check(pe);
	g_assert(NULL == pe->publish_ev);
	g_assert(delay > 0);

	pd = get_pubdata(pe->sha1);
	if (pd != NULL) {
		pd->next_enqueue = time_advance(tm_time(), UNSIGNED(delay));
		dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
	}

	pe->publish_ev = cq_insert(publish_cq, delay * 1000, handle_entry, pe);
	pe->last_delayed = tm_time();

	if (GNET_PROPERTY(publisher_debug) > 3) {
		shared_file_t *sf = shared_file_by_sha1(pe->sha1);
		g_debug("PUBLISHER will retry SHA-1 %s %s\"%s\" in %s: %s",
			sha1_to_string(pe->sha1),
			(sf && sf != SHARE_REBUILDING && shared_file_is_partial(sf)) ?
				"partial " : "",
			(sf && sf != SHARE_REBUILDING) ? shared_file_name_nfc(sf) : "",
			compact_time(delay), msg != NULL ? msg : "<no reason>");
		shared_file_unref(&sf);
	}
}
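/**
 * Illustrative sketch (not part of the original sources): the retry
 * bookkeeping above amounts to persisting an absolute "next enqueue"
 * timestamp and arming a callout timer expressed in milliseconds.  The
 * names below are hypothetical stand-ins for the gtk-gnutella helpers.
 */
#include <time.h>

struct retry_sketch {
	time_t next_enqueue;	/* absolute time of the next publishing attempt */
	long timer_ms;			/* callout delay, in milliseconds */
};

static struct retry_sketch
compute_retry(time_t now, int delay_secs)
{
	struct retry_sketch r;

	r.next_enqueue = now + delay_secs;		/* what time_advance() yields */
	r.timer_ms = (long) delay_secs * 1000;	/* what cq_insert() is given */
	return r;
}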
/**
 * Record activity on the node.
 */
void
stable_record_activity(const knode_t *kn)
{
	struct lifedata *ld;
	struct lifedata new_ld;

	knode_check(kn);
	g_assert(kn->flags & KNODE_F_ALIVE);

	ld = get_lifedata(kn->id);

	if (NULL == ld) {
		ld = &new_ld;

		new_ld.version = LIFEDATA_STRUCT_VERSION;
		new_ld.first_seen = kn->first_seen;
		new_ld.last_seen = kn->last_seen;

		gnet_stats_count_general(GNR_DHT_STABLE_NODES_HELD, +1);
	} else {
		if (kn->last_seen <= ld->last_seen)
			return;
		ld->last_seen = kn->last_seen;
	}

	dbmw_write(db_lifedata, kn->id->v, ld, sizeof *ld);
}
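/**
 * Minimal standalone sketch of the create-or-update pattern used above:
 * allocate fresh data when the node is seen for the first time, otherwise
 * only move last_seen forward.  The type and function names are
 * hypothetical, not part of the original code.
 */
#include <stdbool.h>
#include <stddef.h>
#include <time.h>

struct life_sketch {
	time_t first_seen;
	time_t last_seen;
};

/* Returns true if the record must be (re)written to persistent storage. */
static bool
touch_life_sketch(struct life_sketch *existing, struct life_sketch *fresh,
	time_t first_seen, time_t last_seen)
{
	if (NULL == existing) {
		fresh->first_seen = first_seen;
		fresh->last_seen = last_seen;
		return true;		/* new record, write it out */
	}
	if (last_seen <= existing->last_seen)
		return false;		/* nothing newer to record */
	existing->last_seen = last_seen;
	return true;
}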
/**
 * Record a SHA1 for publishing.
 */
void
publisher_add(const sha1_t *sha1)
{
	struct publisher_entry *pe;
	struct pubdata *pd;

	g_assert(sha1 != NULL);

	if (NULL == db_pubdata)
		return;					/* Shutting down */

	/*
	 * If already known, ignore silently.
	 */

	if (hikset_lookup(publisher_sha1, sha1))
		return;

	/*
	 * Create persistent publishing data if none known already.
	 */

	pd = get_pubdata(sha1);
	if (NULL == pd) {
		struct pubdata new_pd;

		new_pd.next_enqueue = 0;
		new_pd.expiration = 0;

		dbmw_write(db_pubdata, sha1, &new_pd, sizeof new_pd);

		if (GNET_PROPERTY(publisher_debug) > 2) {
			g_debug("PUBLISHER allocating new SHA-1 %s",
				sha1_to_string(sha1));
		}
	} else {
		if (GNET_PROPERTY(publisher_debug) > 2) {
			time_delta_t enqueue = delta_time(pd->next_enqueue, tm_time());
			time_delta_t expires = delta_time(pd->expiration, tm_time());

			g_debug("PUBLISHER existing SHA-1 %s, next enqueue %s%s, %s%s",
				sha1_to_string(sha1),
				enqueue > 0 ? "in " : "",
				enqueue > 0 ? compact_time(enqueue) : "now",
				pd->expiration ?
					(expires > 0 ? "expires in " : "expired") : "not published",
				expires > 0 ? compact_time2(expires) : "");
		}
	}

	/*
	 * New entry will be processed immediately.
	 */

	pe = publisher_entry_alloc(sha1);
	hikset_insert_key(publisher_sha1, &pe->sha1);

	publisher_handle(pe);
}
/**
 * DBMW iterator to reset key data.
 */
static G_GNUC_COLD void
keys_reset_keydata(void *key, void *u_data)
{
	struct keyinfo *ki = key;
	struct keydata kd;

	keyinfo_check(ki);
	(void) u_data;

	ZERO(&kd);
	dbmw_write(db_keydata, ki->kuid, &kd, sizeof kd);
}
/**
 * Hold publishing for some delay, for data we do not want to republish
 * in the short term (data deemed to be popular).  It therefore does not
 * matter if the already-published data expires in the DHT.
 */
static void
publisher_hold(struct publisher_entry *pe, int delay, const char *msg)
{
	struct pubdata *pd;

	publisher_check(pe);

	pd = get_pubdata(pe->sha1);
	if (pd != NULL) {
		pd->expiration = 0;		/* Signals: do not care any more */
		dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
	}

	publisher_retry(pe, delay, msg);
}
/**
 * A value held under the key was updated and has a new expiration time.
 *
 * @param id		the primary key (existing already)
 * @param cid		the secondary key (creator's ID)
 * @param expire	expiration time for the value
 */
void
keys_update_value(const kuid_t *id, const kuid_t *cid, time_t expire)
{
	struct keyinfo *ki;
	struct keydata *kd;

	ki = hikset_lookup(keys, id);
	g_assert(ki != NULL);

	ki->next_expire = MIN(ki->next_expire, expire);
	kd = get_keydata(id);

	if (kd != NULL) {
		int low = 0, high = ki->values - 1;
		bool found = FALSE;

		while (low <= high) {
			int mid = low + (high - low) / 2;
			int c;

			g_assert(mid >= 0 && mid < ki->values);

			c = kuid_cmp(&kd->creators[mid], cid);

			if (0 == c) {
				kd->expire[mid] = expire;
				found = TRUE;
				break;
			} else if (c < 0) {
				low = mid + 1;
			} else {
				high = mid - 1;
			}
		}

		if (found) {
			dbmw_write(db_keydata, id, kd, sizeof *kd);
		} else if (GNET_PROPERTY(dht_keys_debug)) {
			g_warning("DHT KEYS %s(): creator %s not found under %s",
				G_STRFUNC, kuid_to_hex_string(cid), kuid_to_hex_string2(id));
		}
	}
}
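/**
 * Standalone sketch of the binary search performed above on the sorted
 * creators[] array.  A byte-wise memcmp() stands in for kuid_cmp() and the
 * 20-byte size is an assumption; all names here are hypothetical.
 */
#include <string.h>

#define SKETCH_ID_SIZE 20	/* assumed size of a KUID, in bytes */

/* Returns the index of `id' within the sorted array, or -1 if not found. */
static int
find_sorted_id(const unsigned char ids[][SKETCH_ID_SIZE], int count,
	const unsigned char *id)
{
	int low = 0, high = count - 1;

	while (low <= high) {
		int mid = low + (high - low) / 2;
		int c = memcmp(ids[mid], id, SKETCH_ID_SIZE);

		if (0 == c)
			return mid;		/* found the creator */
		else if (c < 0)
			low = mid + 1;
		else
			high = mid - 1;
	}
	return -1;				/* not present */
}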
/**
 * Add GUID to the banned list or refresh the fact that we are still seeing
 * it as being worth banning.
 */
void
guid_add_banned(const struct guid *guid)
{
	struct guiddata *gd;
	struct guiddata new_gd;

	gd = get_guiddata(guid);

	if (NULL == gd) {
		gd = &new_gd;
		gd->create_time = gd->last_time = tm_time();
		gnet_stats_inc_general(GNR_BANNED_GUID_HELD);

		if (GNET_PROPERTY(guid_debug)) {
			g_debug("GUID banning %s", guid_hex_str(guid));
		}
	} else {
		gd->last_time = tm_time();
	}

	dbmw_write(db_guid, guid, gd, sizeof *gd);
}
/**
 * Map iterator to record security tokens in the database.
 */
static void
record_token(void *key, void *value, void *unused_u)
{
	kuid_t *id = key;
	lookup_token_t *ltok = value;
	struct tokdata td;

	(void) unused_u;

	td.last_update = ltok->retrieved;
	td.length = ltok->token->length;
	td.token = td.length ? wcopy(ltok->token->v, td.length) : NULL;

	if (GNET_PROPERTY(dht_tcache_debug) > 4) {
		char buf[80];
		bin_to_hex_buf(td.token, td.length, buf, sizeof buf);
		g_debug("DHT TCACHE adding security token for %s: %u-byte \"%s\"",
			kuid_to_hex_string(id), td.length, buf);
	}

	/*
	 * Data is put in the DBMW cache and the dynamically allocated token
	 * will be freed via free_tokdata() when the cached entry is released.
	 */

	if (!dbmw_exists(db_tokdata, id->v))
		gnet_stats_inc_general(GNR_DHT_CACHED_TOKENS_HELD);

	dbmw_write(db_tokdata, id->v, &td, sizeof td);

	if (GNET_PROPERTY(dht_tcache_debug_flags) & DBG_DSF_USR1) {
		g_debug("DHT TCACHE %s: stats=%s, count=%zu, id=%s",
			G_STRFUNC, uint64_to_string(
				gnet_stats_get_general(GNR_DHT_CACHED_TOKENS_HELD)),
			dbmw_count(db_tokdata), kuid_to_hex_string(id));
	}
}
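/**
 * Simple sketch of what a helper like bin_to_hex_buf() has to do for the
 * debug line above: render `len' bytes as hexadecimal into a caller-supplied
 * buffer, truncating when it would overflow.  This is an assumed equivalent,
 * not the project's actual implementation.
 */
#include <stddef.h>
#include <stdio.h>

static void
hex_dump_sketch(const unsigned char *data, size_t len, char *buf, size_t buflen)
{
	size_t i, pos = 0;

	if (0 == buflen)
		return;

	for (i = 0; i < len && pos + 2 < buflen; i++)
		pos += snprintf(&buf[pos], buflen - pos, "%02x", data[i]);

	buf[pos] = '\0';	/* always NUL-terminated */
}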
/**
 * Add value to a key, recording the new association between the KUID of the
 * creator (secondary key) and the 64-bit DB key under which the value is
 * stored.
 *
 * @param id		the primary key (may not exist yet)
 * @param cid		the secondary key (creator's ID)
 * @param dbkey		the 64-bit DB key
 * @param expire	expiration time for the value
 */
void
keys_add_value(const kuid_t *id, const kuid_t *cid,
	uint64 dbkey, time_t expire)
{
	struct keyinfo *ki;
	struct keydata *kd;
	struct keydata new_kd;

	ki = hikset_lookup(keys, id);

	/*
	 * If we're storing the first value under a key, we do not have any
	 * keyinfo structure yet.
	 */

	if (NULL == ki) {
		size_t common;
		bool in_kball;

		common = kuid_common_prefix(get_our_kuid(), id);
		in_kball = bits_within_kball(common);

		if (GNET_PROPERTY(dht_storage_debug) > 5)
			g_debug("DHT STORE new %s %s (%zu common bit%s) with creator %s",
				in_kball ? "key" : "cached key",
				kuid_to_hex_string(id), common, plural(common),
				kuid_to_hex_string2(cid));

		ki = allocate_keyinfo(id, common);
		ki->next_expire = expire;
		ki->flags = in_kball ? 0 : DHT_KEY_F_CACHED;

		hikset_insert_key(keys, &ki->kuid);

		kd = &new_kd;
		kd->values = 0;						/* will be incremented below */
		kd->creators[0] = *cid;				/* struct copy */
		kd->dbkeys[0] = dbkey;
		kd->expire[0] = expire;

		gnet_stats_inc_general(GNR_DHT_KEYS_HELD);
		if (!in_kball)
			gnet_stats_inc_general(GNR_DHT_CACHED_KEYS_HELD);
	} else {
		int low = 0;
		int high = ki->values - 1;

		kd = get_keydata(id);

		if (NULL == kd)
			return;

		g_assert(kd->values == ki->values);
		g_assert(kd->values < MAX_VALUES);

		if (GNET_PROPERTY(dht_storage_debug) > 5)
			g_debug("DHT STORE existing key %s (%u common bit%s) "
				"has new creator %s",
				kuid_to_hex_string(id), ki->common_bits,
				plural(ki->common_bits), kuid_to_hex_string2(cid));

		/*
		 * Keys are collected asynchronously, so it is possible that
		 * the key structure still exists, yet holds no values.  If this
		 * happens, then we win because we spared the useless deletion of
		 * the key structure to recreate it a little bit later.
		 */

		if (0 == kd->values)
			goto empty;

		/*
		 * Insert KUID of creator in array, which must be kept sorted.
		 * We perform a binary insertion.
		 */

		while (low <= high) {
			int mid = low + (high - low) / 2;
			int c;

			g_assert(mid >= 0 && mid < ki->values);

			c = kuid_cmp(&kd->creators[mid], cid);

			if (0 == c)
				g_error("new creator KUID %s must not already be present",
					kuid_to_hex_string(cid));
			else if (c < 0)
				low = mid + 1;
			else
				high = mid - 1;
		}

		/* Make room for inserting new item at `low' */

		ARRAY_FIXED_MAKEROOM(kd->creators, low, kd->values);
		ARRAY_FIXED_MAKEROOM(kd->dbkeys,   low, kd->values);
		ARRAY_FIXED_MAKEROOM(kd->expire,   low, kd->values);

		/* FALL THROUGH */

	empty:
		/* Insert new item at `low' */

		kd->creators[low] = *cid;			/* struct copy */
		kd->dbkeys[low] = dbkey;
		kd->expire[low] = expire;

		ki->next_expire = MIN(ki->next_expire, expire);
	}

	kd->values++;
	ki->values++;

	dbmw_write(db_keydata, id, kd, sizeof *kd);

	if (GNET_PROPERTY(dht_storage_debug) > 2)
		g_debug("DHT STORE %s key %s now holds %d/%d value%s",
			&new_kd == kd ? "new" : "existing",
			kuid_to_hex_string(id), ki->values, MAX_VALUES, plural(ki->values));
}
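/**
 * Standalone sketch of the "make room at `low'" insertion above: the tails of
 * the parallel fixed-size arrays are shifted one slot to the right before the
 * new entry is written, keeping them sorted.  ARRAY_FIXED_MAKEROOM() is
 * assumed to behave like the memmove() calls below; the types and names here
 * are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

#define SKETCH_MAX_SLOTS 8

struct slots_sketch {
	uint64_t dbkeys[SKETCH_MAX_SLOTS];
	time_t expire[SKETCH_MAX_SLOTS];
	int count;
};

static void
insert_slot(struct slots_sketch *s, int low, uint64_t dbkey, time_t expire)
{
	assert(s->count < SKETCH_MAX_SLOTS);
	assert(low >= 0 && low <= s->count);

	/* Shift entries [low, count) up by one to open slot `low' */
	memmove(&s->dbkeys[low + 1], &s->dbkeys[low],
		(s->count - low) * sizeof s->dbkeys[0]);
	memmove(&s->expire[low + 1], &s->expire[low],
		(s->count - low) * sizeof s->expire[0]);

	s->dbkeys[low] = dbkey;
	s->expire[low] = expire;
	s->count++;
}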
/**
 * Remove value from a key, discarding the association between the creator ID
 * and the 64-bit DB key.
 *
 * The key is known to already hold the value.
 *
 * @param id		the primary key
 * @param cid		the secondary key (creator's ID)
 * @param dbkey		the 64-bit DB key (informational, for assertions)
 */
void
keys_remove_value(const kuid_t *id, const kuid_t *cid, uint64 dbkey)
{
	struct keyinfo *ki;
	struct keydata *kd;
	int idx;

	ki = hikset_lookup(keys, id);

	g_assert(ki);

	kd = get_keydata(id);
	if (NULL == kd)
		return;

	g_assert(kd->values);
	g_assert(kd->values == ki->values);
	g_assert(kd->values <= MAX_VALUES);

	idx = lookup_secondary_idx(kd, cid);

	g_assert(idx >= 0 && idx < kd->values);
	g_assert(dbkey == kd->dbkeys[idx]);

	ARRAY_REMOVE(kd->creators, idx, kd->values);
	ARRAY_REMOVE(kd->dbkeys,   idx, kd->values);
	ARRAY_REMOVE(kd->expire,   idx, kd->values);

	/*
	 * We do not synchronously delete empty keys.
	 *
	 * This lets us optimize the nominal case whereby a key loses all its
	 * values because a STORE request triggered a lifetime check: that same
	 * STORE is precisely about to insert another value back.
	 *
	 * Hence lazy expiration also gives us the opportunity to further exploit
	 * caching in memory, the keyinfo being held there as a "cached" value.
	 *
	 * Reclaiming of dead keys happens during periodic key load computation.
	 */

	kd->values--;
	ki->values--;

	/*
	 * Recompute next expiration time.
	 */

	ki->next_expire = TIME_T_MAX;

	for (idx = 0; idx < ki->values; idx++) {
		ki->next_expire = MIN(ki->next_expire, kd->expire[idx]);
	}

	dbmw_write(db_keydata, id, kd, sizeof *kd);

	if (GNET_PROPERTY(dht_storage_debug) > 2) {
		g_debug("DHT STORE key %s now holds only %d/%d value%s, expire in %s",
			kuid_to_hex_string(id), ki->values, MAX_VALUES, plural(ki->values),
			compact_time(delta_time(ki->next_expire, tm_time())));
	}
}
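/**
 * Sketch of the expiration recomputation above: once a value is removed, the
 * key's next expiration time becomes the minimum of the remaining per-value
 * expiration times.  Standalone and hypothetical; (time_t) -1 stands in for
 * the TIME_T_MAX sentinel used when no values remain.
 */
#include <time.h>

static time_t
earliest_expire(const time_t *expire, int count)
{
	time_t min;
	int i;

	if (0 == count)
		return (time_t) -1;		/* no values left: nothing to expire */

	min = expire[0];
	for (i = 1; i < count; i++) {
		if (expire[i] < min)
			min = expire[i];	/* keep the earliest deadline */
	}
	return min;
}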
/**
 * Publishing callback invoked when asynchronous publication is completed,
 * or ended with an error.
 *
 * @return TRUE if we accept the publishing, FALSE otherwise to get the
 * publishing layer to continue attempts to failed STORE roots and report
 * on progress using the same callback.
 */
static bool
publisher_done(void *arg, pdht_error_t code, const pdht_info_t *info)
{
	struct publisher_entry *pe = arg;
	struct pubdata *pd;
	int delay = PUBLISH_BUSY;
	bool expired = FALSE;
	bool accepted = TRUE;

	publisher_check(pe);

	pd = get_pubdata(pe->sha1);

	/*
	 * Update stats on republishing before value expiration.
	 */

	if (PDHT_E_OK == code) {
		if (pe->last_publish && info->roots > 0) {
			if (pd != NULL) {
				if (pd->expiration && delta_time(tm_time(), pd->expiration) > 0)
					expired = TRUE;
			} else {
				time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);
				if (elapsed > DHT_VALUE_ALOC_EXPIRE)
					expired = TRUE;
			}
			if (expired)
				gnet_stats_inc_general(GNR_DHT_REPUBLISHED_LATE);
		}
	}

	/*
	 * Compute retry delay.
	 */

	switch (code) {
	case PDHT_E_OK:
		/*
		 * If we were not able to publish to KDA_K nodes, decrease the
		 * delay before republishing.  We use a non-linear decimation of
		 * the republish time, as a function of the number of nodes to which
		 * we could publish.
		 */

		delay = publisher_delay(info, DHT_VALUE_ALOC_EXPIRE);
		accepted = publisher_is_acceptable(info);
		break;
	case PDHT_E_POPULAR:
		/*
		 * Compute the suitable delay: the first time, we use PUBLISH_POPULAR,
		 * and then we double each time until we reach PUBLISH_POPULAR_MAX.
		 *
		 * If we already tried to publish the entry, pe->last_delayed will
		 * be non-zero.
		 */
		if (0 != pe->last_delayed) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);
			if (elapsed < PUBLISH_POPULAR) {
				delay = PUBLISH_POPULAR;
			} else if (elapsed >= PUBLISH_POPULAR_MAX / 2) {
				delay = PUBLISH_POPULAR_MAX;
			} else {
				delay = elapsed * 2;
			}
		} else {
			delay = PUBLISH_POPULAR;
		}
		break;
	case PDHT_E_NOT_SHARED:
	case PDHT_E_LOOKUP_EXPIRED:
	case PDHT_E_LOOKUP:
	case PDHT_E_UDP_CLOGGED:
	case PDHT_E_PUBLISH_EXPIRED:
	case PDHT_E_PUBLISH_ERROR:
	case PDHT_E_SHA1:
	case PDHT_E_PENDING:
	case PDHT_E_CANCELLED:
	case PDHT_E_GGEP:
	case PDHT_E_NONE:
		delay = PUBLISH_BUSY;
		break;
	case PDHT_E_MAX:
		g_assert_not_reached();
	}

	/*
	 * When publishing a backgrounded entry, we need to adjust the computed
	 * delay by the time that has already elapsed.
	 */

	g_assert(!pe->backgrounded == !(pe->publish_ev != NULL));

	if (pe->backgrounded) {
		time_delta_t elapsed = delta_time(tm_time(), pe->last_delayed);
		g_assert(pe->last_delayed > 0);
		cq_cancel(&pe->publish_ev);
		if (delay > elapsed) {
			delay -= elapsed;
		} else {
			delay = 1;
		}
	}

	/*
	 * Logging.
	 */

	if (GNET_PROPERTY(publisher_debug) > 1) {
		shared_file_t *sf = shared_file_by_sha1(pe->sha1);
		char retry[80];
		char after[80];
		const char *late = "";

		after[0] = '\0';
		if (pe->last_publish) {
			time_delta_t elapsed = delta_time(tm_time(), pe->last_publish);

			str_bprintf(after, sizeof after,
				" after %s", compact_time(elapsed));

			if (pd != NULL) {
				if (expired)
					late = "late, ";
			} else {
				late = "no data, ";
			}
		}

		str_bprintf(retry, sizeof retry, "%s", compact_time(delay));

		g_debug("PUBLISHER SHA-1 %s %s%s\"%s\" %spublished to %u node%s%s: %s"
			" (%stook %s, total %u node%s, proba %.3f%%, retry in %s,"
			" %s bg, path %u) [%s]",
			sha1_to_string(pe->sha1),
			pe->backgrounded ? "[bg] " : "",
			(sf && sf != SHARE_REBUILDING && shared_file_is_partial(sf)) ?
				"partial " : "",
			(sf && sf != SHARE_REBUILDING) ? shared_file_name_nfc(sf) : "",
			pe->last_publish ? "re" : "",
			info->roots, plural(info->roots),
			after, pdht_strerror(code), late,
			compact_time(delta_time(tm_time(), pe->last_enqueued)),
			info->all_roots, plural(info->all_roots),
			info->presence * 100.0, retry,
			info->can_bg ? "can" : "no", info->path_len,
			accepted ? "OK" : "INCOMPLETE");

		shared_file_unref(&sf);
	}

	/*
	 * Update last publishing time and remember expiration time.
	 */

	if (PDHT_E_OK == code && info->roots > 0) {
		pe->last_publish = tm_time();
		if (pd != NULL) {
			pd->expiration =
				time_advance(pe->last_publish, DHT_VALUE_ALOC_EXPIRE);
			dbmw_write(db_pubdata, pe->sha1, pd, sizeof *pd);
		}
	}

	/*
	 * If entry was deemed popular, we're going to delay its republishing
	 * by a larger amount of time and any data we published already about
	 * it will surely expire.  Since this is our decision, we do not want
	 * to be told that republishing, if it occurs again, was done later than
	 * required.  Hence call publisher_hold() to mark that we don't care.
	 */

	if (PDHT_E_POPULAR == code)
		publisher_hold(pe, delay, "popular entry");
	else
		publisher_retry(pe, delay, accepted ? "accepted publish" : "published");

	pe->backgrounded = !accepted;

	return accepted;
}
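/**
 * Standalone sketch of the PDHT_E_POPULAR backoff logic above: start from a
 * base delay, then keep doubling the elapsed interval until a cap is reached.
 * The constants are made-up placeholders for PUBLISH_POPULAR and
 * PUBLISH_POPULAR_MAX, whose actual values are not shown here.
 */
#include <time.h>

#define SKETCH_POPULAR_BASE	(12 * 3600)		/* assumed base delay, secs */
#define SKETCH_POPULAR_MAX	(7 * 24 * 3600)	/* assumed cap, secs */

static int
popular_backoff(time_t now, time_t last_delayed)
{
	long elapsed;

	if (0 == last_delayed)
		return SKETCH_POPULAR_BASE;		/* never delayed before */

	elapsed = (long) (now - last_delayed);

	if (elapsed < SKETCH_POPULAR_BASE)
		return SKETCH_POPULAR_BASE;
	if (elapsed >= SKETCH_POPULAR_MAX / 2)
		return SKETCH_POPULAR_MAX;

	return (int) (elapsed * 2);			/* double until the cap */
}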