Example #1
/**
 * External interface to call for getting the hash for a shared_file.
 */
void
request_sha1(shared_file_t *sf)
{
	struct sha1_cache_entry *cached;

	shared_file_check(sf);

	if (!shared_file_indexed(sf))
		return;		/* "stale" shared file, has been superseded or removed */

	cached = hikset_lookup(sha1_cache, shared_file_path(sf));

	if (cached && cached_entry_up_to_date(cached, sf)) {
		cache_dirty = TRUE;
		cached->shared = TRUE;
		shared_file_set_sha1(sf, cached->sha1);
		shared_file_set_tth(sf, cached->tth);
		request_tigertree(sf, NULL == cached->tth);
	} else {

		if (GNET_PROPERTY(share_debug) > 1) {
			if (cached)
				g_debug("cached SHA1 entry for \"%s\" outdated: "
					"had mtime %lu, now %lu",
					shared_file_path(sf),
					(ulong) cached->mtime,
					(ulong) shared_file_modification_time(sf));
			else
				g_debug("queuing \"%s\" for SHA1 computation",
						shared_file_path(sf));
		}

		queue_shared_file_for_sha1_computation(sf);
	}
}
Example #2
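/**
 * Verification-layer callback tracking the TTH computation of a shared file.
 */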
static bool
request_tigertree_callback(const struct verify *ctx, enum verify_status status,
	void *user_data)
{
	shared_file_t *sf = user_data;

	shared_file_check(sf);
	switch (status) {
	case VERIFY_START:
		if (!(SHARE_F_INDEXED & shared_file_flags(sf))) {
			/*
			 * After a rescan, there might be files in the queue which are
			 * no longer shared.
			 */

			if (GNET_PROPERTY(verify_debug) > 1) {
				g_debug("skipping TTH computation for %s: no longer shared",
					shared_file_path(sf));
			}
			return FALSE;
		}
		if (
			shared_file_tth(sf) &&
			tth_cache_lookup(shared_file_tth(sf), shared_file_size(sf)) > 0
		) {
			if (
				GNET_PROPERTY(tigertree_debug) > 1 ||
				GNET_PROPERTY(verify_debug) > 1
			) {
				g_debug("TTH for %s is already cached (%s)",
					shared_file_path(sf), tth_base32(shared_file_tth(sf)));
			}
			return FALSE;
		}
		gnet_prop_set_boolean_val(PROP_TTH_REBUILDING, TRUE);
		return TRUE;
	case VERIFY_PROGRESS:
		return 0 != (SHARE_F_INDEXED & shared_file_flags(sf));
	case VERIFY_DONE:
		{
			const struct tth *tth = verify_tth_digest(ctx);

			huge_update_hashes(sf, shared_file_sha1(sf), tth);
			tth_cache_insert(tth, verify_tth_leaves(ctx),
				verify_tth_leave_count(ctx));
		}
		/* FALL THROUGH */
	case VERIFY_ERROR:
	case VERIFY_SHUTDOWN:
		shared_file_unref(&sf);
		gnet_prop_set_boolean_val(PROP_TTH_REBUILDING, FALSE);
		return TRUE;
	case VERIFY_INVALID:
		break;
	}
	g_assert_not_reached();
	return FALSE;
}
Example #3
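/**
 * External interface to request TTH (tigertree) computation for a shared file.
 */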
void
request_tigertree(shared_file_t *sf, bool high_priority)
{
	int inserted;

	verify_tth_init();

	shared_file_check(sf);
	g_return_if_fail(shared_file_is_finished(sf));

	if (!shared_file_is_servable(sf))
		return;		/* "stale" shared file, has been superseded or removed */

	/*
	 * This routine can be called when the VERIFY_DONE event is received by
	 * huge_verify_callback().  We may have already shutdown the TTH
	 * verification thread.
	 */

	if G_UNLIKELY(NULL == verify_tth.verify)
		return;

	sf = shared_file_ref(sf);

	inserted = verify_enqueue(verify_tth.verify, high_priority,
					shared_file_path(sf), 0, shared_file_size(sf),
					request_tigertree_callback, sf);

	if (!inserted)
		shared_file_unref(&sf);
}
Example #4
/**
 * Add `comp' to the current completed count, and update the amount of
 * bytes transferred.  Note that `comp' can be zero.
 * When `update_dtime' is TRUE, we update the "done time", otherwise we
 * change the "last request time".
 *
 * If the row does not exist (race condition: deleted since upload started),
 * recreate one.
 */
static void
upload_stats_file_add(
	const shared_file_t *sf,
	int comp, guint64 sent, gboolean update_dtime)
{
	const char *pathname = shared_file_path(sf);
	filesize_t size = shared_file_size(sf);
	struct ul_stats *s;
	const struct sha1 *sha1;

	g_assert(comp >= 0);

	sha1 = sha1_hash_available(sf) ? shared_file_sha1(sf) : NULL;

	/* find this file in the ul_stats_clist */
	s = upload_stats_find(sha1, pathname, size);

	/* increment the completed counter */
	if (NULL == s) {
		/* uh oh, row has since been deleted, add it: 1 attempt */
		upload_stats_add(pathname, size, shared_file_name_nfc(sf),
			1, comp, sent, tm_time(), tm_time(), sha1);
	} else {
		s->bytes_sent += sent;
		s->norm = 1.0 * s->bytes_sent / s->size;
		s->complete += comp;
		if (update_dtime)
			s->dtime = tm_time();
		else
			s->rtime = tm_time();
		gcu_upload_stats_gui_update(s);
	}

	dirty = TRUE;		/* Request asynchronous save of stats */
}
Example #5
/**
 * Called when an upload starts.
 */
void
upload_stats_file_begin(const shared_file_t *sf)
{
	struct ul_stats *s;
	const char *pathname;
	filesize_t size;
	const struct sha1 *sha1;

	g_return_if_fail(sf);
	pathname = shared_file_path(sf);
	size = shared_file_size(sf);
	sha1 = sha1_hash_available(sf) ? shared_file_sha1(sf) : NULL;

	/* find this file in the ul_stats_clist */
	s = upload_stats_find(sha1, pathname, size);

	/* increment the attempted counter */
	if (NULL == s) {
		upload_stats_add(pathname, size, shared_file_name_nfc(sf),
			1, 0, 0, tm_time(), 0, sha1);
	} else {
		s->attempts++;
		s->rtime = tm_time();
		gcu_upload_stats_gui_update(s);
	}

	dirty = TRUE;		/* Request asynchronous save of stats */
}
Example #6
/**
 * Make sure the filename associated with a SHA1 is given the name of
 * the shared file and no longer bears the name of the partial file.
 * This can happen when the partial file is seeded then the file is
 * renamed and shared.
 */
void
upload_stats_enforce_local_filename(const shared_file_t *sf)
{
	struct ul_stats *s;
	const struct sha1 *sha1;
	const char *name;

	if (!upload_stats_by_sha1)
		return;		/* Nothing known by SHA1 yet */

	sha1 = sha1_hash_available(sf) ? shared_file_sha1(sf) : NULL;

	if (!sha1)
		return;		/* File's SHA1 not known yet, nothing to do here */

	s = g_hash_table_lookup(upload_stats_by_sha1, sha1);

	if (NULL == s)
		return;							/* SHA1 not in stats, nothing to do */

	name = shared_file_name_nfc(sf);
	if (name == s->filename)			/* Both are string atoms */
		return;							/* Everything is fine */

	/*
	 * We need to update the filename to match the shared file.
	 */

	hash_list_remove(upload_stats_list, s);
	atom_str_change(&s->pathname, shared_file_path(sf));
	atom_str_change(&s->filename, name);
	hash_list_append(upload_stats_list, s);

	gcu_upload_stats_gui_update_name(s);
}
Example #7
/**
 * External interface to check whether the sha1 for shared_file is known.
 */
bool
sha1_is_cached(const shared_file_t *sf)
{
	const struct sha1_cache_entry *cached;

	cached = hikset_lookup(sha1_cache, shared_file_path(sf));
	return cached && cached_entry_up_to_date(cached, sf);
}
Example #8
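/**
 * Check whether the shared file is flagged as spam, either via its SHA1 or
 * via its filename and size.
 *
 * @return TRUE if the file is listed as spam.
 */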
static bool
huge_spam_check(shared_file_t *sf, const struct sha1 *sha1)
{
	if (NULL != sha1 && spam_sha1_check(sha1)) {
		g_warning("file \"%s\" is listed as spam (SHA1)", shared_file_path(sf));
		return TRUE;
	}

	if (
		spam_check_filename_size(shared_file_name_nfc(sf),
			shared_file_size(sf))
	) {
		g_warning("file \"%s\" is listed as spam (Name)", shared_file_path(sf));
		return TRUE;
	}
	return FALSE;
}
Example #9
/**
 * Check whether we still need to compute the SHA1 of the given shared file,
 * by looking into our in-core cache to see whether the entry we have is
 * up-to-date.
 *
 * @param sf	the shared file for which we want to compute the SHA1
 *
 * @return TRUE if the file needs SHA1 recomputation.
 */
static bool
huge_need_sha1(shared_file_t *sf)
{
	struct sha1_cache_entry *cached;

	shared_file_check(sf);

	/*
	 * After a rescan, there might be files in the queue which are
	 * no longer shared.
	 */

	if (!shared_file_indexed(sf))
		return FALSE;

	if G_UNLIKELY(NULL == sha1_cache)
		return FALSE;		/* Shutdown occurred (processing TEQ event?) */

	cached = hikset_lookup(sha1_cache, shared_file_path(sf));

	if (cached != NULL) {
		filestat_t sb;

		if (-1 == stat(shared_file_path(sf), &sb)) {
			g_warning("ignoring SHA1 recomputation request for \"%s\": %m",
				shared_file_path(sf));
			return FALSE;
		}
		if (
			cached->size + (fileoffset_t) 0 == sb.st_size + (filesize_t) 0 &&
			cached->mtime == sb.st_mtime
		) {
			if (GNET_PROPERTY(share_debug) > 1) {
				g_warning("ignoring duplicate SHA1 work for \"%s\"",
					shared_file_path(sf));
			}
			return FALSE;
		}
	}
	return TRUE;
}
Example #10
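/**
 * Record the freshly computed SHA1 (and possibly TTH) of a shared file,
 * updating both the in-core and the persistent caches.
 *
 * @return FALSE if the file was flagged as spam and removed, TRUE otherwise.
 */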
bool
huge_update_hashes(shared_file_t *sf,
	const struct sha1 *sha1, const struct tth *tth)
{
	struct sha1_cache_entry *cached;
	filestat_t sb;

	shared_file_check(sf);
	g_return_val_if_fail(sha1, FALSE);

	/*
	 * Make sure the file's timestamp is still accurate.
	 */

	if (-1 == stat(shared_file_path(sf), &sb)) {
		g_warning("discarding SHA1 for file \"%s\": can't stat(): %m",
			shared_file_path(sf));
		shared_file_remove(sf);
		return TRUE;
	}

	if (sb.st_mtime != shared_file_modification_time(sf)) {
		g_warning("file \"%s\" was modified whilst SHA1 was computed",
			shared_file_path(sf));
		shared_file_set_modification_time(sf, sb.st_mtime);
		request_sha1(sf);					/* Retry! */
		return TRUE;
	}

	if (huge_spam_check(sf, sha1)) {
		shared_file_remove(sf);
		return FALSE;
	}

	shared_file_set_sha1(sf, sha1);
	shared_file_set_tth(sf, tth);

	/* Update cache */

	cached = hikset_lookup(sha1_cache, shared_file_path(sf));

	if (cached) {
		update_volatile_cache(cached, shared_file_size(sf),
			shared_file_modification_time(sf), sha1, tth);
		cache_dirty = TRUE;

		/* Dump the cache at most about once per minute. */
		if (!cache_dumped || delta_time(tm_time(), cache_dumped) > 60) {
			dump_cache(FALSE);
		}
	} else {
		add_volatile_cache_entry(shared_file_path(sf),
			shared_file_size(sf), shared_file_modification_time(sf),
			sha1, tth, TRUE);
		add_persistent_cache_entry(shared_file_path(sf),
			shared_file_size(sf), shared_file_modification_time(sf),
			sha1, tth);
	}
	return TRUE;
}
Example #11
/**
 * Put the shared file on the stack of the things to do.
 *
 * We first begin with the computation of the SHA1, and when completed we
 * will continue with the TTH computation.
 */
static void
queue_shared_file_for_sha1_computation(shared_file_t *sf)
{
	int inserted;

	shared_file_check(sf);

	inserted = verify_sha1_enqueue(FALSE, shared_file_path(sf),
					shared_file_size(sf), huge_verify_callback,
					shared_file_ref(sf));

	if (!inserted)
		shared_file_unref(&sf);
}
Example #12
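/**
 * External interface to request TTH (tigertree) computation for a shared file.
 */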
void
request_tigertree(shared_file_t *sf, bool high_priority)
{
	int inserted;

	verify_tth_init();

	g_return_if_fail(sf);
	shared_file_check(sf);
	g_return_if_fail(!shared_file_is_partial(sf));

	sf = shared_file_ref(sf);

	inserted = verify_enqueue(verify_tth.verify, high_priority,
					shared_file_path(sf), 0, shared_file_size(sf),
					request_tigertree_callback, sf);

	if (!inserted)
		shared_file_unref(&sf);
}
Example #13
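/**
 * Dump a single upload statistics entry to the output file.
 *
 * Iterator callback: `p' is the ul_stats entry, `user_data' the output FILE.
 */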
static void
upload_stats_dump_item(gpointer p, gpointer user_data)
{
	const shared_file_t *sf;
	FILE *out = user_data;
	struct ul_stats *s = p;
	char rtime_buf[TIME_T_DEC_BUFLEN];
	char dtime_buf[TIME_T_DEC_BUFLEN];
	const char *pathname;
	char *escaped;

	g_assert(NULL != s);

	sf = s->sha1 ? shared_file_by_sha1(s->sha1) : NULL;
	sf = SHARE_REBUILDING != sf ? sf : NULL;
	if (sf) {
		pathname = shared_file_path(sf);
	} else {
		pathname = s->pathname;
	}
	escaped = url_escape_cntrl(pathname);

	time_t_to_string_buf(s->rtime, rtime_buf, sizeof rtime_buf);
	time_t_to_string_buf(s->dtime, dtime_buf, sizeof dtime_buf);

	fprintf(out, "%s\t%s\t%u\t%u\t%lu\t%lu\t%s\t%s\t%s\n",
		escaped,
		uint64_to_string(s->size),
		s->attempts,
		s->complete,
		(unsigned long) (s->bytes_sent >> 32),
		(unsigned long) (s->bytes_sent & 0xffffffff),
		rtime_buf,
		dtime_buf,
		s->sha1 ? sha1_base32(s->sha1) : "*");

	if (escaped != pathname) {		/* File had escaped chars */
		HFREE_NULL(escaped);
	}
}
Example #14
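/**
 * Verification-layer callback tracking the TTH computation of a shared file.
 */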
static bool
request_tigertree_callback(const struct verify *ctx, enum verify_status status,
	void *user_data)
{
	shared_file_t *sf = user_data;

	shared_file_check(sf);
	switch (status) {
	case VERIFY_START:
		if (!shared_file_is_servable(sf)) {
			/*
			 * After a rescan, there might be files in the queue which are
			 * no longer shared.
			 */

			if (GNET_PROPERTY(verify_debug) > 1) {
				g_debug("skipping TTH computation for %s: not a servable file",
					shared_file_path(sf));
			}
			return FALSE;
		}
		if (shared_file_tth_is_available(sf)) {
			if (
				GNET_PROPERTY(tigertree_debug) > 1 ||
				GNET_PROPERTY(verify_debug) > 1
			) {
				g_debug("TTH for %s is already cached (%s)",
					shared_file_path(sf), tth_base32(shared_file_tth(sf)));
			}
			return FALSE;
		}
		gnet_prop_set_boolean_val(PROP_TTH_REBUILDING, TRUE);
		return TRUE;
	case VERIFY_PROGRESS:
		/*
		 * Processing can continue whilst the library file is indexed or the
		 * completed file is still being seeded.
		 */
		return shared_file_is_servable(sf);
	case VERIFY_DONE:
		{
			const struct tth *tth = verify_tth_digest(ctx);
			size_t n_leaves = verify_tth_leave_count(ctx);

			if (GNET_PROPERTY(verify_debug)) {
				g_debug("%s(): computed TTH %s (%zu lea%s) for %s",
					G_STRFUNC, tth_base32(tth),
					n_leaves, plural_f(n_leaves),
					shared_file_path(sf));
			}

			/*
			 * Write the TTH to the cache first, before updating the hashes.
			 * That way, the logic behind huge_update_hashes() can rely on
			 * the fact that the TTH is persisted already.
			 *
			 * This is important for seeded files for which we re-compute
			 * the TTH once they are completed (to make sure we can serve
			 * THEX requests at the proper depth).  In order to update
			 * the GUI information, we'll need to probe the cache to determine
			 * how large the TTH is exactly, since all we pass back to the
			 * routines is the TTH root hash.
			 *		--RAM, 2017-10-20
			 */

			tth_cache_insert(tth, verify_tth_leaves(ctx), n_leaves);
			huge_update_hashes(sf, shared_file_sha1(sf), tth);
		}
		goto done;
	case VERIFY_ERROR:
		if (GNET_PROPERTY(verify_debug)) {
			g_debug("%s(): unable to compute TTH for %s",
				G_STRFUNC, shared_file_path(sf));
		}
		/* FALL THROUGH */
	case VERIFY_SHUTDOWN:
		goto done;
	case VERIFY_INVALID:
		break;
	}
	g_assert_not_reached();
	return FALSE;

done:
	shared_file_unref(&sf);
	gnet_prop_set_boolean_val(PROP_TTH_REBUILDING, FALSE);
	return TRUE;
}