Example #1
/**
 * Invalidate possibly cached page.
 *
 * This is used when we know a new and fresh copy of the page is held on
 * the disk.  Further access to the page will require reloading the
 * page from disk.
 */
void
lru_invalidate(DBM *db, long bno)
{
	struct lru_cache *cache = db->cache;
	void *value;

	if (
		g_hash_table_lookup_extended(cache->pagnum,
			ulong_to_pointer(bno), NULL, &value)
	) {
		long idx = pointer_to_int(value);

		g_assert(idx >= 0 && idx < cache->pages);
		g_assert(cache->numpag[idx] == bno);

		/*
		 * One should never be invalidating a dirty page, unless something
		 * went wrong during a split and we're trying to undo things.
		 * Since the operation will cause data loss, warn.
		 */

		if (cache->dirty[idx]) {
			g_warning("sdbm: \"%s\": %s() invalidating dirty page #%ld",
				db->name, stacktrace_caller_name(1), bno);
		}

		hash_list_remove(cache->used, value);
		g_hash_table_remove(cache->pagnum, ulong_to_pointer(bno));
		cache->numpag[idx] = -1;
		cache->dirty[idx] = FALSE;
		slist_append(cache->available, value);	/* Make index available */
	}
}
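
The routine above keeps three structures in sync: the page-number hash, the used list and the free-slot list. Below is a minimal, self-contained sketch of the same bookkeeping idea, with the GLib hash table replaced by a linear scan over the slots; every name in it (tiny_cache, tiny_invalidate, CACHE_PAGES) is hypothetical and only illustrates dropping the page-to-slot mapping, clearing the dirty bit and releasing the slot.

/*
 * Illustrative sketch only -- hypothetical names, not the SDBM cache API.
 */
#include <stdbool.h>
#include <stdio.h>

#define CACHE_PAGES 8

struct tiny_cache {
	long numpag[CACHE_PAGES];	/* page number held by each slot, -1 if free */
	bool dirty[CACHE_PAGES];	/* slot holds unflushed modifications */
};

/* Forget any cached copy of page `bno'; the next access re-reads the disk. */
static void
tiny_invalidate(struct tiny_cache *c, long bno)
{
	int i;

	for (i = 0; i < CACHE_PAGES; i++) {
		if (c->numpag[i] == bno) {
			if (c->dirty[i])
				fprintf(stderr, "invalidating dirty page #%ld\n", bno);
			c->numpag[i] = -1;		/* slot is available again */
			c->dirty[i] = false;
			return;
		}
	}
}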
Example #2
/**
 * Expire registered pings.
 *
 * @param forced	TRUE if we're shutting down and want to clean up
 */
static void
udp_ping_expire(bool forced)
{
	time_t now;

	g_return_if_fail(udp_pings);

	now = tm_time();
	for (;;) {
		struct udp_ping *ping;
		time_delta_t d;

		ping = hash_list_head(udp_pings);
		if (NULL == ping)
			break;
		if (!forced) {
			d = delta_time(now, ping->added);
			if (d > 0 && d <= UDP_PING_TIMEOUT) {
				break;
			}
		}
		if (ping->callback) {
			(*ping->callback->cb)(
				ping->callback->got_reply ?
					UDP_PING_EXPIRED : UDP_PING_TIMEDOUT,
				NULL, ping->callback->data);
		}
		hash_list_remove(udp_pings, ping);
		udp_ping_free(ping);
	}
}
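
Because pings are appended in registration order, the head of udp_pings is always the oldest entry, which is why the loop may stop at the first one still within its timeout. A self-contained sketch of that expiration loop over a plain singly linked list follows; tiny_ping, expire_pings and PING_TIMEOUT are hypothetical names, not part of the code above.

/*
 * Illustrative sketch only -- hypothetical names, not the udp_pings API.
 */
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

#define PING_TIMEOUT 60			/* seconds an entry may stay registered */

struct tiny_ping {
	time_t added;			/* registration time */
	struct tiny_ping *next;		/* next (younger) entry */
};

static void
expire_pings(struct tiny_ping **head, bool forced)
{
	time_t now = time(NULL);

	while (*head != NULL) {
		struct tiny_ping *p = *head;

		/* Head is the oldest entry: if it is still fresh, all are. */
		if (!forced && difftime(now, p->added) <= PING_TIMEOUT)
			break;

		*head = p->next;		/* unlink and discard */
		free(p);
	}
}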
Example #3
/**
 * Remove cached entry for key, optionally disposing of the whole structure.
 * Cached entry is flushed if it was dirty and flush is set.
 *
 * @return the reusable cached entry if dispose was FALSE and the key was
 * indeed cached, NULL otherwise.
 */
static struct cached *
remove_entry(dbmw_t *dw, gconstpointer key, gboolean dispose, gboolean flush)
{
	struct cached *old;
	gpointer old_key;
	gboolean found;

	found = map_lookup_extended(dw->values, key, &old_key, (gpointer) &old);

	if (!found)
		return NULL;

	g_assert(old != NULL);

	if (old->dirty && flush)
		write_back(dw, key, old);

	hash_list_remove(dw->keys, key);
	map_remove(dw->values, key);
	wfree(old_key, dbmw_keylen(dw, old_key));

	if (!dispose)
		return old;

	/*
	 * Dispose of the cache structure.
	 */

	free_value(dw, old, TRUE);
	WFREE(old);

	return NULL;
}
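
The ordering that matters here is write-back before unlinking: a dirty entry is flushed while the key is still known, then removed from both structures, and only freed when dispose is set. A detached sketch of that policy follows, with hypothetical types (tiny_entry, tiny_remove) and a write_out() stand-in for whatever persists the value.

/*
 * Illustrative sketch only -- hypothetical names, not the DBMW cache API.
 */
#include <stdbool.h>
#include <stdlib.h>

struct tiny_entry {
	void *data;		/* cached value */
	size_t len;		/* length of the cached value */
	bool dirty;		/* value differs from what is on disk */
};

/* Stand-in for the routine that persists an entry (write_back() above). */
static void write_out(struct tiny_entry *e) { e->dirty = false; }

/*
 * Drop a cached entry.  If `flush' is set and the entry is dirty, persist
 * it first so the modification is not lost; if `dispose' is unset, hand the
 * entry back to the caller for reuse instead of freeing it.
 */
static struct tiny_entry *
tiny_remove(struct tiny_entry *e, bool dispose, bool flush)
{
	if (e->dirty && flush)
		write_out(e);

	if (!dispose)
		return e;		/* caller may recycle the structure */

	free(e->data);
	free(e);
	return NULL;
}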
Example #4
/**
 * Make sure the filename associated with a SHA1 is given the name of
 * the shared file and no longer bears the name of the partial file.
 * This can happen when the partial file is seeded then the file is
 * renamed and shared.
 */
void
upload_stats_enforce_local_filename(const shared_file_t *sf)
{
	struct ul_stats *s;
	const struct sha1 *sha1;
	const char *name;

	if (!upload_stats_by_sha1)
		return;		/* Nothing known by SHA1 yet */

	sha1 = sha1_hash_available(sf) ? shared_file_sha1(sf) : NULL;

	if (!sha1)
		return;		/* File's SHA1 not known yet, nothing to do here */

	s = g_hash_table_lookup(upload_stats_by_sha1, sha1);

	if (NULL == s)
		return;							/* SHA1 not in stats, nothing to do */

	name = shared_file_name_nfc(sf);
	if (name == s->filename)			/* Both are string atoms */
		return;							/* Everything is fine */

	/*
	 * We need to update the filename to match the shared file.
	 */

	hash_list_remove(upload_stats_list, s);
	atom_str_change(&s->pathname, shared_file_path(sf));
	atom_str_change(&s->filename, name);
	hash_list_append(upload_stats_list, s);

	gcu_upload_stats_gui_update_name(s);
}
Example #5
/**
 * Callout queue callback fired when waiting event times out.
 */
static void
wq_timed_out(cqueue_t *cq, void *arg)
{
	wq_event_t *we = arg;
	hash_list_t *hl;
	wq_status_t status;

	wq_event_check(we);
	g_assert(we->tm != NULL);

	cq_zero(cq, &we->tm->timeout_ev);
	hl = htable_lookup(waitqueue, we->key);

	g_assert(hl != NULL);

	/*
	 * Invoke the callback with the sentinel data signalling a timeout.
	 */

	status = (*we->cb)(we->arg, WQ_TIMED_OUT);

	/*
	 * When the callback returns WQ_SLEEP, we re-instantiate the initial
	 * timeout.
	 *
	 * Otherwise the event is discarded (removed from the wait queue) and
	 * the callback will never be invoked again for this event.
	 */

	switch (status) {
	case WQ_SLEEP:
		we->tm->timeout_ev = cq_main_insert(we->tm->delay, wq_timed_out, we);
		return;
	case WQ_EXCLUSIVE:
		s_critical("weird status WQ_EXCLUSIVE on timeout invocation of %s()",
			stacktrace_function_name(we->cb));
		/* FALL THROUGH */
	case WQ_REMOVE:
		hash_list_remove(hl, we);

		/*
		 * Clean up the table if it ends up being empty.
		 */

		if (0 == hash_list_length(hl)) {
			hash_list_free(&hl);
			htable_remove(waitqueue, we->key);
		}

		wq_event_free(we);
		return;
	}

	g_assert_not_reached();
}
Example #6
/**
 * @return NULL on error, a newly allocated string via halloc() otherwise.
 */
static char *
uhc_get_next(void)
{
	struct uhc *uhc;
	char *host;
	time_t now;
	size_t n;

	g_return_val_if_fail(uhc_list, NULL);

	now = tm_time();

	n = hash_list_count(uhc_list);
	if (0 == n)
		return NULL;

	/*
	 * Wait UHC_RETRY_AFTER seconds before contacting the UHC again.
	 * This can't be too long because the UDP reply may get lost if the
	 * requesting host's bandwidth is already saturated.
	 * If we come here, it's because we're lacking hosts for establishing
	 * a Gnutella connection, after we exhausted our caches.
	 */

	while (n-- != 0) {
		uhc = hash_list_head(uhc_list);

		g_assert(uhc != NULL);	/* We computed count on entry */

		if (delta_time(now, uhc->stamp) >= UHC_RETRY_AFTER)
			goto found;

		hash_list_moveto_tail(uhc_list, uhc);
	}

	return NULL;

found:
	uhc->stamp = now;
	host = h_strdup(uhc->host);

	if (uhc->used < UHC_MAX_ATTEMPTS) {
		uhc->used++;
		hash_list_moveto_tail(uhc_list, uhc);
	} else {
		hash_list_remove(uhc_list, uhc);
		uhc_free(&uhc);
	}

	return host;
}
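
The rotation policy gives every cache a fair chance while spacing out retries: an entry contacted too recently is skipped and moved to the tail, a usable one is stamped and counted, and one that has exhausted UHC_MAX_ATTEMPTS is dropped. An array-based sketch of the same selection logic follows; tiny_uhc, pick_next and the constants are hypothetical stand-ins, not the uhc_list API.

/*
 * Illustrative sketch only -- hypothetical names and constants.
 */
#include <stddef.h>
#include <time.h>

#define RETRY_AFTER	1200	/* seconds before re-contacting the same cache */
#define MAX_ATTEMPTS	3	/* tries before a cache is given up on */

struct tiny_uhc {
	const char *host;
	time_t stamp;		/* last time this cache was handed out */
	unsigned used;		/* how many times it was handed out */
	int dead;		/* exhausted its attempts */
};

/* Return the index of the next cache to contact, or -1 if none is due yet. */
static int
pick_next(struct tiny_uhc *list, size_t n, time_t now)
{
	size_t i;

	for (i = 0; i < n; i++) {
		struct tiny_uhc *u = &list[i];

		if (u->dead || difftime(now, u->stamp) < RETRY_AFTER)
			continue;	/* not usable, or contacted too recently */

		u->stamp = now;
		if (++u->used >= MAX_ATTEMPTS)
			u->dead = 1;	/* the original drops the entry from the list */
		return (int) i;
	}
	return -1;		/* every cache was tried too recently */
}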
Example #7
/**
 * Detach a UDP TX scheduling layer from a TX stack.
 *
 * @param us			the UDP TX scheduler to detach from
 * @param tx			the TX driver detaching from the scheduler
 */
void
udp_sched_detach(udp_sched_t *us, const txdrv_t *tx)
{
	struct udp_tx_stack key, *uts;
	const void *oldkey;

	udp_sched_check(us);

	key.tx = tx;
	g_assert(hash_list_contains(us->stacks, &key));

	hash_list_find(us->stacks, &key, &oldkey);
	uts = deconstify_pointer(oldkey);
	hash_list_remove(us->stacks, uts);
	WFREE(uts);
}
Example #8
/**
 * Remove an attribute from the table.
 *
 * The key is only used for lookups; only the hashed elements need to be
 * filled in.
 *
 * @return TRUE if we found the attribute and removed it.
 */
static bool
xattr_table_remove_key(xattr_table_t *xat, const struct xattr *key)
{
	struct xattr *old;

	xattr_table_check(xat);
	g_assert(key != NULL);

	old = xattr_table_lookup_key(xat, key);

	if (old != NULL) {
		hash_list_remove(xat->hl, old);
		xattr_free(old);
		return TRUE;
	} else {
		return FALSE;
	}
}
Example #9
/**
 * Map iterator to free cached entries that have been marked as removable.
 */
static gboolean
cache_free_removable(gpointer key, gpointer value, gpointer data)
{
	dbmw_t *dw = data;
	struct cached *entry = value;

	dbmw_check(dw);
	g_assert(!entry->len == !entry->data);

	if (!entry->removable)
		return FALSE;

	free_value(dw, entry, TRUE);
	hash_list_remove(dw->keys, key);
	wfree(key, dbmw_keylen(dw, key));
	WFREE(entry);

	return TRUE;
}
Example #10
/**
 * Remove a key from the table.
 *
 * @return TRUE if the key was found and removed.
 */
bool
ohash_table_remove(ohash_table_t *oh, const void *key)
{
	struct ohash_pair pk;
	struct ohash_pair *op;

	ohash_table_check(oh);

	pk.oh = oh;
	pk.key = key;

	if (!hash_list_contains(oh->hl, &pk))
		return FALSE;

	op = hash_list_remove(oh->hl, &pk);
	g_assert(op->oh == oh);
	ohash_free_kv(op);

	return TRUE;
}
Example #11
/**
 * Clear all the upload stats data structure.
 */
static G_GNUC_COLD void
upload_stats_free_all(void)
{
	if (upload_stats_list) {
		struct ul_stats *s;

		while (NULL != (s = hash_list_head(upload_stats_list))) {
			hash_list_remove(upload_stats_list, s);
			atom_str_free_null(&s->pathname);
			atom_str_free_null(&s->filename);
			if (s->sha1)
				g_hash_table_remove(upload_stats_by_sha1, s->sha1);
			atom_sha1_free_null(&s->sha1);
			WFREE(s);
		}
		hash_list_free(&upload_stats_list);
		gm_hash_table_destroy_null(&upload_stats_by_sha1);
	}
	dirty = TRUE;
}
Example #12
/**
 * Remove an event from the queue.
 */
static void
wq_remove(wq_event_t *we)
{
	hash_list_t *hl;

	wq_event_check(we);

	hl = htable_lookup(waitqueue, we->key);
	if (NULL == hl) {
		s_critical("attempt to remove event %s() on unknown key %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (NULL == hash_list_remove(hl, we)) {
		s_critical("attempt to remove unknown event %s() on %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (0 == hash_list_length(hl)) {
		hash_list_free(&hl);
		htable_remove(waitqueue, we->key);
	}

	wq_event_free(we);
}
Example #13
/**
 * Remove cached entry for key, optionally disposing of the whole structure.
 * Cached entry is flushed if it was dirty and flush is set.
 *
 * @return the reusable cached entry if dispose was FALSE and the key was
 * indeed cached, NULL otherwise.
 */
static struct cached *
remove_entry(dbmw_t *dw, const void *key, bool dispose, bool flush)
{
	struct cached *old;
	void *old_key;
	bool found;

	found = map_lookup_extended(dw->values, key, &old_key, (void *) &old);

	if (!found)
		return NULL;

	g_assert(old != NULL);

	if (dbg_ds_debugging(dw->dbg, 3, DBG_DSF_CACHING)) {
		dbg_ds_log(dw->dbg, dw, "%s: %s key=%s (%s)",
			G_STRFUNC, old->dirty ? "dirty" : "clean",
			dbg_ds_keystr(dw->dbg, key, (size_t) -1),
			flush ? "flushing" : " discarding");
	}

	if (old->dirty && flush)
		write_back(dw, key, old);

	hash_list_remove(dw->keys, key);
	map_remove(dw->values, key);
	wfree(old_key, dbmw_keylen(dw, old_key));

	if (!dispose)
		return old;

	/*
	 * Dispose of the cache structure.
	 */

	free_value(dw, old, TRUE);
	WFREE(old);

	return NULL;
}
Example #14
/**
 * Upon reception of a UDP pong, check whether we had a matching registered
 * ping bearing the given MUID.
 *
 * If there was a callback attached to the reception of a reply, invoke it
 * before returning UDP_PONG_HANDLED.
 *
 * The ``host'' parameter MUST be a stack or static pointer to a gnet_host_t,
 * and NOT the address of a dynamically allocated host because gnet_host_copy()
 * is going to be used on it.
 *
 * @param n		the gnutella node replying
 * @param host	if non-NULL, filled with the host to whom we sent the ping
 *
 * @return TRUE if indeed this was a reply for a ping we sent.
 */
enum udp_pong_status
udp_ping_is_registered(const struct gnutella_node *n, gnet_host_t *host)
{
	const struct guid *muid = gnutella_header_get_muid(&n->header);

	if (udp_pings) {
		struct udp_ping *ping;

		ping = hash_list_remove(udp_pings, muid);
		if (ping != NULL) {
			if (host != NULL) {
				/*
				 * Let caller know the exact IP:port of the host we contacted,
				 * since the replying party can use a different port (which
				 * we may not be able to contact, whereas we know the targeted
				 * port did cause a reply).
				 */
				gnet_host_copy(host, ping->host);
			}

			if (ping->callback) {
				(*ping->callback->cb)(UDP_PING_REPLY, n, ping->callback->data);
				if (ping->callback->multiple) {
					ping->callback->got_reply = TRUE;
					ping->added = tm_time();	/* Delay expiration */
					hash_list_append(udp_pings, ping);
				} else {
					udp_ping_free(ping);
				}
				return UDP_PONG_HANDLED;
			}
			udp_ping_free(ping);
			return UDP_PONG_SOLICITED;
		}
	}
	return UDP_PONG_UNSOLICITED;
}
Example #15
/**
 * Callback for adns_resolve(), invoked when the resolution is complete.
 */
static void
uhc_host_resolved(const host_addr_t *addrs, size_t n, void *uu_udata)
{
	(void) uu_udata;
	g_assert(addrs);

	/*
	 * If resolution failed, try again if possible.
	 */

	if (0 == n) {
		if (GNET_PROPERTY(bootstrap_debug))
			g_warning("could not resolve UDP host cache \"%s\"",
				uhc_ctx.host);

		uhc_try_next();
		return;
	}

	if (n > 1) {
		size_t i;
		host_addr_t *hav;
		/* Current UHC was moved to tail by uhc_get_next() */
		struct uhc *uhc = hash_list_tail(uhc_list);

		/*
		 * UHC resolved to multiple endpoints.  Could be round-robin or
		 * IPv4 and IPv6 addresses.  Adding them as separate entries: if the
		 * IPv6 one is unreachable we have an opportunity to skip it.
		 * 		-- JA 24/7/2011
		 *
		 * Shuffle the address array before appending them to the UHC list.
		 *		--RAM, 2015-10-01
		 */

		hav = HCOPY_ARRAY(addrs, n);
		SHUFFLE_ARRAY_N(hav, n);

		for (i = 0; i < n; i++) {
			const char *host = host_addr_port_to_string(hav[i], uhc_ctx.port);
			g_debug("BOOT UDP host cache \"%s\" resolved to %s (#%zu)",
				uhc_ctx.host, host, i + 1);

			uhc_list_append(host);
		}

		hash_list_remove(uhc_list, uhc);	/* Replaced by IP address list */
		uhc_free(&uhc);

		/*
		 * We're going to continue and process the first address (in our
		 * shuffled array).  Make sure it is put at the end of the list
		 * and marked as being used, mimicking what uhc_get_next() would do.
		 *		--RAM, 2015-10-01
		 */

		{
			struct uhc key;

			key.host = host_addr_port_to_string(hav[0], uhc_ctx.port);
			uhc = hash_list_lookup(uhc_list, &key);
			g_assert(uhc != NULL);	/* We added the entry above! */
			uhc->stamp = tm_time();
			uhc->used++;
			hash_list_moveto_tail(uhc_list, uhc);
		}

		uhc_ctx.addr = hav[0];		/* Struct copy */
		HFREE_NULL(hav);
	} else {
		uhc_ctx.addr = addrs[0];
	}

	if (GNET_PROPERTY(bootstrap_debug))
		g_debug("BOOT UDP host cache \"%s\" resolved to %s",
			uhc_ctx.host, host_addr_to_string(uhc_ctx.addr));

	/*
	 * Now send the ping.
	 */

	uhc_send_ping();
}