Example #1
/**
 * Let sleepers know about the wake-up condition.
 *
 * @param hl		the list of waiting parties
 * @param data		waking-up data to supply to callback
 */
static void
wq_notify(hash_list_t *hl, void *data)
{
	hash_list_iter_t *iter;
	size_t i, count;

	iter = hash_list_iterator(hl);
	count = hash_list_length(hl);
	i = 0;

	while (hash_list_iter_has_next(iter)) {
		wq_event_t *we = hash_list_iter_next(iter);
		wq_status_t status;

		wq_event_check(we);

		/*
		 * Stop iteration in case callbacks have called wq_sleep() on the
		 * same waiting queue we're iterating on and added items to the list.
		 * This sanity check ensures we're not going to loop forever with a
		 * callback systematically appending something.
		 */

		if (i++ >= count) {
			/* Something is odd, let them know about the calling stack */
			s_critical("stopping after processing %zu item%s (list now has %u)",
				count, plural(count), hash_list_length(hl));
			break;		/* Actually stop, as the comment above promises */
		}

		status = (*we->cb)(we->arg, data);

		switch (status) {
		case WQ_SLEEP:
			continue;		/* Still sleeping, leave in the list */
		case WQ_EXCLUSIVE:
		case WQ_REMOVE:
			goto remove;
		}

		s_error("invalid status %d returned by %s()",
			status, stacktrace_function_name(we->cb));

	remove:
		hash_list_iter_remove(iter);
		wq_event_free(we);

		/*
		 * The callback may decide that we shouldn't continue notifying
		 * other sleepers (because it knows it grabbed a resource that others
		 * will need for instance).  This is used as an early termination
		 * of the loop.
		 */

		if (WQ_EXCLUSIVE == status)
			break;
	}

	hash_list_iter_release(&iter);
}
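
The loop above dispatches on the wq_status_t value returned by each sleeper's callback, whose (void *arg, void *data) signature can be read off the (*we->cb)(we->arg, data) invocation. A minimal sketch of such a callback; struct my_waiter and resource_try_grab() are hypothetical:

/*
 * Hypothetical wake-up callback: try to grab a contended resource.
 * WQ_TIMED_OUT is the sentinel data supplied on timeout (see the
 * wq_timed_out() example below).
 */
static wq_status_t
my_wakeup(void *arg, void *data)
{
	struct my_waiter *w = arg;

	if (WQ_TIMED_OUT == data)
		return WQ_REMOVE;		/* Give up waiting on timeout */

	if (!resource_try_grab(w->resource))
		return WQ_SLEEP;		/* Still unavailable, stay in the list */

	/* Resource grabbed: no point in waking up the other sleepers */
	return WQ_EXCLUSIVE;
}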
Example #2
/**
 * Callout queue callback fired when waiting event times out.
 */
static void
wq_timed_out(cqueue_t *cq, void *arg)
{
	wq_event_t *we = arg;
	hash_list_t *hl;
	wq_status_t status;

	wq_event_check(we);
	g_assert(we->tm != NULL);

	cq_zero(cq, &we->tm->timeout_ev);
	hl = htable_lookup(waitqueue, we->key);

	g_assert(hl != NULL);

	/*
	 * Invoke the callback with the sentinel data signalling a timeout.
	 */

	status = (*we->cb)(we->arg, WQ_TIMED_OUT);

	/*
	 * When the callback returns WQ_SLEEP, we re-instantiate the initial
	 * timeout.
	 *
	 * Otherwise the event is discarded (removed from the wait queue) and
	 * the callback will never be invoked again for this event.
	 */

	switch (status) {
	case WQ_SLEEP:
		we->tm->timeout_ev = cq_main_insert(we->tm->delay, wq_timed_out, we);
		return;
	case WQ_EXCLUSIVE:
		s_critical("weird status WQ_EXCLUSIVE on timeout invocation of %s()",
			stacktrace_function_name(we->cb));
		/* FALL THROUGH */
	case WQ_REMOVE:
		hash_list_remove(hl, we);

		/*
		 * Clean up the table if it ends up being empty.
		 */

		if (0 == hash_list_length(hl)) {
			hash_list_free(&hl);
			htable_remove(waitqueue, we->key);
		}

		wq_event_free(we);
		return;
	}

	g_assert_not_reached();
}
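
For context, the timeout event cleared by cq_zero() above is armed when the caller goes to sleep. A hedged sketch of the registration side, assuming a wq_sleep_timeout() routine taking the key, a delay in milliseconds, the callback and its argument (name and signature are assumptions):

/*
 * Sketch: wait on "key" with a 5-second timeout, reusing my_wakeup()
 * from the first example.  wq_sleep_timeout() is assumed to arm the
 * callout-queue event that wq_timed_out() later clears or re-arms.
 */
static void
start_waiting(const void *key, struct my_waiter *w)
{
	w->event = wq_sleep_timeout(key, 5000, my_wakeup, w);
}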
Example #3
/**
 * Flush current /QH2.
 *
 * Depending on how the QH2 builder is configured, this either sends the
 * message to the target node or invokes a processing callback.
 */
static void
g2_build_qh2_flush(struct g2_qh2_builder *ctx)
{
	pmsg_t *mb;

	g_assert(ctx != NULL);
	g_assert(ctx->t != NULL);
	g_assert((ctx->n != NULL) ^ (ctx->cb != NULL));

	/*
	 * Restore the order of children in the root packet to be the order we
	 * used when we added the nodes, since we prepend new children.
	 */

	g2_tree_reverse_children(ctx->t);

	/*
	 * If sending over UDP, ask for reliable delivery of the query hit.
 * To be able to monitor the fate of the message, we associate a free
	 * routine to it.
	 */

	if (ctx->to_udp) {
		struct g2_qh2_pmsg_info *pmi;

		WALLOC0(pmi);
		pmi->magic = G2_QH2_PMI_MAGIC;
		pmi->hub_id = nid_ref(NODE_ID(ctx->hub));
		mb = g2_build_pmsg_extended(ctx->t, g2_qh2_pmsg_free, pmi);
		pmsg_mark_reliable(mb);
	} else {
		mb = g2_build_pmsg(ctx->t);
	}

	if (GNET_PROPERTY(g2_debug) > 3) {
		g_debug("%s(): flushing the following hit for "
			"Q2 #%s to %s%s (%d bytes):",
			G_STRFUNC, guid_hex_str(ctx->muid),
			NULL == ctx->n ?
				stacktrace_function_name(ctx->cb) : node_infostr(ctx->n),
			NULL == ctx->n ? "()" : "", pmsg_size(mb));
		g2_tfmt_tree_dump(ctx->t, stderr, G2FMT_O_PAYLOAD | G2FMT_O_PAYLEN);
	}

	if (ctx->n != NULL)
		g2_node_send(ctx->n, mb);
	else
		(*ctx->cb)(mb, ctx->arg);

	ctx->messages++;
	ctx->current_size = 0;
	g2_tree_free_null(&ctx->t);
}
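
When the builder is configured with a processing callback instead of a target node, the flushed message is handed to (*ctx->cb)(mb, ctx->arg). A hedged sketch of such a callback; the (pmsg_t *, void *) signature is inferred from that call, and struct my_search plus its helper are illustrative:

/*
 * Sketch of a QH2 processing callback: takes ownership of the flushed
 * message block and queues it for local delivery.
 */
static void
my_qh2_handler(pmsg_t *mb, void *arg)
{
	struct my_search *sr = arg;		/* Hypothetical context */

	my_search_enqueue(sr, mb);		/* Hypothetical consumer */
}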
Example #4
/**
 * Remove an event from the queue.
 */
static void
wq_remove(wq_event_t *we)
{
	hash_list_t *hl;

	wq_event_check(we);

	hl = htable_lookup(waitqueue, we->key);
	if (NULL == hl) {
		s_critical("attempt to remove event %s() on unknown key %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (NULL == hash_list_remove(hl, we)) {
		s_critical("attempt to remove unknown event %s() on %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (0 == hash_list_length(hl)) {
		hash_list_free(&hl);
		htable_remove(waitqueue, we->key);
	}

	wq_event_free(we);
}
Example #5
/**
 * Hash list iterator callback to free and remove waiting events.
 */
static bool
wq_free_waiting(void *key, void *unused_data)
{
	wq_event_t *we = key;

	wq_event_check(we);
	(void) unused_data;

	s_warning("leaked waiting event %s() on %p",
		stacktrace_function_name(we->cb), we->key);

	wq_event_free(we);
	return TRUE;
}
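
This callback is meant to be fed to a removing iterator at shutdown time, its TRUE return asking for the item's removal. A hedged sketch of that call, assuming hash_list_foreach_remove() drops every item for which the callback returns TRUE:

/*
 * Sketch: purge and free all leaked events from a wait-queue list.
 */
static void
wq_purge(hash_list_t *hl)
{
	hash_list_foreach_remove(hl, wq_free_waiting, NULL);
	hash_list_free(&hl);
}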
Example #6
/**
 * RPC timeout callback.
 */
static void
g2_rpc_timeout(cqueue_t *cq, void *obj)
{
	struct g2_rpc *gr = obj;

	g2_rpc_check(gr);

	if (GNET_PROPERTY(g2_rpc_debug) > 1) {
		g_debug("%s(): /%s RPC to %s timed out, calling %s()",
			G_STRFUNC, g2_msg_type_name(gr->key.type),
			host_addr_to_string(gr->key.addr),
			stacktrace_function_name(gr->cb));
	}

	cq_zero(cq, &gr->timeout_ev);
	(*gr->cb)(NULL, NULL, gr->arg);
	g2_rpc_free(gr, FALSE);
}
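
On timeout the RPC callback is thus invoked with a NULL node and a NULL message tree. A hedged sketch of a completion callback handling both outcomes; the (node, tree, arg) signature is inferred from the invocations in these examples, and the my_rpc_* helpers are illustrative:

/*
 * Sketch of a G2 RPC completion callback: a NULL node/tree pair
 * signals a timeout, per g2_rpc_timeout() above.
 */
static void
my_rpc_done(const gnutella_node_t *n, const g2_tree_t *t, void *arg)
{
	struct my_rpc_ctx *ctx = arg;	/* Hypothetical context */

	if (NULL == n) {
		my_rpc_failed(ctx);			/* Timed out, no answer */
		return;
	}

	my_rpc_handle_reply(ctx, t);
}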
Example #7
/**
 * Ensure we do not have a check/hook installed already, otherwise loudly
 * warn once, as this is probably not intended!
 */
static void
pmsg_no_presend_check(const pmsg_t * const mb, const char *caller)
{
	/*
	 * Because m_check and m_hook are in the same union and have the same
	 * memory size, it is sufficient to check for one field being non-NULL.
	 */

	if G_LIKELY(NULL == mb->m_u.m_check)
		return;

	s_carp_once("%s(): mb=%p (%d byte%s, prio=%u, refcnt=%u, flags=0x%x)"
		" already has %s %s()",
		caller, mb, pmsg_size(mb), plural(pmsg_size(mb)),
		mb->m_prio, mb->m_refcnt, mb->m_flags,
		(mb->m_flags & PMSG_PF_HOOK) ? "transmit hook" : "can-send callback",
		stacktrace_function_name(mb->m_u.m_check));
}
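
The reasoning in the comment holds because both members occupy the same union slot. A minimal standalone demo of that aliasing argument, with illustrative types (the real pmsg_t layout is not shown here):

#include <stdio.h>

typedef void (*check_fn_t)(void);	/* Illustrative callback types */
typedef void (*hook_fn_t)(int);

struct msg {
	union {
		check_fn_t m_check;			/* Can-send callback */
		hook_fn_t  m_hook;			/* Transmit hook */
	} m_u;
};

int
main(void)
{
	struct msg mb = { .m_u = { .m_check = NULL } };

	/* Checking one member tells whether either role was installed */
	printf("installed: %s\n", mb.m_u.m_check != NULL ? "yes" : "no");
	return 0;
}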
Example #8
/**
 * Trigger callback and then put the watchdog to sleep, ignoring any desire
 * from the callback to re-arm the watchdog.
 *
 * @return TRUE if we stopped the watchdog, FALSE if it was already asleep,
 * in which case the trigger was not invoked.
 */
bool
wd_expire(watchdog_t *wd)
{
	watchdog_check(wd);

	if (NULL == wd->ev)
		return FALSE;

	cq_cancel(&wd->ev);
	(*wd->trigger)(wd, wd->arg);

	if (wd->ev != NULL) {
		g_critical("%s(): "
			"watchdog \"%s\" re-armed within %s() callback, turning it off",
			G_STRFUNC, wd_name(wd), stacktrace_function_name(wd->trigger));
		cq_cancel(&wd->ev);		/* Actually turn it off, as logged */
	}

	return TRUE;
}
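
A hedged sketch of a trigger compatible with wd_expire(); the bool return (TRUE meaning "re-arm me") is an assumption about the trigger type, and wd_expire() above ignores it anyway, complaining only if the callback re-armed the watchdog by itself:

/*
 * Sketch: a trigger that performs cleanup and does not re-arm the
 * watchdog, so wd_expire() will not complain.
 */
static bool
my_trigger(watchdog_t *wd, void *arg)
{
	struct my_session *s = arg;		/* Hypothetical context */

	(void) wd;
	my_session_close(s);			/* Hypothetical cleanup */
	return FALSE;					/* Do not ask for re-arming */
}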
Example #9
/**
 * Notification that a message was received that could be the answer to
 * a pending RPC.
 *
 * @param n		the node from which we got the message
 * @param t		the received message tree
 *
 * @return TRUE if the message was indeed an RPC reply, FALSE otherwise.
 */
bool
g2_rpc_answer(const gnutella_node_t *n, const g2_tree_t *t)
{
	struct g2_rpc *gr;
	struct g2_rpc_key key;
	enum g2_msg type;

	type = g2_msg_name_type(g2_tree_name(t));

	key.type = g2_rpc_send_type(type);
	key.addr = n->addr;

	gr = hevset_lookup(g2_rpc_pending, &key);

	if (NULL == gr) {
		/*
		 * No known RPC, but wait... we can receive a /QKA when we issue a /Q2
		 * and the query key we knew for the remote host has expired, hence
		 * we must also check whether a /Q2 is pending in that case.
		 * Once again, the lack of MUID in these messages is a handicap.
		 */

		if (G2_MSG_QKA == type) {
			key.type = G2_MSG_Q2;
			gr = hevset_lookup(g2_rpc_pending, &key);
			if (gr != NULL)
				goto found;		/* Sent a /Q2, got a /QKA back */
		}

		if (GNET_PROPERTY(g2_rpc_debug) > 1) {
			g_debug("%s(): unexpected /%s RPC reply from %s",
				G_STRFUNC, g2_msg_type_name(key.type), node_infostr(n));
		}

		return FALSE;
	}

found:

	/*
	 * Got a reply for an RPC we sent, based solely on message type and
	 * source address of the message.  This is weak, but G2 works like that.
	 *
	 * Weakness comes from the fact that we cannot have multiple RPCs with
	 * the same IP address but towards different ports, nor have concurrent
	 * RPCs with the same host for several different but similar requests
	 * (although this can be viewed as anti-hammering; servers should protect
	 * against that in different ways, a crippled protocol not being an answer).
	 *
	 * The only transaction where we could use the MUID is /Q2 -> /QA but we
	 * leave that check to the GUESS layer and make sure here that we have only
	 * one single RPC transaction at a time with a given IP address.
	 */

	if (GNET_PROPERTY(g2_rpc_debug) > 2) {
		g_debug("%s(): /%s RPC to %s got a /%s reply, calling %s()",
			G_STRFUNC, g2_msg_type_name(gr->key.type),
			host_addr_to_string(gr->key.addr), g2_tree_name(t),
			stacktrace_function_name(gr->cb));
	}

	(*gr->cb)(n, t, gr->arg);
	g2_rpc_free(gr, FALSE);

	return TRUE;
}
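
The matching hinges on g2_rpc_send_type() mapping a reply type back to the request type that elicited it. A heavily hedged sketch of what such a mapping could look like; only the /QKA-may-answer-/Q2 special case is evidenced above, and /QKR as the usual /QKA trigger is an assumption:

/*
 * Illustrative reply-to-request mapping; the actual table in the
 * source may differ.
 */
static enum g2_msg
my_rpc_send_type(enum g2_msg reply)
{
	switch (reply) {
	case G2_MSG_QA:		return G2_MSG_Q2;	/* /QA answers /Q2 */
	case G2_MSG_QKA:	return G2_MSG_QKR;	/* /QKA answers /QKR */
	default:			return reply;		/* Assumed symmetric */
	}
}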
Example #10
/**
 * Read value from database file, returning a pointer to the allocated
 * deserialized data.  These data can be modified freely and stored back,
 * but their lifetime will not exceed that of the next call to a dbmw
 * operation on the same descriptor.
 *
 * User code does not need to bother with freeing the allocated data; this
 * is managed directly by the DBM wrapper.
 *
 * @param dw		the DBM wrapper
 * @param key		the key (constant-width, determined at open time)
 * @param lenptr	if non-NULL, writes length of (deserialized) value
 *
 * @return pointer to value, or NULL if it was either not found or the
 * deserialization failed.
 */
G_GNUC_HOT void *
dbmw_read(dbmw_t *dw, const void *key, size_t *lenptr)
{
	struct cached *entry;
	dbmap_datum_t dval;

	dbmw_check(dw);
	g_assert(key);

	dw->r_access++;

	entry = map_lookup(dw->values, key);
	if (entry) {
		if (dbg_ds_debugging(dw->dbg, 5, DBG_DSF_CACHING | DBG_DSF_ACCESS)) {
			dbg_ds_log(dw->dbg, dw, "%s: read cache hit on %s key=%s%s",
				G_STRFUNC, entry->dirty ? "dirty" : "clean",
				dbg_ds_keystr(dw->dbg, key, (size_t) -1),
				entry->absent ? " (absent)" : "");
		}

		dw->r_hits++;
		if (lenptr)
			*lenptr = entry->len;
		return entry->data;
	}

	/*
	 * Not cached, must read from DB.
	 */

	dw->ioerr = FALSE;
	dval = dbmap_lookup(dw->dm, key);

	if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;
		s_warning_once_per(LOG_PERIOD_SECOND,
			"DBMW \"%s\" I/O error whilst reading entry: %s",
			dw->name, dbmap_strerror(dw->dm));
		return NULL;
	} else if (NULL == dval.data)
		return NULL;	/* Not found in DB */

	/*
	 * Value was found, allocate a cache entry object for it.
	 */

	WALLOC0(entry);

	/*
	 * Deserialize data if needed.
	 */

	if (dw->unpack) {
		/*
		 * Allocate cache entry arena to hold the deserialized version.
		 */

		entry->data = walloc(dw->value_size);
		entry->len = dw->value_size;

		bstr_reset(dw->bs, dval.data, dval.len, BSTR_F_ERROR);

		if (!dbmw_deserialize(dw, dw->bs, entry->data, dw->value_size)) {
			s_critical("DBMW \"%s\" deserialization error in %s(): %s",
				dw->name, stacktrace_function_name(dw->unpack),
				bstr_error(dw->bs));
			/* Not calling value free routine on deserialization failures */
			wfree(entry->data, dw->value_size);
			WFREE(entry);
			return NULL;
		}

		if (lenptr)
			*lenptr = dw->value_size;
	} else {
		g_assert(dw->value_size >= dval.len);

		if (dval.len) {
			entry->len = dval.len;
			entry->data = wcopy(dval.data, dval.len);
		} else {
			entry->data = NULL;
			entry->len = 0;
		}

		if (lenptr)
			*lenptr = dval.len;
	}

	g_assert((entry->len != 0) == (entry->data != NULL));

	/*
	 * Insert into cache.
	 */

	(void) allocate_entry(dw, key, entry);

	if (dbg_ds_debugging(dw->dbg, 4, DBG_DSF_CACHING)) {
		dbg_ds_log(dw->dbg, dw, "%s: cached %s key=%s%s",
			G_STRFUNC, entry->dirty ? "dirty" : "clean",
			dbg_ds_keystr(dw->dbg, key, (size_t) -1),
			entry->absent ? " (absent)" : "");
	}

	return entry->data;
}
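
Typical usage, per the lifetime rules documented above: the returned value may be modified in place but must be written back (or copied) before the next dbmw operation on the same descriptor. In this sketch struct my_value is illustrative and dbmw_write() is assumed to take (dw, key, value, length):

/*
 * Sketch: read-modify-write of a cached counter value.
 */
static void
bump_counter(dbmw_t *dw, const void *key)
{
	size_t len;
	struct my_value *v;

	v = dbmw_read(dw, key, &len);
	if (NULL == v)
		return;		/* Not found, or I/O / deserialization error */

	v->counter++;					/* Modify freely... */
	dbmw_write(dw, key, v, len);	/* ...then store back */
}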
Example #11
/**
 * Write back cached value to disk.
 * @return TRUE on success
 */
static bool
write_back(dbmw_t *dw, const void *key, struct cached *value)
{
	dbmap_datum_t dval;
	bool ok;

	g_assert(value->dirty);

	if (value->absent) {
		/* Key not present, value is null item */
		dval.data = NULL;
		dval.len = 0;
	} else {
		/*
		 * Serialize value into our reused message block if a
		 * serialization routine was provided.
		 */

		if (dw->pack) {
			pmsg_reset(dw->mb);
			(*dw->pack)(dw->mb, value->data);

			dval.data = pmsg_start(dw->mb);
			dval.len = pmsg_size(dw->mb);

			/*
			 * We allocated the message block one byte larger than the
			 * maximum size, in order to detect unexpected serialization
			 * overflows.
			 */

			if (dval.len > dw->value_data_size) {
				/* Don't s_carp() as this is asynchronous wrt data change */
				s_critical("DBMW \"%s\" serialization overflow in %s() "
					"whilst flushing dirty entry",
					dw->name, stacktrace_function_name(dw->pack));
				return FALSE;
			}
		} else {
			dval.data = value->data;
			dval.len = value->len;
		}
	}

	/*
	 * If cached entry is absent, delete the key.
	 * Otherwise store the serialized value.
	 *
	 * Dirty bit is cleared on success.
	 */

	if (
		dbg_ds_debugging(dw->dbg, 1,
			DBG_DSF_CACHING | DBG_DSF_UPDATE | DBG_DSF_INSERT | DBG_DSF_DELETE)
	) {
		dbg_ds_log(dw->dbg, dw, "%s: %s dirty value (%zu byte%s) key=%s",
			G_STRFUNC, value->absent ? "deleting" : "flushing",
			dval.len, plural(dval.len),
			dbg_ds_keystr(dw->dbg, key, (size_t) -1));
	}

	dw->ioerr = FALSE;
	ok = value->absent ?
		dbmap_remove(dw->dm, key) : dbmap_insert(dw->dm, key, dval);

	if (ok) {
		value->dirty = FALSE;
	} else if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;
		s_warning("DBMW \"%s\" I/O error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	} else {
		s_warning("DBMW \"%s\" error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	}

	return ok;
}
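
For reference, a hedged sketch of a serialization routine matching the (*dw->pack)(dw->mb, value->data) convention used above; the field layout and the pmsg_write_be32() / pmsg_write_u8() helpers are assumptions:

/*
 * Sketch of a DBMW value serializer, writing into the reused
 * message block.
 */
static void
my_value_pack(pmsg_t *mb, const void *data)
{
	const struct my_value *v = data;	/* Hypothetical value type */

	pmsg_write_be32(mb, v->counter);
	pmsg_write_u8(mb, v->flags);
}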
Example #12
/**
 * Iterate over the DB, invoking the callback on each item along with the
 * supplied argument and removing the item when the callback returns TRUE.
 *
 * @return the number of removed entries.
 */
size_t
dbmw_foreach_remove(dbmw_t *dw, dbmw_cbr_t cbr, void *arg)
{
	struct foreach_ctx ctx;
	struct cache_foreach_ctx fctx;
	size_t pruned;

	dbmw_check(dw);

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
		dbg_ds_log(dw->dbg, dw, "%s: starting with %s(%p)", G_STRFUNC,
			stacktrace_function_name(cbr), arg);
	}

	/*
	 * Before iterating we flush the deleted keys we know about in the cache
	 * and whose deletion was deferred, so that the underlying map will
	 * not have to iterate on them.
	 */

	dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_DELETED_ONLY);

	/*
	 * Some values may be present only in the cache.  Hence we clear all
	 * marks in the cache and each traversed value that happens to be
	 * present in the cache will be marked as "traversed".
	 *
	 * We flushed deleted keys above, but that does not remove them from
	 * the cache structure.  We don't need to traverse these after iterating
	 * on the map, so we make sure they are artificially set to "traversed".
	 */

	ctx.u.cbr = cbr;
	ctx.arg = arg;
	ctx.dw = dw;

	map_foreach(dw->values, cache_reset_before_traversal, NULL);
	pruned = dbmap_foreach_remove(dw->dm, dbmw_foreach_remove_trampoline, &ctx);

	ZERO(&fctx);
	fctx.removing = TRUE;
	fctx.foreach = &ctx;
	fctx.u.cbr = dbmw_foreach_remove_trampoline;

	/*
	 * Continue traversal with all the cached entries that were not traversed
	 * already because they do not exist in the underlying map.
	 *
	 * We count these and remember how many there are so that we can determine
	 * the correct overall item count after an iteration without having to
	 * flush all the dirty values!
	 *
	 * Any cached entry that needs to be removed will be marked as such
	 * and we'll complete processing by discarding from the cache all
	 * the entries that have been marked as "removable" during the traversal.
	 */

	map_foreach(dw->values, cache_finish_traversal, &fctx);
	map_foreach_remove(dw->values, cache_free_removable, dw);
	dw->cached = fctx.cached;
	dw->count_needs_sync = FALSE;	/* We just counted items the slow way! */

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
		dbg_ds_log(dw->dbg, dw, "%s: done with %s(%p): "
			"pruned %zu from dbmap, %zu from cache (%zu total), "
			"has %zu unflushed entr%s in cache",
			G_STRFUNC,
			stacktrace_function_name(cbr), arg,
			pruned, fctx.removed, pruned + fctx.removed,
			dw->cached, plural_y(dw->cached));
	}

	return pruned + fctx.removed;
}
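
A hedged sketch of a dbmw_cbr_t removal callback, its (key, value, length, arg) signature inferred from the invocations in dbmw_foreach_common() below; the expiry logic and struct my_value are illustrative. It would be invoked as dbmw_foreach_remove(dw, prune_old, &cutoff).

/*
 * Sketch: prune entries older than a cutoff time.  Returning TRUE
 * asks the iterator to remove the traversed item.
 */
static bool
prune_old(void *key, void *value, size_t len, void *arg)
{
	const struct my_value *v = value;
	const time_t *cutoff = arg;

	(void) key;
	(void) len;

	return v->last_seen < *cutoff;	/* TRUE => remove this entry */
}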
Example #13
/**
 * Common code for dbmw_foreach_trampoline() and
 * dbmw_foreach_remove_trampoline().
 */
static bool
dbmw_foreach_common(bool removing, void *key, dbmap_datum_t *d, void *arg)
{
	struct foreach_ctx *ctx = arg;
	dbmw_t *dw = ctx->dw;
	struct cached *entry;

	dbmw_check(dw);

	entry = map_lookup(dw->values, key);
	if (entry != NULL) {
		/*
		 * Key / value pair is present in the cache.
		 *
		 * This affects us in two ways:
		 *
		 *   - We may already know that the key was deleted, in which case
		 *     that entry is just skipped: no further access is possible
		 *     through DBMW until that key is recreated.  We still return
		 *     TRUE to make sure the lower layers will delete the entry
		 *     physically, since deletion has not been flushed yet (that's
		 *     the reason we're still iterating on it).
		 *
		 *   - Should the cached key need to be deleted (as determined by
		 *     the user callback), we make sure we delete the entry in the
		 *     cache upon callback return.
		 *
		 * Because we sync the cache (the dirty deleted items only), we should
		 * normally never iterate on a deleted entry coming from the
		 * underlying database, hence we loudly complain!
		 */

		entry->traversed = TRUE;	/* Signal we iterated on cached value */

		if (entry->absent) {
			s_carp("%s(): DBMW \"%s\" iterating over a %s absent key in cache!",
				G_STRFUNC, dw->name, entry->dirty ? "dirty" : "clean");
			return TRUE;		/* Key was already deleted, info cached */
		}

		if (removing) {
			bool status;
			status = (*ctx->u.cbr)(key, entry->data, entry->len, ctx->arg);
			if (status) {
				entry->removable = TRUE;	/* Discard it after traversal */
			}
			return status;
		} else {
			(*ctx->u.cb)(key, entry->data, entry->len, ctx->arg);
			return FALSE;
		}
	} else {
		bool status = FALSE;
		void *data = d->data;
		size_t len = d->len;

		/*
		 * Deserialize data if needed, but do not cache this value.
		 * Iterating over the map must not disrupt the cache.
		 */

		if (dw->unpack) {
			len = dw->value_size;
			data = walloc(len);

			bstr_reset(dw->bs, d->data, d->len, BSTR_F_ERROR);

			if (!dbmw_deserialize(dw, dw->bs, data, len)) {
				s_critical("DBMW \"%s\" deserialization error in %s(): %s",
					dw->name,
					stacktrace_function_name(dw->unpack),
					bstr_error(dw->bs));
				/* Not calling value free routine on deserialization failures */
				wfree(data, len);
				return FALSE;
			}
		}

		if (removing) {
			status = (*ctx->u.cbr)(key, data, len, ctx->arg);
		} else {
			(*ctx->u.cb)(key, data, len, ctx->arg);
		}

		if (dw->unpack) {
			if (dw->valfree)
				(*dw->valfree)(data, len);
			wfree(data, len);
		}

		return status;
	}
}