Example #1
/**
 * Let sleepers know about the wake-up condition.
 *
 * @param hl		the list of waiting parties
 * @param data		waking-up data to supply to callback
 */
static void
wq_notify(hash_list_t *hl, void *data)
{
	hash_list_iter_t *iter;
	size_t i, count;

	iter = hash_list_iterator(hl);
	count = hash_list_length(hl);
	i = 0;

	while (hash_list_iter_has_next(iter)) {
		wq_event_t *we = hash_list_iter_next(iter);
		wq_status_t status;

		wq_event_check(we);

		/*
		 * Stop iteration in case callbacks have called wq_sleep() on the
		 * same waiting queue we're iterating on and added items to the list.
		 * This sanity check ensures we're not going to loop forever with a
		 * callback systematically appending something.
		 */

		if (i++ >= count) {
			/* Something is odd, let them know about the calling stack */
			s_critical("stopping after processing %zu item%s (list now has %u)",
				count, plural(count), hash_list_length(hl));
			break;		/* Stop iterating, as promised above */
		}

		status = (*we->cb)(we->arg, data);

		switch (status) {
		case WQ_SLEEP:
			continue;		/* Still sleeping, leave in the list */
		case WQ_EXCLUSIVE:
		case WQ_REMOVE:
			goto remove;
		}

		s_error("invalid status %d returned by %s()",
			status, stacktrace_function_name(we->cb));

	remove:
		hash_list_iter_remove(iter);
		wq_event_free(we);

		/*
		 * The callback may decide that we shouldn't continue notifying
		 * other sleepers (because, for instance, it knows it grabbed a
		 * resource that the others would need).  This is used as an early
		 * termination of the loop.
		 */

		if (WQ_EXCLUSIVE == status)
			break;
	}

	hash_list_iter_release(&iter);
}
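
To illustrate the contract wq_notify() expects from its callbacks, here is a minimal sketch of a wake-up callback matching the (*we->cb)(we->arg, data) invocation above. The struct waiter context and try_grab_resource() helper are hypothetical; only the callback signature and the wq_status_t return values come from the code shown.

struct waiter {					/* hypothetical per-sleeper context */
	int id;
};

static bool try_grab_resource(struct waiter *w);	/* hypothetical */

static wq_status_t
resource_waiter(void *arg, void *data)
{
	struct waiter *w = arg;

	(void) data;				/* wake-up data unused in this sketch */

	if (!try_grab_resource(w))
		return WQ_SLEEP;		/* Resource still busy: stay in the list */

	/*
	 * Resource grabbed: returning WQ_EXCLUSIVE removes this event and
	 * also stops wq_notify() from waking the remaining sleepers, which
	 * would contend on the same resource anyway.  Returning WQ_REMOVE
	 * instead would remove this event but let notification continue.
	 */
	return WQ_EXCLUSIVE;
}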
Example #2
/**
 * Callout queue callback fired when a waiting event times out.
 */
static void
wq_timed_out(cqueue_t *cq, void *arg)
{
	wq_event_t *we = arg;
	hash_list_t *hl;
	wq_status_t status;

	wq_event_check(we);
	g_assert(we->tm != NULL);

	cq_zero(cq, &we->tm->timeout_ev);
	hl = htable_lookup(waitqueue, we->key);

	g_assert(hl != NULL);

	/*
	 * Invoke the callback with the sentinel data signalling a timeout.
	 */

	status = (*we->cb)(we->arg, WQ_TIMED_OUT);

	/*
	 * When the callback returns WQ_SLEEP, we re-instantiate the initial
	 * timeout.
	 *
	 * Otherwise the event is discarded (removed from the wait queue) and
	 * the callback will never be invoked again for this event.
	 */

	switch (status) {
	case WQ_SLEEP:
		we->tm->timeout_ev = cq_main_insert(we->tm->delay, wq_timed_out, we);
		return;
	case WQ_EXCLUSIVE:
		s_critical("weird status WQ_EXCLUSIVE on timeout invocation of %s()",
			stacktrace_function_name(we->cb));
		/* FALL THROUGH */
	case WQ_REMOVE:
		hash_list_remove(hl, we);

		/*
		 * Clean up the table if it ends up empty.
		 */

		if (0 == hash_list_length(hl)) {
			hash_list_free(&hl);
			htable_remove(waitqueue, we->key);
		}

		wq_event_free(we);
		return;
	}

	g_assert_not_reached();
}
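
The WQ_SLEEP re-arming behaviour above lets a sleeper keep waiting through several timeouts. A minimal sketch of such a callback follows, assuming a hypothetical retry counter in the caller-supplied context; the WQ_TIMED_OUT sentinel and the status semantics are those handled by wq_timed_out().

struct retry_ctx {				/* hypothetical caller context */
	int retries_left;
};

static wq_status_t
wait_bounded(void *arg, void *data)
{
	struct retry_ctx *rc = arg;

	if (WQ_TIMED_OUT == data) {
		if (rc->retries_left-- > 0)
			return WQ_SLEEP;	/* Re-arms the original delay */
		return WQ_REMOVE;		/* Give up: the event is freed for good */
	}

	return WQ_REMOVE;			/* Genuine wake-up: condition met */
}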
Example #3
/**
 * Remove an event from the queue.
 */
static void
wq_remove(wq_event_t *we)
{
	hash_list_t *hl;

	wq_event_check(we);

	hl = htable_lookup(waitqueue, we->key);
	if (NULL == hl) {
		s_critical("attempt to remove event %s() on unknown key %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (NULL == hash_list_remove(hl, we)) {
		s_critical("attempt to remove unknown event %s() on %p",
			stacktrace_function_name(we->cb), we->key);
	} else if (0 == hash_list_length(hl)) {
		hash_list_free(&hl);
		htable_remove(waitqueue, we->key);
	}

	wq_event_free(we);
}
Example #4
/**
 * Time a sorting routine.
 *
 * @param f		the function whose average execution time we want to measure
 * @param vt	describes the data we're sorting, given as input to ``f''
 * @param loops	number of loops to perform, possibly updated (increased)
 *
 * @return the average time in seconds for a single iteration.
 */
static double
vsort_timeit(vsort_timer_t f, struct vsort_timing *vt, size_t *loops)
{
	double start, end;
	size_t n = *loops;
	double elapsed = 0.0, telapsed = 0.0;
	uint attempts = 0;
	tm_t tstart, tend;

retry:
	/*
	 * Safety against broken clocks which would stall the process forever if
	 * we were to continue.
	 */

	if (attempts++ >= VSORT_ATTEMPTS) {
		s_critical("%s(): "
			"either CPU is too fast or kernel clock resultion too low: "
			"elapsed time is %F secs after %zu loops",
			G_STRFUNC, elapsed, n);
		goto done;
	}

	/*
	 * This is a pure CPU grinding algorithm, hence we monitor the amount of
	 * CPU used and not the wall clock: if the process gets suspended in the
	 * middle of the test, that would completely taint the results.
	 *
	 * However, in multi-threaded processes, the accounted CPU time is for
	 * the whole process, and this is not fair for tqsort() which uses multiple
	 * threads in order to minimize the overall elapsed time.
	 *
	 * Hence we measure both the CPU time and the wall-clock time and pick
	 * the lowest figure.
	 */

	(*f)(vt, 1);		/* Blank run to offset effect of memory caching */

	tm_now_exact(&tstart);
	tm_cputime(&start, NULL);
	(*f)(vt, n);
	tm_cputime(&end, NULL);
	tm_now_exact(&tend);

	elapsed = end - start;
	telapsed = tm_elapsed_f(&tend, &tstart);

	/*
	 * If the machine is too fast (or the clock resolution too coarse),
	 * double the number of loops and retry.
	 */

	if (elapsed < VSORT_MIN_SECS) {
		*loops = n = n * 2;
		goto retry;
	}

	elapsed = MIN(elapsed, telapsed);

done:
	return elapsed / n;
}
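
Any timer passed as ``f'' must match the (*f)(vt, n) calls above, i.e. run the measured operation n times over the data described by vt. A hypothetical qsort() timer is sketched below; the field names inside struct vsort_timing are assumptions.

static void
qsort_timer(struct vsort_timing *vt, size_t loops)
{
	size_t i;

	for (i = 0; i < loops; i++) {
		/* Restore identical unsorted input before each run (assumed fields) */
		memcpy(vt->copy, vt->data, vt->items * vt->isize);
		qsort(vt->copy, vt->items, vt->isize, vt->cmp);
	}
}

A call such as vsort_timeit(qsort_timer, &vt, &loops) then yields the per-iteration cost, with loops possibly doubled several times by the retry logic.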
Example #5
/**
 * Read value from database file, returning a pointer to the allocated
 * deserialized data.  These data can be modified freely and stored back,
 * but their lifetime will not exceed that of the next call to a dbmw
 * operation on the same descriptor.
 *
 * User code does not need to bother with freeing the allocated data; this
 * is managed directly by the DBM wrapper.
 *
 * @param dw		the DBM wrapper
 * @param key		the key (constant-width, determined at open time)
 * @param lenptr	if non-NULL, writes length of (deserialized) value
 *
 * @return pointer to value, or NULL if it was either not found or the
 * deserialization failed.
 */
G_GNUC_HOT void *
dbmw_read(dbmw_t *dw, const void *key, size_t *lenptr)
{
	struct cached *entry;
	dbmap_datum_t dval;

	dbmw_check(dw);
	g_assert(key);

	dw->r_access++;

	entry = map_lookup(dw->values, key);
	if (entry) {
		if (dbg_ds_debugging(dw->dbg, 5, DBG_DSF_CACHING | DBG_DSF_ACCESS)) {
			dbg_ds_log(dw->dbg, dw, "%s: read cache hit on %s key=%s%s",
				G_STRFUNC, entry->dirty ? "dirty" : "clean",
				dbg_ds_keystr(dw->dbg, key, (size_t) -1),
				entry->absent ? " (absent)" : "");
		}

		dw->r_hits++;
		if (lenptr)
			*lenptr = entry->len;
		return entry->data;
	}

	/*
	 * Not cached, must read from DB.
	 */

	dw->ioerr = FALSE;
	dval = dbmap_lookup(dw->dm, key);

	if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;
		s_warning_once_per(LOG_PERIOD_SECOND,
			"DBMW \"%s\" I/O error whilst reading entry: %s",
			dw->name, dbmap_strerror(dw->dm));
		return NULL;
	} else if (NULL == dval.data)
		return NULL;	/* Not found in DB */

	/*
	 * Value was found, allocate a cache entry object for it.
	 */

	WALLOC0(entry);

	/*
	 * Deserialize data if needed.
	 */

	if (dw->unpack) {
		/*
		 * Allocate cache entry arena to hold the deserialized version.
		 */

		entry->data = walloc(dw->value_size);
		entry->len = dw->value_size;

		bstr_reset(dw->bs, dval.data, dval.len, BSTR_F_ERROR);

		if (!dbmw_deserialize(dw, dw->bs, entry->data, dw->value_size)) {
			s_critical("DBMW \"%s\" deserialization error in %s(): %s",
				dw->name, stacktrace_function_name(dw->unpack),
				bstr_error(dw->bs));
			/* Not calling value free routine on deserialization failures */
			wfree(entry->data, dw->value_size);
			WFREE(entry);
			return NULL;
		}

		if (lenptr)
			*lenptr = dw->value_size;
	} else {
		g_assert(dw->value_size >= dval.len);

		if (dval.len) {
			entry->len = dval.len;
			entry->data = wcopy(dval.data, dval.len);
		} else {
			entry->data = NULL;
			entry->len = 0;
		}

		if (lenptr)
			*lenptr = dval.len;
	}

	g_assert((entry->len != 0) == (entry->data != NULL));

	/*
	 * Insert into cache.
	 */

	(void) allocate_entry(dw, key, entry);

	if (dbg_ds_debugging(dw->dbg, 4, DBG_DSF_CACHING)) {
		dbg_ds_log(dw->dbg, dw, "%s: cached %s key=%s%s",
			G_STRFUNC, entry->dirty ? "dirty" : "clean",
			dbg_ds_keystr(dw->dbg, key, (size_t) -1),
			entry->absent ? " (absent)" : "");
	}

	return entry->data;
}
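
A typical read-modify-write cycle against this routine could look like the sketch below. The record type is hypothetical and dbmw_write() is assumed to be the wrapper's matching store operation; the lifetime rule (data valid only until the next dbmw call on the same descriptor) is the one documented above.

struct my_value {				/* hypothetical fixed-width record */
	uint32 counter;
};

static void
bump_counter(dbmw_t *dw, const void *key)
{
	size_t len;
	struct my_value *v = dbmw_read(dw, key, &len);

	if (NULL == v)
		return;		/* Missing key, I/O error or failed deserialization */

	v->counter++;		/* Data may be modified freely... */

	/* ...but must be stored back before the next dbmw call invalidates it */
	dbmw_write(dw, key, v, len);
}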
Example #6
/**
 * Map iterator to traverse cached entries that were not already flagged
 * as being traversed, invoking the supplied trampoline callback.
 */
static void
cache_finish_traversal(void *key, void *value, void *data)
{
	struct cached *entry = value;
	struct cache_foreach_ctx *fctx = data;
	dbmap_datum_t d;

	if (entry->traversed)
		return;

/* Local alias, #undef'd below, to keep the debugging calls terse */
#define dw	fctx->foreach->dw

	if (dbg_ds_debugging(dw->dbg, 3, DBG_DSF_ITERATOR)) {
		dbg_ds_log(dw->dbg, dw, "%s: traversing cached %s key=%s%s",
			G_STRFUNC, entry->dirty ? "dirty" : "clean",
			dbg_ds_keystr(dw->dbg, key, (size_t) -1),
			entry->absent ? " (absent)" : "");
	}

	if (entry->absent)
		return;				/* Entry not there; only its absence is cached */

	/*
	 * We should not be traversing a clean entry at this stage: if the entry
	 * is clean, it means it was flushed to the database, and we should
	 * have traversed it already.  Loudly warn, but process anyway!
	 */

	if (!entry->dirty && !fctx->warned_clean_key) {
		fctx->warned_clean_key = TRUE;
		s_critical("%s(): DBMW \"%s\" "
			"iterating via %s over a clean key in cache",
			G_STRFUNC, dw->name,
			stacktrace_routine_name(fctx->foreach->u.any, FALSE));
	}

#undef dw

	d.data = entry->data;
	d.len = entry->len;

	/*
	 * We ignore the returned value because to-be-removed data (when traversing
	 * for removal) will be marked as "removable": we can't delete them yet as
	 * we are traversing the cache structure already.
	 *
	 * For values we're keeping, we increment the cached count: these are keys
	 * present in the cache but not in the underlying database because they
	 * have not been flushed yet.  Knowing this count allows us to compute an
	 * accurate item count without flushing.
	 */

	if (fctx->removing) {
		if ((*fctx->u.cbr)(key, &d, fctx->foreach)) {
			fctx->removed++;	/* Item removed in cache_free_removable() */
			entry->removable = TRUE;
		} else {
			fctx->cached++;
		}
	} else {
		(*fctx->u.cb)(key, &d, fctx->foreach);
		fctx->cached++;
	}
}
Example #7
/**
 * Write back cached value to disk.
 * @return TRUE on success
 */
static bool
write_back(dbmw_t *dw, const void *key, struct cached *value)
{
	dbmap_datum_t dval;
	bool ok;

	g_assert(value->dirty);

	if (value->absent) {
		/* Key not present, value is null item */
		dval.data = NULL;
		dval.len = 0;
	} else {
		/*
		 * Serialize value into our reused message block if a
		 * serialization routine was provided.
		 */

		if (dw->pack) {
			pmsg_reset(dw->mb);
			(*dw->pack)(dw->mb, value->data);

			dval.data = pmsg_start(dw->mb);
			dval.len = pmsg_size(dw->mb);

			/*
			 * We allocated the message block one byte larger than the
			 * maximum size, in order to detect unexpected serialization
			 * overflows.
			 */

			if (dval.len > dw->value_data_size) {
				/* Don't s_carp() as this is asynchronous wrt data change */
				s_critical("DBMW \"%s\" serialization overflow in %s() "
					"whilst flushing dirty entry",
					dw->name, stacktrace_function_name(dw->pack));
				return FALSE;
			}
		} else {
			dval.data = value->data;
			dval.len = value->len;
		}
	}

	/*
	 * If cached entry is absent, delete the key.
	 * Otherwise store the serialized value.
	 *
	 * Dirty bit is cleared on success.
	 */

	if (
		dbg_ds_debugging(dw->dbg, 1,
			DBG_DSF_CACHING | DBG_DSF_UPDATE | DBG_DSF_INSERT | DBG_DSF_DELETE)
	) {
		dbg_ds_log(dw->dbg, dw, "%s: %s dirty value (%zu byte%s) key=%s",
			G_STRFUNC, value->absent ? "deleting" : "flushing",
			dval.len, plural(dval.len),
			dbg_ds_keystr(dw->dbg, key, (size_t) -1));
	}

	dw->ioerr = FALSE;
	ok = value->absent ?
		dbmap_remove(dw->dm, key) : dbmap_insert(dw->dm, key, dval);

	if (ok) {
		value->dirty = FALSE;
	} else if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;
		s_warning("DBMW \"%s\" I/O error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	} else {
		s_warning("DBMW \"%s\" error whilst %s dirty entry: %s",
			dw->name, value->absent ? "deleting" : "flushing",
			dbmap_strerror(dw->dm));
	}

	return ok;
}
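
A serializer installed as dw->pack must fill the supplied message block from the value, mirroring the (*dw->pack)(dw->mb, value->data) call above. A sketch follows, in which the record layout is hypothetical and pmsg_write() is assumed to append raw bytes to the message block.

struct my_value {				/* hypothetical record */
	uint32 counter;
};

static void
my_value_pack(pmsg_t *mb, const void *data)
{
	const struct my_value *v = data;

	/* pmsg_write() assumed: append the raw counter bytes to the block */
	pmsg_write(mb, &v->counter, sizeof v->counter);
}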
Example #8
/**
 * Common code for dbmw_foreach_trampoline() and
 * dbmw_foreach_remove_trampoline().
 */
static bool
dbmw_foreach_common(bool removing, void *key, dbmap_datum_t *d, void *arg)
{
	struct foreach_ctx *ctx = arg;
	dbmw_t *dw = ctx->dw;
	struct cached *entry;

	dbmw_check(dw);

	entry = map_lookup(dw->values, key);
	if (entry != NULL) {
		/*
		 * Key / value pair is present in the cache.
		 *
		 * This affects us in two ways:
		 *
		 *   - We may already know that the key was deleted, in which case
		 *     that entry is just skipped: no further access is possible
		 *     through DBMW until that key is recreated.  We still return
		 *     TRUE to make sure the lower layers will delete the entry
		 *     physically, since deletion has not been flushed yet (that's
		 *     the reason we're still iterating on it).
		 *
		 *   - Should the cached key need to be deleted (as determined by
		 *     the user callback), we make sure we delete the entry in the
		 *     cache upon callback return.
		 *
		 * Because we sync the cache (flushing the dirty deleted items), we
		 * should normally never iterate over a deleted entry coming from the
		 * underlying database, hence we loudly complain!
		 */

		entry->traversed = TRUE;	/* Signal we iterated on cached value */

		if (entry->absent) {
			s_carp("%s(): DBMW \"%s\" iterating over a %s absent key in cache!",
				G_STRFUNC, dw->name, entry->dirty ? "dirty" : "clean");
			return TRUE;		/* Key was already deleted, info cached */
		}

		if (removing) {
			bool status;
			status = (*ctx->u.cbr)(key, entry->data, entry->len, ctx->arg);
			if (status) {
				entry->removable = TRUE;	/* Discard it after traversal */
			}
			return status;
		} else {
			(*ctx->u.cb)(key, entry->data, entry->len, ctx->arg);
			return FALSE;
		}
	} else {
		bool status = FALSE;
		void *data = d->data;
		size_t len = d->len;

		/*
		 * Deserialize data if needed, but do not cache this value.
		 * Iterating over the map must not disrupt the cache.
		 */

		if (dw->unpack) {
			len = dw->value_size;
			data = walloc(len);

			bstr_reset(dw->bs, d->data, d->len, BSTR_F_ERROR);

			if (!dbmw_deserialize(dw, dw->bs, data, len)) {
				s_critical("DBMW \"%s\" deserialization error in %s(): %s",
					dw->name,
					stacktrace_function_name(dw->unpack),
					bstr_error(dw->bs));
				/* Not calling value free routine on deserialization failures */
				wfree(data, len);
				return FALSE;
			}
		}

		if (removing) {
			status = (*ctx->u.cbr)(key, data, len, ctx->arg);
		} else {
			(*ctx->u.cb)(key, data, len, ctx->arg);
		}

		if (dw->unpack) {
			if (dw->valfree)
				(*dw->valfree)(data, len);
			wfree(data, len);
		}

		return status;
	}
}
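
Callbacks dispatched through ctx->u.cb and ctx->u.cbr receive the (possibly deserialized) value, as the invocations above show. A hypothetical removal callback, where the record layout and expiry logic are assumptions, returns TRUE to have the entry deleted from both cache and database.

struct my_value {				/* hypothetical record */
	time_t expires;
};

static bool
prune_expired(void *key, void *value, size_t len, void *arg)
{
	const struct my_value *v = value;
	time_t now = *(time_t *) arg;

	(void) key;
	(void) len;

	return v->expires <= now;	/* TRUE => remove this entry */
}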