Example no. 1
std::map<std::string, int> encount(std::istream *in, const strings &searched)
{
	std::map<std::string,int> result;
	std::vector<s_info> watched;
	// insert every string from searched with an initial frequency of 0
	for (s_citer iter = searched.begin(), end = searched.end(); iter != end; ++iter)
	{
		result[*iter] = 0;
		if (iter->begin() != iter->end())
			watched.push_back(s_info(&(*iter)));
	}

	char ch;
	while (in->get(ch))
	{
		for (std::vector<s_info>::iterator iter = watched.begin(), end = watched.end(); iter != end; ++iter)
		{
			if (ch == iter->curr())
			{
				iter->current++;
				if (iter->isEnd())
				{
					result[*(iter->pstr)]++;
					iter->reset();
				}
			}
			else
			{
				// Mismatch: restart this pattern, then re-check the current
				// character, which may begin a new match.  (Patterns that
				// overlap themselves would need KMP-style failure links for
				// full correctness.)
				iter->reset();
				if (ch == iter->curr())
					iter->current++;
			}
		}
	}

	return result;
}
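
The snippet relies on helper types (strings, s_citer, s_info) declared elsewhere in its codebase. Below is a minimal sketch of plausible definitions, reconstructed from how the function uses them; the member layout and anything not referenced above is an assumption, not the original declaration.

#include <istream>
#include <map>
#include <string>
#include <vector>

typedef std::vector<std::string> strings;	// assumed alias
typedef strings::const_iterator s_citer;	// assumed alias

// Hypothetical reconstruction: tracks the partial match of one searched string.
struct s_info {
	const std::string *pstr;		// the string being watched
	std::string::size_type current;	// index of the next character to match

	explicit s_info(const std::string *p) : pstr(p), current(0) { }

	char curr() const { return (*pstr)[current]; }			// next expected character
	bool isEnd() const { return current == pstr->size(); }	// fully matched?
	void reset() { current = 0; }							// restart from the beginning
};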
Example no. 2
static void
log_lrustats(DBM *db)
{
	struct lru_cache *cache = db->cache;
	unsigned long raccesses = cache->rhits + cache->rmisses;
	unsigned long waccesses = cache->whits + cache->wmisses;

	sdbm_lru_check(cache);

	s_info("sdbm: \"%s\" LRU cache size = %ld page%s, %s writes, %s DB",
		sdbm_name(db), cache->pages, plural(cache->pages),
		cache->write_deferred ? "deferred" : "synchronous",
		db->is_volatile ? "volatile" : "persistent");
	s_info("sdbm: \"%s\" LRU read cache hits = %.2f%% on %lu request%s",
		sdbm_name(db), cache->rhits * 100.0 / MAX(raccesses, 1), raccesses,
		plural(raccesses));
	s_info("sdbm: \"%s\" LRU write cache hits = %.2f%% on %lu request%s",
		sdbm_name(db), cache->whits * 100.0 / MAX(waccesses, 1), waccesses,
		plural(waccesses));
}
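
Two details keep the logging robust: the MAX(raccesses, 1) and MAX(waccesses, 1) guards make the hit percentages well-defined even when no request was ever issued, and plural() picks the "s" suffix for the counts. A sketch of the presumed helpers (their exact definitions here are assumptions):

#define MAX(a, b)	((a) > (b) ? (a) : (b))

/* Presumed helper: returns "s" unless the count is exactly one. */
static inline const char *
plural(unsigned long n)
{
	return 1 == n ? "" : "s";
}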
Example no. 3
/**
 * Benchmark the candidate sorting routines (see vsort_init_items()) to
 * determine which is best for sorting aligned arrays with a native item
 * size of OPSIZ.
 */
void
vsort_init(int verbose)
{
	tm_t start, end;
	bool blockable = TRUE;

	STATIC_ASSERT(VSORT_HUGE_ITEMS > VSORT_ITEMS);
	STATIC_ASSERT(VSORT_ITEMS > VSORT_SMALL_ITEMS);

	if (verbose)
		s_info("benchmarking sort routines to select the best one...");

	/*
	 * Allow main thread to block during the duration of our tests.
	 * This is needed since tqsort() can create threads and block.
	 */

	if (thread_is_main() && !thread_main_is_blockable()) {
		thread_set_main(TRUE);
		blockable = FALSE;
	}

	tm_now_exact(&start);
	vsort_init_items(VSORT_HUGE_ITEMS, VSORT_HUGE, verbose, "huge");
	vsort_init_items(VSORT_ITEMS, VSORT_LARGE, verbose, "large");
	vsort_init_items(VSORT_SMALL_ITEMS, VSORT_SMALL, verbose, "small");
	tm_now_exact(&end);

	if (verbose)
		s_info("vsort() benchmarking took %F secs", tm_elapsed_f(&end, &start));

	/*
	 * Restore non-blockable main thread if needed.
	 */

	if (!blockable)
		thread_set_main(FALSE);
}
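
A hypothetical call site, assuming the vsort() entry point mirrors qsort()'s signature (the prototypes below are illustrative assumptions):

#include <stddef.h>

extern void vsort_init(int verbose);
extern void vsort(void *base, size_t nmemb, size_t size,
	int (*cmp)(const void *, const void *));

static int
int_cmp(const void *a, const void *b)
{
	int x = *(const int *) a, y = *(const int *) b;
	return (x > y) - (x < y);	/* avoids overflow of x - y */
}

int
main(void)
{
	int values[] = { 3, 1, 4, 1, 5 };

	vsort_init(1);	/* benchmark once at startup, verbosely */
	vsort(values, 5, sizeof values[0], int_cmp);	/* then use like qsort() */
	return 0;
}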
Example no. 4
/**
 * Set the page cache size.
 * @return 0 if OK, -1 on failure with errno set.
 */
int
setcache(DBM *db, long pages)
{
	struct lru_cache *cache = db->cache;
	bool wdelay;

	if (pages <= 0) {
		errno = EINVAL;
		return -1;
	}

	if (NULL == cache)
		return init_cache(db, pages, FALSE);

	sdbm_lru_check(cache);	/* Check only now: no cache exists on first call */

	/*
	 * Easiest case: the size is identical.
	 */

	if (pages == cache->pages)
		return 0;

	/*
	 * Cache size is changed.
	 *
	 * This means the arena will be reallocated, so we must invalidate the
	 * current db->pagbuf pointer, which lies within the old arena.  It is
	 * sufficient to reset db->pagbno, forcing a reload from the upper layers.
	 * Note that when the cache size is enlarged, the old page is still cached
	 * so reloading will be just a matter of recomputing db->pagbuf.  We could
	 * do so here, but cache size changes should only be infrequent.
	 *
	 * We also reset all the cache statistics, since a different cache size
	 * will imply different hit/miss ratios.
	 */

	db->pagbno = -1;		/* Current page address will become invalid */
	db->pagbuf = NULL;

	if (common_stats) {
		s_info("sdbm: \"%s\" LRU cache size %s from %ld page%s to %ld",
			sdbm_name(db), pages > cache->pages ? "increased" : "decreased",
			cache->pages, plural(cache->pages), pages);
		log_lrustats(db);
	}

	cache->rhits = cache->rmisses = 0;
	cache->whits = cache->wmisses = 0;

	/*
	 * Straightforward: the size is increased.
	 */

	if (pages > cache->pages) {
		char *new_arena = vmm_alloc(pages * DBM_PBLKSIZ);
		if (NULL == new_arena)
			return -1;
		memmove(new_arena, cache->arena, cache->pages * DBM_PBLKSIZ);
		vmm_free(cache->arena, cache->pages * DBM_PBLKSIZ);
		cache->arena = new_arena;
		cache->dirty = wrealloc(cache->dirty, cache->pages, pages);
		cache->numpag = wrealloc(cache->numpag,
			cache->pages * sizeof(long), pages * sizeof(long));
		cache->pages = pages;
		return 0;
	}

	/*
	 * Difficult: the size is decreased.
	 *
	 * The current page buffer could point in a cache area that is going
	 * to disappear, and the internal data structures must forget about
	 * all the old indices that are greater than the new limit.
	 *
	 * We do not try to optimize anything here, as this call should happen
	 * only infrequently: we flush the current cache (in case there are
	 * deferred writes), destroy the LRU cache data structures, recreate a
	 * new one and invalidate the current DB page.
	 */

	wdelay = cache->write_deferred;
	flush_dirtypag(db);
	free_cache(cache);
	return setup_cache(cache, pages, wdelay);
}
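
A small usage sketch of the documented return convention (the page count of 128 is arbitrary):

if (-1 == setcache(db, 128)) {
	/* errno is EINVAL for a non-positive page count, or whatever the
	 * allocation path left there on failure. */
	perror("setcache");
}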
Example no. 5
/**
 * Check which of qsort(), xqsort(), xsort() or smsort() is best for sorting
 * aligned arrays with a native item size of OPSIZ.  At identical performance
 * level, we prefer our own sorting algorithms instead of libc's qsort() for
 * memory allocation purposes.
 *
 * @param items		number of items to use in the sorted array
 * @param idx		index of the virtual routine to update
 * @param verbose	whether to be verbose
 * @param which		one of "huge", "large" or "small", for logging
 */
static void
vsort_init_items(size_t items, unsigned idx, int verbose, const char *which)
{
	struct vsort_testing tests[] = {
		{ vsort_qsort,	qsort,	0.0, 0, "qsort" },
		{ vsort_xqsort,	xqsort,	0.0, 2, "xqsort" },
		{ vsort_xsort,	xsort,	0.0, 1, "xsort" },
		{ vsort_tqsort,	tqsort,	0.0, 1, "tqsort" },
		{ vsort_smsort,	smsort,	0.0, 1, "smsort" },	/* Only for almost sorted */
	};
	size_t len = items * OPSIZ;
	struct vsort_timing vt;
	size_t loops, highest_loops;
	unsigned i;

	g_assert(uint_is_non_negative(idx));
	g_assert(idx < N_ITEMS(vsort_table));

	vt.data = vmm_alloc(len);
	vt.copy = vmm_alloc(len);
	vt.items = items;
	vt.isize = OPSIZ;
	vt.len = len;
	random_bytes(vt.data, len);

	highest_loops = loops = vsort_loops(items);

	/* The -1 below is to avoid benchmarking smsort() for the general case */

retry_random:
	for (i = 0; i < N_ITEMS(tests) - 1; i++) {
		tests[i].v_elapsed = vsort_timeit(tests[i].v_timer, &vt, &loops);

		if (verbose > 1) {
			s_debug("%s() took %.4f secs for %s array (%zu loops)",
				tests[i].v_name, tests[i].v_elapsed * loops, which, loops);
		}

		if (loops != highest_loops) {
			highest_loops = loops;
			/* Redo all the tests if the number of timing loops changes */
			if (i != 0)
				goto retry_random;
		}
	}

	/*
	 * When dealing with a large amount of items, redo the tests twice with
	 * another set of random bytes to make sure we're not hitting a special
	 * ordering case.
	 */

	if (items >= VSORT_ITEMS) {
		unsigned j;

		for (j = 0; j < 2; j++) {
			random_bytes(vt.data, len);

			for (i = 0; i < N_ITEMS(tests) - 1; i++) {
				tests[i].v_elapsed +=
					vsort_timeit(tests[i].v_timer, &vt, &loops);

				if (verbose > 1) {
					s_debug("%s() spent %.6f secs total for %s array",
						tests[i].v_name, tests[i].v_elapsed, which);
				}

				if (loops != highest_loops) {
					highest_loops = loops;
					/* Redo all the tests if the number of loops changes */
					s_info("%s(): restarting %s array tests with %zu loops",
						G_STRFUNC, which, loops);
					goto retry_random;
				}
			}
		}
	}

	xqsort(tests, N_ITEMS(tests) - 1, sizeof tests[0], vsort_testing_cmp);

	vsort_table[idx].v_sort = vsort_routine(tests[0].v_routine, items);

	if (verbose) {
		s_info("vsort() will use %s() for %s arrays",
			vsort_routine_name(tests[0].v_name, items), which);
	}

	/*
	 * Now sort the data, then randomly perturb them by swapping a few items
	 * so that the array is almost sorted.
	 */

	xqsort(vt.data, vt.items, vt.isize, vsort_long_cmp);
	vsort_perturb_sorted_array(vt.data, vt.items, vt.isize);

retry_sorted:
	for (i = 0; i < N_ITEMS(tests); i++) {
		tests[i].v_elapsed = vsort_timeit(tests[i].v_timer, &vt, &loops);

		if (verbose > 1) {
			s_debug("%s() on almost-sorted took %.4f secs "
				"for %s array (%zu loops)",
				tests[i].v_name, tests[i].v_elapsed * loops, which, loops);
		}

		if (loops != highest_loops) {
			highest_loops = loops;
			/* Redo all the tests if the number of timing loops changes */
			if (i != 0)
				goto retry_sorted;
		}
	}

	xqsort(tests, N_ITEMS(tests), sizeof tests[0], vsort_testing_cmp);

	vsort_table[idx].v_sort_almost = vsort_routine(tests[0].v_routine, items);

	if (verbose) {
		s_info("vsort_almost() will use %s() for %s arrays",
			vsort_routine_name(tests[0].v_name, items), which);
	}

	vmm_free(vt.data, len);
	vmm_free(vt.copy, len);
}
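
The ranking depends on vsort_testing_cmp() ordering the tests[] entries. A plausible sketch consistent with the initializers above, reading the fourth field (0 for qsort, 2 for xqsort, 1 for the rest) as a tie-breaking preference so that, per the function comment, the project's own routines win over libc's qsort() at equal speed; the field name v_prefer is an assumption:

static int
vsort_testing_cmp(const void *a, const void *b)
{
	const struct vsort_testing *va = (const struct vsort_testing *) a;
	const struct vsort_testing *vb = (const struct vsort_testing *) b;

	if (va->v_elapsed != vb->v_elapsed)
		return va->v_elapsed < vb->v_elapsed ? -1 : +1;	/* faster first */

	return vb->v_prefer - va->v_prefer;	/* at a tie, higher preference wins */
}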