Example #1
0
/*
 * __verify_tree_shape --
 *	Dump the tree shape: print the internal- and leaf-page depth
 *	histograms accumulated during verification, with totals.
 */
static int
__verify_tree_shape(WT_SESSION_IMPL *session, WT_VSTUFF *vs)
{
	uint32_t count;
	size_t slot;

	/* Sum and report the internal-page depth histogram. */
	count = 0;
	for (slot = 0; slot < WT_ELEMENTS(vs->depth_internal); ++slot)
		count += vs->depth_internal[slot];
	WT_RET(__wt_msg(
	    session, "Internal page tree-depth (total %" PRIu32 "):", count));
	for (slot = 0; slot < WT_ELEMENTS(vs->depth_internal); ++slot) {
		if (vs->depth_internal[slot] == 0)
			continue;
		WT_RET(__wt_msg(session,
		    "\t%03zu: %u", slot, vs->depth_internal[slot]));
	}

	/* Sum and report the leaf-page depth histogram. */
	count = 0;
	for (slot = 0; slot < WT_ELEMENTS(vs->depth_leaf); ++slot)
		count += vs->depth_leaf[slot];
	WT_RET(__wt_msg(
	    session, "Leaf page tree-depth (total %" PRIu32 "):", count));
	for (slot = 0; slot < WT_ELEMENTS(vs->depth_leaf); ++slot) {
		if (vs->depth_leaf[slot] == 0)
			continue;
		WT_RET(__wt_msg(session,
		    "\t%03zu: %u", slot, vs->depth_leaf[slot]));
	}

	return (0);
}
Example #2
0
/*
 * uri_teardown --
 *	Free the memory backing the global URI list.
 *
 * Clears each slot after freeing it so the global array never holds
 * dangling pointers; this makes a second teardown (or a later accidental
 * read of the array) safe instead of a double-free/use-after-free.
 */
static void
uri_teardown(void)
{
	u_int i;

	for (i = 0; i < WT_ELEMENTS(uri_list); ++i) {
		free(uri_list[i]);		/* free(NULL) is a no-op. */
		uri_list[i] = NULL;
	}
}
Example #3
0
/*
 * __wt_conn_config_init --
 *	Build the per-connection array of pointers to the default
 *	configuration entries.  Returns 0 on success or a WiredTiger
 *	error code if allocation fails.
 */
int
__wt_conn_config_init(WT_SESSION_IMPL *session)
{
	WT_CONNECTION_IMPL *conn;
	const WT_CONFIG_ENTRY *ep, **epp;

	conn = S2C(session);

	/* Build a list of pointers to the configuration information. */
	WT_RET(__wt_calloc_def(session, WT_ELEMENTS(config_entries), &epp));
	conn->config_entries = epp;

	/*
	 * Fill in the list to reference the default information.
	 *
	 * Note the order: the loop copies the current entry and THEN tests
	 * the next one, so the list is copied up to (but not including) the
	 * entry whose method name is NULL.  NOTE(review): this assumes
	 * config_entries contains at least one real entry before the NULL
	 * terminator — confirm against the table's definition.
	 */
	for (ep = config_entries;;) {
		*epp++ = ep++;
		if (ep->method == NULL)
			break;
	}
	return (0);
}
Example #4
0
/*
 * key_gen_common --
 *	Fill in a key for the given row number, with the supplied two-byte
 *	suffix; sets the key's data/size to reference the key's own memory.
 */
static void
key_gen_common(WT_ITEM *key, uint64_t keyno, const char * const suffix)
{
	char *buf;
	int keylen;

	buf = key->mem;

	/*
	 * The key always starts with a 10-digit string (the specified row)
	 * followed by two digits, a random number between 1 and 15 if it's
	 * an insert, otherwise 00.
	 */
	u64_to_string_zf(keyno, key->mem, 11);
	buf[10] = '.';
	buf[11] = suffix[0];
	buf[12] = suffix[1];
	keylen = 13;

	/*
	 * In a column-store, the key is only used for Berkeley DB inserts,
	 * and so it doesn't need a random length: only row-stores get the
	 * variable-length padding below.
	 */
	if (g.type == ROW) {
		buf[keylen] = '/';

		/*
		 * Because we're doing table lookup for key sizes, we weren't
		 * able to set really big keys sizes in the table, the table
		 * isn't big enough to keep our hash from selecting too many
		 * big keys and blowing out the cache. Handle that here, use a
		 * really big key 1 in 2500 times.
		 */
		if (keyno % 2500 == 0 && g.c_key_max < KILOBYTE(80))
			keylen = KILOBYTE(80);
		else
			keylen = (int)
			    g.key_rand_len[keyno % WT_ELEMENTS(g.key_rand_len)];
	}

	key->data = key->mem;
	key->size = (size_t)keylen;
}
Example #5
0
/*
 * wthread --
 *	Worker thread: cycle through a small pool of cursor slots, running
 *	a random operation on each, until the global done flag is set.
 */
static void *
wthread(void *arg)
{
	WT_CURSOR *cursors[10];
	WT_RAND_STATE rnd;
	WT_SESSION *session;
	u_int slot;

	(void)arg;

	memset(cursors, 0, sizeof(cursors));

	testutil_check(conn->open_session(conn, NULL, NULL, &session));
	__wt_random_init_seed((WT_SESSION_IMPL *)session, &rnd);

	/* Walk the cursor slots round-robin (starting at slot 1). */
	slot = 0;
	while (!done) {
		++slot;
		if (slot == WT_ELEMENTS(cursors))
			slot = 0;
		op(session, &rnd, &cursors[slot]);
	}

	return (NULL);
}
Example #6
0
static void *
vthread(void *arg)
{
	WT_CURSOR *cursor_list[10];
	WT_DECL_RET;
	WT_RAND_STATE rnd;
	WT_SESSION *session;
	u_int i, next;

	(void)arg;

	memset(cursor_list, 0, sizeof(cursor_list));

	testutil_check(conn->open_session(conn, NULL, NULL, &session));
	__wt_random_init_seed((WT_SESSION_IMPL *)session, &rnd);

	for (next = 0; !done;) {
		if (++next == WT_ELEMENTS(cursor_list))
			next = 0;
		op(session, &rnd, &cursor_list[next]);

		while (!done) {
			i = __wt_random(&rnd) % uris;
			ret = session->verify(session, uri_list[i], NULL);
			if (ret == EBUSY) {
				(void)__wt_atomic_add64(&verify_busy, 1);
				continue;
			}

			testutil_check(ret);
			(void)__wt_atomic_add64(&verify, 1);
			break;
		}
	}

	return (NULL);
}
Example #7
0
/*
 * __wt_btree_huffman_open --
 *	Configure Huffman encoding for the tree.
 *
 * Reads the "huffman_key" and "huffman_value" configuration strings; each
 * may be empty (no encoding), the built-in "english" table, or a custom
 * table read via __wt_huffman_read.  When both key and value use the same
 * table, the value shares the key's encoder rather than building a second
 * one.  Returns 0 on success or a WiredTiger error code.
 */
int
__wt_btree_huffman_open(WT_SESSION_IMPL *session)
{
	struct __wt_huffman_table *table;
	WT_BTREE *btree;
	WT_CONFIG_ITEM key_conf, value_conf;
	WT_DECL_RET;
	const char **cfg;
	u_int entries, numbytes;

	btree = S2BT(session);
	cfg = btree->dhandle->cfg;

	/* Fetch and validate both configuration strings up front. */
	WT_RET(__wt_config_gets_none(session, cfg, "huffman_key", &key_conf));
	WT_RET(__wt_huffman_confchk(session, &key_conf));
	WT_RET(
	    __wt_config_gets_none(session, cfg, "huffman_value", &value_conf));
	WT_RET(__wt_huffman_confchk(session, &value_conf));
	/* Nothing configured: no encoders to build. */
	if (key_conf.len == 0 && value_conf.len == 0)
		return (0);

	switch (btree->type) {		/* Check file type compatibility. */
	case BTREE_COL_FIX:
		WT_RET_MSG(session, EINVAL,
		    "fixed-size column-store files may not be Huffman encoded");
		/* NOTREACHED */
	case BTREE_COL_VAR:
		/* Variable-length column stores may only encode values. */
		if (key_conf.len != 0)
			WT_RET_MSG(session, EINVAL,
			    "the keys of variable-length column-store files "
			    "may not be Huffman encoded");
		break;
	case BTREE_ROW:
		break;
	}

	if (key_conf.len == 0) {
		;
	} else if (strncmp(key_conf.str, "english", key_conf.len) == 0) {
		/*
		 * Work on a local copy of the static English-frequency table;
		 * presumably __wt_huffman_open modifies the table it's handed
		 * — TODO confirm against its contract.
		 */
		struct __wt_huffman_table
		    copy[WT_ELEMENTS(__wt_huffman_nytenglish)];

		memcpy(copy,
		    __wt_huffman_nytenglish, sizeof(__wt_huffman_nytenglish));
		WT_RET(__wt_huffman_open(
		    session, copy, WT_ELEMENTS(__wt_huffman_nytenglish),
		    1, &btree->huffman_key));

		/* Check for a shared key/value table. */
		if (value_conf.len != 0 && strncmp(
		    value_conf.str, "english", value_conf.len) == 0) {
			btree->huffman_value = btree->huffman_key;
			return (0);
		}
	} else {
		/*
		 * Custom table: read it, build the encoder, then free the
		 * table regardless of whether the open succeeded.
		 */
		WT_RET(__wt_huffman_read(
		    session, &key_conf, &table, &entries, &numbytes));
		ret = __wt_huffman_open(
		    session, table, entries, numbytes, &btree->huffman_key);
		__wt_free(session, table);
		if (ret != 0)
			return (ret);

		/* Check for a shared key/value table. */
		if (value_conf.len != 0 && key_conf.len == value_conf.len &&
		    memcmp(key_conf.str, value_conf.str, key_conf.len) == 0) {
			btree->huffman_value = btree->huffman_key;
			return (0);
		}
	}

	if (value_conf.len == 0) {
		;
	} else if (strncmp(value_conf.str, "english", value_conf.len) == 0) {
		/* Same local-copy pattern as the key table above. */
		struct __wt_huffman_table
		    copy[WT_ELEMENTS(__wt_huffman_nytenglish)];

		memcpy(copy,
		    __wt_huffman_nytenglish, sizeof(__wt_huffman_nytenglish));
		WT_RET(__wt_huffman_open(
		    session, copy, WT_ELEMENTS(__wt_huffman_nytenglish),
		    1, &btree->huffman_value));
	} else {
		WT_RET(__wt_huffman_read(
		    session, &value_conf, &table, &entries, &numbytes));
		ret = __wt_huffman_open(
		    session, table, entries, numbytes, &btree->huffman_value);
		__wt_free(session, table);
		if (ret != 0)
			return (ret);
	}

	return (0);
}
Example #8
0
/*
 * __verify_tree --
 *	Verify a tree, recursively descending through it in depth-first fashion.
 * The page argument was physically verified (so we know it's correctly formed),
 * and the in-memory version built.  Our job is to check logical relationships
 * in the page and in the tree.
 */
static int
__verify_tree(WT_SESSION_IMPL *session, WT_REF *ref, WT_VSTUFF *vs)
{
	WT_BM *bm;
	WT_CELL *cell;
	WT_CELL_UNPACK *unpack, _unpack;
	WT_COL *cip;
	WT_DECL_RET;
	WT_PAGE *page;
	WT_REF *child_ref;
	uint64_t recno;
	uint32_t entry, i;
	bool found;

	bm = S2BT(session)->bm;
	page = ref->page;

	unpack = &_unpack;
	WT_CLEAR(*unpack);	/* -Wuninitialized */

	WT_RET(__wt_verbose(session, WT_VERB_VERIFY, "%s %s",
	    __wt_page_addr_string(session, ref, vs->tmp1),
	    __wt_page_type_string(page->type)));

	/* Optionally dump the address. */
	if (vs->dump_address)
		WT_RET(__wt_msg(session, "%s %s",
		    __wt_page_addr_string(session, ref, vs->tmp1),
		    __wt_page_type_string(page->type)));

	/*
	 * Track the shape of the tree: bump the histogram bucket for this
	 * page's depth, clamping to the last bucket.
	 */
	if (WT_PAGE_IS_INTERNAL(page))
		++vs->depth_internal[
		    WT_MIN(vs->depth, WT_ELEMENTS(vs->depth_internal) - 1)];
	else
		/*
		 * NOTE(review): the leaf histogram is clamped using
		 * WT_ELEMENTS(vs->depth_internal); if depth_leaf has a
		 * different element count this over/under-clamps —
		 * WT_ELEMENTS(vs->depth_leaf) looks intended, confirm the
		 * two arrays' declared sizes.
		 */
		++vs->depth_leaf[
		    WT_MIN(vs->depth, WT_ELEMENTS(vs->depth_internal) - 1)];

	/*
	 * The page's physical structure was verified when it was read into
	 * memory by the read server thread, and then the in-memory version
	 * of the page was built. Now we make sure the page and tree are
	 * logically consistent.
	 *
	 * !!!
	 * The problem: (1) the read server has to build the in-memory version
	 * of the page because the read server is the thread that flags when
	 * any thread can access the page in the tree; (2) we can't build the
	 * in-memory version of the page until the physical structure is known
	 * to be OK, so the read server has to verify at least the physical
	 * structure of the page; (3) doing complete page verification requires
	 * reading additional pages (for example, overflow keys imply reading
	 * overflow pages in order to test the key's order in the page); (4)
	 * the read server cannot read additional pages because it will hang
	 * waiting on itself.  For this reason, we split page verification
	 * into a physical verification, which allows the in-memory version
	 * of the page to be built, and then a subsequent logical verification
	 * which happens here.
	 *
	 * Report progress occasionally.
	 */
#define	WT_VERIFY_PROGRESS_INTERVAL	100
	if (++vs->fcnt % WT_VERIFY_PROGRESS_INTERVAL == 0)
		WT_RET(__wt_progress(session, NULL, vs->fcnt));

#ifdef HAVE_DIAGNOSTIC
	/* Optionally dump the blocks or page in debugging mode. */
	if (vs->dump_blocks)
		WT_RET(__wt_debug_disk(session, page->dsk, NULL));
	if (vs->dump_pages)
		WT_RET(__wt_debug_page(session, page, NULL));
#endif

	/*
	 * Column-store key order checks: check the page's record number and
	 * then update the total record count.
	 */
	switch (page->type) {
	case WT_PAGE_COL_FIX:
		recno = page->pg_fix_recno;
		goto recno_chk;
	case WT_PAGE_COL_INT:
		recno = page->pg_intl_recno;
		goto recno_chk;
	case WT_PAGE_COL_VAR:
		recno = page->pg_var_recno;
recno_chk:	if (recno != vs->record_total + 1)
			WT_RET_MSG(session, WT_ERROR,
			    "page at %s has a starting record of %" PRIu64
			    " when the expected starting record is %" PRIu64,
			    __wt_page_addr_string(session, ref, vs->tmp1),
			    recno, vs->record_total + 1);
		break;
	}
	switch (page->type) {
	case WT_PAGE_COL_FIX:
		vs->record_total += page->pg_fix_entries;
		break;
	case WT_PAGE_COL_VAR:
		/*
		 * Count the page's records: a NULL cell counts as one record,
		 * otherwise use the cell's run-length encoding count.
		 */
		recno = 0;
		WT_COL_FOREACH(page, cip, i)
			if ((cell = WT_COL_PTR(page, cip)) == NULL)
				++recno;
			else {
				__wt_cell_unpack(cell, unpack);
				recno += __wt_cell_rle(unpack);
			}
		vs->record_total += recno;
		break;
	}

	/*
	 * Row-store leaf page key order check: it's a depth-first traversal,
	 * the first key on this page should be larger than any key previously
	 * seen.
	 */
	switch (page->type) {
	case WT_PAGE_ROW_LEAF:
		WT_RET(__verify_row_leaf_key_order(session, ref, vs));
		break;
	}

	/* If it's not the root page, unpack the parent cell. */
	if (!__wt_ref_is_root(ref)) {
		__wt_cell_unpack(ref->addr, unpack);

		/* Compare the parent cell against the page type. */
		switch (page->type) {
		case WT_PAGE_COL_FIX:
			if (unpack->raw != WT_CELL_ADDR_LEAF_NO)
				goto celltype_err;
			break;
		case WT_PAGE_COL_VAR:
			if (unpack->raw != WT_CELL_ADDR_LEAF &&
			    unpack->raw != WT_CELL_ADDR_LEAF_NO)
				goto celltype_err;
			break;
		case WT_PAGE_ROW_LEAF:
			if (unpack->raw != WT_CELL_ADDR_DEL &&
			    unpack->raw != WT_CELL_ADDR_LEAF &&
			    unpack->raw != WT_CELL_ADDR_LEAF_NO)
				goto celltype_err;
			break;
		case WT_PAGE_COL_INT:
		case WT_PAGE_ROW_INT:
			if (unpack->raw != WT_CELL_ADDR_INT)
celltype_err:			WT_RET_MSG(session, WT_ERROR,
				    "page at %s, of type %s, is referenced in "
				    "its parent by a cell of type %s",
				    __wt_page_addr_string(
					session, ref, vs->tmp1),
				    __wt_page_type_string(page->type),
				    __wt_cell_type_string(unpack->raw));
			break;
		}
	}

	/*
	 * Check overflow pages.  We check overflow cells separately from other
	 * tests that walk the page as it's simpler, and I don't care much how
	 * fast table verify runs.
	 */
	switch (page->type) {
	case WT_PAGE_COL_VAR:
	case WT_PAGE_ROW_INT:
	case WT_PAGE_ROW_LEAF:
		WT_RET(__verify_overflow_cell(session, ref, &found, vs));
		if (__wt_ref_is_root(ref) || page->type == WT_PAGE_ROW_INT)
			break;

		/*
		 * Object if a leaf-no-overflow address cell references a page
		 * with overflow keys, but don't object if a leaf address cell
		 * references a page without overflow keys.  Reconciliation
		 * doesn't guarantee every leaf page without overflow items will
		 * be a leaf-no-overflow type.
		 */
		if (found && unpack->raw == WT_CELL_ADDR_LEAF_NO)
			WT_RET_MSG(session, WT_ERROR,
			    "page at %s, of type %s and referenced in its "
			    "parent by a cell of type %s, contains overflow "
			    "items",
			    __wt_page_addr_string(session, ref, vs->tmp1),
			    __wt_page_type_string(page->type),
			    __wt_cell_type_string(WT_CELL_ADDR_LEAF_NO));
		break;
	}

	/* Check tree connections and recursively descend the tree. */
	switch (page->type) {
	case WT_PAGE_COL_INT:
		/* For each entry in an internal page, verify the subtree. */
		entry = 0;
		WT_INTL_FOREACH_BEGIN(session, page, child_ref) {
			/*
			 * It's a depth-first traversal: this entry's starting
			 * record number should be 1 more than the total records
			 * reviewed to this point.
			 */
			++entry;
			if (child_ref->key.recno != vs->record_total + 1) {
				WT_RET_MSG(session, WT_ERROR,
				    "the starting record number in entry %"
				    PRIu32 " of the column internal page at "
				    "%s is %" PRIu64 " and the expected "
				    "starting record number is %" PRIu64,
				    entry,
				    __wt_page_addr_string(
				    session, child_ref, vs->tmp1),
				    child_ref->key.recno,
				    vs->record_total + 1);
			}

			/*
			 * Verify the subtree: release the page even if the
			 * recursive verify failed, then propagate the error.
			 */
			++vs->depth;
			WT_RET(__wt_page_in(session, child_ref, 0));
			ret = __verify_tree(session, child_ref, vs);
			WT_TRET(__wt_page_release(session, child_ref, 0));
			--vs->depth;
			WT_RET(ret);

			__wt_cell_unpack(child_ref->addr, unpack);
			WT_RET(bm->verify_addr(
			    bm, session, unpack->data, unpack->size));
		} WT_INTL_FOREACH_END;
		break;
	case WT_PAGE_ROW_INT:
		/* For each entry in an internal page, verify the subtree. */
		entry = 0;
		WT_INTL_FOREACH_BEGIN(session, page, child_ref) {
			/*
			 * It's a depth-first traversal: this entry's starting
			 * key should be larger than the largest key previously
			 * reviewed.
			 *
			 * The 0th key of any internal page is magic, and we
			 * can't test against it.
			 */
			++entry;
			if (entry != 1)
				WT_RET(__verify_row_int_key_order(
				    session, page, child_ref, entry, vs));

			/*
			 * Verify the subtree: release the page even if the
			 * recursive verify failed, then propagate the error.
			 */
			++vs->depth;
			WT_RET(__wt_page_in(session, child_ref, 0));
			ret = __verify_tree(session, child_ref, vs);
			WT_TRET(__wt_page_release(session, child_ref, 0));
			--vs->depth;
			WT_RET(ret);

			__wt_cell_unpack(child_ref->addr, unpack);
			WT_RET(bm->verify_addr(
			    bm, session, unpack->data, unpack->size));
		} WT_INTL_FOREACH_END;
Example #9
0
/*
 * __wt_statlog_dump_spinlock --
 *	Log the spin-lock statistics.
 *
 * Writes acquisition counts for each registered spinlock (aggregating the
 * "block manager" and "btree page" locks), then the per-call-site
 * acquisition counts and the blocking matrix.  Counters below a baseline
 * ("ignore") are reported as zero; counters are optionally cleared after
 * reporting.  Returns 0 on success, an errno value on write failure.
 */
int
__wt_statlog_dump_spinlock(WT_CONNECTION_IMPL *conn, const char *tag)
{
	WT_SPINLOCK *spin;
	WT_CONNECTION_STATS_SPINLOCK *p, *t;
	uint64_t block_manager, btree_page, ignore;
	u_int i, j;

	/*
	 * Ignore rare acquisition of a spinlock using a base value of 10 per
	 * second so we don't create graphs we don't care about.
	 */
	ignore = (uint64_t)(conn->stat_usecs / 1000000) * 10;

	/* Output the number of times each spinlock was acquired. */
	block_manager = btree_page = 0;
	for (i = 0; i < WT_ELEMENTS(conn->spinlock_list); ++i) {
		/* Slots may be empty; skip them. */
		if ((spin = conn->spinlock_list[i]) == NULL)
			continue;

		/*
		 * There are two sets of spinlocks we aggregate, the btree page
		 * locks and the block manager per-file locks.  The reason is
		 * the block manager locks grow with the number of files open
		 * (and LSM and bloom filters can open a lot of files), and
		 * there are 16 btree page locks and splitting them out has not
		 * historically been that informative.
		 */
		if (strcmp(spin->name, "block manager") == 0) {
			block_manager += spin->counter;
			if (conn->stat_clear)
				spin->counter = 0;
			continue;
		}
		if (strcmp(spin->name, "btree page") == 0) {
			btree_page += spin->counter;
			if (conn->stat_clear)
				spin->counter = 0;
			continue;
		}

		WT_RET_TEST((fprintf(conn->stat_fp,
		    "%s %" PRIu64 " %s spinlock %s: acquisitions\n",
		    conn->stat_stamp,
		    spin->counter <= ignore ? 0 : spin->counter,
		    tag, spin->name) < 0),
		    __wt_errno());
		if (conn->stat_clear)
			spin->counter = 0;
	}
	/* Emit the two aggregated counters built above. */
	WT_RET_TEST((fprintf(conn->stat_fp,
	    "%s %" PRIu64 " %s spinlock %s: acquisitions\n",
	    conn->stat_stamp,
	    block_manager <= ignore ? 0 : block_manager,
	    tag, "block manager") < 0),
	    __wt_errno());
	WT_RET_TEST((fprintf(conn->stat_fp,
	    "%s %" PRIu64 " %s spinlock %s: acquisitions\n",
	    conn->stat_stamp,
	    btree_page <= ignore ? 0 : btree_page,
	    tag, "btree page") < 0),
	    __wt_errno());

	/*
	 * Output the number of times each location acquires its spinlock and
	 * the blocking matrix.
	 *
	 * NOTE(review): p->total and p->blocked[j] are printed with %d while
	 * the other counters are uint64_t printed with PRIu64 — confirm the
	 * struct declares these fields as int, otherwise the format
	 * specifiers are mismatched (undefined behavior).
	 */
	for (i = 0; i < WT_ELEMENTS(conn->spinlock_block); ++i) {
		p = &conn->spinlock_block[i];
		if (p->name == NULL)
			continue;

		WT_RET_TEST((fprintf(conn->stat_fp,
		    "%s %d %s spinlock %s acquired by %s(%d)\n",
		    conn->stat_stamp,
		    p->total <= ignore ? 0 : p->total,
		    tag,
		    p->name, p->file, p->line) < 0), __wt_errno());
		if (conn->stat_clear)
			p->total = 0;

		for (j = 0; j < WT_ELEMENTS(conn->spinlock_block); ++j) {
			t = &conn->spinlock_block[j];
			if (t->name == NULL)
				continue;

			WT_RET_TEST((fprintf(conn->stat_fp,
			    "%s %d %s spinlock %s: %s(%d) blocked by %s(%d)\n",
			    conn->stat_stamp,
			    p->blocked[j] <= ignore ? 0 : p->blocked[j],
			    tag,
			    p->name, p->file, p->line,
			    t->file, t->line) < 0), __wt_errno());
			if (conn->stat_clear)
				p->blocked[j] = 0;
		}
	}

	WT_FULL_BARRIER();			/* Minimize the window. */
	return (0);
}
Example #10
0
/*
 * main --
 *	Reconfiguration stress test: apply every configuration string in
 * the list linearly, then repeatedly apply random combinations, taking
 * care never to mix "shared_cache" and "cache_size" in one string.
 */
int
main(int argc, char *argv[])
{
	enum { CACHE_SHARED, CACHE_SET, CACHE_NONE } cache;
	TEST_OPTS *opts, _opts;
	WT_RAND_STATE rnd;
	WT_SESSION *session;
	size_t len;
	u_int i, j;
	const char *p;
	char *config;

	opts = &_opts;
	memset(opts, 0, sizeof(*opts));
	opts->table_type = TABLE_ROW;
	testutil_check(testutil_parse_opts(argc, argv, opts));
	testutil_make_work_dir(opts->home);

	testutil_check(
	    wiredtiger_open(opts->home, &event_handler, "create", &opts->conn));

	/* Open an LSM file so the LSM reconfiguration options make sense. */
	testutil_check(
	    opts->conn->open_session(opts->conn, NULL, NULL, &session));
	testutil_check(session->create(
	    session, opts->uri, "type=lsm,key_format=S,value_format=S"));

	/* Initialize the RNG. */
	__wt_random_init_seed(NULL, &rnd);

	/*
	 * Allocate memory for the config.
	 *
	 * NOTE(review): this assumes each list[] entry is shorter than 64
	 * bytes, and the random pass below concatenates up to
	 * WT_ELEMENTS(list) entries AFTER the initial strcpy — worst case
	 * N+1 entries in an N*64-byte buffer, written with unbounded
	 * strcpy/strcat.  Confirm the entries leave enough headroom.
	 */
	len = WT_ELEMENTS(list) * 64;
	config = dmalloc(len);

	/* Set an alarm so we can debug hangs. */
	(void)signal(SIGALRM, on_alarm);

	/* A linear pass through the list. */
	for (i = 0; i < WT_ELEMENTS(list); ++i)
		reconfig(opts, session, list[i]);

	/*
	 * A linear pass through the list, adding random elements.
	 *
	 * WiredTiger configurations are usually "the last one set wins", but
	 * "shared_cache" and "cache_set" options aren't allowed in the same
	 * configuration string.
	 */
	for (i = 0; i < WT_ELEMENTS(list); ++i) {
		p = list[i];
		cache = CACHE_NONE;
		if (WT_PREFIX_MATCH(p, ",shared_cache"))
			cache = CACHE_SHARED;
		else if (WT_PREFIX_MATCH(p, ",cache_size"))
			cache = CACHE_SET;
		strcpy(config, p);

		/* Append 1..N random entries, skipping conflicting ones. */
		for (j =
		    (__wt_random(&rnd) % WT_ELEMENTS(list)) + 1; j > 0; --j) {
			p = list[__wt_random(&rnd) % WT_ELEMENTS(list)];
			if (WT_PREFIX_MATCH(p, ",shared_cache")) {
				if (cache == CACHE_SET)
					continue;
				cache = CACHE_SHARED;
			} else if (WT_PREFIX_MATCH(p, ",cache_size")) {
				if (cache == CACHE_SHARED)
					continue;
				cache = CACHE_SET;
			}
			strcat(config, p);
		}
		reconfig(opts, session, config);
	}

	/*
	 * Turn on-close statistics off, if on-close is on and statistics were
	 * randomly turned off during the run, close would fail.
	 */
	testutil_check(opts->conn->reconfigure(
	    opts->conn, "statistics_log=(on_close=0)"));

	free(config);
	testutil_cleanup(opts);
	return (EXIT_SUCCESS);
}
Example #11
0
/*
 * main --
 *	Cursor-caching stress test: pick 5 configurations at random from a
 * fixed table of worker-count/URI-count/cache-cursors combinations and
 * run each for a minute, then release the URI list.
 */
int
main(int argc, char *argv[])
{
	static const struct {
		u_int workers;
		u_int uris;
		bool  cache_cursors;
	} runs[] = {
		{  1,   1, false},
		{  1,   1, true},
		{  8,   1, false},
		{  8,   1, true},
		{ 16,   1, false},
		{ 16,   1, true},
		{ 16,   WT_ELEMENTS(uri_list), false},
		{ 16,   WT_ELEMENTS(uri_list), true},
		{200, 100, false},
		{200, 100, true},
		{200, WT_ELEMENTS(uri_list), false},
		{200, WT_ELEMENTS(uri_list), true},
		{300, 100, false},
		{300, 100, true},
		{600, WT_ELEMENTS(uri_list), false},
		{600, WT_ELEMENTS(uri_list), true},
	};
	WT_RAND_STATE rnd;
	u_int count, slot;
	int ch;

	/*
	 * Bypass this test for valgrind. It has a fairly low thread limit.
	 */
	if (testutil_is_flag_set("TESTUTIL_BYPASS_VALGRIND"))
		return (EXIT_SUCCESS);

	(void)testutil_set_progname(argv);
	__wt_random_init_seed(NULL, &rnd);

	/* The only flag is -v (verbose); anything else is a usage error. */
	while ((ch = __wt_getopt(argv[0], argc, argv, "v")) != EOF) {
		if (ch == 'v')
			verbose = true;
		else {
			fprintf(stderr, "usage: %s [-v]\n", argv[0]);
			return (EXIT_FAILURE);
		}
	}

	(void)signal(SIGALRM, on_alarm);

	/* Each test in the table runs for a minute, run 5 tests at random. */
	for (count = 0; count < 5; ++count) {
		slot = __wt_random(&rnd) % WT_ELEMENTS(runs);
		workers = runs[slot].workers;
		uris = runs[slot].uris;
		run(runs[slot].cache_cursors);
	}

	uri_teardown();

	return (EXIT_SUCCESS);
}
Example #12
0
/*
 * modify_run
 *	Exercise the modify API two ways:
 *	1. Build an initial value, a copy, and a fake cursor for use with
 *	the WiredTiger routines. Generate random modify vectors and apply
 *	them to the cursor's item via the modify-apply API, and apply the
 *	same vectors to a copy via a local helper written to test the modify
 *	API; the two results must match.
 *
 *	2. Feed the initial and modified values into the calculate-modify
 *	API to derive a modify vector, apply that vector to the initial
 *	value with the modify-apply API, and check the result matches the
 *	modified value the calculation started from.
 */
static void
modify_run(bool verbose)
{
	WT_CURSOR *cursor, _cursor;
	WT_DECL_RET;
	WT_ITEM *itemA, _itemA, *itemB, _itemB;
	size_t size;
	int outer, inner;

	/* Initialize the RNG. */
	__wt_random_init_seed(NULL, &rnd);

	/* Set up replacement information. */
	modify_repl_init();

	/* Three WT_ITEMs are needed, one of them embedded in a fake cursor. */
	itemA = &_itemA;
	memset(&_itemA, 0, sizeof(_itemA));
	itemB = &_itemB;
	memset(&_itemB, 0, sizeof(_itemB));
	cursor = &_cursor;
	memset(&_cursor, 0, sizeof(_cursor));
	cursor->value_format = "u";

#define	NRUNS	10000
	for (outer = 0; outer < NRUNS; ++outer) {
		/* Create an initial value of random length. */
		size = (size_t)(__wt_random(&rnd) % MAX_REPL_BYTES);
		testutil_check(__wt_buf_set(NULL, itemA, modify_repl, size));

		for (inner = 0; inner < 1000; ++inner) {
			/* Snapshot the current value into the second item. */
			testutil_check(__wt_buf_set(
			    NULL, itemB, itemA->data, itemA->size));

			/*
			 * Build a random modify vector, apply it through the
			 * library, then check against our own slow-but-simple
			 * implementation of modify.
			 */
			modify_build();
			testutil_check(__wt_buf_set(
			    NULL, &cursor->value, itemA->data, itemA->size));
			testutil_check(__wt_modify_apply_api(
			    NULL, cursor, entries, nentries));
			slow_apply_api(itemA);
			compare(itemA, &cursor->value);

			/*
			 * Have WiredTiger compute a modify vector for the
			 * change, apply that vector, and compare once more
			 * against our implementation's result.
			 */
			nentries = WT_ELEMENTS(entries);
			ret = wiredtiger_calc_modify(NULL,
			    itemB, itemA,
			    WT_MAX(itemB->size, itemA->size) + 100,
			    entries, &nentries);
			if (ret == WT_NOTFOUND)
				continue;
			testutil_check(ret);
			testutil_check(__wt_buf_set(
			    NULL, &cursor->value, itemB->data, itemB->size));
			testutil_check(__wt_modify_apply_api(
			    NULL, cursor, entries, nentries));
			compare(itemA, &cursor->value);
		}
		if (verbose) {
			printf("%d (%d%%)\r", outer, (outer * 100) / NRUNS);
			fflush(stdout);
		}
	}
	if (verbose)
		printf("%d (100%%)\n", outer);

	/* Release all three buffers. */
	__wt_buf_free(NULL, itemA);
	__wt_buf_free(NULL, itemB);
	__wt_buf_free(NULL, &cursor->value);
}