Example #1
/*
 * __split_stash_add --
 *	Add a new entry into the session's split stash list.
 */
static int
__split_stash_add(
    WT_SESSION_IMPL *session, uint64_t split_gen, void *p, size_t len)
{
	WT_CONNECTION_IMPL *conn;
	WT_SPLIT_STASH *stash;

	WT_ASSERT(session, p != NULL);

	conn = S2C(session);

	/* Grow the list as necessary. */
	WT_RET(__wt_realloc_def(session, &session->split_stash_alloc,
	    session->split_stash_cnt + 1, &session->split_stash));

	stash = session->split_stash + session->split_stash_cnt++;
	stash->split_gen = split_gen;
	stash->p = p;
	stash->len = len;

	(void)__wt_atomic_add64(&conn->split_stashed_bytes, len);
	(void)__wt_atomic_add64(&conn->split_stashed_objects, 1);

	/* See if we can free any previous entries. */
	if (session->split_stash_cnt > 1)
		__wt_split_stash_discard(session);

	return (0);
}
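What this example shows: the new entry is appended to the session-local stash array, and two connection-wide counters are bumped with __wt_atomic_add64, which behaves like an atomic add-and-fetch on a 64-bit value. A minimal sketch of the same accounting using portable C11 atomics (illustration only, not WiredTiger code; the struct and names are hypothetical):

#include <stdatomic.h>
#include <stddef.h>
#include <stdint.h>

struct conn_stats {
	_Atomic uint64_t stashed_bytes;
	_Atomic uint64_t stashed_objects;
};

static void
stash_account(struct conn_stats *stats, size_t len)
{
	/* Equivalent of (void)__wt_atomic_add64(&conn->split_stashed_bytes, len). */
	atomic_fetch_add(&stats->stashed_bytes, (uint64_t)len);
	/* Equivalent of (void)__wt_atomic_add64(&conn->split_stashed_objects, 1). */
	atomic_fetch_add(&stats->stashed_objects, 1);
}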
Example #2
File: main.c Project: ajdavis/mongo
static void
op(WT_SESSION *session, WT_RAND_STATE *rnd, WT_CURSOR **cpp)
{
	WT_CURSOR *cursor;
	WT_DECL_RET;
	u_int i, key;
	char buf[128];
	bool readonly;

	/* Close any open cursor in the slot we're about to reuse. */
	if (*cpp != NULL) {
		testutil_check((*cpp)->close(*cpp));
		*cpp = NULL;
	}

	cursor = NULL;
	readonly = __wt_random(rnd) % 2 == 0;

	/* Loop to open an object handle. */
	for (i = __wt_random(rnd) % uris; !done; __wt_yield()) {
		/* Use a checkpoint handle for 50% of reads. */
		ret = session->open_cursor(session, uri_list[i], NULL,
		    readonly && (i % 2 == 0) ?
		    "checkpoint=WiredTigerCheckpoint" : NULL, &cursor);
		if (ret != EBUSY) {
			testutil_check(ret);
			break;
		}
		(void)__wt_atomic_add64(&worker_busy, 1);
	}
	if (cursor == NULL)
		return;

	/* Operate on some number of key/value pairs. */
	for (key = 1;
	    !done && key < MAXKEY; key += __wt_random(rnd) % 37, __wt_yield()) {
		testutil_check(
		    __wt_snprintf(buf, sizeof(buf), "key:%020u", key));
		cursor->set_key(cursor, buf);
		if (readonly)
			testutil_check(cursor->search(cursor));
		else {
			cursor->set_value(cursor, buf);
			testutil_check(cursor->insert(cursor));
		}
	}

	/* Close the cursor half the time, otherwise cache it. */
	if (__wt_random(rnd) % 2 == 0)
		testutil_check(cursor->close(cursor));
	else
		*cpp = cursor;

	(void)__wt_atomic_add64(&worker, 1);
}
Example #3
/*
 * __wt_async_op_enqueue --
 *	Enqueue an operation onto the work queue.
 */
int
__wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op)
{
	WT_ASYNC *async;
	WT_CONNECTION_IMPL *conn;
	uint64_t cur_head, cur_tail, my_alloc, my_slot;
#ifdef	HAVE_DIAGNOSTIC
	WT_ASYNC_OP_IMPL *my_op;
#endif

	conn = S2C(session);
	async = conn->async;

	/*
	 * If an application re-uses a WT_ASYNC_OP, we end up here with an
	 * invalid object.
	 */
	if (op->state != WT_ASYNCOP_READY)
		WT_RET_MSG(session, EINVAL,
		    "application error: WT_ASYNC_OP already in use");

	/*
	 * Enqueue op at the tail of the work queue.
	 * We get our slot in the ring buffer to use.
	 */
	my_alloc = __wt_atomic_add64(&async->alloc_head, 1);
	my_slot = my_alloc % async->async_qsize;

	/*
	 * Make sure we haven't wrapped around the queue.
	 * If so, wait for the tail to advance off this slot.
	 */
	WT_ORDERED_READ(cur_tail, async->tail_slot);
	while (cur_tail == my_slot) {
		__wt_yield();
		WT_ORDERED_READ(cur_tail, async->tail_slot);
	}

#ifdef	HAVE_DIAGNOSTIC
	WT_ORDERED_READ(my_op, async->async_queue[my_slot]);
	if (my_op != NULL)
		return (__wt_panic(session));
#endif
	WT_PUBLISH(async->async_queue[my_slot], op);
	op->state = WT_ASYNCOP_ENQUEUED;
	if (__wt_atomic_add32(&async->cur_queue, 1) > async->max_queue)
		WT_PUBLISH(async->max_queue, async->cur_queue);
	/*
	 * Multiple threads may be adding ops to the queue.  We need to wait
	 * our turn to make our slot visible to workers.
	 */
	WT_ORDERED_READ(cur_head, async->head);
	while (cur_head != (my_alloc - 1)) {
		__wt_yield();
		WT_ORDERED_READ(cur_head, async->head);
	}
	WT_PUBLISH(async->head, my_alloc);
	return (0);
}
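The enqueue above is a ticket-based ring buffer: each producer claims a ticket with an atomic add, waits for the consumer to drain its slot, publishes the op, and then waits its turn before advancing the shared head, so slots become visible to workers in ticket order. A condensed sketch of that ordering with C11 atomics (hypothetical names, not the WiredTiger API):

#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

#define QSIZE 1024

struct ring {
	_Atomic uint64_t alloc_head;	/* next ticket to hand out */
	_Atomic uint64_t head;		/* last slot made visible */
	_Atomic uint64_t tail_slot;	/* slot the consumer drains next */
	void *_Atomic slots[QSIZE];
};

static void
ring_enqueue(struct ring *r, void *item)
{
	uint64_t my_alloc, my_slot;

	/* Take a ticket; +1 emulates the add-and-fetch of __wt_atomic_add64. */
	my_alloc = atomic_fetch_add(&r->alloc_head, 1) + 1;
	my_slot = my_alloc % QSIZE;

	/* Make sure we haven't wrapped: wait for the consumer to leave our slot. */
	while (atomic_load(&r->tail_slot) == my_slot)
		sched_yield();

	/* Publish the item into our slot. */
	atomic_store(&r->slots[my_slot], item);

	/* Wait our turn, then make the slot visible to consumers. */
	while (atomic_load(&r->head) != my_alloc - 1)
		sched_yield();
	atomic_store(&r->head, my_alloc);
}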
Example #4
/*
 * Create a guaranteed unique table and open and close a bulk cursor on it.
 */
void
op_bulk_unique(void *arg)
{
	TEST_OPTS *opts;
	TEST_PER_THREAD_OPTS *args;
	WT_CURSOR *c;
	WT_RAND_STATE rnd;
	WT_SESSION *session;
	int ret;
	char new_uri[64];

	args = (TEST_PER_THREAD_OPTS *)arg;
	opts = args->testopts;
	__wt_random_init_seed(NULL, &rnd);

	testutil_check(
	    opts->conn->open_session(opts->conn, NULL, NULL, &session));

	/* Generate a unique object name. */
	testutil_check(__wt_snprintf(
	    new_uri, sizeof(new_uri), "%s.%" PRIu64,
	    opts->uri, __wt_atomic_add64(&opts->unique_id, 1)));
	testutil_check(session->create(session, new_uri, DEFAULT_TABLE_SCHEMA));

	__wt_yield();

	/*
	 * Opening a bulk cursor may race with a forced checkpoint that
	 * creates a checkpoint of the empty file, which triggers EINVAL;
	 * with checkpoint_wait=false, EBUSY is possible as well.
	 */
	if ((ret = session->open_cursor(
	    session, new_uri, NULL, "bulk,checkpoint_wait=false", &c)) == 0) {
		testutil_check(c->close(c));
	} else if (ret != EINVAL && ret != EBUSY)
		testutil_die(ret,
		    "session.open_cursor bulk unique: %s", new_uri);

	while ((ret = session->drop(session, new_uri, __wt_random(&rnd) & 1 ?
	    "force,checkpoint_wait=false" : "checkpoint_wait=false")) != 0)
		if (ret != EBUSY)
			testutil_die(ret, "session.drop: %s", new_uri);
		else
			/*
			 * The EBUSY is expected when we run with
			 * checkpoint_wait set to false, so we increment the
			 * counter while in this loop to avoid false positives.
			 */
			args->thread_counter++;

	testutil_check(session->close(session, NULL));
	args->thread_counter++;
}
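The unique object name comes from __wt_atomic_add64 on a shared counter, so concurrent threads can never produce the same URI. The same idea expressed with C11 atomics (illustration only; the names here are hypothetical):

#include <inttypes.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t unique_id;

static void
make_unique_uri(char *buf, size_t len, const char *base)
{
	/* fetch_add returns the old value; +1 mirrors add-and-fetch. */
	uint64_t id = atomic_fetch_add(&unique_id, 1) + 1;

	(void)snprintf(buf, len, "%s.%" PRIu64, base, id);
}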
Example #5
File: main.c Project: ajdavis/mongo
static void *
vthread(void *arg)
{
	WT_CURSOR *cursor_list[10];
	WT_DECL_RET;
	WT_RAND_STATE rnd;
	WT_SESSION *session;
	u_int i, next;

	(void)arg;

	memset(cursor_list, 0, sizeof(cursor_list));

	testutil_check(conn->open_session(conn, NULL, NULL, &session));
	__wt_random_init_seed((WT_SESSION_IMPL *)session, &rnd);

	for (next = 0; !done;) {
		if (++next == WT_ELEMENTS(cursor_list))
			next = 0;
		op(session, &rnd, &cursor_list[next]);

		while (!done) {
			i = __wt_random(&rnd) % uris;
			ret = session->verify(session, uri_list[i], NULL);
			if (ret == EBUSY) {
				(void)__wt_atomic_add64(&verify_busy, 1);
				continue;
			}

			testutil_check(ret);
			(void)__wt_atomic_add64(&verify, 1);
			break;
		}
	}

	return (NULL);
}
Example #6
/*
 * Create and drop a guaranteed-unique table.
 */
void
op_create_unique(void *arg)
{
	TEST_OPTS *opts;
	TEST_PER_THREAD_OPTS *args;
	WT_RAND_STATE rnd;
	WT_SESSION *session;
	int ret;
	char new_uri[64];

	args = (TEST_PER_THREAD_OPTS *)arg;
	opts = args->testopts;
	__wt_random_init_seed(NULL, &rnd);

	testutil_check(
	    opts->conn->open_session(opts->conn, NULL, NULL, &session));

	/* Generate a unique object name. */
	testutil_check(__wt_snprintf(
	    new_uri, sizeof(new_uri), "%s.%" PRIu64,
	    opts->uri, __wt_atomic_add64(&opts->unique_id, 1)));
	testutil_check(session->create(session, new_uri, DEFAULT_TABLE_SCHEMA));

	__wt_yield();
	while ((ret = session->drop(session, new_uri, __wt_random(&rnd) & 1 ?
	    "force,checkpoint_wait=false" : "checkpoint_wait=false")) != 0)
		if (ret != EBUSY)
			testutil_die(ret, "session.drop: %s", new_uri);
		else
			/*
			 * The EBUSY is expected when we run with
			 * checkpoint_wait set to false, so we increment the
			 * counter while in this loop to avoid false positives.
			 */
			args->thread_counter++;

	testutil_check(session->close(session, NULL));
	args->thread_counter++;
}
Example #7
/*
 * workgen_atomic_add64 --
 *	Atomically add a value to a 64-bit unsigned integer and return the
 * resulting value.
 */
uint64_t
workgen_atomic_add64(uint64_t *vp, uint64_t v)
{
	return (__wt_atomic_add64(vp, v));
}
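workgen_atomic_add64 is a thin wrapper around __wt_atomic_add64, presumably so workgen code that does not include WiredTiger's internal headers can still perform an atomic 64-bit add-and-fetch. A hypothetical caller (not from the workgen source) might use it as a thread-safe sequence counter:

static uint64_t shared_ops;	/* updated only through the atomic wrapper */

static uint64_t
record_op(void)
{
	/* Returns the post-add value, usable as a unique sequence number. */
	return (workgen_atomic_add64(&shared_ops, 1));
}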