Example #1
File: txn.c  Project: sylvanchil/mongo
/*
 * __wt_txn_global_init --
 *	Initialize the global transaction state.
 */
int
__wt_txn_global_init(WT_SESSION_IMPL *session, const char *cfg[])
{
	WT_CONNECTION_IMPL *conn;
	WT_TXN_GLOBAL *txn_global;
	WT_TXN_STATE *s;
	u_int i;

	WT_UNUSED(cfg);
	conn = S2C(session);

	txn_global = &conn->txn_global;
	txn_global->current = txn_global->last_running =
	    txn_global->oldest_id = WT_TXN_FIRST;

	WT_RET(__wt_spin_init(session,
	    &txn_global->id_lock, "transaction id lock"));
	WT_RET(__wt_rwlock_alloc(session,
	    &txn_global->nsnap_rwlock, "named snapshot lock"));
	txn_global->nsnap_oldest_id = WT_TXN_NONE;
	TAILQ_INIT(&txn_global->nsnaph);

	WT_RET(__wt_calloc_def(
	    session, conn->session_size, &txn_global->states));
	WT_CACHE_LINE_ALIGNMENT_VERIFY(session, txn_global->states);

	for (i = 0, s = txn_global->states; i < conn->session_size; i++, s++)
		s->id = s->snap_min = WT_TXN_NONE;

	return (0);
}
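
The example above leans on two WiredTiger idioms: WT_RET, which propagates any non-zero error code out of the caller immediately, and __wt_calloc_def, which returns zeroed memory so every per-session WT_TXN_STATE slot starts in a known state. Below is a minimal standalone sketch of the same pattern; MY_RET, my_txn_state, and the function names are hypothetical stand-ins, not WiredTiger APIs:

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

/* Early-return on error, in the spirit of WT_RET. */
#define MY_RET(call) do {						\
	int __r = (call);						\
	if (__r != 0)							\
		return (__r);						\
} while (0)

struct my_txn_state {			/* stand-in for WT_TXN_STATE */
	uint64_t id, snap_min;
};

static int
my_states_alloc(unsigned n, struct my_txn_state **statep)
{
	struct my_txn_state *s;
	unsigned i;

	/* Zeroed allocation, one slot per session, like __wt_calloc_def. */
	if ((s = calloc(n, sizeof(*s))) == NULL)
		return (ENOMEM);
	for (i = 0; i < n; i++)
		s[i].id = s[i].snap_min = 0;	/* WT_TXN_NONE analogue */
	*statep = s;
	return (0);
}

static int
my_txn_global_init(struct my_txn_state **statep)
{
	MY_RET(my_states_alloc(32, statep));	/* propagate any failure */
	return (0);
}

The payoff of the macro is that each initialization step reads as a single line while still unwinding correctly on the first failure.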
Example #2
/*
 * __wt_connection_open --
 *	Open a connection.
 */
int
__wt_connection_open(WT_CONNECTION_IMPL *conn, const char *cfg[])
{
	WT_SESSION_IMPL *session;

	/* Default session. */
	session = conn->default_session;
	WT_ASSERT(session, session->iface.connection == &conn->iface);

	/*
	 * Tell internal server threads to run: this must be set before opening
	 * any sessions.
	 */
	F_SET(conn, WT_CONN_SERVER_RUN | WT_CONN_LOG_SERVER_RUN);

	/* WT_SESSION_IMPL array. */
	WT_RET(__wt_calloc(session,
	    conn->session_size, sizeof(WT_SESSION_IMPL), &conn->sessions));
	WT_CACHE_LINE_ALIGNMENT_VERIFY(session, conn->sessions);

	/*
	 * Open the default session.  We open this before starting service
	 * threads because those may allocate and use session resources that
	 * need to get cleaned up on close.
	 */
	WT_RET(__wt_open_internal_session(
	    conn, "connection", false, 0, &session));

	/*
	 * The connection's default session is originally a static structure,
	 * swap that out for a more fully-functional session.  It's necessary
	 * to have this step: the session allocation code uses the connection's
	 * session, and if we pass a reference to the default session as the
	 * place to store the allocated session, things get confused and error
	 * handling can be corrupted.  So, we allocate into a stack variable
	 * and then assign it on success.
	 */
	conn->default_session = session;

	/*
	 * Publish: there must be a barrier to ensure the connection structure
	 * fields are set before other threads read from the pointer.
	 */
	WT_WRITE_BARRIER();

	/* Create the cache. */
	WT_RET(__wt_cache_create(session, cfg));

	/* Initialize transaction support. */
	WT_RET(__wt_txn_global_init(session, cfg));

	return (0);
}
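
The WT_WRITE_BARRIER comment above describes a publish pattern: fully initialize a structure, then fence before other threads can read the pointer to it. Here is a compact sketch of the same ordering using C11 atomics; g_conn, publish_conn, and acquire_conn are hypothetical names, and a release store stands in for WiredTiger's explicit barrier macro:

#include <stdatomic.h>

struct conn {
	int servers_running;		/* ... connection fields ... */
};

static struct conn *_Atomic g_conn;

static void
publish_conn(struct conn *c)
{
	c->servers_running = 1;		/* initialize all fields first */
	/*
	 * Release store: the writes above become visible before any
	 * thread can observe the pointer, mirroring the barrier above.
	 */
	atomic_store_explicit(&g_conn, c, memory_order_release);
}

static struct conn *
acquire_conn(void)
{
	/* Acquire load pairs with the release store in publish_conn. */
	return (atomic_load_explicit(&g_conn, memory_order_acquire));
}

Without the release/acquire pairing, a reader could see the pointer before the fields it points at, which is exactly the corruption the original comment warns about.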
Example #3
File: log_slot.c  Project: Jaryli/mongo
/*
 * __wt_log_slot_init --
 *	Initialize the slot array.
 */
int
__wt_log_slot_init(WT_SESSION_IMPL *session)
{
	WT_CONNECTION_IMPL *conn;
	WT_DECL_RET;
	WT_LOG *log;
	WT_LOGSLOT *slot;
	int32_t i;

	conn = S2C(session);
	log = conn->log;
	WT_CACHE_LINE_ALIGNMENT_VERIFY(session, log->slot_pool);
	for (i = 0; i < WT_SLOT_POOL; i++)
		log->slot_pool[i].slot_state = WT_LOG_SLOT_FREE;

	/*
	 * Allocate memory for buffers now that the arrays are set up. Separate
	 * this from the loop above to make error handling simpler.
	 */
	/*
	 * !!! If the buffer size is too close to the log file size, we will
	 * switch log files very aggressively.  Scale back the buffer for
	 * small log file sizes.
	 */
	log->slot_buf_size = (uint32_t)WT_MIN(
	    (size_t)conn->log_file_max / 10, WT_LOG_SLOT_BUF_SIZE);
	for (i = 0; i < WT_SLOT_POOL; i++) {
		WT_ERR(__wt_buf_init(session,
		    &log->slot_pool[i].slot_buf, log->slot_buf_size));
		F_SET(&log->slot_pool[i], WT_SLOT_INIT_FLAGS);
	}
	WT_STAT_FAST_CONN_INCRV(session,
	    log_buffer_size, log->slot_buf_size * WT_SLOT_POOL);
	/*
	 * Set up the available slot from the pool the first time.
	 */
	slot = &log->slot_pool[0];
	/*
	 * We cannot initialize the release LSN in the activate function
	 * because that function can be called after a log file switch.
	 */
	slot->slot_release_lsn = log->alloc_lsn;
	__wt_log_slot_activate(session, slot);
	log->active_slot = slot;

	if (0) {
err:		while (--i >= 0)
			__wt_buf_free(session, &log->slot_pool[i].slot_buf);
	}
	return (ret);
}
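
The "if (0) { err: ... }" block above is a WiredTiger cleanup idiom: on success the block is skipped entirely, while goto err jumps into it on failure, and the surviving loop counter i records exactly how many buffers were initialized, so "while (--i >= 0)" frees only those. A standalone sketch with hypothetical names, assuming plain malloc/free in place of __wt_buf_init/__wt_buf_free:

#include <errno.h>
#include <stdlib.h>

static int
bufs_init(char **bufs, size_t size, int n)
{
	int i, ret;

	ret = 0;
	for (i = 0; i < n; i++)
		if ((bufs[i] = malloc(size)) == NULL) {
			ret = ENOMEM;
			goto err;	/* exactly i buffers were built */
		}

	if (0) {
err:		while (--i >= 0)	/* free only what we built */
			free(bufs[i]);
	}
	return (ret);
}

The design gives a single return path for both outcomes: ret stays 0 when the loop completes, and the unwind never touches slots that were never initialized.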
Example #4
/*
 * __wt_connection_init --
 *	Structure initialization for a just-created WT_CONNECTION_IMPL handle.
 */
int
__wt_connection_init(WT_CONNECTION_IMPL *conn)
{
	WT_SESSION_IMPL *session;
	u_int i;

	session = conn->default_session;

	for (i = 0; i < WT_HASH_ARRAY_SIZE; i++) {
		TAILQ_INIT(&conn->dhhash[i]);	/* Data handle hash lists */
		TAILQ_INIT(&conn->fhhash[i]);	/* File handle hash lists */
	}

	TAILQ_INIT(&conn->dhqh);		/* Data handle list */
	TAILQ_INIT(&conn->dlhqh);		/* Library list */
	TAILQ_INIT(&conn->dsrcqh);		/* Data source list */
	TAILQ_INIT(&conn->fhqh);		/* File list */
	TAILQ_INIT(&conn->collqh);		/* Collator list */
	TAILQ_INIT(&conn->compqh);		/* Compressor list */
	TAILQ_INIT(&conn->encryptqh);		/* Encryptor list */
	TAILQ_INIT(&conn->extractorqh);		/* Extractor list */

	TAILQ_INIT(&conn->lsmqh);		/* WT_LSM_TREE list */

	/* Set up the LSM work queues. */
	TAILQ_INIT(&conn->lsm_manager.switchqh);
	TAILQ_INIT(&conn->lsm_manager.appqh);
	TAILQ_INIT(&conn->lsm_manager.managerqh);

	/* Random numbers. */
	__wt_random_init(&session->rnd);

	/* Configuration. */
	WT_RET(__wt_conn_config_init(session));

	/* Statistics. */
	WT_RET(__wt_stat_connection_init(session, conn));

	/* Spinlocks. */
	WT_RET(__wt_spin_init(session, &conn->api_lock, "api"));
	WT_SPIN_INIT_TRACKED(session, &conn->checkpoint_lock, checkpoint);
	WT_SPIN_INIT_TRACKED(session, &conn->dhandle_lock, handle_list);
	WT_RET(__wt_spin_init(session, &conn->encryptor_lock, "encryptor"));
	WT_RET(__wt_spin_init(session, &conn->fh_lock, "file list"));
	WT_RET(__wt_spin_init(session, &conn->las_lock, "lookaside table"));
	WT_SPIN_INIT_TRACKED(session, &conn->metadata_lock, metadata);
	WT_RET(__wt_spin_init(session, &conn->reconfig_lock, "reconfigure"));
	WT_SPIN_INIT_TRACKED(session, &conn->schema_lock, schema);
	WT_SPIN_INIT_TRACKED(session, &conn->table_lock, table);
	WT_RET(__wt_spin_init(session, &conn->turtle_lock, "turtle file"));

	/* Read-write locks */
	WT_RET(__wt_rwlock_alloc(
	    session, &conn->hot_backup_lock, "hot backup"));

	WT_RET(__wt_calloc_def(session, WT_PAGE_LOCKS, &conn->page_lock));
	WT_CACHE_LINE_ALIGNMENT_VERIFY(session, conn->page_lock);
	for (i = 0; i < WT_PAGE_LOCKS; ++i)
		WT_RET(
		    __wt_spin_init(session, &conn->page_lock[i], "btree page"));

	/* Set up the spin locks for the LSM manager queues. */
	WT_RET(__wt_spin_init(session,
	    &conn->lsm_manager.app_lock, "LSM application queue lock"));
	WT_RET(__wt_spin_init(session,
	    &conn->lsm_manager.manager_lock, "LSM manager queue lock"));
	WT_RET(__wt_spin_init(
	    session, &conn->lsm_manager.switch_lock, "LSM switch queue lock"));
	WT_RET(__wt_cond_alloc(
	    session, "LSM worker cond", false, &conn->lsm_manager.work_cond));

	/*
	 * Generation numbers.
	 *
	 * Start split generations at one.  Threads publish this generation
	 * number before examining tree structures, and zero when they leave.
	 * We need to distinguish between threads that are in a tree before the
	 * first split has happened, and threads that are not in a tree.
	 */
	conn->split_gen = 1;

	/*
	 * Block manager.
	 * XXX
	 * If there's ever a second block manager, we'll want to make this
	 * more opaque, but for now this is simpler.
	 */
	WT_RET(__wt_spin_init(session, &conn->block_lock, "block manager"));
	for (i = 0; i < WT_HASH_ARRAY_SIZE; i++)
		TAILQ_INIT(&conn->blockhash[i]);/* Block handle hash lists */
	TAILQ_INIT(&conn->blockqh);		/* Block manager list */

	return (0);
}
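
Every example on this page calls WT_CACHE_LINE_ALIGNMENT_VERIFY immediately after allocating an array that many threads index concurrently (session slots, transaction states, log slots, page locks). The point is to catch allocations where neighboring slots share a cache line and ping-pong between cores, i.e. false sharing. A minimal sketch of such a check, assuming a 64-byte cache line; MY_CACHE_LINE and MY_ALIGN_VERIFY are hypothetical, and the real WiredTiger macro's definition is not shown here:

#include <assert.h>
#include <stdint.h>

#define MY_CACHE_LINE	64	/* assumed line size; commonly 64 on x86 */

/* Fail fast if a hot shared array does not start on a line boundary. */
#define MY_ALIGN_VERIFY(p)						\
	assert(((uintptr_t)(p) % MY_CACHE_LINE) == 0)

Alignment alone is not enough when elements are smaller than a line; padding each slot out to a full cache line is the usual companion technique, so that two threads' slots never land in the same line.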