/*
 * __wt_txn_get_snapshot --
 *	Set up a snapshot in the current transaction, without allocating an ID.
 *
 *	Builds the list of concurrently running transaction IDs by scanning the
 *	global per-session state array, retrying until the scan completes
 *	without the global current ID moving underneath it (lock-free: the
 *	retry loop plus WT_READ_BARRIER stand in for a latch).  Also publishes
 *	this session's snap_min so other threads can compute the oldest
 *	snapshot in the system.
 *
 *	my_id:  this session's own transaction ID (or WT_TXN_NONE); the
 *	        published snap_min is clamped so it never exceeds my_id.
 *	max_id: if not WT_TXN_NONE, an exclusive upper bound on IDs included
 *	        in the snapshot.
 *	force:  if nonzero, rebuild the snapshot even if the cached
 *	        generation/current-ID pair says nothing changed.
 */
void
__wt_txn_get_snapshot(
    WT_SESSION_IMPL *session, wt_txnid_t my_id, wt_txnid_t max_id, int force)
{
	WT_CONNECTION_IMPL *conn;
	WT_TXN *txn;
	WT_TXN_GLOBAL *txn_global;
	WT_TXN_STATE *s, *txn_state;
	wt_txnid_t current_id, id, oldest_snap_min;
	uint32_t i, n, session_cnt;

	conn = S2C(session);
	txn = &session->txn;
	txn_global = &conn->txn_global;
	txn_state = &txn_global->states[session->id];

	/*
	 * If nothing has changed since last time, we're done: reuse the cached
	 * snapshot, just re-publish its snap_min.
	 */
	if (!force &&
	    txn->last_id == txn_global->current &&
	    txn->last_gen == txn_global->gen) {
		txn_state->snap_min = txn->snap_min;
		return;
	}

	do {
		/* Take a copy of the current session ID. */
		txn->last_gen = txn->last_oldest_gen = txn_global->gen;
		txn->last_id = oldest_snap_min = current_id =
		    txn_global->current;

		/* Copy the array of concurrent transactions. */
		WT_ORDERED_READ(session_cnt, conn->session_cnt);
		for (i = n = 0, s = txn_global->states;
		    i < session_cnt; i++, s++) {
			/* Ignore the session's own transaction. */
			if (i == session->id)
				continue;

			/*
			 * Track the oldest published snap_min of any other
			 * session, for the oldest-reader calculation.
			 */
			if ((id = s->snap_min) != WT_TXN_NONE)
				if (TXNID_LT(id, oldest_snap_min))
					oldest_snap_min = id;

			/*
			 * Include the session's running transaction ID in the
			 * snapshot, unless it is at or beyond max_id.
			 */
			if ((id = s->id) == WT_TXN_NONE)
				continue;
			else if (max_id == WT_TXN_NONE ||
			    TXNID_LT(id, max_id))
				txn->snapshot[n++] = id;
		}

		/*
		 * Ensure the snapshot reads are scheduled before re-checking
		 * the global current ID.
		 */
		WT_READ_BARRIER();
	} while (current_id != txn_global->current);

	/*
	 * Sort the snapshot and set snap_min/snap_max; the upper bound is
	 * max_id when the caller supplied one, otherwise the current ID we
	 * scanned against.
	 */
	__txn_sort_snapshot(session, n,
	    (max_id != WT_TXN_NONE) ? max_id : current_id, oldest_snap_min);

	/*
	 * Publish snap_min, clamped to my_id so we never advertise a snapshot
	 * minimum newer than our own transaction.
	 */
	txn_state->snap_min =
	    (my_id == WT_TXN_NONE || TXNID_LT(txn->snap_min, my_id)) ?
	    txn->snap_min : my_id;
}
/*
 * __compact_checkpoint --
 *	Perform a checkpoint for compaction.
 *
 *	Returns 0 on success, or an error (including WT_ETIMEDOUT via the
 *	compaction timeout check) on failure.
 */
static int
__compact_checkpoint(WT_SESSION_IMPL *session)
{
	WT_DECL_RET;
	WT_TXN_GLOBAL *txn_global;
	uint64_t gen_at_start;

	/*
	 * Force compaction checkpoints: we don't want to skip it because the
	 * work we need to have done is done in the underlying block manager.
	 */
	const char *checkpoint_cfg[] = {
	    WT_CONFIG_BASE(session, WT_SESSION_checkpoint), "force=1", NULL };

	/* Checkpoints take a lot of time, check if we've run out. */
	WT_RET(__wt_session_compact_check_timeout(session));

	ret = __wt_txn_checkpoint(session, checkpoint_cfg, false);
	if (ret == 0)
		return (0);

	/* Anything other than EBUSY is a hard failure. */
	WT_RET_BUSY_OK(ret);

	/*
	 * If there's a checkpoint running, wait for it to complete, checking if
	 * we're out of time.  If there's no checkpoint running or the checkpoint
	 * generation number changes, the checkpoint blocking us has completed.
	 */
	txn_global = &S2C(session)->txn_global;
	gen_at_start = txn_global->checkpoint_gen;
	for (;;) {
		/* Re-read the shared checkpoint state each iteration. */
		WT_READ_BARRIER();
		if (!txn_global->checkpoint_running)
			break;
		if (gen_at_start != txn_global->checkpoint_gen)
			break;

		WT_RET(__wt_session_compact_check_timeout(session));
		__wt_sleep(2, 0);
	}

	return (0);
}
/*
 * __wt_txn_begin --
 *	Begin a transaction.
 *
 *	Parses the "isolation" configuration (falling back to the session
 *	default), allocates a transaction ID with an atomic increment, publishes
 *	it to the global state table and, for snapshot isolation, builds the
 *	snapshot of concurrent transactions.  The outer retry loop resolves
 *	races between concurrent allocators: only the thread holding the latest
 *	ID proceeds; losers re-allocate.  Returns 0 on success or a
 *	configuration-parsing error.
 */
int
__wt_txn_begin(WT_SESSION_IMPL *session, const char *cfg[])
{
	WT_CONFIG_ITEM cval;
	WT_CONNECTION_IMPL *conn;
	WT_TXN *txn;
	WT_TXN_GLOBAL *txn_global;
	WT_TXN_STATE *s, *txn_state;
	wt_txnid_t id, oldest_snap_min;
	uint32_t i, n, session_cnt;

	conn = S2C(session);
	txn = &session->txn;
	txn_global = &conn->txn_global;
	txn_state = &txn_global->states[session->id];

	/* This session must not already have a running transaction. */
	WT_ASSERT(session, txn_state->id == WT_TXN_NONE);

	/*
	 * Choose the isolation level: an explicit "isolation" config wins,
	 * otherwise inherit the session default.
	 */
	WT_RET(__wt_config_gets_defno(session, cfg, "isolation", &cval));
	if (cval.len == 0)
		txn->isolation = session->isolation;
	else
		txn->isolation =
		    WT_STRING_MATCH("snapshot", cval.str, cval.len) ?
		    TXN_ISO_SNAPSHOT :
		    WT_STRING_MATCH("read-committed", cval.str, cval.len) ?
		    TXN_ISO_READ_COMMITTED : TXN_ISO_READ_UNCOMMITTED;

	F_SET(txn, TXN_RUNNING);

	do {
		/*
		 * Allocate a transaction ID.
		 *
		 * We use an atomic increment to ensure that we get a unique
		 * ID, then publish that to the global state table.
		 *
		 * If two threads race to allocate an ID, only the latest ID
		 * will proceed.  The winning thread can be sure its snapshot
		 * contains all of the earlier active IDs.  Threads that race
		 * and get an earlier ID may not appear in the snapshot,
		 * but they will loop and allocate a new ID before proceeding
		 * to make any updates.
		 *
		 * This potentially wastes transaction IDs when threads race to
		 * begin transactions, but that is the price we pay to keep
		 * this path latch free.
		 */
		do {
			/* Skip the reserved WT_TXN_NONE/WT_TXN_ABORTED IDs. */
			txn->id = WT_ATOMIC_ADD(txn_global->current, 1);
		} while (txn->id == WT_TXN_NONE || txn->id == WT_TXN_ABORTED);
		WT_PUBLISH(txn_state->id, txn->id);

		/*
		 * If we are starting a snapshot isolation transaction, get
		 * a snapshot of the running transactions.
		 *
		 * If we already have a snapshot (e.g., for an auto-commit
		 * operation), update it so that the newly-allocated ID is
		 * visible.
		 */
		if (txn->isolation == TXN_ISO_SNAPSHOT) {
			txn->last_gen = txn->last_oldest_gen = txn_global->gen;
			oldest_snap_min = txn->id;

			/* Copy the array of concurrent transactions. */
			WT_ORDERED_READ(session_cnt, conn->session_cnt);
			for (i = n = 0, s = txn_global->states;
			    i < session_cnt; i++, s++) {
				/* Track the oldest published snap_min. */
				if ((id = s->snap_min) != WT_TXN_NONE)
					if (TXNID_LT(id, oldest_snap_min))
						oldest_snap_min = id;
				if ((id = s->id) == WT_TXN_NONE)
					continue;
				else
					txn->snapshot[n++] = id;
			}
			__txn_sort_snapshot(
			    session, n, txn->id, oldest_snap_min);
			txn_state->snap_min = txn->snap_min;
		}

		/*
		 * Ensure the snapshot reads are complete before re-checking
		 * the global current ID.
		 */
		WT_READ_BARRIER();
	} while (txn->id != txn_global->current);

	return (0);
}