void yieldLocksForPreparedTransactions(OperationContext* opCtx) {
    // A fresh Client + OperationContext gives us an empty locker, which is what
    // refreshLocksForPreparedTransaction() requires in order to yield. The
    // AlternativeClientRegion must outlive the new opCtx, so it is set up first.
    auto client = opCtx->getServiceContext()->makeClient("prepared-txns-yield-locks");
    AlternativeClientRegion acr(client);
    auto scanOpCtx = cc().makeOperationContext();

    // Walk every session again and pick out the ones holding a prepared
    // transaction so their locks can be yielded.
    SessionKiller::Matcher allSessionsMatcher(
        KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(scanOpCtx.get())});
    killSessionsAction(
        scanOpCtx.get(),
        allSessionsMatcher,
        [](const ObservableSession& session) {
            return TransactionParticipant::get(session.get())->transactionIsPrepared();
        },
        [](OperationContext* killerOpCtx, const SessionToKill& session) {
            auto const participant = TransactionParticipant::get(session.get());
            // The scan above captures every prepared transaction. Sessions created
            // after the scan cannot become prepared while stepdown is in progress,
            // because the enqueued RSTL blocks any new writes.
            if (participant->transactionIsPrepared()) {
                LOG(3) << "Yielding locks of prepared transaction. SessionId: "
                       << session.getSessionId().getId()
                       << " TxnNumber: " << participant->getActiveTxnNumber();
                // 'true' selects the yield direction of the lock refresh.
                participant->refreshLocksForPreparedTransaction(killerOpCtx, true);
            }
        });
}
void killSessionsAbortUnpreparedTransactions(OperationContext* opCtx, const SessionKiller::Matcher& matcher, ErrorCodes::Error reason) { killSessionsAction( opCtx, matcher, [](const ObservableSession& session) { auto participant = TransactionParticipant::get(session); return participant.inMultiDocumentTransaction() && !participant.transactionIsPrepared(); }, [](OperationContext* opCtx, const SessionToKill& session) { TransactionParticipant::get(session).abortTransactionIfNotPrepared(opCtx); }, reason); }
void MongoDSessionCatalog::onStepUp(OperationContext* opCtx) {
    // Invalidate sessions that could have a retryable write on them, so that we
    // refresh from disk in case the in-memory state was out of sync.
    const auto catalog = SessionCatalog::get(opCtx);

    // The use of shared_ptr here works around the limitation of stdx::function
    // that the functor must be copyable.
    auto sessionKillTokens = std::make_shared<std::vector<SessionCatalog::KillToken>>();

    // Scan all sessions and collect the ids of prepared transactions so their
    // locks can be reacquired below. There may be sessions that are checked out
    // during this scan, but none of them can be prepared transactions, since only
    // oplog application can make transactions prepared on secondaries and oplog
    // application has been stopped at this moment.
    std::vector<LogicalSessionId> sessionIdToReacquireLocks;

    SessionKiller::Matcher matcher(
        KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(opCtx)});
    catalog->scanSessions(matcher, [&](const ObservableSession& session) {
        // NOTE(review): pointer-style access here matches the rest of this file's
        // L1 usage; L2 uses reference-style — confirm against TransactionParticipant::get.
        const auto txnParticipant = TransactionParticipant::get(session.get());
        // Sessions without an active multi-document transaction (i.e. potential
        // retryable writes) are killed so their state is refreshed from disk.
        if (!txnParticipant->inMultiDocumentTransaction()) {
            sessionKillTokens->emplace_back(session.kill());
        }

        if (txnParticipant->transactionIsPrepared()) {
            sessionIdToReacquireLocks.emplace_back(session.getSessionId());
        }
    });
    killSessionTokensFunction(opCtx, sessionKillTokens);

    {
        // Create a new opCtx because we need an empty locker to refresh the locks.
        auto newClient = opCtx->getServiceContext()->makeClient("restore-prepared-txn");
        AlternativeClientRegion acr(newClient);
        for (const auto& sessionId : sessionIdToReacquireLocks) {
            auto newOpCtx = cc().makeOperationContext();
            newOpCtx->setLogicalSessionId(sessionId);
            MongoDOperationContextSession ocs(newOpCtx.get());
            auto txnParticipant =
                TransactionParticipant::get(OperationContextSession::get(newOpCtx.get()));
            // 'false' = reacquire (rather than yield) the prepared txn's locks.
            txnParticipant->refreshLocksForPreparedTransaction(newOpCtx.get(), false);
        }
    }

    // Ensure that the transactions table (config.transactions) exists.
    const size_t initialExtentSize = 0;
    const bool capped = false;
    // Bug fix: this was declared 'const bool maxSize = 0;'. It is forwarded as
    // createCollection()'s max-document-count argument (an int), not a flag, so
    // declare it with the correct type. The value (0 = unlimited) is unchanged.
    const int maxSize = 0;

    BSONObj result;
    DBDirectClient client(opCtx);

    if (client.createCollection(NamespaceString::kSessionTransactionsTableNamespace.ns(),
                                initialExtentSize,
                                capped,
                                maxSize,
                                &result)) {
        return;  // collection created successfully
    }

    const auto status = getStatusFromCommandResult(result);

    // Another thread (or a previous step-up) may have created it already.
    if (status == ErrorCodes::NamespaceExists) {
        return;
    }

    uassertStatusOKWithContext(status,
                               str::stream()
                                   << "Failed to create the "
                                   << NamespaceString::kSessionTransactionsTableNamespace.ns()
                                   << " collection");
}