// Returns the status describing why this operation should be interrupted, or
// Status::OK() if it should keep running. Never throws. Check order matters:
// shutdown kill first, then deadline expiry, then (unless interrupts are
// ignored) the failpoint and any kill code already recorded on this context.
Status OperationContext::checkForInterruptNoAssert() noexcept {
    // TODO: Remove the MONGO_likely(getClient()) once all operation contexts are constructed with
    // clients.
    if (MONGO_likely(getClient() && getServiceContext()) &&
        getServiceContext()->getKillAllOperations()) {
        // Server-wide shutdown takes precedence over any per-operation state.
        return Status(ErrorCodes::InterruptedAtShutdown, "interrupted at shutdown");
    }

    if (hasDeadlineExpired()) {
        // An artificial deadline interrupts the current wait without marking
        // the whole operation as killed.
        if (!_hasArtificialDeadline) {
            markKilled(_timeoutError);
        }
        return Status(_timeoutError, "operation exceeded time limit");
    }

    // Operations that ignore interrupts still honor shutdown and deadlines
    // above, but skip failpoint- and kill-code-driven interruption below.
    if (_ignoreInterrupts) {
        return Status::OK();
    }

    // Test-only failpoint: mark matching operations as killed so the kill
    // status check below reports them as interrupted.
    MONGO_FAIL_POINT_BLOCK(checkForInterruptFail, scopedFailPoint) {
        if (opShouldFail(getClient(), scopedFailPoint.getData())) {
            log() << "set pending kill on op " << getOpID() << ", for checkForInterruptFail";
            markKilled();
        }
    }

    const auto killStatus = getKillStatus();
    if (killStatus != ErrorCodes::OK) {
        return Status(killStatus, "operation was interrupted");
    }

    return Status::OK();
}
void SyncTailTest::setUp() { ServiceContextMongoDTest::setUp(); auto service = getServiceContext(); ReplicationCoordinator::set(service, stdx::make_unique<ReplicationCoordinatorMock>(service)); auto storageInterface = stdx::make_unique<StorageInterfaceMock>(); _storageInterface = storageInterface.get(); storageInterface->insertDocumentsFn = [](OperationContext*, const NamespaceString&, const std::vector<InsertStatement>&) { return Status::OK(); }; StorageInterface::set(service, std::move(storageInterface)); DropPendingCollectionReaper::set( service, stdx::make_unique<DropPendingCollectionReaper>(_storageInterface)); _replicationProcess = new ReplicationProcess( _storageInterface, stdx::make_unique<ReplicationConsistencyMarkersMock>()); ReplicationProcess::set(cc().getServiceContext(), std::unique_ptr<ReplicationProcess>(_replicationProcess)); _opCtx = cc().makeOperationContext(); _opsApplied = 0; _applyOp = [](OperationContext* opCtx, Database* db, const BSONObj& op, bool inSteadyStateReplication, stdx::function<void()>) { return Status::OK(); }; _applyCmd = [](OperationContext* opCtx, const BSONObj& op, bool) { return Status::OK(); }; _incOps = [this]() { _opsApplied++; }; }
// Per-test initialization: binds a Client to this thread, installs a logical
// clock, lazily initializes an ephemeral storage engine, and registers the
// UUID catalog op observer. Initialization order matters: the storage engine
// must exist before observers that react to collection lifecycle events.
void ServiceContextMongoDTest::setUp() {
    Client::initThread(getThreadName());
    auto const serviceContext = getServiceContext();

    auto logicalClock = stdx::make_unique<LogicalClock>(serviceContext);
    LogicalClock::set(serviceContext, std::move(logicalClock));

    if (!serviceContext->getGlobalStorageEngine()) {
        // When using the "ephemeralForTest" storage engine, it is fine for the temporary directory
        // to go away after the global storage engine is initialized.
        unittest::TempDir tempDir("service_context_d_test_fixture");
        storageGlobalParams.dbpath = tempDir.path();
        storageGlobalParams.engine = "ephemeralForTest";
        storageGlobalParams.engineSetByUser = true;
        checked_cast<ServiceContextMongoD*>(serviceContext)->createLockFile();
        serviceContext->initializeGlobalStorageEngine();
        // NOTE(review): this no-op observer appears to be replaced by the
        // registry installed at the end of this function — confirm intended.
        serviceContext->setOpObserver(stdx::make_unique<OpObserverNoop>());
    }

    // Set up UUID Catalog observer. This is necessary because the Collection destructor contains an
    // invariant to ensure the UUID corresponding to that Collection object is no longer associated
    // with that Collection object in the UUIDCatalog. UUIDs may be registered in the UUIDCatalog
    // directly in certain code paths, but they can only be removed from the UUIDCatalog via a
    // UUIDCatalogObserver. It is therefore necessary to install the observer to ensure the
    // invariant in the Collection destructor is not triggered.
    auto observerRegistry = stdx::make_unique<OpObserverRegistry>();
    observerRegistry->addObserver(stdx::make_unique<UUIDCatalogObserver>());
    serviceContext->setOpObserver(std::unique_ptr<OpObserver>(observerRegistry.release()));
}
// Verifies that getKeyForSigning() selects the key whose expiry covers the
// requested logical time: an older key for times before its expiry, and the
// newer key once the requested time reaches the older key's expiry.
TEST_F(KeysManagerShardedTest, GetKeyForSigningShouldReturnRightOldKey) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument firstKey(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    KeysCollectionDocument secondKey(
        2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, firstKey.toBSON()));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, secondKey.toBSON()));

    keyManager()->refreshNow(operationContext());

    {
        // A time before the first key's expiry selects key 1.
        auto swKey = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 0)));
        ASSERT_OK(swKey.getStatus());

        auto signingKey = swKey.getValue();
        ASSERT_EQ(1, signingKey.getKeyId());
        ASSERT_EQ(firstKey.getKey(), signingKey.getKey());
        ASSERT_EQ(Timestamp(105, 0), signingKey.getExpiresAt().asTimestamp());
    }

    {
        // At the first key's expiry, signing rolls over to key 2.
        auto swKey = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(105, 0)));
        ASSERT_OK(swKey.getStatus());

        auto signingKey = swKey.getValue();
        ASSERT_EQ(2, signingKey.getKeyId());
        ASSERT_EQ(secondKey.getKey(), signingKey.getKey());
        ASSERT_EQ(Timestamp(110, 0), signingKey.getExpiresAt().asTimestamp());
    }
}
void RollbackTest::setUp() { _storageInterface = new StorageInterfaceRollback(); auto serviceContext = getServiceContext(); auto consistencyMarkers = stdx::make_unique<ReplicationConsistencyMarkersMock>(); auto recovery = stdx::make_unique<ReplicationRecoveryImpl>(_storageInterface, consistencyMarkers.get()); _replicationProcess = stdx::make_unique<ReplicationProcess>( _storageInterface, std::move(consistencyMarkers), std::move(recovery)); _dropPendingCollectionReaper = new DropPendingCollectionReaper(_storageInterface); DropPendingCollectionReaper::set( serviceContext, std::unique_ptr<DropPendingCollectionReaper>(_dropPendingCollectionReaper)); StorageInterface::set(serviceContext, std::unique_ptr<StorageInterface>(_storageInterface)); _coordinator = new ReplicationCoordinatorRollbackMock(serviceContext); ReplicationCoordinator::set(serviceContext, std::unique_ptr<ReplicationCoordinator>(_coordinator)); setOplogCollectionName(serviceContext); _opCtx = makeOperationContext(); _replicationProcess->getConsistencyMarkers()->clearAppliedThrough(_opCtx.get(), {}); _replicationProcess->getConsistencyMarkers()->setMinValid(_opCtx.get(), OpTime{}); _replicationProcess->initializeRollbackID(_opCtx.get()).transitional_ignore(); // Increase rollback log component verbosity for unit tests. mongo::logger::globalLogDomain()->setMinimumLoggedSeverity( logger::LogComponent::kReplicationRollback, logger::LogSeverity::Debug(2)); auto observerRegistry = checked_cast<OpObserverRegistry*>(serviceContext->getOpObserver()); observerRegistry->addObserver(std::make_unique<RollbackTestOpObserver>()); }
// Fixture constructor: stashes the process-global storage parameters (restored
// by the destructor), points storage at a per-test temp directory, initializes
// the requested storage engine, and installs clock/runner/observer services.
// Order matters: global params must be set before initializeStorageEngine().
ServiceContextMongoDTest::ServiceContextMongoDTest(std::string engine, RepairAction repair)
    : _tempDir("service_context_d_test_fixture") {
    // std::exchange both installs the test value and records the previous
    // global value for restoration at teardown.
    _stashedStorageParams.engine = std::exchange(storageGlobalParams.engine, std::move(engine));
    _stashedStorageParams.engineSetByUser =
        std::exchange(storageGlobalParams.engineSetByUser, true);
    _stashedStorageParams.repair =
        std::exchange(storageGlobalParams.repair, (repair == RepairAction::kRepair));

    auto const serviceContext = getServiceContext();
    serviceContext->setServiceEntryPoint(std::make_unique<ServiceEntryPointMongod>(serviceContext));
    auto logicalClock = std::make_unique<LogicalClock>(serviceContext);
    LogicalClock::set(serviceContext, std::move(logicalClock));

    // Set up a fake no-op PeriodicRunner. No jobs will ever get run, which is
    // desired behavior for unit tests unrelated to background jobs.
    auto runner = std::make_unique<MockPeriodicRunnerImpl>();
    serviceContext->setPeriodicRunner(std::move(runner));

    storageGlobalParams.dbpath = _tempDir.path();

    initializeStorageEngine(serviceContext, StorageEngineInitFlags::kNone);

    // Set up UUID Catalog observer. This is necessary because the Collection destructor contains an
    // invariant to ensure the UUID corresponding to that Collection object is no longer associated
    // with that Collection object in the UUIDCatalog. UUIDs may be registered in the UUIDCatalog
    // directly in certain code paths, but they can only be removed from the UUIDCatalog via a
    // UUIDCatalogObserver. It is therefore necessary to install the observer to ensure the
    // invariant in the Collection destructor is not triggered.
    auto observerRegistry = checked_cast<OpObserverRegistry*>(serviceContext->getOpObserver());
    observerRegistry->addObserver(std::make_unique<UUIDCatalogObserver>());
}
// Requesting validation for a key id that was never inserted must fail with
// KeyNotFound rather than returning a fabricated key.
TEST_F(KeysManagerShardedTest, GetKeyForValidationErrorsIfKeyDoesntExist) {
    keyManager()->startMonitoring(getServiceContext());

    const auto result =
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, result.getStatus());
}
// With two keys stored, getKeyForValidation() must return the key matching the
// requested key id, independent of the logical time supplied.
TEST_F(KeysManagerShardedTest, GetKeyWithMultipleKeys) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument firstKey(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    KeysCollectionDocument secondKey(
        2, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(205, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, firstKey.toBSON()));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, secondKey.toBSON()));

    // Key id 1 resolves to the first document.
    auto swKey =
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(swKey.getStatus());

    auto foundKey = swKey.getValue();
    ASSERT_EQ(1, foundKey.getKeyId());
    ASSERT_EQ(firstKey.getKey(), foundKey.getKey());
    ASSERT_EQ(Timestamp(105, 0), foundKey.getExpiresAt().asTimestamp());

    // Key id 2 resolves to the second document, even at the same logical time.
    swKey =
        keyManager()->getKeyForValidation(operationContext(), 2, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(swKey.getStatus());

    foundKey = swKey.getValue();
    ASSERT_EQ(2, foundKey.getKeyId());
    ASSERT_EQ(secondKey.getKey(), foundKey.getKey());
    ASSERT_EQ(Timestamp(205, 0), foundKey.getExpiresAt().asTimestamp());
}
// hasSeenKeys() must stay false while key generation is disabled (via the
// "disableKeyGeneration" failpoint) and flip to true once a refresh succeeds
// in producing keys after the failpoint is released.
TEST_F(KeysManagerShardedTest, HasSeenKeysIsFalseUntilKeysAreFound) {
    const LogicalTime currentTime(Timestamp(100, 0));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    ASSERT_EQ(false, keyManager()->hasSeenKeys());

    {
        // While generation is disabled, refreshes find nothing and lookups fail.
        FailPointEnableBlock failKeyGenerationBlock("disableKeyGeneration");

        keyManager()->startMonitoring(getServiceContext());
        keyManager()->enableKeyGenerator(operationContext(), true);

        keyManager()->refreshNow(operationContext());
        auto keyStatus = keyManager()->getKeyForValidation(
            operationContext(), 1, LogicalTime(Timestamp(100, 0)));
        ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());

        ASSERT_EQ(false, keyManager()->hasSeenKeys());
    }

    // Once the failpoint is disabled, the generator can make keys again.
    keyManager()->refreshNow(operationContext());
    auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(keyStatus.getStatus());

    ASSERT_EQ(true, keyManager()->hasSeenKeys());
}
// Even when both key creation (failCollectionInserts) and the initial query
// (planExecutorAlwaysFails) fail, the manager must still be able to serve the
// pre-existing key from its cache once the query failpoint is lifted.
TEST_F(KeysManagerShardedTest, ShouldStillBeAbleToUpdateCacheEvenIfItCantCreateKeys) {
    KeysCollectionDocument origKey1(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey1.toBSON()));

    // Set the time to be very ahead so the updater will be forced to create new keys.
    const LogicalTime fakeTime(Timestamp(20000, 0));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(fakeTime);

    // Inserts keep failing for the whole test; queries fail only inside the
    // inner scope, so the refresh after it can still read the existing key.
    FailPointEnableBlock failWriteBlock("failCollectionInserts");

    {
        FailPointEnableBlock failQueryBlock("planExecutorAlwaysFails");
        keyManager()->startMonitoring(getServiceContext());
        keyManager()->enableKeyGenerator(operationContext(), true);
    }

    auto keyStatus =
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_OK(keyStatus.getStatus());

    auto key = keyStatus.getValue();
    ASSERT_EQ(1, key.getKeyId());
    ASSERT_EQ(origKey1.getKey(), key.getKey());
    ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp());
}
// Returns how much of the operation's time budget remains, clamped at zero;
// operations with no deadline report Milliseconds::max().
Milliseconds OperationContext::getRemainingMaxTimeMillis() const {
    if (!hasDeadline()) {
        return Milliseconds::max();
    }
    const auto now = getServiceContext()->getFastClockSource()->now();
    return std::max(Milliseconds{0}, getDeadline() - now);
}
void SyncTailTest::tearDown() { auto service = getServiceContext(); _opCtx.reset(); ReplicationProcess::set(service, {}); DropPendingCollectionReaper::set(service, {}); StorageInterface::set(service, {}); ServiceContextMongoDTest::tearDown(); }
// Theory of operation for waitForConditionOrInterruptNoAssertUntil and markKilled: // // An operation indicates to potential killers that it is waiting on a condition variable by setting // _waitMutex and _waitCV, while holding the lock on its parent Client. It then unlocks its Client, // unblocking any killers, which are required to have locked the Client before calling markKilled. // // When _waitMutex and _waitCV are set, killers must lock _waitMutex before setting the _killCode, // and must signal _waitCV before releasing _waitMutex. Unfortunately, they must lock _waitMutex // without holding a lock on Client to avoid a deadlock with callers of // waitForConditionOrInterruptNoAssertUntil(). So, in the event that _waitMutex is set, the killer // increments _numKillers, drops the Client lock, acquires _waitMutex and then re-acquires the // Client lock. We know that the Client, its OperationContext and _waitMutex will remain valid // during this period because the caller of waitForConditionOrInterruptNoAssertUntil will not return // while _numKillers > 0 and will not return until it has itself reacquired _waitMutex. Instead, // that caller will keep waiting on _waitCV until _numKillers drops to 0. // // In essence, when _waitMutex is set, _killCode is guarded by _waitMutex and _waitCV, but when // _waitMutex is not set, it is guarded by the Client spinlock. Changing _waitMutex is itself // guarded by the Client spinlock and _numKillers. // // When _numKillers does drop to 0, the waiter will null out _waitMutex and _waitCV. // // This implementation adds a minimum of two spinlock acquire-release pairs to every condition // variable wait. 
// Waits on 'cv' (which must be paired with lock 'm', already held) until it is
// signaled, the deadline passes, or the operation is interrupted. See the
// "Theory of operation" comment above for the _waitMutex/_numKillers protocol
// this implements. Returns the cv_status, or the interrupt status.
StatusWith<stdx::cv_status> OperationContext::waitForConditionOrInterruptNoAssertUntil(
    stdx::condition_variable& cv, stdx::unique_lock<stdx::mutex>& m, Date_t deadline) noexcept {
    invariant(getClient());
    {
        // Advertise to potential killers, under the Client lock, that this
        // operation is about to block on 'cv'.
        stdx::lock_guard<Client> clientLock(*getClient());
        invariant(!_waitMutex);
        invariant(!_waitCV);
        invariant(0 == _numKillers);

        // This interrupt check must be done while holding the client lock, so as not to race with a
        // concurrent caller of markKilled.
        auto status = checkForInterruptNoAssert();
        if (!status.isOK()) {
            return status;
        }
        _waitMutex = m.mutex();
        _waitCV = &cv;
    }

    // The operation's own deadline caps any caller-supplied deadline.
    if (hasDeadline()) {
        deadline = std::min(deadline, getDeadline());
    }

    const auto waitStatus = [&] {
        if (Date_t::max() == deadline) {
            cv.wait(m);
            return stdx::cv_status::no_timeout;
        }
        return getServiceContext()->getPreciseClockSource()->waitForConditionUntil(cv, m, deadline);
    }();

    // Continue waiting on cv until no other thread is attempting to kill this one.
    cv.wait(m, [this] {
        stdx::lock_guard<Client> clientLock(*getClient());
        if (0 == _numKillers) {
            // Safe to tear down the advertisement once no killer holds a
            // reference to _waitMutex/_waitCV.
            _waitMutex = nullptr;
            _waitCV = nullptr;
            return true;
        }
        return false;
    });

    auto status = checkForInterruptNoAssert();
    if (!status.isOK()) {
        return status;
    }
    if (hasDeadline() && waitStatus == stdx::cv_status::timeout && deadline == getDeadline()) {
        // It's possible that the system clock used in stdx::condition_variable::wait_until
        // is slightly ahead of the FastClock used in checkForInterrupt. In this case,
        // we treat the operation as though it has exceeded its time limit, just as if the
        // FastClock and system clock had agreed.
        markKilled(ErrorCodes::ExceededTimeLimit);
        return Status(ErrorCodes::ExceededTimeLimit, "operation exceeded time limit");
    }
    return waitStatus;
}
// Custom deleter for OperationContext handles: detaches the context from its
// Client (under the Client lock), notifies registered client observers via
// onDestroy, then frees the context.
void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
    auto client = opCtx->getClient();
    auto service = client->getServiceContext();
    {
        // The Client must no longer reference the context before observers run
        // and the memory is released.
        stdx::lock_guard<Client> lk(*client);
        client->resetOperationContext();
    }
    onDestroy(opCtx, service->_clientObservers);
    delete opCtx;
}
// Returns true if this operation has a deadline and it has passed, as judged
// by the service's FastClockSource. Failpoints can force either outcome and
// are consulted before the clock; keep this ordering.
bool OperationContext::hasDeadlineExpired() const {
    if (!hasDeadline()) {
        return false;
    }
    if (MONGO_FAIL_POINT(maxTimeNeverTimeOut)) {
        return false;
    }
    if (MONGO_FAIL_POINT(maxTimeAlwaysTimeOut)) {
        return true;
    }

    // TODO: Remove once all OperationContexts are properly connected to Clients and ServiceContexts
    // in tests.
    if (MONGO_unlikely(!getClient() || !getServiceContext())) {
        return false;
    }

    const auto now = getServiceContext()->getFastClockSource()->now();
    return now >= getDeadline();
}
// Sets this operation's absolute deadline to 'when'. The equivalent remaining
// duration is derived by computeMaxTimeFromDeadline() rather than duplicating
// its clamping logic here.
void OperationContext::setDeadlineByDate(Date_t when) {
    setDeadlineAndMaxTime(when, computeMaxTimeFromDeadline(when));
}
// Looking up a key id other than the one stored must fail with KeyNotFound;
// the manager must not fall back to a different key.
TEST_F(KeysManagerShardedTest, GetKeyShouldErrorIfKeyIdMismatchKey) {
    keyManager()->startMonitoring(getServiceContext());

    KeysCollectionDocument storedKey(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, storedKey.toBSON()));

    const auto result =
        keyManager()->getKeyForValidation(operationContext(), 2, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, result.getStatus());
}
// Translates an absolute deadline into the remaining duration from now,
// clamped to be non-negative. Date_t::max() maps to an unlimited budget.
Microseconds OperationContext::computeMaxTimeFromDeadline(Date_t when) {
    if (when == Date_t::max()) {
        return Microseconds::max();
    }
    const auto now = getServiceContext()->getFastClockSource()->now();
    Microseconds remaining = when - now;
    return std::max(remaining, Microseconds::zero());
}
// Fixture destructor: closes all databases under an exclusive global lock,
// shuts the storage engine down cleanly, then restores the process-global
// storage parameters stashed by the constructor.
ServiceContextMongoDTest::~ServiceContextMongoDTest() {
    {
        // The operation context (and its global lock) must be destroyed before
        // the storage engine is shut down.
        auto opCtx = getClient()->makeOperationContext();
        Lock::GlobalLock glk(opCtx.get(), MODE_X);
        DatabaseHolder::getDatabaseHolder().closeAll(opCtx.get(), "all databases dropped");
    }

    shutdownGlobalStorageEngineCleanly(getServiceContext());

    std::swap(storageGlobalParams.engine, _stashedStorageParams.engine);
    std::swap(storageGlobalParams.engineSetByUser, _stashedStorageParams.engineSetByUser);
    std::swap(storageGlobalParams.repair, _stashedStorageParams.repair);
}
// Prepares a sharded-cluster command test: two mock shards, a logical clock
// seeded with kInMemoryLogicalTime, key management and logical-time validation
// services, a no-op session cache, and a routing table for kNss.
void ClusterCommandTestFixture::setUp() {
    CatalogCacheTestFixture::setUp();
    CatalogCacheTestFixture::setupNShards(numShards);

    // Set up a logical clock with an initial time.
    auto logicalClock = stdx::make_unique<LogicalClock>(getServiceContext());
    logicalClock->setClusterTimeFromTrustedSource(kInMemoryLogicalTime);
    LogicalClock::set(getServiceContext(), std::move(logicalClock));

    auto keysCollectionClient = stdx::make_unique<KeysCollectionClientSharded>(
        Grid::get(operationContext())->catalogClient());
    // shared_ptr: the validator below keeps a reference to the key manager.
    auto keyManager = std::make_shared<KeysCollectionManager>(
        "dummy", std::move(keysCollectionClient), Seconds(KeysRotationIntervalSec));
    auto validator = stdx::make_unique<LogicalTimeValidator>(keyManager);
    LogicalTimeValidator::set(getServiceContext(), std::move(validator));

    LogicalSessionCache::set(getServiceContext(), stdx::make_unique<LogicalSessionCacheNoop>());

    loadRoutingTableWithTwoChunksAndTwoShards(kNss);
}
// Returns the digest of the single authenticated user on this operation's
// client, or kNoAuthDigest when authorization is disabled. Invariants that
// exactly one user is logged in when auth is enabled.
SHA256Block getLogicalSessionUserDigestForLoggedInUser(const OperationContext* opCtx) {
    auto client = opCtx->getClient();
    ServiceContext* serviceContext = client->getServiceContext();

    if (AuthorizationManager::get(serviceContext)->isAuthEnabled()) {
        // Removed an unused local 'UserName userName' that was never read.
        const auto user = AuthorizationSession::get(client)->getSingleUser();
        invariant(user);

        return user->getDigest();
    } else {
        return kNoAuthDigest;
    }
}
// With the key generator enabled and an empty keys collection, a refresh must
// create a key usable for signing at the current cluster time.
TEST_F(KeysManagerShardedTest, ShouldCreateKeysIfKeyGeneratorEnabled) {
    keyManager()->startMonitoring(getServiceContext());

    // Construct the LogicalTime directly from the Timestamp; the previous
    // LogicalTime(LogicalTime(...)) double-wrap was redundant.
    const LogicalTime currentTime(Timestamp(100, 0));
    LogicalClock::get(operationContext())->setClusterTimeFromTrustedSource(currentTime);

    keyManager()->enableKeyGenerator(operationContext(), true);
    keyManager()->refreshNow(operationContext());

    auto keyStatus = keyManager()->getKeyForSigning(nullptr, LogicalTime(Timestamp(100, 100)));
    ASSERT_OK(keyStatus.getStatus());

    auto key = keyStatus.getValue();
    ASSERT_EQ(Timestamp(101, 0), key.getExpiresAt().asTimestamp());
}
// Custom deleter for OperationContext handles: detaches the context from its
// Client (under the Client lock), notifies each registered client observer,
// then frees the context. Observer callbacks must not throw; any exception
// escaping them terminates the process.
void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
    auto client = opCtx->getClient();
    auto service = client->getServiceContext();
    {
        stdx::lock_guard<Client> lk(*client);
        client->resetOperationContext();
    }
    try {
        for (const auto& observer : service->_clientObservers) {
            observer->onDestroyOperationContext(opCtx);
        }
    } catch (...) {
        // A throwing observer would leave the context half-destroyed; fail fast.
        std::terminate();
    }
    delete opCtx;
}
// Sets this operation's deadline to 'maxTime' from now, recording
// 'timeoutError' as the code to kill the operation with on expiry. Negative
// durations are clamped to zero; Microseconds::max() means no deadline.
void OperationContext::setDeadlineAfterNowBy(Microseconds maxTime, ErrorCodes::Error timeoutError) {
    Date_t when;
    if (maxTime < Microseconds::zero()) {
        maxTime = Microseconds::zero();
    }
    if (maxTime == Microseconds::max()) {
        when = Date_t::max();
    } else {
        auto clock = getServiceContext()->getFastClockSource();
        when = clock->now();
        if (maxTime > Microseconds::zero()) {
            // Pad by the clock's precision so a coarse FastClock reading does
            // not make a positive budget appear already expired.
            when += clock->getPrecision() + maxTime;
        }
    }
    setDeadlineAndMaxTime(when, maxTime, timeoutError);
}
// Command entry point: starts a new logical session in the cache and returns
// the session id document to the client.
virtual bool run(OperationContext* opCtx,
                 const std::string& db,
                 const BSONObj& cmdObj,
                 BSONObjBuilder& result) override {
    auto client = opCtx->getClient();
    ServiceContext* serviceContext = client->getServiceContext();

    auto lsCache = LogicalSessionCache::get(serviceContext);
    // makeLogicalSessionRecord returns a record directly; the previous
    // boost::optional wrapper (and its .get()) added nothing.
    auto record = makeLogicalSessionRecord(opCtx, lsCache->now());

    uassertStatusOK(lsCache->startSession(opCtx, record));

    makeLogicalSessionToClient(record.getId()).serialize(&result);

    return true;
}
// Under feature compatibility version 3.4, keys must not be served even when a
// valid key document exists in the config collection.
TEST_F(KeysManagerShardedTest, ShouldNotReturnKeysInFeatureCompatibilityVersion34) {
    serverGlobalParams.featureCompatibility.version.store(
        ServerGlobalParams::FeatureCompatibility::Version::k34);

    keyManager()->startMonitoring(getServiceContext());
    keyManager()->enableKeyGenerator(operationContext(), true);

    KeysCollectionDocument origKey(
        1, "dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0)));
    // Pass ConfigNS directly, matching how the sibling tests in this file call
    // insertToConfigCollection (the explicit NamespaceString wrap was redundant).
    ASSERT_OK(insertToConfigCollection(
        operationContext(), KeysCollectionDocument::ConfigNS, origKey.toBSON()));

    keyManager()->refreshNow(operationContext());

    auto keyStatus =
        keyManager()->getKeyForValidation(operationContext(), 1, LogicalTime(Timestamp(100, 0)));
    ASSERT_EQ(ErrorCodes::KeyNotFound, keyStatus.getStatus());
}
// Builds a session record for 'lsid' last used at 'lastUse'. When auth is
// enabled and the logged-in user's digest matches the session id's uid, the
// record is annotated with that user's name.
LogicalSessionRecord makeLogicalSessionRecord(OperationContext* opCtx,
                                              const LogicalSessionId& lsid,
                                              Date_t lastUse) {
    auto record = makeLogicalSessionRecord(lsid, lastUse);

    auto* const client = opCtx->getClient();
    auto* const serviceContext = client->getServiceContext();
    if (AuthorizationManager::get(serviceContext)->isAuthEnabled()) {
        auto user = AuthorizationSession::get(client)->getSingleUser();
        invariant(user);
        if (user->getDigest() == lsid.getUid()) {
            record.setUser(StringData(user->getName().toString()));
        }
    }

    return record;
}
// Runs 'cmd' against kNss through the mongos command path on a fresh
// client/operation context, initializing the client's cluster last-error info
// first, and returns the raw DbResponse.
DbResponse ClusterCommandTestFixture::runCommand(BSONObj cmd) {
    // Create a new client/operation context per command
    auto client = getServiceContext()->makeClient("ClusterCmdClient");
    auto opCtx = client->makeOperationContext();

    const auto opMsgRequest = OpMsgRequest::fromDBAndBody(kNss.db(), cmd);

    // Ensure the clusterGLE on the Client has not yet been initialized.
    ASSERT(!ClusterLastErrorInfo::get(client.get()));

    // Initialize the cluster last error info for the client with a new request.
    ClusterLastErrorInfo::get(client.get()) = std::make_shared<ClusterLastErrorInfo>();
    ASSERT(ClusterLastErrorInfo::get(client.get()));
    auto clusterGLE = ClusterLastErrorInfo::get(client.get());
    clusterGLE->newRequest();

    return Strategy::clientCommand(opCtx.get(), opMsgRequest.serialize());
}
// Custom deleter for OperationContext handles: notifies each registered client
// observer, then frees the context. Detaching the context from its Client is
// (per the TODO below) currently done in the OperationContext destructor
// instead of here. Observer callbacks must not throw.
void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx) const {
    auto client = opCtx->getClient();
    auto service = client->getServiceContext();
    // // TODO(schwerin): When callers no longer construct their own OperationContexts directly,
    // // but only through the ServiceContext, uncomment the following. Until then, it must
    // // be done in the operation context destructors, which introduces a potential race.
    // {
    //     stdx::lock_guard<Client> lk(*client);
    //     client->resetOperationContext();
    // }
    try {
        for (const auto& observer : service->_clientObservers) {
            observer->onDestroyOperationContext(opCtx);
        }
    } catch (...) {
        // A throwing observer would leave the context half-destroyed; fail fast.
        std::terminate();
    }
    delete opCtx;
}
// Builds a mongod test environment with a mock replication coordinator acting
// as primary: mock storage, a replication process with an initialized rollback
// id, a capped oplog collection, and a drop-pending reaper. Ordering matters:
// the repl coordinator must exist before the oplog collection is created.
void MockReplCoordServerFixture::setUp() {
    ServiceContextMongoDTest::setUp();

    _opCtx = cc().makeOperationContext();

    auto service = getServiceContext();

    _storageInterface = new repl::StorageInterfaceMock();
    repl::StorageInterface::set(service, std::unique_ptr<repl::StorageInterface>(_storageInterface));
    // The decoration now owns the pointer; verify registration took effect.
    ASSERT_TRUE(_storageInterface == repl::StorageInterface::get(service));

    repl::ReplicationProcess::set(service,
                                  stdx::make_unique<repl::ReplicationProcess>(
                                      _storageInterface,
                                      stdx::make_unique<repl::ReplicationConsistencyMarkersMock>(),
                                      stdx::make_unique<repl::ReplicationRecoveryMock>()));

    ASSERT_OK(repl::ReplicationProcess::get(service)->initializeRollbackID(opCtx()));

    // Insert code path assumes existence of repl coordinator!
    repl::ReplSettings replSettings;
    replSettings.setReplSetString(
        ConnectionString::forReplicaSet("sessionTxnStateTest", {HostAndPort("a:1")}).toString());

    repl::ReplicationCoordinator::set(
        service, stdx::make_unique<repl::ReplicationCoordinatorMock>(service, replSettings));
    ASSERT_OK(
        repl::ReplicationCoordinator::get(service)->setFollowerMode(repl::MemberState::RS_PRIMARY));

    // Note: internal code does not allow implicit creation of non-capped oplog collection.
    DBDirectClient client(opCtx());
    ASSERT_TRUE(
        client.createCollection(NamespaceString::kRsOplogNamespace.ns(), 1024 * 1024, true));

    repl::setOplogCollectionName(service);
    repl::acquireOplogCollectionForLogging(opCtx());

    repl::DropPendingCollectionReaper::set(
        service,
        stdx::make_unique<repl::DropPendingCollectionReaper>(repl::StorageInterface::get(service)));
}