void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(
    TopologyCoordinator::StartElectionReason reason) {
    // Precondition: no election round is already in flight.
    invariant(!_voteRequester);
    invariant(!_freshnessChecker);

    // Elections may only proceed from a steady configuration; any in-progress
    // reconfig means we abandon candidacy for now.
    switch (_rsConfigState) {
        case kConfigSteady:
            break;
        case kConfigInitiating:
        case kConfigReconfiguring:
        case kConfigHBReconfiguring:
            LOG(2) << "Not standing for election; processing a configuration change";
            // Transition out of candidate role.
            _topCoord->processLoseElection();
            return;
        default:
            severe() << "Entered replica set election code while in illegal config state "
                     << int(_rsConfigState);
            fassertFailed(28641);
    }

    // Create the events that observers wait on; _makeEvent() returns an empty
    // handle during shutdown, in which case we simply stop here.
    auto electionDoneEvt = _makeEvent();
    if (!electionDoneEvt) {
        return;
    }
    _electionFinishedEvent = electionDoneEvt;

    auto dryRunDoneEvt = _makeEvent();
    if (!dryRunDoneEvt) {
        return;
    }
    _electionDryRunFinishedEvent = dryRunDoneEvt;

    // Signals a lost dry run on every early exit below unless dismissed.
    LoseElectionDryRunGuardV1 guard(this);

    invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());

    const auto myLastApplied = _getMyLastAppliedOpTime_inlock();
    if (myLastApplied == OpTime()) {
        log() << "not trying to elect self, do not yet have a complete set of data from any point in time";
        return;
    }

    const long long currentTerm = _topCoord->getTerm();
    int primaryMemberIndex = -1;

    log() << "conducting a dry run election to see if we could be elected. current term: "
          << currentTerm;
    _voteRequester.reset(new VoteRequester);

    // Only set primaryMemberIndex if the primary's vote is required during the dry run.
    if (reason == TopologyCoordinator::StartElectionReason::kCatchupTakeover) {
        primaryMemberIndex = _topCoord->getCurrentPrimaryIndex();
    }

    StatusWith<executor::TaskExecutor::EventHandle> startResult =
        _voteRequester->start(_replExecutor.get(),
                              _rsConfig,
                              _selfIndex,
                              currentTerm,
                              true,  // dry run
                              myLastApplied,
                              primaryMemberIndex);
    if (startResult.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(28685, startResult.getStatus());

    // When all dry-run responses are in, evaluate them on the executor thread.
    _replExecutor
        ->onEvent(startResult.getValue(),
                  [this, currentTerm](const executor::TaskExecutor::CallbackArgs&) {
                      _onDryRunComplete(currentTerm);
                  })
        .status_with_transitional_ignore();

    guard.dismiss();
}
void ReplicationCoordinatorImpl::_startElectSelfV1() { invariant(!_voteRequester); invariant(!_freshnessChecker); stdx::unique_lock<stdx::mutex> lk(_mutex); switch (_rsConfigState) { case kConfigSteady: break; case kConfigInitiating: case kConfigReconfiguring: case kConfigHBReconfiguring: LOG(2) << "Not standing for election; processing a configuration change"; // Transition out of candidate role. _topCoord->processLoseElection(); return; default: severe() << "Entered replica set election code while in illegal config state " << int(_rsConfigState); fassertFailed(28641); } auto finishedEvent = _makeEvent(); if (!finishedEvent) { return; } _electionFinishedEvent = finishedEvent; auto dryRunFinishedEvent = _makeEvent(); if (!dryRunFinishedEvent) { return; } _electionDryRunFinishedEvent = dryRunFinishedEvent; LoseElectionDryRunGuardV1 lossGuard(this); invariant(_rsConfig.getMemberAt(_selfIndex).isElectable()); // Note: If we aren't durable, send last applied. const auto lastOpTime = _isDurableStorageEngine() ? _getMyLastDurableOpTime_inlock() : _getMyLastAppliedOpTime_inlock(); if (lastOpTime == OpTime()) { log() << "not trying to elect self, " "do not yet have a complete set of data from any point in time"; return; } log() << "conducting a dry run election to see if we could be elected"; _voteRequester.reset(new VoteRequester); // This is necessary because the voteRequester may call directly into winning an // election, if there are no other MaybeUp nodes. Winning an election attempts to lock // _mutex again. 
lk.unlock(); long long term = _topCoord->getTerm(); StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(&_replExecutor, _rsConfig, _selfIndex, _topCoord->getTerm(), true, // dry run lastOpTime); if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) { return; } fassert(28685, nextPhaseEvh.getStatus()); _replExecutor.onEvent(nextPhaseEvh.getValue(), stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term)); lossGuard.dismiss(); }