void ReplicationCoordinatorImpl::_startElectSelfV1() {
    invariant(!_electionWinnerDeclarer);
    invariant(!_voteRequester);
    invariant(!_freshnessChecker);

    stdx::unique_lock<stdx::mutex> lk(_mutex);
    switch (_rsConfigState) {
        case kConfigSteady:
            break;
        case kConfigInitiating:
        case kConfigReconfiguring:
        case kConfigHBReconfiguring:
            LOG(2) << "Not standing for election; processing a configuration change";
            // Transition out of candidate role.
            _topCoord->processLoseElection();
            return;
        default:
            severe() << "Entered replica set election code while in illegal config state "
                     << int(_rsConfigState);
            fassertFailed(28641);
    }

    const StatusWith<ReplicationExecutor::EventHandle> finishEvh = _replExecutor.makeEvent();
    if (finishEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(28642, finishEvh.getStatus());
    _electionFinishedEvent = finishEvh.getValue();
    LoseElectionGuardV1 lossGuard(_topCoord.get(),
                                  &_replExecutor,
                                  &_voteRequester,
                                  &_electionWinnerDeclarer,
                                  &_electionFinishedEvent);

    invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
    OpTime lastOpTimeApplied(_getMyLastOptime_inlock());

    if (lastOpTimeApplied.isNull()) {
        log() << "not trying to elect self, "
                 "do not yet have a complete set of data from any point in time";
        return;
    }

    log() << "conducting a dry run election to see if we could be elected";
    _voteRequester.reset(new VoteRequester);

    // This is necessary because the voteRequester may call directly into winning an
    // election, if there are no other MaybeUp nodes. Winning an election attempts to
    // lock _mutex again.
    lk.unlock();

    long long term = _topCoord->getTerm();
    StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
        &_replExecutor,
        _rsConfig,
        _rsConfig.getMemberAt(_selfIndex).getId(),
        term,
        true,  // dry run
        getMyLastOptime(),
        stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term));
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(28685, nextPhaseEvh.getStatus());

    lossGuard.dismiss();
}
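
// For illustration only: a minimal sketch of the RAII loss-guard pattern the functions
// above and below rely on. LossGuardSketch is a hypothetical name, and its member
// layout is an assumption for the sketch, not the actual LoseElectionGuardV1 /
// LoseElectionGuard definition. The idea: every early return runs the guard's
// destructor, which concedes the election in the topology coordinator and signals the
// election-finished event; only the happy path, after scheduling the next phase, calls
// dismiss() to skip that cleanup.
namespace {
class LossGuardSketch {
public:
    LossGuardSketch(TopologyCoordinator* topCoord,
                    ReplicationExecutor* executor,
                    ReplicationExecutor::EventHandle* finishedEvent)
        : _topCoord(topCoord), _executor(executor), _finishedEvent(finishedEvent) {}

    ~LossGuardSketch() {
        if (_dismissed) {
            return;  // Next phase was scheduled successfully; nothing to clean up.
        }
        _topCoord->processLoseElection();  // Transition out of candidate role.
        if (_finishedEvent->isValid()) {
            _executor->signalEvent(*_finishedEvent);  // Wake waiters on the election.
        }
    }

    void dismiss() {
        _dismissed = true;
    }

private:
    TopologyCoordinator* const _topCoord;
    ReplicationExecutor* const _executor;
    ReplicationExecutor::EventHandle* const _finishedEvent;
    bool _dismissed = false;
};
}  // namespace
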
void ReplicationCoordinatorImpl::_startElectSelf() {
    invariant(!_freshnessChecker);
    invariant(!_electCmdRunner);

    stdx::unique_lock<stdx::mutex> lk(_mutex);
    switch (_rsConfigState) {
        case kConfigSteady:
            break;
        case kConfigInitiating:
        case kConfigReconfiguring:
        case kConfigHBReconfiguring:
            LOG(2) << "Not standing for election; processing a configuration change";
            // Transition out of candidate role.
            _topCoord->processLoseElection();
            return;
        default:
            severe() << "Entered replica set election code while in illegal config state "
                     << int(_rsConfigState);
            fassertFailed(18913);
    }

    log() << "Standing for election";
    const StatusWith<ReplicationExecutor::EventHandle> finishEvh = _replExecutor.makeEvent();
    if (finishEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(18680, finishEvh.getStatus());
    _electionFinishedEvent = finishEvh.getValue();
    LoseElectionGuard lossGuard(_topCoord.get(),
                                &_replExecutor,
                                &_freshnessChecker,
                                &_electCmdRunner,
                                &_electionFinishedEvent);

    invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
    OpTime lastOpTimeApplied(_getMyLastOptime_inlock());

    if (lastOpTimeApplied.isNull()) {
        log() << "not trying to elect self, "
                 "do not yet have a complete set of data from any point in time";
        return;
    }

    _freshnessChecker.reset(new FreshnessChecker);

    // This is necessary because the freshnessChecker may call directly into winning an
    // election, if there are no other MaybeUp nodes. Winning an election attempts to
    // lock _mutex again.
    lk.unlock();

    StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh = _freshnessChecker->start(
        &_replExecutor,
        lastOpTimeApplied.getTimestamp(),
        _rsConfig,
        _selfIndex,
        _topCoord->getMaybeUpHostAndPorts(),
        stdx::bind(&ReplicationCoordinatorImpl::_onFreshnessCheckComplete, this));
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(18681, nextPhaseEvh.getStatus());

    lossGuard.dismiss();
}
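
// For illustration only: a standalone toy example (plain std:: types, hypothetical
// names) of the self-deadlock that the lk.unlock() calls above guard against. In both
// functions, start() may invoke its completion callback synchronously when no other
// node is MaybeUp, and the winning path re-acquires _mutex; starting the next phase
// while still holding a non-recursive mutex would therefore deadlock the thread.
#include <functional>
#include <mutex>

class ElectionSketch {
public:
    void startElection() {
        std::unique_lock<std::mutex> lk(_mutex);
        // ... inspect candidate state under the lock ...
        lk.unlock();  // Must release before start(): the callback may run inline.
        startNextPhase([this] { onElectionWon(); });
    }

private:
    // Stand-in for VoteRequester::start() / FreshnessChecker::start(): with no remote
    // nodes to contact, it invokes the completion callback on the calling thread.
    void startNextPhase(const std::function<void()>& onComplete) {
        onComplete();
    }

    void onElectionWon() {
        std::lock_guard<std::mutex> lk(_mutex);  // Deadlocks if _mutex is still held.
        // ... transition to primary under the lock ...
    }

    std::mutex _mutex;
};

int main() {
    ElectionSketch node;
    node.startElection();  // Completes only because the lock was dropped first.
    return 0;
}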