// Sends the real (non-dry-run) vote requests for 'newTerm' to the other members of the set;
// the caller holds _mutex.  _onVoteRequestComplete(newTerm) runs when the vote requester
// signals its completion event.
void ReplicationCoordinatorImpl::_startVoteRequester_inlock(long long newTerm) {
    invariant(_voteRequester);

    const auto lastOpTime = _getMyLastAppliedOpTime_inlock();

    _voteRequester.reset(new VoteRequester);
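    // The 'false' argument means this is not a dry run, and -1 for the final (primary index)
    // argument means no particular primary's vote is required, in contrast to the dry-run
    // call in _startElectSelfV1_inlock below.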
    StatusWith<executor::TaskExecutor::EventHandle> nextPhaseEvh = _voteRequester->start(
        _replExecutor.get(), _rsConfig, _selfIndex, newTerm, false, lastOpTime, -1);
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(28643, nextPhaseEvh.getStatus());
    _replExecutor
        ->onEvent(
            nextPhaseEvh.getValue(),
            [=](const executor::TaskExecutor::CallbackArgs&) { _onVoteRequestComplete(newTerm); })
        .status_with_transitional_ignore();
}
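
// Entry point for a protocol-version-1 election; the caller holds _mutex.  Verifies that the
// configuration is steady and that we have a complete set of data, then kicks off a dry-run
// election to see whether we could be elected; _onDryRunComplete(term) runs when the vote
// requester signals its event.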
void ReplicationCoordinatorImpl::_startElectSelfV1_inlock(
    TopologyCoordinator::StartElectionReason reason) {
    invariant(!_voteRequester);
    invariant(!_freshnessChecker);

    switch (_rsConfigState) {
        case kConfigSteady:
            break;
        case kConfigInitiating:
        case kConfigReconfiguring:
        case kConfigHBReconfiguring:
            LOG(2) << "Not standing for election; processing a configuration change";
            // Transition out of candidate role.
            _topCoord->processLoseElection();
            return;
        default:
            severe() << "Entered replica set election code while in illegal config state "
                     << int(_rsConfigState);
            fassertFailed(28641);
    }

    auto finishedEvent = _makeEvent();
    if (!finishedEvent) {
        return;
    }
    _electionFinishedEvent = finishedEvent;

    auto dryRunFinishedEvent = _makeEvent();
    if (!dryRunFinishedEvent) {
        return;
    }
    _electionDryRunFinishedEvent = dryRunFinishedEvent;

    LoseElectionDryRunGuardV1 lossGuard(this);
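    // Dismissed only after the dry-run request is successfully scheduled; any earlier return
    // leaves the guard to handle the lost election.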


    invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
    const auto lastOpTime = _getMyLastAppliedOpTime_inlock();

    if (lastOpTime == OpTime()) {
        log() << "not trying to elect self, "
                 "do not yet have a complete set of data from any point in time";
        return;
    }

    long long term = _topCoord->getTerm();
    int primaryIndex = -1;

    log() << "conducting a dry run election to see if we could be elected. current term: " << term;
    _voteRequester.reset(new VoteRequester);

    // Only set primaryIndex if the primary's vote is required during the dry run.
    if (reason == TopologyCoordinator::StartElectionReason::kCatchupTakeover) {
        primaryIndex = _topCoord->getCurrentPrimaryIndex();
    }
    StatusWith<executor::TaskExecutor::EventHandle> nextPhaseEvh =
        _voteRequester->start(_replExecutor.get(),
                              _rsConfig,
                              _selfIndex,
                              term,
                              true,  // dry run
                              lastOpTime,
                              primaryIndex);
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(28685, nextPhaseEvh.getStatus());
    _replExecutor
        ->onEvent(nextPhaseEvh.getValue(),
                  [=](const executor::TaskExecutor::CallbackArgs&) { _onDryRunComplete(term); })
        .status_with_transitional_ignore();
    lossGuard.dismiss();
}
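
// Entry point for a protocol-version-0 election; the caller holds _mutex.  After checking the
// configuration state and that our last applied optime is non-null, runs a freshness check
// against the members that may be up; the election continues from _onFreshnessCheckComplete.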
void ReplicationCoordinatorImpl::_startElectSelf_inlock() {
    invariant(!_freshnessChecker);
    invariant(!_electCmdRunner);

    switch (_rsConfigState) {
        case kConfigSteady:
            break;
        case kConfigInitiating:
        case kConfigReconfiguring:
        case kConfigHBReconfiguring:
            LOG(2) << "Not standing for election; processing a configuration change";
            // Transition out of candidate role.
            _topCoord->processLoseElection();
            return;
        default:
            severe() << "Entered replica set election code while in illegal config state "
                     << int(_rsConfigState);
            fassertFailed(18913);
    }

    log() << "Standing for election";
    const StatusWith<executor::TaskExecutor::EventHandle> finishEvh = _replExecutor->makeEvent();
    if (finishEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(18680, finishEvh.getStatus());
    _electionFinishedEvent = finishEvh.getValue();
    LoseElectionGuard lossGuard(_topCoord.get(),
                                _replExecutor.get(),
                                &_freshnessChecker,
                                &_electCmdRunner,
                                &_electionFinishedEvent);


    invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
    OpTime lastOpTimeApplied(_getMyLastAppliedOpTime_inlock());

    if (lastOpTimeApplied.isNull()) {
        log() << "not trying to elect self, "
                 "do not yet have a complete set of data from any point in time"
                 " -- lastAppliedOpTime is null";
        return;
    }

    _freshnessChecker.reset(new FreshnessChecker);
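    // Compare our last applied timestamp against the members that may be up before going any
    // further with the election.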

    StatusWith<executor::TaskExecutor::EventHandle> nextPhaseEvh =
        _freshnessChecker->start(_replExecutor.get(),
                                 lastOpTimeApplied.getTimestamp(),
                                 _rsConfig,
                                 _selfIndex,
                                 _topCoord->getMaybeUpHostAndPorts());
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(18681, nextPhaseEvh.getStatus());
    _replExecutor
        ->onEvent(nextPhaseEvh.getValue(),
                  [=](const executor::TaskExecutor::CallbackArgs&) { _onFreshnessCheckComplete(); })
        .status_with_transitional_ignore();
    lossGuard.dismiss();
}
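
// Variant of the protocol-version-1 election entry point that acquires _mutex itself and
// drives the dry run through the ReplicationExecutor API directly.  Note the explicit unlock
// below: winning the election from inside the vote requester re-acquires _mutex.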
void ReplicationCoordinatorImpl::_startElectSelfV1() {
    invariant(!_voteRequester);
    invariant(!_freshnessChecker);

    stdx::unique_lock<stdx::mutex> lk(_mutex);
    switch (_rsConfigState) {
        case kConfigSteady:
            break;
        case kConfigInitiating:
        case kConfigReconfiguring:
        case kConfigHBReconfiguring:
            LOG(2) << "Not standing for election; processing a configuration change";
            // Transition out of candidate role.
            _topCoord->processLoseElection();
            return;
        default:
            severe() << "Entered replica set election code while in illegal config state "
                     << int(_rsConfigState);
            fassertFailed(28641);
    }

    auto finishedEvent = _makeEvent();
    if (!finishedEvent) {
        return;
    }
    _electionFinishedEvent = finishedEvent;

    auto dryRunFinishedEvent = _makeEvent();
    if (!dryRunFinishedEvent) {
        return;
    }
    _electionDryRunFinishedEvent = dryRunFinishedEvent;

    LoseElectionDryRunGuardV1 lossGuard(this);


    invariant(_rsConfig.getMemberAt(_selfIndex).isElectable());
    // Note: If we aren't durable, send last applied.
    const auto lastOpTime = _isDurableStorageEngine() ? _getMyLastDurableOpTime_inlock()
                                                      : _getMyLastAppliedOpTime_inlock();

    if (lastOpTime == OpTime()) {
        log() << "not trying to elect self, "
                 "do not yet have a complete set of data from any point in time";
        return;
    }

    log() << "conducting a dry run election to see if we could be elected";
    _voteRequester.reset(new VoteRequester);

    // This is necessary because the voteRequester may call directly into winning an
    // election, if there are no other MaybeUp nodes.  Winning an election attempts to lock
    // _mutex again.
    lk.unlock();

    long long term = _topCoord->getTerm();
    StatusWith<ReplicationExecutor::EventHandle> nextPhaseEvh =
        _voteRequester->start(&_replExecutor,
                              _rsConfig,
                              _selfIndex,
                              term,  // same term captured above and passed to _onDryRunComplete
                              true,  // dry run
                              lastOpTime);
    if (nextPhaseEvh.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(28685, nextPhaseEvh.getStatus());
    _replExecutor.onEvent(nextPhaseEvh.getValue(),
                          stdx::bind(&ReplicationCoordinatorImpl::_onDryRunComplete, this, term));
    lossGuard.dismiss();
}