void ReplicationCoordinatorImpl::_onDryRunComplete(long long originalTerm) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    LoseElectionDryRunGuardV1 lossGuard(this);

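    // The VoteRequester that drove the dry run must still exist; it is only cleared once
    // its result has been consumed, and this callback is what consumes it.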
    invariant(_voteRequester);

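    // Another election or a new primary may have advanced the term while the dry run was
    // in flight; if so, this election attempt has been superseded and must be abandoned.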
    if (_topCoord->getTerm() != originalTerm) {
        log() << "not running for primary, we have been superseded already during dry run. "
              << "original term: " << originalTerm << ", current term: " << _topCoord->getTerm();
        return;
    }

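    // Collapse the individual dry-run vote responses into a single aggregate outcome.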
    const VoteRequester::Result endResult = _voteRequester->getResult();

    if (endResult == VoteRequester::Result::kInsufficientVotes) {
        log() << "not running for primary, we received insufficient votes";
        return;
    } else if (endResult == VoteRequester::Result::kStaleTerm) {
        log() << "not running for primary, we have been superseded already";
        return;
    } else if (endResult == VoteRequester::Result::kPrimaryRespondedNo) {
        log() << "not running for primary, the current primary responded no in the dry run";
        return;
    } else if (endResult != VoteRequester::Result::kSuccessfullyElected) {
        log() << "not running for primary, we received an unexpected problem";
        return;
    }

    long long newTerm = originalTerm + 1;
    log() << "dry election run succeeded, running for election in term " << newTerm;
    // Stepdown is impossible from this term update.
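    // (we are the candidate advancing the term ourselves, not a primary observing a higher
    // term from another node).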
    TopologyCoordinator::UpdateTermResult updateTermResult;
    _updateTerm_inlock(newTerm, &updateTermResult);
    invariant(updateTermResult == TopologyCoordinator::UpdateTermResult::kUpdatedTerm);
    // Secure our vote for ourselves first.
    _topCoord->voteForMyselfV1();

    // Store the vote in persistent storage.
    LastVote lastVote{newTerm, _selfIndex};

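    // Persist the vote via the executor; the real election is launched from
    // _writeLastVoteForMyElection once the write is durable.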
    auto cbStatus = _replExecutor->scheduleWork(
        [this, lastVote](const executor::TaskExecutor::CallbackArgs& cbData) {
            _writeLastVoteForMyElection(lastVote, cbData);
        });
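    // If the executor is shutting down, abandon the election; returning without dismissing
    // the guard lets it clean up the in-progress election state.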
    if (cbStatus.getStatus() == ErrorCodes::ShutdownInProgress) {
        return;
    }
    fassert(34421, cbStatus.getStatus());
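    // The durable-vote write is scheduled, so this election attempt is still live; keep the
    // guard from treating it as lost.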
    lossGuard.dismiss();
}