Example #1
void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& request,
                                               const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        log() << "VoteRequester: Got failed response from " << request.target << ": "
              << response.getStatus();
    } else {
        ReplSetRequestVotesResponse voteResponse;
        voteResponse.initialize(response.getValue().data);
        if (voteResponse.getVoteGranted()) {
            _votes++;
        } else {
            log() << "VoteRequester: Got no vote from " << request.target
                  << " because: " << voteResponse.getReason();
        }

        if (voteResponse.getTerm() > _term) {
            _staleTerm = true;
        }
    }
}
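Each processResponse callback in this dump follows the same shape: bump a processed counter, branch on response.isOK(), and tally the parsed result. A minimal standalone sketch of that shape, with a simplified stand-in for MongoDB's ResponseStatus (FakeResponse, VoteTally, and all fields here are illustrative, not the real API):

#include <iostream>
#include <string>

// Simplified stand-in for MongoDB's ResponseStatus (illustrative only).
struct FakeResponse {
    bool ok;             // did the remote call succeed at the transport level?
    bool voteGranted;    // parsed command result
    std::string reason;  // transport error or refusal reason
    bool isOK() const { return ok; }
};

class VoteTally {
public:
    void processResponse(const std::string& target, const FakeResponse& response) {
        ++_responsesProcessed;
        if (!response.isOK()) {  // failed response: log it and move on
            std::cerr << "Got failed response from " << target << ": " << response.reason << '\n';
        } else if (response.voteGranted) {
            ++_votes;
        } else {
            std::cerr << "Got no vote from " << target << " because: " << response.reason << '\n';
        }
    }
    int votes() const { return _votes; }
    int responsesProcessed() const { return _responsesProcessed; }

private:
    int _responsesProcessed = 0;
    int _votes = 0;
};

int main() {
    VoteTally tally;
    tally.processResponse("node1:27017", {true, true, ""});
    tally.processResponse("node2:27017", {true, false, "already voted for another candidate"});
    tally.processResponse("node3:27017", {false, false, "HostUnreachable"});
    std::cout << tally.votes() << " of " << tally.responsesProcessed() << " granted\n";
}

Note that Example #1 ignores the Status returned by voteResponse.initialize(); Example #6 below shows the later revision of the same function, which checks it.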
Example #2
void FreshnessScanner::Algorithm::processResponse(const RemoteCommandRequest& request,
                                                  const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        LOG(2) << "FreshnessScanner: Got failed response from " << request.target << ": "
               << response.getStatus();
    } else {
        BSONObj opTimesObj = response.getValue().data.getObjectField("optimes");
        OpTime lastOpTime;
        Status status = bsonExtractOpTimeField(opTimesObj, "appliedOpTime", &lastOpTime);
        if (!status.isOK()) {
            return;
        }

        int index = _rsConfig.findMemberIndexByHostAndPort(request.target);
        FreshnessInfo freshnessInfo{index, lastOpTime};

        auto cmp = [](const FreshnessInfo& a, const FreshnessInfo& b) {
            return a.opTime > b.opTime;
        };
        auto iter =
            std::upper_bound(_freshnessInfos.begin(), _freshnessInfos.end(), freshnessInfo, cmp);
        _freshnessInfos.insert(iter, freshnessInfo);
    }
}
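Example #2 keeps _freshnessInfos sorted freshest-first by computing the insertion point with std::upper_bound and a strict-weak-ordering comparator, avoiding a full re-sort on every response. The same pattern in isolation, a sketch with opTime reduced to a plain integer:

#include <algorithm>
#include <iostream>
#include <vector>

struct FreshnessInfo {
    int index;    // member index in the replica set config
    long opTime;  // stand-in for repl::OpTime
};

int main() {
    std::vector<FreshnessInfo> infos;
    const std::vector<FreshnessInfo> incoming = {{0, 5}, {1, 9}, {2, 7}};

    // Descending by opTime, matching the comparator in Example #2.
    auto cmp = [](const FreshnessInfo& a, const FreshnessInfo& b) { return a.opTime > b.opTime; };

    for (const FreshnessInfo& fi : incoming) {
        // upper_bound finds the first element that sorts after fi, so inserting
        // there keeps the vector ordered without a full re-sort per response.
        auto iter = std::upper_bound(infos.begin(), infos.end(), fi, cmp);
        infos.insert(iter, fi);
    }

    for (const FreshnessInfo& fi : infos)
        std::cout << "member " << fi.index << " at opTime " << fi.opTime << '\n';
    // Order: member 1 (9), member 2 (7), member 0 (5).
}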
Example #3
void ElectCmdRunner::Algorithm::processResponse(const RemoteCommandRequest& request,
                                                const ResponseStatus& response) {
    ++_actualResponses;

    if (response.isOK()) {
        BSONObj res = response.getValue().data;
        log() << "received " << res["vote"] << " votes from " << request.target;
        LOG(1) << "full elect res: " << res.toString();
        BSONElement vote(res["vote"]);
        if (vote.type() != mongo::NumberInt) {
            error() << "wrong type for vote argument in replSetElect command: "
                    << typeName(vote.type());
            _sufficientResponsesReceived = true;
            return;
        }

        _receivedVotes += vote._numberInt();
    } else {
        warning() << "elect command to " << request.target << " failed: "
                  << response.getStatus();
    }
}
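Example #3 validates vote.type() before reading the field and treats a mismatch as a protocol error that ends the round, rather than reading garbage. A reduced sketch of that check with a tiny tagged-value stand-in for BSONElement (Element, Type, and tryReadVote are illustrative, not the BSON API):

#include <iostream>
#include <string>

// Tiny stand-in for BSONElement's tagged value (illustrative only).
enum class Type { NumberInt, String };

struct Element {
    Type type;
    int intValue;
    std::string strValue;
};

// Refuses to read a mistyped field, mirroring the type() check in Example #3.
bool tryReadVote(const Element& vote, int* out) {
    if (vote.type != Type::NumberInt) {
        std::cerr << "wrong type for vote argument in replSetElect command\n";
        return false;
    }
    *out = vote.intValue;
    return true;
}

int main() {
    int votes = 0;
    int v;
    if (tryReadVote(Element{Type::NumberInt, 1, ""}, &v)) votes += v;
    if (tryReadVote(Element{Type::String, 0, "yes"}, &v)) votes += v;  // rejected
    std::cout << "received " << votes << " votes\n";  // prints: received 1 votes
}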
Example #4
void NetworkInterfaceASIO::AsyncOp::finish(const ResponseStatus& status) {
    // We never hold the access lock when we call finish from NetworkInterfaceASIO.
    _transitionToState(AsyncOp::State::kFinished);

    LOG(2) << "Request " << _request.id << " finished with response: "
           << (status.getStatus().isOK() ? status.getValue().data.toString()
                                         : status.getStatus().toString());

    // Calling the completion handler may invalidate state in this op, so do it last.
    _onFinish(status);
}
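finish() invokes _onFinish(status) last because the completion handler may free or recycle the op; any member access after the callback could be a use-after-free. A sketch of why the ordering matters, where the handler owns and destroys the op (Op and the handler wiring are illustrative):

#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct Op {
    std::string id = "request 42";
    std::function<void()> onFinish;

    void finish() {
        // Touch members *before* the callback runs.
        std::cout << id << " finished\n";
        // Move the handler out first: the op may be destroyed while it runs.
        auto cb = std::move(onFinish);
        // Calling the completion handler may invalidate state in this op, so do it last.
        cb();
    }
};

int main() {
    auto op = std::make_unique<Op>();
    Op* raw = op.get();
    // The handler owns the op and releases it, modeling the executor reclaiming it.
    raw->onFinish = [holder = std::move(op)]() mutable {
        holder.reset();  // the op is gone after this line
        std::cout << "handler ran; op destroyed\n";
    };
    raw->finish();  // safe only because finish() touches no members after cb()
}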
Example #5
void NetworkInterfaceImpl::_consumeNetworkRequests() {
    boost::unique_lock<boost::mutex> lk(_mutex);
    while (!_inShutdown) {
        if (_pending.empty()) {
            if (_threads.size() > kMinThreads) {
                const Date_t nowDate = now();
                const Date_t nextThreadRetirementDate =
                    _lastFullUtilizationDate + kMaxIdleThreadAge;
                if (nowDate > nextThreadRetirementDate) {
                    _lastFullUtilizationDate = nowDate;
                    break;
                }
            }
            _hasPending.wait_for(lk, kMaxIdleThreadAge);
            continue;
        }
        CommandData todo = _pending.front();
        _pending.pop_front();
        ++_numActiveNetworkRequests;
        --_numIdleThreads;
        lk.unlock();
        ResponseStatus result = _commandExec.runCommand(todo.request);
        LOG(2) << "Network status of sending " << todo.request.cmdObj.firstElementFieldName()
               << " to " << todo.request.target << " was " << result.getStatus();
        todo.onFinish(result);
        lk.lock();
        --_numActiveNetworkRequests;
        ++_numIdleThreads;
        _signalWorkAvailable_inlock();
    }
    --_numIdleThreads;
    if (_inShutdown) {
        return;
    }
    // This thread is ending because it was idle for too long.
    // Find self in _threads, remove self from _threads, detach self.
    for (size_t i = 0; i < _threads.size(); ++i) {
        if (_threads[i]->get_id() != boost::this_thread::get_id()) {
            continue;
        }
        _threads[i]->detach();
        _threads[i].swap(_threads.back());
        _threads.pop_back();
        return;
    }
    severe().stream() << "Could not find this thread, with id " << boost::this_thread::get_id()
                      << " in the replication networking thread pool";
    fassertFailedNoTrace(28581);
}
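Example #5's worker loop combines three idioms: pop work under the mutex, drop the lock while the command runs, and retire the thread once it has been idle past kMaxIdleThreadAge. A minimal sketch of the wait-and-retire skeleton using std::condition_variable (the real bookkeeping of _threads, counters, and shutdown signaling is elided; all names are illustrative):

#include <chrono>
#include <condition_variable>
#include <deque>
#include <functional>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex gMutex;
std::condition_variable gHasPending;
std::deque<std::function<void()>> gPending;
bool gInShutdown = false;
constexpr auto kMaxIdleThreadAge = std::chrono::milliseconds(200);

void consumeRequests() {
    std::unique_lock<std::mutex> lk(gMutex);
    auto idleSince = std::chrono::steady_clock::now();
    while (!gInShutdown) {
        if (gPending.empty()) {
            // Retire this thread once it has been idle for too long.
            if (std::chrono::steady_clock::now() - idleSince > kMaxIdleThreadAge)
                break;
            gHasPending.wait_for(lk, kMaxIdleThreadAge);
            continue;
        }
        auto todo = std::move(gPending.front());
        gPending.pop_front();
        lk.unlock();  // never run the (possibly slow) command while holding the lock
        todo();
        lk.lock();
        idleSince = std::chrono::steady_clock::now();
    }
}

int main() {
    std::thread worker(consumeRequests);
    {
        std::lock_guard<std::mutex> lk(gMutex);
        gPending.push_back([] { std::cout << "ran one request\n"; });
    }
    gHasPending.notify_one();
    worker.join();  // the worker exits after idling past kMaxIdleThreadAge
}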
Example #6
void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& request,
                                               const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        log() << "VoteRequester: Got failed response from " << request.target << ": "
              << response.status;
    } else {
        _responders.insert(request.target);
        ReplSetRequestVotesResponse voteResponse;
        const auto status = voteResponse.initialize(response.data);
        if (!status.isOK()) {
            log() << "VoteRequester: Got error processing response with status: " << status
                  << ", resp:" << response.data;
        }

        if (voteResponse.getVoteGranted()) {
            LOG(3) << "VoteRequester: Got yes vote from " << request.target
                   << ", resp:" << response.data;
            _votes++;
        } else {
            log() << "VoteRequester: Got no vote from " << request.target
                  << " because: " << voteResponse.getReason() << ", resp:" << response.data;
        }

        if (voteResponse.getTerm() > _term) {
            _staleTerm = true;
        }
    }
}
Example #7
void ElectionWinnerDeclarer::Algorithm::processResponse(const RemoteCommandRequest& request,
                                                        const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        log() << "ElectionWinnerDeclarer: Got failed response from " << request.target << ": "
              << response.getStatus();
        return;
    }

    Status cmdResponseStatus = getStatusFromCommandResult(response.getValue().data);
    if (!cmdResponseStatus.isOK()) {  // disagreement response
        _failed = true;
        _status = cmdResponseStatus;
        log() << "ElectionWinnerDeclarer: Got error response from " << request.target
              << " with term: " << response.getValue().data["term"].Number()
              << " and error: " << cmdResponseStatus;
    }
}
Example #8
void NetworkInterfaceMock::scheduleResponse(NetworkOperationIterator noi,
                                            Date_t when,
                                            const ResponseStatus& response) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    invariant(_currentlyRunning == kNetworkThread);
    NetworkOperationIterator insertBefore = _scheduled.begin();
    while ((insertBefore != _scheduled.end()) && (insertBefore->getResponseDate() <= when)) {
        ++insertBefore;
    }

    // If no RemoteCommandResponse was returned (for example, on a simulated network error), then
    // do not attempt to run the metadata hook, since there is no returned metadata.
    if (_metadataHook && response.isOK()) {
        _metadataHook->readReplyMetadata(noi->getRequest().target, response.getValue().metadata);
    }

    noi->setResponse(when, response);
    _scheduled.splice(insertBefore, _processing, noi);
}
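scheduleResponse keeps _scheduled ordered by response date and moves the operation's node out of _processing with std::list::splice, which relinks the node in O(1) without copying the element. The same move in isolation, a sketch with integers standing in for operations and their values for response dates:

#include <iostream>
#include <list>

int main() {
    std::list<int> processing = {7};       // the op awaiting its scheduled response
    std::list<int> scheduled = {3, 5, 9};  // already ordered by "response date"

    // Walk to the first element due strictly after the new one,
    // mirroring the insertBefore scan in NetworkInterfaceMock.
    auto noi = processing.begin();
    auto insertBefore = scheduled.begin();
    while (insertBefore != scheduled.end() && *insertBefore <= *noi)
        ++insertBefore;

    // splice relinks the node from processing into scheduled; iterators to
    // the moved element stay valid and no element is copied.
    scheduled.splice(insertBefore, processing, noi);

    for (int v : scheduled)
        std::cout << v << ' ';  // prints: 3 5 7 9
    std::cout << '\n';
}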
Example #9
void FreshnessChecker::Algorithm::processResponse(
    const ReplicationExecutor::RemoteCommandRequest& request, const ResponseStatus& response) {
    ++_responsesProcessed;
    bool votingMember = _isVotingMember(request.target);

    Status status = Status::OK();

    if (!response.isOK() ||
        !((status = getStatusFromCommandResult(response.getValue().data)).isOK())) {
        if (votingMember) {
            ++_failedVoterResponses;
            if (hadTooManyFailedVoterResponses()) {
                _abortReason = QuorumUnreachable;
            }
        }
        if (!response.isOK()) {  // network/executor error
            LOG(2) << "FreshnessChecker: Got failed response from " << request.target;
        } else {  // command error, like unauth
            LOG(2) << "FreshnessChecker: Got error response from " << request.target << ": "
                   << status;
        }
        return;
    }

    const BSONObj res = response.getValue().data;

    LOG(2) << "FreshnessChecker: Got response from " << request.target << " of " << res;

    if (res["fresher"].trueValue()) {
        log() << "not electing self, we are not freshest";
        _abortReason = FresherNodeFound;
        return;
    }

    if (res["opTime"].type() != mongo::Date) {
        error() << "wrong type for opTime argument in replSetFresh response: "
                << typeName(res["opTime"].type());
        _abortReason = FresherNodeFound;
        return;
    }
    OpTime remoteTime(res["opTime"].date());
    if (remoteTime == _lastOpTimeApplied) {
        _abortReason = FreshnessTie;
    }
    if (remoteTime > _lastOpTimeApplied) {
        // something really wrong (rogue command?)
        _abortReason = FresherNodeFound;
        return;
    }

    if (res["veto"].trueValue()) {
        BSONElement msg = res["errmsg"];
        if (!msg.eoo()) {
            log() << "not electing self, " << request.target.toString() << " would veto with '"
                  << msg << "'";
        } else {
            log() << "not electing self, " << request.target.toString() << " would veto";
        }
        _abortReason = FresherNodeFound;
        return;
    }
}
Example #10
// NOTE: This method may only be called by ASIO threads
// (do not call from methods entered by TaskExecutor threads)
void NetworkInterfaceASIO::_completeOperation(AsyncOp* op, ResponseStatus resp) {
    auto metadata = op->getResponseMetadata();
    if (!metadata.isEmpty()) {
        resp.metadata = metadata;
    }

    // Cancel this operation's timeout. Note that the timeout callback may already be running,
    // may have run, or may have already been scheduled to run in the near future.
    if (op->_timeoutAlarm) {
        op->_timeoutAlarm->cancel();
    }

    if (resp.status.code() == ErrorCodes::ExceededTimeLimit) {
        _numTimedOutOps.fetchAndAdd(1);
    }

    if (op->_inSetup) {
        // If we are in setup we should only be here if we failed to connect.
        MONGO_ASIO_INVARIANT(!resp.isOK(), "Failed to connect in setup", op);
        // If we fail during connection, we won't be able to access any of op's members after
        // calling finish(), so we return here.
        log() << "Failed to connect to " << op->request().target << " - " << resp.status;
        _numFailedOps.fetchAndAdd(1);
        op->finish(resp);
        return;
    }

    if (op->_inRefresh) {
        // If we are in refresh we should only be here if we failed to heartbeat.
        MONGO_ASIO_INVARIANT(!resp.isOK(), "In refresh, but did not fail to heartbeat", op);
        // If we fail during heartbeating, we won't be able to access any of op's members after
        // calling finish(), so we return here.
        log() << "Failed asio heartbeat to " << op->request().target << " - "
              << redact(resp.status);
        _numFailedOps.fetchAndAdd(1);
        op->finish(resp);
        return;
    }

    if (!resp.isOK()) {
        // In the case that resp is not OK, but _inSetup is false, we are using a connection
        // that we got from the pool to execute a command, but it failed for some reason.
        LOG(2) << "Failed to execute command: " << redact(op->request().toString())
               << " reason: " << redact(resp.status);

        if (resp.status.code() != ErrorCodes::CallbackCanceled) {
            _numFailedOps.fetchAndAdd(1);
        }
    } else {
        _numSucceededOps.fetchAndAdd(1);
    }

    std::unique_ptr<AsyncOp> ownedOp;

    {
        stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);

        auto iter = _inProgress.find(op);

        MONGO_ASIO_INVARIANT_INLOCK(
            iter != _inProgress.end(), "Could not find AsyncOp in _inProgress", op);

        ownedOp = std::move(iter->second);
        _inProgress.erase(iter);
    }

    op->finish(resp);

    MONGO_ASIO_INVARIANT(static_cast<bool>(ownedOp), "Invalid AsyncOp", op);

    auto conn = std::move(op->_connectionPoolHandle);
    auto asioConn = static_cast<connection_pool_asio::ASIOConnection*>(conn.get());

    // Prevent any other threads or callbacks from accessing this op so we may safely complete
    // and destroy it. It is key that we do this after we remove the op from the _inProgress map
    // or someone else in cancelCommand could read the bumped generation and cancel the next
    // command that uses this op. See SERVER-20556.
    {
        stdx::lock_guard<stdx::mutex> lk(op->_access->mutex);
        ++(op->_access->id);
    }

    // We need to bump the generation BEFORE we call reset() or we could flip the timeout in the
    // timeout callback before returning the AsyncOp to the pool.
    ownedOp->reset();

    asioConn->bindAsyncOp(std::move(ownedOp));
    if (!resp.isOK()) {
        asioConn->indicateFailure(resp.status);
    } else {
        asioConn->indicateUsed();
        asioConn->indicateSuccess();
    }

    signalWorkAvailable();
}
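The SERVER-20556 fix in _completeOperation hinges on a small shared "access" object: bumping op->_access->id under its mutex invalidates any cancellation that raced with completion. A toy rendering of that handshake, assuming nothing beyond what the comments above describe (Access, snapshotGeneration, and tryCancel are illustrative names, not NetworkInterfaceASIO's real types):

#include <cstdint>
#include <iostream>
#include <memory>
#include <mutex>

// Shared between an operation and would-be cancellers (illustrative).
struct Access {
    std::mutex mutex;
    std::uint64_t id = 0;
};

struct Op {
    std::shared_ptr<Access> access = std::make_shared<Access>();
};

// A canceller captures the generation it saw when it scheduled the cancel...
std::uint64_t snapshotGeneration(Op& op) {
    std::lock_guard<std::mutex> lk(op.access->mutex);
    return op.access->id;
}

// ...and the cancel becomes a no-op if the op completed (and bumped its id) since.
bool tryCancel(Op& op, std::uint64_t sawGeneration) {
    std::lock_guard<std::mutex> lk(op.access->mutex);
    return op.access->id == sawGeneration;  // stale generation => do nothing
}

void completeOperation(Op& op) {
    // Bump the generation BEFORE the op is recycled, as in _completeOperation.
    std::lock_guard<std::mutex> lk(op.access->mutex);
    ++op.access->id;
}

int main() {
    Op op;
    auto gen = snapshotGeneration(op);
    completeOperation(op);  // the op finishes and is returned to the pool
    std::cout << (tryCancel(op, gen) ? "cancelled" : "cancel ignored (stale)") << '\n';
}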
Example #11
void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& request,
                                               const ResponseStatus& response) {
    ++_numResponses;
    if (!response.isOK()) {
        warning() << "Failed to complete heartbeat request to " << request.target << "; "
                  << response.getStatus();
        _badResponses.push_back(std::make_pair(request.target, response.getStatus()));
        return;
    }

    BSONObj resBSON = response.getValue().data;
    ReplSetHeartbeatResponse hbResp;
    Status hbStatus = hbResp.initialize(resBSON, 0);

    if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
        std::string message = str::stream() << "Our set name did not match that of "
                                            << request.target.toString();
        _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
        warning() << message;
        return;
    }

    if (!hbStatus.isOK() && hbStatus != ErrorCodes::InvalidReplicaSetConfig) {
        warning() << "Got error (" << hbStatus << ") response on heartbeat request to "
                  << request.target << "; " << hbResp;
        _badResponses.push_back(std::make_pair(request.target, hbStatus));
        return;
    }

    if (!hbResp.getReplicaSetName().empty()) {
        if (hbResp.getConfigVersion() >= _rsConfig->getConfigVersion()) {
            std::string message = str::stream()
                << "Our config version of " << _rsConfig->getConfigVersion()
                << " is no larger than the version on " << request.target.toString()
                << ", which is " << hbResp.getConfigVersion();
            _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
            warning() << message;
            return;
        }
    }

    if (_rsConfig->hasReplicaSetId()) {
        StatusWith<rpc::ReplSetMetadata> replMetadata =
            rpc::ReplSetMetadata::readFromMetadata(response.getValue().metadata);
        if (replMetadata.isOK() && replMetadata.getValue().getReplicaSetId().isSet() &&
            _rsConfig->getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
            std::string message = str::stream()
                << "Our replica set ID of " << _rsConfig->getReplicaSetId()
                << " did not match that of " << request.target.toString() << ", which is "
                << replMetadata.getValue().getReplicaSetId();
            _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
            warning() << message;
        }
    }

    const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
    if (isInitialConfig && hbResp.hasData()) {
        std::string message = str::stream() << "'" << request.target.toString()
                                            << "' has data already, cannot initiate set.";
        _vetoStatus = Status(ErrorCodes::CannotInitializeNodeWithData, message);
        warning() << message;
        return;
    }

    for (int i = 0; i < _rsConfig->getNumMembers(); ++i) {
        const MemberConfig& memberConfig = _rsConfig->getMemberAt(i);
        if (memberConfig.getHostAndPort() != request.target) {
            continue;
        }
        if (memberConfig.isElectable()) {
            ++_numElectable;
        }
        if (memberConfig.isVoter()) {
            _voters.push_back(request.target);
        }
        return;
    }
    invariant(false);
}
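The final loop of _tabulateHeartbeatResponse locates the responding member in the config and tallies whether it is electable and/or a voter; falling off the end of the loop means the responder is not in the config at all, hence the invariant. A reduced sketch of that tally over standard containers (MemberConfig shrunk to two flags; all names illustrative):

#include <iostream>
#include <string>
#include <vector>

struct MemberConfig {
    std::string hostAndPort;
    bool electable;
    bool voter;
};

int main() {
    const std::vector<MemberConfig> members = {
        {"a:27017", true, true}, {"b:27017", false, true}, {"c:27017", false, false}};

    int numElectable = 0;
    std::vector<std::string> voters;

    for (const std::string& target : {std::string("a:27017"), std::string("b:27017")}) {
        // Mirror the final loop in _tabulateHeartbeatResponse: find the member
        // matching the responding host and tally its properties.
        for (const MemberConfig& m : members) {
            if (m.hostAndPort != target)
                continue;
            if (m.electable)
                ++numElectable;
            if (m.voter)
                voters.push_back(target);
            break;  // the original returns here; a non-member response is a bug (invariant)
        }
    }
    std::cout << numElectable << " electable, " << voters.size() << " voters responded\n";
}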