void NetworkInterfaceASIO::AsyncOp::finish(const ResponseStatus& status) {
    // We never hold the access lock when we call finish from NetworkInterfaceASIO.
    _transitionToState(AsyncOp::State::kFinished);

    LOG(2) << "Request " << _request.id << " finished with response: "
           << (status.getStatus().isOK() ? status.getValue().data.toString()
                                         : status.getStatus().toString());

    // Calling the completion handler may invalidate state in this op, so do it last.
    _onFinish(status);
}
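The closing comment is the load-bearing part of this function: _onFinish may free the AsyncOp itself, so it must be the very last thing touched, and no member of this may be read afterwards. Below is a minimal, self-contained sketch of that callback-last discipline; it is not MongoDB code, the names are hypothetical, and std::function stands in for the real handler type.

#include <functional>
#include <iostream>
#include <memory>
#include <utility>

class Operation {
public:
    explicit Operation(std::function<void(int)> onFinish)
        : _onFinish(std::move(onFinish)) {}

    void finish(int status) {
        _state = State::kFinished;            // mutate state while 'this' is valid
        auto handler = std::move(_onFinish);  // detach the handler first
        handler(status);                      // may destroy this Operation
        // Deliberately no member access past this point.
    }

private:
    enum class State { kRunning, kFinished };
    State _state = State::kRunning;
    std::function<void(int)> _onFinish;
};

int main() {
    // The handler owns the operation via a shared_ptr and releases it,
    // so the Operation dies while finish() is still on the stack.
    std::shared_ptr<Operation> op;
    op = std::make_shared<Operation>([&op](int status) {
        std::cout << "finished with status " << status << "\n";
        op.reset();  // destroys the Operation; finish() must not touch members now
    });
    op->finish(0);
}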
Example #2
void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& request,
                                               const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        log() << "VoteRequester: Got failed response from " << request.target << ": "
              << response.getStatus();
    } else {
        _responders.insert(request.target);
        ReplSetRequestVotesResponse voteResponse;
        const auto status = voteResponse.initialize(response.getValue().data);
        if (!status.isOK()) {
            log() << "VoteRequester: Got error processing response with status: " << status
                  << ", resp:" << response.getValue().data;
        }

        if (voteResponse.getVoteGranted()) {
            LOG(3) << "VoteRequester: Got yes vote from " << request.target
                   << ", resp:" << response.getValue().data;
            _votes++;
        } else {
            log() << "VoteRequester: Got no vote from " << request.target
                  << " because: " << voteResponse.getReason()
                  << ", resp:" << response.getValue().data;
        }

        if (voteResponse.getTerm() > _term) {
            _staleTerm = true;
        }
    }
}
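This function and the older variant in Example #4 below share the same scatter-gather tally shape: count the response, record the responder, bump the vote count, and flag a stale term. A stripped-down, self-contained sketch of that shape, with all names hypothetical:

#include <iostream>
#include <set>
#include <string>

struct VoteTally {
    std::set<std::string> responders;
    int votes = 0;
    long long term = 1;
    bool staleTerm = false;
    int responsesProcessed = 0;

    void processResponse(const std::string& target, bool ok,
                         bool voteGranted, long long responderTerm) {
        ++responsesProcessed;
        if (!ok) {
            std::cout << "failed response from " << target << "\n";
            return;  // failed responses are counted but carry no vote
        }
        responders.insert(target);
        if (voteGranted)
            ++votes;
        if (responderTerm > term)
            staleTerm = true;  // someone has already moved past our term
    }
};

int main() {
    VoteTally t;
    t.processResponse("node1:27017", true, true, 1);
    t.processResponse("node2:27017", true, false, 2);
    t.processResponse("node3:27017", false, false, 0);
    std::cout << "votes=" << t.votes << " stale=" << std::boolalpha << t.staleTerm
              << " processed=" << t.responsesProcessed << "\n";
}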
Example #3
void FreshnessScanner::Algorithm::processResponse(const RemoteCommandRequest& request,
                                                  const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        LOG(2) << "FreshnessScanner: Got failed response from " << request.target << ": "
               << response.getStatus();
    } else {
        BSONObj opTimesObj = response.getValue().data.getObjectField("optimes");
        OpTime lastOpTime;
        Status status = bsonExtractOpTimeField(opTimesObj, "appliedOpTime", &lastOpTime);
        if (!status.isOK()) {
            return;
        }

        int index = _rsConfig.findMemberIndexByHostAndPort(request.target);
        FreshnessInfo freshnessInfo{index, lastOpTime};

        auto cmp = [](const FreshnessInfo& a, const FreshnessInfo& b) {
            return a.opTime > b.opTime;
        };
        auto iter =
            std::upper_bound(_freshnessInfos.begin(), _freshnessInfos.end(), freshnessInfo, cmp);
        _freshnessInfos.insert(iter, freshnessInfo);
    }
}
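The interesting detail here is the sorted insert: std::upper_bound with a descending comparator keeps _freshnessInfos ordered from freshest to stalest, and preserves arrival order among equal opTimes. A minimal standalone sketch of the same technique, with FreshnessInfo's fields simplified to plain integers:

#include <algorithm>
#include <iostream>
#include <vector>

struct FreshnessInfo {
    int index;    // member index in the replica set config
    long opTime;  // simplified stand-in for the real OpTime type
};

int main() {
    std::vector<FreshnessInfo> infos;
    auto cmp = [](const FreshnessInfo& a, const FreshnessInfo& b) {
        return a.opTime > b.opTime;  // descending: fresher opTimes first
    };

    const FreshnessInfo responses[] = {{0, 5}, {1, 9}, {2, 7}};
    for (const auto& fi : responses) {
        // upper_bound returns the first position whose element compares
        // after fi, so equal opTimes keep their arrival order.
        auto iter = std::upper_bound(infos.begin(), infos.end(), fi, cmp);
        infos.insert(iter, fi);
    }

    for (const auto& fi : infos)
        std::cout << "member " << fi.index << " opTime " << fi.opTime << "\n";
    // member 1 opTime 9, member 2 opTime 7, member 0 opTime 5
}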
Example #4
void VoteRequester::Algorithm::processResponse(const RemoteCommandRequest& request,
                                               const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        log() << "VoteRequester: Got failed response from " << request.target << ": "
              << response.getStatus();
    } else {
        ReplSetRequestVotesResponse voteResponse;
        // Check the parse status instead of discarding it (as Example #2 does).
        const auto status = voteResponse.initialize(response.getValue().data);
        if (!status.isOK()) {
            log() << "VoteRequester: Got error processing response with status: " << status;
        }

        if (voteResponse.getVoteGranted()) {
            _votes++;
        } else {
            log() << "VoteRequester: Got no vote from " << request.target
                  << " because: " << voteResponse.getReason();
        }

        if (voteResponse.getTerm() > _term) {
            _staleTerm = true;
        }
    }
}
Example #5
void ElectCmdRunner::Algorithm::processResponse(const RemoteCommandRequest& request,
                                                const ResponseStatus& response) {
    ++_actualResponses;

    if (response.isOK()) {
        BSONObj res = response.getValue().data;
        log() << "received " << res["vote"] << " votes from " << request.target;
        LOG(1) << "full elect res: " << res.toString();
        BSONElement vote(res["vote"]);
        if (vote.type() != mongo::NumberInt) {
            error() << "wrong type for vote argument in replSetElect command: "
                    << typeName(vote.type());
            _sufficientResponsesReceived = true;
            return;
        }

        _receivedVotes += vote._numberInt();
    } else {
        warning() << "elect command to " << request.target << " failed: "
                  << response.getStatus();
    }
}
Example #6
void NetworkInterfaceImpl::_consumeNetworkRequests() {
    boost::unique_lock<boost::mutex> lk(_mutex);
    while (!_inShutdown) {
        if (_pending.empty()) {
            if (_threads.size() > kMinThreads) {
                const Date_t nowDate = now();
                const Date_t nextThreadRetirementDate =
                    _lastFullUtilizationDate + kMaxIdleThreadAge;
                if (nowDate > nextThreadRetirementDate) {
                    _lastFullUtilizationDate = nowDate;
                    break;
                }
            }
            _hasPending.wait_for(lk, kMaxIdleThreadAge);
            continue;
        }
        CommandData todo = _pending.front();
        _pending.pop_front();
        ++_numActiveNetworkRequests;
        --_numIdleThreads;
        lk.unlock();
        ResponseStatus result = _commandExec.runCommand(todo.request);
        LOG(2) << "Network status of sending " << todo.request.cmdObj.firstElementFieldName()
               << " to " << todo.request.target << " was " << result.getStatus();
        todo.onFinish(result);
        lk.lock();
        --_numActiveNetworkRequests;
        ++_numIdleThreads;
        _signalWorkAvailable_inlock();
    }
    --_numIdleThreads;
    if (_inShutdown) {
        return;
    }
    // This thread is ending because it was idle for too long.
    // Find self in _threads, remove self from _threads, detach self.
    for (size_t i = 0; i < _threads.size(); ++i) {
        if (_threads[i]->get_id() != boost::this_thread::get_id()) {
            continue;
        }
        _threads[i]->detach();
        _threads[i].swap(_threads.back());
        _threads.pop_back();
        return;
    }
    severe().stream() << "Could not find this thread, with id " << boost::this_thread::get_id()
                      << " in the replication networking thread pool";
    fassertFailedNoTrace(28581);
}
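The tail of _consumeNetworkRequests shows a worker thread retiring itself from a pool: detach first, so nothing will ever join the handle, then erase its own slot with a swap-and-pop. A minimal, self-contained sketch of the same idiom, using std::thread instead of boost and hypothetical names:

#include <chrono>
#include <iostream>
#include <memory>
#include <mutex>
#include <thread>
#include <vector>

std::mutex poolMutex;
std::vector<std::unique_ptr<std::thread>> threads;

void retireSelf() {
    std::lock_guard<std::mutex> lk(poolMutex);
    for (size_t i = 0; i < threads.size(); ++i) {
        if (threads[i]->get_id() != std::this_thread::get_id())
            continue;
        threads[i]->detach();             // safe to destroy the handle now
        threads[i].swap(threads.back());  // O(1) erase: swap with last, pop
        threads.pop_back();
        return;
    }
    std::cerr << "thread not found in pool\n";  // mirrors the fassert above
}

int main() {
    {
        // Hold the mutex while registering so the worker cannot search
        // the vector before its own entry is in place.
        std::lock_guard<std::mutex> lk(poolMutex);
        threads.push_back(std::make_unique<std::thread>(retireSelf));
    }
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
    std::lock_guard<std::mutex> lk(poolMutex);
    std::cout << "threads remaining: " << threads.size() << "\n";  // prints 0
}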
void ElectionWinnerDeclarer::Algorithm::processResponse(const RemoteCommandRequest& request,
                                                        const ResponseStatus& response) {
    _responsesProcessed++;
    if (!response.isOK()) {  // failed response
        log() << "ElectionWinnerDeclarer: Got failed response from " << request.target << ": "
              << response.getStatus();
        return;
    }

    Status cmdResponseStatus = getStatusFromCommandResult(response.getValue().data);
    if (!cmdResponseStatus.isOK()) {  // disagreement response
        _failed = true;
        _status = cmdResponseStatus;
        log() << "ElectionWinnerDeclarer: Got error response from " << request.target
              << " with term: " << response.getValue().data["term"].Number()
              << " and error: " << cmdResponseStatus;
    }
}
void QuorumChecker::_tabulateHeartbeatResponse(const RemoteCommandRequest& request,
                                               const ResponseStatus& response) {
    ++_numResponses;
    if (!response.isOK()) {
        warning() << "Failed to complete heartbeat request to " << request.target << "; "
                  << response.getStatus();
        _badResponses.push_back(std::make_pair(request.target, response.getStatus()));
        return;
    }

    BSONObj resBSON = response.getValue().data;
    ReplSetHeartbeatResponse hbResp;
    Status hbStatus = hbResp.initialize(resBSON, 0);

    if (hbStatus.code() == ErrorCodes::InconsistentReplicaSetNames) {
        std::string message = str::stream() << "Our set name did not match that of "
                                            << request.target.toString();
        _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
        warning() << message;
        return;
    }

    if (!hbStatus.isOK() && hbStatus != ErrorCodes::InvalidReplicaSetConfig) {
        warning() << "Got error (" << hbStatus << ") response on heartbeat request to "
                  << request.target << "; " << hbResp;
        _badResponses.push_back(std::make_pair(request.target, hbStatus));
        return;
    }

    if (!hbResp.getReplicaSetName().empty()) {
        if (hbResp.getConfigVersion() >= _rsConfig->getConfigVersion()) {
            std::string message = str::stream()
                << "Our config version of " << _rsConfig->getConfigVersion()
                << " is no larger than the version on " << request.target.toString()
                << ", which is " << hbResp.getConfigVersion();
            _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
            warning() << message;
            return;
        }
    }

    if (_rsConfig->hasReplicaSetId()) {
        StatusWith<rpc::ReplSetMetadata> replMetadata =
            rpc::ReplSetMetadata::readFromMetadata(response.getValue().metadata);
        if (replMetadata.isOK() && replMetadata.getValue().getReplicaSetId().isSet() &&
            _rsConfig->getReplicaSetId() != replMetadata.getValue().getReplicaSetId()) {
            std::string message = str::stream()
                << "Our replica set ID of " << _rsConfig->getReplicaSetId()
                << " did not match that of " << request.target.toString() << ", which is "
                << replMetadata.getValue().getReplicaSetId();
            _vetoStatus = Status(ErrorCodes::NewReplicaSetConfigurationIncompatible, message);
            warning() << message;
        }
    }

    const bool isInitialConfig = _rsConfig->getConfigVersion() == 1;
    if (isInitialConfig && hbResp.hasData()) {
        std::string message = str::stream() << "'" << request.target.toString()
                                            << "' has data already, cannot initiate set.";
        _vetoStatus = Status(ErrorCodes::CannotInitializeNodeWithData, message);
        warning() << message;
        return;
    }

    for (int i = 0; i < _rsConfig->getNumMembers(); ++i) {
        const MemberConfig& memberConfig = _rsConfig->getMemberAt(i);
        if (memberConfig.getHostAndPort() != request.target) {
            continue;
        }
        if (memberConfig.isElectable()) {
            ++_numElectable;
        }
        if (memberConfig.isVoter()) {
            _voters.push_back(request.target);
        }
        return;
    }
    invariant(false);
}
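The structure of _tabulateHeartbeatResponse is worth noting: every check either records a veto, which sinks the whole quorum check on its own, or a bad response, which may still be tolerated, and each branch early-returns once the response is classified. A stripped-down sketch of that veto-versus-bad-response split, with all types and names hypothetical:

#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Tabulator {
    int numResponses = 0;
    std::string vetoReason;  // empty means "no veto yet"
    std::vector<std::pair<std::string, std::string>> badResponses;

    void tabulate(const std::string& target, bool ok, bool setNameMismatch) {
        ++numResponses;
        if (!ok) {
            badResponses.emplace_back(target, "request failed");
            return;  // tolerated unless too many nodes fail
        }
        if (setNameMismatch && vetoReason.empty()) {
            vetoReason = "Our set name did not match that of " + target;
            return;  // a single veto fails the reconfig outright
        }
    }
};

int main() {
    Tabulator t;
    t.tabulate("node1:27017", false, false);
    t.tabulate("node2:27017", true, true);
    std::cout << "responses=" << t.numResponses << " bad=" << t.badResponses.size()
              << " veto=\"" << t.vetoReason << "\"\n";
}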
// NOTE: This method may only be called by ASIO threads
// (do not call from methods entered by TaskExecutor threads)
void NetworkInterfaceASIO::_completeOperation(AsyncOp* op, const ResponseStatus& resp) {
    // Cancel this operation's timeout. Note that the timeout callback may already be running,
    // may have run, or may have already been scheduled to run in the near future.
    if (op->_timeoutAlarm) {
        op->_timeoutAlarm->cancel();
    }

    if (resp.getStatus().code() == ErrorCodes::ExceededTimeLimit) {
        _numTimedOutOps.fetchAndAdd(1);
    }

    if (op->_inSetup) {
        // If we are in setup we should only be here if we failed to connect.
        MONGO_ASIO_INVARIANT(!resp.isOK(), "Failed to connect in setup", op);
        // If we fail during connection, we won't be able to access any of op's members after
        // calling finish(), so we return here.
        LOG(1) << "Failed to connect to " << op->request().target << " - " << resp.getStatus();
        _numFailedOps.fetchAndAdd(1);
        op->finish(resp);
        return;
    }

    if (!resp.isOK()) {
        // In the case that resp is not OK, but _inSetup is false, we are using a connection
        // that we got from the pool to execute a command, but it failed for some reason.
        LOG(2) << "Failed to execute command: " << op->request().toString()
               << " reason: " << resp.getStatus();

        if (resp.getStatus().code() != ErrorCodes::CallbackCanceled) {
            _numFailedOps.fetchAndAdd(1);
        }
    } else {
        _numSucceededOps.fetchAndAdd(1);
    }

    std::unique_ptr<AsyncOp> ownedOp;

    {
        stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);

        auto iter = _inProgress.find(op);

        MONGO_ASIO_INVARIANT_INLOCK(
            iter != _inProgress.end(), "Could not find AsyncOp in _inProgress", op);

        ownedOp = std::move(iter->second);
        _inProgress.erase(iter);
    }

    op->finish(resp);

    MONGO_ASIO_INVARIANT(static_cast<bool>(ownedOp), "Invalid AsyncOp", op);

    auto conn = std::move(op->_connectionPoolHandle);
    auto asioConn = static_cast<connection_pool_asio::ASIOConnection*>(conn.get());

    // Prevent any other threads or callbacks from accessing this op so we may safely complete
    // and destroy it. It is key that we do this after we remove the op from the _inProgress map
    // or someone else in cancelCommand could read the bumped generation and cancel the next
    // command that uses this op. See SERVER-20556.
    {
        stdx::lock_guard<stdx::mutex> lk(op->_access->mutex);
        ++(op->_access->id);
    }

    // We need to bump the generation BEFORE we call reset() or we could flip the timeout in the
    // timeout callback before returning the AsyncOp to the pool.
    ownedOp->reset();

    asioConn->bindAsyncOp(std::move(ownedOp));
    if (!resp.isOK()) {
        asioConn->indicateFailure(resp.getStatus());
    } else {
        asioConn->indicateSuccess();
    }

    signalWorkAvailable();
}
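The generation bump guarded by op->_access->mutex is what makes late cancellations harmless: a canceller that still holds a pointer to this op compares the generation it remembered against the current one and gives up if they differ, which is the SERVER-20556 fix the comment references. A minimal sketch of that generation-counter pattern, with hypothetical names:

#include <cstdint>
#include <iostream>
#include <mutex>

struct Access {
    std::mutex mutex;
    std::uint64_t id = 0;  // bumped every time the op completes
};

struct AsyncOpSketch {
    Access access;
    bool canceled = false;
};

// Cancel only takes effect if the op is still on the generation the
// caller observed when it scheduled the cancellation.
void cancelIfCurrent(AsyncOpSketch& op, std::uint64_t rememberedId) {
    std::lock_guard<std::mutex> lk(op.access.mutex);
    if (op.access.id != rememberedId)
        return;  // op was completed and recycled; this cancel is stale
    op.canceled = true;
}

int main() {
    AsyncOpSketch op;
    std::uint64_t seen = op.access.id;  // a canceller records the generation

    {   // completion path: bump the generation before recycling the op
        std::lock_guard<std::mutex> lk(op.access.mutex);
        ++op.access.id;
    }

    cancelIfCurrent(op, seen);  // stale cancel: ignored
    std::cout << std::boolalpha << op.canceled << "\n";  // prints false
}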