void NetworkInterfaceASIO::_completedOpCallback(AsyncOp* op) {
    // If we were told to send an empty message, toRecv will be empty here.

    // TODO: handle metadata readers
    const auto elapsed = [this, op]() { return now() - op->start(); };

    if (op->command().toRecv().empty()) {
        LOG(3) << "received an empty message";
        return _completeOperation(op, RemoteCommandResponse(BSONObj(), BSONObj(), elapsed()));
    }

    try {
        auto reply = rpc::makeReply(&(op->command().toRecv()));

        if (reply->getProtocol() != op->operationProtocol()) {
            return _completeOperation(
                op,
                Status(ErrorCodes::RPCProtocolNegotiationFailed,
                       str::stream() << "Mismatched RPC protocols - request was '"
                                     << opToString(op->command().toSend().operation()) << "' '"
                                     << " but reply was '"
                                     << opToString(op->command().toRecv().operation()) << "'"));
        }

        _completeOperation(op,
                           // unavoidable copy
                           RemoteCommandResponse(reply->getCommandReply().getOwned(),
                                                 reply->getMetadata().getOwned(),
                                                 elapsed()));
    } catch (...) {
        // makeReply can throw if the reply was invalid.
        _completeOperation(op, exceptionToStatus());
    }
}
void NetworkInterfaceASIO::_runIsMaster(AsyncOp* op) {
    // We use a legacy builder to create our ismaster request because we may
    // have to communicate with servers that do not support OP_COMMAND
    rpc::LegacyRequestBuilder requestBuilder{};
    requestBuilder.setDatabase("admin");
    requestBuilder.setCommandName("isMaster");
    requestBuilder.setMetadata(rpc::makeEmptyMetadata());
    requestBuilder.setCommandArgs(BSON("isMaster" << 1));

    // Set current command to ismaster request and run
    auto beginStatus = op->beginCommand(std::move(*(requestBuilder.done())));
    if (!beginStatus.isOK()) {
        return _completeOperation(op, beginStatus);
    }

    // Callback to parse protocol information out of received ismaster response
    auto parseIsMaster = [this, op]() {

        auto swCommandReply = op->command()->response(rpc::Protocol::kOpQuery, now());
        if (!swCommandReply.isOK()) {
            return _completeOperation(op, swCommandReply.getStatus());
        }

        auto commandReply = std::move(swCommandReply.getValue());

        if (_hook) {
            // Run the validation hook.
            auto validHost = callNoexcept(
                *_hook, &NetworkConnectionHook::validateHost, op->request().target, commandReply);
            if (!validHost.isOK()) {
                return _completeOperation(op, validHost);
            }
        }

        auto protocolSet = rpc::parseProtocolSetFromIsMasterReply(commandReply.data);
        if (!protocolSet.isOK())
            return _completeOperation(op, protocolSet.getStatus());

        op->connection().setServerProtocols(protocolSet.getValue());

        // Set the operation protocol
        auto negotiatedProtocol =
            rpc::negotiate(op->connection().serverProtocols(), op->connection().clientProtocols());

        if (!negotiatedProtocol.isOK()) {
            return _completeOperation(op, negotiatedProtocol.getStatus());
        }

        op->setOperationProtocol(negotiatedProtocol.getValue());

        return _authenticate(op);

    };

    _asyncRunCommand(op->command(),
                     [this, op, parseIsMaster](std::error_code ec, size_t bytes) {
                         _validateAndRun(op, ec, std::move(parseIsMaster));
                     });
}
void NetworkInterfaceASIO::_networkErrorCallback(AsyncOp* op, const std::error_code& ec) {
    if (ec.category() == mongoErrorCategory()) {
        // If we get a Mongo error code, we can preserve it.
        _completeOperation(op, Status(ErrorCodes::fromInt(ec.value()), ec.message()));
    } else {
        // If we get an asio or system error, we just convert it to a network error.
        _completeOperation(op, Status(ErrorCodes::HostUnreachable, ec.message()));
    }
}
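// A minimal sketch of the category check above, assuming a custom error category in
// the style of mongoErrorCategory(). Errors raised with our own category keep their
// domain code; raw asio/system errors collapse to a generic network failure. The
// names below (DemoErrorCategory, classify) are illustrative, not MongoDB API.
#include <string>
#include <system_error>

namespace sketch {
class DemoErrorCategory final : public std::error_category {
public:
    const char* name() const noexcept override {
        return "demo";
    }
    std::string message(int code) const override {
        return "demo error " + std::to_string(code);
    }
};

const std::error_category& demoErrorCategory() {
    static DemoErrorCategory instance;
    return instance;
}

std::string classify(const std::error_code& ec) {
    // Mirrors _networkErrorCallback: preserve codes we minted ourselves,
    // treat everything else as HostUnreachable-style network trouble.
    if (ec.category() == demoErrorCategory()) {
        return "domain error: " + ec.message();
    }
    return "network error: " + ec.message();
}
}  // namespace sketch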
void NetworkInterfaceASIO::_completedOpCallback(AsyncOp* op) {
    // If we were told to send an empty message, toRecv will be empty here.
    if (op->command().toRecv().empty()) {
        LOG(3) << "received an empty message";
        auto elapsed = now() - op->start();
        return _completeOperation(op, RemoteCommandResponse(BSONObj(), BSONObj(), elapsed));
    }

    // TODO: handle metadata readers.
    auto response = _responseFromMessage(op->command().toRecv(), op->operationProtocol());
    _completeOperation(op, response);
}
void NetworkInterfaceASIO::_runIsMaster(AsyncOp* op) {
    // We use a legacy builder to create our ismaster request because we may
    // have to communicate with servers that do not support OP_COMMAND
    rpc::LegacyRequestBuilder requestBuilder{};
    requestBuilder.setDatabase("admin");
    requestBuilder.setCommandName("isMaster");
    requestBuilder.setMetadata(rpc::makeEmptyMetadata());
    requestBuilder.setCommandArgs(BSON("isMaster" << 1));

    // Set current command to ismaster request and run
    auto& cmd = op->beginCommand(std::move(*(requestBuilder.done())));

    // Callback to parse protocol information out of received ismaster response
    auto parseIsMaster = [this, op]() {
        try {
            auto commandReply = rpc::makeReply(&(op->command().toRecv()));
            BSONObj isMasterReply = commandReply->getCommandReply();

            auto protocolSet = rpc::parseProtocolSetFromIsMasterReply(isMasterReply);
            if (!protocolSet.isOK())
                return _completeOperation(op, protocolSet.getStatus());

            op->connection().setServerProtocols(protocolSet.getValue());

            // Set the operation protocol
            auto negotiatedProtocol = rpc::negotiate(op->connection().serverProtocols(),
                                                     op->connection().clientProtocols());

            if (!negotiatedProtocol.isOK()) {
                return _completeOperation(op, negotiatedProtocol.getStatus());
            }

            op->setOperationProtocol(negotiatedProtocol.getValue());

            // Advance the state machine
            return _authenticate(op);

        } catch (...) {
            // makeReply will throw if the reply was invalid.
            return _completeOperation(op, exceptionToStatus());
        }
    };

    _asyncRunCommand(&cmd,
                     [this, op, parseIsMaster](std::error_code ec, size_t bytes) {
                         _validateAndRun(op, ec, std::move(parseIsMaster));
                     });
}
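// A minimal sketch of the negotiation step used above, assuming rpc::ProtocolSet is
// a bitmask of supported wire protocols and rpc::negotiate picks the newest protocol
// common to both sides. The names here are illustrative stand-ins, not MongoDB's
// definitions; the real negotiate returns a StatusWith, where kNone below
// corresponds to the !negotiatedProtocol.isOK() branch.
#include <cstdint>

namespace sketch {
using ProtocolSet = std::uint64_t;
constexpr ProtocolSet kNone = 0;
constexpr ProtocolSet kOpQuery = 1 << 0;    // legacy OP_QUERY command path
constexpr ProtocolSet kOpCommand = 1 << 1;  // newer OP_COMMAND path

// Intersect the server's and client's sets, then prefer the newer protocol.
ProtocolSet negotiate(ProtocolSet server, ProtocolSet client) {
    const ProtocolSet common = server & client;
    if (common & kOpCommand)
        return kOpCommand;
    if (common & kOpQuery)
        return kOpQuery;
    return kNone;  // no shared protocol: negotiation fails
}
}  // namespace sketch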
void NetworkInterfaceASIO::_beginCommunication(AsyncOp* op) {
    // The way we set up connections for the connection pool is to start the
    // callback chain with connect(), but to get off at the first
    // _beginCommunication. I.e. all AsyncOps start off with _inSetup == true
    // and arrive here once they're connected and authed. At that point we
    // return to the connection pool's get() callback with _inSetup == false,
    // so we can proceed with user operations after they return to this
    // codepath.
    if (op->_inSetup) {
        log() << "Successfully connected to " << op->request().target.toString();
        op->_inSetup = false;
        op->finish(RemoteCommandResponse());
        return;
    }

    LOG(3) << "Initiating asynchronous command: " << redact(op->request().toString());

    auto beginStatus = op->beginCommand(op->request());
    if (!beginStatus.isOK()) {
        return _completeOperation(op, beginStatus);
    }

    _asyncRunCommand(op, [this, op](std::error_code ec, size_t bytes) {
        _validateAndRun(op, ec, [this, op]() { _completedOpCallback(op); });
    });
}
void NetworkInterfaceASIO::_runConnectionHook(AsyncOp* op) {
    if (!_hook) {
        return _beginCommunication(op);
    }

    auto swOptionalRequest =
        callNoexcept(*_hook, &NetworkConnectionHook::makeRequest, op->request().target);

    if (!swOptionalRequest.isOK()) {
        return _completeOperation(op, swOptionalRequest.getStatus());
    }

    auto optionalRequest = std::move(swOptionalRequest.getValue());

    if (optionalRequest == boost::none) {
        return _beginCommunication(op);
    }

    auto beginStatus = op->beginCommand(*optionalRequest, _metadataHook.get());
    if (!beginStatus.isOK()) {
        return _completeOperation(op, beginStatus);
    }

    auto finishHook = [this, op]() {
        auto response =
            op->command()->response(op->operationProtocol(), now(), _metadataHook.get());

        if (!response.isOK()) {
            return _completeOperation(op, response.getStatus());
        }

        auto handleStatus = callNoexcept(*_hook,
                                         &NetworkConnectionHook::handleReply,
                                         op->request().target,
                                         std::move(response.getValue()));

        if (!handleStatus.isOK()) {
            return _completeOperation(op, handleStatus);
        }

        return _beginCommunication(op);
    };

    return _asyncRunCommand(op, [this, op, finishHook](std::error_code ec, std::size_t bytes) {
        _validateAndRun(op, ec, finishHook);
    });
}
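// A minimal sketch of the hook contract that _runConnectionHook drives, with
// simplified stand-in types (Request, Response, Hook are illustrative, not the real
// NetworkConnectionHook signatures). In this style, makeRequest() returning an
// engaged optional means "send this extra request post-auth and route the reply to
// handleReply()"; an empty optional skips straight to _beginCommunication.
#include <optional>
#include <string>

namespace sketch {
struct Request {
    std::string cmd;
};
struct Response {
    std::string data;
};

class Hook {
public:
    virtual ~Hook() = default;
    virtual std::optional<Request> makeRequest() = 0;
    virtual bool handleReply(const Response& r) = 0;  // false -> fail the op
};

// A hook that sends one extra command during connection setup; the interface
// sends the request and feeds the reply back to handleReply().
class PingHook final : public Hook {
public:
    std::optional<Request> makeRequest() override {
        return Request{"ping"};
    }
    bool handleReply(const Response& r) override {
        return !r.data.empty();
    }
};
}  // namespace sketch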
void NetworkInterfaceASIO::_recvMessageBody(AsyncOp* op) {
    // TODO: This error code should be more meaningful.
    std::error_code ec;

    // validate message length
    int len = op->header()->constView().getMessageLength();
    if (len == 542393671) {  // 542393671 == "GET " read as a little-endian int32
        LOG(3) << "attempt to access MongoDB over HTTP on the native driver port.";
        return _networkErrorCallback(op, ec);
    } else if (len == -1) {
        // TODO: An endian check is run after the client connects; we should
        // record that we've received the client's handshake.
        LOG(3) << "Endian check received from client";
        return _networkErrorCallback(op, ec);
    } else if (static_cast<size_t>(len) < sizeof(MSGHEADER::Value) ||
               static_cast<size_t>(len) > MaxMessageSizeBytes) {
        warning() << "recv(): message len " << len << " is invalid. "
                  << "Min " << sizeof(MSGHEADER::Value) << " Max: " << MaxMessageSizeBytes;
        return _networkErrorCallback(op, ec);
    }

    // validate response id
    uint32_t expectedId = op->toSend()->header().getId();
    uint32_t actualId = op->header()->constView().getResponseTo();
    if (actualId != expectedId) {
        LOG(3) << "got wrong response:"
               << " expected response id: " << expectedId << ", got response id: " << actualId;
        return _networkErrorCallback(op, ec);
    }

    int z = (len + 1023) & 0xfffffc00;  // round the buffer size up to a 1KB boundary
    invariant(z >= len);
    op->toRecv()->setData(reinterpret_cast<char*>(mongoMalloc(z)), true);
    MsgData::View mdView = op->toRecv()->buf();

    // copy header data into master buffer
    int headerLen = sizeof(MSGHEADER::Value);
    memcpy(mdView.view2ptr(), op->header(), headerLen);
    int bodyLength = len - headerLen;
    invariant(bodyLength >= 0);

    // receive remaining data into md->data
    asio::async_read(op->connection()->sock(),
                     asio::buffer(mdView.data(), bodyLength),
                     [this, op, mdView](asio::error_code ec, size_t bytes) {

                         if (op->canceled()) {
                             return _completeOperation(op, kCanceledStatus);
                         }

                         if (ec) {
                             LOG(3) << "error receiving message body";
                             return _networkErrorCallback(op, ec);
                         }

                         return _completedWriteCallback(op);
                     });
}
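// A minimal sketch verifying the two constants used above: the HTTP sentinel and
// the 1KB rounding mask. Assumes a little-endian host, as the comparison in
// _recvMessageBody itself does.
#include <cassert>
#include <cstdint>
#include <cstring>

namespace sketch {
std::int32_t httpGetAsInt() {
    const char bytes[4] = {'G', 'E', 'T', ' '};  // first four bytes of an HTTP request
    std::int32_t value;
    std::memcpy(&value, bytes, sizeof(value));
    return value;  // 0x20544547 == 542393671 on little-endian hosts
}

int roundUpTo1K(int len) {
    return (len + 1023) & 0xfffffc00;  // round up, then clear the low 10 bits
}
}  // namespace sketch

// Usage: assert(sketch::httpGetAsInt() == 542393671);
//        assert(sketch::roundUpTo1K(1) == 1024 && sketch::roundUpTo1K(1024) == 1024);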
void NetworkInterfaceASIO::_authenticate(AsyncOp* op) {
    // There is currently no way for NetworkInterfaceASIO's users to run a command
    // without going through _authenticate(). Callers may want to run certain commands,
    // such as isMaster, before authenticating. We may want to offer this choice in the future.

    // This check is sufficient to see if auth is enabled on the system,
    // and avoids creating dependencies on deeper, less accessible auth code.
    if (!isInternalAuthSet()) {
        return _runConnectionHook(op);
    }

    // We will only have a valid clientName if SSL is enabled.
    std::string clientName;
#ifdef MONGO_CONFIG_SSL
    if (getSSLManager()) {
        clientName = getSSLManager()->getSSLConfiguration().clientSubjectName;
    }
#endif

    // authenticateClient will use this to run auth-related commands over our connection.
    auto runCommandHook = [this, op](executor::RemoteCommandRequest request,
                                     auth::AuthCompletionHandler handler) {

        // SERVER-14170: Set the metadataHook to nullptr explicitly as we cannot write metadata
        // here.
        auto beginStatus = op->beginCommand(request);
        if (!beginStatus.isOK()) {
            return handler(beginStatus);
        }

        auto callAuthCompletionHandler = [this, op, handler]() {
            auto authResponse =
                op->command()->response(op, op->operationProtocol(), now(), nullptr);
            handler(authResponse);
        };

        _asyncRunCommand(op,
                         [this, op, callAuthCompletionHandler](std::error_code ec, size_t bytes) {
                             _validateAndRun(op, ec, callAuthCompletionHandler);
                         });
    };

    // This will be called when authentication has completed.
    auto authHook = [this, op](auth::AuthResponse response) {
        if (!response.isOK())
            return _completeOperation(op, response);
        return _runConnectionHook(op);
    };

    auto params = getInternalUserAuthParams();
    auth::authenticateClient(
        params, op->request().target.host(), clientName, runCommandHook, authHook);
}
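// A minimal sketch of the adapter pattern above: the auth library never touches the
// socket, it only receives a "run this request, call me back" function plus a final
// completion callback. Types here are simplified stand-ins; the saslStart /
// saslContinue command names mirror MongoDB's auth conversation but the flow is
// reduced to a fixed two-step exchange.
#include <functional>
#include <string>

namespace sketch {
struct Request {
    std::string cmd;
};
struct Response {
    bool ok;
    std::string data;
};

using CompletionHandler = std::function<void(const Response&)>;
using RunCommand = std::function<void(const Request&, CompletionHandler)>;

// Drives the conversation the way authenticateClient would: send one command,
// and from its completion handler, finish the exchange.
void authenticate(RunCommand run, std::function<void(bool)> onDone) {
    run(Request{"saslStart"}, [run, onDone](const Response& first) {
        if (!first.ok)
            return onDone(false);
        run(Request{"saslContinue"},
            [onDone](const Response& second) { onDone(second.ok); });
    });
}
}  // namespace sketch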
void NetworkInterfaceASIO::_completedWriteCallback(AsyncOp* op) {
    // If we were told to send an empty message, toRecv will be empty here.

    // TODO: handle metadata SERVER-19156
    BSONObj commandReply;
    if (op->toRecv()->empty()) {
        LOG(3) << "received an empty message";
    } else {
        QueryResult::View qr = op->toRecv()->singleData().view2ptr();
        // unavoidable copy
        commandReply = BSONObj(qr.data()).getOwned();
    }
    _completeOperation(
        op, RemoteCommandResponse(std::move(commandReply), BSONObj(), now() - op->start()));
}
void NetworkInterfaceASIO::_recvMessageHeader(AsyncOp* op) {
    asio::async_read(op->connection()->sock(),
                     asio::buffer(reinterpret_cast<char*>(op->header()), sizeof(MSGHEADER::Value)),
                     [this, op](asio::error_code ec, size_t bytes) {

                         if (op->canceled()) {
                             return _completeOperation(op, kCanceledStatus);
                         }

                         if (ec) {
                             LOG(3) << "error receiving header";
                             return _networkErrorCallback(op, ec);
                         }
                         _recvMessageBody(op);
                     });
}
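// A minimal sketch of the two-phase read that _recvMessageHeader and
// _recvMessageBody implement together: read a fixed-size header, take the total
// message length from its first int32 (as in MSGHEADER::Value), then read exactly
// the remaining bytes. Assumes a connected asio tcp::socket and a little-endian
// host; the length validation is reduced to a single sanity check.
#include <array>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <system_error>
#include <vector>
#include <asio.hpp>

namespace sketch {
void readMessage(asio::ip::tcp::socket& sock,
                 std::function<void(std::error_code, std::vector<char>)> done) {
    auto header = std::make_shared<std::array<char, 16>>();  // 4 x int32, like MSGHEADER
    asio::async_read(
        sock, asio::buffer(*header), [&sock, header, done](std::error_code ec, std::size_t) {
            if (ec)
                return done(ec, {});
            std::int32_t len = 0;
            std::memcpy(&len, header->data(), sizeof(len));
            if (len < static_cast<std::int32_t>(header->size()))  // reject nonsense lengths
                return done(std::make_error_code(std::errc::bad_message), {});
            auto body = std::make_shared<std::vector<char>>(len - header->size());
            asio::async_read(
                sock, asio::buffer(*body), [body, done](std::error_code ec, std::size_t) {
                    done(ec, std::move(*body));
                });
        });
}
}  // namespace sketch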
void NetworkInterfaceASIO::_beginCommunication(AsyncOp* op) {
    if (op->canceled()) {
        return _completeOperation(op, kCanceledStatus);
    }

    Message* toSend = op->toSend();
    _messageFromRequest(op->request(), toSend);

    if (toSend->empty())
        return _completedWriteCallback(op);

    // TODO: Some day we may need to support vector messages.
    fassert(28708, toSend->buf() != 0);
    asio::const_buffer buf(toSend->buf(), toSend->size());
    return _asyncSendSimpleMessage(op, buf);
}
void NetworkInterfaceASIO::_asyncSendSimpleMessage(AsyncOp* op, const asio::const_buffer& buf) {
    asio::async_write(op->connection()->sock(),
                      asio::buffer(buf),
                      [this, op](std::error_code ec, std::size_t bytes) {

                          if (op->canceled()) {
                              return _completeOperation(op, kCanceledStatus);
                          }

                          if (ec) {
                              return _networkErrorCallback(op, ec);
                          }

                          _receiveResponse(op);
                      });
}
void NetworkInterfaceASIO::_beginCommunication(AsyncOp* op) {
    auto negotiatedProtocol =
        rpc::negotiate(op->connection().serverProtocols(), op->connection().clientProtocols());

    if (!negotiatedProtocol.isOK()) {
        return _completeOperation(op, negotiatedProtocol.getStatus());
    }

    op->setOperationProtocol(negotiatedProtocol.getValue());

    auto& cmd = op->beginCommand(
        std::move(*_messageFromRequest(op->request(), negotiatedProtocol.getValue())));

    _asyncRunCommand(&cmd,
                     [this, op](std::error_code ec, size_t bytes) {
                         _validateAndRun(op, ec, [this, op]() { _completedOpCallback(op); });
                     });
}
void NetworkInterfaceASIO::_beginCommunication(AsyncOp* op) {
    auto negotiatedProtocol =
        rpc::negotiate(op->connection()->serverProtocols(), op->connection()->clientProtocols());

    if (!negotiatedProtocol.isOK()) {
        return _completeOperation(op, negotiatedProtocol.getStatus());
    }

    op->setOperationProtocol(negotiatedProtocol.getValue());

    op->setToSend(std::move(*_messageFromRequest(op->request(), negotiatedProtocol.getValue())));

    // TODO: Is this logic actually necessary (SERVER-19320)?
    if (op->toSend()->empty())
        return _completedWriteCallback(op);

    // TODO: Some day we may need to support vector messages.
    fassert(28708, op->toSend()->buf() != 0);
    asio::const_buffer buf(op->toSend()->buf(), op->toSend()->size());
    return _asyncSendSimpleMessage(op, buf);
}
void NetworkInterfaceASIO::_completedOpCallback(AsyncOp* op) {
    auto response =
        op->command()->response(op, op->operationProtocol(), now(), _metadataHook.get());
    _completeOperation(op, response);
}
void NetworkInterfaceASIO::startCommand(const TaskExecutor::CallbackHandle& cbHandle,
                                        const RemoteCommandRequest& request,
                                        const RemoteCommandCompletionFn& onFinish) {
    invariant(onFinish);
    {
        stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
        const auto insertResult = _inGetConnection.emplace(cbHandle);
        // We should never see the same CallbackHandle added twice
        invariant(insertResult.second);
    }

    LOG(2) << "startCommand: " << request.toString();

    auto getConnectionStartTime = now();

    auto nextStep = [this, getConnectionStartTime, cbHandle, request, onFinish](
        StatusWith<ConnectionPool::ConnectionHandle> swConn) {

        if (!swConn.isOK()) {
            LOG(2) << "Failed to get connection from pool: " << swConn.getStatus();

            bool wasPreviouslyCanceled = false;
            {
                stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
                wasPreviouslyCanceled = _inGetConnection.erase(cbHandle) == 0;
            }

            onFinish(wasPreviouslyCanceled
                         ? Status(ErrorCodes::CallbackCanceled, "Callback canceled")
                         : swConn.getStatus());
            signalWorkAvailable();
            return;
        }

        auto conn = static_cast<connection_pool_asio::ASIOConnection*>(swConn.getValue().get());

        AsyncOp* op = nullptr;

        stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);

        const auto eraseCount = _inGetConnection.erase(cbHandle);

        // If we didn't find the request, we've been canceled
        if (eraseCount == 0) {
            lk.unlock();

            onFinish({ErrorCodes::CallbackCanceled, "Callback canceled"});

            // Though we were canceled, we know that the stream is fine, so indicate success.
            conn->indicateSuccess();

            signalWorkAvailable();

            return;
        }

        // We can't release the AsyncOp until we know we were not canceled.
        auto ownedOp = conn->releaseAsyncOp();
        op = ownedOp.get();

        // Sanity check that we are getting a clean AsyncOp.
        invariant(!op->canceled());
        invariant(!op->timedOut());

        // Now that we're inProgress, an external cancel can touch our op, but
        // not until we release the inProgressMutex.
        _inProgress.emplace(op, std::move(ownedOp));

        op->_cbHandle = std::move(cbHandle);
        op->_request = std::move(request);
        op->_onFinish = std::move(onFinish);
        op->_connectionPoolHandle = std::move(swConn.getValue());
        op->_start = getConnectionStartTime;

        // This ditches the lock and gets us onto the strand (so we're
        // threadsafe)
        op->_strand.post([this, op, getConnectionStartTime] {
            // Set timeout now that we have the correct request object
            if (op->_request.timeout != RemoteCommandRequest::kNoTimeout) {
                // Subtract the time it took to get the connection from the pool from the request
                // timeout.
                auto getConnectionDuration = now() - getConnectionStartTime;
                if (getConnectionDuration >= op->_request.timeout) {
                    // We only assume that the request timer is guaranteed to fire *after* the
                    // timeout duration - but make no stronger assumption. It is thus possible that
                    // we have already exceeded the timeout. In this case we timeout the operation
                    // manually.
                    return _completeOperation(op,
                                              {ErrorCodes::ExceededTimeLimit,
                                               "Remote command timed out while waiting to get a "
                                               "connection from the pool."});
                }

                // The above conditional guarantees that the adjusted timeout will never underflow.
                invariant(op->_request.timeout > getConnectionDuration);
                auto adjustedTimeout = op->_request.timeout - getConnectionDuration;

                op->_timeoutAlarm = op->_owner->_timerFactory->make(&op->_strand, adjustedTimeout);

                std::shared_ptr<AsyncOp::AccessControl> access;
                std::size_t generation;
                {
                    stdx::lock_guard<stdx::mutex> lk(op->_access->mutex);
                    access = op->_access;
                    generation = access->id;
                }

                op->_timeoutAlarm->asyncWait([this, op, access, generation](std::error_code ec) {
                    if (!ec) {
                        // We must pass a check for safe access before using op inside the
                        // callback or we may attempt access on an invalid pointer.
                        stdx::lock_guard<stdx::mutex> lk(access->mutex);
                        if (generation != access->id) {
                            // The operation has been cleaned up, do not access.
                            return;
                        }

                        LOG(2) << "Operation timed out: " << op->request().toString();

                        // An operation may be in mid-flight when it times out, so we
                        // cancel any in-progress async calls but do not complete the operation now.
                        op->_timedOut = 1;
                        if (op->_connection) {
                            op->_connection->cancel();
                        }
                    } else {
                        LOG(4) << "failed to time operation out: " << ec.message();
                    }
                });
            }

            _beginCommunication(op);
        });
    };

    _connectionPool.get(request.target, request.timeout, nextStep);
}
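// A minimal sketch of the timeout arithmetic above, isolated: time spent acquiring
// a connection from the pool is charged against the request's budget, and a request
// whose budget is already spent is failed immediately rather than armed with a
// zero or negative timer.
#include <chrono>
#include <optional>

namespace sketch {
using Milliseconds = std::chrono::milliseconds;

// Returns the timer duration to arm, or nullopt when the op should be
// timed out manually (the ExceededTimeLimit branch above).
std::optional<Milliseconds> adjustTimeout(Milliseconds requestTimeout,
                                          Milliseconds getConnectionDuration) {
    if (getConnectionDuration >= requestTimeout)
        return std::nullopt;
    return requestTimeout - getConnectionDuration;  // never underflows
}
}  // namespace sketch

// Usage: adjustTimeout(Milliseconds(500), Milliseconds(120)) -> 380ms timer;
//        adjustTimeout(Milliseconds(500), Milliseconds(600)) -> nullopt (fail now).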
void NetworkInterfaceASIO::_networkErrorCallback(AsyncOp* op, const std::error_code& ec) {
    ErrorCodes::Error errorCode = (ec.category() == mongoErrorCategory())
        ? ErrorCodes::fromInt(ec.value())
        : ErrorCodes::HostUnreachable;
    _completeOperation(op, {errorCode, ec.message(), Milliseconds(now() - op->_start)});
}
void NetworkInterfaceASIO::_networkErrorCallback(AsyncOp* op, const std::error_code& ec) {
    LOG(3) << "networking error occurred";
    _completeOperation(op, Status(ErrorCodes::HostUnreachable, ec.message()));
}
void NetworkInterfaceASIO::_runIsMaster(AsyncOp* op) {
    // We use a legacy builder to create our ismaster request because we may
    // have to communicate with servers that do not support OP_COMMAND
    rpc::LegacyRequestBuilder requestBuilder{};
    requestBuilder.setDatabase("admin");
    requestBuilder.setCommandName("isMaster");

    BSONObjBuilder bob;
    bob.append("isMaster", 1);
    bob.append("hangUpOnStepDown", false);

    const auto versionString = VersionInfoInterface::instance().version();
    ClientMetadata::serialize(_options.instanceName, versionString, &bob);

    if (Command::testCommandsEnabled) {
        // Only include the host:port of this process in the isMaster command request if test
        // commands are enabled. mongobridge uses this field to identify the process opening a
        // connection to it.
        StringBuilder sb;
        sb << getHostName() << ':' << serverGlobalParams.port;
        bob.append("hostInfo", sb.str());
    }

    op->connection().getCompressorManager().clientBegin(&bob);

    if (WireSpec::instance().isInternalClient) {
        WireSpec::appendInternalClientWireVersion(WireSpec::instance().outgoing, &bob);
    }

    requestBuilder.setCommandArgs(bob.done());
    requestBuilder.setMetadata(rpc::makeEmptyMetadata());

    // Set current command to ismaster request and run
    auto beginStatus = op->beginCommand(requestBuilder.done(), op->request().target);
    if (!beginStatus.isOK()) {
        return _completeOperation(op, beginStatus);
    }

    // Callback to parse protocol information out of received ismaster response
    auto parseIsMaster = [this, op]() {

        auto swCommandReply = op->command()->response(op, rpc::Protocol::kOpQuery, now());
        if (!swCommandReply.isOK()) {
            return _completeOperation(op, swCommandReply);
        }

        auto commandReply = std::move(swCommandReply);

        // Ensure that the isMaster response is "ok:1".
        auto commandStatus = getStatusFromCommandResult(commandReply.data);
        if (!commandStatus.isOK()) {
            return _completeOperation(op, commandStatus);
        }

        auto protocolSet = rpc::parseProtocolSetFromIsMasterReply(commandReply.data);
        if (!protocolSet.isOK())
            return _completeOperation(op, protocolSet.getStatus());

        auto validateStatus =
            rpc::validateWireVersion(WireSpec::instance().outgoing, protocolSet.getValue().version);
        if (!validateStatus.isOK()) {
            warning() << "remote host has incompatible wire version: " << validateStatus;

            return _completeOperation(op, validateStatus);
        }

        op->connection().setServerProtocols(protocolSet.getValue().protocolSet);

        invariant(op->connection().clientProtocols() != rpc::supports::kNone);
        // Set the operation protocol
        auto negotiatedProtocol =
            rpc::negotiate(op->connection().serverProtocols(), op->connection().clientProtocols());

        if (!negotiatedProtocol.isOK()) {
            // Add relatively verbose logging here, since this should not happen unless we are
            // mongos and we try to connect to a node that doesn't support OP_COMMAND.
            warning() << "failed to negotiate protocol with remote host: " << op->request().target;
            warning() << "request was: " << redact(op->request().cmdObj);
            warning() << "response was: " << redact(commandReply.data);

            auto clientProtos = rpc::toString(op->connection().clientProtocols());
            if (clientProtos.isOK()) {
                warning() << "our (client) supported protocols: " << clientProtos.getValue();
            }
            auto serverProtos = rpc::toString(op->connection().serverProtocols());
            if (serverProtos.isOK()) {
                warning() << "remote server's supported protocols:" << serverProtos.getValue();
            }
            return _completeOperation(op, negotiatedProtocol.getStatus());
        }

        op->setOperationProtocol(negotiatedProtocol.getValue());

        op->connection().getCompressorManager().clientFinish(commandReply.data);

        if (_hook) {
            // Run the validation hook.
            auto validHost = callNoexcept(
                *_hook, &NetworkConnectionHook::validateHost, op->request().target, commandReply);
            if (!validHost.isOK()) {
                return _completeOperation(op, validHost);
            }
        }

        return _authenticate(op);

    };

    _asyncRunCommand(op, [this, op, parseIsMaster](std::error_code ec, size_t bytes) {
        _validateAndRun(op, ec, std::move(parseIsMaster));
    });
}
Status NetworkInterfaceASIO::startCommand(const TaskExecutor::CallbackHandle& cbHandle,
                                          RemoteCommandRequest& request,
                                          const RemoteCommandCompletionFn& onFinish) {
    MONGO_ASIO_INVARIANT(onFinish, "Invalid completion function");
    {
        stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
        const auto insertResult = _inGetConnection.emplace(cbHandle);
        // We should never see the same CallbackHandle added twice
        MONGO_ASIO_INVARIANT_INLOCK(insertResult.second, "Same CallbackHandle added twice");
    }

    if (inShutdown()) {
        return {ErrorCodes::ShutdownInProgress, "NetworkInterfaceASIO shutdown in progress"};
    }

    LOG(2) << "startCommand: " << redact(request.toString());

    auto getConnectionStartTime = now();

    auto statusMetadata = attachMetadataIfNeeded(request, _metadataHook.get());
    if (!statusMetadata.isOK()) {
        return statusMetadata;
    }

    auto nextStep = [this, getConnectionStartTime, cbHandle, request, onFinish](
        StatusWith<ConnectionPool::ConnectionHandle> swConn) {

        if (!swConn.isOK()) {
            LOG(2) << "Failed to get connection from pool for request " << request.id << ": "
                   << swConn.getStatus();

            bool wasPreviouslyCanceled = false;
            {
                stdx::lock_guard<stdx::mutex> lk(_inProgressMutex);
                wasPreviouslyCanceled = _inGetConnection.erase(cbHandle) == 0;
            }

            Status status = wasPreviouslyCanceled
                ? Status(ErrorCodes::CallbackCanceled, "Callback canceled")
                : swConn.getStatus();
            if (status.code() == ErrorCodes::NetworkInterfaceExceededTimeLimit) {
                status = Status(ErrorCodes::ExceededTimeLimit, status.reason());
            }
            if (status.code() == ErrorCodes::ExceededTimeLimit) {
                _numTimedOutOps.fetchAndAdd(1);
            }
            if (status.code() != ErrorCodes::CallbackCanceled) {
                _numFailedOps.fetchAndAdd(1);
            }

            onFinish({status, now() - getConnectionStartTime});
            signalWorkAvailable();
            return;
        }

        auto conn = static_cast<connection_pool_asio::ASIOConnection*>(swConn.getValue().get());

        AsyncOp* op = nullptr;

        stdx::unique_lock<stdx::mutex> lk(_inProgressMutex);

        const auto eraseCount = _inGetConnection.erase(cbHandle);

        // If we didn't find the request, we've been canceled
        if (eraseCount == 0) {
            lk.unlock();

            onFinish({ErrorCodes::CallbackCanceled,
                      "Callback canceled",
                      now() - getConnectionStartTime});

            // Though we were canceled, we know that the stream is fine, so indicate success.
            conn->indicateSuccess();

            signalWorkAvailable();

            return;
        }

        // We can't release the AsyncOp until we know we were not canceled.
        auto ownedOp = conn->releaseAsyncOp();
        op = ownedOp.get();

        // This AsyncOp may be recycled. We expect timeout and canceled to be clean.
        // If this op was most recently used to connect, its state transitions won't have been
        // reset, so we do that here.
        MONGO_ASIO_INVARIANT_INLOCK(!op->canceled(), "AsyncOp has dirty canceled flag", op);
        MONGO_ASIO_INVARIANT_INLOCK(!op->timedOut(), "AsyncOp has dirty timeout flag", op);
        op->clearStateTransitions();

        // Now that we're inProgress, an external cancel can touch our op, but
        // not until we release the inProgressMutex.
        _inProgress.emplace(op, std::move(ownedOp));

        op->_cbHandle = std::move(cbHandle);
        op->_request = std::move(request);
        op->_onFinish = std::move(onFinish);
        op->_connectionPoolHandle = std::move(swConn.getValue());
        op->startProgress(getConnectionStartTime);

        // This ditches the lock and gets us onto the strand (so we're
        // threadsafe)
        op->_strand.post([this, op, getConnectionStartTime] {
            const auto timeout = op->_request.timeout;

            // Set timeout now that we have the correct request object
            if (timeout != RemoteCommandRequest::kNoTimeout) {
                // Subtract the time it took to get the connection from the pool from the request
                // timeout.
                auto getConnectionDuration = now() - getConnectionStartTime;
                if (getConnectionDuration >= timeout) {
                    // We only assume that the request timer is guaranteed to fire *after* the
                    // timeout duration - but make no stronger assumption. It is thus possible that
                    // we have already exceeded the timeout. In this case we timeout the operation
                    // manually.
                    std::stringstream msg;
                    msg << "Remote command timed out while waiting to get a connection from the "
                        << "pool, took " << getConnectionDuration << ", timeout was set to "
                        << timeout;
                    auto rs = ResponseStatus(ErrorCodes::NetworkInterfaceExceededTimeLimit,
                                             msg.str(),
                                             getConnectionDuration);
                    return _completeOperation(op, rs);
                }

                // The above conditional guarantees that the adjusted timeout will never underflow.
                MONGO_ASIO_INVARIANT(timeout > getConnectionDuration, "timeout underflowed", op);
                const auto adjustedTimeout = timeout - getConnectionDuration;
                const auto requestId = op->_request.id;

                try {
                    op->_timeoutAlarm =
                        op->_owner->_timerFactory->make(&op->_strand, adjustedTimeout);
                } catch (std::system_error& e) {
                    severe() << "Failed to construct timer for AsyncOp: " << e.what();
                    fassertFailed(40334);
                }

                std::shared_ptr<AsyncOp::AccessControl> access;
                std::size_t generation;
                {
                    stdx::lock_guard<stdx::mutex> lk(op->_access->mutex);
                    access = op->_access;
                    generation = access->id;
                }

                op->_timeoutAlarm->asyncWait(
                    [this, op, access, generation, requestId, adjustedTimeout](std::error_code ec) {
                        // We must pass a check for safe access before using op inside the
                        // callback or we may attempt access on an invalid pointer.
                        stdx::lock_guard<stdx::mutex> lk(access->mutex);
                        if (generation != access->id) {
                            // The operation has been cleaned up, do not access.
                            return;
                        }

                        if (!ec) {
                            LOG(2) << "Request " << requestId << " timed out"
                                   << ", adjusted timeout after getting connection from pool was "
                                   << adjustedTimeout << ", op was " << redact(op->toString());

                            op->timeOut_inlock();
                        } else {
                            LOG(2) << "Failed to time request " << requestId
                                   << "out: " << ec.message() << ", op was "
                                   << redact(op->toString());
                        }
                    });
            }

            _beginCommunication(op);
        });
    };

    _connectionPool.get(request.target, request.timeout, nextStep);
    return Status::OK();
}
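// A minimal sketch of the use-after-free guard on the timeout alarm above: the
// callback captures a shared AccessControl block plus the generation observed when
// the alarm was armed; op cleanup bumps the generation, so a late-firing callback
// sees the mismatch and returns without touching the (possibly recycled) op. The
// names below follow AsyncOp::AccessControl but are simplified.
#include <cstddef>
#include <memory>
#include <mutex>

namespace sketch {
struct AccessControl {
    std::mutex mutex;
    std::size_t id = 0;  // bumped whenever the guarded object is torn down or recycled
};

// Called when the op is cleaned up.
void invalidate(AccessControl& access) {
    std::lock_guard<std::mutex> lk(access.mutex);
    ++access.id;
}

// Shape of the alarm callback's first step: re-check the generation under the
// lock before dereferencing anything owned by the op.
bool safeToTouchOp(const std::shared_ptr<AccessControl>& access, std::size_t generation) {
    std::lock_guard<std::mutex> lk(access->mutex);
    return generation == access->id;
}
}  // namespace sketch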