/**
 * Sends the command in each (ShardId, BSONObj) pair to the corresponding shard in parallel and
 * collects the cursors that were established.
 *
 * On success, returns one RemoteCursor per remote that returned a valid cursor response.
 *
 * On failure, makes a best effort to drain the remaining in-flight responses and schedules
 * killCursors against every cursor that is known to have been established, then returns the
 * first error encountered. If the error indicates the command was run against a view
 * (CommandOnShardedViewNotSupportedOnMongod), the expanded view definition from the response is
 * stored into '*viewDefinition' (when non-null) so the caller can retry against the underlying
 * collection.
 *
 * If 'allowPartialResults' is true, per-remote errors are ignored and the cursors from the
 * remaining remotes are returned.
 */
StatusWith<std::vector<ClusterClientCursorParams::RemoteCursor>> establishCursors(
    OperationContext* opCtx,
    executor::TaskExecutor* executor,
    const NamespaceString& nss,
    const ReadPreferenceSetting readPref,
    const std::vector<std::pair<ShardId, BSONObj>>& remotes,
    bool allowPartialResults,
    BSONObj* viewDefinition) {
    // Construct one request per remote shard.
    std::vector<AsyncRequestsSender::Request> requests;
    for (const auto& remote : remotes) {
        requests.emplace_back(remote.first, remote.second);
    }

    // Send the requests in parallel; the ARS manages scheduling and retries.
    AsyncRequestsSender ars(opCtx,
                            executor,
                            nss.db().toString(),
                            std::move(requests),
                            readPref,
                            Shard::RetryPolicy::kIdempotent);

    // Get the responses. 'status' records the first fatal error; the loop breaks as soon as one
    // is seen so the cleanup path below can run.
    std::vector<ClusterClientCursorParams::RemoteCursor> remoteCursors;
    Status status = Status::OK();
    while (!ars.done()) {
        auto response = ars.next();

        // Either parse the shard's reply as a cursor response, or propagate the transport-level
        // error from the ARS.
        StatusWith<CursorResponse> swCursorResponse(
            response.swResponse.isOK()
                ? CursorResponse::parseFromBSON(response.swResponse.getValue().data)
                : response.swResponse.getStatus());

        if (swCursorResponse.isOK()) {
            remoteCursors.emplace_back(std::move(response.shardId),
                                       std::move(*response.shardHostAndPort),
                                       std::move(swCursorResponse.getValue()));
            continue;
        }

        // In the case a read is performed against a view, the shard primary can return an error
        // indicating that the underlying collection may be sharded. When this occurs the return
        // message will include an expanded view definition and collection namespace which we
        // need to store. This allows for a second attempt at the read directly against the
        // underlying collection.
        if (swCursorResponse.getStatus() == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
            // NOTE(review): this assumes the view error always originates from a parsed command
            // reply (i.e. response.swResponse.isOK()); if the ARS itself ever reported this code,
            // getValue() here would be invalid — confirm against AsyncRequestsSender's contract.
            auto& responseObj = response.swResponse.getValue().data;
            if (!responseObj.hasField("resolvedView")) {
                status = Status(ErrorCodes::InternalError,
                                str::stream() << "Missing field 'resolvedView' in document: "
                                              << responseObj);
                break;
            }

            auto resolvedViewObj = responseObj.getObjectField("resolvedView");
            if (resolvedViewObj.isEmpty()) {
                status = Status(ErrorCodes::InternalError,
                                str::stream() << "Field 'resolvedView' must be an object: "
                                              << responseObj);
                break;
            }

            status = std::move(swCursorResponse.getStatus());
            if (viewDefinition) {
                // Hand the owned view definition back to the caller for a retry.
                *viewDefinition = BSON("resolvedView" << resolvedViewObj.getOwned());
            }
            break;
        }

        // Errors are swallowed if the 'allowPartialResults' option is set.
        // NOTE(review): despite the historical comment here ("unreachable host errors"), this
        // branch swallows *any* error from this remote, not only unreachable-host errors.
        if (allowPartialResults) {
            continue;
        }
        status = std::move(swCursorResponse.getStatus());
        break;
    }

    // If one of the remotes had an error, we make a best effort to finish retrieving responses for
    // other requests that were already sent, so that we can send killCursors to any cursors that we
    // know were established.
    if (!status.isOK()) {
        // Do not schedule any new requests.
        ars.stopRetrying();

        // Collect responses from all requests that were already sent.
        while (!ars.done()) {
            auto response = ars.next();

            // Check if the response contains an established cursor, and if so, store it.
            StatusWith<CursorResponse> swCursorResponse(
                response.swResponse.isOK()
                    ? CursorResponse::parseFromBSON(response.swResponse.getValue().data)
                    : response.swResponse.getStatus());

            if (swCursorResponse.isOK()) {
                remoteCursors.emplace_back(std::move(response.shardId),
                                           *response.shardHostAndPort,
                                           std::move(swCursorResponse.getValue()));
            }
        }

        // Schedule killCursors against all cursors that were established.
        for (const auto& remoteCursor : remoteCursors) {
            BSONObj cmdObj =
                KillCursorsRequest(nss, {remoteCursor.cursorResponse.getCursorId()}).toBSON();
            executor::RemoteCommandRequest request(
                remoteCursor.hostAndPort, nss.db().toString(), cmdObj, opCtx);

            // We do not process the response to the killCursors request (we make a good-faith
            // attempt at cleaning up the cursors, but ignore any returned errors).
            executor
                ->scheduleRemoteCommand(
                    request, [](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {})
                .status_with_transitional_ignore();
        }

        return status;
    }

    return std::move(remoteCursors);
}
// Example #2
/**
 * Sends the command in each (ShardId, BSONObj) pair to the corresponding shard in parallel and
 * returns one RemoteCursor per cursor that was established.
 *
 * If 'allowPartialResults' is true, retriable per-remote errors are swallowed and the cursors
 * from the remaining remotes are returned.
 *
 * On a non-swallowed error, makes a best effort to drain the in-flight responses, schedules
 * killCursors against every cursor known to have been established, and rethrows the original
 * error.
 */
std::vector<RemoteCursor> establishCursors(OperationContext* opCtx,
                                           executor::TaskExecutor* executor,
                                           const NamespaceString& nss,
                                           const ReadPreferenceSetting readPref,
                                           const std::vector<std::pair<ShardId, BSONObj>>& remotes,
                                           bool allowPartialResults,
                                           Shard::RetryPolicy retryPolicy) {
    // Construct one request per remote shard.
    std::vector<AsyncRequestsSender::Request> requests;
    requests.reserve(remotes.size());
    for (const auto& remote : remotes) {
        requests.emplace_back(remote.first, remote.second);
    }

    // Send the requests in parallel.
    MultiStatementTransactionRequestsSender ars(
        opCtx, executor, nss.db().toString(), std::move(requests), readPref, retryPolicy);

    std::vector<RemoteCursor> remoteCursors;
    try {
        // Get the responses
        while (!ars.done()) {
            try {
                auto response = ars.next();
                // Note the shardHostAndPort may not be populated if there was an error, so be sure
                // to do this after parsing the cursor response to ensure the response was ok.
                // Additionally, be careful not to push into 'remoteCursors' until we are sure we
                // have a valid cursor, since the error handling path will attempt to clean up
                // anything in 'remoteCursors'
                auto cursors = CursorResponse::parseFromBSONMany(
                    uassertStatusOK(std::move(response.swResponse)).data);

                for (auto& cursor : cursors) {
                    if (cursor.isOK()) {
                        RemoteCursor remoteCursor;
                        remoteCursor.setCursorResponse(std::move(cursor.getValue()));
                        // Copy (do not move) the shard id: 'cursors' may contain multiple
                        // cursors for this one shard, and moving here would leave
                        // 'response.shardId' moved-from for subsequent iterations.
                        remoteCursor.setShardId(response.shardId);
                        remoteCursor.setHostAndPort(*response.shardHostAndPort);
                        remoteCursors.push_back(std::move(remoteCursor));
                    }
                }

                // Throw if there is any error and then the catch block below will do the cleanup.
                for (auto& cursor : cursors) {
                    uassertStatusOK(cursor.getStatus());
                }

            } catch (const DBException& ex) {
                // Retriable errors are swallowed if 'allowPartialResults' is true.
                if (allowPartialResults &&
                    std::find(RemoteCommandRetryScheduler::kAllRetriableErrors.begin(),
                              RemoteCommandRetryScheduler::kAllRetriableErrors.end(),
                              ex.code()) !=
                        RemoteCommandRetryScheduler::kAllRetriableErrors.end()) {
                    continue;
                }
                throw;  // Fail this loop.
            }
        }
        return remoteCursors;
    } catch (const DBException&) {
        // If one of the remotes had an error, we make a best effort to finish retrieving responses
        // for other requests that were already sent, so that we can send killCursors to any cursors
        // that we know were established.
        try {
            // Do not schedule any new requests.
            ars.stopRetrying();

            // Collect responses from all requests that were already sent.
            while (!ars.done()) {
                auto response = ars.next();

                // Check if the response contains an established cursor, and if so, store it.
                StatusWith<CursorResponse> swCursorResponse(
                    response.swResponse.isOK()
                        ? CursorResponse::parseFromBSON(response.swResponse.getValue().data)
                        : response.swResponse.getStatus());

                if (swCursorResponse.isOK()) {
                    RemoteCursor cursor;
                    // A single-cursor parse: moving the shard id is safe here.
                    cursor.setShardId(std::move(response.shardId));
                    cursor.setHostAndPort(*response.shardHostAndPort);
                    cursor.setCursorResponse(std::move(swCursorResponse.getValue()));
                    remoteCursors.push_back(std::move(cursor));
                }
            }

            // Schedule killCursors against all cursors that were established.
            killRemoteCursors(opCtx, executor, std::move(remoteCursors), nss);
        } catch (const DBException&) {
            // Ignore the new error and rethrow the original one.
        }

        throw;
    }
}