Example #1
ExampleAsset::~ExampleAsset()
{
   // If the asset manager does not own the asset, then we own the
   // asset definition, so delete it.
   if (!getOwned())
      delete mpAssetDefinition;
}
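The decision in this destructor hinges on a single ownership flag: when the asset manager does not own the asset, this object is responsible for the definition it allocated. Below is a minimal, self-contained sketch of the same pattern; AssetDefinition and ScopedAsset are placeholder names for illustration, not Torque3D's actual API.

struct AssetDefinition { /* asset fields */ };

class ScopedAsset
{
public:
   ScopedAsset(AssetDefinition* definition, bool ownedByManager)
      : mpAssetDefinition(definition), mOwnedByManager(ownedByManager) {}

   ~ScopedAsset()
   {
      // Mirrors the destructor above: delete the definition only when the
      // manager does not own it, i.e. when this object does.
      if (!mOwnedByManager)
         delete mpAssetDefinition;
   }

private:
   AssetDefinition* mpAssetDefinition;
   bool mOwnedByManager;
};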
Example #2
StatusWith<std::vector<ClusterClientCursorParams::RemoteCursor>> establishCursors(
    OperationContext* opCtx,
    executor::TaskExecutor* executor,
    const NamespaceString& nss,
    const ReadPreferenceSetting readPref,
    const std::vector<std::pair<ShardId, BSONObj>>& remotes,
    bool allowPartialResults,
    BSONObj* viewDefinition) {
    // Construct the requests
    std::vector<AsyncRequestsSender::Request> requests;
    for (const auto& remote : remotes) {
        requests.emplace_back(remote.first, remote.second);
    }

    // Send the requests
    AsyncRequestsSender ars(opCtx,
                            executor,
                            nss.db().toString(),
                            std::move(requests),
                            readPref,
                            Shard::RetryPolicy::kIdempotent);

    // Get the responses
    std::vector<ClusterClientCursorParams::RemoteCursor> remoteCursors;
    Status status = Status::OK();
    while (!ars.done()) {
        auto response = ars.next();

        StatusWith<CursorResponse> swCursorResponse(
            response.swResponse.isOK()
                ? CursorResponse::parseFromBSON(response.swResponse.getValue().data)
                : response.swResponse.getStatus());

        if (swCursorResponse.isOK()) {
            remoteCursors.emplace_back(std::move(response.shardId),
                                       std::move(*response.shardHostAndPort),
                                       std::move(swCursorResponse.getValue()));
            continue;
        }

        // When a read is performed against a view, the shard primary can return an error
        // indicating that the underlying collection may be sharded. When this occurs, the returned
        // message will include an expanded view definition and collection namespace, which we
        // need to store. This allows a second attempt at the read directly against the
        // underlying collection.
        if (swCursorResponse.getStatus() == ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
            auto& responseObj = response.swResponse.getValue().data;
            if (!responseObj.hasField("resolvedView")) {
                status = Status(ErrorCodes::InternalError,
                                str::stream() << "Missing field 'resolvedView' in document: "
                                              << responseObj);
                break;
            }

            auto resolvedViewObj = responseObj.getObjectField("resolvedView");
            if (resolvedViewObj.isEmpty()) {
                status = Status(ErrorCodes::InternalError,
                                str::stream() << "Field 'resolvedView' must be an object: "
                                              << responseObj);
                break;
            }

            status = std::move(swCursorResponse.getStatus());
            if (viewDefinition) {
                *viewDefinition = BSON("resolvedView" << resolvedViewObj.getOwned());
            }
            break;
        }

        // Unreachable host errors are swallowed if the 'allowPartialResults' option is set.
        if (allowPartialResults) {
            continue;
        }
        status = std::move(swCursorResponse.getStatus());
        break;
    }

    // If one of the remotes had an error, we make a best effort to finish retrieving responses for
    // other requests that were already sent, so that we can send killCursors to any cursors that we
    // know were established.
    if (!status.isOK()) {
        // Do not schedule any new requests.
        ars.stopRetrying();

        // Collect responses from all requests that were already sent.
        while (!ars.done()) {
            auto response = ars.next();

            // Check if the response contains an established cursor, and if so, store it.
            StatusWith<CursorResponse> swCursorResponse(
                response.swResponse.isOK()
                    ? CursorResponse::parseFromBSON(response.swResponse.getValue().data)
                    : response.swResponse.getStatus());

            if (swCursorResponse.isOK()) {
                remoteCursors.emplace_back(std::move(response.shardId),
                                           *response.shardHostAndPort,
                                           std::move(swCursorResponse.getValue()));
            }
        }

        // Schedule killCursors against all cursors that were established.
        for (const auto& remoteCursor : remoteCursors) {
            BSONObj cmdObj =
                KillCursorsRequest(nss, {remoteCursor.cursorResponse.getCursorId()}).toBSON();
            executor::RemoteCommandRequest request(
                remoteCursor.hostAndPort, nss.db().toString(), cmdObj, opCtx);

            // We do not process the response to the killCursors request (we make a good-faith
            // attempt at cleaning up the cursors, but ignore any returned errors).
            executor
                ->scheduleRemoteCommand(
                    request, [](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {})
                .status_with_transitional_ignore();
        }

        return status;
    }

    return std::move(remoteCursors);
}
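One detail worth noting in the example above is how swCursorResponse is built: the response body is parsed only when the transport-level status is OK, otherwise that transport error is forwarded unchanged into the same StatusWith, so a single success/failure check covers both layers. Below is a minimal, self-contained sketch of that pattern using only the standard library; Error, Cursor, parseCursor, and toCursor are illustrative stand-ins, not MongoDB types.

#include <iostream>
#include <string>
#include <variant>

struct Error { int code; std::string reason; };   // stand-in for Status
struct Cursor { long long id; };                  // stand-in for CursorResponse

template <typename T>
using StatusWith = std::variant<Error, T>;        // holds either an error or a value

// Hypothetical parser for the raw wire payload.
StatusWith<Cursor> parseCursor(const std::string& payload) {
    if (payload.empty())
        return Error{2, "empty response"};
    return Cursor{42};
}

// Same shape as the swCursorResponse construction above: parse only when the
// transport layer succeeded, otherwise carry the transport error through unchanged.
StatusWith<Cursor> toCursor(const StatusWith<std::string>& transport) {
    if (const Error* err = std::get_if<Error>(&transport))
        return *err;
    return parseCursor(std::get<std::string>(transport));
}

int main() {
    const StatusWith<std::string> ok{std::string{"{ cursor: ... }"}};
    const StatusWith<std::string> failed{Error{6, "host unreachable"}};

    for (const auto& transport : {ok, failed}) {
        auto sw = toCursor(transport);
        if (const Cursor* cursor = std::get_if<Cursor>(&sw))
            std::cout << "established cursor id " << cursor->id << "\n";
        else
            std::cout << "error: " << std::get<Error>(sw).reason << "\n";
    }
}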
Example #3
void AsyncResultsMerger::handleBatchResponse(
    const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData, size_t remoteIndex) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);

    auto& remote = _remotes[remoteIndex];

    // Clear the callback handle. This indicates that we are no longer waiting on a response from
    // 'remote'.
    remote.cbHandle = executor::TaskExecutor::CallbackHandle();

    // If we're in the process of shutting down then there's no need to process the batch.
    if (_lifecycleState != kAlive) {
        invariant(_lifecycleState == kKillStarted);

        // Make sure to wake up anyone waiting on '_currentEvent' if we're shutting down.
        signalCurrentEventIfReady_inlock();

        // Make a best effort to parse the response and retrieve the cursor id. We need the cursor
        // id in order to issue a killCursors command against it.
        if (cbData.response.isOK()) {
            auto cursorResponse = parseCursorResponse(cbData.response.getValue().data, remote);
            if (cursorResponse.isOK()) {
                remote.cursorId = cursorResponse.getValue().getCursorId();
            }
        }

        // If we're killed and we're not waiting on any more batches to come back, then we are ready
        // to kill the cursors on the remote hosts and clean up this cursor. Schedule the
        // killCursors command and signal that this cursor is now safe to destroy. We have to
        // promise not to touch any members of this class because 'this' could become invalid as
        // soon as we signal the event.
        if (!haveOutstandingBatchRequests_inlock()) {
            // If the event handle is invalid, then the executor is in the middle of shutting down,
            // and we can't schedule any more work for it to complete.
            if (_killCursorsScheduledEvent.isValid()) {
                scheduleKillCursors_inlock();
                _executor->signalEvent(_killCursorsScheduledEvent);
            }

            _lifecycleState = kKillComplete;
        }

        return;
    }

    // From this point on, any early return will signal anyone waiting on an event, if ready()
    // is true.
    ScopeGuard signaller = MakeGuard(&AsyncResultsMerger::signalCurrentEventIfReady_inlock, this);

    StatusWith<CursorResponse> cursorResponseStatus(
        cbData.response.isOK() ? parseCursorResponse(cbData.response.getValue().data, remote)
                               : cbData.response.getStatus());

    if (!cursorResponseStatus.isOK()) {
        // When a read is performed against a view, the shard primary can return an error
        // indicating that the underlying collection may be sharded. When this occurs, the returned
        // message will include an expanded view definition and collection namespace, which we need
        // to store. This allows a second attempt at the read directly against the underlying
        // collection.
        if (cursorResponseStatus.getStatus() ==
            ErrorCodes::CommandOnShardedViewNotSupportedOnMongod) {
            auto& responseObj = cbData.response.getValue().data;
            if (!responseObj.hasField("resolvedView")) {
                remote.status = Status(ErrorCodes::InternalError,
                                       str::stream() << "Missing field 'resolvedView' in document: "
                                                     << responseObj);
                return;
            }

            auto resolvedViewObj = responseObj.getObjectField("resolvedView");
            if (resolvedViewObj.isEmpty()) {
                remote.status = Status(ErrorCodes::InternalError,
                                       str::stream() << "Field 'resolvedView' must be an object: "
                                                     << responseObj);
                return;
            }

            ClusterQueryResult result;
            result.setViewDefinition(resolvedViewObj.getOwned());

            remote.docBuffer.push(result);
            remote.cursorId = 0;
            remote.status = Status::OK();
            return;
        }

        auto shard = remote.getShard();
        if (!shard) {
            remote.status = Status(cursorResponseStatus.getStatus().code(),
                                   str::stream() << "Could not find shard " << *remote.shardId
                                                 << " containing host "
                                                 << remote.getTargetHost().toString());
        } else {
            shard->updateReplSetMonitor(remote.getTargetHost(), cursorResponseStatus.getStatus());

            // Retry initial cursor establishment if possible.  Never retry getMores to avoid
            // accidentally skipping results.
            if (!remote.cursorId && remote.retryCount < kMaxNumFailedHostRetryAttempts &&
                shard->isRetriableError(cursorResponseStatus.getStatus().code(),
                                        Shard::RetryPolicy::kIdempotent)) {
                invariant(remote.shardId);
                LOG(1) << "Initial cursor establishment failed with retriable error and will be "
                          "retried"
                       << causedBy(redact(cursorResponseStatus.getStatus()));

                ++remote.retryCount;

                // Since we potentially updated the targeter that the last host it chose might be
                // faulty, the call below may end up getting a different host.
                remote.status = askForNextBatch_inlock(remoteIndex);
                if (remote.status.isOK()) {
                    return;
                }

                // If we end up here, it means we failed to schedule the retry request, which is a
                // more severe error that should not be retried. Just pass through to the error
                // handling logic below.
            } else {
                remote.status = cursorResponseStatus.getStatus();
            }
        }

        // Unreachable host errors are swallowed if the 'allowPartialResults' option is set. We
        // remove the unreachable host entirely from consideration by marking it as exhausted.
        if (_params.isAllowPartialResults) {
            remote.status = Status::OK();

            // Clear the results buffer and cursor id.
            std::queue<ClusterQueryResult> emptyBuffer;
            std::swap(remote.docBuffer, emptyBuffer);
            remote.cursorId = 0;
        }

        return;
    }

    // Cursor id successfully established.
    auto cursorResponse = std::move(cursorResponseStatus.getValue());
    remote.cursorId = cursorResponse.getCursorId();
    remote.initialCmdObj = boost::none;

    for (const auto& obj : cursorResponse.getBatch()) {
        // If there's a sort, we're expecting the remote node to give us back a sort key.
        if (!_params.sort.isEmpty() &&
            obj[ClusterClientCursorParams::kSortKeyField].type() != BSONType::Object) {
            remote.status = Status(ErrorCodes::InternalError,
                                   str::stream() << "Missing field '"
                                                 << ClusterClientCursorParams::kSortKeyField
                                                 << "' in document: "
                                                 << obj);
            return;
        }

        ClusterQueryResult result(obj);
        remote.docBuffer.push(result);
        ++remote.fetchedCount;
    }

    // If we're doing a sorted merge, then we have to make sure to put this remote onto the
    // merge queue.
    if (!_params.sort.isEmpty() && !cursorResponse.getBatch().empty()) {
        _mergeQueue.push(remoteIndex);
    }

    // If the cursor is tailable and we just received an empty batch, the next return value should
    // be boost::none in order to indicate the end of the batch.
    if (_params.isTailable && !remote.hasNext()) {
        _eofNext = true;
    }

    // If even after receiving this batch we still don't have anything buffered (i.e. the batchSize
    // was zero), then we can schedule work to retrieve the next batch right away.
    //
    // We do not ask for the next batch if the cursor is tailable, as batches received from remote
    // tailable cursors should be passed through to the client without asking for more batches.
    if (!_params.isTailable && !remote.hasNext() && !remote.exhausted()) {
        remote.status = askForNextBatch_inlock(remoteIndex);
        if (!remote.status.isOK()) {
            return;
        }
    }

    // ScopeGuard requires dismiss on success, but we want the waiter to be signalled on success
    // as well as on failure.
    signaller.Dismiss();
    signalCurrentEventIfReady_inlock();
}
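The signaller guard near the top of handleBatchResponse is the other pattern worth highlighting: every early return path has to wake anyone waiting on the merger's current event, so the signalling is attached to a scope guard and only dismissed at the very end, where the signal is then issued explicitly on the success path as well. Below is a minimal, self-contained sketch of that RAII idiom in plain C++; ScopeGuard, signalWaiters, and handleBatch are illustrative names, not MongoDB's implementation.

#include <functional>
#include <iostream>
#include <utility>

// Runs the stored callable on destruction unless dismissed first.
class ScopeGuard {
public:
    explicit ScopeGuard(std::function<void()> fn) : _fn(std::move(fn)) {}
    ~ScopeGuard() {
        if (!_dismissed)
            _fn();
    }
    void dismiss() { _dismissed = true; }

    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    std::function<void()> _fn;
    bool _dismissed = false;
};

void signalWaiters() { std::cout << "waiters signalled\n"; }

void handleBatch(bool parseFails) {
    // Any early return below wakes waiters automatically via the guard.
    ScopeGuard signaller(signalWaiters);

    if (parseFails)
        return;  // error path: the guard fires on scope exit

    // ... process the batch ...

    // Success path: we still want exactly one signal, so dismiss the guard
    // and signal explicitly, mirroring the end of the example above.
    signaller.dismiss();
    signalWaiters();
}

int main() {
    handleBatch(true);   // error path
    handleBatch(false);  // success path
}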