Code Example #1
void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult,
                                            Fetcher::NextAction* nextAction,
                                            BSONObjBuilder* getMoreBob) {
    if (!fetchResult.isOK()) {
        _finishCallback(nullptr, fetchResult.getStatus());
        return;
    }

    auto batchData(fetchResult.getValue());
    auto&& documents = batchData.documents;

    if (documents.empty()) {
        warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from "
                  << _source;
    }

    // We may be called with multiple batches leading to a need to grow _indexSpecs.
    _indexSpecs.reserve(_indexSpecs.size() + documents.size());
    _indexSpecs.insert(_indexSpecs.end(), documents.begin(), documents.end());

    // The fetcher will continue to call with kGetMore until an error or the last batch.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
        return;
    }

    // We have all of the indexes now, so we can start cloning the collection data.
    auto&& scheduleResult = _scheduleDbWorkFn(
        stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1));
    if (!scheduleResult.isOK()) {
        _finishCallback(nullptr, scheduleResult.getStatus());
        return;
    }

    _dbWorkCallbackHandle = scheduleResult.getValue();
}
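The three CollectionCloner callbacks on this page share the Fetcher batch protocol: the callback is invoked once per batch, and as long as *nextAction is kGetMore it fills getMoreBob, so the Fetcher issues a follow-up command of the form {getMore: <cursorId>, collection: <collection name>} built from exactly the two fields appended above. A rough sketch of how such a callback is attached to a Fetcher follows; the constructor argument order and the listIndexes command object are assumptions, since neither appears on this page:

// Sketch only; constructor argument order and the command object are assumptions.
Fetcher fetcher(executor,                                  // executor::TaskExecutor*
                source,                                    // HostAndPort of the sync source
                nss.db().toString(),                       // database to run the command on
                BSON("listIndexes" << nss.coll()),         // cursor-returning command
                [this](const Fetcher::QueryResponseStatus& fetchResult,
                       Fetcher::NextAction* nextAction,
                       BSONObjBuilder* getMoreBob) {
                    _listIndexesCallback(fetchResult, nextAction, getMoreBob);
                });
uassertStatusOK(fetcher.schedule());  // the callback fires for every batch until the last one or an error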
Code Example #2
void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult,
                                            Fetcher::NextAction* nextAction,
                                            BSONObjBuilder* getMoreBob) {
    const bool collectionIsEmpty = fetchResult == ErrorCodes::NamespaceNotFound;
    if (collectionIsEmpty) {
        // Schedule collection creation and finish callback.
        auto&& scheduleResult =
            _scheduleDbWorkFn([this](const executor::TaskExecutor::CallbackArgs& cbd) {
                if (!cbd.status.isOK()) {
                    _finishCallback(cbd.status);
                    return;
                }
                auto txn = cbd.txn;
                txn->setReplicatedWrites(false);
                auto&& createStatus = _storageInterface->createCollection(txn, _destNss, _options);
                _finishCallback(createStatus);
            });
        if (!scheduleResult.isOK()) {
            _finishCallback(scheduleResult.getStatus());
        }
        return;
    }
    if (!fetchResult.isOK()) {
        Status newStatus{fetchResult.getStatus().code(),
                         str::stream() << "During listIndexes call on collection '"
                                       << _sourceNss.ns()
                                       << "' there was an error '"
                                       << fetchResult.getStatus().reason()
                                       << "'"};

        _finishCallback(newStatus);
        return;
    }

    auto batchData(fetchResult.getValue());
    auto&& documents = batchData.documents;

    if (documents.empty()) {
        warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from "
                  << _source;
    }

    UniqueLock lk(_mutex);
    // We may be called with multiple batches leading to a need to grow _indexSpecs.
    _indexSpecs.reserve(_indexSpecs.size() + documents.size());
    for (auto&& doc : documents) {
        if (StringData("_id_") == doc["name"].str()) {
            _idIndexSpec = doc;
            continue;
        }
        _indexSpecs.push_back(doc);
    }
    lk.unlock();

    // The fetcher will continue to call with kGetMore until an error or the last batch.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
        return;
    }

    // We have all of the indexes now, so we can start cloning the collection data.
    auto&& scheduleResult = _scheduleDbWorkFn(
        stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1));
    if (!scheduleResult.isOK()) {
        _finishCallback(scheduleResult.getStatus());
        return;
    }
}
Code Example #3
File: collection_cloner.cpp  Project: i80and/mongo
void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult,
                                            Fetcher::NextAction* nextAction,
                                            BSONObjBuilder* getMoreBob) {
    const bool collectionIsEmpty = fetchResult == ErrorCodes::NamespaceNotFound;
    if (collectionIsEmpty) {
        // Schedule collection creation and finish callback.
        auto&& scheduleResult =
            _scheduleDbWorkFn([this](const executor::TaskExecutor::CallbackArgs& cbd) {
                if (!cbd.status.isOK()) {
                    _finishCallback(cbd.status);
                    return;
                }
                auto opCtx = cbd.opCtx;
                UnreplicatedWritesBlock uwb(opCtx);
                auto&& createStatus =
                    _storageInterface->createCollection(opCtx, _destNss, _options);
                _finishCallback(createStatus);
            });
        if (!scheduleResult.isOK()) {
            _finishCallback(scheduleResult.getStatus());
        }
        return;
    }
    if (!fetchResult.isOK()) {
        _finishCallback(fetchResult.getStatus().withContext(
            str::stream() << "listIndexes call failed on collection '" << _sourceNss.ns() << "'"));
        return;
    }

    auto batchData(fetchResult.getValue());
    auto&& documents = batchData.documents;

    if (documents.empty()) {
        warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from "
                  << _source;
    }

    UniqueLock lk(_mutex);
    // When listing indexes by UUID, the sync source may use a different name for the collection
    // as result of renaming or two-phase drop. As the index spec also includes a 'ns' field, this
    // must be rewritten.
    BSONObjBuilder nsFieldReplacementBuilder;
    nsFieldReplacementBuilder.append("ns", _sourceNss.ns());
    BSONElement nsFieldReplacementElem = nsFieldReplacementBuilder.done().firstElement();

    // We may be called with multiple batches leading to a need to grow _indexSpecs.
    _indexSpecs.reserve(_indexSpecs.size() + documents.size());
    for (auto&& doc : documents) {
        // The addField replaces the 'ns' field with the correct name, see above.
        if (StringData("_id_") == doc["name"].str()) {
            _idIndexSpec = doc.addField(nsFieldReplacementElem);
            continue;
        }
        _indexSpecs.push_back(doc.addField(nsFieldReplacementElem));
    }
    lk.unlock();

    // The fetcher will continue to call with kGetMore until an error or the last batch.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
        return;
    }

    // We have all of the indexes now, so we can start cloning the collection data.
    auto&& scheduleResult = _scheduleDbWorkFn(
        [=](const executor::TaskExecutor::CallbackArgs& cbd) { _beginCollectionCallback(cbd); });
    if (!scheduleResult.isOK()) {
        _finishCallback(scheduleResult.getStatus());
        return;
    }
}
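The step that distinguishes example #3 from #2 is the 'ns' rewrite: when listIndexes is run by UUID, the sync source may report index specs under a renamed or two-phase-drop namespace, so each spec's 'ns' field is replaced with the cloner's own _sourceNss before being stored. A small illustration of what that addField call does; the spec contents here are invented for the example:

// Illustration only; field values are made up.
BSONObj specFromSource = BSON("v" << 2 << "key" << BSON("a" << 1) << "name"
                                  << "a_1"
                                  << "ns"
                                  << "test.tempRenamedColl");  // stale namespace from the sync source

BSONObjBuilder nsBuilder;
nsBuilder.append("ns", "test.coll");  // the name the cloner wants (_sourceNss)
BSONElement nsElem = nsBuilder.done().firstElement();

// addField returns a copy of the spec in which the existing 'ns' value has been
// replaced by nsElem, as the comment in the code above states.
BSONObj rewritten = specFromSource.addField(nsElem);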
Code Example #4
File: oplog_fetcher.cpp  Project: ChineseDr/mongo
void OplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
                             BSONObjBuilder* getMoreBob) {
    // If the target cut the connection between connecting and querying (for
    // example, because it stepped down), we might not have a cursor.
    if (!result.isOK()) {
        LOG(2) << "Error returned from oplog query: " << result.getStatus();
        _onShutdown(result.getStatus());
        return;
    }

    const auto& queryResponse = result.getValue();
    rpc::ReplSetMetadata metadata;

    // Forward metadata (containing liveness information) to data replicator external state.
    bool receivedMetadata =
        queryResponse.otherFields.metadata.hasElement(rpc::kReplSetMetadataFieldName);
    if (receivedMetadata) {
        const auto& metadataObj = queryResponse.otherFields.metadata;
        auto metadataResult = rpc::ReplSetMetadata::readFromMetadata(metadataObj);
        if (!metadataResult.isOK()) {
            error() << "invalid replication metadata from sync source " << _fetcher.getSource()
                    << ": " << metadataResult.getStatus() << ": " << metadataObj;
            _onShutdown(metadataResult.getStatus());
            return;
        }
        metadata = metadataResult.getValue();
        _dataReplicatorExternalState->processMetadata(metadata);
    }

    const auto& documents = queryResponse.documents;
    auto firstDocToApply = documents.cbegin();

    if (!documents.empty()) {
        LOG(2) << "oplog fetcher read " << documents.size()
               << " operations from remote oplog starting at " << documents.front()["ts"]
               << " and ending at " << documents.back()["ts"];
    } else {
        LOG(2) << "oplog fetcher read 0 operations from remote oplog";
    }

    // The optime (and hash) of the last operation successfully fetched; used below both
    // for the remote oplog start check and for validating this batch's documents.
    auto opTimeWithHash = getLastOpTimeWithHashFetched();

    // Check start of remote oplog and, if necessary, stop fetcher to execute rollback.
    if (queryResponse.first) {
        auto status = checkRemoteOplogStart(documents, opTimeWithHash);
        if (!status.isOK()) {
            // Stop oplog fetcher and execute rollback.
            _onShutdown(status, opTimeWithHash);
            return;
        }

        // If this is the first batch and no rollback is needed, skip the first document.
        firstDocToApply++;
    }

    auto validateResult = OplogFetcher::validateDocuments(
        documents, queryResponse.first, opTimeWithHash.opTime.getTimestamp());
    if (!validateResult.isOK()) {
        _onShutdown(validateResult.getStatus(), opTimeWithHash);
        return;
    }
    auto info = validateResult.getValue();

    // TODO: back pressure handling will be added in SERVER-23499.
    _enqueueDocumentsFn(firstDocToApply, documents.cend(), info, queryResponse.elapsedMillis);

    // Update last fetched info.
    if (firstDocToApply != documents.cend()) {
        opTimeWithHash = info.lastDocument;
        LOG(3) << "batch resetting last fetched optime: " << opTimeWithHash.opTime
               << "; hash: " << opTimeWithHash.value;

        stdx::unique_lock<stdx::mutex> lock(_mutex);
        _lastFetched = opTimeWithHash;
    }

    if (_dataReplicatorExternalState->shouldStopFetching(_fetcher.getSource(), metadata)) {
        _onShutdown(Status(ErrorCodes::InvalidSyncSource,
                           str::stream() << "sync source " << _fetcher.getSource().toString()
                                         << " (last optime: "
                                         << metadata.getLastOpVisible().toString()
                                         << "; sync source index: "
                                         << metadata.getSyncSourceIndex()
                                         << "; primary index: "
                                         << metadata.getPrimaryIndex()
                                         << ") is no longer valid"),
                    opTimeWithHash);
        return;
    }

    // No more data. Stop processing and return Status::OK along with last
    // fetch info.
    if (!getMoreBob) {
        _onShutdown(Status::OK(), opTimeWithHash);
        return;
    }

    getMoreBob->appendElements(makeGetMoreCommandObject(_dataReplicatorExternalState,
                                                        queryResponse.nss,
                                                        queryResponse.cursorId,
                                                        _awaitDataTimeout));
}
Code Example #5
void AbstractOplogFetcher::_callback(const Fetcher::QueryResponseStatus& result,
                                     BSONObjBuilder* getMoreBob) {
    Status responseStatus =
        _checkForShutdownAndConvertStatus(result.getStatus(), "error in fetcher batch callback");
    if (ErrorCodes::CallbackCanceled == responseStatus) {
        LOG(1) << _getComponentName() << " oplog query cancelled to " << _getSource() << ": "
               << redact(responseStatus);
        _finishCallback(responseStatus);
        return;
    }

    // If target cut connections between connecting and querying (for
    // example, because it stepped down) we might not have a cursor.
    if (!responseStatus.isOK()) {
        BSONObj findCommandObj =
            _makeFindCommandObject(_nss, _getLastOpTimeWithHashFetched().opTime);
        BSONObj metadataObj = _makeMetadataObject();
        {
            stdx::lock_guard<stdx::mutex> lock(_mutex);
            if (_fetcherRestarts == _maxFetcherRestarts) {
                log() << "Error returned from oplog query (no more query restarts left): "
                      << redact(responseStatus);
            } else {
                log() << "Restarting oplog query due to error: " << redact(responseStatus)
                      << ". Last fetched optime (with hash): " << _lastFetched
                      << ". Restarts remaining: " << (_maxFetcherRestarts - _fetcherRestarts);
                _fetcherRestarts++;
                // Destroying current instance in _shuttingDownFetcher will possibly block.
                _shuttingDownFetcher.reset();
                // Move the old fetcher into the shutting down instance.
                _shuttingDownFetcher.swap(_fetcher);
                // Create and start fetcher with current term and new starting optime.
                _fetcher = _makeFetcher(findCommandObj, metadataObj);
                auto scheduleStatus = _scheduleFetcher_inlock();
                if (scheduleStatus.isOK()) {
                    log() << "Scheduled new oplog query " << _fetcher->toString();
                    return;
                }
                error() << "Error scheduling new oplog query: " << redact(scheduleStatus)
                        << ". Returning current oplog query error: " << redact(responseStatus);
            }
        }
        _finishCallback(responseStatus);
        return;
    }

    // Reset fetcher restart counter on successful response.
    {
        stdx::lock_guard<stdx::mutex> lock(_mutex);
        invariant(_isActive_inlock());
        _fetcherRestarts = 0;
    }

    if (_isShuttingDown()) {
        _finishCallback(
            Status(ErrorCodes::CallbackCanceled, _getComponentName() + " shutting down"));
        return;
    }

    // At this point we have a successful batch and can call the subclass's _onSuccessfulBatch.
    const auto& queryResponse = result.getValue();
    auto batchResult = _onSuccessfulBatch(queryResponse);
    if (!batchResult.isOK()) {
        // The stopReplProducer fail point expects this to return successfully. If another fail
        // point wants this to return unsuccessfully, it should use a different error code.
        if (batchResult.getStatus() == ErrorCodes::FailPointEnabled) {
            _finishCallback(Status::OK());
            return;
        }
        _finishCallback(batchResult.getStatus());
        return;
    }

    // No more data. Stop processing and return Status::OK.
    if (!getMoreBob) {
        _finishCallback(Status::OK());
        return;
    }

    // We have now processed the batch and should move forward our view of _lastFetched. Note that
    // the _lastFetched value will not be updated until the _onSuccessfulBatch function is
    // completed.
    const auto& documents = queryResponse.documents;
    if (documents.size() > 0) {
        auto lastDocRes = AbstractOplogFetcher::parseOpTimeWithHash(documents.back());
        if (!lastDocRes.isOK()) {
            _finishCallback(lastDocRes.getStatus());
            return;
        }
        auto lastDoc = lastDocRes.getValue();
        LOG(3) << _getComponentName()
               << " setting last fetched optime ahead after batch: " << lastDoc.opTime
               << "; hash: " << lastDoc.value;

        stdx::lock_guard<stdx::mutex> lock(_mutex);
        _lastFetched = lastDoc;
    }

    // Check for shutdown to save an unnecessary `getMore` request.
    if (_isShuttingDown()) {
        _finishCallback(
            Status(ErrorCodes::CallbackCanceled, _getComponentName() + " shutting down"));
        return;
    }

    // The _onSuccessfulBatch function returns the `getMore` command we want to send.
    getMoreBob->appendElements(batchResult.getValue());
}