void MultiApplier::_callback(const executor::TaskExecutor::CallbackArgs& cbd) {
    // Executor delivered a failure (e.g. shutdown/cancellation): report it
    // without applying anything.
    if (!cbd.status.isOK()) {
        _finishCallback(cbd.status, _operations);
        return;
    }
    invariant(!_operations.empty());

    // Holds either the result of _multiApply() or the reason it failed.
    // The sentinel error is overwritten on every path through the try block.
    StatusWith<OpTime> result(ErrorCodes::InternalError, "not mutated");
    try {
        auto txn = cc().makeOperationContext();
        // Refer to multiSyncApply() and multiInitialSyncApply() in sync_tail.cpp.
        txn->setReplicatedWrites(false);  // allow us to get through the magic barrier
        txn->lockState()->setIsBatchWriter(true);
        result = _multiApply(txn.get(), _operations, _applyOperation);
    } catch (...) {
        result = exceptionToStatus();
    }

    // Report either the timestamp of the applied batch or the apply error.
    if (result.isOK()) {
        _finishCallback(result.getValue().getTimestamp(), _operations);
    } else {
        _finishCallback(result.getStatus(), _operations);
    }
}
void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult, Fetcher::NextAction* nextAction, BSONObjBuilder* getMoreBob) { const bool collectionIsEmpty = fetchResult == ErrorCodes::NamespaceNotFound; if (collectionIsEmpty) { // Schedule collection creation and finish callback. auto&& scheduleResult = _scheduleDbWorkFn([this](const executor::TaskExecutor::CallbackArgs& cbd) { if (!cbd.status.isOK()) { _finishCallback(cbd.status); return; } auto txn = cbd.txn; txn->setReplicatedWrites(false); auto&& createStatus = _storageInterface->createCollection(txn, _destNss, _options); _finishCallback(createStatus); }); if (!scheduleResult.isOK()) { _finishCallback(scheduleResult.getStatus()); } return; }; if (!fetchResult.isOK()) { Status newStatus{fetchResult.getStatus().code(), str::stream() << "During listIndexes call on collection '" << _sourceNss.ns() << "' there was an error '" << fetchResult.getStatus().reason() << "'"}; _finishCallback(newStatus); return; } auto batchData(fetchResult.getValue()); auto&& documents = batchData.documents; if (documents.empty()) { warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from " << _source; } UniqueLock lk(_mutex); // We may be called with multiple batches leading to a need to grow _indexSpecs. _indexSpecs.reserve(_indexSpecs.size() + documents.size()); for (auto&& doc : documents) { if (StringData("_id_") == doc["name"].str()) { _idIndexSpec = doc; continue; } _indexSpecs.push_back(doc); } lk.unlock(); // The fetcher will continue to call with kGetMore until an error or the last batch. if (*nextAction == Fetcher::NextAction::kGetMore) { invariant(getMoreBob); getMoreBob->append("getMore", batchData.cursorId); getMoreBob->append("collection", batchData.nss.coll()); return; } // We have all of the indexes now, so we can start cloning the collection data. 
auto&& scheduleResult = _scheduleDbWorkFn( stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1)); if (!scheduleResult.isOK()) { _finishCallback(scheduleResult.getStatus()); return; } }