void CollectionCloner::_findCallback(const StatusWith<Fetcher::BatchData>& fetchResult, Fetcher::NextAction* nextAction, BSONObjBuilder* getMoreBob) { boost::lock_guard<boost::mutex> lk(_mutex); _active = false; if (!fetchResult.isOK()) { _work(fetchResult.getStatus()); return; } auto batchData(fetchResult.getValue()); _documents = batchData.documents; bool lastBatch = *nextAction == Fetcher::NextAction::kNoAction; auto&& scheduleResult = _scheduleDbWorkFn(stdx::bind( &CollectionCloner::_insertDocumentsCallback, this, stdx::placeholders::_1, lastBatch)); if (!scheduleResult.isOK()) { _work(scheduleResult.getStatus()); return; } if (*nextAction == Fetcher::NextAction::kGetMore) { invariant(getMoreBob); getMoreBob->append("getMore", batchData.cursorId); getMoreBob->append("collection", batchData.nss.coll()); } _active = true; _dbWorkCallbackHandle = scheduleResult.getValue(); }
void CollectionCloner::_findCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult, Fetcher::NextAction* nextAction, BSONObjBuilder* getMoreBob) { if (!fetchResult.isOK()) { _finishCallback(nullptr, fetchResult.getStatus()); return; } auto batchData(fetchResult.getValue()); _documents = batchData.documents; bool lastBatch = *nextAction == Fetcher::NextAction::kNoAction; auto&& scheduleResult = _scheduleDbWorkFn(stdx::bind( &CollectionCloner::_insertDocumentsCallback, this, stdx::placeholders::_1, lastBatch)); if (!scheduleResult.isOK()) { _finishCallback(nullptr, scheduleResult.getStatus()); return; } if (*nextAction == Fetcher::NextAction::kGetMore) { invariant(getMoreBob); getMoreBob->append("getMore", batchData.cursorId); getMoreBob->append("collection", batchData.nss.coll()); } _dbWorkCallbackHandle = scheduleResult.getValue(); }
void CollectionCloner::_listIndexesCallback(const StatusWith<Fetcher::BatchData>& fetchResult, Fetcher::NextAction* nextAction, BSONObjBuilder* getMoreBob) { boost::lock_guard<boost::mutex> lk(_mutex); _active = false; if (!fetchResult.isOK()) { _work(fetchResult.getStatus()); return; } auto batchData(fetchResult.getValue()); auto&& documents = batchData.documents; if (documents.empty()) { warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from " << _source; } // We may be called with multiple batches leading to a need to grow _indexSpecs. _indexSpecs.reserve(_indexSpecs.size() + documents.size()); _indexSpecs.insert(_indexSpecs.end(), documents.begin(), documents.end()); // The fetcher will continue to call with kGetMore until an error or the last batch. if (*nextAction == Fetcher::NextAction::kGetMore) { invariant(getMoreBob); getMoreBob->append("getMore", batchData.cursorId); getMoreBob->append("collection", batchData.nss.coll()); _active = true; return; } // We have all of the indexes now, so we can start cloning the collection data. auto&& scheduleResult = _scheduleDbWorkFn( stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1)); if (!scheduleResult.isOK()) { _work(scheduleResult.getStatus()); return; } _active = true; _dbWorkCallbackHandle = scheduleResult.getValue(); }
void CollectionCloner::_findCallback(const StatusWith<Fetcher::QueryResponse>& fetchResult, Fetcher::NextAction* nextAction, BSONObjBuilder* getMoreBob) { if (!fetchResult.isOK()) { Status newStatus{fetchResult.getStatus().code(), str::stream() << "While querying collection '" << _sourceNss.ns() << "' there was an error '" << fetchResult.getStatus().reason() << "'"}; // TODO: cancel active inserts? _finishCallback(newStatus); return; } auto batchData(fetchResult.getValue()); bool lastBatch = *nextAction == Fetcher::NextAction::kNoAction; if (batchData.documents.size() > 0) { LockGuard lk(_mutex); _documents.insert(_documents.end(), batchData.documents.begin(), batchData.documents.end()); } else if (!batchData.first) { warning() << "No documents returned in batch; ns: " << _sourceNss << ", cursorId:" << batchData.cursorId << ", isLastBatch:" << lastBatch; } auto&& scheduleResult = _scheduleDbWorkFn(stdx::bind( &CollectionCloner::_insertDocumentsCallback, this, stdx::placeholders::_1, lastBatch)); if (!scheduleResult.isOK()) { Status newStatus{scheduleResult.getStatus().code(), str::stream() << "While cloning collection '" << _sourceNss.ns() << "' there was an error '" << scheduleResult.getStatus().reason() << "'"}; _finishCallback(newStatus); return; } if (!lastBatch) { invariant(getMoreBob); getMoreBob->append("getMore", batchData.cursorId); getMoreBob->append("collection", batchData.nss.coll()); } }
void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult, Fetcher::NextAction* nextAction, BSONObjBuilder* getMoreBob) { const bool collectionIsEmpty = fetchResult == ErrorCodes::NamespaceNotFound; if (collectionIsEmpty) { // Schedule collection creation and finish callback. auto&& scheduleResult = _scheduleDbWorkFn([this](const executor::TaskExecutor::CallbackArgs& cbd) { if (!cbd.status.isOK()) { _finishCallback(cbd.status); return; } auto txn = cbd.txn; txn->setReplicatedWrites(false); auto&& createStatus = _storageInterface->createCollection(txn, _destNss, _options); _finishCallback(createStatus); }); if (!scheduleResult.isOK()) { _finishCallback(scheduleResult.getStatus()); } return; }; if (!fetchResult.isOK()) { Status newStatus{fetchResult.getStatus().code(), str::stream() << "During listIndexes call on collection '" << _sourceNss.ns() << "' there was an error '" << fetchResult.getStatus().reason() << "'"}; _finishCallback(newStatus); return; } auto batchData(fetchResult.getValue()); auto&& documents = batchData.documents; if (documents.empty()) { warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from " << _source; } UniqueLock lk(_mutex); // We may be called with multiple batches leading to a need to grow _indexSpecs. _indexSpecs.reserve(_indexSpecs.size() + documents.size()); for (auto&& doc : documents) { if (StringData("_id_") == doc["name"].str()) { _idIndexSpec = doc; continue; } _indexSpecs.push_back(doc); } lk.unlock(); // The fetcher will continue to call with kGetMore until an error or the last batch. if (*nextAction == Fetcher::NextAction::kGetMore) { invariant(getMoreBob); getMoreBob->append("getMore", batchData.cursorId); getMoreBob->append("collection", batchData.nss.coll()); return; } // We have all of the indexes now, so we can start cloning the collection data. 
auto&& scheduleResult = _scheduleDbWorkFn( stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1)); if (!scheduleResult.isOK()) { _finishCallback(scheduleResult.getStatus()); return; } }
// Callback invoked when the AsyncResultsMerger (ARM) has produced a (possibly
// failed) batch of results. Buffers the batch under the mutex, schedules the
// document insertion on the DB worker, and re-arms itself until the remote
// cursors are exhausted.
void CollectionCloner::_handleARMResultsCallback(
    const executor::TaskExecutor::CallbackArgs& cbd,
    std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
    // Helper: record the terminal status on the guard and cancel outstanding work.
    auto setResultAndCancelRemainingWork = [this](std::shared_ptr<OnCompletionGuard> guard,
                                                  Status status) {
        stdx::lock_guard<stdx::mutex> lock(_mutex);
        guard->setResultAndCancelRemainingWork_inlock(lock, status);
    };

    if (!cbd.status.isOK()) {
        // Wait for active inserts to complete.
        waitForDbWorker();
        // Bug fix: the contextual status was previously built and then discarded in favor
        // of the bare cbd.status; propagate the annotated status (with balanced quotes).
        Status newStatus = cbd.status.withContext(
            str::stream() << "Error querying collection '" << _sourceNss.ns() << "'");
        setResultAndCancelRemainingWork(onCompletionGuard, newStatus);
        return;
    }

    // Pull the documents from the ARM into a buffer until the entire batch has been processed.
    bool lastBatch;
    {
        UniqueLock lk(_mutex);
        auto nextBatchStatus = _bufferNextBatchFromArm(lk);
        if (!nextBatchStatus.isOK()) {
            if (_options.uuid && (nextBatchStatus.code() == ErrorCodes::OperationFailed ||
                                  nextBatchStatus.code() == ErrorCodes::CursorNotFound)) {
                // With these errors, it's possible the collection was dropped while we were
                // cloning.  If so, we'll execute the drop during oplog application, so it's OK to
                // just stop cloning.  This is only safe if cloning by UUID; if we are cloning by
                // name, we have no way to detect if the collection was dropped and another
                // collection with the same name created in the interim.
                _verifyCollectionWasDropped(lk, nextBatchStatus, onCompletionGuard, cbd.opCtx);
            } else {
                onCompletionGuard->setResultAndCancelRemainingWork_inlock(lk, nextBatchStatus);
            }
            return;
        }

        // Check if this is the last batch of documents to clone.
        lastBatch = _arm->remotesExhausted();
    }

    // Schedule the next document batch insertion.
    auto&& scheduleResult = _scheduleDbWorkFn([=](const executor::TaskExecutor::CallbackArgs& cbd) {
        _insertDocumentsCallback(cbd, lastBatch, onCompletionGuard);
    });
    if (!scheduleResult.isOK()) {
        // Bug fix: propagate the annotated status rather than the bare schedule status,
        // which previously left newStatus constructed but unused.
        Status newStatus = scheduleResult.getStatus().withContext(
            str::stream() << "Error cloning collection '" << _sourceNss.ns() << "'");
        setResultAndCancelRemainingWork(onCompletionGuard, newStatus);
        return;
    }

    MONGO_FAIL_POINT_BLOCK(initialSyncHangCollectionClonerAfterHandlingBatchResponse, nssData) {
        const BSONObj& data = nssData.getData();
        auto nss = data["nss"].str();
        // Only hang when cloning the specified collection, or if no collection was specified.
        if (nss.empty() || _destNss.toString() == nss) {
            while (MONGO_FAIL_POINT(initialSyncHangCollectionClonerAfterHandlingBatchResponse) &&
                   !_isShuttingDown()) {
                log() << "initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point "
                         "enabled for "
                      << _destNss.toString() << ". Blocking until fail point is disabled.";
                mongo::sleepsecs(1);
            }
        }
    }

    // If the remote cursors are not exhausted, schedule this callback again to handle
    // the impending cursor response.
    if (!lastBatch) {
        Status scheduleStatus = _scheduleNextARMResultsCallback(onCompletionGuard);
        if (!scheduleStatus.isOK()) {
            setResultAndCancelRemainingWork(onCompletionGuard, scheduleStatus);
            return;
        }
    }
}
// Fetcher callback for listIndexes results (UUID-aware variant).
//
// A NamespaceNotFound error means the collection does not exist on the sync
// source; an empty collection is created locally and the cloner finishes.
// Otherwise index specs are collected across batches — with each spec's 'ns'
// field rewritten to _sourceNss, since a sync source queried by UUID may know
// the collection under a different name — and collection cloning begins after
// the final batch has been processed.
void CollectionCloner::_listIndexesCallback(const Fetcher::QueryResponseStatus& fetchResult,
                                            Fetcher::NextAction* nextAction,
                                            BSONObjBuilder* getMoreBob) {
    const bool collectionIsEmpty = fetchResult == ErrorCodes::NamespaceNotFound;
    if (collectionIsEmpty) {
        // Schedule collection creation and finish callback.
        auto&& scheduleResult =
            _scheduleDbWorkFn([this](const executor::TaskExecutor::CallbackArgs& cbd) {
                if (!cbd.status.isOK()) {
                    _finishCallback(cbd.status);
                    return;
                }
                auto opCtx = cbd.opCtx;
                // RAII guard: writes performed while creating the collection are not replicated.
                UnreplicatedWritesBlock uwb(opCtx);
                auto&& createStatus = _storageInterface->createCollection(opCtx, _destNss, _options);
                _finishCallback(createStatus);
            });
        if (!scheduleResult.isOK()) {
            _finishCallback(scheduleResult.getStatus());
        }
        return;
    }

    if (!fetchResult.isOK()) {
        _finishCallback(fetchResult.getStatus().withContext(
            str::stream() << "listIndexes call failed on collection '" << _sourceNss.ns() << "'"));
        return;
    }

    auto batchData(fetchResult.getValue());
    auto&& documents = batchData.documents;
    if (documents.empty()) {
        warning() << "No indexes found for collection " << _sourceNss.ns() << " while cloning from "
                  << _source;
    }

    UniqueLock lk(_mutex);
    // When listing indexes by UUID, the sync source may use a different name for the collection
    // as result of renaming or two-phase drop. As the index spec also includes a 'ns' field, this
    // must be rewritten.
    BSONObjBuilder nsFieldReplacementBuilder;
    nsFieldReplacementBuilder.append("ns", _sourceNss.ns());
    BSONElement nsFieldReplacementElem = nsFieldReplacementBuilder.done().firstElement();

    // We may be called with multiple batches leading to a need to grow _indexSpecs.
    _indexSpecs.reserve(_indexSpecs.size() + documents.size());
    for (auto&& doc : documents) {
        // The addField replaces the 'ns' field with the correct name, see above.
        if (StringData("_id_") == doc["name"].str()) {
            _idIndexSpec = doc.addField(nsFieldReplacementElem);
            continue;
        }
        _indexSpecs.push_back(doc.addField(nsFieldReplacementElem));
    }
    lk.unlock();

    // The fetcher will continue to call with kGetMore until an error or the last batch.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
        return;
    }

    // We have all of the indexes now, so we can start cloning the collection data.
    auto&& scheduleResult = _scheduleDbWorkFn(
        [=](const executor::TaskExecutor::CallbackArgs& cbd) { _beginCollectionCallback(cbd); });
    if (!scheduleResult.isOK()) {
        _finishCallback(scheduleResult.getStatus());
        return;
    }
}