// Blocks until all outstanding asynchronous work started by this cloner has finished.
// Waits on both fetchers and then on any scheduled database-worker tasks.
// Safe to call at any time: if a fetcher is inactive, wait() has no effect.
void CollectionCloner::wait() {
    // If a fetcher is inactive, wait() has no effect.
    _listIndexesFetcher.wait();
    _findFetcher.wait();
    // Ensure any in-flight document-insertion work on the DB worker thread completes.
    waitForDbWorker();
}
// Callback invoked when the AsyncResultsMerger (ARM) has a batch of query results
// (or an error) for the collection being cloned.
//
// On success it buffers the batch, schedules the DB-worker insertion of those
// documents, and — if the remote cursors are not yet exhausted — re-schedules
// itself to consume the next batch. On any failure it records the (contextualized)
// error on 'onCompletionGuard' and cancels remaining work.
//
// Parameters:
//   cbd               - executor callback args; cbd.status is the scheduling status.
//   onCompletionGuard - guard that reports the final result of the clone attempt.
void CollectionCloner::_handleARMResultsCallback(
    const executor::TaskExecutor::CallbackArgs& cbd,
    std::shared_ptr<OnCompletionGuard> onCompletionGuard) {
    // Helper: record 'status' as the final result and cancel remaining cloner work.
    auto setResultAndCancelRemainingWork = [this](std::shared_ptr<OnCompletionGuard> guard,
                                                  Status status) {
        stdx::lock_guard<stdx::mutex> lock(_mutex);
        guard->setResultAndCancelRemainingWork_inlock(lock, status);
        return;
    };

    if (!cbd.status.isOK()) {
        // Wait for active inserts to complete.
        waitForDbWorker();
        Status newStatus = cbd.status.withContext(str::stream() << "Error querying collection '"
                                                                << _sourceNss.ns()
                                                                << "'");
        // Propagate the contextualized status; previously the raw 'cbd.status' was
        // passed here, silently discarding the context added above.
        setResultAndCancelRemainingWork(onCompletionGuard, newStatus);
        return;
    }

    // Pull the documents from the ARM into a buffer until the entire batch has been processed.
    bool lastBatch;
    {
        UniqueLock lk(_mutex);
        auto nextBatchStatus = _bufferNextBatchFromArm(lk);
        if (!nextBatchStatus.isOK()) {
            if (_options.uuid && (nextBatchStatus.code() == ErrorCodes::OperationFailed ||
                                  nextBatchStatus.code() == ErrorCodes::CursorNotFound)) {
                // With these errors, it's possible the collection was dropped while we were
                // cloning.  If so, we'll execute the drop during oplog application, so it's OK
                // to just stop cloning.  This is only safe if cloning by UUID; if we are
                // cloning by name, we have no way to detect if the collection was dropped and
                // another collection with the same name created in the interim.
                _verifyCollectionWasDropped(lk, nextBatchStatus, onCompletionGuard, cbd.opCtx);
            } else {
                onCompletionGuard->setResultAndCancelRemainingWork_inlock(lk, nextBatchStatus);
            }
            return;
        }

        // Check if this is the last batch of documents to clone.
        lastBatch = _arm->remotesExhausted();
    }

    // Schedule the next document batch insertion.
    auto&& scheduleResult = _scheduleDbWorkFn([=](const executor::TaskExecutor::CallbackArgs& cbd) {
        _insertDocumentsCallback(cbd, lastBatch, onCompletionGuard);
    });
    if (!scheduleResult.isOK()) {
        Status newStatus = scheduleResult.getStatus().withContext(
            str::stream() << "Error cloning collection '" << _sourceNss.ns() << "'");
        // Propagate the contextualized status; previously the raw schedule status was
        // passed here, silently discarding the context added above.
        setResultAndCancelRemainingWork(onCompletionGuard, newStatus);
        return;
    }

    // Test-only fail point: optionally block after processing a batch so tests can
    // inspect intermediate state. Honors shutdown so we never hang forever.
    MONGO_FAIL_POINT_BLOCK(initialSyncHangCollectionClonerAfterHandlingBatchResponse, nssData) {
        const BSONObj& data = nssData.getData();
        auto nss = data["nss"].str();
        // Only hang when cloning the specified collection, or if no collection was specified.
        if (nss.empty() || _destNss.toString() == nss) {
            while (MONGO_FAIL_POINT(initialSyncHangCollectionClonerAfterHandlingBatchResponse) &&
                   !_isShuttingDown()) {
                log() << "initialSyncHangCollectionClonerAfterHandlingBatchResponse fail point "
                         "enabled for "
                      << _destNss.toString() << ". Blocking until fail point is disabled.";
                mongo::sleepsecs(1);
            }
        }
    }

    // If the remote cursors are not exhausted, schedule this callback again to handle
    // the impending cursor response.
    if (!lastBatch) {
        Status scheduleStatus = _scheduleNextARMResultsCallback(onCompletionGuard);
        if (!scheduleStatus.isOK()) {
            setResultAndCancelRemainingWork(onCompletionGuard, scheduleStatus);
            return;
        }
    }
}