// Executor callback run on the database worker thread.  Creates the
// destination collection (with the index specs gathered by the listIndexes
// phase) and then kicks off the 'find' fetcher that streams the source
// collection's documents.  On any failure, reports the error via _work and
// leaves the cloner inactive.
void CollectionCloner::_beginCollectionCallback(const ReplicationExecutor::CallbackData& cbd) {
    boost::lock_guard<boost::mutex> guard(_mutex);
    // Assume we are done; re-armed below once follow-up work is scheduled.
    _active = false;

    // Executor-level failure (e.g. shutdown/cancellation) — report and stop.
    if (!cbd.status.isOK()) {
        _work(cbd.status);
        return;
    }

    // Create the collection and its indexes on local storage.
    const Status beginStatus =
        _storageInterface->beginCollection(cbd.txn, _destNss, _options, _indexSpecs);
    if (!beginStatus.isOK()) {
        _work(beginStatus);
        return;
    }

    // Start fetching documents from the source collection.
    const Status fetchStatus = _findFetcher.schedule();
    if (!fetchStatus.isOK()) {
        _work(fetchStatus);
        return;
    }

    _active = true;
}
// Fetcher callback invoked for each batch of documents returned by the
// 'find' command on the source collection.  Stashes the batch in
// '_documents' and schedules _insertDocumentsCallback on the executor's
// database worker thread to write them locally; requests the next batch
// via 'getMoreBob' while the cursor remains open.
void CollectionCloner::_findCallback(const StatusWith<Fetcher::BatchData>& fetchResult,
                                     Fetcher::NextAction* nextAction,
                                     BSONObjBuilder* getMoreBob) {
    boost::lock_guard<boost::mutex> lk(_mutex);
    // Pessimistically mark inactive; re-armed below once the DB work is scheduled.
    _active = false;
    if (!fetchResult.isOK()) {
        _work(fetchResult.getStatus());
        return;
    }
    auto batchData(fetchResult.getValue());
    // Save the batch for the insert callback scheduled below.
    _documents = batchData.documents;
    // kNoAction means the cursor is exhausted — this is the final batch.
    bool lastBatch = *nextAction == Fetcher::NextAction::kNoAction;
    auto&& scheduleResult = _scheduleDbWorkFn(stdx::bind(
        &CollectionCloner::_insertDocumentsCallback, this, stdx::placeholders::_1, lastBatch));
    if (!scheduleResult.isOK()) {
        _work(scheduleResult.getStatus());
        return;
    }
    // Ask the fetcher to issue a getMore for the next batch while the cursor is open.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
    }
    _active = true;
    // Keep the handle so the scheduled DB work can be tracked/cancelled.
    _dbWorkCallbackHandle = scheduleResult.getValue();
}
void DatabaseCloner::_collectionClonerCallback(const Status& status, const NamespaceString& nss) { boost::lock_guard<boost::mutex> lk(_mutex); _active = false; // Forward collection cloner result to caller. // Failure to clone a collection does not stop the database cloner // from cloning the rest of the collections in the listCollections result. _collectionWork(status, nss); _currentCollectionClonerIter++; LOG(1) << " cloning collection " << _currentCollectionClonerIter->getSourceNamespace(); if (_currentCollectionClonerIter != _collectionCloners.end()) { Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter); if (!startStatus.isOK()) { LOG(1) << " failed to start collection cloning on " << _currentCollectionClonerIter->getSourceNamespace() << ": " << startStatus; _work(startStatus); return; } _active = true; return; } _work(Status::OK()); }
// Fetcher callback for the 'listIndexes' command on the source collection.
// Accumulates index specs across batches in '_indexSpecs'; once the final
// batch arrives, schedules _beginCollectionCallback on the executor's
// database worker thread to create the collection (and indexes) locally.
void CollectionCloner::_listIndexesCallback(const StatusWith<Fetcher::BatchData>& fetchResult,
                                            Fetcher::NextAction* nextAction,
                                            BSONObjBuilder* getMoreBob) {
    boost::lock_guard<boost::mutex> lk(_mutex);
    // Pessimistically mark inactive; re-armed below while more work is pending.
    _active = false;
    if (!fetchResult.isOK()) {
        _work(fetchResult.getStatus());
        return;
    }
    auto batchData(fetchResult.getValue());
    auto&& documents = batchData.documents;
    // An empty result is unusual (no _id index?) but not fatal — warn and continue.
    if (documents.empty()) {
        warning() << "No indexes found for collection " << _sourceNss.ns()
                  << " while cloning from " << _source;
    }
    // We may be called with multiple batches leading to a need to grow _indexSpecs.
    _indexSpecs.reserve(_indexSpecs.size() + documents.size());
    _indexSpecs.insert(_indexSpecs.end(), documents.begin(), documents.end());
    // The fetcher will continue to call with kGetMore until an error or the last batch.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
        _active = true;
        return;
    }
    // We have all of the indexes now, so we can start cloning the collection data.
    auto&& scheduleResult = _scheduleDbWorkFn(
        stdx::bind(&CollectionCloner::_beginCollectionCallback, this, stdx::placeholders::_1));
    if (!scheduleResult.isOK()) {
        _work(scheduleResult.getStatus());
        return;
    }
    _active = true;
    // Keep the handle so the scheduled DB work can be tracked/cancelled.
    _dbWorkCallbackHandle = scheduleResult.getValue();
}
// Database-worker callback that writes the most recently fetched batch
// ('_documents') into the destination collection.  When 'lastBatch' is set
// the cursor is exhausted, so overall success is reported via _work;
// otherwise the cloner stays active awaiting further batches.
void CollectionCloner::_insertDocumentsCallback(const ReplicationExecutor::CallbackData& cbd,
                                                bool lastBatch) {
    boost::lock_guard<boost::mutex> guard(_mutex);
    _active = false;

    // Executor-level failure (e.g. shutdown/cancellation) — report and stop.
    if (!cbd.status.isOK()) {
        _work(cbd.status);
        return;
    }

    const Status insertStatus = _storageInterface->insertDocuments(cbd.txn, _destNss, _documents);
    if (!insertStatus.isOK()) {
        _work(insertStatus);
        return;
    }

    if (lastBatch) {
        // Every batch has been stored — report overall success.
        _work(Status::OK());
        return;
    }

    // More batches are coming; remain active until the fetcher delivers them.
    _active = true;
}
// Scheduler entry point: forwards the time value to the stored work callback.
// NOTE(review): the unit/meaning of 'dtime' (ticks vs. milliseconds, delta
// vs. absolute) is not visible from this definition — confirm at call sites.
void ZWorkSchedulable::run(uint32_t dtime) {
    _work(dtime);
}
// Fetcher callback for the 'listCollections' command on the source database.
// Accumulates predicate-filtered collection infos across batches; on the
// final batch it validates each info document ('name' and 'options' fields),
// constructs a CollectionCloner per collection, and starts the first one.
void DatabaseCloner::_listCollectionsCallback(const StatusWith<Fetcher::BatchData>& result,
                                              Fetcher::NextAction* nextAction,
                                              BSONObjBuilder* getMoreBob) {
    boost::lock_guard<boost::mutex> lk(_mutex);
    // Pessimistically mark inactive; re-armed below once follow-up work starts.
    _active = false;
    if (!result.isOK()) {
        _work(result.getStatus());
        return;
    }
    auto batchData(result.getValue());
    auto&& documents = batchData.documents;
    // We may be called with multiple batches leading to a need to grow _collectionInfos.
    _collectionInfos.reserve(_collectionInfos.size() + documents.size());
    // Keep only the collections accepted by the caller-supplied predicate.
    std::copy_if(documents.begin(),
                 documents.end(),
                 std::back_inserter(_collectionInfos),
                 _listCollectionsPredicate);
    // The fetcher will continue to call with kGetMore until an error or the last batch.
    if (*nextAction == Fetcher::NextAction::kGetMore) {
        invariant(getMoreBob);
        getMoreBob->append("getMore", batchData.cursorId);
        getMoreBob->append("collection", batchData.nss.coll());
        _active = true;
        return;
    }
    // Nothing to do for an empty database.
    if (_collectionInfos.empty()) {
        _work(Status::OK());
        return;
    }
    _collectionNamespaces.reserve(_collectionInfos.size());
    std::set<std::string> seen;  // detects duplicate collection names
    for (auto&& info : _collectionInfos) {
        // 'name' field is required and must be a string.
        BSONElement nameElement = info.getField(kNameFieldName);
        if (nameElement.eoo()) {
            _work(Status(ErrorCodes::FailedToParse,
                         str::stream() << "collection info must contain '" << kNameFieldName
                                       << "' "
                                       << "field : " << info));
            return;
        }
        if (nameElement.type() != mongo::String) {
            _work(Status(ErrorCodes::TypeMismatch,
                         str::stream() << "'" << kNameFieldName
                                       << "' field must be a string: " << info));
            return;
        }
        const std::string collectionName = nameElement.String();
        if (seen.find(collectionName) != seen.end()) {
            _work(Status(ErrorCodes::DuplicateKey,
                         str::stream() << "collection info contains duplicate collection name "
                                       << "'" << collectionName << "': " << info));
            return;
        }
        // 'options' field is required and must be an object parseable as
        // CollectionOptions.
        BSONElement optionsElement = info.getField(kOptionsFieldName);
        if (optionsElement.eoo()) {
            _work(Status(ErrorCodes::FailedToParse,
                         str::stream() << "collection info must contain '" << kOptionsFieldName
                                       << "' "
                                       << "field : " << info));
            return;
        }
        if (!optionsElement.isABSONObj()) {
            _work(Status(ErrorCodes::TypeMismatch,
                         str::stream() << "'" << kOptionsFieldName
                                       << "' field must be an object: " << info));
            return;
        }
        const BSONObj optionsObj = optionsElement.Obj();
        CollectionOptions options;
        Status parseStatus = options.parse(optionsObj);
        if (!parseStatus.isOK()) {
            _work(parseStatus);
            return;
        }
        seen.insert(collectionName);
        _collectionNamespaces.emplace_back(_dbname, collectionName);
        // Reference the namespace just appended (crbegin() is the last element).
        auto&& nss = *_collectionNamespaces.crbegin();
        try {
            // Construction may throw (e.g. invalid arguments); converted to a
            // Status in the catch below.
            _collectionCloners.emplace_back(
                _executor,
                _source,
                nss,
                options,
                stdx::bind(
                    &DatabaseCloner::_collectionClonerCallback, this, stdx::placeholders::_1, nss),
                _createStorageInterface());
        } catch (const UserException& ex) {
            _work(ex.toStatus());
            return;
        }
    }
    // Propagate the DB-work scheduler to every cloner before starting any of them.
    for (auto&& collectionCloner : _collectionCloners) {
        collectionCloner.setScheduleDbWorkFn(_scheduleDbWorkFn);
    }
    // Start first collection cloner.
    _currentCollectionClonerIter = _collectionCloners.begin();
    LOG(1) << " cloning collection " << _currentCollectionClonerIter->getSourceNamespace();
    Status startStatus = _startCollectionCloner(*_currentCollectionClonerIter);
    if (!startStatus.isOK()) {
        LOG(1) << " failed to start collection cloning on "
               << _currentCollectionClonerIter->getSourceNamespace() << ": " << startStatus;
        _work(startStatus);
        return;
    }
    _active = true;
}