virtual bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result,
                 bool fromRepl) {
    Lock::GlobalWrite globalWriteLock(txn->lockState());
    string source = cmdObj.getStringField(name.c_str());
    string target = cmdObj.getStringField("to");

    // We stay in source context the whole time. This is mostly to set the CurOp namespace.
    Client::Context ctx(txn, source);

    if (!NamespaceString::validCollectionComponent(target.c_str())) {
        errmsg = "invalid collection name: " + target;
        return false;
    }
    if (source.empty() || target.empty()) {
        errmsg = "invalid command syntax";
        return false;
    }

    if (!fromRepl) {  // If it got through on the master, need to allow it here too
        Status sourceStatus = userAllowedWriteNS(source);
        if (!sourceStatus.isOK()) {
            errmsg = "error with source namespace: " + sourceStatus.reason();
            return false;
        }
        Status targetStatus = userAllowedWriteNS(target);
        if (!targetStatus.isOK()) {
            errmsg = "error with target namespace: " + targetStatus.reason();
            return false;
        }
    }

    Database* const sourceDB = dbHolder().get(txn, nsToDatabase(source));
    Collection* const sourceColl = sourceDB ? sourceDB->getCollection(txn, source) : NULL;
    if (!sourceColl) {
        errmsg = "source namespace does not exist";
        return false;
    }

    {
        // Ensure that collection name does not exceed maximum length.
        // Ensure that index names do not push the length over the max.
        // Iterator includes unfinished indexes.
        IndexCatalog::IndexIterator sourceIndIt =
            sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
        int longestIndexNameLength = 0;
        while (sourceIndIt.more()) {
            int thisLength = sourceIndIt.next()->indexName().length();
            if (thisLength > longestIndexNameLength)
                longestIndexNameLength = thisLength;
        }

        unsigned int longestAllowed =
            min(int(NamespaceString::MaxNsCollectionLen),
                int(NamespaceString::MaxNsLen) - 2 /*strlen(".$")*/ - longestIndexNameLength);
        if (target.size() > longestAllowed) {
            StringBuilder sb;
            sb << "collection name length of " << target.size() << " exceeds maximum length of "
               << longestAllowed << ", allowing for index names";
            errmsg = sb.str();
            return false;
        }
    }

    const std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, sourceDB, cmdObj);
    // Dismissed on success
    ScopeGuard indexBuildRestorer = MakeGuard(IndexBuilder::restoreIndexes, indexesInProg);

    bool unused;
    Database* const targetDB = dbHolder().getOrCreate(txn, nsToDatabase(target), unused);

    {
        WriteUnitOfWork wunit(txn);

        // Check if the target namespace exists and if dropTarget is true.
        // If target exists and dropTarget is not true, return false.
        if (targetDB->getCollection(txn, target)) {
            if (!cmdObj["dropTarget"].trueValue()) {
                errmsg = "target namespace exists";
                return false;
            }

            Status s = targetDB->dropCollection(txn, target);
            if (!s.isOK()) {
                errmsg = s.toString();
                return false;
            }
        }

        // If we are renaming in the same database, just
        // rename the namespace and we're done.
        if (sourceDB == targetDB) {
            Status s =
                targetDB->renameCollection(txn, source, target, cmdObj["stayTemp"].trueValue());
            if (!s.isOK()) {
                return appendCommandStatus(result, s);
            }

            if (!fromRepl) {
                repl::logOp(txn, "c", (dbname + ".$cmd").c_str(), cmdObj);
            }

            wunit.commit();
            indexBuildRestorer.Dismiss();
            return true;
        }
    }

    // If we get here, we are renaming across databases, so we must copy all the data and
    // indexes, then remove the source collection.

    // Create the target collection. It will be removed if we fail to copy the collection.
    // TODO use a temp collection and unset the temp flag on success.
    Collection* targetColl = NULL;
    {
        CollectionOptions options;
        options.setNoIdIndex();

        if (sourceColl->isCapped()) {
            // TODO stop assuming storageSize == cappedSize
            options.capped = true;
            options.cappedSize = sourceColl->getRecordStore()->storageSize(txn);
        }

        WriteUnitOfWork wunit(txn);

        // No logOp necessary because the entire renameCollection command is one logOp.
        targetColl = targetDB->createCollection(txn, target, options);
        if (!targetColl) {
            errmsg = "Failed to create target collection.";
            return false;
        }

        wunit.commit();
    }

    // Dismissed on success
    ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target);

    MultiIndexBlock indexer(txn, targetColl);
    indexer.allowInterruption();

    // Copy the index descriptions from the source collection, adjusting the ns field.
    {
        std::vector<BSONObj> indexesToCopy;
        IndexCatalog::IndexIterator sourceIndIt =
            sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
        while (sourceIndIt.more()) {
            const BSONObj currIndex = sourceIndIt.next()->infoObj();

            // Process the source index.
            BSONObjBuilder newIndex;
            newIndex.append("ns", target);
            newIndex.appendElementsUnique(currIndex);
            indexesToCopy.push_back(newIndex.obj());
        }
        indexer.init(indexesToCopy);
    }

    {
        // Copy over all the data from source collection to target collection.
        boost::scoped_ptr<RecordIterator> sourceIt(sourceColl->getIterator(txn));
        while (!sourceIt->isEOF()) {
            txn->checkForInterrupt(false);

            const BSONObj obj = sourceColl->docFor(txn, sourceIt->getNext());

            WriteUnitOfWork wunit(txn);
            // No logOp necessary because the entire renameCollection command is one logOp.
            Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
            if (!status.isOK())
                return appendCommandStatus(result, status);
            wunit.commit();
        }
    }

    Status status = indexer.doneInserting();
    if (!status.isOK())
        return appendCommandStatus(result, status);

    {
        // Getting here means we successfully built the target copy. We now remove the
        // source collection and finalize the rename.
        WriteUnitOfWork wunit(txn);

        Status status = sourceDB->dropCollection(txn, source);
        if (!status.isOK())
            return appendCommandStatus(result, status);

        indexer.commit();

        if (!fromRepl) {
            repl::logOp(txn, "c", (dbname + ".$cmd").c_str(), cmdObj);
        }

        wunit.commit();
    }

    indexBuildRestorer.Dismiss();
    targetCollectionDropper.Dismiss();
    return true;
}
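// Usage sketch (not part of the source above): the handler expects an admin command
// of the form { renameCollection: "<db.src>", to: "<db.dst>", dropTarget: <bool>,
// stayTemp: <bool> }. A minimal client-side invocation, assuming an established
// connection "conn" and hypothetical namespaces, might look like this:
void renameCollectionExample(DBClientBase& conn) {
    BSONObj info;
    bool ok = conn.runCommand("admin",
                              BSON("renameCollection" << "test.src"
                                                      << "to" << "test.dst"
                                                      << "dropTarget" << true),
                              info);
    if (!ok) {
        // On failure, "errmsg" carries the reason built by run() above.
        log() << "rename failed: " << info["errmsg"].str() << endl;
    }
}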
void DocumentSourceOut::initialize() {
    invariant(_mongod);
    DBClientBase* conn = _mongod->directClient();

    // Save the original collection options and index specs so we can check they didn't change
    // during computation.
    _originalOutOptions = _mongod->getCollectionOptions(_outputNs);
    _originalIndexes = conn->getIndexSpecs(_outputNs.ns());

    // Check if it's sharded or capped to make sure we have a chance of succeeding before we do all
    // the work. If the collection becomes capped during processing, the collection options will
    // have changed, and the $out will fail. If it becomes sharded during processing, the final
    // rename will fail.
    uassert(17017,
            str::stream() << "namespace '" << _outputNs.ns()
                          << "' is sharded so it can't be used for $out",
            !_mongod->isSharded(_outputNs));
    uassert(17152,
            str::stream() << "namespace '" << _outputNs.ns()
                          << "' is capped so it can't be used for $out",
            _originalOutOptions["capped"].eoo());

    // We will write all results into a temporary collection, then rename the temporary collection
    // to be the target collection once we are done.
    _tempNs = NamespaceString(str::stream() << _outputNs.db() << ".tmp.agg_out."
                                            << aggOutCounter.addAndFetch(1));

    // Create output collection, copying options from existing collection if any.
    {
        BSONObjBuilder cmd;
        cmd << "create" << _tempNs.coll();
        cmd << "temp" << true;
        cmd.appendElementsUnique(_originalOutOptions);

        BSONObj info;
        bool ok = conn->runCommand(_outputNs.db().toString(), cmd.done(), info);
        uassert(16994,
                str::stream() << "failed to create temporary $out collection '" << _tempNs.ns()
                              << "': " << info.toString(),
                ok);
    }

    // copy indexes to _tempNs
    for (std::list<BSONObj>::const_iterator it = _originalIndexes.begin();
         it != _originalIndexes.end();
         ++it) {
        MutableDocument index((Document(*it)));
        index.remove("_id");  // indexes shouldn't have _ids but some existing ones do
        index["ns"] = Value(_tempNs.ns());

        BSONObj indexBson = index.freeze().toBson();
        conn->insert(_tempNs.getSystemIndexesCollection(), indexBson);
        BSONObj err = conn->getLastErrorDetailed();
        uassert(16995,
                str::stream() << "copying index for $out failed."
                              << " index: " << indexBson << " error: " << err,
                DBClientWithCommands::getLastErrorString(err).empty());
    }
    _initialized = true;
}
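// Hedged sketch of the finish step that initialize() sets up (the actual logic lives
// in another DocumentSourceOut method, not shown here): once all batches are written
// into _tempNs, the temp collection is renamed over the real output namespace so
// readers never observe a partial result. The helper and error code below are
// illustrative only, reusing the members and connection from initialize().
void finalizeSketch(DBClientBase* conn,
                    const NamespaceString& tempNs,
                    const NamespaceString& outputNs) {
    BSONObj rename = BSON("renameCollection" << tempNs.ns()
                                             << "to" << outputNs.ns()
                                             << "dropTarget" << true);
    BSONObj info;
    bool ok = conn->runCommand("admin", rename, info);
    uassert(17999 /* hypothetical error code */,
            str::stream() << "$out rename from '" << tempNs.ns() << "' to '"
                          << outputNs.ns() << "' failed: " << info.toString(),
            ok);
}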
Status renameCollection(OperationContext* txn,
                        const NamespaceString& source,
                        const NamespaceString& target,
                        bool dropTarget,
                        bool stayTemp) {
    DisableDocumentValidation validationDisabler(txn);

    ScopedTransaction transaction(txn, MODE_X);
    Lock::GlobalWrite globalWriteLock(txn->lockState());

    // We stay in source context the whole time. This is mostly to set the CurOp namespace.
    OldClientContext ctx(txn, source.ns());

    bool userInitiatedWritesAndNotPrimary = txn->writesAreReplicated() &&
        !repl::getGlobalReplicationCoordinator()->canAcceptWritesFor(source);

    if (userInitiatedWritesAndNotPrimary) {
        return Status(ErrorCodes::NotMaster,
                      str::stream() << "Not primary while renaming collection " << source.ns()
                                    << " to " << target.ns());
    }

    Database* const sourceDB = dbHolder().get(txn, source.db());
    Collection* const sourceColl = sourceDB ? sourceDB->getCollection(source.ns()) : nullptr;
    if (!sourceColl) {
        return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist");
    }

    {
        // Ensure that collection name does not exceed maximum length.
        // Ensure that index names do not push the length over the max.
        // Iterator includes unfinished indexes.
        IndexCatalog::IndexIterator sourceIndIt =
            sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
        int longestIndexNameLength = 0;
        while (sourceIndIt.more()) {
            int thisLength = sourceIndIt.next()->indexName().length();
            if (thisLength > longestIndexNameLength)
                longestIndexNameLength = thisLength;
        }

        unsigned int longestAllowed =
            std::min(int(NamespaceString::MaxNsCollectionLen),
                     int(NamespaceString::MaxNsLen) - 2 /*strlen(".$")*/ - longestIndexNameLength);
        if (target.size() > longestAllowed) {
            StringBuilder sb;
            sb << "collection name length of " << target.size() << " exceeds maximum length of "
               << longestAllowed << ", allowing for index names";
            return Status(ErrorCodes::InvalidLength, sb.str());
        }
    }

    BackgroundOperation::assertNoBgOpInProgForNs(source.ns());

    Database* const targetDB = dbHolder().openDb(txn, target.db());

    {
        WriteUnitOfWork wunit(txn);

        // Check if the target namespace exists and if dropTarget is true.
        // If target exists and dropTarget is not true, return false.
        if (targetDB->getCollection(target)) {
            if (!dropTarget) {
                return Status(ErrorCodes::NamespaceExists, "target namespace exists");
            }

            Status s = targetDB->dropCollection(txn, target.ns());
            if (!s.isOK()) {
                return s;
            }
        }

        // If we are renaming in the same database, just
        // rename the namespace and we're done.
        if (sourceDB == targetDB) {
            Status s = targetDB->renameCollection(txn, source.ns(), target.ns(), stayTemp);
            if (!s.isOK()) {
                return s;
            }

            getGlobalServiceContext()->getOpObserver()->onRenameCollection(
                txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);

            wunit.commit();
            return Status::OK();
        }

        wunit.commit();
    }

    // If we get here, we are renaming across databases, so we must copy all the data and
    // indexes, then remove the source collection.

    // Create the target collection. It will be removed if we fail to copy the collection.
    // TODO use a temp collection and unset the temp flag on success.
    Collection* targetColl = nullptr;
    {
        CollectionOptions options = sourceColl->getCatalogEntry()->getCollectionOptions(txn);

        WriteUnitOfWork wunit(txn);

        // No logOp necessary because the entire renameCollection command is one logOp.
        bool shouldReplicateWrites = txn->writesAreReplicated();
        txn->setReplicatedWrites(false);
        targetColl = targetDB->createCollection(txn,
                                                target.ns(),
                                                options,
                                                false);  // _id index built with others later.
        txn->setReplicatedWrites(shouldReplicateWrites);
        if (!targetColl) {
            return Status(ErrorCodes::OutOfDiskSpace, "Failed to create target collection.");
        }

        wunit.commit();
    }

    // Dismissed on success
    ScopeGuard targetCollectionDropper = MakeGuard(dropCollection, txn, targetDB, target.ns());

    MultiIndexBlock indexer(txn, targetColl);
    indexer.allowInterruption();

    // Copy the index descriptions from the source collection, adjusting the ns field.
    {
        std::vector<BSONObj> indexesToCopy;
        IndexCatalog::IndexIterator sourceIndIt =
            sourceColl->getIndexCatalog()->getIndexIterator(txn, true);
        while (sourceIndIt.more()) {
            const BSONObj currIndex = sourceIndIt.next()->infoObj();

            // Process the source index.
            BSONObjBuilder newIndex;
            newIndex.append("ns", target.ns());
            newIndex.appendElementsUnique(currIndex);
            indexesToCopy.push_back(newIndex.obj());
        }
        indexer.init(indexesToCopy);
    }

    {
        // Copy over all the data from source collection to target collection.
        auto cursor = sourceColl->getCursor(txn);
        while (auto record = cursor->next()) {
            txn->checkForInterrupt();

            const auto obj = record->data.releaseToBson();

            WriteUnitOfWork wunit(txn);
            // No logOp necessary because the entire renameCollection command is one logOp.
            bool shouldReplicateWrites = txn->writesAreReplicated();
            txn->setReplicatedWrites(false);
            Status status = targetColl->insertDocument(txn, obj, &indexer, true).getStatus();
            txn->setReplicatedWrites(shouldReplicateWrites);
            if (!status.isOK())
                return status;
            wunit.commit();
        }
    }

    Status status = indexer.doneInserting();
    if (!status.isOK())
        return status;

    {
        // Getting here means we successfully built the target copy. We now remove the
        // source collection and finalize the rename.
        WriteUnitOfWork wunit(txn);

        bool shouldReplicateWrites = txn->writesAreReplicated();
        txn->setReplicatedWrites(false);
        Status status = sourceDB->dropCollection(txn, source.ns());
        txn->setReplicatedWrites(shouldReplicateWrites);
        if (!status.isOK())
            return status;

        indexer.commit();

        getGlobalServiceContext()->getOpObserver()->onRenameCollection(
            txn, NamespaceString(source), NamespaceString(target), dropTarget, stayTemp);

        wunit.commit();
    }

    targetCollectionDropper.Dismiss();
    return Status::OK();
}
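// The cleanup pattern above in miniature: MakeGuard schedules a rollback action that
// runs at scope exit unless Dismiss() is called, so targetCollectionDropper drops the
// half-copied target collection on any early return but leaves it alone once the
// rename fully succeeds. A self-contained sketch of the same idiom, using the same
// ScopeGuard/MakeGuard utilities (doWork/undoWork are hypothetical):
Status guardedWorkSketch() {
    ScopeGuard undoOnFailure = MakeGuard(undoWork);  // fires on every early return
    Status s = doWork();
    if (!s.isOK())
        return s;  // guard destructor runs undoWork() here
    undoOnFailure.Dismiss();  // success: keep the results, skip the rollback
    return Status::OK();
}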
bool ClientInfo::getLastError( const BSONObj& options,
                               BSONObjBuilder& result,
                               bool fromWriteBackListener ) {
    set<string>* shards = getPrev();

    if ( shards->size() == 0 ) {
        result.appendNull( "err" );
        return true;
    }

    vector<WBInfo> writebacks;

    // handle single server
    if ( shards->size() == 1 ) {
        string theShard = *(shards->begin());

        ShardConnection conn( theShard, "", true );

        BSONObj res;
        bool ok = false;
        try {
            ok = conn->runCommand( "admin", options, res );
        }
        catch ( std::exception& e ) {
            warning() << "could not get last error from shard " << theShard
                      << causedBy( e ) << endl;

            // Catch everything that happens here, since we need to ensure we return our
            // connection when we're finished.
            conn.done();
            return false;
        }

        res = res.getOwned();
        conn.done();

        _addWriteBack( writebacks, res );

        // hit other machines just to block
        for ( set<string>::const_iterator i = sinceLastGetError().begin();
              i != sinceLastGetError().end(); ++i ) {
            string temp = *i;
            if ( temp == theShard )
                continue;

            ShardConnection conn( temp, "" );
            try {
                _addWriteBack( writebacks, conn->getLastErrorDetailed() );
            }
            catch ( std::exception& e ) {
                warning() << "could not clear last error from shard " << temp
                          << causedBy( e ) << endl;
            }
            conn.done();
        }
        clearSinceLastGetError();

        if ( writebacks.size() ) {
            vector<BSONObj> v = _handleWriteBacks( writebacks, fromWriteBackListener );
            if ( v.size() == 0 && fromWriteBackListener ) {
                // ok
            }
            else {
                assert( v.size() == 1 );
                result.appendElements( v[0] );
                result.appendElementsUnique( res );
                result.append( "writebackGLE", v[0] );
                result.append( "initialGLEHost", theShard );
            }
        }
        else {
            result.append( "singleShard", theShard );
            result.appendElements( res );
        }

        return ok;
    }

    BSONArrayBuilder bbb( result.subarrayStart( "shards" ) );
    BSONObjBuilder shardRawGLE;

    long long n = 0;

    int updatedExistingStat = 0;  // 0 is none, -1 has but false, 1 has true

    // hit each shard
    vector<string> errors;
    vector<BSONObj> errorObjects;
    for ( set<string>::iterator i = shards->begin(); i != shards->end(); i++ ) {
        string theShard = *i;
        bbb.append( theShard );

        ShardConnection conn( theShard, "", true );

        BSONObj res;
        bool ok = false;
        try {
            ok = conn->runCommand( "admin", options, res );
            shardRawGLE.append( theShard, res );
        }
        catch ( std::exception& e ) {
            // Safe to return here, since we haven't started any extra processing yet, just
            // collecting responses.
            warning() << "could not get last error from a shard " << theShard
                      << causedBy( e ) << endl;
            conn.done();
            return false;
        }

        _addWriteBack( writebacks, res );

        string temp = DBClientWithCommands::getLastErrorString( res );
        if ( conn->type() != ConnectionString::SYNC && ( ok == false || temp.size() ) ) {
            errors.push_back( temp );
            errorObjects.push_back( res );
        }

        n += res["n"].numberLong();

        if ( res["updatedExisting"].type() ) {
            if ( res["updatedExisting"].trueValue() )
                updatedExistingStat = 1;
            else if ( updatedExistingStat == 0 )
                updatedExistingStat = -1;
        }

        conn.done();
    }

    bbb.done();
    result.append( "shardRawGLE", shardRawGLE.obj() );

    result.appendNumber( "n", n );
    if ( updatedExistingStat )
        result.appendBool( "updatedExisting", updatedExistingStat > 0 );

    // hit other machines just to block
    for ( set<string>::const_iterator i = sinceLastGetError().begin();
          i != sinceLastGetError().end(); ++i ) {
        string temp = *i;
        if ( shards->count( temp ) )
            continue;

        ShardConnection conn( temp, "" );
        try {
            _addWriteBack( writebacks, conn->getLastErrorDetailed() );
        }
        catch ( std::exception& e ) {
            warning() << "could not clear last error from a shard " << temp
                      << causedBy( e ) << endl;
        }
        // Return the connection to the pool even on this block-only path, matching the
        // single-shard loop above; the original omitted this and leaked the connection.
        conn.done();
    }
    clearSinceLastGetError();

    if ( errors.size() == 0 ) {
        result.appendNull( "err" );
        _handleWriteBacks( writebacks, fromWriteBackListener );
        return true;
    }

    result.append( "err", errors[0].c_str() );

    {
        // errs
        BSONArrayBuilder all( result.subarrayStart( "errs" ) );
        for ( unsigned i = 0; i < errors.size(); i++ ) {
            all.append( errors[i].c_str() );
        }
        all.done();
    }

    {
        // errObjects
        BSONArrayBuilder all( result.subarrayStart( "errObjects" ) );
        for ( unsigned i = 0; i < errorObjects.size(); i++ ) {
            all.append( errorObjects[i] );
        }
        all.done();
    }

    _handleWriteBacks( writebacks, fromWriteBackListener );
    return true;
}