// Batch callback: copies every document in the current cursor batch into the
// local collection `to_collection`. For system.indexes sources the index specs
// are not inserted here but queued in `storedForLater` for later processing.
void operator()(DBClientCursorBatchIterator &i) {
    const string to_dbname = nsToDatabase(to_collection);
    while (i.moreInCurrentBatch()) {
        // Every 128 documents: maybe log progress (at most once a minute)
        // and give the operation a chance to be interrupted.
        if (n % 128 == 127) {
            time_t now = time(0);
            if (now - lastLog >= 60) {
                // report progress
                if (lastLog) {
                    log() << "clone " << to_collection << ' ' << n << endl;
                }
                lastLog = now;
            }
            mayInterrupt(_mayBeInterrupted);
        }
        BSONObj js = i.nextSafe();
        ++n;
        if (isindex) {
            // Index specs only ever come from a system.indexes collection;
            // rewrite their ns via fixindex() and defer the actual build.
            verify(strstr(from_collection, "system.indexes"));
            storedForLater->push_back(fixindex(js, to_dbname).getOwned());
        } else {
            try {
                Client::ReadContext ctx(to_collection);
                if (_isCapped) {
                    // Capped collections are cloned with their original primary
                    // key: the source ships the PK in a synthetic "$_" field,
                    // which is split off here and the remaining fields are
                    // rebuilt into the row to insert.
                    NamespaceDetails *d = nsdetails(to_collection);
                    verify(d->isCapped());
                    BSONObj pk = js["$_"].Obj();
                    BSONObjBuilder rowBuilder;
                    BSONObjIterator it(js);
                    while (it.moreWithEOO()) {
                        BSONElement e = it.next();
                        if (e.eoo()) {
                            break;
                        }
                        if (!mongoutils::str::equals(e.fieldName(), "$_")) {
                            rowBuilder.append(e);
                        }
                    }
                    BSONObj row = rowBuilder.obj();
                    d->insertObjectIntoCappedWithPK(pk, row, NamespaceDetails::NO_LOCKTREE);
                } else {
                    insertObject(to_collection, js, 0, logForRepl);
                }
            } catch (UserException& e) {
                // Unlike older variants that warned and continued, a failed
                // insert here aborts the clone: log the object and rethrow.
                error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
                throw;
            }
            RARELY if ( time( 0 ) - saveLast > 60 ) {
                log() << n << " objects cloned so far from collection " << from_collection << endl;
                saveLast = time( 0 );
            }
        }
    }
}
/* copy the specified collection isindex - if true, this is system.indexes collection, in which we do some transformation when copying. */ void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, BSONObj query) { auto_ptr<DBClientCursor> c; { dbtemprelease r; c = conn->query( from_collection, query, 0, 0, 0, slaveOk ? Option_SlaveOk : 0 ); } assert( c.get() ); long long n = 0; time_t saveLast = time( 0 ); while ( 1 ) { { dbtemprelease r; if ( !c->more() ) break; } BSONObj tmp = c->next(); /* assure object is valid. note this will slow us down a good bit. */ if ( !tmp.valid() ) { out() << "skipping corrupt object from " << from_collection << '\n'; continue; } ++n; BSONObj js = tmp; if ( isindex ) { assert( strstr(from_collection, "system.indexes") ); js = fixindex(tmp); } try { theDataFileMgr.insert(to_collection, js); if ( logForRepl ) logOp("i", to_collection, js); } catch( UserException& e ) { log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n'; } RARELY if ( time( 0 ) - saveLast > 60 ) { log() << n << " objects cloned so far from collection " << from_collection << endl; saveLast = time( 0 ); } } }
// Batch callback: re-acquires the write lock, then inserts each document of
// the current batch into `to_collection`. Index specs are deferred into
// `storedForLater`; invalid BSON is skipped with a diagnostic.
void operator()( DBClientCursorBatchIterator &i ) {
    mongolock l( true );
    if ( context ) {
        // Re-validate the cached context now that we hold the lock again.
        context->relocked();
    }
    while( i.moreInCurrentBatch() ) {
        if ( n % 128 == 127 /*yield some*/ ) {
            time_t now = time(0);
            if( now - lastLog >= 60 ) {
                // report progress
                if( lastLog )
                    log() << "clone " << to_collection << ' ' << n << endl;
                lastLog = now;
            }
            mayInterrupt( _mayBeInterrupted );
            // Briefly release the lock (if yielding is allowed) so other
            // operations can make progress during a long clone.
            dbtempreleaseif t( _mayYield );
        }
        BSONObj tmp = i.nextSafe();
        /* assure object is valid.  note this will slow us down a little. */
        if ( !tmp.valid() ) {
            stringstream ss;
            ss << "Cloner: skipping corrupt object from " << from_collection;
            // The first element may itself be corrupt; validate defensively
            // so the diagnostic never throws.
            BSONElement e = tmp.firstElement();
            try {
                e.validate();
                ss << " firstElement: " << e;
            }
            catch( ... ) {
                ss << " firstElement corrupt";
            }
            out() << ss.str() << endl;
            continue;
        }
        ++n;
        BSONObj js = tmp;
        if ( isindex ) {
            // Index definitions are collected (owned copies) and built after
            // the data copy completes, not inserted inline.
            verify( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
            storedForLater->push_back( js.getOwned() );
            continue;
        }
        try {
            theDataFileMgr.insertWithObjMod(to_collection, js);
            if ( logForRepl )
                logOp("i", to_collection, js);
            getDur().commitIfNeeded();
        }
        catch( UserException& e ) {
            // Best effort: log and keep cloning the remaining documents.
            log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
        }
        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }
}
/* copy the specified collection isindex - if true, this is system.indexes collection, in which we do some transformation when copying. */ void Cloner::copy(const char *from_collection, const char *to_collection, bool isindex, bool logForRepl, bool masterSameProcess, bool slaveOk, Query query) { auto_ptr<DBClientCursor> c; { dbtemprelease r; c = conn->query( from_collection, query, 0, 0, 0, Option_NoCursorTimeout | ( slaveOk ? Option_SlaveOk : 0 ) ); } list<BSONObj> storedForLater; assert( c.get() ); long long n = 0; time_t saveLast = time( 0 ); while ( 1 ) { { dbtemprelease r; if ( !c->more() ) break; } BSONObj tmp = c->next(); /* assure object is valid. note this will slow us down a little. */ if ( !tmp.valid() ) { stringstream ss; ss << "skipping corrupt object from " << from_collection; BSONElement e = tmp.firstElement(); try { e.validate(); ss << " firstElement: " << e; } catch( ... ){ ss << " firstElement corrupt"; } out() << ss.str() << endl; continue; } ++n; BSONObj js = tmp; if ( isindex ) { assert( strstr(from_collection, "system.indexes") ); js = fixindex(tmp); storedForLater.push_back( js.getOwned() ); continue; } try { theDataFileMgr.insert(to_collection, js); if ( logForRepl ) logOp("i", to_collection, js); } catch( UserException& e ) { log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n'; } RARELY if ( time( 0 ) - saveLast > 60 ) { log() << n << " objects cloned so far from collection " << from_collection << endl; saveLast = time( 0 ); } } if ( storedForLater.size() ){ for ( list<BSONObj>::iterator i = storedForLater.begin(); i!=storedForLater.end(); i++ ){ BSONObj js = *i; try { theDataFileMgr.insert(to_collection, js); if ( logForRepl ) logOp("i", to_collection, js); } catch( UserException& e ) { log() << "warning: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n'; } } } }
// Batch callback: inserts each document of the batch under a global write
// lock and, when `_sortersForIndex` is set, feeds each inserted document's
// keys into the per-index pre-sort phase so index builds can be presorted.
void operator()( DBClientCursorBatchIterator &i ) {
    Lock::GlobalWrite lk;
    if ( context ) {
        // Re-validate the cached context after re-acquiring the lock.
        context->relocked();
    }
    while( i.moreInCurrentBatch() ) {
        if ( n % 128 == 127 /*yield some*/ ) {
            time_t now = time(0);
            if( now - lastLog >= 60 ) {
                // report progress
                if( lastLog )
                    log() << "clone " << to_collection << ' ' << n << endl;
                lastLog = now;
            }
            mayInterrupt( _mayBeInterrupted );
            // Optionally yield the lock so concurrent work can proceed.
            dbtempreleaseif t( _mayYield );
        }
        BSONObj tmp = i.nextSafe();
        /* assure object is valid.  note this will slow us down a little. */
        if ( !tmp.valid() ) {
            stringstream ss;
            ss << "Cloner: skipping corrupt object from " << from_collection;
            // Validate the first element defensively so the diagnostic
            // itself cannot throw on corrupt bytes.
            BSONElement e = tmp.firstElement();
            try {
                e.validate();
                ss << " firstElement: " << e;
            }
            catch( ... ) {
                ss << " firstElement corrupt";
            }
            out() << ss.str() << endl;
            continue;
        }
        ++n;
        BSONObj js = tmp;
        if ( isindex ) {
            // Index specs are deferred for a later build, not inserted now.
            verify( strstr(from_collection, "system.indexes") );
            js = fixindex(tmp);
            storedForLater->push_back( js.getOwned() );
            continue;
        }
        try {
            // add keys for presorting
            DiskLoc loc = theDataFileMgr.insertWithObjMod(to_collection, js);
            loc.assertOk();
            if (_sortersForIndex != NULL) {
                // add key to SortersForNS
                for (SortersForIndex::iterator iSorter = _sortersForIndex->begin();
                     iSorter != _sortersForIndex->end();
                     ++iSorter) {
                    iSorter->second.preSortPhase.addKeys(iSorter->second.spec, js, loc, false);
                }
            }
            if ( logForRepl )
                logOp("i", to_collection, js);
            getDur().commitIfNeeded();
        }
        catch( UserException& e ) {
            // A failed insert aborts the clone here (unlike the warn-and-
            // continue behavior of older variants): log, then rethrow.
            error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
            throw;
        }
        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << n << " objects cloned so far from collection " << from_collection << endl;
            saveLast = time( 0 );
        }
    }
}
// Batch callback (transactional variant): looks up or lazily creates the
// destination Collection, inserts each valid document via the txn, and
// queues index specs into `indexesToBuild`. The cached Collection pointer is
// dropped whenever the lock may have been yielded.
void operator()( DBClientCursorBatchIterator &i ) {
    Lock::GlobalWrite lk;
    context.relocked();

    bool createdCollection = false;
    Collection* collection = NULL;

    while( i.moreInCurrentBatch() ) {
        if ( numSeen % 128 == 127 /*yield some*/ ) {
            // The yield below may invalidate the cached Collection pointer,
            // so force a fresh lookup on the next iteration.
            collection = NULL;
            time_t now = time(0);
            if( now - lastLog >= 60 ) {
                // report progress
                if( lastLog )
                    log() << "clone " << to_collection << ' ' << numSeen << endl;
                lastLog = now;
            }
            mayInterrupt( _mayBeInterrupted );
            dbtempreleaseif t( _mayYield );
        }

        if ( isindex == false && collection == NULL ) {
            collection = context.db()->getCollection( to_collection );
            if ( !collection ) {
                // First miss: create the collection. A second miss after we
                // already created it means someone dropped it mid-clone.
                massert( 17321,
                         str::stream()
                         << "collection dropped during clone ["
                         << to_collection << "]",
                         !createdCollection );
                createdCollection = true;
                collection = context.db()->createCollection( txn, to_collection );
                verify( collection );
            }
        }

        BSONObj tmp = i.nextSafe();

        /* assure object is valid.  note this will slow us down a little. */
        const Status status = validateBSON(tmp.objdata(), tmp.objsize());
        if (!status.isOK()) {
            out() << "Cloner: skipping corrupt object from " << from_collection << ": " << status.reason();
            continue;
        }

        ++numSeen;

        BSONObj js = tmp;
        if ( isindex ) {
            // Index specs are rewritten for the destination db and queued;
            // they are not inserted here.
            verify(nsToCollectionSubstring(from_collection) == "system.indexes");
            js = fixindex(context.db()->name(), tmp);
            indexesToBuild->push_back( js.getOwned() );
            continue;
        }

        verify(nsToCollectionSubstring(from_collection) != "system.indexes");
        StatusWith<DiskLoc> loc = collection->insertDocument( txn, js, true );
        if ( !loc.isOK() ) {
            error() << "error: exception cloning object in " << from_collection << ' ' << loc.toString() << " obj:" << js;
        }
        // Abort the clone on insert failure.
        uassertStatusOK( loc.getStatus() );
        if ( logForRepl )
            logOp(txn, "i", to_collection, js);

        getDur().commitIfNeeded();

        RARELY if ( time( 0 ) - saveLast > 60 ) {
            log() << numSeen << " objects cloned so far from collection " << from_collection;
            saveLast = time( 0 );
        }
    }
}
// Batch callback (Collection-API variant): copies each document of the batch
// into `to_collection`, preserving the source primary key for capped
// collections (shipped in a synthetic "$_" field). Index specs are queued in
// `storedForLater` instead of being inserted inline.
void operator()(DBClientCursorBatchIterator &i) {
    const string to_dbname = nsToDatabase(to_collection);
    while (i.moreInCurrentBatch()) {
        // Every 128 documents: maybe log progress (at most once a minute)
        // and give the operation a chance to be interrupted.
        if (n % 128 == 127) {
            time_t now = time(0);
            if (now - lastLog >= 60) {
                // report progress
                if (lastLog) {
                    log() << "clone " << to_collection << ' ' << n << endl;
                }
                lastLog = now;
            }
            mayInterrupt(_mayBeInterrupted);
        }
        BSONObj js = i.nextSafe();
        ++n;
        if (isindex) {
            // Index specs only come from a system.indexes source; rewrite
            // them for the destination db and defer the build.
            verify(nsToCollectionSubstring(from_collection) == "system.indexes");
            storedForLater->push_back(fixindex(js, to_dbname).getOwned());
        } else {
            try {
                LOCK_REASON(lockReason, "cloner: copying documents into local collection");
                Client::ReadContext ctx(to_collection, lockReason);
                if (_isCapped) {
                    // Capped path: split the "$_" primary key out of the
                    // shipped object and rebuild the remaining fields into
                    // the row that gets inserted with that exact PK.
                    Collection *cl = getCollection(to_collection);
                    verify(cl->isCapped());
                    BSONObj pk = js["$_"].Obj();
                    BSONObjBuilder rowBuilder;
                    BSONObjIterator it(js);
                    while (it.moreWithEOO()) {
                        BSONElement e = it.next();
                        if (e.eoo()) {
                            break;
                        }
                        if (!mongoutils::str::equals(e.fieldName(), "$_")) {
                            rowBuilder.append(e);
                        }
                    }
                    BSONObj row = rowBuilder.obj();
                    CappedCollection *cappedCl = cl->as<CappedCollection>();
                    bool indexBitChanged = false;
                    cappedCl->insertObjectWithPK(pk, row, Collection::NO_LOCKTREE, &indexBitChanged);
                    // Hack copied from Collection::insertObject. TODO: find a better way to do this
                    if (indexBitChanged) {
                        cl->noteMultiKeyChanged();
                    }
                } else {
                    insertObject(to_collection, js, 0, logForRepl);
                }
            } catch (UserException& e) {
                // A failed insert aborts the clone: log the object, rethrow.
                error() << "error: exception cloning object in " << from_collection << ' ' << e.what() << " obj:" << js.toString() << '\n';
                throw;
            }
            RARELY if ( time( 0 ) - saveLast > 60 ) {
                log() << n << " objects cloned so far from collection " << from_collection << endl;
                saveLast = time( 0 );
            }
        }
    }
}