void run() {
    DurTransaction txn;
    create();
    ASSERT_EQUALS( 2, nExtents() );

    DiskLoc l[ 8 ];
    for ( int i = 0; i < 8; ++i ) {
        StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigObj(), true );
        ASSERT( status.isOK() );
        l[ i ] = status.getValue();
        ASSERT( !l[ i ].isNull() );
        //ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
        //if ( i > 3 )
        //    ASSERT( l[ i ] == l[ i - 4 ] );
    }
    ASSERT( nRecords() == 8 );

    // Too big
    BSONObjBuilder bob;
    bob.appendOID( "_id", NULL, true );
    bob.append( "a", string( MinExtentSize + 500, 'a' ) ); // min extent size is now 4096
    BSONObj bigger = bob.done();

    StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, false );
    ASSERT( !status.isOK() );
    ASSERT_EQUALS( 0, nRecords() );
}
void getMe(BSONObj& me) {
    const string myname = getHostName();

    Client::Transaction transaction(0);
    // local.me is an identifier for a server for getLastError w:2+
    if (!Collection::findOne("local.me", BSONObj(), me) ||
        !me.hasField("host") ||
        me["host"].String() != myname) {

        // cleaning out local.me requires write
        // lock. This is a rare operation, so it should
        // be ok
        if (!Lock::isWriteLocked("local")) {
            throw RetryWithWriteLock();
        }

        // clean out local.me
        deleteObjects("local.me", BSONObj(), false, false);

        // repopulate
        BSONObjBuilder b;
        b.appendOID( "_id" , 0 , true );
        b.append( "host", myname );
        me = b.obj();
        updateObjects("local.me", me, BSONObj(), true, false);
    }
    transaction.commit(0);
}
bool VersionManager::initShardVersionCB( DBClientBase * conn_in, BSONObj& result ){

    WriteBackListener::init( *conn_in );

    DBClientBase* conn = getVersionable( conn_in );
    verify( conn ); // errors thrown above

    BSONObjBuilder cmdBuilder;

    cmdBuilder.append( "setShardVersion" , "" );
    cmdBuilder.appendBool( "init", true );
    cmdBuilder.append( "configdb" , configServer.modelServer() );
    cmdBuilder.appendOID( "serverID" , &serverID );
    cmdBuilder.appendBool( "authoritative" , true );

    BSONObj cmd = cmdBuilder.obj();

    LOG(1) << "initializing shard connection to " << conn->toString() << endl;
    LOG(2) << "initial sharding settings : " << cmd << endl;

    bool ok = conn->runCommand( "admin" , cmd , result );

    // HACK for backwards compatibility with v1.8.x, v2.0.0 and v2.0.1
    // Result is false, but will still initialize serverID and configdb
    if( ! ok && ! result["errmsg"].eoo() &&
        ( result["errmsg"].String() == "need to specify namespace"/* 2.0.1/2 */ ||
          result["errmsg"].String() == "need to speciy namespace" /* 1.8 */ ))
    {
        ok = true;
    }

    LOG(3) << "initial sharding result : " << result << endl;

    return ok;
}
static BSONObj bigObj() {
    BSONObjBuilder b;
    b.appendOID("_id", 0, true);
    string as( 187, 'a' );
    b.append( "a", as );
    return b.obj();
}
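// A minimal sketch (not from the original sources) of the two appendOID modes
// that the snippets in this file rely on, assuming mongo's BSON headers and
// the mongo namespace; the function names and "x" field are invented for
// illustration only.
#include "mongo/bson/bsonobjbuilder.h" // header path varies across mongo versions

static BSONObj docWithGeneratedId() {
    BSONObjBuilder b;
    // Null OID pointer plus generateIfBlank=true: the builder creates a
    // fresh ObjectId for "_id" itself.
    b.appendOID( "_id" , 0 , true );
    b.append( "x" , 1 );
    return b.obj();
}

static BSONObj docWithExplicitId() {
    OID oid;
    oid.init(); // generate a new ObjectId value explicitly
    BSONObjBuilder b;
    // Non-null OID pointer: the given value is appended verbatim.
    b.appendOID( "_id" , &oid );
    b.append( "x" , 1 );
    return b.obj();
}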
void SyncSourceFeedback::ensureMe(OperationContext* txn) {
    string myname = getHostName();
    {
        Lock::DBLock dlk(txn->lockState(), "local", MODE_X);
        WriteUnitOfWork wunit(txn);
        Client::Context ctx(txn, "local");

        // local.me is an identifier for a server for getLastError w:2+
        if (!Helpers::getSingleton(txn, "local.me", _me) ||
            !_me.hasField("host") ||
            _me["host"].String() != myname) {

            // clean out local.me
            Helpers::emptyCollection(txn, "local.me");

            // repopulate
            BSONObjBuilder b;
            b.appendOID("_id", 0, true);
            b.append("host", myname);
            _me = b.obj();
            Helpers::putSingleton(txn, "local.me", _me);
        }
        wunit.commit();

        // _me is used outside of a read lock, so we must copy it out of the mmap
        _me = _me.getOwned();
    }
}
void _insert( Request& r , DbMessage& d, ChunkManager* manager ){
    while ( d.moreJSObjs() ){
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ){

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ){
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        Chunk& c = manager->findChunk( o );
        log(4) << " server:" << c.getShard() << " " << o << endl;
        insert( c.getShard() , r.getns() , o );

        c.splitIfShould( o.objsize() );
    }
}
BSONObj StorageEngine::getoid(string oid) {
    OID oidobj;
    oidobj.init(oid);
    BSONObjBuilder ob;
    ob.appendOID("_id", &oidobj);
    return ob.obj();
}
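// A minimal sketch of the OID round-trip that getoid() above relies on; the
// hex string and the verify() check are illustrative, assuming mongo's OID
// and BSON types. OID::init(string) parses 24 hex characters; OID::str()
// renders them back.
static void oidRoundTripExample() {
    OID oid;
    oid.init( "4f8a6a9c2b1e4d3c5a6b7c8d" ); // parse 24 hex characters
    BSONObjBuilder b;
    b.appendOID( "_id" , &oid );
    BSONObj query = b.obj(); // { _id: ObjectId("4f8a...") }
    verify( query["_id"].OID().str() == "4f8a6a9c2b1e4d3c5a6b7c8d" );
}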
bool initShardVersion( DBClientBase& conn_in, BSONObj& result ){

    WriteBackListener::init( conn_in );

    DBClientBase* conn = getVersionable( &conn_in );
    assert( conn ); // errors thrown above

    BSONObjBuilder cmdBuilder;

    cmdBuilder.append( "setShardVersion" , "" );
    cmdBuilder.appendBool( "init", true );
    cmdBuilder.append( "configdb" , configServer.modelServer() );
    cmdBuilder.appendOID( "serverID" , &serverID );
    cmdBuilder.appendBool( "authoritative" , true );

    BSONObj cmd = cmdBuilder.obj();

    LOG(1) << "initializing shard connection to " << conn->toString() << endl;
    LOG(2) << "initial sharding settings : " << cmd << endl;

    bool ok = conn->runCommand( "admin" , cmd , result );

    LOG(3) << "initial sharding result : " << result << endl;

    return ok;
}
bool VersionManager::initShardVersionCB( DBClientBase * conn_in, BSONObj& result ){

    WriteBackListener::init( *conn_in );

    bool ok;
    DBClientBase* conn = NULL;
    try {
        // May throw if replica set primary is down
        conn = getVersionable( conn_in );
        dassert( conn ); // errors thrown above

        BSONObjBuilder cmdBuilder;

        cmdBuilder.append( "setShardVersion" , "" );
        cmdBuilder.appendBool( "init", true );
        cmdBuilder.append( "configdb" , configServer.modelServer() );
        cmdBuilder.appendOID( "serverID" , &serverID );
        cmdBuilder.appendBool( "authoritative" , true );

        BSONObj cmd = cmdBuilder.obj();

        LOG(1) << "initializing shard connection to " << conn->toString() << endl;
        LOG(2) << "initial sharding settings : " << cmd << endl;

        ok = conn->runCommand("admin", cmd, result, 0);
    }
    catch( const DBException& ex ) {

        bool ignoreFailure = ShardConnection::ignoreInitialVersionFailure &&
                             conn_in->type() == ConnectionString::SET;
        if ( !ignoreFailure )
            throw;

        // Using initShardVersion is not strictly required when talking to replica sets - it is
        // preferred to do so because it registers mongos early with the mongod. This info is
        // also sent by checkShardVersion before a connection is used for a write or read.

        OCCASIONALLY {
            warning() << "failed to initialize new replica set connection version, "
                      << "will initialize on first use" << endl;
        }

        return true;
    }

    // HACK for backwards compatibility with v1.8.x, v2.0.0 and v2.0.1
    // Result is false, but will still initialize serverID and configdb
    if( ! ok && ! result["errmsg"].eoo() &&
        ( result["errmsg"].String() == "need to specify namespace"/* 2.0.1/2 */ ||
          result["errmsg"].String() == "need to speciy namespace" /* 1.8 */ ))
    {
        ok = true;
    }

    LOG(3) << "initial sharding result : " << result << endl;

    return ok;
}
BSONObj BatchedCommandResponse::toBSON() const {
    BSONObjBuilder builder;

    if (_isOkSet)
        builder.append(ok(), _ok);

    if (_isErrCodeSet)
        builder.append(errCode(), _errCode);

    if (_isErrMessageSet)
        builder.append(errMessage(), _errMessage);

    if (_isNModifiedSet)
        builder.appendNumber(nModified(), _nModified);
    if (_isNSet)
        builder.appendNumber(n(), _n);

    if (_upsertDetails.get()) {
        BSONArrayBuilder upsertedBuilder(builder.subarrayStart(upsertDetails()));
        for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
             it != _upsertDetails->end();
             ++it) {
            BSONObj upsertedDetailsDocument = (*it)->toBSON();
            upsertedBuilder.append(upsertedDetailsDocument);
        }
        upsertedBuilder.done();
    }

    if (_isLastOpSet) {
        if (_lastOp.getTerm() != repl::OpTime::kUninitializedTerm) {
            _lastOp.append(&builder, "opTime");
        } else {
            builder.append("opTime", _lastOp.getTimestamp());
        }
    }
    if (_isElectionIdSet)
        builder.appendOID(electionId(), const_cast<OID*>(&_electionId));

    if (_writeErrorDetails.get()) {
        BSONArrayBuilder errDetailsBuilder(builder.subarrayStart(writeErrors()));
        for (std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
             it != _writeErrorDetails->end();
             ++it) {
            BSONObj errDetailsDocument = (*it)->toBSON();
            errDetailsBuilder.append(errDetailsDocument);
        }
        errDetailsBuilder.done();
    }

    if (_wcErrDetails.get()) {
        builder.append(writeConcernError(), _wcErrDetails->toBSON());
    }

    return builder.obj();
}
void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags ) {
    ChunkManagerPtr manager = conf->getChunkManager(ns);
    if ( ! manager->hasShardKey( o ) ) {

        bool bad = true;

        if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
            BSONObjBuilder b;
            b.appendOID( "_id" , 0 , true );
            b.appendElements( o );
            o = b.obj();
            bad = ! manager->hasShardKey( o );
        }

        if ( bad ) {
            log() << "tried to insert object without shard key: " << ns << " " << o << endl;
            uasserted( 14842 , "tried to insert object without shard key" );
        }
    }

    // Many operations benefit from having the shard key early in the object
    o = manager->getShardKey().moveToFront(o);

    const int maxTries = 30;

    for ( int i=0; i<maxTries; i++ ) {
        try {
            ChunkPtr c = manager->findChunk( o );
            log(4) << " server:" << c->getShard().toString() << " " << o << endl;
            insert( c->getShard() , ns , o , flags);

            // r.gotInsert();
            // if ( r.getClientInfo()->autoSplitOk() )
            c->splitIfShould( o.objsize() );
            break;
        }
        catch ( StaleConfigException& e ) {
            int logLevel = i < ( maxTries / 2 );
            LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
            // r.reset();

            unsigned long long old = manager->getSequenceNumber();
            manager = conf->getChunkManager(ns);

            LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;

            if (!manager) {
                uasserted(14843, "collection no longer sharded");
            }
        }
        sleepmillis( i * 20 );
    }
}
void run(){
    int secsToSleep = 0;
    while ( 1 ){
        try {
            ScopedDbConnection conn( _addr );

            BSONObj result;

            {
                BSONObjBuilder cmd;
                cmd.appendOID( "writebacklisten" , &serverID );
                if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ){
                    log() << "writebacklisten command failed! " << result << endl;
                    conn.done();
                    continue;
                }
            }

            log(1) << "writebacklisten result: " << result << endl;

            BSONObj data = result.getObjectField( "data" );
            if ( data.getBoolField( "writeBack" ) ){
                string ns = data["ns"].valuestrsafe();

                int len;
                Message m( (void*)data["msg"].binData( len ) , false );
                massert( 10427 , "invalid writeback message" , m.header()->valid() );

                grid.getDBConfig( ns )->getChunkManager( ns , true );

                Request r( m , 0 );
                r.process();
            }
            else {
                log() << "unknown writeBack result: " << result << endl;
            }

            conn.done();
            secsToSleep = 0;
        }
        catch ( std::exception& e ){ // catch by reference to avoid slicing
            log() << "WriteBackListener exception : " << e.what() << endl;
        }
        catch ( ... ){
            log() << "WriteBackListener uncaught exception!" << endl;
        }
        secsToSleep++;
        sleepsecs(secsToSleep);
        if ( secsToSleep > 10 )
            secsToSleep = 0;
    }
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    while ( d.moreJSObjs() ) {
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ) {

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ) {
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        // Many operations benefit from having the shard key early in the object
        o = manager->getShardKey().moveToFront(o);

        const int maxTries = 10;

        bool gotThrough = false;
        for ( int i=0; i<maxTries; i++ ) {
            try {
                ChunkPtr c = manager->findChunk( o );
                log(4) << " server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );

                r.gotInsert();
                if ( r.getClientInfo()->autoSplitOk() )
                    c->splitIfShould( o.objsize() );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& e ) {
                log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                r.reset();
                manager = r.getChunkManager();
                uassert(14804, "collection no longer sharded", manager);
            }
            sleepmillis( i * 200 );
        }

        assert( inShutdown() || gotThrough );
    }
}
void insert( const BSONObj &o ) {
    if ( o["_id"].eoo() ) {
        BSONObjBuilder b;
        OID oid;
        oid.init();
        b.appendOID( "_id", &oid );
        b.appendElements( o );
        _collection->insertDocument( &_txn, b.obj(), false );
    }
    else {
        _collection->insertDocument( &_txn, o, false );
    }
}
void insert(const char* s) {
    WriteUnitOfWork wunit(&_txn);
    const BSONObj o = fromjson(s);
    if (o["_id"].eoo()) {
        BSONObjBuilder b;
        OID oid;
        oid.init();
        b.appendOID("_id", &oid);
        b.appendElements(o);
        _collection->insertDocument(&_txn, b.obj(), false);
    } else {
        _collection->insertDocument(&_txn, o, false);
    }
    wunit.commit();
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    while ( d.moreJSObjs() ){
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ){

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ){
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        bool gotThrough = false;
        for ( int i=0; i<10; i++ ){
            try {
                ChunkPtr c = manager->findChunk( o );
                log(4) << " server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );

                r.gotInsert();
                c->splitIfShould( o.objsize() );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& ){
                log(1) << "retrying insert because of StaleConfigException: " << o << endl;
                r.reset();
                manager = r.getChunkManager();
            }
            sleepmillis( i * 200 );
        }

        assert( gotThrough );
    }
}
void insert(const char* s) {
    WriteUnitOfWork wunit(&_opCtx);
    const BSONObj o = fromjson(s);
    OpDebug* const nullOpDebug = nullptr;
    if (o["_id"].eoo()) {
        BSONObjBuilder b;
        OID oid;
        oid.init();
        b.appendOID("_id", &oid);
        b.appendElements(o);
        _collection->insertDocument(&_opCtx, b.obj(), nullOpDebug, false);
    } else {
        _collection->insertDocument(&_opCtx, o, nullOpDebug, false);
    }
    wunit.commit();
}
void _groupInserts( ChunkManagerPtr manager, vector<BSONObj>& inserts, map<ChunkPtr,vector<BSONObj> >& insertsForChunks ){

    // Redo all inserts for chunks which have changed
    map<ChunkPtr,vector<BSONObj> >::iterator i = insertsForChunks.begin();
    while( ! insertsForChunks.empty() && i != insertsForChunks.end() ){
        if( ! manager->compatibleWith( i->first ) ){
            inserts.insert( inserts.end(), i->second.begin(), i->second.end() );
            insertsForChunks.erase( i++ );
        }
        else ++i;
    }

    // Figure out inserts we haven't chunked yet
    for( vector<BSONObj>::iterator i = inserts.begin(); i != inserts.end(); ++i ){

        BSONObj o = *i;

        if ( ! manager->hasShardKey( o ) ) {

            bool bad = true;

            // Add autogenerated _id to item and see if we now have a shard key
            if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ) {
                // TODO:
                log() << "tried to insert object with no valid shard key for "
                      << manager->getShardKey() << " : " << o << endl;
                uassert( 8011,
                         str::stream() << "tried to insert object with no valid shard key for "
                                       << manager->getShardKey().toString() << " : " << o.toString(),
                         false );
            }
        }

        // Many operations benefit from having the shard key early in the object
        o = manager->getShardKey().moveToFront(o);
        insertsForChunks[manager->findChunk(o)].push_back(o);
    }

    inserts.clear();
}
bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result ){

    BSONObjBuilder cmdBuilder;
    cmdBuilder.append( "setShardVersion" , ns.c_str() );
    cmdBuilder.append( "configdb" , configServer.modelServer() );
    cmdBuilder.appendTimestamp( "version" , version );
    cmdBuilder.appendOID( "serverID" , &serverID );
    if ( authoritative )
        cmdBuilder.appendBool( "authoritative" , 1 );

    Shard s = Shard::make( conn.getServerAddress() );
    cmdBuilder.append( "shard" , s.getName() );
    cmdBuilder.append( "shardHost" , s.getConnString() );

    BSONObj cmd = cmdBuilder.obj();

    log(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress()
           << " " << ns << " " << cmd << " " << &conn << endl;

    return conn.runCommand( "admin" , cmd , result );
}
void Model::save( bool check ){
    ScopedDbConnection conn( modelServer() );

    BSONObjBuilder b;
    serialize( b );

    if ( _id.isEmpty() ){
        OID oid;
        oid.init();
        b.appendOID( "_id" , &oid );

        BSONObj o = b.obj();
        conn->insert( getNS() , o );
        _id = o["_id"].wrap().getOwned();

        log(4) << "inserted new model " << getNS() << " " << o << endl;
    }
    else {
        BSONElement id = _id["_id"];
        b.append( id );

        BSONObjBuilder qb;
        qb.append( id );

        BSONObj q = qb.obj();
        BSONObj o = b.obj();

        log(4) << "updated old model " << getNS() << " " << q << " " << o << endl;

        conn->update( getNS() , q , o );
    }

    string errmsg = "";
    if ( check )
        errmsg = conn->getLastError();

    conn.done();

    if ( check && errmsg.size() )
        throw UserException( (string)"error on Model::save: " + errmsg );
}
void SyncSourceFeedback::ensureMe() {
    string myname = getHostName();
    {
        Client::WriteContext ctx("local");

        // local.me is an identifier for a server for getLastError w:2+
        if (!Helpers::getSingleton("local.me", _me) ||
            !_me.hasField("host") ||
            _me["host"].String() != myname) {

            // clean out local.me
            Helpers::emptyCollection("local.me");

            // repopulate
            BSONObjBuilder b;
            b.appendOID("_id", 0, true);
            b.append("host", myname);
            _me = b.obj();
            Helpers::putSingleton("local.me", _me);
        }
    }
}
// Adds a wOpTime and a wElectionId field to a set of gle options
static BSONObj buildGLECmdWithOpTime( const BSONObj& gleOptions,
                                      const OpTime& opTime,
                                      const OID& electionId ) {
    BSONObjBuilder builder;
    BSONObjIterator it( gleOptions );

    for ( int i = 0; it.more(); ++i ) {
        BSONElement el = it.next();

        // Make sure first element is getLastError : 1
        if ( i == 0 ) {
            StringData elName( el.fieldName() );
            if ( !elName.equalCaseInsensitive( "getLastError" ) ) {
                builder.append( "getLastError", 1 );
            }
        }

        builder.append( el );
    }
    builder.appendTimestamp( "wOpTime", opTime.asDate() );
    builder.appendOID( "wElectionId", const_cast<OID*>(&electionId) );
    return builder.obj();
}
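// A hedged usage sketch for buildGLECmdWithOpTime above; the write concern,
// OpTime values, and election id are illustrative placeholders (legacy
// OpTime(secs, increment) constructor and OID::gen() assumed).
static BSONObj exampleGLECmdWithOpTime() {
    OpTime opTime( 1315352999 , 1 ); // hypothetical (secs, increment)
    OID electionId = OID::gen();
    // Yields { getLastError: 1, w: 2, wOpTime: <ts>, wElectionId: <oid> }
    return buildGLECmdWithOpTime( BSON( "getLastError" << 1 << "w" << 2 ),
                                  opTime , electionId );
}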
bool replHandshake(DBClientConnection *conn) {
    BSONObj me;
    {
        dblock l;
        // local.me is an identifier for a server for getLastError w:2+
        if ( ! Helpers::getSingleton( "local.me" , me ) ) {
            BSONObjBuilder b;
            b.appendOID( "_id" , 0 , true );
            me = b.obj();
            Helpers::putSingleton( "local.me" , me );
        }
    }

    BSONObjBuilder cmd;
    cmd.appendAs( me["_id"] , "handshake" );

    BSONObj res;
    bool ok = conn->runCommand( "admin" , cmd.obj() , res );
    // ignoring for now on purpose for older versions
    log(ok) << "replHandshake res not: " << ok << " res: " << res << endl;
    return true;
}
bool replHandshake(DBClientConnection *conn) {
    string myname = getHostName();

    BSONObj me;
    {
        Lock::DBWrite l("local");
        // local.me is an identifier for a server for getLastError w:2+
        if ( ! Helpers::getSingleton( "local.me" , me ) ||
             ! me.hasField("host") ||
             me["host"].String() != myname ) {

            // clean out local.me
            Helpers::emptyCollection("local.me");

            // repopulate
            BSONObjBuilder b;
            b.appendOID( "_id" , 0 , true );
            b.append( "host", myname );
            me = b.obj();
            Helpers::putSingleton( "local.me" , me );
        }
    }

    BSONObjBuilder cmd;
    cmd.appendAs( me["_id"] , "handshake" );
    if (theReplSet) {
        cmd.append("member", theReplSet->selfId());
        cmd.append("config", theReplSet->myConfig().asBson());
    }

    BSONObj res;
    bool ok = conn->runCommand( "admin" , cmd.obj() , res );
    // ignoring for now on purpose for older versions
    LOG( ok ? 1 : 0 ) << "replHandshake res not: " << ok << " res: " << res << endl;
    return true;
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.
    map<ChunkPtr, vector<BSONObj> > insertsForChunk; // Group bulk insert for appropriate shards
    try {
        while ( d.moreJSObjs() ) {
            BSONObj o = d.nextJsObj();
            if ( ! manager->hasShardKey( o ) ) {

                bool bad = true;

                if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                    BSONObjBuilder b;
                    b.appendOID( "_id" , 0 , true );
                    b.appendElements( o );
                    o = b.obj();
                    bad = ! manager->hasShardKey( o );
                }

                if ( bad ) {
                    log() << "tried to insert object with no valid shard key: " << r.getns() << " " << o << endl;
                    uasserted( 8011 , "tried to insert object with no valid shard key" );
                }
            }

            // Many operations benefit from having the shard key early in the object
            o = manager->getShardKey().moveToFront(o);
            insertsForChunk[manager->findChunk(o)].push_back(o);
        }

        for (map<ChunkPtr, vector<BSONObj> >::iterator it = insertsForChunk.begin();
             it != insertsForChunk.end();
             ++it) {

            ChunkPtr c = it->first;
            vector<BSONObj> objs = it->second;

            const int maxTries = 30;
            bool gotThrough = false;

            for ( int i=0; i<maxTries; i++ ) {
                try {
                    LOG(4) << " server:" << c->getShard().toString() << " bulk insert "
                           << objs.size() << " documents" << endl;
                    insert( c->getShard() , r.getns() , objs , flags);

                    int bytesWritten = 0;
                    for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                        r.gotInsert(); // Record the correct number of individual inserts
                        bytesWritten += (*vecIt).objsize();
                    }

                    if ( r.getClientInfo()->autoSplitOk() )
                        c->splitIfShould( bytesWritten );

                    gotThrough = true;
                    break;
                }
                catch ( StaleConfigException& e ) {
                    int logLevel = i < ( maxTries / 2 );
                    LOG( logLevel ) << "retrying bulk insert of " << objs.size()
                                    << " documents because of StaleConfigException: " << e << endl;
                    r.reset();

                    manager = r.getChunkManager();
                    if( ! manager ) {
                        uasserted(14804, "collection no longer sharded");
                    }

                    unsigned long long old = manager->getSequenceNumber();
                    LOG( logLevel ) << " sequence number - old: " << old
                                    << " new: " << manager->getSequenceNumber() << endl;
                }
                sleepmillis( i * 20 );
            }

            assert( inShutdown() || gotThrough ); // not caught below
        }
    }
    catch (const UserException&){
        if (!d.moreJSObjs()){
            throw;
        }
        // Ignore and keep going. ContinueOnError is implied with sharding.
    }
}
void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name , const string sname , v8::Handle<v8::Value> value ){

    if ( value->IsString() ){
        if ( sname == "$where" )
            b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        else
            b.append( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }

    if ( value->IsFunction() ){
        b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }

    if ( value->IsNumber() ){
        b.append( sname.c_str() , value->ToNumber()->Value() );
        return;
    }

    if ( value->IsArray() ){
        BSONObj sub = v8ToMongo( value->ToObject() );
        b.appendArray( sname.c_str() , sub );
        return;
    }

    if ( value->IsDate() ){
        b.appendDate( sname.c_str() , (unsigned long long)(v8::Date::Cast( *value )->NumberValue()) );
        return;
    }

    if ( value->IsObject() ){
        string s = toSTLString( value );
        if ( s.size() && s[0] == '/' ){
            s = s.substr( 1 );
            string r = s.substr( 0 , s.find( "/" ) );
            string o = s.substr( s.find( "/" ) + 1 );
            b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
        }
        else if ( value->ToObject()->GetPrototype()->IsObject() &&
                  value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( String::New( "isObjectId" ) ) ){
            OID oid;
            oid.init( toSTLString( value ) );
            b.appendOID( sname.c_str() , &oid );
        }
        else {
            BSONObj sub = v8ToMongo( value->ToObject() );
            b.append( sname.c_str() , sub );
        }
        return;
    }

    if ( value->IsBoolean() ){
        b.appendBool( sname.c_str() , value->ToBoolean()->Value() );
        return;
    }
    else if ( value->IsUndefined() ){
        return;
    }
    else if ( value->IsNull() ){
        b.appendNull( sname.c_str() );
        return;
    }

    cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
}
void WriteBackListener::run() {
    int secsToSleep = 0;
    while ( ! inShutdown() ) {

        if ( ! Shard::isAShardNode( _addr ) ) {
            log(1) << _addr << " is not a shard node" << endl;
            sleepsecs( 60 );
            continue;
        }

        try {
            ScopedDbConnection conn( _addr );

            BSONObj result;

            {
                BSONObjBuilder cmd;
                cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
                if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
                    log() << "writebacklisten command failed! " << result << endl;
                    conn.done();
                    continue;
                }
            }

            log(1) << "writebacklisten result: " << result << endl;

            BSONObj data = result.getObjectField( "data" );

            if ( data.getBoolField( "writeBack" ) ) {
                string ns = data["ns"].valuestrsafe();

                ConnectionIdent cid( "" , 0 );
                OID wid;
                if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ) {
                    string s = "";
                    if ( data["instanceIdent"].type() == String )
                        s = data["instanceIdent"].String();
                    cid = ConnectionIdent( s , data["connectionId"].numberLong() );
                    wid = data["id"].OID();
                }
                else {
                    warning() << "mongos/mongod version mismatch (1.7.5 is the split)" << endl;
                }

                int len; // not used, but needed for next call
                Message m( (void*)data["msg"].binData( len ) , false );
                massert( 10427 , "invalid writeback message" , m.header()->valid() );

                DBConfigPtr db = grid.getDBConfig( ns );
                ShardChunkVersion needVersion( data["version"] );

                LOG(1) << "connectionId: " << cid << " writebackId: " << wid
                       << " needVersion : " << needVersion.toString()
                       << " mine : " << db->getChunkManager( ns )->getVersion().toString()
                       << endl; // TODO change to log(3)

                if ( logLevel ) log(1) << debugString( m ) << endl;

                if ( needVersion.isSet() && needVersion <= db->getChunkManager( ns )->getVersion() ) {
                    // this means when the write went originally, the version was old
                    // if we're here, it means we've already updated the config, so don't need to do again
                    //db->getChunkManager( ns , true ); // SERVER-1349
                }
                else {
                    // we received a writeback object that was sent to a previous version of a shard
                    // the actual shard may not have the object the writeback operation is for
                    // we need to reload the chunk manager and get the new shard versions
                    db->getChunkManager( ns , true );
                }

                // do request and then call getLastError
                // we have to call getLastError so we can return the right fields to the user if they decide to call getLastError

                BSONObj gle;
                try {
                    Request r( m , 0 );
                    r.init();

                    ClientInfo * ci = r.getClientInfo();
                    if (!noauth) {
                        ci->getAuthenticationInfo()->authorize("admin", internalSecurity.user);
                    }
                    ci->noAutoSplit();

                    r.process();

                    ci->newRequest(); // this so we flip prev and cur shards

                    BSONObjBuilder b;
                    if ( ! ci->getLastError( BSON( "getLastError" << 1 ) , b , true ) ) {
                        b.appendBool( "commandFailed" , true );
                    }
                    gle = b.obj();

                    ci->clearSinceLastGetError();
                }
                catch ( DBException& e ) {
                    error() << "error processing writeback: " << e << endl;
                    BSONObjBuilder b;
                    b.append( "err" , e.toString() );
                    e.getInfo().append( b );
                    gle = b.obj();
                }

                {
                    scoped_lock lk( _seenWritebacksLock );
                    WBStatus& s = _seenWritebacks[cid];
                    s.id = wid;
                    s.gle = gle;
                }
            }
            else if ( result["noop"].trueValue() ) {
                // no-op
            }
            else {
                log() << "unknown writeBack result: " << result << endl;
            }

            conn.done();
            secsToSleep = 0;
            continue;
        }
        catch ( std::exception& e ) {

            if ( inShutdown() ) {
                // we're shutting down, so just clean up
                return;
            }

            log() << "WriteBackListener exception : " << e.what() << endl;

            // It's possible this shard was removed
            Shard::reloadShardInfo();
        }
        catch ( ... ) {
            log() << "WriteBackListener uncaught exception!" << endl;
        }

        secsToSleep++;
        sleepsecs(secsToSleep);
        if ( secsToSleep > 10 )
            secsToSleep = 0;
    }

    log() << "WriteBackListener exiting : address no longer in cluster " << _addr;
}
void WriteBackListener::run() {

    int secsToSleep = 0;
    scoped_ptr<ChunkVersion> lastNeededVersion;
    int lastNeededCount = 0;
    bool needsToReloadShardInfo = false;

    while ( ! inShutdown() ) {

        if ( ! Shard::isAShardNode( _addr ) ) {
            LOG(1) << _addr << " is not a shard node" << endl;
            sleepsecs( 60 );
            continue;
        }

        try {
            if (needsToReloadShardInfo) {
                // It's possible this shard was removed
                Shard::reloadShardInfo();
                needsToReloadShardInfo = false;
            }

            scoped_ptr<ScopedDbConnection> conn(
                    ScopedDbConnection::getInternalScopedDbConnection( _addr ) );

            BSONObj result;

            {
                BSONObjBuilder cmd;
                cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
                if ( ! conn->get()->runCommand( "admin" , cmd.obj() , result ) ) {
                    result = result.getOwned();
                    log() << "writebacklisten command failed! " << result << endl;
                    conn->done();
                    continue;
                }
            }

            conn->done();

            LOG(1) << "writebacklisten result: " << result << endl;

            BSONObj data = result.getObjectField( "data" );

            if ( data.getBoolField( "writeBack" ) ) {
                string ns = data["ns"].valuestrsafe();

                ConnectionIdent cid( "" , 0 );
                OID wid;
                if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ) {
                    string s = "";
                    if ( data["instanceIdent"].type() == String )
                        s = data["instanceIdent"].String();
                    cid = ConnectionIdent( s , data["connectionId"].numberLong() );
                    wid = data["id"].OID();
                }
                else {
                    warning() << "mongos/mongod version mismatch (1.7.5 is the split)" << endl;
                }

                int len; // not used, but needed for next call
                Message msg( (void*)data["msg"].binData( len ) , false );
                massert( 10427 , "invalid writeback message" , msg.header()->valid() );

                DBConfigPtr db = grid.getDBConfig( ns );
                ChunkVersion needVersion = ChunkVersion::fromBSON( data, "version" );

                //
                // TODO: Refactor the sharded strategy to correctly handle all sharding state changes itself,
                // we can't rely on WBL to do this for us b/c anything could reset our state in-between.
                // We should always reload here for efficiency when possible, but staleness is also caught in the
                // loop below.
                //

                ChunkManagerPtr manager;
                ShardPtr primary;
                db->getChunkManagerOrPrimary( ns, manager, primary );

                ChunkVersion currVersion;
                if( manager ) currVersion = manager->getVersion();

                LOG(1) << "connectionId: " << cid << " writebackId: " << wid
                       << " needVersion : " << needVersion.toString()
                       << " mine : " << currVersion.toString() << endl;

                LOG(1) << msg.toString() << endl;

                //
                // We should reload only if we need to update our version to be compatible *and* we
                // haven't already done so. This avoids lots of reloading when we remove/add a sharded collection
                //

                bool alreadyReloaded = lastNeededVersion &&
                                       lastNeededVersion->isEquivalentTo( needVersion );

                if( alreadyReloaded ){
                    LOG(1) << "wbl already reloaded config information for version "
                           << needVersion << ", at version " << currVersion << endl;
                }
                else if( lastNeededVersion ) {
                    log() << "new version change detected to " << needVersion.toString()
                          << ", " << lastNeededCount << " writebacks processed at "
                          << lastNeededVersion->toString() << endl;
                    lastNeededCount = 0;
                }

                //
                // Set our lastNeededVersion for next time
                //

                lastNeededVersion.reset( new ChunkVersion( needVersion ) );
                lastNeededCount++;

                //
                // Determine if we should reload, if so, reload
                //

                bool shouldReload = ! needVersion.isWriteCompatibleWith( currVersion ) &&
                                    ! alreadyReloaded;

                if( shouldReload && currVersion.isSet()
                                 && needVersion.isSet()
                                 && currVersion.hasCompatibleEpoch( needVersion ) )
                {
                    //
                    // If we disagree about versions only, reload the chunk manager
                    //

                    db->getChunkManagerIfExists( ns, true );
                }
                else if( shouldReload ){
                    //
                    // If we disagree about anything else, reload the full db
                    //

                    warning() << "reloading config data for " << db->getName() << ", "
                              << "wanted version " << needVersion.toString()
                              << " but currently have version " << currVersion.toString() << endl;

                    db->reload();
                }

                // do request and then call getLastError
                // we have to call getLastError so we can return the right fields to the user if they decide to call getLastError

                BSONObj gle;
                int attempts = 0;
                while ( true ) {
                    attempts++;

                    try {
                        Request r( msg , 0 );
                        r.init();

                        r.d().reservedField() |= Reserved_FromWriteback;

                        ClientInfo * ci = r.getClientInfo();
                        if (!noauth) {
                            ci->getAuthorizationManager()->grantInternalAuthorization(
                                    "_writebackListener");
                        }
                        ci->noAutoSplit();

                        r.process( attempts );

                        ci->newRequest(); // this so we flip prev and cur shards

                        BSONObjBuilder b;
                        string errmsg;
                        if ( ! ci->getLastError( "admin",
                                                 BSON( "getLastError" << 1 ),
                                                 b, errmsg, true ) )
                        {
                            b.appendBool( "commandFailed" , true );

                            if( ! b.hasField( "errmsg" ) ){
                                b.append( "errmsg", errmsg );
                                gle = b.obj();
                            }
                            else if( errmsg.size() > 0 ){

                                // Rebuild GLE object with errmsg
                                // TODO: Make this less clumsy by improving GLE interface
                                gle = b.obj();

                                if( gle["errmsg"].type() == String ){

                                    BSONObj gleNoErrmsg =
                                            gle.filterFieldsUndotted( BSON( "errmsg" << 1 ), false );
                                    BSONObjBuilder bb;
                                    bb.appendElements( gleNoErrmsg );
                                    bb.append( "errmsg", gle["errmsg"].String() +
                                                         " ::and:: " +
                                                         errmsg );
                                    gle = bb.obj().getOwned();
                                }
                            }
                        }
                        else{
                            gle = b.obj();
                        }

                        if ( gle["code"].numberInt() == 9517 ) {

                            log() << "new version change detected, "
                                  << lastNeededCount << " writebacks processed previously" << endl;

                            lastNeededVersion.reset();
                            lastNeededCount = 1;

                            log() << "writeback failed because of stale config, retrying attempts: "
                                  << attempts << endl;
                            LOG(1) << "writeback error : " << gle << endl;

                            //
                            // Bringing this in line with the similar retry logic elsewhere
                            //
                            // TODO: Reloading the chunk manager may not help if we dropped a
                            // collection, but we don't actually have that info in the writeback
                            // error
                            //

                            if( attempts <= 2 ){
                                db->getChunkManagerIfExists( ns, true );
                            }
                            else{
                                versionManager.forceRemoteCheckShardVersionCB( ns );
                                sleepsecs( attempts - 1 );
                            }

                            uassert( 15884, str::stream()
                                     << "Could not reload chunk manager after " << attempts
                                     << " attempts.", attempts <= 4 );

                            continue;
                        }

                        ci->clearSinceLastGetError();
                    }
                    catch ( DBException& e ) {
                        error() << "error processing writeback: " << e << endl;
                        BSONObjBuilder b;
                        e.getInfo().append( b, "err", "code" );
                        gle = b.obj();
                    }

                    break;
                }

                {
                    scoped_lock lk( _seenWritebacksLock );
                    WBStatus& s = _seenWritebacks[cid];
                    s.id = wid;
                    s.gle = gle;
                }
            }
            else if ( result["noop"].trueValue() ) {
                // no-op
            }
            else {
                log() << "unknown writeBack result: " << result << endl;
            }

            secsToSleep = 0;
            continue;
        }
        catch ( std::exception& e ) {
            // Attention! Do not call any method that would throw an exception
            // (or assert) in this block.

            if ( inShutdown() ) {
                // we're shutting down, so just clean up
                return;
            }

            log() << "WriteBackListener exception : " << e.what() << endl;

            needsToReloadShardInfo = true;
        }
        catch ( ... ) {
            log() << "WriteBackListener uncaught exception!" << endl;
        }

        secsToSleep++;
        sleepsecs(secsToSleep);
        if ( secsToSleep > 10 )
            secsToSleep = 0;
    }

    log() << "WriteBackListener exiting : address no longer in cluster " << _addr;
}
bool VersionManager::initShardVersionCB( DBClientBase * conn_in, BSONObj& result ){

    WriteBackListener::init( *conn_in );

    bool ok;
    DBClientBase* conn = NULL;
    try {
        // May throw if replica set primary is down
        conn = getVersionable( conn_in );
        dassert( conn ); // errors thrown above

        BSONObjBuilder cmdBuilder;

        cmdBuilder.append( "setShardVersion" , "" );
        cmdBuilder.appendBool( "init", true );
        cmdBuilder.append( "configdb" , configServer.modelServer() );
        cmdBuilder.appendOID( "serverID" , &serverID );
        cmdBuilder.appendBool( "authoritative" , true );

        BSONObj cmd = cmdBuilder.obj();

        LOG(1) << "initializing shard connection to " << conn->toString() << endl;
        LOG(2) << "initial sharding settings : " << cmd << endl;

        ok = conn->runCommand("admin", cmd, result, 0);
    }
    catch( const DBException& ) {

        if ( conn_in->type() != ConnectionString::SET ) {
            throw;
        }

        // NOTE: Only old-style cluster operations will talk via DBClientReplicaSets - using
        // checkShardVersion is required (which includes initShardVersion information) if these
        // connections are used.

        OCCASIONALLY {
            warning() << "failed to initialize new replica set connection version, "
                      << "will initialize on first use" << endl;
        }

        return true;
    }

    // HACK for backwards compatibility with v1.8.x, v2.0.0 and v2.0.1
    // Result is false, but will still initialize serverID and configdb
    if( ! ok && ! result["errmsg"].eoo() &&
        ( result["errmsg"].String() == "need to specify namespace"/* 2.0.1/2 */ ||
          result["errmsg"].String() == "need to speciy namespace" /* 1.8 */ ))
    {
        ok = true;
    }

    // Record the connection wire version if sent in the response, initShardVersion is a
    // handshake for mongos->mongod connections.
    if ( !result["minWireVersion"].eoo() ) {

        int minWireVersion = result["minWireVersion"].numberInt();
        int maxWireVersion = result["maxWireVersion"].numberInt();
        conn->setWireVersions( minWireVersion, maxWireVersion );
    }

    LOG(3) << "initial sharding result : " << result << endl;

    return ok;
}
void pass(int p) {
    DurTransaction txn;
    create();
    ASSERT_EQUALS( 2, nExtents() );

    BSONObj b = bigObj();

    int N = MinExtentSize / b.objsize() * nExtents() + 5;
    int T = N - 4;

    DiskLoc truncAt;
    //DiskLoc l[ 8 ];
    for ( int i = 0; i < N; ++i ) {
        BSONObj bb = bigObj();
        StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bb, true );
        ASSERT( status.isOK() );
        DiskLoc a = status.getValue();
        if( T == i )
            truncAt = a;
        ASSERT( !a.isNull() );
        /*ASSERT_EQUALS( i < 2 ? i + 1 : 3 + i % 2, nRecords() );
        if ( i > 3 )
            ASSERT( l[ i ] == l[ i - 4 ] );*/
    }
    ASSERT( nRecords() < N );

    DiskLoc last, first;
    {
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::BACKWARD));
        runner->getNext(NULL, &last);
        ASSERT( !last.isNull() );
    }
    {
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::FORWARD));
        runner->getNext(NULL, &first);
        ASSERT( !first.isNull() );
        ASSERT( first != last );
    }

    collection()->temp_cappedTruncateAfter(&txn, truncAt, false);
    ASSERT_EQUALS( collection()->numRecords() , 28u );

    {
        DiskLoc loc;
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::FORWARD));
        runner->getNext(NULL, &loc);
        ASSERT( first == loc);
    }
    {
        auto_ptr<Runner> runner(InternalPlanner::collectionScan(ns(),
                                                                collection(),
                                                                InternalPlanner::BACKWARD));
        DiskLoc loc;
        runner->getNext(NULL, &loc);
        ASSERT( last != loc );
        ASSERT( !last.isNull() );
    }

    // Too big
    BSONObjBuilder bob;
    bob.appendOID("_id", 0, true);
    bob.append( "a", string( MinExtentSize + 300, 'a' ) );
    BSONObj bigger = bob.done();

    StatusWith<DiskLoc> status = collection()->insertDocument( &txn, bigger, true );
    ASSERT( !status.isOK() );
    ASSERT_EQUALS( 0, nRecords() );
}