void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    while ( d.moreJSObjs() ){
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ){

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ){
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        ChunkPtr c = manager->findChunk( o );
        log(4) << " server:" << c->getShard().toString() << " " << o << endl;
        insert( c->getShard() , r.getns() , o );

        r.gotInsert();
        c->splitIfShould( o.objsize() );
    }
}
void insertSharded( DBConfigPtr conf, const char* ns, BSONObj& o, int flags ) {
    ChunkManagerPtr manager = conf->getChunkManager(ns);
    if ( ! manager->hasShardKey( o ) ) {

        bool bad = true;

        if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
            BSONObjBuilder b;
            b.appendOID( "_id" , 0 , true );
            b.appendElements( o );
            o = b.obj();
            bad = ! manager->hasShardKey( o );
        }

        if ( bad ) {
            log() << "tried to insert object without shard key: " << ns << " " << o << endl;
            uasserted( 14842 , "tried to insert object without shard key" );
        }
    }

    // Many operations benefit from having the shard key early in the object
    o = manager->getShardKey().moveToFront(o);

    const int maxTries = 30;

    for ( int i=0; i<maxTries; i++ ) {
        try {
            ChunkPtr c = manager->findChunk( o );
            log(4) << " server:" << c->getShard().toString() << " " << o << endl;
            insert( c->getShard() , ns , o , flags);

            // r.gotInsert();
            // if ( r.getClientInfo()->autoSplitOk() )
            c->splitIfShould( o.objsize() );
            break;
        }
        catch ( StaleConfigException& e ) {
            int logLevel = i < ( maxTries / 2 );
            LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
            // r.reset();

            unsigned long long old = manager->getSequenceNumber();
            manager = conf->getChunkManager(ns);

            LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;

            if (!manager) {
                uasserted(14843, "collection no longer sharded");
            }
        }
        sleepmillis( i * 20 );
    }
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    while ( d.moreJSObjs() ) {
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ) {

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ) {
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        // Many operations benefit from having the shard key early in the object
        o = manager->getShardKey().moveToFront(o);

        const int maxTries = 10;
        bool gotThrough = false;

        for ( int i=0; i<maxTries; i++ ) {
            try {
                ChunkPtr c = manager->findChunk( o );
                log(4) << " server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );

                r.gotInsert();
                if ( r.getClientInfo()->autoSplitOk() )
                    c->splitIfShould( o.objsize() );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& e ) {
                log( i < ( maxTries / 2 ) ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                r.reset();
                manager = r.getChunkManager();
                uassert(14804, "collection no longer sharded", manager);
            }
            sleepmillis( i * 200 );
        }

        assert( inShutdown() || gotThrough );
    }
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    while ( d.moreJSObjs() ){
        BSONObj o = d.nextJsObj();
        if ( ! manager->hasShardKey( o ) ){

            bool bad = true;

            if ( manager->getShardKey().partOfShardKey( "_id" ) ){
                BSONObjBuilder b;
                b.appendOID( "_id" , 0 , true );
                b.appendElements( o );
                o = b.obj();
                bad = ! manager->hasShardKey( o );
            }

            if ( bad ){
                log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                throw UserException( 8011 , "tried to insert object without shard key" );
            }
        }

        bool gotThrough = false;
        for ( int i=0; i<10; i++ ){
            try {
                ChunkPtr c = manager->findChunk( o );
                log(4) << " server:" << c->getShard().toString() << " " << o << endl;
                insert( c->getShard() , r.getns() , o );

                r.gotInsert();
                c->splitIfShould( o.objsize() );
                gotThrough = true;
                break;
            }
            catch ( StaleConfigException& ){
                log(1) << "retrying insert because of StaleConfigException: " << o << endl;
                r.reset();
                manager = r.getChunkManager();
            }
            sleepmillis( i * 200 );
        }

        assert( gotThrough );
    }
}
/**
 * Splits the chunks touched, based on the targeter stats, if needed.
 */
static void splitIfNeeded( const string& ns, const TargeterStats& stats ) {
    if ( !Chunk::ShouldAutoSplit ) {
        return;
    }

    DBConfigPtr config;

    try {
        config = grid.getDBConfig( ns );
    }
    catch ( const DBException& ex ) {
        warning() << "failed to get database config for " << ns
                  << " while checking for auto-split: " << causedBy( ex ) << endl;
        return;
    }

    ChunkManagerPtr chunkManager;
    ShardPtr dummyShard;
    config->getChunkManagerOrPrimary( ns, chunkManager, dummyShard );

    if ( !chunkManager ) {
        return;
    }

    for ( map<BSONObj, int>::const_iterator it = stats.chunkSizeDelta.begin();
          it != stats.chunkSizeDelta.end(); ++it ) {

        ChunkPtr chunk;
        try {
            chunk = chunkManager->findIntersectingChunk( it->first );
        }
        catch ( const AssertionException& ex ) {
            warning() << "could not find chunk while checking for auto-split: "
                      << causedBy( ex ) << endl;
            return;
        }

        chunk->splitIfShould( it->second );
    }
}
void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    int flags = d.pullInt();

    BSONObj query = d.nextJsObj();
    uassert( 10201 , "invalid update" , d.moreJSObjs() );
    BSONObj toupdate = d.nextJsObj();
    BSONObj chunkFinder = query;

    bool upsert = flags & UpdateOption_Upsert;
    bool multi = flags & UpdateOption_Multi;

    uassert( 10202 , "can't mix multi and upsert and sharding" , ! ( upsert && multi ) );

    if ( upsert && !(manager->hasShardKey(toupdate) ||
                     (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query)))) {
        throw UserException( 8012 , "can't upsert something without shard key" );
    }

    bool save = false;
    if ( ! manager->hasShardKey( query ) ){
        if ( multi ){
        }
        else if ( strcmp( query.firstElement().fieldName() , "_id" ) || query.nFields() != 1 ){
            throw UserException( 8013 , "can't do non-multi update with query that doesn't have the shard key" );
        }
        else {
            save = true;
            chunkFinder = toupdate;
        }
    }

    if ( ! save ){
        if ( toupdate.firstElement().fieldName()[0] == '$' ){
            BSONObjIterator ops(toupdate);
            while(ops.more()){
                BSONElement op(ops.next());
                if (op.type() != Object)
                    continue;
                BSONObjIterator fields(op.embeddedObject());
                while(fields.more()){
                    const string field = fields.next().fieldName();
                    uassert(13123, "Can't modify shard key's value", ! manager->getShardKey().partOfShardKey(field));
                }
            }
        }
        else if ( manager->hasShardKey( toupdate ) ){
            uassert( 8014, "change would move shards!", manager->getShardKey().compare( query , toupdate ) == 0 );
        }
        else {
            uasserted(12376, "shard key must be in update object");
        }
    }

    if ( multi ){
        set<Shard> shards;
        manager->getShardsForQuery( shards , chunkFinder );
        int * x = (int*)(r.d().afterNS());
        x[0] |= UpdateOption_Broadcast;
        for ( set<Shard>::iterator i=shards.begin(); i!=shards.end(); i++){
            doWrite( dbUpdate , r , *i , false );
        }
    }
    else {
        int left = 5;
        while ( true ){
            try {
                ChunkPtr c = manager->findChunk( chunkFinder );
                doWrite( dbUpdate , r , c->getShard() );
                c->splitIfShould( d.msg().header()->dataLen() );
                break;
            }
            catch ( StaleConfigException& e ){
                if ( left <= 0 )
                    throw e;
                left--;
                log() << "update failed b/c of StaleConfigException, retrying "
                      << " left:" << left << " ns: " << r.getns() << " query: " << query << endl;
                r.reset( false );
                manager = r.getChunkManager();
            }
        }
    }
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.
    map<ChunkPtr, vector<BSONObj> > insertsForChunk; // Group bulk insert for appropriate shards

    try {
        while ( d.moreJSObjs() ) {
            BSONObj o = d.nextJsObj();
            if ( ! manager->hasShardKey( o ) ) {

                bool bad = true;

                if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                    BSONObjBuilder b;
                    b.appendOID( "_id" , 0 , true );
                    b.appendElements( o );
                    o = b.obj();
                    bad = ! manager->hasShardKey( o );
                }

                if ( bad ) {
                    log() << "tried to insert object with no valid shard key: " << r.getns() << " " << o << endl;
                    uasserted( 8011 , "tried to insert object with no valid shard key" );
                }
            }

            // Many operations benefit from having the shard key early in the object
            o = manager->getShardKey().moveToFront(o);
            insertsForChunk[manager->findChunk(o)].push_back(o);
        }

        for (map<ChunkPtr, vector<BSONObj> >::iterator it = insertsForChunk.begin(); it != insertsForChunk.end(); ++it) {
            ChunkPtr c = it->first;
            vector<BSONObj> objs = it->second;

            const int maxTries = 30;
            bool gotThrough = false;

            for ( int i=0; i<maxTries; i++ ) {
                try {
                    LOG(4) << " server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;
                    insert( c->getShard() , r.getns() , objs , flags);

                    int bytesWritten = 0;
                    for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                        r.gotInsert(); // Record the correct number of individual inserts
                        bytesWritten += (*vecIt).objsize();
                    }

                    if ( r.getClientInfo()->autoSplitOk() )
                        c->splitIfShould( bytesWritten );

                    gotThrough = true;
                    break;
                }
                catch ( StaleConfigException& e ) {
                    int logLevel = i < ( maxTries / 2 );
                    LOG( logLevel ) << "retrying bulk insert of " << objs.size()
                                    << " documents because of StaleConfigException: " << e << endl;
                    r.reset();

                    manager = r.getChunkManager();
                    if( ! manager ) {
                        uasserted(14804, "collection no longer sharded");
                    }

                    unsigned long long old = manager->getSequenceNumber();
                    LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;
                }
                sleepmillis( i * 20 );
            }

            assert( inShutdown() || gotThrough ); // not caught below
        }
    }
    catch (const UserException&){
        if (!d.moreJSObjs()){
            throw;
        }
        // Ignore and keep going. ContinueOnError is implied with sharding.
    }
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager ) {
    const int flags = d.reservedField();
    bool keepGoing = flags & InsertOption_KeepGoing; // modified before assertion if should abort

    while ( d.moreJSObjs() ) {
        try {
            BSONObj o = d.nextJsObj();
            if ( ! manager->hasShardKey( o ) ) {

                bool bad = true;

                if ( manager->getShardKey().partOfShardKey( "_id" ) ) {
                    BSONObjBuilder b;
                    b.appendOID( "_id" , 0 , true );
                    b.appendElements( o );
                    o = b.obj();
                    bad = ! manager->hasShardKey( o );
                }

                if ( bad ) {
                    log() << "tried to insert object without shard key: " << r.getns() << " " << o << endl;
                    uasserted( 8011 , "tried to insert object without shard key" );
                }
            }

            // Many operations benefit from having the shard key early in the object
            o = manager->getShardKey().moveToFront(o);

            const int maxTries = 30;
            bool gotThrough = false;

            for ( int i=0; i<maxTries; i++ ) {
                try {
                    ChunkPtr c = manager->findChunk( o );
                    log(4) << " server:" << c->getShard().toString() << " " << o << endl;
                    insert( c->getShard() , r.getns() , o , flags);

                    r.gotInsert();
                    if ( r.getClientInfo()->autoSplitOk() )
                        c->splitIfShould( o.objsize() );
                    gotThrough = true;
                    break;
                }
                catch ( StaleConfigException& e ) {
                    int logLevel = i < ( maxTries / 2 );
                    LOG( logLevel ) << "retrying insert because of StaleConfigException: " << e << " object: " << o << endl;
                    r.reset();

                    unsigned long long old = manager->getSequenceNumber();
                    manager = r.getChunkManager();

                    LOG( logLevel ) << " sequence number - old: " << old << " new: " << manager->getSequenceNumber() << endl;

                    if (!manager) {
                        keepGoing = false;
                        uasserted(14804, "collection no longer sharded");
                    }
                }
                sleepmillis( i * 20 );
            }

            assert( inShutdown() || gotThrough ); // not caught below
        }
        catch (const UserException&){
            if (!keepGoing || !d.moreJSObjs()){
                throw;
            }
            // otherwise ignore and keep going
        }
    }
}
void _insert( Request& r , DbMessage& d, ChunkManagerPtr manager,
              vector<BSONObj>& insertsRemaining, map<ChunkPtr, vector<BSONObj> > insertsForChunks,
              int retries = 0 )
{
    uassert( 16055, str::stream() << "too many retries during bulk insert, "
                                  << insertsRemaining.size() << " inserts remaining", retries < 30 );
    uassert( 16056, str::stream() << "shutting down server during bulk insert, "
                                  << insertsRemaining.size() << " inserts remaining", ! inShutdown() );

    const int flags = d.reservedField() | InsertOption_ContinueOnError; // ContinueOnError is always on when using sharding.

    _groupInserts( manager, insertsRemaining, insertsForChunks );

    while( ! insertsForChunks.empty() ){

        ChunkPtr c = insertsForChunks.begin()->first;
        vector<BSONObj>& objs = insertsForChunks.begin()->second;

        const Shard& shard = c->getShard();
        const string& ns = r.getns();

        ShardConnection dbcon( shard, ns, manager );

        try {
            LOG(4) << " server:" << c->getShard().toString() << " bulk insert " << objs.size() << " documents" << endl;

            // Taken from single-shard bulk insert, should not need multiple methods in future
            // insert( c->getShard() , r.getns() , objs , flags);

            // It's okay if the version is set here, an exception will be thrown if the version is incompatible
            dbcon.setVersion();

            dbcon->insert( ns , objs , flags);
            // TODO: Option for safe inserts here - can then use this for all inserts

            dbcon.done();

            int bytesWritten = 0;
            for (vector<BSONObj>::iterator vecIt = objs.begin(); vecIt != objs.end(); ++vecIt) {
                r.gotInsert(); // Record the correct number of individual inserts
                bytesWritten += (*vecIt).objsize();
            }

            // TODO: The only reason we're grouping by chunks here is for auto-split, more efficient
            // to track this separately and bulk insert to shards
            if ( r.getClientInfo()->autoSplitOk() )
                c->splitIfShould( bytesWritten );
        }
        catch ( StaleConfigException& e ) {
            // Cleanup the connection
            dbcon.done();

            // Assume the inserts did *not* succeed, so we don't want to erase them

            int logLevel = retries < 2;
            LOG( logLevel ) << "retrying bulk insert of " << objs.size() << " documents to chunk " << c
                            << " because of StaleConfigException: " << e << endl;

            if( retries > 2 ){
                versionManager.forceRemoteCheckShardVersionCB( e.getns() );
            }

            // TODO: Replace with actual chunk handling code, simplify request
            r.reset();
            manager = r.getChunkManager();
            if( ! manager ) {
                // TODO : We can probably handle this better?
                uasserted( 14804, "collection no longer sharded" );
            }
            // End TODO

            // We may need to regroup at least some of our inserts since our chunk manager may have changed
            _insert( r, d, manager, insertsRemaining, insertsForChunks, retries + 1 );
            return;
        }
        catch( UserException& ){
            // Unexpected exception, so don't clean up the conn
            dbcon.kill();

            // These inserts won't be retried, as something weird happened here
            insertsForChunks.erase( insertsForChunks.begin() );

            // Throw if this is the last chunk bulk-inserted to
            if( insertsForChunks.empty() ){
                throw;
            }
        }

        insertsForChunks.erase( insertsForChunks.begin() );
    }
}
void _update( Request& r , DbMessage& d, ChunkManagerPtr manager ){
    int flags = d.pullInt();

    BSONObj query = d.nextJsObj();
    uassert( 10201 , "invalid update" , d.moreJSObjs() );
    BSONObj toupdate = d.nextJsObj();

    BSONObj chunkFinder = query;

    bool upsert = flags & UpdateOption_Upsert;
    bool multi = flags & UpdateOption_Multi;

    if ( multi )
        uassert( 10202 , "can't mix multi and upsert and sharding" , ! upsert );

    if ( upsert && !(manager->hasShardKey(toupdate) ||
                     (toupdate.firstElement().fieldName()[0] == '$' && manager->hasShardKey(query)))) {
        throw UserException( 8012 , "can't upsert something without shard key" );
    }

    bool save = false;
    if ( ! manager->hasShardKey( query ) ){
        if ( multi ){
        }
        else if ( query.nFields() != 1 || strcmp( query.firstElement().fieldName() , "_id" ) ){
            throw UserException( 8013 , "can't do update with query that doesn't have the shard key" );
        }
        else {
            save = true;
            chunkFinder = toupdate;
        }
    }

    if ( ! save ){
        if ( toupdate.firstElement().fieldName()[0] == '$' ){
            BSONObjIterator ops(toupdate);
            while(ops.more()){
                BSONElement op(ops.next());
                if (op.type() != Object)
                    continue;
                BSONObjIterator fields(op.embeddedObject());
                while(fields.more()){
                    const string field = fields.next().fieldName();
                    uassert(13123, "Can't modify shard key's value", ! manager->getShardKey().partOfShardKey(field));
                }
            }
        }
        else if ( manager->hasShardKey( toupdate ) ){
            uassert( 8014, "change would move shards!", manager->getShardKey().compare( query , toupdate ) == 0 );
        }
        else {
            uasserted(12376, "shard key must be in update object");
        }
    }

    if ( multi ){
        vector<shared_ptr<ChunkRange> > chunks;
        manager->getChunksForQuery( chunks , chunkFinder );
        set<Shard> seen;
        for ( vector<shared_ptr<ChunkRange> >::iterator i=chunks.begin(); i!=chunks.end(); i++){
            shared_ptr<ChunkRange> c = *i;
            if ( seen.count( c->getShard() ) )
                continue;
            doWrite( dbUpdate , r , c->getShard() );
            seen.insert( c->getShard() );
        }
    }
    else {
        ChunkPtr c = manager->findChunk( chunkFinder );
        doWrite( dbUpdate , r , c->getShard() );
        c->splitIfShould( d.msg().header()->dataLen() );
    }
}