bool checkConfigOrInit( const string& configdb , bool authoritative , string& errmsg , BSONObjBuilder& result , bool locked=false ) const {
    if ( configdb.size() == 0 ) {
        errmsg = "no configdb";
        return false;
    }

    if ( shardingState.enabled() ) {
        if ( configdb == shardingState.getConfigServer() )
            return true;

        result.append( "configdb" , BSON( "stored" << shardingState.getConfigServer() <<
                                          "given" << configdb ) );
        errmsg = str::stream() << "mongos specified a different config database string : "
                               << "stored : " << shardingState.getConfigServer()
                               << " vs given : " << configdb;
        return false;
    }

    if ( ! authoritative ) {
        result.appendBool( "need_authoritative" , true );
        errmsg = "first setShardVersion";
        return false;
    }

    if ( locked ) {
        ShardedConnectionInfo::addHook();
        shardingState.enable( configdb );
        configServer.init( configdb );
        return true;
    }

    Lock::GlobalWrite lk;
    return checkConfigOrInit( configdb , authoritative , errmsg , result , true );
}
bool checkConfigOrInit( const string& configdb , bool authoritative , string& errmsg , BSONObjBuilder& result , bool locked=false ) const {
    if ( configdb.size() == 0 ) {
        errmsg = "no configdb";
        return false;
    }

    if ( shardingState.enabled() ) {
        if ( configdb == shardingState.getConfigServer() )
            return true;

        result.append( "configdb" , BSON( "stored" << shardingState.getConfigServer() <<
                                          "given" << configdb ) );
        errmsg = "specified a different configdb!";
        return false;
    }

    if ( ! authoritative ) {
        result.appendBool( "need_authoritative" , true );
        errmsg = "first setShardVersion";
        return false;
    }

    if ( locked ) {
        shardingState.enable( configdb );
        configServer.init( configdb );
        return true;
    }

    dblock lk;
    return checkConfigOrInit( configdb , authoritative , errmsg , result , true );
}
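A minimal usage sketch, not from the original source, of how a setShardVersion-style command handler might call checkConfigOrInit before doing any version work; the surrounding command class and this run signature are assumed, mirroring the handlers shown later in this section.

bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    bool authoritative = cmdObj.getBoolField( "authoritative" );
    string configdb = cmdObj["configdb"].valuestrsafe();

    // Bail out early if sharding state cannot be validated or initialized; errmsg and
    // result carry the reason back to the caller, which may retry with
    // authoritative=true when "need_authoritative" is set in the response.
    if ( ! checkConfigOrInit( configdb , authoritative , errmsg , result ) )
        return false;

    // ... remainder of the command (version checks, etc.) would follow here ...
    return true;
}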
void logDeleteOpForSharding(OperationContext* txn,
                            const char* ns,
                            const BSONObj& obj,
                            bool notInActiveChunk) {
    ShardingState* shardingState = ShardingState::get(txn);
    if (shardingState->enabled())
        shardingState->migrationSourceManager()->logDeleteOp(txn, ns, obj, notInActiveChunk);
}
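A hypothetical sketch of how a delete path might invoke the hook above, using only the signature shown; the caller and the isInMigratingChunk() helper are illustrative and not part of the original source.

void onDeleteCommitted(OperationContext* txn, const char* ns, const BSONObj& deletedDoc) {
    // If the deleted document still belonged to a chunk being migrated off this shard,
    // the migration source manager needs to forward the delete to the recipient shard.
    bool notInActiveChunk = !isInMigratingChunk(ns, deletedDoc);  // hypothetical helper
    logDeleteOpForSharding(txn, ns, deletedDoc, notInActiveChunk);
}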
/**
 * @return true if not in sharded mode or if version for this client is ok
 */
bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
    if ( ! shardingState.enabled() )
        return true;

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    if ( ! info ) {
        // this means the client has nothing sharded
        // so this allows direct connections to do whatever they want
        // which i think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ) {
        return true;
    }

    // TODO
    //   all collections at some point, be sharded or not, will have a version
    //   (and a ShardChunkManager)
    //   for now, we remove the sharding state of dropped collection
    //   so delayed request may come in. This has to be fixed.
    ConfigVersion clientVersion = info->getVersion(ns);
    ConfigVersion version;
    if ( ! shardingState.hasVersion( ns , version ) && clientVersion == 0 ) {
        return true;
    }

    if ( version == 0 && clientVersion > 0 ) {
        stringstream ss;
        ss << "collection was dropped or this shard no longer valid version: " << version
           << " clientVersion: " << clientVersion;
        errmsg = ss.str();
        return false;
    }

    if ( clientVersion >= version )
        return true;

    if ( clientVersion == 0 ) {
        stringstream ss;
        ss << "client in sharded mode, but doesn't have version set for this collection: "
           << ns << " myVersion: " << version;
        errmsg = ss.str();
        return false;
    }

    if ( version.majorVersion() == clientVersion.majorVersion() ) {
        // this means there was just a split
        // since on a split w/o a migrate this server is ok
        // going to accept
        return true;
    }

    stringstream ss;
    ss << "your version is too old ns: " + ns << " global: " << version
       << " client: " << clientVersion;
    errmsg = ss.str();
    return false;
}
bool haveLocalShardingInfo( const string& ns ) {
    if ( ! shardingState.enabled() )
        return false;

    if ( ! shardingState.hasVersion( ns ) )
        return false;

    return ShardedConnectionInfo::get(false) != NULL;
}
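For illustration only, a hypothetical caller that uses haveLocalShardingInfo() to decide whether chunk-ownership filtering is needed; the function name and the surrounding decision are assumptions, not taken from the original source.

bool needsChunkFiltering( const string& ns ) {
    // Unsharded deployments, collections without a local version, and unversioned
    // (direct) connections see all documents, so no filtering is required for them.
    return haveLocalShardingInfo( ns );
}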
/**
 * @return true if not in sharded mode or if version for this client is ok
 */
bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ){
    if ( ! shardingState.enabled() )
        return true;

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    if ( ! info ){
        // this means the client has nothing sharded
        // so this allows direct connections to do whatever they want
        // which i think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ){
        return true;
    }

    ConfigVersion version;
    if ( ! shardingState.hasVersion( ns , version ) ){
        return true;
    }

    ConfigVersion clientVersion = info->getVersion(ns);

    if ( version == 0 && clientVersion > 0 ){
        stringstream ss;
        ss << "collection was dropped or this shard no longer valid version: " << version
           << " clientVersion: " << clientVersion;
        errmsg = ss.str();
        return false;
    }

    if ( clientVersion >= version )
        return true;

    if ( clientVersion == 0 ){
        stringstream ss;
        ss << "client in sharded mode, but doesn't have version set for this collection: "
           << ns << " myVersion: " << version;
        errmsg = ss.str();
        return false;
    }

    if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ){
        // this means there was just a split
        // since on a split w/o a migrate this server is ok
        // going to accept write
        return true;
    }

    stringstream ss;
    ss << "your version is too old ns: " + ns << " global: " << version
       << " client: " << clientVersion;
    errmsg = ss.str();
    return false;
}
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    lastError.disableForCommand();
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    string configdb = cmdObj["configdb"].valuestrsafe();
    { // configdb checking
        if ( configdb.size() == 0 ) {
            errmsg = "no configdb";
            return false;
        }

        if ( shardingState.enabled() ) {
            if ( configdb != shardingState.getConfigServer() ) {
                errmsg = "specified a different configdb!";
                return false;
            }
        }
        else {
            if ( ! authoritative ) {
                result.appendBool( "need_authoritative" , true );
                errmsg = "first setShardVersion";
                return false;
            }
            shardingState.enable( configdb );
            configServer.init( configdb );
        }
    }

    if ( cmdObj["shard"].type() == String ) {
        shardingState.gotShardName( cmdObj["shard"].String() );
        shardingState.gotShardHost( cmdObj["shardHost"].String() );
    }

    { // setting up ids
        if ( cmdObj["serverID"].type() != jstOID ) {
            // TODO: fix this
            //errmsg = "need serverID to be an OID";
            //return 0;
        }
        else {
            OID clientId = cmdObj["serverID"].__oid();
            if ( ! info->hasID() ) {
                info->setID( clientId );
            }
            else if ( clientId != info->getID() ) {
                errmsg = "server id has changed!";
                return 0;
            }
        }
    }

    unsigned long long version = extractVersion( cmdObj["version"] , errmsg );

    if ( errmsg.size() ) {
        return false;
    }

    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify full namespace";
        return false;
    }

    const ConfigVersion oldVersion = info->getVersion(ns);
    const ConfigVersion globalVersion = shardingState.getVersion(ns);

    if ( oldVersion > 0 && globalVersion == 0 ) {
        // this had been reset
        info->setVersion( ns , 0 );
    }

    if ( version == 0 && globalVersion == 0 ) {
        // this connection is cleaning itself
        info->setVersion( ns , 0 );
        return true;
    }

    if ( version == 0 && globalVersion > 0 ) {
        if ( ! authoritative ) {
            result.appendBool( "need_authoritative" , true );
            result.append( "ns" , ns );
            result.appendTimestamp( "globalVersion" , globalVersion );
            result.appendTimestamp( "oldVersion" , oldVersion );
            errmsg = "dropping needs to be authoritative";
            return false;
        }
        log() << "wiping data for: " << ns << endl;
        result.appendTimestamp( "beforeDrop" , globalVersion );
        // only setting global version on purpose
        // need clients to re-find meta-data
        shardingState.resetVersion( ns );
        info->setVersion( ns , 0 );
        return true;
    }

    if ( version < oldVersion ) {
        errmsg = "you already have a newer version of collection '" + ns + "'";
        result.append( "ns" , ns );
        result.appendTimestamp( "oldVersion" , oldVersion );
        result.appendTimestamp( "newVersion" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    if ( version < globalVersion ) {
        while ( shardingState.inCriticalMigrateSection() ) {
            dbtemprelease r;
            sleepmillis(2);
            OCCASIONALLY log() << "waiting till out of critical section" << endl;
        }
        errmsg = "going to older version for global for collection '" + ns + "'";
        result.append( "ns" , ns );
        result.appendTimestamp( "version" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ) {
        // need authoritative for first look
        result.append( "ns" , ns );
        result.appendBool( "need_authoritative" , true );
        errmsg = "first time for collection '" + ns + "'";
        return false;
    }

    {
        dbtemprelease unlock;

        ShardChunkVersion currVersion = version;
        if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
            errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
            result.append( "ns" , ns );
            result.appendTimestamp( "version" , version );
            result.appendTimestamp( "globalVersion" , currVersion );
            return false;
        }
    }

    info->setVersion( ns , version );
    result.appendTimestamp( "oldVersion" , oldVersion );
    result.append( "ok" , 1 );
    return true;
}
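A sketch of the client-side half of the protocol implemented by run() above, for illustration only: when the shard replies with "need_authoritative", the caller re-sends setShardVersion with authoritative=true. runSetShardVersion() is a hypothetical helper; only the "need_authoritative" response field comes from the code above.

bool setShardVersionWithRetry( DBClientBase& conn , const string& ns , unsigned long long version ) {
    BSONObj res;
    // First attempt is non-authoritative; an already-initialized shard accepts it.
    if ( runSetShardVersion( conn , ns , version , false /* authoritative */ , res ) )  // hypothetical helper
        return true;

    if ( res["need_authoritative"].trueValue() ) {
        // First contact (or a drop) requires an authoritative version; retry once.
        return runSetShardVersion( conn , ns , version , true /* authoritative */ , res );
    }
    return false;
}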
/**
 * @return true if not in sharded mode or if version for this client is ok
 */
bool shardVersionOk( const string& ns , string& errmsg, ConfigVersion& received, ConfigVersion& wanted ) {
    if ( ! shardingState.enabled() )
        return true;

    if ( ! isMasterNs( ns.c_str() ) ) {
        // right now connections to secondaries aren't versioned at all
        return true;
    }

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );

    if ( ! info ) {
        // this means the client has nothing sharded
        // so this allows direct connections to do whatever they want
        // which i think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ) {
        return true;
    }

    // TODO
    //   all collections at some point, be sharded or not, will have a version
    //   (and a ShardChunkManager)
    //   for now, we remove the sharding state of dropped collection
    //   so delayed request may come in. This has to be fixed.
    ConfigVersion clientVersion = info->getVersion(ns);
    ConfigVersion version;
    if ( ! shardingState.hasVersion( ns , version ) && ! clientVersion.isSet() ) {
        return true;
    }

    // The versions we're going to compare, saved for future use
    received = clientVersion;
    wanted = version;

    if ( ! version.isSet() && clientVersion.isSet() ) {
        stringstream ss;
        ss << "collection was dropped or this shard no longer valid version";
        errmsg = ss.str();
        return false;
    }

    if ( clientVersion >= version )
        return true;

    if ( ! clientVersion.isSet() ) {
        stringstream ss;
        ss << "client in sharded mode, but doesn't have version set for this collection";
        errmsg = ss.str();
        return false;
    }

    if ( version.majorVersion() == clientVersion.majorVersion() ) {
        // this means there was just a split
        // since on a split w/o a migrate this server is ok
        // going to accept
        return true;
    }

    stringstream ss;
    ss << "your version is too old";
    errmsg = ss.str();
    return false;
}
/**
 * @return true if not in sharded mode or if version for this client is ok
 */
bool shardVersionOk( const string& ns , string& errmsg, ChunkVersion& received, ChunkVersion& wanted ) {
    if ( ! shardingState.enabled() )
        return true;

    if ( ! isMasterNs( ns.c_str() ) ) {
        // right now connections to secondaries aren't versioned at all
        return true;
    }

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );

    if ( ! info ) {
        // this means the client has nothing sharded
        // so this allows direct connections to do whatever they want
        // which i think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ) {
        return true;
    }

    // TODO : all collections at some point, be sharded or not, will have a version
    //  (and a CollectionMetadata)
    received = info->getVersion( ns );
    wanted = shardingState.getVersion( ns );

    if( received.isWriteCompatibleWith( wanted ) ) return true;

    //
    // Figure out exactly why not compatible, send appropriate error message
    // The versions themselves are returned in the error, so not needed in messages here
    //

    // Check epoch first, to send more meaningful message, since other parameters probably
    // won't match either
    if( ! wanted.hasCompatibleEpoch( received ) ){
        errmsg = str::stream() << "version epoch mismatch detected for " << ns << ", "
                               << "the collection may have been dropped and recreated";
        return false;
    }

    if( ! wanted.isSet() && received.isSet() ){
        errmsg = str::stream() << "this shard no longer contains chunks for " << ns << ", "
                               << "the collection may have been dropped";
        return false;
    }

    if( wanted.isSet() && ! received.isSet() ){
        errmsg = str::stream() << "this shard contains versioned chunks for " << ns << ", "
                               << "but no version set in request";
        return false;
    }

    if( wanted.majorVersion() != received.majorVersion() ){

        //
        // Could be > or < - wanted is > if this is the source of a migration,
        // wanted < if this is the target of a migration
        //

        errmsg = str::stream() << "version mismatch detected for " << ns << ", "
                               << "stored major version " << wanted.majorVersion()
                               << " does not match received " << received.majorVersion();
        return false;
    }

    // Those are all the reasons the versions can mismatch
    verify( false );
    return false;
}
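A minimal usage sketch, not taken from the original source: a request path asks shardVersionOk() before touching a collection and surfaces the mismatch to its caller. reportStaleConfig() is a hypothetical name used only for illustration; received and wanted are filled in by shardVersionOk() as shown above.

bool ensureShardVersionOk( const string& ns ) {
    string errmsg;
    ChunkVersion received, wanted;
    if ( shardVersionOk( ns , errmsg , received , wanted ) )
        return true;

    // Returning the exact received/wanted pair lets the router detect its stale
    // routing table and refresh before retrying the operation.
    reportStaleConfig( ns , errmsg , received , wanted );  // hypothetical helper
    return false;
}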
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool){

    // Debugging code for SERVER-1633. Commands have already a coarser timer for
    // normal operation.
    Timer timer;
    vector<int> laps;

    lastError.disableForCommand();
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    string configdb = cmdObj["configdb"].valuestrsafe();
    { // configdb checking
        if ( configdb.size() == 0 ){
            errmsg = "no configdb";
            return false;
        }

        if ( shardingState.enabled() ){
            if ( configdb != shardingState.getConfigServer() ){
                errmsg = "specified a different configdb!";
                return false;
            }
        }
        else {
            if ( ! authoritative ){
                result.appendBool( "need_authoritative" , true );
                errmsg = "first setShardVersion";
                return false;
            }
            shardingState.enable( configdb );
            configServer.init( configdb );
        }
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( cmdObj["shard"].type() == String ){
        shardingState.gotShardName( cmdObj["shard"].String() );
        shardingState.gotShardHost( cmdObj["shardHost"].String() );
    }

    { // setting up ids
        if ( cmdObj["serverID"].type() != jstOID ){
            // TODO: fix this
            //errmsg = "need serverID to be an OID";
            //return 0;
        }
        else {
            OID clientId = cmdObj["serverID"].__oid();
            if ( ! info->hasID() ){
                info->setID( clientId );
            }
            else if ( clientId != info->getID() ){
                errmsg = "server id has changed!";
                return 0;
            }
        }
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    unsigned long long version = extractVersion( cmdObj["version"] , errmsg );

    if ( errmsg.size() ){
        return false;
    }

    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ){
        errmsg = "need to specify full namespace";
        return false;
    }

    ConfigVersion& oldVersion = info->getVersion(ns);
    ConfigVersion& globalVersion = shardingState.getVersion(ns);

    if ( oldVersion > 0 && globalVersion == 0 ){
        // this had been reset
        oldVersion = 0;
    }

    if ( version == 0 && globalVersion == 0 ){
        // this connection is cleaning itself
        oldVersion = 0;
        return 1;
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( version == 0 && globalVersion > 0 ){
        if ( ! authoritative ){
            result.appendBool( "need_authoritative" , true );
            result.appendTimestamp( "globalVersion" , globalVersion );
            result.appendTimestamp( "oldVersion" , oldVersion );
            errmsg = "dropping needs to be authoritative";
            return 0;
        }
        log() << "wiping data for: " << ns << endl;
        result.appendTimestamp( "beforeDrop" , globalVersion );
        // only setting global version on purpose
        // need clients to re-find meta-data
        globalVersion = 0;
        oldVersion = 0;
        return 1;
    }

    if ( version < oldVersion ){
        errmsg = "you already have a newer version";
        result.appendTimestamp( "oldVersion" , oldVersion );
        result.appendTimestamp( "newVersion" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( version < globalVersion ){
        while ( shardingState.inCriticalMigrateSection() ){
            dbtemprelease r;
            sleepmillis(2);
            log() << "waiting till out of critical section" << endl;
        }
        errmsg = "going to older version for global";
        result.appendTimestamp( "version" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ){
        // need authoritative for first look
        result.appendBool( "need_authoritative" , true );
        result.append( "ns" , ns );
        errmsg = "first time for this ns";
        return false;
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    {
        dbtemprelease unlock;
        shardingState.getChunkMatcher( ns );
    }

    result.appendTimestamp( "oldVersion" , oldVersion );
    oldVersion = version;
    globalVersion = version;

    // SERVER-1633
    ostringstream lapString;
    lapString << name /* command name */ << " partials: ";
    for (size_t i = 1; i < laps.size(); ++i){
        lapString << (laps[i] - laps[i-1]) / 1000 << " ";
    }
    lapString << endl;
    logIfSlow( timer, lapString.str() );

    result.append( "ok" , 1 );
    return 1;
}