bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
    lastError.disableForCommand();

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    string configdb = cmdObj["configdb"].valuestrsafe();
    { // configdb checking
        if ( configdb.size() == 0 ) {
            errmsg = "no configdb";
            return false;
        }

        if ( shardingState.enabled() ) {
            if ( configdb != shardingState.getConfigServer() ) {
                errmsg = "specified a different configdb!";
                return false;
            }
        }
        else {
            if ( ! authoritative ) {
                result.appendBool( "need_authoritative" , true );
                errmsg = "first setShardVersion";
                return false;
            }
            shardingState.enable( configdb );
            configServer.init( configdb );
        }
    }

    if ( cmdObj["shard"].type() == String ) {
        shardingState.gotShardName( cmdObj["shard"].String() );
        shardingState.gotShardHost( cmdObj["shardHost"].String() );
    }

    { // setting up ids
        if ( cmdObj["serverID"].type() != jstOID ) {
            // TODO: fix this
            //errmsg = "need serverID to be an OID";
            //return false;
        }
        else {
            OID clientId = cmdObj["serverID"].__oid();
            if ( ! info->hasID() ) {
                info->setID( clientId );
            }
            else if ( clientId != info->getID() ) {
                errmsg = "server id has changed!";
                return false;
            }
        }
    }

    unsigned long long version = extractVersion( cmdObj["version"] , errmsg );

    if ( errmsg.size() ) {
        return false;
    }

    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify full namespace";
        return false;
    }

    const ConfigVersion oldVersion = info->getVersion(ns);
    const ConfigVersion globalVersion = shardingState.getVersion(ns);

    if ( oldVersion > 0 && globalVersion == 0 ) {
        // this had been reset
        info->setVersion( ns , 0 );
    }

    if ( version == 0 && globalVersion == 0 ) {
        // this connection is cleaning itself
        info->setVersion( ns , 0 );
        return true;
    }

    if ( version == 0 && globalVersion > 0 ) {
        if ( ! authoritative ) {
            result.appendBool( "need_authoritative" , true );
            result.append( "ns" , ns );
            result.appendTimestamp( "globalVersion" , globalVersion );
            result.appendTimestamp( "oldVersion" , oldVersion );
            errmsg = "dropping needs to be authoritative";
            return false;
        }
        log() << "wiping data for: " << ns << endl;
        result.appendTimestamp( "beforeDrop" , globalVersion );
        // only setting global version on purpose
        // need clients to re-find meta-data
        shardingState.resetVersion( ns );
        info->setVersion( ns , 0 );
        return true;
    }

    if ( version < oldVersion ) {
        errmsg = "you already have a newer version of collection '" + ns + "'";
        result.append( "ns" , ns );
        result.appendTimestamp( "oldVersion" , oldVersion );
        result.appendTimestamp( "newVersion" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    if ( version < globalVersion ) {
        while ( shardingState.inCriticalMigrateSection() ) {
            dbtemprelease r;
            sleepmillis(2);
            OCCASIONALLY log() << "waiting till out of critical section" << endl;
        }
        errmsg = "trying to set an older version than the global version for collection '" + ns + "'";
        result.append( "ns" , ns );
        result.appendTimestamp( "version" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ) {
        // need authoritative for first look
        result.append( "ns" , ns );
        result.appendBool( "need_authoritative" , true );
        errmsg = "first time for collection '" + ns + "'";
        return false;
    }

    {
        dbtemprelease unlock;

        ShardChunkVersion currVersion = version;
        if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
            errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
            result.append( "ns" , ns );
            result.appendTimestamp( "version" , version );
            result.appendTimestamp( "globalVersion" , currVersion );
            return false;
        }
    }

    info->setVersion( ns , version );
    result.appendTimestamp( "oldVersion" , oldVersion );
    result.append( "ok" , 1 );
    return true;
}
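// Illustrative only: roughly the request document the handler above parses.
// This helper is hypothetical (it does not exist in this file or in mongos);
// the field names are taken from the parsing code above, the values are made up.
static BSONObj buildSetShardVersionCmd( const string& ns ,
                                        const string& configdb ,
                                        const string& shardName ,
                                        const string& shardHost ,
                                        const OID& serverID ,
                                        unsigned long long version ,
                                        bool authoritative ) {
    BSONObjBuilder b;
    b.append( "setShardVersion" , ns );          // fully qualified "db.collection"
    b.append( "configdb" , configdb );           // must match shardingState's config server
    b.append( "shard" , shardName );
    b.append( "shardHost" , shardHost );
    OID id = serverID;
    b.appendOID( "serverID" , &id );             // mongos identity, checked per-connection
    b.appendTimestamp( "version" , version );    // ShardChunkVersion travels as a timestamp
    b.appendBool( "authoritative" , authoritative );
    return b.obj();
}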
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {

    // Steps
    // 1. check basic config
    // 2. extract params from command
    // 3. fast check
    // 4. slow check (LOCKS)

    // step 1
    lastError.disableForCommand();
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    // make sure we have the mongos id for writebacks
    if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
        return false;

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    // check config server is ok or enable sharding
    if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
        return false;

    // check shard name/hosts are correct
    if ( cmdObj["shard"].type() == String ) {
        shardingState.gotShardName( cmdObj["shard"].String() );
        shardingState.gotShardHost( cmdObj["shardHost"].String() );
    }

    // Handle initial shard connection
    if ( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ) {
        result.append( "initialized", true );
        return true;
    }

    // we can run on a slave up to here
    if ( ! isMaster( "admin" ) ) {
        result.append( "errmsg" , "not master" );
        result.append( "note" , "from post init in setShardVersion" );
        return false;
    }

    // step 2
    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify namespace";
        return false;
    }

    const ConfigVersion version = ConfigVersion( extractVersion( cmdObj["version"] , errmsg ), OID() );
    if ( errmsg.size() )
        return false;

    // step 3
    const ConfigVersion oldVersion = info->getVersion(ns);
    const ConfigVersion globalVersion = shardingState.getVersion(ns);

    oldVersion.addToBSON( result, "oldVersion" );

    if ( globalVersion.isSet() && version.isSet() ) {
        // this means there is no reset going on on either side
        // so it's safe to make some assumptions

        if ( version.isEquivalentTo( globalVersion ) ) {
            // mongos and mongod agree!
            if ( ! oldVersion.isEquivalentTo( version ) ) {
                if ( oldVersion < globalVersion ) {
                    info->setVersion( ns , version );
                }
                else if ( authoritative ) {
                    // this means there was a drop and our version is reset
                    info->setVersion( ns , version );
                }
                else {
                    result.append( "ns" , ns );
                    result.appendBool( "need_authoritative" , true );
                    errmsg = "verifying drop on '" + ns + "'";
                    return false;
                }
            }
            return true;
        }

    }

    // step 4

    // this is because of a weird segfault I saw and I can't see why this should ever be set
    massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );

    Lock::GlobalWrite setShardVersionLock; // TODO: can we get rid of this??

    if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
        // this had been reset
        info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
    }

    if ( ! version.isSet() && ! globalVersion.isSet() ) {
        // this connection is cleaning itself
        info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
        return true;
    }

    if ( ! version.isSet() && globalVersion.isSet() ) {
        if ( ! authoritative ) {
            result.appendBool( "need_authoritative" , true );
            result.append( "ns" , ns );
            globalVersion.addToBSON( result, "globalVersion" );
            errmsg = "dropping needs to be authoritative";
            return false;
        }
        log() << "wiping data for: " << ns << endl;
        globalVersion.addToBSON( result, "beforeDrop" );
        // only setting global version on purpose
        // need clients to re-find meta-data
        shardingState.resetVersion( ns );
        info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
        return true;
    }

    if ( version < oldVersion ) {
        errmsg = "this connection already had a newer version of collection '" + ns + "'";
        result.append( "ns" , ns );
        version.addToBSON( result, "newVersion" );
        globalVersion.addToBSON( result, "globalVersion" );
        return false;
    }

    if ( version < globalVersion ) {
        while ( shardingState.inCriticalMigrateSection() ) {
            dbtemprelease r;
            sleepmillis(2);
            OCCASIONALLY log() << "waiting till out of critical section" << endl;
        }
        errmsg = "shard global version for collection '" + ns + "' is higher than the version being set";
        result.append( "ns" , ns );
        version.addToBSON( result, "version" );
        globalVersion.addToBSON( result, "globalVersion" );
        result.appendBool( "reloadConfig" , true );
        return false;
    }

    if ( ! globalVersion.isSet() && ! authoritative ) {
        // Needed b/c when the last chunk is moved off a shard, the version gets reset to zero, which
        // should require a reload.
        // TODO: Maybe a more elegant way of doing this
        while ( shardingState.inCriticalMigrateSection() ) {
            dbtemprelease r;
            sleepmillis(2);
            OCCASIONALLY log() << "waiting till out of critical section for version reset" << endl;
        }

        // need authoritative for first look
        result.append( "ns" , ns );
        result.appendBool( "need_authoritative" , true );
        errmsg = "first time for collection '" + ns + "'";
        return false;
    }

    Timer relockTime;
    {
        dbtemprelease unlock;

        ShardChunkVersion currVersion = version;
        if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
            errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "version" );
            globalVersion.addToBSON( result, "globalVersion" );
            return false;
        }
    }
    if ( relockTime.millis() >= ( cmdLine.slowMS - 10 ) ) {
        log() << "setShardVersion - relocking slow: " << relockTime.millis() << endl;
    }

    info->setVersion( ns , version );
    return true;
}
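// Protocol note (inferred from the branches above, not from mongos source): when
// a non-authoritative caller hits the drop or first-contact paths, the reply is
// shaped roughly like the following, with illustrative values:
//
//   { ok: 0, ns: "test.foo", need_authoritative: true,
//     globalVersion: Timestamp(3, 0), oldVersion: Timestamp(0, 0),
//     errmsg: "dropping needs to be authoritative" }
//
// The expected recovery, an assumption about the caller, is to reload the
// collection's metadata and retry the command with { authoritative: true }.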
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {

    // Debugging code for SERVER-1633. Commands already have a coarser timer for
    // normal operation.
    Timer timer;
    vector<int> laps;

    lastError.disableForCommand();
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    string configdb = cmdObj["configdb"].valuestrsafe();
    { // configdb checking
        if ( configdb.size() == 0 ) {
            errmsg = "no configdb";
            return false;
        }

        if ( shardingState.enabled() ) {
            if ( configdb != shardingState.getConfigServer() ) {
                errmsg = "specified a different configdb!";
                return false;
            }
        }
        else {
            if ( ! authoritative ) {
                result.appendBool( "need_authoritative" , true );
                errmsg = "first setShardVersion";
                return false;
            }
            shardingState.enable( configdb );
            configServer.init( configdb );
        }
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( cmdObj["shard"].type() == String ) {
        shardingState.gotShardName( cmdObj["shard"].String() );
        shardingState.gotShardHost( cmdObj["shardHost"].String() );
    }

    { // setting up ids
        if ( cmdObj["serverID"].type() != jstOID ) {
            // TODO: fix this
            //errmsg = "need serverID to be an OID";
            //return false;
        }
        else {
            OID clientId = cmdObj["serverID"].__oid();
            if ( ! info->hasID() ) {
                info->setID( clientId );
            }
            else if ( clientId != info->getID() ) {
                errmsg = "server id has changed!";
                return false;
            }
        }
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    unsigned long long version = extractVersion( cmdObj["version"] , errmsg );

    if ( errmsg.size() ) {
        return false;
    }

    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify full namespace";
        return false;
    }

    ConfigVersion& oldVersion = info->getVersion(ns);
    ConfigVersion& globalVersion = shardingState.getVersion(ns);

    if ( oldVersion > 0 && globalVersion == 0 ) {
        // this had been reset
        oldVersion = 0;
    }

    if ( version == 0 && globalVersion == 0 ) {
        // this connection is cleaning itself
        oldVersion = 0;
        return true;
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( version == 0 && globalVersion > 0 ) {
        if ( ! authoritative ) {
            result.appendBool( "need_authoritative" , true );
            result.appendTimestamp( "globalVersion" , globalVersion );
            result.appendTimestamp( "oldVersion" , oldVersion );
            errmsg = "dropping needs to be authoritative";
            return false;
        }
        log() << "wiping data for: " << ns << endl;
        result.appendTimestamp( "beforeDrop" , globalVersion );
        // only setting global version on purpose
        // need clients to re-find meta-data
        globalVersion = 0;
        oldVersion = 0;
        return true;
    }

    if ( version < oldVersion ) {
        errmsg = "you already have a newer version";
        result.appendTimestamp( "oldVersion" , oldVersion );
        result.appendTimestamp( "newVersion" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    if ( version < globalVersion ) {
        while ( shardingState.inCriticalMigrateSection() ) {
            dbtemprelease r;
            sleepmillis(2);
            log() << "waiting till out of critical section" << endl;
        }
        errmsg = "trying to set an older version than the global version";
        result.appendTimestamp( "version" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ) {
        // need authoritative for first look
        result.appendBool( "need_authoritative" , true );
        result.append( "ns" , ns );
        errmsg = "first time for this ns";
        return false;
    }

    // SERVER-1633
    laps.push_back( timer.millis() );

    {
        dbtemprelease unlock;
        shardingState.getChunkMatcher( ns );
    }

    result.appendTimestamp( "oldVersion" , oldVersion );
    oldVersion = version;
    globalVersion = version;

    // SERVER-1633
    ostringstream lapString;
    lapString << name /* command name */ << " partials: ";
    for ( size_t i = 1; i < laps.size(); ++i ) {
        lapString << ( laps[i] - laps[i-1] ) / 1000 << " ";
    }
    lapString << endl;
    logIfSlow( timer, lapString.str() );

    result.append( "ok" , 1 );
    return true;
}
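// A note on the SERVER-1633 instrumentation above: laps stores cumulative
// timer.millis() readings at five checkpoints, so the loop emits four
// inter-checkpoint deltas. Each delta is divided by 1000, meaning the
// "partials" print in whole seconds even though they are measured in
// milliseconds. A hypothetical slow run might therefore log:
//
//   setShardVersion partials: 2 0 0 0
//
// logIfSlow() (defined elsewhere) presumably emits the line only when the
// overall timer exceeds the slow-operation threshold.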
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {

    // Steps
    // 1. check basic config
    // 2. extract params from command
    // 3. fast check
    // 4. slow check (LOCKS)

    // step 1
    lastError.disableForCommand();
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    // make sure we have the mongos id for writebacks
    if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
        return false;

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    // check config server is ok or enable sharding
    if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
        return false;

    // check shard name/hosts are correct
    if ( cmdObj["shard"].type() == String ) {
        shardingState.gotShardName( cmdObj["shard"].String() );
    }

    // Handle initial shard connection
    if ( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ) {
        result.append( "initialized", true );

        // Send back wire version to let mongos know what protocol we can speak
        result.append( "minWireVersion", minWireVersion );
        result.append( "maxWireVersion", maxWireVersion );

        return true;
    }

    // we can run on a slave up to here
    if ( ! isMaster( "admin" ) ) {
        result.append( "errmsg" , "not master" );
        result.append( "note" , "from post init in setShardVersion" );
        return false;
    }

    // step 2
    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify namespace";
        return false;
    }

    if ( ! ChunkVersion::canParseBSON( cmdObj, "version" ) ) {
        errmsg = "need to specify version";
        return false;
    }

    const ChunkVersion version = ChunkVersion::fromBSON( cmdObj, "version" );

    // step 3
    const ChunkVersion oldVersion = info->getVersion(ns);
    const ChunkVersion globalVersion = shardingState.getVersion(ns);

    oldVersion.addToBSON( result, "oldVersion" );

    if ( globalVersion.isSet() && version.isSet() ) {
        // this means there is no reset going on on either side
        // so it's safe to make some assumptions

        if ( version.isWriteCompatibleWith( globalVersion ) ) {
            // mongos and mongod agree!
            if ( ! oldVersion.isWriteCompatibleWith( version ) ) {
                if ( oldVersion < globalVersion &&
                     oldVersion.hasCompatibleEpoch( globalVersion ) ) {
                    info->setVersion( ns , version );
                }
                else if ( authoritative ) {
                    // this means there was a drop and our version is reset
                    info->setVersion( ns , version );
                }
                else {
                    result.append( "ns" , ns );
                    result.appendBool( "need_authoritative" , true );
                    errmsg = "verifying drop on '" + ns + "'";
                    return false;
                }
            }
            return true;
        }

    }

    // step 4

    // this is because of a weird segfault I saw and I can't see why this should ever be set
    massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );

    if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
        // this had been reset
        info->setVersion( ns , ChunkVersion( 0, OID() ) );
    }

    if ( ! version.isSet() && ! globalVersion.isSet() ) {
        // this connection is cleaning itself
        info->setVersion( ns , ChunkVersion( 0, OID() ) );
        return true;
    }

    // Cases below all either return OR fall through to remote metadata reload.
    if ( version.isSet() || ! globalVersion.isSet() ) {
        // Not Dropping

        // TODO: Refactor all of this
        if ( version < oldVersion && version.hasCompatibleEpoch( oldVersion ) ) {
            errmsg = "this connection already had a newer version of collection '" + ns + "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "newVersion" );
            globalVersion.addToBSON( result, "globalVersion" );
            return false;
        }

        // TODO: Refactor all of this
        if ( version < globalVersion && version.hasCompatibleEpoch( globalVersion ) ) {
            while ( shardingState.inCriticalMigrateSection() ) {
                log() << "waiting till out of critical section" << endl;
                shardingState.waitTillNotInCriticalSection( 10 );
            }
            errmsg = "shard global version for collection '" + ns + "' is higher than the version being set";
            result.append( "ns" , ns );
            version.addToBSON( result, "version" );
            globalVersion.addToBSON( result, "globalVersion" );
            result.appendBool( "reloadConfig" , true );
            return false;
        }

        if ( ! globalVersion.isSet() && ! authoritative ) {
            // Needed b/c when the last chunk is moved off a shard, the version gets reset to zero, which
            // should require a reload.
            while ( shardingState.inCriticalMigrateSection() ) {
                log() << "waiting till out of critical section" << endl;
                shardingState.waitTillNotInCriticalSection( 10 );
            }

            // need authoritative for first look
            result.append( "ns" , ns );
            result.appendBool( "need_authoritative" , true );
            errmsg = "first time for collection '" + ns + "'";
            return false;
        }

        // Fall through to metadata reload below
    }
    else {
        // Dropping

        if ( ! authoritative ) {
            result.appendBool( "need_authoritative" , true );
            result.append( "ns" , ns );
            globalVersion.addToBSON( result, "globalVersion" );
            errmsg = "dropping needs to be authoritative";
            return false;
        }

        // Fall through to metadata reload below
    }

    ChunkVersion currVersion;
    Status status = shardingState.refreshMetadataIfNeeded( ns, version, &currVersion );

    if ( ! status.isOK() ) {
        // The reload itself was interrupted or confused here
        errmsg = str::stream() << "could not refresh metadata for " << ns
                               << " with requested shard version " << version.toString()
                               << ", stored shard version is " << currVersion.toString()
                               << causedBy( status.reason() );

        warning() << errmsg << endl;

        result.append( "ns" , ns );
        version.addToBSON( result, "version" );
        currVersion.addToBSON( result, "globalVersion" );
        result.appendBool( "reloadConfig", true );

        return false;
    }
    else if ( ! version.isWriteCompatibleWith( currVersion ) ) {
        // We reloaded a version that doesn't match the version mongos was trying to
        // set.
        errmsg = str::stream() << "requested shard version differs from"
                               << " config shard version for " << ns
                               << ", requested version is " << version.toString()
                               << " but found version " << currVersion.toString();

        OCCASIONALLY warning() << errmsg << endl;

        // WARNING: the exact fields below are important for compatibility with mongos
        // version reload.

        result.append( "ns" , ns );
        currVersion.addToBSON( result, "globalVersion" );

        // If this was a reset of a collection or the last chunk moved out, inform mongos to
        // do a full reload.
        if ( currVersion.epoch() != version.epoch() || ! currVersion.isSet() ) {
            result.appendBool( "reloadConfig", true );
            // Zero-version also needed to trigger full mongos reload, sadly
            // TODO: Make this saner, and less impactful (full reload on last chunk is bad)
            ChunkVersion( 0, 0, OID() ).addToBSON( result, "version" );
            // For debugging
            version.addToBSON( result, "origVersion" );
        }
        else {
            version.addToBSON( result, "version" );
        }

        return false;
    }

    info->setVersion( ns , version );
    return true;
}
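// For reference, when the post-reload check above detects an epoch change or a
// reset-to-zero version, the reply it builds has this shape (values are
// illustrative; the field set comes directly from the code above):
//
//   { ok: 0, ns: "test.foo",
//     globalVersion: <currVersion>,
//     reloadConfig: true,
//     version: Timestamp(0, 0),         // zeroed on purpose: forces a full reload
//     origVersion: <requested version>, // for debugging only
//     errmsg: "requested shard version differs from config shard version ..." }
//
// As the WARNING comment in the code notes, mongos parses these exact fields,
// so their names and types are part of the wire compatibility contract.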
bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {

    // Steps
    // 1. check basic config
    // 2. extract params from command
    // 3. fast check
    // 4. slow check (LOCKS)

    // step 1
    lastError.disableForCommand();
    ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

    bool authoritative = cmdObj.getBoolField( "authoritative" );

    // check config server is ok or enable sharding
    if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
        return false;

    // check shard name/hosts are correct
    if ( cmdObj["shard"].type() == String ) {
        shardingState.gotShardName( cmdObj["shard"].String() );
        shardingState.gotShardHost( cmdObj["shardHost"].String() );
    }

    // make sure we have the mongos id for writebacks
    if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
        return false;

    // step 2
    string ns = cmdObj["setShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify namespace";
        return false;
    }

    const ConfigVersion version = extractVersion( cmdObj["version"] , errmsg );
    if ( errmsg.size() )
        return false;

    // step 3
    const ConfigVersion oldVersion = info->getVersion(ns);
    const ConfigVersion globalVersion = shardingState.getVersion(ns);

    result.appendTimestamp( "oldVersion" , oldVersion );

    if ( globalVersion > 0 && version > 0 ) {
        // this means there is no reset going on on either side
        // so it's safe to make some assumptions

        if ( version == globalVersion ) {
            // mongos and mongod agree!
            if ( oldVersion != version ) {
                assert( oldVersion < globalVersion );
                info->setVersion( ns , version );
            }
            return true;
        }

    }

    // step 4

    // this is because of a weird segfault I saw and I can't see why this should ever be set
    massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );

    dblock setShardVersionLock; // TODO: can we get rid of this??

    if ( oldVersion > 0 && globalVersion == 0 ) {
        // this had been reset
        info->setVersion( ns , 0 );
    }

    if ( version == 0 && globalVersion == 0 ) {
        // this connection is cleaning itself
        info->setVersion( ns , 0 );
        return true;
    }

    if ( version == 0 && globalVersion > 0 ) {
        if ( ! authoritative ) {
            result.appendBool( "need_authoritative" , true );
            result.append( "ns" , ns );
            result.appendTimestamp( "globalVersion" , globalVersion );
            errmsg = "dropping needs to be authoritative";
            return false;
        }
        log() << "wiping data for: " << ns << endl;
        result.appendTimestamp( "beforeDrop" , globalVersion );
        // only setting global version on purpose
        // need clients to re-find meta-data
        shardingState.resetVersion( ns );
        info->setVersion( ns , 0 );
        return true;
    }

    if ( version < oldVersion ) {
        errmsg = "this connection already had a newer version of collection '" + ns + "'";
        result.append( "ns" , ns );
        result.appendTimestamp( "newVersion" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        return false;
    }

    if ( version < globalVersion ) {
        while ( shardingState.inCriticalMigrateSection() ) {
            dbtemprelease r;
            sleepmillis(2);
            OCCASIONALLY log() << "waiting till out of critical section" << endl;
        }
        errmsg = "shard global version for collection '" + ns + "' is higher than the version being set";
        result.append( "ns" , ns );
        result.appendTimestamp( "version" , version );
        result.appendTimestamp( "globalVersion" , globalVersion );
        result.appendBool( "reloadConfig" , true );
        return false;
    }

    if ( globalVersion == 0 && ! authoritative ) {
        // need authoritative for first look
        result.append( "ns" , ns );
        result.appendBool( "need_authoritative" , true );
        errmsg = "first time for collection '" + ns + "'";
        return false;
    }

    Timer relockTime;
    {
        dbtemprelease unlock;

        ShardChunkVersion currVersion = version;
        if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
            errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
            result.append( "ns" , ns );
            result.appendTimestamp( "version" , version );
            result.appendTimestamp( "globalVersion" , currVersion );
            return false;
        }
    }
    if ( relockTime.millis() >= ( cmdLine.slowMS - 10 ) ) {
        log() << "setShardVersion - relocking slow: " << relockTime.millis() << endl;
    }

    info->setVersion( ns , version );
    return true;
}
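// Inferred contract of shardingState.trySetVersion(), based only on its usage
// above (the declaration lives elsewhere): currVersion is an in/out parameter.
// It is seeded with the version mongos requested and, on failure, ends up
// holding the version the config data actually shows, which is why it is
// echoed back to the caller as "globalVersion":
//
//   ShardChunkVersion currVersion = version;            // what mongos asked for
//   bool ok = shardingState.trySetVersion( ns , currVersion );
//   // ok == true  -> shard accepted/refreshed to the requested version
//   // ok == false -> mismatch; currVersion now reflects the config's version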