// Removes every entry for the named shard from both lookup tables.
void remove( const string& name ) {
    scoped_lock lk( _mutex );

    // Erase all host entries pointing at this shard. The post-increment
    // advances the iterator before erase() invalidates it.
    for ( ShardMap::iterator i = _lookup.begin(); i != _lookup.end(); ) {
        ShardPtr s = i->second;
        if ( s->getName() == name ) {
            _lookup.erase(i++);
        }
        else {
            ++i;
        }
    }

    // Same cleanup for the replica-set name lookup.
    for ( ShardMap::iterator i = _rsLookup.begin(); i != _rsLookup.end(); ) {
        ShardPtr s = i->second;
        if ( s->getName() == name ) {
            _rsLookup.erase(i++);
        }
        else {
            ++i;
        }
    }
}
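// A minimal standalone sketch (not part of this translation unit) of the
// erase-while-iterating idiom used in remove() above: std::map::erase
// invalidates only the erased iterator, so the loop advances with a
// post-increment (`m.erase(i++)`) before the node disappears. The map
// name and contents here are hypothetical.
#if 0
#include <iostream>
#include <map>
#include <string>

int main() {
    std::map<std::string, std::string> lookup;
    lookup["host-a:27017"] = "shard0000";
    lookup["host-b:27017"] = "shard0001";
    lookup["host-c:27017"] = "shard0000";

    const std::string name = "shard0000"; // shard being removed

    for ( std::map<std::string, std::string>::iterator i = lookup.begin();
          i != lookup.end(); ) {
        if ( i->second == name ) {
            lookup.erase( i++ ); // i advances first, then the old node is erased
        }
        else {
            ++i;
        }
    }

    std::cout << lookup.size() << " entry remains" << std::endl; // prints 1
    return 0;
}
#endif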
bool run( const string& dbname,
          BSONObj& cmdObj,
          int,
          string& errmsg,
          BSONObjBuilder& result,
          bool ) {

    string ns;
    if ( !FieldParser::extract( cmdObj, nsField, &ns, &errmsg ) ) {
        return false;
    }

    if ( ns.size() == 0 ) {
        errmsg = "no namespace specified";
        return false;
    }

    vector<BSONObj> bounds;
    if ( !FieldParser::extract( cmdObj, boundsField, &bounds, &errmsg ) ) {
        return false;
    }

    if ( bounds.size() == 0 ) {
        errmsg = "no bounds were specified";
        return false;
    }

    if ( bounds.size() != 2 ) {
        errmsg = "only a min and max bound may be specified";
        return false;
    }

    BSONObj minKey = bounds[0];
    BSONObj maxKey = bounds[1];

    if ( minKey.isEmpty() ) {
        errmsg = "no min key specified";
        return false;
    }

    if ( maxKey.isEmpty() ) {
        errmsg = "no max key specified";
        return false;
    }

    // Route the merge to the shard that owns the start of the range.
    ShardPtr mergeShard = guessMergeShard( NamespaceString( ns ), minKey );

    if ( !mergeShard ) {
        errmsg = (string)"could not find shard for merge range starting at "
                 + minKey.toString();
        return false;
    }

    // Forward the namespace and bounds as-is, and tell the shard which
    // config server(s) to use and which shard it is.
    BSONObjBuilder remoteCmdObjB;
    remoteCmdObjB.append( cmdObj[ MergeChunksPassCommand::nsField() ] );
    remoteCmdObjB.append( cmdObj[ MergeChunksPassCommand::boundsField() ] );
    remoteCmdObjB.append( MergeChunksPassCommand::configField(),
                          configServer.getPrimary().getAddress().toString() );
    remoteCmdObjB.append( MergeChunksPassCommand::shardNameField(),
                          mergeShard->getName() );

    BSONObj remoteResult;

    // Throws, but handled at level above. Don't want to rewrap to preserve exception
    // formatting.
    ScopedDbConnection conn( mergeShard->getAddress() );
    bool ok = conn->runCommand( "admin", remoteCmdObjB.obj(), remoteResult );
    conn.done();

    // Always refresh our chunks afterwards
    refreshChunkCache( NamespaceString( ns ) );

    result.appendElements( remoteResult );
    return ok;
}
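// A minimal standalone sketch (not part of this translation unit) of the
// bounds validation that run() above performs before forwarding the command:
// exactly two bounds, a non-empty min key followed by a non-empty max key.
// Plain strings stand in for BSONObj, and validateBounds is a hypothetical
// helper name.
#if 0
#include <iostream>
#include <string>
#include <vector>

static bool validateBounds( const std::vector<std::string>& bounds,
                            std::string& errmsg ) {
    if ( bounds.empty() ) {
        errmsg = "no bounds were specified";
        return false;
    }
    if ( bounds.size() != 2 ) {
        errmsg = "only a min and max bound may be specified";
        return false;
    }
    if ( bounds[0].empty() ) {
        errmsg = "no min key specified";
        return false;
    }
    if ( bounds[1].empty() ) {
        errmsg = "no max key specified";
        return false;
    }
    return true;
}

int main() {
    std::string errmsg;
    std::vector<std::string> bounds;
    bounds.push_back( "{ x: 0 }" ); // only a min bound supplied

    if ( !validateBounds( bounds, errmsg ) ) {
        std::cout << errmsg << std::endl; // "only a min and max bound may be specified"
    }
    return 0;
}
#endif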
/**
 * Updates the remote cached version on the remote shard host (primary, in the case of replica
 * sets) if needed with a fully-qualified shard version for the given namespace:
 *   config server(s) + shard name + shard version
 *
 * If no remote cached version has ever been set, an initial shard version is sent.
 *
 * If the namespace is empty and no version has ever been sent, the config server + shard name
 * is sent to the remote shard host to initialize the connection as coming from mongos.
 * NOTE: This initialization is *best-effort only*. Operations which wish to correctly version
 * must send the namespace.
 *
 * Config servers are special and are not (unless otherwise a shard) kept up to date with this
 * protocol. This is safe so long as config servers only contain unversioned collections.
 *
 * It is an error to call checkShardVersion with an unversionable connection (isVersionableCB).
 *
 * @return true if we contacted the remote host
 */
bool checkShardVersion( DBClientBase* conn_in,
                        const string& ns,
                        ChunkManagerPtr refManager,
                        bool authoritative,
                        int tryNumber ) {
    // TODO: cache, optimize, etc...

    // Empty namespaces are special - we require initialization but not versioning
    if ( ns.size() == 0 ) {
        return initShardVersionEmptyNS( conn_in );
    }

    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( !conf ) return false;

    DBClientBase* conn = getVersionable( conn_in );
    verify( conn ); // errors thrown above

    unsigned long long officialSequenceNumber = 0;

    ShardPtr primary;
    ChunkManagerPtr manager;
    if ( authoritative )
        conf->getChunkManagerIfExists( ns, true ); // force a reload of the chunk manager
    conf->getChunkManagerOrPrimary( ns, manager, primary );

    if ( manager ) officialSequenceNumber = manager->getSequenceNumber();

    // Check this manager against the reference manager
    if ( manager ) {
        Shard shard = Shard::make( conn->getServerAddress() );
        if ( refManager && !refManager->compatibleWith( *manager, shard.getName() ) ) {
            const ChunkVersion refVersion( refManager->getVersion( shard.getName() ) );
            const ChunkVersion currentVersion( manager->getVersion( shard.getName() ) );
            string msg( str::stream()
                        << "manager (" << currentVersion.toString()
                        << " : " << manager->getSequenceNumber() << ") "
                        << "not compatible with reference manager ("
                        << refVersion.toString()
                        << " : " << refManager->getSequenceNumber() << ") "
                        << "on shard " << shard.getName()
                        << " (" << shard.getAddress().toString() << ")" );

            throw SendStaleConfigException( ns, msg, refVersion, currentVersion );
        }
    }
    else if ( refManager ) {
        Shard shard = Shard::make( conn->getServerAddress() );
        string msg( str::stream()
                    << "not sharded ("
                    << ( ( manager.get() == 0 ) ? string( "<none>" )
                                                : str::stream() << manager->getSequenceNumber() )
                    << ") but has reference manager ("
                    << refManager->getSequenceNumber() << ") "
                    << "on conn " << conn->getServerAddress()
                    << " (" << conn_in->getServerAddress() << ")" );

        throw SendStaleConfigException( ns, msg,
                                        refManager->getVersion( shard.getName() ),
                                        ChunkVersion::UNSHARDED() );
    }

    // Do not send setShardVersion to collections on the config servers - this causes problems
    // when config servers are also shards and get SSV with conflicting names.
    // TODO: Make config servers regular shards
    if ( primary && primary->getName() == "config" ) {
        return false;
    }

    // Has the ChunkManager been reloaded since the last time we updated the shard version over
    // this connection? If we've never updated the shard version, do so now.
    unsigned long long sequenceNumber = 0;
    if ( connectionShardStatus.getSequence( conn, ns, &sequenceNumber ) ) {
        if ( sequenceNumber == officialSequenceNumber ) {
            return false;
        }
    }

    // Now that we're sure we're sending SSV and not to a single config server, get the shard
    Shard shard = Shard::make( conn->getServerAddress() );

    ChunkVersion version = ChunkVersion( 0, 0, OID() );
    if ( manager ) version = manager->getVersion( shard.getName() );

    LOG(1) << "setting shard version of " << version << " for " << ns << " on shard "
           << shard.toString();

    LOG(3) << "last version sent with chunk manager iteration " << sequenceNumber
           << ", current chunk manager iteration is " << officialSequenceNumber;

    BSONObj result;
    if ( setShardVersion( *conn, ns, configServer.modelServer(), version, manager.get(),
                          authoritative, result ) ) {

        LOG(1) << " setShardVersion success: " << result;
        connectionShardStatus.setSequence( conn, ns, officialSequenceNumber );
        return true;
    }

    LOG(1) << " setShardVersion failed!\n" << result << endl;

    if ( result["need_authoritative"].trueValue() )
        massert( 10428, "need_authoritative set but in authoritative mode already",
                 !authoritative );

    if ( !authoritative ) {
        // use the original connection and get a fresh versionable connection
        // since conn can be invalidated (or worse, freed) after the failure
        checkShardVersion( conn_in, ns, refManager, true /* authoritative */, tryNumber + 1 );
        return true;
    }

    if ( result["reloadConfig"].trueValue() ) {
        if ( result["version"].timestampTime() == 0 ) {
            warning() << "reloading full configuration for " << conf->name()
                      << ", connection state indicates significant version changes";

            // reload db
            conf->reload();
        }
        else {
            // reload config
            conf->getChunkManager( ns, true );
        }
    }

    const int maxNumTries = 7;
    if ( tryNumber < maxNumTries ) {
        // Log quietly for the first few tries, loudly after that.
        LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 )
            << "going to retry checkShardVersion shard: " << shard.toString() << " " << result;
        sleepmillis( 10 * tryNumber );
        // use the original connection and get a fresh versionable connection
        // since conn can be invalidated (or worse, freed) after the failure
        checkShardVersion( conn_in, ns, refManager, true, tryNumber + 1 );
        return true;
    }

    string errmsg = str::stream() << "setShardVersion failed shard: " << shard.toString()
                                  << " " << result;
    log() << " " << errmsg << endl;
    massert( 10429, errmsg, 0 );
    return true;
}
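// A minimal standalone sketch (not part of this translation unit) of the
// retry policy checkShardVersion() above applies around setShardVersion:
// a budget of maxNumTries attempts, a linearly growing sleep of
// 10ms * tryNumber between attempts, and log escalation once half the
// budget is spent. attemptSetShardVersion is a hypothetical stand-in for
// the real round trip, and the loop approximates the recursive retries.
#if 0
#include <chrono>
#include <iostream>
#include <thread>

static bool attemptSetShardVersion( int tryNumber ) {
    return tryNumber >= 3; // pretend the first two attempts fail
}

int main() {
    const int maxNumTries = 7;

    for ( int tryNumber = 1; tryNumber <= maxNumTries; ++tryNumber ) {
        if ( attemptSetShardVersion( tryNumber ) ) {
            std::cout << "setShardVersion succeeded on try " << tryNumber << std::endl;
            return 0;
        }

        // Escalate from debug-level to default-level logging halfway through,
        // mirroring LOG( tryNumber < ( maxNumTries / 2 ) ? 1 : 0 ).
        const char* level = tryNumber < ( maxNumTries / 2 ) ? "LOG(1)" : "LOG(0)";
        std::cout << level << ": going to retry, try " << tryNumber << std::endl;

        // Linear backoff, mirroring sleepmillis( 10 * tryNumber ).
        std::this_thread::sleep_for( std::chrono::milliseconds( 10 * tryNumber ) );
    }

    std::cout << "setShardVersion failed after " << maxNumTries << " tries" << std::endl;
    return 1;
}
#endif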