/**
 * Attempts to install the requested shard version for namespace 'ns' on this shard.
 *
 * @param ns       the namespace whose version is being set
 * @param version  IN: the version the caller wants; OUT: the version actually
 *                 installed (reloaded from the config server on the slow path)
 * @return true if, after this call, the installed version is equivalent to the
 *         version the caller requested; false otherwise (caller's view is stale)
 *
 * Thread-safety: takes a config-server ticket for the duration of the call, and
 * holds _mutex only around map reads/writes — the ShardChunkManager reload itself
 * is deliberately performed unlocked (see comment below).
 */
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {

    // Currently this function is called after a getVersion(), which is the first "check", and the assumption here
    // is that we don't do anything nearly as long as a remote query in a thread between then and now.
    // Otherwise it may be worth adding an additional check without the _configServerMutex below, since then it
    // would be likely that the version may have changed in the meantime without waiting for or fetching config results.

    // TODO: Mutex-per-namespace?

    LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;

    // Throttle the number of concurrent config-server reloads; the RAII releaser
    // returns the ticket on every exit path from this function.
    _configServerTickets.waitForTicket();
    TicketHolderReleaser needTicketFrom( &_configServerTickets );

    // fast path - double-check if requested version is at the same version as this chunk manager before verifying
    // against config server
    //
    // This path will short-circuit the version set if another thread already managed to update the version in the
    // meantime. First check is from getVersion().
    //
    // cases:
    //   + this shard updated the version for a migrate's commit (FROM side)
    //     a client reloaded chunk state from config and picked the newest version
    //   + two clients reloaded
    //     one triggered the 'slow path' (below)
    //     when the second's request gets here, the version is already current
    ConfigVersion storedVersion;
    ShardChunkManagerPtr currManager;
    {
        scoped_lock lk( _mutex );
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        // Remember the current manager (if any) so the slow-path reload below can
        // start from it; note storedVersion is assigned as a side effect of the
        // second condition, so it stays default-constructed when no manager exists.
        if ( it != _chunks.end() ) currManager = it->second;
        if ( it != _chunks.end() && ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
            return true;
    }

    LOG( 2 ) << "verifying cached version " << storedVersion.toString()
             << " and new version " << version.toString() << " for '" << ns << "'" << endl;

    // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
    // newest version in the config server
    //
    // cases:
    //   + a chunk moved TO here
    //     (we don't bump up the version on the TO side but the commit to config does use higher version)
    //     a client reloads from config an issued the request
    //   + there was a take over from a secondary
    //     the secondary had no state (managers) at all, so every client request will fall here
    //   + a stale client request a version that's not current anymore

    // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
    const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;

    // Reload chunk metadata (from config server, or locally when this shard hosts
    // the config data); done WITHOUT holding _mutex — see comment above.
    ShardChunkManagerPtr p( new ShardChunkManager( c , ns , _shardName, currManager ) );
    {
        scoped_lock lk( _mutex );

        // since we loaded the chunk manager unlocked, other thread may have done the same
        // make sure we keep the freshest config info only
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
            _chunks[ns] = p;
        }

        // Report the freshly-loaded version back to the caller via the IN-OUT
        // parameter; success means the caller already asked for that version.
        ShardChunkVersion oldVersion = version;
        version = p->getVersion();
        return oldVersion.isEquivalentTo( version );
    }
}
/**
 * Attempts to install the requested shard version for namespace 'ns' on this shard.
 *
 * @param ns       the namespace whose version is being set
 * @param version  IN: the version the caller wants; OUT: the version actually
 *                 installed, or ConfigVersion( 0, OID() ) when the collection
 *                 turns out not to be sharded (no shard key found on reload)
 * @return true if, after this call, the installed version is equivalent to the
 *         version the caller requested; false otherwise (stale caller, or no
 *         sharding data could be loaded for this collection)
 *
 * Thread-safety: takes a config-server ticket for the duration of the call; the
 * metadata reload happens unlocked, and the final install is done under
 * Lock::DBRead(ns) followed by _mutex — that acquisition order must be preserved.
 */
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {

    // Currently this function is called after a getVersion(), which is the first "check", and the assumption here
    // is that we don't do anything nearly as long as a remote query in a thread between then and now.
    // Otherwise it may be worth adding an additional check without the _configServerMutex below, since then it
    // would be likely that the version may have changed in the meantime without waiting for or fetching config results.

    // TODO: Mutex-per-namespace?

    LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;

    // Throttle the number of concurrent config-server reloads; the RAII releaser
    // returns the ticket on every exit path from this function.
    _configServerTickets.waitForTicket();
    TicketHolderReleaser needTicketFrom( &_configServerTickets );

    // fast path - double-check if requested version is at the same version as this chunk manager before verifying
    // against config server
    //
    // This path will short-circuit the version set if another thread already managed to update the version in the
    // meantime. First check is from getVersion().
    //
    // cases:
    //   + this shard updated the version for a migrate's commit (FROM side)
    //     a client reloaded chunk state from config and picked the newest version
    //   + two clients reloaded
    //     one triggered the 'slow path' (below)
    //     when the second's request gets here, the version is already current
    ConfigVersion storedVersion;
    ShardChunkManagerPtr currManager;
    {
        scoped_lock lk( _mutex );
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if( it == _chunks.end() ){
            // TODO: We need better semantic distinction between *no manager found* and
            // *manager of version zero found*
            log() << "no current chunk manager found for this shard, will initialize" << endl;
        }
        else{
            // Keep the current manager so the slow-path reload can diff against it;
            // storedVersion is assigned as a side effect of the condition below.
            currManager = it->second;
            if( ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
                return true;
        }
    }

    LOG( 2 ) << "verifying cached version " << storedVersion.toString()
             << " and new version " << version.toString() << " for '" << ns << "'" << endl;

    // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
    // newest version in the config server
    //
    // cases:
    //   + a chunk moved TO here
    //     (we don't bump up the version on the TO side but the commit to config does use higher version)
    //     a client reloads from config an issued the request
    //   + there was a take over from a secondary
    //     the secondary had no state (managers) at all, so every client request will fall here
    //   + a stale client request a version that's not current anymore

    // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
    const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;

    // If our epochs aren't compatible, it's not useful to use the old manager for chunk diffs
    // (an epoch change means the collection was dropped/recreated or resharded, so
    // a full reload is needed — presumably ShardChunkManager::make treats a null
    // currManager as "load from scratch"; confirm against its implementation).
    if( currManager && ! currManager->getCollVersion().hasCompatibleEpoch( version ) ){
        warning() << "detected incompatible version epoch in new version " << version
                  << ", old version was " << currManager->getCollVersion() << endl;
        currManager.reset();
    }

    // Reload chunk metadata (from config server, or locally when this shard hosts
    // the config data); done WITHOUT holding _mutex — see comment above.
    ShardChunkManagerPtr p( ShardChunkManager::make( c , ns , _shardName, currManager ) );

    // Handle the case where the collection isn't sharded more gracefully
    if( p->getKey().isEmpty() ){
        // Signal "no sharding data" back to the caller via the IN-OUT parameter.
        version = ConfigVersion( 0, OID() );
        // There was an error getting any data for this collection, return false
        return false;
    }

    {
        // NOTE: This lock prevents the ns version from changing while a write operation occurs.
        Lock::DBRead readLk(ns);

        // This lock prevents simultaneous metadata changes using the same map
        scoped_lock lk( _mutex );

        // since we loaded the chunk manager unlocked, other thread may have done the same
        // make sure we keep the freshest config info only
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
            _chunks[ns] = p;
        }

        // Report the freshly-loaded version back to the caller via the IN-OUT
        // parameter; success means the caller already asked for that version.
        ChunkVersion oldVersion = version;
        version = p->getVersion();
        return oldVersion.isEquivalentTo( version );
    }
}