Example #1
    Status ShardingState::refreshMetadataIfNeeded( const string& ns,
                                                   const ChunkVersion& reqShardVersion,
                                                   ChunkVersion* latestShardVersion )
    {
        // The _configServerTickets serializes this process such that only a small number of threads
        // can try to refresh at the same time.

        LOG( 2 ) << "metadata refresh requested for " << ns << " at shard version "
                 << reqShardVersion << endl;

        //
        // Queuing of refresh requests starts here when remote reload is needed. This may take time.
        // TODO: Explicitly expose the queuing discipline.
        //

        _configServerTickets.waitForTicket();
        TicketHolderReleaser needTicketFrom( &_configServerTickets );

        //
        // Fast path - check whether the requested version is higher than the current metadata
        // version, or has a different epoch, before verifying against the config server.
        //

        CollectionMetadataPtr storedMetadata;
        {
            scoped_lock lk( _mutex );
            CollectionMetadataMap::iterator it = _collMetadata.find( ns );
            if ( it != _collMetadata.end() ) storedMetadata = it->second;
        }
        ChunkVersion storedShardVersion;
        if ( storedMetadata ) storedShardVersion = storedMetadata->getShardVersion();
        *latestShardVersion = storedShardVersion;

        if ( storedShardVersion >= reqShardVersion &&
             storedShardVersion.epoch() == reqShardVersion.epoch() ) {

            // Don't need to remotely reload if we're in the same epoch with a >= version
            return Status::OK();
        }

        //
        // Slow path - remotely reload
        //
        // Cases:
        // A) Initial config load and/or secondary take-over.
        // B) Migration TO this shard finished, notified by mongos.
        // C) Dropping a collection, notified (currently) by mongos.
        // D) Stale client wants to reload metadata with a different *epoch*, so we aren't sure.

        if ( storedShardVersion.epoch() != reqShardVersion.epoch() ) {
            // Need to remotely reload if our epochs aren't the same, to verify
            LOG( 1 ) << "metadata change requested for " << ns << ", from shard version "
                     << storedShardVersion << " to " << reqShardVersion
                     << ", need to verify with config server" << endl;
        }
        else {
            // Need to remotely reload since our epochs are the same but the requested version is greater
            LOG( 1 ) << "metadata version update requested for " << ns
                     << ", from shard version " << storedShardVersion << " to " << reqShardVersion
                     << ", need to verify with config server" << endl;
        }

        return doRefreshMetadata( ns, reqShardVersion, true, latestShardVersion );
    }
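
The shape of refreshMetadataIfNeeded above is a throttled fast-path/slow-path refresh: a cheap version check under the cache mutex, and a remote reload gated by a small pool of config-server tickets. The sketch below is a minimal, self-contained illustration of that pattern only, using hypothetical stand-ins (TicketHolder, VersionCache, refreshIfNeeded, remoteReload) rather than the real MongoDB types.

    // Minimal sketch of the pattern above (hypothetical names, not MongoDB code):
    // a ticket pool bounds the number of threads that may attempt a (slow) remote
    // refresh, and a cheap check under the cache mutex short-circuits when the
    // cached version already satisfies the request.
    #include <algorithm>
    #include <condition_variable>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    class TicketHolder {                                   // stand-in for _configServerTickets
    public:
        explicit TicketHolder( int n ) : _available( n ) {}
        void waitForTicket() {
            std::unique_lock<std::mutex> lk( _m );
            _cv.wait( lk, [&]{ return _available > 0; } );
            --_available;
        }
        void release() {
            std::lock_guard<std::mutex> lk( _m );
            ++_available;
            _cv.notify_one();
        }
    private:
        std::mutex _m;
        std::condition_variable _cv;
        int _available;
    };

    class VersionCache {                                   // hypothetical cache keyed by namespace
    public:
        // Returns true once the cached version for 'ns' is >= requestedVersion.
        bool refreshIfNeeded( const std::string& ns, long long requestedVersion ) {
            _tickets.waitForTicket();
            struct Releaser {                              // RAII release, like TicketHolderReleaser
                TicketHolder* t;
                ~Releaser() { t->release(); }
            } releaser{ &_tickets };

            {
                std::lock_guard<std::mutex> lk( _mutex );  // fast path: cheap check under the cache mutex
                auto it = _versions.find( ns );
                if ( it != _versions.end() && it->second >= requestedVersion )
                    return true;
            }

            long long reloaded = remoteReload( ns );       // slow path: remote fetch, outside the cache mutex

            std::lock_guard<std::mutex> lk( _mutex );
            _versions[ns] = std::max( _versions[ns], reloaded );
            return _versions[ns] >= requestedVersion;
        }
    private:
        long long remoteReload( const std::string& ) { return 1; }  // placeholder for the config server fetch
        TicketHolder _tickets{ 3 };
        std::mutex _mutex;
        std::unordered_map<std::string, long long> _versions;
    };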
Example #2
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {

    // Currently this function is called after a getVersion(), which is the first "check", and the assumption here
    // is that nothing in this thread between then and now takes nearly as long as a remote query.
    // Otherwise it may be worth adding another check before waiting on _configServerTickets below, since the
    // version may well have changed in the meantime and we could avoid waiting for or fetching config results.

    // TODO:  Mutex-per-namespace?

    LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;

    _configServerTickets.waitForTicket();
    TicketHolderReleaser needTicketFrom( &_configServerTickets );

    // fast path - double-check whether the requested version is the same as this chunk manager's before verifying
    // against the config server
    //
    // This path will short-circuit the version set if another thread already managed to update the version in the
    // meantime.  First check is from getVersion().
    //
    // cases:
    //   + this shard updated the version for a migrate's commit (FROM side)
    //     a client reloaded chunk state from config and picked the newest version
    //   + two clients reloaded
    //     one triggered the 'slow path' (below)
    //     when the second's request gets here, the version is already current
    ConfigVersion storedVersion;
    ShardChunkManagerPtr currManager;
    {
        scoped_lock lk( _mutex );
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it != _chunks.end() ) currManager = it->second;
        if ( it != _chunks.end() && ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
            return true;
    }

    LOG( 2 ) << "verifying cached version " << storedVersion.toString() << " and new version " << version.toString() << " for '" << ns << "'" << endl;

    // slow path - the requested version is different from the current chunk manager's, if one exists, so we must
    // check for the newest version in the config server
    //
    // cases:
    //   + a chunk moved TO here
    //     (we don't bump up the version on the TO side but the commit to config does use a higher version)
    //     a client reloads from config and issues the request
    //   + there was a takeover from a secondary
    //     the secondary had no state (managers) at all, so every client request will fall here
    //   + a stale client requests a version that's not current anymore

    // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
    const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;
    ShardChunkManagerPtr p( new ShardChunkManager( c , ns , _shardName, currManager ) );

    {
        scoped_lock lk( _mutex );

        // since we loaded the chunk manager unlocked, another thread may have done the same
        // make sure we keep only the freshest config info
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
            _chunks[ns] = p;
        }

        ShardChunkVersion oldVersion = version;
        version = p->getVersion();
        return oldVersion.isEquivalentTo( version );
    }
}
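
The core of trySetVersion is a double-checked reload: check the cached version under the mutex, build the new chunk manager with the mutex released (it may need a connection back to this same process), then re-check under the mutex and keep only the freshest manager. Below is a minimal sketch of just that pattern, assuming hypothetical types (Manager, ManagerRegistry, loadVersionFromConfig) rather than ShardChunkManager itself.

// Minimal sketch of the double-checked reload in trySetVersion above (hypothetical
// types, not MongoDB code): the expensive load runs without the map mutex held,
// and its result is installed only if no other thread installed a fresher one in
// the meantime.
#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Manager {
    long long version;                                 // stands in for ShardChunkManager's shard version
};

class ManagerRegistry {
public:
    // Returns true if the version actually in effect equals the requested one;
    // 'version' is in-out, as in trySetVersion.
    bool trySetVersion( const std::string& ns, long long& version ) {
        {
            std::lock_guard<std::mutex> lk( _mutex );  // fast path: already at the requested version?
            auto it = _managers.find( ns );
            if ( it != _managers.end() && it->second->version == version )
                return true;
        }

        // slow path: build the new manager without holding the mutex, since the
        // load may itself need to open a connection back to this process
        auto fresh = std::make_shared<Manager>( Manager{ loadVersionFromConfig( ns ) } );

        std::lock_guard<std::mutex> lk( _mutex );
        auto it = _managers.find( ns );
        if ( it == _managers.end() || fresh->version >= it->second->version )
            _managers[ns] = fresh;                     // keep only the freshest info

        long long requested = version;
        version = fresh->version;                      // report the freshly loaded version
        return requested == version;
    }
private:
    long long loadVersionFromConfig( const std::string& ) { return 1; }  // placeholder for the config read
    std::mutex _mutex;
    std::map<std::string, std::shared_ptr<Manager>> _managers;
};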
Example #3
    bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {

        // Currently this function is called after a getVersion(), which is the first "check", and the assumption here
        // is that nothing in this thread between then and now takes nearly as long as a remote query.
        // Otherwise it may be worth adding another check before waiting on _configServerTickets below, since the
        // version may well have changed in the meantime and we could avoid waiting for or fetching config results.

        // TODO:  Mutex-per-namespace?
        
        LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;
        
        _configServerTickets.waitForTicket();
        TicketHolderReleaser needTicketFrom( &_configServerTickets );

        // fast path - double-check whether the requested version is the same as this chunk manager's before verifying
        // against the config server
        //
        // This path will short-circuit the version set if another thread already managed to update the version in the
        // meantime.  First check is from getVersion().
        //
        // cases:
        //   + this shard updated the version for a migrate's commit (FROM side)
        //     a client reloaded chunk state from config and picked the newest version
        //   + two clients reloaded
        //     one triggered the 'slow path' (below)
        //     when the second's request gets here, the version is already current
        ConfigVersion storedVersion;
        ShardChunkManagerPtr currManager;
        {
            scoped_lock lk( _mutex );
            ChunkManagersMap::const_iterator it = _chunks.find( ns );
            if( it == _chunks.end() ){

                // TODO: We need better semantic distinction between *no manager found* and
                // *manager of version zero found*
                log() << "no current chunk manager found for this shard, will initialize" << endl;
            }
            else{
                currManager = it->second;
                if( ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
                    return true;
            }
        }
        
        LOG( 2 ) << "verifying cached version " << storedVersion.toString() << " and new version " << version.toString() << " for '" << ns << "'" << endl;

        // slow path - the requested version is different from the current chunk manager's, if one exists, so we must
        // check for the newest version in the config server
        //
        // cases:
        //   + a chunk moved TO here
        //     (we don't bump up the version on the TO side but the commit to config does use a higher version)
        //     a client reloads from config and issues the request
        //   + there was a takeover from a secondary
        //     the secondary had no state (managers) at all, so every client request will fall here
        //   + a stale client requests a version that's not current anymore

        // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
        const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;

        // If our epochs aren't compatible, it's not useful to use the old manager for chunk diffs
        if( currManager && ! currManager->getCollVersion().hasCompatibleEpoch( version ) ){

            warning() << "detected incompatible version epoch in new version " << version
                      << ", old version was " << currManager->getCollVersion() << endl;

            currManager.reset();
        }

        ShardChunkManagerPtr p( ShardChunkManager::make( c , ns , _shardName, currManager ) );

        // Handle the case where the collection isn't sharded more gracefully
        if( p->getKey().isEmpty() ){
            version = ConfigVersion( 0, OID() );
            // There was an error getting any data for this collection, return false
            return false;
        }

        {
            // NOTE: This lock prevents the ns version from changing while a write operation occurs.
            Lock::DBRead readLk(ns);
            
            // This lock prevents simultaneous metadata changes using the same map
            scoped_lock lk( _mutex );

            // since we loaded the chunk manager unlocked, another thread may have done the same
            // make sure we keep only the freshest config info
            ChunkManagersMap::const_iterator it = _chunks.find( ns );
            if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
                _chunks[ns] = p;
            }

            ChunkVersion oldVersion = version;
            version = p->getVersion();
            return oldVersion.isEquivalentTo( version );
        }
    }
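
Compared with Example #2, this revision discards the cached manager when its epoch is incompatible with the requested version, so it is never used as a diff base across a drop/recreate, and it bails out explicitly when the collection turns out not to be sharded. The sketch below isolates just the epoch guard, with hypothetical names (Version, CachedManager, diffBaseFor) standing in for the real types.

    // Minimal sketch of the epoch guard added in this revision (hypothetical types,
    // not MongoDB code): a cached manager is only reused as the base for an
    // incremental reload when its epoch matches the requested version's epoch;
    // otherwise it is discarded and the metadata is rebuilt from scratch.
    #include <memory>

    struct Version {
        long long major;
        long long epoch;                               // stands in for the OID epoch
        bool hasCompatibleEpoch( const Version& other ) const { return epoch == other.epoch; }
    };

    struct CachedManager {
        Version collVersion;
    };

    // Returns the manager to use as a diff base, or null when the epochs diverged
    // (e.g. the collection was dropped and recreated) and a full reload is needed.
    std::shared_ptr<CachedManager> diffBaseFor( std::shared_ptr<CachedManager> current,
                                                const Version& requested ) {
        if ( current && !current->collVersion.hasCompatibleEpoch( requested ) )
            current.reset();                           // incompatible epoch: start from nothing
        return current;
    }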