Example #1
        // Validates that the ranges and versions are valid given the chunks
        void validate( const BSONArray& chunks, RangeMap* ranges, ShardChunkVersion maxVersion, const VersionMap& maxShardVersions ){

            BSONObjIterator it( chunks );
            int chunkCount = 0;
            ShardChunkVersion foundMaxVersion;
            VersionMap foundMaxShardVersions;

            //
            // Validate that all the chunks are there and collect versions
            //

            while( it.more() ){

                BSONObj chunkDoc = it.next().Obj();
                chunkCount++;

                if( ranges != NULL ){

                    // log() << "Validating chunk " << chunkDoc << " size : " << ranges->size() << " vs " << chunkCount << endl;

                    RangeMap::iterator chunkRange = ranges->find( _inverse ? chunkDoc["max"].Obj() : chunkDoc["min"].Obj() );

                    ASSERT( chunkRange != ranges->end() );
                    ASSERT( chunkRange->second.woCompare( _inverse ? chunkDoc["min"].Obj() : chunkDoc["max"].Obj() ) == 0 );
                }

                ShardChunkVersion version = ShardChunkVersion::fromBSON( chunkDoc["lastmod"] );
                if( version > foundMaxVersion ) foundMaxVersion = version;

                ShardChunkVersion shardMaxVersion = foundMaxShardVersions[ chunkDoc["shard"].String() ];
                if( version > shardMaxVersion ) foundMaxShardVersions[ chunkDoc["shard"].String() ] = version;
            }
            // Make sure all chunks are accounted for
            if( ranges != NULL ) ASSERT( chunkCount == (int) ranges->size() );

            // log() << "Validating that all shard versions are up to date..." << endl;

            // Validate that all the versions are the same
            ASSERT( foundMaxVersion.isEquivalentTo( maxVersion ) );

            for( VersionMap::iterator it = foundMaxShardVersions.begin(); it != foundMaxShardVersions.end(); it++ ){

                ShardChunkVersion foundVersion = it->second;
                VersionMap::const_iterator maxIt = maxShardVersions.find( it->first );

                ASSERT( maxIt != maxShardVersions.end() );
                ASSERT( foundVersion.isEquivalentTo( maxIt->second ) );
            }
            // Make sure all shards are accounted for
            ASSERT( foundMaxShardVersions.size() == maxShardVersions.size() );
        }
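
For reference, here is a rough sketch of the inputs this helper expects. It is not taken from the test file: the exact RangeMap typedef and the chunk field values are assumptions, while the field names ("min", "max", "lastmod", "shard") and the min-to-max mapping come from the lookups above.

// Hypothetical setup (illustrative values only), assuming RangeMap is a map from each
// expected chunk's min key to its max key (max to min when _inverse is set).
RangeMap ranges;
ranges[ BSON( "x" << MINKEY ) ] = BSON( "x" << 0 );
ranges[ BSON( "x" << 0 ) ]      = BSON( "x" << MAXKEY );

// Matching config.chunks-style documents; "lastmod" (a ShardChunkVersion) is omitted
// here because its BSON encoding is not shown in this example.
BSONArray chunks = BSON_ARRAY(
    BSON( "min" << BSON( "x" << MINKEY ) << "max" << BSON( "x" << 0 )      << "shard" << "shard0000" ) <<
    BSON( "min" << BSON( "x" << 0 )      << "max" << BSON( "x" << MAXKEY ) << "shard" << "shard0001" ) );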
Example #2
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {

    // Currently this function is called after a getVersion(), which is the first "check", and the assumption here
    // is that the thread does nothing that takes nearly as long as a remote query between then and now.
    // Otherwise it may be worth adding an additional check below that does not take the _configServerMutex, since
    // the version would then be likely to have changed in the meantime and we could notice that without waiting
    // for or fetching config results.

    // TODO:  Mutex-per-namespace?

    LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;

    _configServerTickets.waitForTicket();
    TicketHolderReleaser needTicketFrom( &_configServerTickets );

    // fast path - double-check if requested version is at the same version as this chunk manager before verifying
    // against config server
    //
    // This path will short-circuit the version set if another thread already managed to update the version in the
    // meantime.  First check is from getVersion().
    //
    // cases:
    //   + this shard updated the version for a migrate's commit (FROM side)
    //     a client reloaded chunk state from config and picked the newest version
    //   + two clients reloaded
    //     one triggered the 'slow path' (below)
    //     when the second's request gets here, the version is already current
    ConfigVersion storedVersion;
    ShardChunkManagerPtr currManager;
    {
        scoped_lock lk( _mutex );
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it != _chunks.end() ) currManager = it->second;
        if ( it != _chunks.end() && ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
            return true;
    }

    LOG( 2 ) << "verifying cached version " << storedVersion.toString() << " and new version " << version.toString() << " for '" << ns << "'" << endl;

    // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
    // newest version in the config server
    //
    // cases:
    //   + a chunk moved TO here
    //     (we don't bump up the version on the TO side but the commit to config does use a higher version)
    //     a client reloaded from config and issued the request
    //   + there was a take over from a secondary
    //     the secondary had no state (managers) at all, so every client request will fall here
    //   + a stale client requests a version that's not current anymore

    // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
    const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;
    ShardChunkManagerPtr p( new ShardChunkManager( c , ns , _shardName, currManager ) );

    {
        scoped_lock lk( _mutex );

        // since we loaded the chunk manager unlocked, another thread may have done the same;
        // make sure we keep only the freshest config info
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
            _chunks[ns] = p;
        }

        ShardChunkVersion oldVersion = version;
        version = p->getVersion();
        return oldVersion.isEquivalentTo( version );
    }
}
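
The control flow above is essentially a double-checked cache refresh: a fast path that returns under the mutex when the cached manager already matches the requested version, an unlocked (and potentially slow) reload, and a second locked check that installs the result only if it is the freshest. Below is a minimal, self-contained sketch of the same pattern in generic C++; it is not MongoDB code, and all names in it are illustrative stand-ins.

#include <map>
#include <memory>
#include <mutex>
#include <string>

// Illustrative stand-ins for ShardChunkManagerPtr and the version type.
struct Manager { long long version; };
typedef std::shared_ptr<Manager> ManagerPtr;

std::mutex cacheMutex;                       // plays the role of _mutex
std::map<std::string, ManagerPtr> cache;     // plays the role of _chunks

// Stand-in for the slow, remote reload from the config server.
ManagerPtr loadFromConfig( const std::string& /*ns*/ ) {
    ManagerPtr m = std::make_shared<Manager>();
    m->version = 42;                         // pretend this came from the config server
    return m;
}

bool trySetVersionSketch( const std::string& ns, long long& version /* IN-OUT */ ) {
    {
        // fast path: another thread may already have refreshed the cache
        std::lock_guard<std::mutex> lk( cacheMutex );
        std::map<std::string, ManagerPtr>::const_iterator it = cache.find( ns );
        if ( it != cache.end() && it->second->version == version )
            return true;
    }

    // slow path: reload without holding the mutex, since this may block on I/O
    ManagerPtr fresh = loadFromConfig( ns );

    std::lock_guard<std::mutex> lk( cacheMutex );
    std::map<std::string, ManagerPtr>::const_iterator it = cache.find( ns );
    if ( it == cache.end() || fresh->version >= it->second->version )
        cache[ns] = fresh;                   // keep only the freshest manager

    long long requested = version;
    version = fresh->version;                // report back what was actually loaded
    return requested == version;
}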