Code Example #1
File: d_state.cpp Project: Kenterfie/mongo
    /**
     * @return true if not in sharded mode
     *         or if the version for this client is ok
     */
    bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ) {
        if ( ! shardingState.enabled() )
            return true;

        ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );

        if ( ! info ) {
            // this means the client has nothing sharded
            // so this allows direct connections to do whatever they want
            // which I think is the correct behavior
            return true;
        }

        if ( info->inForceVersionOkMode() ) {
            return true;
        }

        // TODO
        //   all collections, sharded or not, will at some point have a version (and a ShardChunkManager)
        //   for now, we remove the sharding state of dropped collections,
        //   so delayed requests may come in. This has to be fixed.
        ConfigVersion clientVersion = info->getVersion(ns);
        ConfigVersion version;
        if ( ! shardingState.hasVersion( ns , version ) && clientVersion == 0 ) {
            return true;
        }


        if ( version == 0 && clientVersion > 0 ) {
            stringstream ss;
            ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
            errmsg = ss.str();
            return false;
        }

        if ( clientVersion >= version )
            return true;


        if ( clientVersion == 0 ) {
            stringstream ss;
            ss << "client in sharded mode, but doesn't have version set for this collection: " << ns << " myVersion: " << version;
            errmsg = ss.str();
            return false;
        }

        if ( version.majorVersion() == clientVersion.majorVersion() ) {
            // this means there was just a split
            // since on a split w/o a migrate this server is ok
            // going to accept the request
            return true;
        }

        stringstream ss;
        ss << "your version is too old  ns: " + ns << " global: " << version << " client: " << clientVersion;
        errmsg = ss.str();
        return false;
    }
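
The ladder of checks above boils down to comparing the client's cached version with the shard's authoritative one. Below is a minimal standalone sketch of that decision logic, using a simplified (major, minor) stand-in rather than MongoDB's actual ConfigVersion type:

#include <string>

// Simplified stand-in for ConfigVersion: major changes on migrates,
// minor on splits. Not MongoDB's actual type.
struct Version {
    int major = 0, minor = 0;
    bool isSet() const { return major != 0 || minor != 0; }
    bool operator>=(const Version& o) const {
        return major != o.major ? major > o.major : minor >= o.minor;
    }
};

// Mirrors the decision ladder in shardVersionOk: accept equal-or-newer
// clients, tolerate same-major divergence (a split without a migrate),
// and reject everything else.
bool versionOk(const Version& client, const Version& shard, std::string& errmsg) {
    if (!shard.isSet() && !client.isSet()) return true;    // nothing versioned yet
    if (!shard.isSet()) { errmsg = "collection was dropped"; return false; }
    if (client >= shard) return true;                      // client is current or newer
    if (!client.isSet()) { errmsg = "client has no version set"; return false; }
    if (client.major == shard.major) return true;          // split w/o migrate: ok
    errmsg = "client version is too old";
    return false;
}
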
Code Example #2
File: d_state.cpp Project: jit/mongo
    /**
     * @return true if not in sharded mode
     *         or if the version for this client is ok
     */
    bool shardVersionOk( const string& ns , bool isWriteOp , string& errmsg ){
        if ( ! shardingState.enabled() )
            return true;

        ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );

        if ( ! info ){
            // this means the client has nothing sharded
            // so this allows direct connections to do whatever they want
            // which I think is the correct behavior
            return true;
        }
        
        if ( info->inForceVersionOkMode() ){
            return true;
        }

        ConfigVersion version;    
        if ( ! shardingState.hasVersion( ns , version ) ){
            return true;
        }

        ConfigVersion clientVersion = info->getVersion(ns);

        if ( version == 0 && clientVersion > 0 ){
            stringstream ss;
            ss << "collection was dropped or this shard no longer valied version: " << version << " clientVersion: " << clientVersion;
            errmsg = ss.str();
            return false;
        }
        
        if ( clientVersion >= version )
            return true;
        

        if ( clientVersion == 0 ){
            stringstream ss;
            ss << "client in sharded mode, but doesn't have version set for this collection: " << ns << " myVersion: " << version;
            errmsg = ss.str();
            return false;
        }

        if ( isWriteOp && version.majorVersion() == clientVersion.majorVersion() ){
            // this means there was just a split 
            // since on a split w/o a migrate this server is ok
            // going to accept the write
            return true;
        }

        stringstream ss;
        ss << "your version is too old  ns: " + ns << " global: " << version << " client: " << clientVersion;
        errmsg = ss.str();
        return false;
    }
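
Compared with example #1, the split-tolerance branch here is gated on isWriteOp, so only writes are accepted when the versions differ by a split alone. A hypothetical call site (handleRequest is an illustrative name, not a MongoDB entry point):

#include <string>

// Declaration matching the function above.
bool shardVersionOk(const std::string& ns, bool isWriteOp, std::string& errmsg);

// Hypothetical dispatcher: on failure, errmsg is returned to mongos, which
// is expected to refresh its chunk configuration and retry.
bool handleRequest(const std::string& ns, bool isWrite, std::string& errmsg) {
    if (!shardVersionOk(ns, isWrite, errmsg))
        return false;   // stale config: reject rather than touch wrong chunks
    // ... perform the read or write against ns ...
    return true;
}
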
Code Example #3
File: d_state.cpp Project: jjwchoy/mongo
/**
 * @return true if not in sharded mode
 *         or if the version for this client is ok
 */
bool shardVersionOk( const string& ns , string& errmsg, ConfigVersion& received, ConfigVersion& wanted ) {
    if ( ! shardingState.enabled() )
        return true;

    if ( ! isMasterNs( ns.c_str() ) )  {
        // right now connections to secondaries aren't versioned at all
        return true;
    }

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );

    if ( ! info ) {
        // this means the client has nothing sharded
        // so this allows direct connections to do whatever they want
        // which I think is the correct behavior
        return true;
    }

    if ( info->inForceVersionOkMode() ) {
        return true;
    }

    // TODO
    //   all collections, sharded or not, will at some point have a version (and a ShardChunkManager)
    //   for now, we remove the sharding state of dropped collections,
    //   so delayed requests may come in. This has to be fixed.
    ConfigVersion clientVersion = info->getVersion(ns);
    ConfigVersion version;
    if ( ! shardingState.hasVersion( ns , version ) && ! clientVersion.isSet() ) {
        return true;
    }

    // The versions we're going to compare, saved for future use
    received = clientVersion;
    wanted = version;

    if ( ! version.isSet() && clientVersion.isSet() ) {
        stringstream ss;
        ss << "collection was dropped or this shard no longer valid version";
        errmsg = ss.str();
        return false;
    }

    if ( clientVersion >= version )
        return true;


    if ( ! clientVersion.isSet() ) {
        stringstream ss;
        ss << "client in sharded mode, but doesn't have version set for this collection";
        errmsg = ss.str();
        return false;
    }

    if ( version.majorVersion() == clientVersion.majorVersion() ) {
        // this means there was just a split
        // since on a split w/o a migrate this server is ok
        // going to accept the request
        return true;
    }

    stringstream ss;
    ss << "your version is too old";
    errmsg = ss.str();
    return false;
}
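
This variant adds received/wanted out-parameters so the caller can attach both versions to the error reply instead of baking them into the message text. A hedged sketch of how a caller might serialize them (stand-in version type and illustrative reply format, not MongoDB's wire protocol):

#include <sstream>
#include <string>

struct Ver { int major = 0, minor = 0; };   // stand-in for ConfigVersion

// Hypothetical error-reply builder: with the versions returned separately,
// the message stays short and the router can still compare them numerically.
std::string staleConfigReply(const std::string& ns, const std::string& errmsg,
                             const Ver& received, const Ver& wanted) {
    std::ostringstream os;
    os << "{ ok: 0, ns: \"" << ns << "\", errmsg: \"" << errmsg << "\""
       << ", received: \"" << received.major << "|" << received.minor << "\""
       << ", wanted: \""   << wanted.major   << "|" << wanted.minor   << "\" }";
    return os.str();
}
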
Code Example #4
File: d_state.cpp Project: jjwchoy/mongo
    bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {

        // Steps
        // 1. check basic config
        // 2. extract params from command
        // 3. fast check
        // 4. slow check (LOCKS)

        // step 1

        lastError.disableForCommand();
        ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

        // make sure we have the mongos id for writebacks
        if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
            return false;

        bool authoritative = cmdObj.getBoolField( "authoritative" );

        // check config server is ok or enable sharding
        if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
            return false;

        // check shard name/hosts are correct
        if ( cmdObj["shard"].type() == String ) {
            shardingState.gotShardName( cmdObj["shard"].String() );
            shardingState.gotShardHost( cmdObj["shardHost"].String() );
        }


        // Handle initial shard connection
        if( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ) {
            result.append( "initialized", true );
            return true;
        }

        // we can run on a slave up to here
        if ( ! isMaster( "admin" ) ) {
            result.append( "errmsg" , "not master" );
            result.append( "note" , "from post init in setShardVersion" );
            return false;
        }

        // step 2

        string ns = cmdObj["setShardVersion"].valuestrsafe();
        if ( ns.size() == 0 ) {
            errmsg = "need to specify namespace";
            return false;
        }

        const ConfigVersion version = ConfigVersion( extractVersion( cmdObj["version"] , errmsg ), OID() );
        if ( errmsg.size() )
            return false;

        // step 3

        const ConfigVersion oldVersion = info->getVersion(ns);
        const ConfigVersion globalVersion = shardingState.getVersion(ns);

        oldVersion.addToBSON( result, "oldVersion" );

        if ( globalVersion.isSet() && version.isSet() ) {
            // this means there is no reset in progress on either side
            // so it's safe to make some assumptions

            if ( version.isEquivalentTo( globalVersion ) ) {
                // mongos and mongod agree!
                if ( ! oldVersion.isEquivalentTo( version ) ) {
                    if ( oldVersion < globalVersion ) {
                        info->setVersion( ns , version );
                    }
                    else if ( authoritative ) {
                        // this means there was a drop and our version is reset
                        info->setVersion( ns , version );
                    }
                    else {
                        result.append( "ns" , ns );
                        result.appendBool( "need_authoritative" , true );
                        errmsg = "verifying drop on '" + ns + "'";
                        return false;
                    }
                }
                return true;
            }

        }

        // step 4

        // this is because of a weird segfault I saw and I can't see why this should ever be set
        massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );

        Lock::GlobalWrite setShardVersionLock; // TODO: can we get rid of this??

        if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
            // this had been reset
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
        }

        if ( ! version.isSet() && ! globalVersion.isSet() ) {
            // this connection is cleaning itself
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
            return true;
        }

        if ( ! version.isSet() && globalVersion.isSet() ) {
            if ( ! authoritative ) {
                result.appendBool( "need_authoritative" , true );
                result.append( "ns" , ns );
                globalVersion.addToBSON( result, "globalVersion" );
                errmsg = "dropping needs to be authoritative";
                return false;
            }
            log() << "wiping data for: " << ns << endl;
            globalVersion.addToBSON( result, "beforeDrop" );
            // only setting global version on purpose
            // need clients to re-find meta-data
            shardingState.resetVersion( ns );
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
            return true;
        }

        if ( version < oldVersion ) {
            errmsg = "this connection already had a newer version of collection '" + ns + "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "newVersion" );
            globalVersion.addToBSON( result, "globalVersion" );
            return false;
        }

        if ( version < globalVersion ) {
            while ( shardingState.inCriticalMigrateSection() ) {
                dbtemprelease r;
                sleepmillis(2);
                OCCASIONALLY log() << "waiting till out of critical section" << endl;
            }
            errmsg = "shard global version for collection is higher than trying to set to '" + ns + "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "version" );
            globalVersion.addToBSON( result, "globalVersion" );
            result.appendBool( "reloadConfig" , true );
            return false;
        }

        if ( ! globalVersion.isSet() && ! authoritative ) {
            // Needed b/c when the last chunk is moved off a shard, the version gets reset to zero, which
            // should require a reload.
            // TODO: Maybe a more elegant way of doing this
            while ( shardingState.inCriticalMigrateSection() ) {
                dbtemprelease r;
                sleepmillis(2);
                OCCASIONALLY log() << "waiting till out of critical section for version reset" << endl;
            }

            // need authoritative for first look
            result.append( "ns" , ns );
            result.appendBool( "need_authoritative" , true );
            errmsg = "first time for collection '" + ns + "'";
            return false;
        }

        Timer relockTime;
        {
            dbtemprelease unlock;

            ShardChunkVersion currVersion = version;
            if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
                errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
                result.append( "ns" , ns );
                version.addToBSON( result, "version" );
                globalVersion.addToBSON( result, "globalVersion" );
                return false;
            }
        }
        if ( relockTime.millis() >= ( cmdLine.slowMS - 10 ) ) {
            log() << "setShardVersion - relocking slow: " << relockTime.millis() << endl;
        }

        info->setVersion( ns , version );
        return true;
    }
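
Reconstructed from the fields run() reads, the incoming command document looks roughly like the sketch below. BSON() and OID::gen() are MongoDB builder utilities, but the field values and especially the "version" encoding are assumptions here and vary by release:

// Sketch only: inferred from the parsing code above, not the exact document
// any particular mongos release sends. Assumes MongoDB's BSON builder headers.
BSONObj makeSetShardVersionCmd( const string& ns ) {
    return BSON( "setShardVersion" << ns                  // target namespace
              << "configdb"        << "cfg1:27019"        // config server string
              << "serverID"        << OID::gen()          // mongos id, used for writebacks
              << "authoritative"   << false
              << "shard"           << "shard0000"
              << "shardHost"       << "shard0000:27018" );
    // plus a "version" field carrying the (major, minor) chunk version;
    // its on-the-wire encoding differs across releases, so it is omitted here
}
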
Code Example #5
File: d_state.cpp Project: jjwchoy/mongo
bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {

    // Currently this function is called after a getVersion(), which is the first "check", and the
    // assumption is that between then and now the thread does nothing nearly as long as a remote query.
    // Otherwise it may be worth adding an additional check without the _configServerMutex below, since
    // it would then be likely that the version has changed in the meantime without waiting for or
    // fetching config results.

    // TODO:  Mutex-per-namespace?

    LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;

    _configServerTickets.waitForTicket();
    TicketHolderReleaser needTicketFrom( &_configServerTickets );

    // fast path - double-check if requested version is at the same version as this chunk manager before verifying
    // against config server
    //
    // This path will short-circuit the version set if another thread already managed to update the version in the
    // meantime.  First check is from getVersion().
    //
    // cases:
    //   + this shard updated the version for a migrate's commit (FROM side)
    //     a client reloaded chunk state from config and picked the newest version
    //   + two clients reloaded
    //     one triggered the 'slow path' (below)
    //     when the second's request gets here, the version is already current
    ConfigVersion storedVersion;
    ShardChunkManagerPtr currManager;
    {
        scoped_lock lk( _mutex );
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it != _chunks.end() ) currManager = it->second;
        if ( it != _chunks.end() && ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
            return true;
    }

    LOG( 2 ) << "verifying cached version " << storedVersion.toString() << " and new version " << version.toString() << " for '" << ns << "'" << endl;

    // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
    // newest version in the config server
    //
    // cases:
    //   + a chunk moved TO here
    //     (we don't bump up the version on the TO side but the commit to config does use higher version)
    //     a client reloads from config and issues the request
    //   + there was a take over from a secondary
    //     the secondary had no state (managers) at all, so every client request will fall here
    //   + a stale client requests a version that's not current anymore

    // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
    const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;
    ShardChunkManagerPtr p( new ShardChunkManager( c , ns , _shardName, currManager ) );

    {
        scoped_lock lk( _mutex );

        // since we loaded the chunk manager unlocked, another thread may have done the same
        // make sure we keep the freshest config info only
        ChunkManagersMap::const_iterator it = _chunks.find( ns );
        if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
            _chunks[ns] = p;
        }

        ShardChunkVersion oldVersion = version;
        version = p->getVersion();
        return oldVersion.isEquivalentTo( version );
    }
}
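
Structurally, trySetVersion is a double-checked reload: test the cached version under the mutex, rebuild outside the lock on a miss (a potentially slow config-server fetch), then re-check under the lock before installing. A generic standalone sketch of the same pattern, with stand-in types rather than MongoDB's:

#include <map>
#include <memory>
#include <mutex>
#include <string>

struct Manager { int version; };             // stand-in for ShardChunkManager

std::mutex g_mutex;
std::map<std::string, std::shared_ptr<Manager>> g_cache;

std::shared_ptr<Manager> loadFromConfig(const std::string& ns);  // slow remote fetch

bool trySetVersion(const std::string& ns, int& version /* in-out */) {
    {   // fast path: another thread may already have installed this version
        std::lock_guard<std::mutex> lk(g_mutex);
        auto it = g_cache.find(ns);
        if (it != g_cache.end() && it->second->version == version)
            return true;
    }
    // slow path: fetch outside the lock, since it may block on the network
    std::shared_ptr<Manager> fresh = loadFromConfig(ns);
    {
        std::lock_guard<std::mutex> lk(g_mutex);
        auto it = g_cache.find(ns);
        if (it == g_cache.end() || fresh->version >= it->second->version)
            g_cache[ns] = fresh;             // keep only the freshest state
        int requested = version;
        version = fresh->version;            // report what was actually loaded
        return requested == version;
    }
}
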
Code Example #6
File: d_state.cpp Project: 524777134/mongo
    /**
     * @return true if not in sharded mode
     *         or if the version for this client is ok
     */
    bool shardVersionOk( const string& ns , string& errmsg, ConfigVersion& received, ConfigVersion& wanted ) {

        if ( ! shardingState.enabled() )
            return true;

        if ( ! isMasterNs( ns.c_str() ) )  {
            // right now connections to secondaries aren't versioned at all
            return true;
        }

        ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );

        if ( ! info ) {
            // this means the client has nothing sharded
            // so this allows direct connections to do whatever they want
            // which I think is the correct behavior
            return true;
        }

        if ( info->inForceVersionOkMode() ) {
            return true;
        }

        // TODO : all collections at some point, be sharded or not, will have a version
        //  (and a ShardChunkManager)
        received = info->getVersion( ns );
        wanted = shardingState.getVersion( ns );

        if( received.isWriteCompatibleWith( wanted ) ) return true;

        //
        // Figure out exactly why not compatible, send appropriate error message
        // The versions themselves are returned in the error, so not needed in messages here
        //

        // Check epoch first, to send more meaningful message, since other parameters probably
        // won't match either
        if( ! wanted.hasCompatibleEpoch( received ) ){
            errmsg = str::stream() << "version epoch mismatch detected for " << ns << ", "
                                   << "the collection may have been dropped and recreated";
            return false;
        }

        if( ! wanted.isSet() && received.isSet() ){
            errmsg = str::stream() << "this shard no longer contains chunks for " << ns << ", "
                                   << "the collection may have been dropped";
            return false;
        }

        if( wanted.isSet() && ! received.isSet() ){
            errmsg = str::stream() << "this shard contains versioned chunks for " << ns << ", "
                                   << "but no version set in request";
            return false;
        }

        if( wanted.majorVersion() != received.majorVersion() ){

            //
            // Could be > or < - wanted is > if this is the source of a migration,
            // wanted < if this is the target of a migration
            //

            errmsg = str::stream() << "version mismatch detected for " << ns << ", "
                                   << "stored major version " << wanted.majorVersion()
                                   << " does not match received " << received.majorVersion();
            return false;
        }

        // Those are all the reasons the versions can mismatch
        verify( false );

        return false;

    }
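
The ordering of the failure checks above implies the compatibility rule itself: same collection epoch and same major version, with minor-version (split-only) differences tolerated for writes. A simplified model of that rule (not MongoDB's actual ChunkVersion, whose epoch is an OID):

#include <cstdint>

// Simplified chunk version: the epoch identifies a collection incarnation;
// major bumps on migrates, minor on splits.
struct ChunkVer {
    uint64_t epoch = 0;      // stand-in for the OID epoch
    uint32_t major = 0, minor = 0;
    bool isSet() const { return major != 0 || minor != 0; }
};

// Write-compatible: same collection incarnation and same major version,
// i.e. only splits (minor bumps) may separate the two sides.
bool isWriteCompatibleWith(const ChunkVer& a, const ChunkVer& b) {
    return a.epoch == b.epoch && a.major == b.major;
}
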
Code Example #7
File: d_state.cpp Project: 524777134/mongo
    bool ShardingState::trySetVersion( const string& ns , ConfigVersion& version /* IN-OUT */ ) {

        // Currently this function is called after a getVersion(), which is the first "check", and the
        // assumption is that between then and now the thread does nothing nearly as long as a remote query.
        // Otherwise it may be worth adding an additional check without the _configServerMutex below, since
        // it would then be likely that the version has changed in the meantime without waiting for or
        // fetching config results.

        // TODO:  Mutex-per-namespace?
        
        LOG( 2 ) << "trying to set shard version of " << version.toString() << " for '" << ns << "'" << endl;
        
        _configServerTickets.waitForTicket();
        TicketHolderReleaser needTicketFrom( &_configServerTickets );

        // fast path - double-check if requested version is at the same version as this chunk manager before verifying
        // against config server
        //
        // This path will short-circuit the version set if another thread already managed to update the version in the
        // meantime.  First check is from getVersion().
        //
        // cases:
        //   + this shard updated the version for a migrate's commit (FROM side)
        //     a client reloaded chunk state from config and picked the newest version
        //   + two clients reloaded
        //     one triggered the 'slow path' (below)
        //     when the second's request gets here, the version is already current
        ConfigVersion storedVersion;
        ShardChunkManagerPtr currManager;
        {
            scoped_lock lk( _mutex );
            ChunkManagersMap::const_iterator it = _chunks.find( ns );
            if( it == _chunks.end() ){

                // TODO: We need better semantic distinction between *no manager found* and
                // *manager of version zero found*
                log() << "no current chunk manager found for this shard, will initialize" << endl;
            }
            else{
                currManager = it->second;
                if( ( storedVersion = it->second->getVersion() ).isEquivalentTo( version ) )
                    return true;
            }
        }
        
        LOG( 2 ) << "verifying cached version " << storedVersion.toString() << " and new version " << version.toString() << " for '" << ns << "'" << endl;

        // slow path - requested version is different than the current chunk manager's, if one exists, so must check for
        // newest version in the config server
        //
        // cases:
        //   + a chunk moved TO here
        //     (we don't bump up the version on the TO side but the commit to config does use higher version)
        //     a client reloads from config and issues the request
        //   + there was a take over from a secondary
        //     the secondary had no state (managers) at all, so every client request will fall here
        //   + a stale client requests a version that's not current anymore

        // Can't lock default mutex while creating ShardChunkManager, b/c may have to create a new connection to myself
        const string c = (_configServer == _shardHost) ? "" /* local */ : _configServer;

        // If our epochs aren't compatible, it's not useful to use the old manager for chunk diffs
        if( currManager && ! currManager->getCollVersion().hasCompatibleEpoch( version ) ){

            warning() << "detected incompatible version epoch in new version " << version
                      << ", old version was " << currManager->getCollVersion() << endl;

            currManager.reset();
        }

        ShardChunkManagerPtr p( ShardChunkManager::make( c , ns , _shardName, currManager ) );

        // Handle more gracefully the case where the collection isn't sharded
        if( p->getKey().isEmpty() ){
            version = ConfigVersion( 0, OID() );
            // There was an error getting any data for this collection, return false
            return false;
        }

        {
            // NOTE: This lock prevents the ns version from changing while a write operation occurs.
            Lock::DBRead readLk(ns);
            
            // This lock prevents simultaneous metadata changes using the same map
            scoped_lock lk( _mutex );

            // since we loaded the chunk manager unlocked, another thread may have done the same
            // make sure we keep the freshest config info only
            ChunkManagersMap::const_iterator it = _chunks.find( ns );
            if ( it == _chunks.end() || p->getVersion() >= it->second->getVersion() ) {
                _chunks[ns] = p;
            }

            ChunkVersion oldVersion = version;
            version = p->getVersion();
            return oldVersion.isEquivalentTo( version );
        }
    }
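
New in this revision of trySetVersion: before reusing the cached manager for an incremental reload, its epoch is checked against the requested version, since a drop-and-recreate makes the old chunk data useless as a diff base. A standalone sketch of that guard (stand-in types, not MongoDB's):

#include <cstdint>
#include <memory>

struct Mgr { uint64_t epoch; };   // stand-in for ShardChunkManager

// If the cached manager belongs to a different collection incarnation,
// discard it so the reload starts from scratch instead of diffing.
void dropIfIncompatibleEpoch(std::shared_ptr<Mgr>& cached, uint64_t requestedEpoch) {
    if (cached && cached->epoch != requestedEpoch)
        cached.reset();   // incremental diff against an old epoch would be garbage
}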