/**
 * @brief Get a MySQL database connection from the pool
 * @return The MySQL database connection
 */
MySqlConnectionPool::ConnectionPtr MySqlConnectionPool::connection()
{
    ConnectionPtr connPtr;

    std::time_t begin;
    std::time(&begin);

    while (true) {
        boost::mutex::scoped_lock lock(_mutex);

        if (_connContainer.empty()) {
            // The pool is empty: keep polling, and create a fresh connection
            // if we have been waiting for more than 10 seconds.
            std::time_t now;
            std::time(&now);
            if (now - begin > 10) {
                this->createConnection();
                begin = now;
            }

            continue;
        }

        connPtr = _connContainer.front();
        _connContainer.pop_front();
        if (connPtr.get()) {
            break;

        } else {
            this->createConnection();
            continue;
        }
    }

    return connPtr;
}
/**
 * @brief Check the MySQL connections held in the pool
 */
void MySqlConnectionPool::checkDbConnections()
{
    boost::mutex::scoped_lock lock(_mutex);

    std::size_t i = _connContainer.size();
    while (i) {
        ConnectionPtr connPtr = _connContainer.front();
        _connContainer.pop_front();

        std::string query("SELECT CURDATE();");

        if (!connPtr.get()) {
            std::cout << "startCheckDbConnection connPtr is null" << std::endl;
            --i;
            continue;
        }

        if (::mysql_ping(&*connPtr)) {
            std::cout << "mysql_ping failed: " << ::mysql_error(&*connPtr) << std::endl;
        }

        if (::mysql_real_query(&*connPtr, query.c_str(), query.length())) {
            std::cout << ::mysql_error(&*connPtr) << std::endl;

            // Re-initialise the handle before reconnecting: mysql_real_connect
            // must not be called on a closed handle without mysql_init.
            ::mysql_close(&*connPtr);
            ::mysql_init(&*connPtr);
            if (!::mysql_real_connect(&*connPtr, _serverAddr.c_str(), _username.c_str(), _passwd.c_str(),
                    _dbname.c_str(), 0, NULL, 0)) {
                std::cout << ::mysql_error(&*connPtr) << std::endl;
                this->createConnection();
                connPtr.reset();    // drop the dead connection so it is not returned to the pool
            }

        } else {
            std::vector<std::string> resVector;
            MYSQL_RES *res = ::mysql_store_result(&*connPtr);
            if (res) {
                MYSQL_ROW row = ::mysql_fetch_row(res);
                while (row) {
                    for (unsigned int j = 0; j < ::mysql_num_fields(res); ++j) {
                        // row[j] is NULL for SQL NULL values
                        resVector.push_back(row[j] ? std::string(row[j]) : std::string());
                    }
                    row = ::mysql_fetch_row(res);
                }
                ::mysql_free_result(res);
            }
        }

        if (connPtr.get()) {
            _connContainer.push_back(connPtr);
        }

        --i;
    }
}
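
The excerpt does not show how checkDbConnections() is scheduled; the log message inside it mentions startCheckDbConnection, which suggests a background checker thread. Below is a minimal sketch of such a driver, assuming boost::thread is available (boost::mutex is already used here); the member _running, the helper checkLoop(), and the 30-second interval are assumptions, not part of the original class.

    // Sketch only: _running and checkLoop() are assumed members; only the
    // startCheckDbConnection name is hinted at by the log output above.
    #include <boost/thread.hpp>
    #include <boost/bind.hpp>

    void MySqlConnectionPool::startCheckDbConnection()
    {
        _running = true;    // assumed stop flag, e.g. a boost::atomic<bool>
        boost::thread(boost::bind(&MySqlConnectionPool::checkLoop, this)).detach();
    }

    void MySqlConnectionPool::checkLoop()    // assumed private helper
    {
        while (_running) {
            this->checkDbConnections();      // ping and refresh every pooled connection
            boost::this_thread::sleep(boost::posix_time::seconds(30));
        }
    }
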
/**
 * @brief Create a MySQL database connection and add it to the pool
 * @return Pointer to the newly created MySQL database connection
 */
MySqlConnectionPool::ConnectionPtr MySqlConnectionPool::createConnection()
{
    ConnectionPtr connPtr(new MYSQL);

    // Initialise the caller-owned MYSQL object in place; copying the handle
    // returned by mysql_init(NULL) would leak it.
    ::mysql_init(&*connPtr);

    if (!::mysql_real_connect(&*connPtr, _serverAddr.c_str(), _username.c_str(), _passwd.c_str(),
            _dbname.c_str(), 0, NULL, 0)) {
        std::cout << ::mysql_error(&*connPtr) << std::endl;
        return connPtr;
    }

    if (::mysql_set_character_set(&*connPtr, "utf8")) {
        std::cout << ::mysql_error(&*connPtr) << std::endl;
        return connPtr;
    }

    std::string query("SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;");
    if (::mysql_real_query(&*connPtr, query.c_str(), query.length())) {
        std::cout << ::mysql_error(&*connPtr) << std::endl;
        return connPtr;
    }
    query = "USE " + _dbname + ";";
    if (::mysql_real_query(&*connPtr, query.c_str(), query.length())) {
        std::cout << ::mysql_error(&*connPtr) << std::endl;
        return connPtr;
    }
    query = "SET GLOBAL init_connect='set autocommit=1';";
    if (::mysql_real_query(&*connPtr, query.c_str(), query.length())) {
        std::cout << ::mysql_error(&*connPtr) << std::endl;
        return connPtr;
    }

    _connContainer.push_back(connPtr);

    return connPtr;
}
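
For context, here is a minimal usage sketch of the pool above. The constructor arguments are assumptions (the constructor is not part of this excerpt), and since the excerpt shows no method for returning a connection to the pool, the sketch stops after running a query.

    // Usage sketch only: the constructor signature is assumed, not shown above.
    MySqlConnectionPool pool("127.0.0.1", "user", "secret", "testdb");

    // connection() blocks (polling) until a pooled connection is available.
    MySqlConnectionPool::ConnectionPtr conn = pool.connection();
    if (conn.get()) {
        std::string query("SELECT 1;");
        if (::mysql_real_query(conn.get(), query.c_str(), query.length())) {
            std::cout << ::mysql_error(conn.get()) << std::endl;
        }
        // Note: the excerpt exposes no release()/giveBack() method, so returning
        // the connection to the pool is not shown here.
    }
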
Example #4
    void ClientInfo::_setupAuth() {
        std::string adminNs = "admin";
        DBConfigPtr config = grid.getDBConfig(adminNs);
        Shard shard = config->getShard(adminNs);
        scoped_ptr<ScopedDbConnection> connPtr(
                ScopedDbConnection::getInternalScopedDbConnection(shard.getConnString(), 30.0));
        ScopedDbConnection& conn = *connPtr;

        //
        // Note: The connection mechanism here is *not* ideal, and should not be used elsewhere.
        // It is safe in this particular case because the admin database is always on the config
        // server and does not move.
        //

        AuthorizationManager* authManager = new AuthorizationManager(new AuthExternalStateImpl());
        Status status = authManager->initialize(conn.get());
        massert(16479,
                mongoutils::str::stream() << "Error initializing AuthorizationManager: "
                                          << status.reason(),
                status == Status::OK());
        setAuthorizationManager(authManager);
    }
Example #5
    void Balancer::run() {

        // this is the body of a BackgroundJob so if we throw here we're basically ending the balancer thread prematurely
        while ( ! inShutdown() ) {

            if ( ! _init() ) {
                log() << "will retry to initialize balancer in one minute" << endl;
                sleepsecs( 60 );
                continue;
            }

            break;
        }

        int sleepTime = 30;

        // getConnectionString and the dist lock constructor do not throw, which is what we expect
        // while on the balancer thread
        ConnectionString config = configServer.getConnectionString();
        DistributedLock balanceLock( config , "balancer" );

        while ( ! inShutdown() ) {

            try {

                scoped_ptr<ScopedDbConnection> connPtr(
                        ScopedDbConnection::getInternalScopedDbConnection( config.toString() ) );
                ScopedDbConnection& conn = *connPtr;

                // ping has to be first so we keep things in the config server in sync
                _ping( conn.conn() );

                // use fresh shard state
                Shard::reloadShardInfo();

                // refresh chunk size (even though another balancer might be active)
                Chunk::refreshChunkSize();

                BSONObj balancerConfig;
                // now make sure we should even be running
                if ( ! grid.shouldBalance( "", &balancerConfig ) ) {
                    LOG(1) << "skipping balancing round because balancing is disabled" << endl;

                    // Ping again so scripts can determine if we're active without waiting
                    _ping( conn.conn(), true );

                    conn.done();
                    
                    sleepsecs( sleepTime );
                    continue;
                }

                sleepTime = balancerConfig["_nosleep"].trueValue() ? 30 : 6;
                
                uassert( 13258 , "oids broken after resetting!" , _checkOIDs() );

                {
                    dist_lock_try lk( &balanceLock , "doing balance round" );
                    if ( ! lk.got() ) {
                        LOG(1) << "skipping balancing round because another balancer is active" << endl;

                        // Ping again so scripts can determine if we're active without waiting
                        _ping( conn.conn(), true );

                        conn.done();
                        
                        sleepsecs( sleepTime ); // no need to wake up soon
                        continue;
                    }
                    
                    LOG(1) << "*** start balancing round" << endl;

                    vector<CandidateChunkPtr> candidateChunks;
                    _doBalanceRound( conn.conn() , &candidateChunks );
                    if ( candidateChunks.size() == 0 ) {
                        LOG(1) << "no need to move any chunk" << endl;
                        _balancedLastTime = 0;
                    }
                    else {
                        _balancedLastTime = _moveChunks( &candidateChunks, balancerConfig["_secondaryThrottle"].trueValue() );
                    }
                    
                    LOG(1) << "*** end of balancing round" << endl;
                }

                // Ping again so scripts can determine if we're active without waiting
                _ping( conn.conn(), true );
                
                conn.done();

                sleepsecs( _balancedLastTime ? sleepTime / 6 : sleepTime );
            }
            catch ( std::exception& e ) {
                log() << "caught exception while doing balance: " << e.what() << endl;

                // Just to match the opening statement if in log level 1
                LOG(1) << "*** End of balancing round" << endl;

                sleepsecs( sleepTime ); // sleep a fair amount b/c of error
                continue;
            }
        }

    }
Example #6
    bool MetadataLoader::initChunks(const CollectionType& collDoc,
                                    const string& ns,
                                    const string& shard,
                                    const CollectionManager* oldManager,
                                    CollectionManager* manager,
                                    string* errMsg) {

        map<string,ChunkVersion> versionMap;
        manager->_maxCollVersion = ChunkVersion(0, 0, collDoc.getEpoch());

        // Check to see if we should use the old version or not.
        if (oldManager) {

            ChunkVersion oldVersion = oldManager->getMaxShardVersion();

            if (oldVersion.isSet() && oldVersion.hasCompatibleEpoch(collDoc.getEpoch())) {

                // Our epoch for coll version and shard version should be the same.
                verify(oldManager->getMaxCollVersion().hasCompatibleEpoch(collDoc.getEpoch()));

                versionMap[shard] = oldManager->_maxShardVersion;
                manager->_maxCollVersion = oldManager->_maxCollVersion;

                // TODO: This could be made more efficient if copying not required, but
                // not as frequently reloaded as in mongos.
                manager->_chunksMap = oldManager->_chunksMap;

                LOG(2) << "loading new chunks for collection " << ns
                       << " using old chunk manager w/ version "
                       << oldManager->getMaxShardVersion()
                       << " and " << manager->_chunksMap.size() << " chunks" << endl;
            }
        }

        // Exposes the new 'manager's range map and version to the "differ," who
        // would ultimately be responsible of filling them up.
        SCMConfigDiffTracker differ(shard);
        differ.attach(ns, manager->_chunksMap, manager->_maxCollVersion, versionMap);

        try {

            scoped_ptr<ScopedDbConnection> connPtr(
                ScopedDbConnection::getInternalScopedDbConnection(_configLoc.toString(), 30));
            ScopedDbConnection& conn = *connPtr;

            auto_ptr<DBClientCursor> cursor = conn->query(ChunkType::ConfigNS,
                                                          differ.configDiffQuery());

            if (!cursor.get()) {
                // 'errMsg' was filled by the getChunkCursor() call.
                manager->_maxCollVersion = ChunkVersion();
                manager->_chunksMap.clear();
                connPtr->done();
                return false;
            }

            // Diff tracker should *always* find at least one chunk if collection exists.
            int diffsApplied = differ.calculateConfigDiff(*cursor);
            if (diffsApplied > 0) {

                LOG(2) << "loaded " << diffsApplied
                       << " chunks into new chunk manager for " << ns
                       << " with version " << manager->_maxCollVersion << endl;

                manager->_maxShardVersion = versionMap[shard];
                manager->fillRanges();
                connPtr->done();
                return true;
            }
            else if(diffsApplied == 0) {

                *errMsg = str::stream() << "no chunks found when reloading " << ns
                                        << ", previous version was "
                                        << manager->_maxCollVersion.toString();

                warning() << *errMsg << endl;

                manager->_maxCollVersion = ChunkVersion();
                manager->_chunksMap.clear();
                connPtr->done();
                return false;
            }
            else {

                // TODO: make this impossible by making sure we don't migrate / split on this
                // shard during the reload.  No chunks were found for the ns.

                *errMsg = str::stream() << "invalid chunks found when reloading " << ns
                                        << ", previous version was "
                                        << manager->_maxCollVersion.toString()
                                        << ", this should be rare";

                warning() << *errMsg << endl;

                manager->_maxCollVersion = ChunkVersion();
                manager->_chunksMap.clear();
                connPtr->done();
                return false;
            }
        }
        catch (const DBException& e) {
            *errMsg = str::stream() << "caught exception accessing the config servers"
                                    << causedBy(e);

            // We deliberately do not return connPtr to the pool, since it was involved
            // with the error here.

            return false;
        }
    }
Example #7
int ConfigServer::checkConfigVersion( bool upgrade ) {
    int cur = dbConfigVersion();
    if ( cur == VERSION )
        return 0;

    if ( cur == 0 ) {
        scoped_ptr<ScopedDbConnection> conn(
            ScopedDbConnection::getScopedDbConnection( _primary.getConnString() ) );

        // If the cluster has not previously been initialized, we need to set the version before using it so
        // subsequent mongoses use the config data the same way.  This requires all three config servers to be
        // online initially.
        try {
            conn->get()->insert( "config.version" , BSON( "_id" << 1 << "version" << VERSION ) );
        }
        catch( DBException& ) {
            error() << "All config servers must initially be reachable for the cluster to be initialized." << endl;
            throw;
        }

        pool.flush();
        verify( VERSION == dbConfigVersion( conn->conn() ) );
        conn->done();
        return 0;
    }

    if ( cur == 2 ) {

        // need to upgrade
        verify( VERSION == 3 );
        if ( ! upgrade ) {
            log() << "newer version of mongo meta data\n"
                  << "need to --upgrade after shutting all mongos down"
                  << endl;
            return -9;
        }

        scoped_ptr<ScopedDbConnection> connPtr(
            ScopedDbConnection::getInternalScopedDbConnection( _primary.getConnString() ) );
        ScopedDbConnection& conn = *connPtr;

        // do a backup
        string backupName;
        {
            stringstream ss;
            ss << "config-backup-" << terseCurrentTime(false);
            backupName = ss.str();
        }
        log() << "backing up config to: " << backupName << endl;
        conn->copyDatabase( "config" , backupName );

        map<string,string> hostToShard;
        set<string> shards;
        // shards
        {
            unsigned n = 0;
            auto_ptr<DBClientCursor> c = conn->query( ShardNS::shard , BSONObj() );
            while ( c->more() ) {
                BSONObj o = c->next();
                string host = o["host"].String();

                string name = "";

                BSONElement id = o["_id"];
                if ( id.type() == String ) {
                    name = id.String();
                }
                else {
                    stringstream ss;
                    ss << "shard" << hostToShard.size();
                    name = ss.str();
                }

                hostToShard[host] = name;
                shards.insert( name );
                n++;
            }

            verify( n == hostToShard.size() );
            verify( n == shards.size() );

            conn->remove( ShardNS::shard , BSONObj() );

            for ( map<string,string>::iterator i=hostToShard.begin(); i != hostToShard.end(); i++ ) {
                conn->insert( ShardNS::shard , BSON( "_id" << i->second << "host" << i->first ) );
            }
        }

        // databases
        {
            auto_ptr<DBClientCursor> c = conn->query( ShardNS::database , BSONObj() );
            map<string,BSONObj> newDBs;
            unsigned n = 0;
            while ( c->more() ) {
                BSONObj old = c->next();
                n++;

                if ( old["name"].eoo() ) {
                    // already done
                    newDBs[old["_id"].String()] = old;
                    continue;
                }

                BSONObjBuilder b(old.objsize());
                b.appendAs( old["name"] , "_id" );

                BSONObjIterator i(old);
                while ( i.more() ) {
                    BSONElement e = i.next();
                    if ( strcmp( "_id" , e.fieldName() ) == 0 ||
                            strcmp( "name" , e.fieldName() ) == 0 ) {
                        continue;
                    }

                    b.append( e );
                }

                BSONObj x = b.obj();
                log() << old << "\n\t" << x << endl;
                newDBs[old["name"].String()] = x;
            }

            verify( n == newDBs.size() );

            conn->remove( ShardNS::database , BSONObj() );

            for ( map<string,BSONObj>::iterator i=newDBs.begin(); i!=newDBs.end(); i++ ) {
                conn->insert( ShardNS::database , i->second );
            }

        }

        // chunks
        {
            unsigned num = 0;
            map<string,BSONObj> chunks;
            auto_ptr<DBClientCursor> c = conn->query( ShardNS::chunk , BSONObj() );
            while ( c->more() ) {
                BSONObj x = c->next();
                BSONObjBuilder b;

                string id = Chunk::genID( x["ns"].String() , x["min"].Obj() );
                b.append( "_id" , id );

                BSONObjIterator i(x);
                while ( i.more() ) {
                    BSONElement e = i.next();
                    if ( strcmp( e.fieldName() , "_id" ) == 0 )
                        continue;
                    b.append( e );
                }

                BSONObj n = b.obj();
                log() << x << "\n\t" << n << endl;
                chunks[id] = n;
                num++;
            }

            verify( num == chunks.size() );

            conn->remove( ShardNS::chunk , BSONObj() );
            for ( map<string,BSONObj>::iterator i=chunks.begin(); i!=chunks.end(); i++ ) {
                conn->insert( ShardNS::chunk , i->second );
            }

        }

        conn->update( "config.version" , BSONObj() , BSON( "_id" << 1 << "version" << VERSION ) );
        conn.done();
        pool.flush();
        return 1;
    }

    log() << "don't know how to upgrade " << cur << " to " << VERSION << endl;
    return -8;
}
enum TVerdict CTS_ResolveAddress::doTestStepL(void)
/**
 * Resolve an address to a hostname (reverse DNS lookup)
 * @return The test step verdict
 */
	{				
	TPtrC connPtr(KNameDefault);
	TBuf<10> keyName;
	keyName = KDestAddr; 
	
	// Get destination address from config file	 		
	TBool returnValue = GetStringFromConfig(KResolveAddress, keyName, connPtr);
	if (!returnValue)
		{
		LogExtra((TText8*)__FILE__, __LINE__, ESevrErr, KEConfigFile);
		return EFail;
		}			
	
	// Create address
	TInetAddr ipAddr;
	TInt err = ipAddr.Input(connPtr);
	TESTE(err==KErrNone,err);
		
	// Resolve address
	TNameEntry result;
	RSocketServ sockServ;
	User::LeaveIfError(sockServ.Connect());
	RHostResolver resolver;
		
	// Does not have functionality to use an existing explicit connection
	// (like ResolveName), but this test step is not used in any of the test cases.
	err = resolver.Open(sockServ, KAfInet, KProtocolInetTcp);
	TESTE(err==KErrNone,err);

	err = resolver.GetByAddress(ipAddr, result);
	TESTE(err==KErrNone,err);
	
	// Clean up
	resolver.Close();
	sockServ.Close();
		
	// Return result depending on what is expected	
	TBool expectTimeout = EFalse;
	TBool expectSuccess = ETrue;
	GetBoolFromConfig(KResolveName, KExpectTimeout, expectTimeout);
	GetBoolFromConfig(KResolveName, KExpectSuccess, expectSuccess);
	
	if (err != KErrNone)
		{
		if ((err==KErrNotFound) && !expectSuccess)
			return EPass;
		if ((err==KErrTimedOut) && expectTimeout)
			return EPass;
		else
			return EFail;
		}
	// No error so...
	if (expectSuccess)
		return EPass;
	else
		return EFail;	
	}