Example 1
    Status Strategy::commandOpUnsharded(const std::string& db,
                                        const BSONObj& command,
                                        int options,
                                        const std::string& versionedNS,
                                        CommandResult* cmdResult) {

        // Note that this implementation will not handle targeting retries and fails when the
        // sharding metadata is too stale
        auto status = grid.catalogCache()->getDatabase(db);
        if (!status.isOK()) {
            mongoutils::str::stream ss;
            ss << "Passthrough command failed: " << command.toString()
               << " on ns " << versionedNS << ". Caused by " << causedBy(status.getStatus());
            return Status(ErrorCodes::IllegalOperation, ss);
        }

        shared_ptr<DBConfig> conf = status.getValue();
        if (conf->isSharded(versionedNS)) {
            mongoutils::str::stream ss;
            ss << "Passthrough command failed: " << command.toString()
               << " on ns " << versionedNS << ". Cannot run on sharded namespace.";
            return Status(ErrorCodes::IllegalOperation, ss);
        }

        Shard primaryShard = conf->getPrimary();

        BSONObj shardResult;
        try {
            ShardConnection conn(primaryShard.getConnString(), "");

            // TODO: this can throw a stale config when mongos is not up-to-date -- fix.
            if (!conn->runCommand(db, command, shardResult, options)) {
                conn.done();
                return Status(ErrorCodes::OperationFailed,
                              str::stream() << "Passthrough command failed: " << command
                                            << " on ns " << versionedNS
                                            << "; result: " << shardResult);
            }
            conn.done();
        }
        catch (const DBException& ex) {
            return ex.toStatus();
        }

        // Fill out the command result.
        cmdResult->shardTarget = primaryShard;
        cmdResult->result = shardResult;
        cmdResult->target = primaryShard.getConnString();

        return Status::OK();
    }
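Example 2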
    Status DBClientShardResolver::chooseWriteHost( const string& shardName,
                                                   ConnectionString* shardHost ) const {

        // Declare up here for parsing later
        string errMsg;

        // Special-case for config
        if (shardName == "config") {
            *shardHost = ConnectionString::parse( configServer.modelServer(), errMsg );
            dassert( errMsg == "" );
            return Status::OK();
        }

        //
        // First get the information about the shard from the shard cache
        //

        // Internally uses our shard cache, does no reload
        Shard shard = Shard::findIfExists( shardName );
        if ( shard.getName() == "" ) {
            return Status( ErrorCodes::ShardNotFound,
                           string("unknown shard name ") + shardName );
        }
        return findMaster(shard.getConnString().toString(), shardHost);
    }
Example 3
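    // Fetch the credentials for 'user': the internal user requires a key file and uses the
    // internal password, while any other user is looked up in the database's system.users
    // collection on the shard that owns it.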
    bool CmdAuthenticate::getUserObj(const string& dbname, const string& user, BSONObj& userObj, string& pwd) {
        if (user == internalSecurity.user) {
            uassert(15890, "key file must be used to log in with internal user",
                    !cmdLine.keyFile.empty());
            pwd = internalSecurity.pwd;
        }
        else {
            string systemUsers = dbname + ".system.users";
            DBConfigPtr config = grid.getDBConfig( systemUsers );
            Shard s = config->getShard( systemUsers );

            static BSONObj userPattern = BSON("user" << 1);

            scoped_ptr<ScopedDbConnection> conn(
                    ScopedDbConnection::getInternalScopedDbConnection( s.getConnString(), 30.0 ) );
            OCCASIONALLY conn->get()->ensureIndex(systemUsers, userPattern, false, "user_1");
            {
                BSONObjBuilder b;
                b << "user" << user;
                BSONObj query = b.done();
                userObj = conn->get()->findOne(systemUsers, query, 0, QueryOption_SlaveOk);
                if( userObj.isEmpty() ) {
                    log() << "auth: couldn't find user " << user << ", " << systemUsers << endl;
                    conn->done(); // return to pool
                    return false;
                }
            }

            pwd = userObj.getStringField("pwd");

            conn->done(); // return to pool
        }
        return true;
    }
Example 4
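    // Build the setShardVersion command (target namespace, config server, shard name and host,
    // plus either the chunk version or an 'init' flag, and optionally 'authoritative'), then run
    // it against the admin database on 'conn'.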
    bool setShardVersion(DBClientBase& conn,
                         const string& ns,
                         const string& configServerPrimary,
                         ChunkVersion version,
                         ChunkManager* manager,
                         bool authoritative,
                         BSONObj& result) {

        BSONObjBuilder cmdBuilder;
        cmdBuilder.append("setShardVersion", ns);
        cmdBuilder.append("configdb", configServerPrimary);

        Shard s = Shard::make(conn.getServerAddress());
        cmdBuilder.append("shard", s.getName());
        cmdBuilder.append("shardHost", s.getConnString());

        if (ns.size() > 0) {
            version.addToBSON(cmdBuilder);
        }
        else {
            cmdBuilder.append("init", true);
        }

        if (authoritative) {
            cmdBuilder.appendBool("authoritative", 1);
        }

        BSONObj cmd = cmdBuilder.obj();

        LOG(1) << "    setShardVersion  " << s.getName() << " " << conn.getServerAddress()
               << "  " << ns << "  " << cmd
               << (manager ? string(str::stream() << " " << manager->getSequenceNumber()) : "");

        return conn.runCommand("admin", cmd, result, 0);
    }
Example 5
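    // Capture the shard's host string, the target namespace, and the chunk manager, then run
    // the common _init() setup.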
    ShardConnection::ShardConnection(const Shard& s, const string& ns, ChunkManagerPtr manager)
        : _addr(s.getConnString()),
          _ns(ns),
          _manager( manager ) {

        _init();
    }
Example 6
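 // Under the cache mutex, store a copy of the shard in the name lookup and/or install it
 // under its host string.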
 void set( const string& name , const Shard& s , bool setName = true , bool setAddr = true ) {
     scoped_lock lk( _mutex );
     ShardPtr ss( new Shard( s ) );
     if ( setName )
         _lookup[name] = ss;
     if ( setAddr )
         _installHost( s.getConnString() , ss );
 }
Example 7
    bool Chunk::moveAndCommit(const Shard& to,
                              long long chunkSize /* bytes */,
                              const WriteConcernOptions* writeConcern,
                              bool waitForDelete,
                              int maxTimeMS,
                              BSONObj& res) const {
        uassert( 10167 ,  "can't move shard to its current location!" , getShard() != to );

        log() << "moving chunk ns: " << _manager->getns() << " moving ( " << toString() << ") "
              << _shard.toString() << " -> " << to.toString();

        Shard from = _shard;
        ScopedDbConnection fromconn(from.getConnString());

        BSONObjBuilder builder;
        builder.append("moveChunk", _manager->getns());
        builder.append("from", from.getAddress().toString());
        builder.append("to", to.getAddress().toString());
        // NEEDED FOR 2.0 COMPATIBILITY
        builder.append("fromShard", from.getName());
        builder.append("toShard", to.getName());
        ///////////////////////////////
        builder.append("min", _min);
        builder.append("max", _max);
        builder.append("maxChunkSizeBytes", chunkSize);
        builder.append("shardId", genID());
        builder.append("configdb", configServer.modelServer());

        // For legacy secondary throttle setting.
        bool secondaryThrottle = true;
        if (writeConcern &&
                writeConcern->wNumNodes <= 1 &&
                writeConcern->wMode.empty()) {
            secondaryThrottle = false;
        }

        builder.append("secondaryThrottle", secondaryThrottle);

        if (secondaryThrottle && writeConcern) {
            builder.append("writeConcern", writeConcern->toBSON());
        }

        builder.append("waitForDelete", waitForDelete);
        builder.append(LiteParsedQuery::cmdOptionMaxTimeMS, maxTimeMS);
        builder.append("epoch", _manager->getVersion().epoch());

        bool worked = fromconn->runCommand("admin", builder.done(), res);
        fromconn.done();

        LOG( worked ? 1 : 0 ) << "moveChunk result: " << res;

        // if succeeded, needs to reload to pick up the new location
        // if failed, mongos may be stale
        // reload is excessive here as the failure could be simply because collection metadata is taken
        _manager->reload();

        return worked;
    }
Example 8
    void ChunkManager::calcInitSplitsAndShards( const Shard& primary,
                                                const vector<BSONObj>* initPoints,
                                                const vector<Shard>* initShards,
                                                vector<BSONObj>* splitPoints,
                                                vector<Shard>* shards ) const
    {
        verify( _chunkMap.size() == 0 );

        unsigned long long numObjects = 0;
        Chunk c(this, _keyPattern.getKeyPattern().globalMin(),
                      _keyPattern.getKeyPattern().globalMax(), primary);

        if ( !initPoints || !initPoints->size() ) {
            // discover split points
            {
                // get stats to see if there is any data
                ScopedDbConnection shardConn(primary.getConnString());

                numObjects = shardConn->count( getns() );
                shardConn.done();
            }

            if ( numObjects > 0 )
                c.pickSplitVector( *splitPoints , Chunk::MaxChunkSize );

            // since docs already exist, must use the primary shard
            shards->push_back( primary );
        } else {
            // make sure points are unique and ordered
            set<BSONObj> orderedPts;
            for ( unsigned i = 0; i < initPoints->size(); ++i ) {
                BSONObj pt = (*initPoints)[i];
                orderedPts.insert( pt );
            }
            for ( set<BSONObj>::iterator it = orderedPts.begin(); it != orderedPts.end(); ++it ) {
                splitPoints->push_back( *it );
            }

            if ( !initShards || !initShards->size() ) {
                // If not specified, only use the primary shard (note that it's not safe for mongos
                // to put initial chunks on other shards without the primary mongod knowing).
                shards->push_back( primary );
            } else {
                std::copy( initShards->begin() , initShards->end() , std::back_inserter(*shards) );
            }
        }
    }
Example 9
    bool setShardVersion( DBClientBase & conn , const string& ns , ShardChunkVersion version , bool authoritative , BSONObj& result ){

        BSONObjBuilder cmdBuilder;
        cmdBuilder.append( "setShardVersion" , ns.c_str() );
        cmdBuilder.append( "configdb" , configServer.modelServer() );
        cmdBuilder.appendTimestamp( "version" , version );
        cmdBuilder.appendOID( "serverID" , &serverID );
        if ( authoritative )
            cmdBuilder.appendBool( "authoritative" , 1 );

        Shard s = Shard::make( conn.getServerAddress() );
        cmdBuilder.append( "shard" , s.getName() );
        cmdBuilder.append( "shardHost" , s.getConnString() );
        BSONObj cmd = cmdBuilder.obj();
        
        log(1) << "    setShardVersion  " << s.getName() << " " << conn.getServerAddress() << "  " << ns << "  " << cmd << " " << &conn << endl;
        
        return conn.runCommand( "admin" , cmd , result );
    }
Example 10
    void ClientInfo::_setupAuth() {
        std::string adminNs = "admin";
        DBConfigPtr config = grid.getDBConfig(adminNs);
        Shard shard = config->getShard(adminNs);
        scoped_ptr<ScopedDbConnection> connPtr(
                ScopedDbConnection::getInternalScopedDbConnection(shard.getConnString(), 30.0));
        ScopedDbConnection& conn = *connPtr;

        //
        // Note: The connection mechanism here is *not* ideal, and should not be used elsewhere.
        // It is safe in this particular case because the admin database is always on the config
        // server and does not move.
        //

        AuthorizationManager* authManager = new AuthorizationManager(new AuthExternalStateImpl());
        Status status = authManager->initialize(conn.get());
        massert(16479,
                mongoutils::str::stream() << "Error initializing AuthorizationManager: "
                                          << status.reason(),
                status == Status::OK());
        setAuthorizationManager(authManager);
    }
Example 11
    void AuthenticationInfo::_checkLocalHostSpecialAdmin() {
        if (noauth || !_isLocalHost || !_isLocalHostAndLocalHostIsAuthorizedForAll) {
            return;
        }

        string adminNs = "admin.system.users";

        DBConfigPtr config = grid.getDBConfig( adminNs );
        Shard s = config->getShard( adminNs );

        //
        // Note: The connection mechanism here is *not* ideal, and should not be used elsewhere.
        // It is safe in this particular case because the admin database is always on the config
        // server and does not move.
        //
        scoped_ptr<ScopedDbConnection> conn(
                ScopedDbConnection::getInternalScopedDbConnection(s.getConnString(), 30.0));

        BSONObj result = (*conn)->findOne("admin.system.users", Query());
        if( result.isEmpty() ) {
            if( ! _warned ) {
                // you could get a few of these in a race, but that's ok
                _warned = true;
                log() << "note: no users configured in admin.system.users, allowing localhost access" << endl;
            }

            // Must return conn to pool
            // TODO: Check for errors during findOne(), or just let the conn die?
            conn->done();
            _isLocalHostAndLocalHostIsAuthorizedForAll = true;
            return;
        }

        // Must return conn to pool
        conn->done();
        _isLocalHostAndLocalHostIsAuthorizedForAll = false;
    }
Example 12
 ShardConnection::ShardConnection( const Shard& s , const string& ns )
     : _addr( s.getConnString() ) , _ns( ns ) {
     _init();
 }
Example 13
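 // Open a scoped connection to the shard, attempt the namespace lock via the
 // connection-based overload, then return the connection to the pool.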
 bool lockNamespaceOnServer( const Shard& shard, const string& ns ){
     ScopedDbConnection conn( shard.getConnString() );
     bool res = lockNamespaceOnServer( conn.conn() , ns );
     conn.done();
     return res;
 }
Example 14
 ShardConnection::ShardConnection( const Shard& s , const string& ns, bool ignoreDirect )
     : _addr( s.getConnString() ) , _ns( ns ) {
     _init( ignoreDirect );
 }
Example 15
    Status DBClientShardResolver::chooseWriteHost( const string& shardName,
                                                   ConnectionString* shardHost ) const {

        // Declare up here for parsing later
        string errMsg;

        // Special-case for config and admin
        if ( shardName == "config" || shardName == "admin" ) {
            *shardHost = ConnectionString::parse( configServer.modelServer(), errMsg );
            dassert( errMsg == "" );
            return Status::OK();
        }

        //
        // First get the information about the shard from the shard cache
        //

        // Internally uses our shard cache, does no reload
        Shard shard = Shard::findIfExists( shardName );
        if ( shard.getName() == "" ) {
            return Status( ErrorCodes::ShardNotFound,
                           string("unknown shard name ") + shardName );
        }

        ConnectionString rawShardHost = ConnectionString::parse( shard.getConnString(), errMsg );
        dassert( errMsg == "" );
        dassert( rawShardHost.type() == ConnectionString::SET
                 || rawShardHost.type() == ConnectionString::MASTER );

        if ( rawShardHost.type() == ConnectionString::MASTER ) {
            *shardHost = rawShardHost;
            return Status::OK();
        }

        //
        // If we need to, then get the particular node we're targeting in the replica set
        //

        // Does not reload the monitor if it doesn't currently exist
        ReplicaSetMonitorPtr replMonitor = ReplicaSetMonitor::get( rawShardHost.getSetName(),
                                                                   false );
        if ( !replMonitor ) {
            return Status( ErrorCodes::ReplicaSetNotFound,
                           string("unknown replica set ") + rawShardHost.getSetName() );
        }

        try {
            // This can throw when we don't find a master!
            HostAndPort masterHostAndPort = replMonitor->getMaster();
            *shardHost = ConnectionString::parse( masterHostAndPort.toString( true ), errMsg );
            dassert( errMsg == "" );
            return Status::OK();
        }
        catch ( const DBException& ) {
            return Status( ErrorCodes::HostNotFound,
                           string("could not contact primary for replica set ")
                           + replMonitor->getName() );
        }

        // Unreachable
        dassert( false );
        return Status( ErrorCodes::UnknownError, "" );
    }
Example 16
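 // Resolve the shard to its host string and check out a pooled connection with the given
 // socket timeout.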
 ScopedDbConnection::ScopedDbConnection(const Shard& shard, double socketTimeout )
     : _host( shard.getConnString() ) , _conn( pool.get(_host, socketTimeout) ), _socketTimeout( socketTimeout ) {
     _setSocketTimeout();
 }
Example 17
            bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {
                string target = cmdObj.firstElement().valuestrsafe();
                Shard s = Shard::make( target );
                if ( ! grid.knowAboutShard( s.getConnString() ) ) {
                    errmsg = "unknown shard";
                    return false;
                }

                ScopedDbConnection conn( configServer.getPrimary() );

                // If the server is not yet draining chunks, put it in draining mode.
                BSONObj searchDoc = BSON( "_id" << s.getName() );
                BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
                BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
                if ( shardDoc.isEmpty() ) {

                    // TODO prevent move chunks to this shard.

                    log() << "going to start draining shard: " << s.getName() << endl;
                    BSONObj newStatus = BSON( "$set" << BSON( ShardFields::draining(true) ) );
                    conn->update( "config.shards" , searchDoc , newStatus, false /* do no upsert */);

                    errmsg = conn->getLastError();
                    if ( errmsg.size() ) {
                        log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
                        return false;
                    }

                    Shard::reloadShardInfo();

                    result.append( "msg"   , "draining started successfully" );
                    result.append( "state" , "started" );
                    result.append( "shard" , s.getName() );
                    conn.done();
                    return true;
                }

                // If the server has been completely drained, remove it from the ConfigDB.
                // Check not only for chunks but also databases.
                BSONObj shardIDDoc = BSON( "shard" << shardDoc[ "_id" ].str() );
                long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
                BSONObj primaryDoc = BSON( "primary" << shardDoc[ "_id" ].str() );
                long long dbCount = conn->count( "config.databases" , primaryDoc );
                if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
                    log() << "going to remove shard: " << s.getName() << endl;
                    conn->remove( "config.shards" , searchDoc );

                    errmsg = conn->getLastError();
                    if ( errmsg.size() ) {
                        log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
                        return false;
                    }

                    Shard::removeShard( shardDoc[ "_id" ].str() );
                    Shard::reloadShardInfo();

                    result.append( "msg"   , "removeshard completed successfully" );
                    result.append( "state" , "completed" );
                    result.append( "shard" , s.getName() );
                    conn.done();
                    return true;
                }

                // If the server is already in draining mode, just report on its progress.
                // Report on databases (not just chunks) that are left too.
                result.append( "msg"  , "draining ongoing" );
                result.append( "state" , "ongoing" );
                BSONObjBuilder inner;
                inner.append( "chunks" , chunkCount );
                inner.append( "dbs" , dbCount );
                result.append( "remaining" , inner.obj() );

                conn.done();
                return true;
            }
Example 18
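 // Resolve the shard to its host string and check out a pooled connection with no explicit
 // socket timeout.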
 ScopedDbConnection::ScopedDbConnection(const Shard& shard )
     : _host( shard.getConnString() ) , _conn( pool.get(_host) ){
 }
Example 19
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            BSONObjBuilder b;
            vector<Shard> shards;

            ClientBasic* client = ClientBasic::getCurrent();
            AuthorizationSession* authSession = client->getAuthorizationSession();
            if ( strcmp( ns , "inprog" ) == 0 ) {
                const bool isAuthorized = authSession->isAuthorizedForActionsOnResource(
                        ResourcePattern::forClusterResource(), ActionType::inprog);
                audit::logInProgAuthzCheck(
                        client, q.query, isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run inprog", isAuthorized);

                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    ScopedDbConnection conn(shard.getConnString());
                    BSONObj temp = conn->findOne( r.getns() , q.query );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn.done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                const bool isAuthorized = authSession->isAuthorizedForActionsOnResource(
                        ResourcePattern::forClusterResource(), ActionType::killop);
                audit::logKillOpAuthzCheck(
                        client,
                        q.query,
                        isAuthorized ? ErrorCodes::OK : ErrorCodes::Unauthorized);
                uassert(ErrorCodes::Unauthorized, "not authorized to run killop", isAuthorized);

                BSONElement e = q.query["op"];
                if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        ScopedDbConnection conn(s.getConnString());
                        conn->findOne( r.getns() , BSON( "op" << opid ) );
                        conn.done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                warning() << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }
Example 20
            bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
                string target = cmdObj.firstElement().valuestrsafe();
                Shard s = Shard::make( target );
                if ( ! grid.knowAboutShard( s.getConnString() ) ) {
                    errmsg = "unknown shard";
                    return false;
                }

                ScopedDbConnection conn( configServer.getPrimary() );

                if (conn->count("config.shards", BSON("_id" << NE << s.getName() << ShardFields::draining(true)))){
                    conn.done();
                    errmsg = "Can't have more than one draining shard at a time";
                    return false;
                }

                if (conn->count("config.shards", BSON("_id" << NE << s.getName())) == 0){
                    conn.done();
                    errmsg = "Can't remove last shard";
                    return false;
                }

                BSONObj primaryDoc = BSON( "_id" << NE << "local" << "primary" << s.getName() );
                BSONObj dbInfo; // appended at end of result on success
                {
                    boost::scoped_ptr<DBClientCursor> cursor (conn->query("config.databases", primaryDoc));
                    if (cursor->more()) { // skip block and allocations if empty
                        BSONObjBuilder dbInfoBuilder;
                        dbInfoBuilder.append("note", "you need to drop or movePrimary these databases");
                        BSONArrayBuilder dbs(dbInfoBuilder.subarrayStart("dbsToMove"));

                        while (cursor->more()){
                            BSONObj db = cursor->nextSafe();
                            dbs.append(db["_id"]);
                        }
                        dbs.doneFast();

                        dbInfo = dbInfoBuilder.obj();
                    }
                }

                // If the server is not yet draining chunks, put it in draining mode.
                BSONObj searchDoc = BSON( "_id" << s.getName() );
                BSONObj drainingDoc = BSON( "_id" << s.getName() << ShardFields::draining(true) );
                BSONObj shardDoc = conn->findOne( "config.shards", drainingDoc );
                if ( shardDoc.isEmpty() ) {

                    // TODO prevent move chunks to this shard.

                    log() << "going to start draining shard: " << s.getName() << endl;
                    BSONObj newStatus = BSON( "$set" << BSON( ShardFields::draining(true) ) );
                    conn->update( "config.shards" , searchDoc , newStatus, false /* do no upsert */);

                    errmsg = conn->getLastError();
                    if ( errmsg.size() ) {
                        log() << "error starting remove shard: " << s.getName() << " err: " << errmsg << endl;
                        return false;
                    }

                    BSONObj primaryLocalDoc = BSON("_id" << "local" <<  "primary" << s.getName() );
                    PRINT(primaryLocalDoc);
                    if (conn->count("config.databases", primaryLocalDoc)) {
                        log() << "This shard is listed as primary of local db. Removing entry." << endl;
                        conn->remove("config.databases", BSON("_id" << "local"));
                        errmsg = conn->getLastError();
                        if ( errmsg.size() ) {
                            log() << "error removing local db: " << errmsg << endl;
                            return false;
                        }
                    }

                    Shard::reloadShardInfo();

                    result.append( "msg"   , "draining started successfully" );
                    result.append( "state" , "started" );
                    result.append( "shard" , s.getName() );
                    result.appendElements(dbInfo);
                    conn.done();
                    return true;
                }

                // If the server has been completely drained, remove it from the ConfigDB.
                // Check not only for chunks but also databases.
                BSONObj shardIDDoc = BSON( "shard" << shardDoc[ "_id" ].str() );
                long long chunkCount = conn->count( "config.chunks" , shardIDDoc );
                long long dbCount = conn->count( "config.databases" , primaryDoc );
                if ( ( chunkCount == 0 ) && ( dbCount == 0 ) ) {
                    log() << "going to remove shard: " << s.getName() << endl;
                    conn->remove( "config.shards" , searchDoc );

                    errmsg = conn->getLastError();
                    if ( errmsg.size() ) {
                        log() << "error concluding remove shard: " << s.getName() << " err: " << errmsg << endl;
                        return false;
                    }

                    Shard::removeShard( shardDoc[ "_id" ].str() );
                    Shard::reloadShardInfo();

                    result.append( "msg"   , "removeshard completed successfully" );
                    result.append( "state" , "completed" );
                    result.append( "shard" , s.getName() );
                    conn.done();
                    return true;
                }

                // If the server is already in draining mode, just report on its progress.
                // Report on databases (not just chunks) that are left too.
                result.append( "msg"  , "draining ongoing" );
                result.append( "state" , "ongoing" );
                BSONObjBuilder inner;
                inner.append( "chunks" , chunkCount );
                inner.append( "dbs" , dbCount );
                result.append( "remaining" , inner.obj() );
                result.appendElements(dbInfo);

                conn.done();
                return true;
            }
Example 21
        bool handleSpecialNamespaces( Request& r , QueryMessage& q ) {
            const char * ns = strstr( r.getns() , ".$cmd.sys." );
            if ( ! ns )
                return false;
            ns += 10;

            BSONObjBuilder b;
            vector<Shard> shards;

            AuthorizationManager* authManager =
                    ClientBasic::getCurrent()->getAuthorizationManager();

            if ( strcmp( ns , "inprog" ) == 0 ) {
                uassert(16545,
                        "not authorized to run inprog",
                        authManager->checkAuthorization(AuthorizationManager::SERVER_RESOURCE_NAME,
                                                        ActionType::inprog));

                Shard::getAllShards( shards );

                BSONArrayBuilder arr( b.subarrayStart( "inprog" ) );

                for ( unsigned i=0; i<shards.size(); i++ ) {
                    Shard shard = shards[i];
                    scoped_ptr<ScopedDbConnection> conn(
                            ScopedDbConnection::getScopedDbConnection( shard.getConnString() ) );
                    BSONObj temp = conn->get()->findOne( r.getns() , q.query );
                    if ( temp["inprog"].isABSONObj() ) {
                        BSONObjIterator i( temp["inprog"].Obj() );
                        while ( i.more() ) {
                            BSONObjBuilder x;

                            BSONObjIterator j( i.next().Obj() );
                            while( j.more() ) {
                                BSONElement e = j.next();
                                if ( str::equals( e.fieldName() , "opid" ) ) {
                                    stringstream ss;
                                    ss << shard.getName() << ':' << e.numberInt();
                                    x.append( "opid" , ss.str() );
                                }
                                else if ( str::equals( e.fieldName() , "client" ) ) {
                                    x.appendAs( e , "client_s" );
                                }
                                else {
                                    x.append( e );
                                }
                            }
                            arr.append( x.obj() );
                        }
                    }
                    conn->done();
                }

                arr.done();
            }
            else if ( strcmp( ns , "killop" ) == 0 ) {
                uassert(16546,
                        "not authorized to run killop",
                        authManager->checkAuthorization(AuthorizationManager::SERVER_RESOURCE_NAME,
                                                        ActionType::killop));

                BSONElement e = q.query["op"];
                if ( e.type() != String ) {
                    b.append( "err" , "bad op" );
                    b.append( e );
                }
                else {
                    b.append( e );
                    string s = e.String();
                    string::size_type i = s.find( ':' );
                    if ( i == string::npos ) {
                        b.append( "err" , "bad opid" );
                    }
                    else {
                        string shard = s.substr( 0 , i );
                        int opid = atoi( s.substr( i + 1 ).c_str() );
                        b.append( "shard" , shard );
                        b.append( "shardid" , opid );

                        log() << "want to kill op: " << e << endl;
                        Shard s(shard);

                        scoped_ptr<ScopedDbConnection> conn(
                                ScopedDbConnection::getScopedDbConnection( s.getConnString() ) );
                        conn->get()->findOne( r.getns() , BSON( "op" << opid ) );
                        conn->done();
                    }
                }
            }
            else if ( strcmp( ns , "unlock" ) == 0 ) {
                b.append( "err" , "can't do unlock through mongos" );
            }
            else {
                LOG( LL_WARNING ) << "unknown sys command [" << ns << "]" << endl;
                return false;
            }

            BSONObj x = b.done();
            replyToQuery(0, r.p(), r.m(), x);
            return true;
        }