Example #1
        virtual bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
            string coll = cmdObj[ "captrunc" ].valuestrsafe();
            uassert( 13416, "captrunc must specify a collection", !coll.empty() );
            NamespaceString nss( dbname, coll );
            int n = cmdObj.getIntField( "n" );
            bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?

            Client::WriteContext ctx(txn,  nss.ns() );
            Collection* collection = ctx.ctx().db()->getCollection( txn, nss.ns() );
            massert( 13417, "captrunc collection not found or empty", collection);

            boost::scoped_ptr<Runner> runner(InternalPlanner::collectionScan(txn,
                                                                             nss.ns(),
                                                                             collection,
                                                                             InternalPlanner::BACKWARD));
            DiskLoc end;
            // We remove 'n' elements so the start is one past that
            for( int i = 0; i < n + 1; ++i ) {
                Runner::RunnerState state = runner->getNext(NULL, &end);
                massert( 13418, "captrunc invalid n", Runner::RUNNER_ADVANCED == state);
            }
            collection->temp_cappedTruncateAfter( txn, end, inc );
            ctx.commit();
            return true;
        }
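For reference, the handler above reads the target collection from the "captrunc" field and the truncation parameters from "n" and "inc". A minimal sketch of the command document it expects, built with the BSON macro used elsewhere in this codebase (collection name and values are illustrative):

    // Hypothetical captrunc request: truncate the capped collection "log"
    // after its 10th-from-last document, exclusive of that document.
    BSONObj cmd = BSON("captrunc" << "log" << "n" << 10 << "inc" << false);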
Example #2
        bool run(const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            log() << "test only command sleep invoked" << endl;
            long long millis = 10 * 1000;

            if (cmdObj["secs"].isNumber() && cmdObj["millis"].isNumber()) {
                millis = cmdObj["secs"].numberLong() * 1000 + cmdObj["millis"].numberLong();
            }
            else if (cmdObj["secs"].isNumber()) {
                millis = cmdObj["secs"].numberLong() * 1000;
            }
            else if (cmdObj["millis"].isNumber()) {
                millis = cmdObj["millis"].numberLong();
            }

            if(cmdObj.getBoolField("w")) {
                Lock::GlobalWrite lk;
                sleepmillis(millis);
            }
            else {
                Lock::GlobalRead lk;
                sleepmillis(millis);
            }

            // Interrupt point for testing (e.g. maxTimeMS).
            killCurrentOp.checkForInterrupt();

            return true;
        }
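The sleep handler above defaults to 10 seconds, accepts "secs" and/or "millis", and takes the global write lock when "w" is true (a read lock otherwise). A hedged sketch of a request document (values illustrative):

    // Illustrative sleep request: hold the global read lock for 2.5 seconds.
    BSONObj cmd = BSON("sleep" << 1 << "secs" << 2 << "millis" << 500 << "w" << false);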
Example #3
        virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            log() << "replSet replSetTest command received: " << cmdObj.toString() << rsLog;

            if (!checkAuth(errmsg, result)) {
                return false;
            }

            if( cmdObj.hasElement("forceInitialSyncFailure") ) {
                replSetForceInitialSyncFailure = (unsigned) cmdObj["forceInitialSyncFailure"].Number();
                return true;
            }

            if( !check(errmsg, result) )
                return false;

            if( cmdObj.hasElement("blind") ) {
                replSetBlind = cmdObj.getBoolField("blind");
                return true;
            }

            if (cmdObj.hasElement("sethbmsg")) {
                replset::sethbmsg(cmdObj["sethbmsg"].String());
                return true;
            }

            return false;
        }
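replSetTest dispatches on whichever test field is present in the request, so an invocation sets exactly one of them. Illustrative request documents (values are placeholders):

    // Each document toggles one test knob handled above.
    BSONObj blindCmd = BSON("replSetTest" << 1 << "blind" << true);
    BSONObj hbCmd = BSON("replSetTest" << 1 << "sethbmsg" << "testing sync");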
Example #4
        bool run(OperationContext* txn, const string& ns, BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            log() << "test only command sleep invoked" << endl;
            long long millis = 10 * 1000;

            if (cmdObj["secs"].isNumber() && cmdObj["millis"].isNumber()) {
                millis = cmdObj["secs"].numberLong() * 1000 + cmdObj["millis"].numberLong();
            }
            else if (cmdObj["secs"].isNumber()) {
                millis = cmdObj["secs"].numberLong() * 1000;
            }
            else if (cmdObj["millis"].isNumber()) {
                millis = cmdObj["millis"].numberLong();
            }

            if(cmdObj.getBoolField("w")) {
                ScopedTransaction transaction(txn, MODE_X);
                Lock::GlobalWrite lk(txn->lockState());
                sleepmillis(millis);
            }
            else {
                ScopedTransaction transaction(txn, MODE_S);
                Lock::GlobalRead lk(txn->lockState());
                sleepmillis(millis);
            }

            // Interrupt point for testing (e.g. maxTimeMS).
            txn->checkForInterrupt();

            return true;
        }
Example #5
        virtual bool run(OperationContext* txn, const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
            string coll = cmdObj[ "captrunc" ].valuestrsafe();
            uassert( 13416, "captrunc must specify a collection", !coll.empty() );
            NamespaceString nss( dbname, coll );
            int n = cmdObj.getIntField( "n" );
            bool inc = cmdObj.getBoolField( "inc" ); // inclusive range?

            OldClientWriteContext ctx(txn,  nss.ns() );
            Collection* collection = ctx.getCollection();
            massert( 13417, "captrunc collection not found or empty", collection);

            RecordId end;
            {
                boost::scoped_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(txn,
                                                                                     nss.ns(),
                                                                                     collection,
                                                                                     InternalPlanner::BACKWARD));
                // We remove 'n' elements so the start is one past that
                for( int i = 0; i < n + 1; ++i ) {
                    PlanExecutor::ExecState state = exec->getNext(NULL, &end);
                    massert( 13418, "captrunc invalid n", PlanExecutor::ADVANCED == state);
                }
            }
            WriteUnitOfWork wuow(txn);
            collection->temp_cappedTruncateAfter( txn, end, inc );
            wuow.commit();
            return true;
        }
Example #6
    virtual bool run(OperationContext* opCtx,
                     const string& dbname,
                     const BSONObj& cmdObj,
                     BSONObjBuilder& result) {
        const NamespaceString fullNs = CommandHelpers::parseNsCollectionRequired(dbname, cmdObj);
        if (!fullNs.isValid()) {
            return CommandHelpers::appendCommandStatus(
                result,
                {ErrorCodes::InvalidNamespace,
                 str::stream() << "collection name " << fullNs.ns() << " is not valid"});
        }

        int n = cmdObj.getIntField("n");
        bool inc = cmdObj.getBoolField("inc");  // inclusive range?

        if (n <= 0) {
            return CommandHelpers::appendCommandStatus(
                result, {ErrorCodes::BadValue, "n must be a positive integer"});
        }

        // Lock the database in mode IX and lock the collection exclusively.
        AutoGetCollection autoColl(opCtx, fullNs, MODE_IX, MODE_X);
        Collection* collection = autoColl.getCollection();
        if (!collection) {
            return CommandHelpers::appendCommandStatus(
                result,
                {ErrorCodes::NamespaceNotFound,
                 str::stream() << "collection " << fullNs.ns() << " does not exist"});
        }

        if (!collection->isCapped()) {
            return CommandHelpers::appendCommandStatus(
                result, {ErrorCodes::IllegalOperation, "collection must be capped"});
        }

        RecordId end;
        {
            // Scan backwards through the collection to find the document to start truncating from.
            // We will remove 'n' documents, so start truncating from the (n + 1)th document to the
            // end.
            auto exec = InternalPlanner::collectionScan(
                opCtx, fullNs.ns(), collection, PlanExecutor::NO_YIELD, InternalPlanner::BACKWARD);

            for (int i = 0; i < n + 1; ++i) {
                PlanExecutor::ExecState state = exec->getNext(nullptr, &end);
                if (PlanExecutor::ADVANCED != state) {
                    return CommandHelpers::appendCommandStatus(
                        result,
                        {ErrorCodes::IllegalOperation,
                         str::stream() << "invalid n, collection contains fewer than " << n
                                       << " documents"});
                }
            }
        }

        collection->cappedTruncateAfter(opCtx, end, inc);

        return true;
    }
Example #7
    bool run(OperationContext* opCtx,
             const std::string& ns,
             const BSONObj& cmdObj,
             BSONObjBuilder& result) {
        log() << "test only command sleep invoked";
        long long millis = 0;

        if (cmdObj["secs"] || cmdObj["seconds"] || cmdObj["millis"]) {
            uassert(51153,
                    "Only one of 'secs' and 'seconds' may be specified",
                    !(cmdObj["secs"] && cmdObj["seconds"]));

            if (auto secsElem = cmdObj["secs"]) {
                uassert(34344, "'secs' must be a number.", secsElem.isNumber());
                millis += secsElem.numberLong() * 1000;
            } else if (auto secondsElem = cmdObj["seconds"]) {
                uassert(51154, "'seconds' must be a number.", secondsElem.isNumber());
                millis += secondsElem.numberLong() * 1000;
            }

            if (auto millisElem = cmdObj["millis"]) {
                uassert(34345, "'millis' must be a number.", millisElem.isNumber());
                millis += millisElem.numberLong();
            }
        } else {
            millis = 10 * 1000;
        }

        StringData lockTarget;
        if (cmdObj["lockTarget"]) {
            lockTarget = cmdObj["lockTarget"].checkAndGetStringData();
        }

        if (!cmdObj["lock"]) {
            // Legacy implementation
            if (cmdObj.getBoolField("w")) {
                _sleepInLock(opCtx, millis, MODE_X, lockTarget);
            } else {
                _sleepInLock(opCtx, millis, MODE_S, lockTarget);
            }
        } else {
            uassert(34346, "Only one of 'w' and 'lock' may be set.", !cmdObj["w"]);

            std::string lock(cmdObj.getStringField("lock"));
            if (lock == "none") {
                opCtx->sleepFor(Milliseconds(millis));
            } else if (lock == "w") {
                _sleepInLock(opCtx, millis, MODE_X, lockTarget);
            } else {
                uassert(34347, "'lock' must be one of 'r', 'w', 'none'.", lock == "r");
                _sleepInLock(opCtx, millis, MODE_S, lockTarget);
            }
        }

        // Interrupt point for testing (e.g. maxTimeMS).
        opCtx->checkForInterrupt();

        return true;
    }
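The _sleepInLock helper is not shown in this example. A minimal sketch of what it might look like, assuming the two-argument Lock::GlobalLock constructor and ignoring the 'lockTarget' database case; this is an assumption, not the actual implementation:

    // Hedged sketch: take a global lock in the requested mode and sleep
    // while holding it. The real helper also honors 'lockTarget'.
    static void _sleepInLock(OperationContext* opCtx,
                             long long millis,
                             LockMode mode,
                             StringData lockTarget) {
        Lock::GlobalLock lk(opCtx, mode);  // assumption: (opCtx, mode) overload
        opCtx->sleepFor(Milliseconds(millis));
    }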
Example #8
    virtual bool run(OperationContext* txn,
                     const string& dbname,
                     BSONObj& cmdObj,
                     int,
                     string& errmsg,
                     BSONObjBuilder& result) {
        const std::string fullNs = parseNsCollectionRequired(dbname, cmdObj);
        int n = cmdObj.getIntField("n");
        bool inc = cmdObj.getBoolField("inc");  // inclusive range?

        if (n <= 0) {
            return appendCommandStatus(result,
                                       {ErrorCodes::BadValue, "n must be a positive integer"});
        }

        OldClientWriteContext ctx(txn, fullNs);
        Collection* collection = ctx.getCollection();

        if (!collection) {
            return appendCommandStatus(
                result,
                {ErrorCodes::NamespaceNotFound,
                 str::stream() << "collection " << fullNs << " does not exist"});
        }

        if (!collection->isCapped()) {
            return appendCommandStatus(result,
                                       {ErrorCodes::IllegalOperation, "collection must be capped"});
        }

        RecordId end;
        {
            // Scan backwards through the collection to find the document to start truncating from.
            // We will remove 'n' documents, so start truncating from the (n + 1)th document to the
            // end.
            std::unique_ptr<PlanExecutor> exec(InternalPlanner::collectionScan(
                txn, fullNs, collection, PlanExecutor::YIELD_MANUAL, InternalPlanner::BACKWARD));

            for (int i = 0; i < n + 1; ++i) {
                PlanExecutor::ExecState state = exec->getNext(nullptr, &end);
                if (PlanExecutor::ADVANCED != state) {
                    return appendCommandStatus(result,
                                               {ErrorCodes::IllegalOperation,
                                                str::stream()
                                                    << "invalid n, collection contains fewer than "
                                                    << n << " documents"});
                }
            }
        }

        collection->temp_cappedTruncateAfter(txn, end, inc);

        return true;
    }
Example #9
        void run(){
            int secsToSleep = 0;
            while ( 1 ){
                try {
                    ScopedDbConnection conn( _addr );
                    
                    BSONObj result;
                    
                    {
                        BSONObjBuilder cmd;
                        cmd.appendOID( "writebacklisten" , &serverID );
                        if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ){
                            log() <<  "writebacklisten command failed!  "  << result << endl;
                            conn.done();
                            continue;
                        }

                    }
                    
                    log(1) << "writebacklisten result: " << result << endl;
                    
                    BSONObj data = result.getObjectField( "data" );
                    if ( data.getBoolField( "writeBack" ) ){
                        string ns = data["ns"].valuestrsafe();

                        int len;

                        Message m( (void*)data["msg"].binData( len ) , false );
                        massert( 10427 ,  "invalid writeback message" , m.header()->valid() );                        

                        grid.getDBConfig( ns )->getChunkManager( ns , true );
                        
                        Request r( m , 0 );
                        r.process();
                    }
                    else {
                        log() << "unknown writeBack result: " << result << endl;
                    }
                    
                    conn.done();
                    secsToSleep = 0;
                }
                catch ( std::exception& e ){
                    log() << "WriteBackListener exception : " << e.what() << endl;
                }
                catch ( ... ){
                    log() << "WriteBackListener uncaught exception!" << endl;
                }
                secsToSleep++;
                sleepsecs(secsToSleep);
                if ( secsToSleep > 10 )
                    secsToSleep = 0;
            }
        }
Example #10
    void checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative ){
        // TODO: cache, optimize, etc...
        
        WriteBackListener::init( conn );

        DBConfigPtr conf = grid.getDBConfig( ns );
        if ( ! conf )
            return;
        
        ShardChunkVersion version = 0;
        unsigned long long officialSequenceNumber = 0;

        ChunkManagerPtr manager;
        const bool isSharded = conf->isSharded( ns );
        if ( isSharded ){
            manager = conf->getChunkManager( ns , authoritative );
            officialSequenceNumber = manager->getSequenceNumber();
        }

        unsigned long long & sequenceNumber = checkShardVersionLastSequence[ make_pair(&conn,ns) ];        
        if ( sequenceNumber == officialSequenceNumber )
            return;

        if ( isSharded ){
            version = manager->getVersion( Shard::make( conn.getServerAddress() ) );
        }
        
        log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns 
               << " my last seq: " << sequenceNumber << "  current: " << officialSequenceNumber 
               << " version: " << version << " manager: " << manager.get()
               << endl;
        
        BSONObj result;
        if ( setShardVersion( conn , ns , version , authoritative , result ) ){
            // success!
            log(1) << "      setShardVersion success!" << endl;
            sequenceNumber = officialSequenceNumber;
            dassert( sequenceNumber == checkShardVersionLastSequence[ make_pair(&conn,ns) ] );
            return;
        }

        log(1) << "       setShardVersion failed!\n" << result << endl;

        if ( result.getBoolField( "need_authoritative" ) )
            massert( 10428 ,  "need_authoritative set but in authoritative mode already" , ! authoritative );
        
        if ( ! authoritative ){
            checkShardVersion( conn , ns , 1 );
            return;
        }
        
        log() << "     setShardVersion failed: " << result << endl;
        massert( 10429 , (string)"setShardVersion failed! " + result.jsonString() , 0 );
    }
Example #11
void Command::execCommandClientBasic(OperationContext* txn,
                                     Command* c,
                                     ClientBasic& client,
                                     int queryOptions,
                                     const char* ns,
                                     BSONObj& cmdObj,
                                     BSONObjBuilder& result) {
    std::string dbname = nsToDatabase(ns);

    if (cmdObj.getBoolField("help")) {
        stringstream help;
        help << "help for: " << c->name << " ";
        c->help(help);
        result.append("help", help.str());
        result.append("lockType", c->isWriteCommandForConfigServer() ? 1 : 0);
        appendCommandStatus(result, true, "");
        return;
    }

    Status status = _checkAuthorization(c, &client, dbname, cmdObj);
    if (!status.isOK()) {
        appendCommandStatus(result, status);
        return;
    }

    c->_commandsExecuted.increment();

    if (c->shouldAffectCommandCounter()) {
        globalOpCounters.gotCommand();
    }

    std::string errmsg;
    bool ok = false;
    try {
        ok = c->run(txn, dbname, cmdObj, queryOptions, errmsg, result);
    } catch (const DBException& e) {
        const int code = e.getCode();

        // Codes for StaleConfigException
        if (code == RecvStaleConfigCode || code == SendStaleConfigCode) {
            throw;
        }

        errmsg = e.what();
        result.append("code", code);
    }

    if (!ok) {
        c->_commandsFailed.increment();
    }

    appendCommandStatus(result, ok, errmsg);
}
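As the first branch above shows, passing help: true makes the dispatcher return the command's help text without invoking run(), short-circuiting before the authorization check. An illustrative request ("isMaster" stands in for any registered command):

    // Help request: answered from the help branch, never executed.
    BSONObj cmd = BSON("isMaster" << 1 << "help" << true);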
Example #12
    bool Command::runAgainstRegistered(const char *ns, BSONObj& jsobj, BSONObjBuilder& anObjBuilder) {
        const char *p = strchr(ns, '.');
        if ( !p ) return false;
        if ( strcmp(p, ".$cmd") != 0 ) return false;

        bool ok = false;
        bool valid = false;

        BSONElement e;
        e = jsobj.firstElement();

        map<string,Command*>::iterator i;

        if ( e.eoo() )
            ;
        /* check for properly registered command objects.  Note that all the commands below should be
           migrated over to the command object format.
           */
        else if ( (i = _commands->find(e.fieldName())) != _commands->end() ) {
            valid = true;
            string errmsg;
            Command *c = i->second;
            if ( c->adminOnly() && strncmp(ns, "admin", 5) != 0 ) {
                ok = false;
                errmsg = "access denied";
            }
            else if ( jsobj.getBoolField( "help" ) ){
                stringstream help;
                help << "help for: " << e.fieldName() << " ";
                c->help( help );
                anObjBuilder.append( "help" , help.str() );
            }
            else {
                ok = c->run(ns, jsobj, errmsg, anObjBuilder, false);
            }

            BSONObj tmp = anObjBuilder.asTempObj();
            bool have_ok = tmp.hasField("ok");
            bool have_errmsg = tmp.hasField("errmsg");

            if (!have_ok)
                anObjBuilder.append( "ok" , ok ? 1.0 : 0.0 );
            
            if ( !ok && !have_errmsg) {
                anObjBuilder.append("errmsg", errmsg);
                uassert_nothrow(errmsg.c_str());
            }
            return true;
        }
        
        return false;
    }
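runAgainstRegistered only fires for namespaces of the form "<db>.$cmd", and the command name is taken from the first field of the request object. An illustrative call (command name is a placeholder; assumes the method is callable as shown):

    // A command aimed at database "test" arrives on the pseudo-collection
    // "test.$cmd"; the first field names the command to look up.
    BSONObj jsobj = BSON("ping" << 1);
    BSONObjBuilder result;
    bool handled = Command::runAgainstRegistered("test.$cmd", jsobj, result);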
Example #13
    void DBConfig::unserialize(const BSONObj& from) {
        log(1) << "DBConfig unserialize: " << _name << " " << from << endl;
        assert( _name == from["_id"].String() );

        _shardingEnabled = from.getBoolField("partitioned");
        _primary.reset( from.getStringField("primary") );

        // In the 1.5.x series, we used to have collection metadata nested in the database entry. The 1.6.x series
        // had migration code that ported that info to where it belongs now: the 'collections' collection. We now
        // just assert that we're not migrating from a 1.5.x directly into a 1.7.x without first converting.
        BSONObj sharded = from.getObjectField( "sharded" );
        if ( ! sharded.isEmpty() )
            uasserted( 13509 , "can't migrate from 1.5.x release to the current one; need to upgrade to 1.6.x first");
    }
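Read together with the field accesses above, the database entry this method consumes looks roughly like this (a hedged sketch; values illustrative):

    // Approximate shape of a config database entry for this version:
    // collection metadata lives elsewhere, so "sharded" must be absent/empty.
    BSONObj from = BSON("_id" << "mydb"
                        << "partitioned" << true
                        << "primary" << "shard0000");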
Example #14
    bool run(OperationContext* txn,
             const string& ns,
             BSONObj& cmdObj,
             int,
             string& errmsg,
             BSONObjBuilder& result) {
        log() << "test only command sleep invoked" << endl;
        long long millis = 0;

        if (cmdObj["secs"] || cmdObj["millis"]) {
            if (cmdObj["secs"]) {
                uassert(34344, "'secs' must be a number.", cmdObj["secs"].isNumber());
                millis += cmdObj["secs"].numberLong() * 1000;
            }
            if (cmdObj["millis"]) {
                uassert(34345, "'millis' must be a number.", cmdObj["millis"].isNumber());
                millis += cmdObj["millis"].numberLong();
            }
        } else {
            millis = 10 * 1000;
        }

        if (!cmdObj["lock"]) {
            // Legacy implementation
            if (cmdObj.getBoolField("w")) {
                _sleepInWriteLock(txn, millis);
            } else {
                _sleepInReadLock(txn, millis);
            }
        } else {
            uassert(34346, "Only one of 'w' and 'lock' may be set.", !cmdObj["w"]);

            std::string lock(cmdObj.getStringField("lock"));
            if (lock == "none") {
                sleepmillis(millis);
            } else if (lock == "w") {
                _sleepInWriteLock(txn, millis);
            } else {
                uassert(34347, "'lock' must be one of 'r', 'w', 'none'.", lock == "r");
                _sleepInReadLock(txn, millis);
            }
        }

        // Interrupt point for testing (e.g. maxTimeMS).
        txn->checkForInterrupt();

        return true;
    }
Example #15
 void DBConfig::unserialize(const BSONObj& from){
     _name = from.getStringField("name");
     _partitioned = from.getBoolField("partitioned");
     _primary = from.getStringField("primary");
     
     _sharded.clear();
     BSONObj sharded = from.getObjectField( "sharded" );
     if ( ! sharded.isEmpty() ){
         BSONObjIterator i(sharded);
         while ( i.more() ){
             BSONElement e = i.next();
             uassert( "sharded things have to be objects" , e.type() == Object );
             _sharded[e.fieldName()] = e.embeddedObject();
         }
     }
 }
Example #16
    void Command::execCommandClientBasic(OperationContext* txn,
                                         Command * c ,
                                         ClientBasic& client,
                                         int queryOptions,
                                         const char *ns,
                                         BSONObj& cmdObj,
                                         BSONObjBuilder& result,
                                         bool fromRepl ) {
        std::string dbname = nsToDatabase(ns);

        if (cmdObj.getBoolField("help")) {
            stringstream help;
            help << "help for: " << c->name << " ";
            c->help( help );
            result.append( "help" , help.str() );
            result.append("lockType", c->isWriteCommandForConfigServer() ? 1 : 0);
            appendCommandStatus(result, true, "");
            return;
        }

        Status status = _checkAuthorization(c, &client, dbname, cmdObj, fromRepl);
        if (!status.isOK()) {
            appendCommandStatus(result, status);
            return;
        }

        std::string errmsg;
        bool ok;
        try {
            ok = c->run( txn, dbname , cmdObj, queryOptions, errmsg, result, false );
        }
        catch (DBException& e) {
            ok = false;
            int code = e.getCode();
            if (code == RecvStaleConfigCode) { // code for StaleConfigException
                throw;
            }

            stringstream ss;
            ss << "exception: " << e.what();
            errmsg = ss.str();
            result.append( "code" , code );
        }

        appendCommandStatus(result, ok, errmsg);
    }
Example #17
        virtual bool run(OperationContext* txn,
                         const string& dbname,
                         BSONObj& cmdObj,
                         int,
                         string& errmsg,
                         BSONObjBuilder& result,
                         bool fromRepl) {

            ScopedTransaction transaction(txn, MODE_X);
            Lock::GlobalWrite globalWriteLock(txn->lockState());

            ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
            if (replCoord->getSettings().usingReplSets()) {
                const MemberState memberState = replCoord->getMemberState();
                if (memberState.startup()) {
                    return appendCommandStatus(result, Status(ErrorCodes::NotYetInitialized,
                                                              "no replication yet active"));
                }
                if (memberState.primary() ||
                        !replCoord->setFollowerMode(MemberState::RS_STARTUP2)) {
                    return appendCommandStatus(result, Status(ErrorCodes::NotSecondary,
                                                              "primaries cannot resync"));
                }
                BackgroundSync::get()->setInitialSyncRequestedFlag(true);
                return true;
            }

            // below this comment pertains only to master/slave replication
            if ( cmdObj.getBoolField( "force" ) ) {
                if ( !waitForSyncToFinish(txn, errmsg ) )
                    return false;
                replAllDead = "resync forced";
            }
            // TODO(dannenberg) replAllDead is bad and should be removed when masterslave is removed
            if (!replAllDead) {
                errmsg = "not dead, no need to resync";
                return false;
            }
            if ( !waitForSyncToFinish(txn, errmsg ) )
                return false;

            ReplSource::forceResyncDead( txn, "client" );
            result.append( "info", "triggered resync for all sources" );

            return true;
        }
Example #18
    virtual bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
        log() << "replSet replSetTest command received: " << cmdObj.toString() << rsLog;
        if( cmdObj.hasElement("forceInitialSyncFailure") ) {
            replSetForceInitialSyncFailure = (unsigned) cmdObj["forceInitialSyncFailure"].Number();
            return true;
        }

        // may not need this, but if removed check all tests still work:
        if( !check(errmsg, result) )
            return false;

        if( cmdObj.hasElement("blind") ) {
            replSetBlind = cmdObj.getBoolField("blind");
            return true;
        }
        return false;
    }
Example #19
 void DBConfig::unserialize(const BSONObj& from){
     _name = from.getStringField("name");
     _shardingEnabled = from.getBoolField("partitioned");
     _primary = from.getStringField("primary");
     
     _sharded.clear();
     BSONObj sharded = from.getObjectField( "sharded" );
     if ( ! sharded.isEmpty() ){
         BSONObjIterator i(sharded);
         while ( i.more() ){
             BSONElement e = i.next();
             uassert( 10182 ,  "sharded things have to be objects" , e.type() == Object );
             BSONObj c = e.embeddedObject();
             uassert( 10183 ,  "key has to be an object" , c["key"].type() == Object );
             _sharded[e.fieldName()] = CollectionInfo( c["key"].embeddedObject() , 
                                                       c["unique"].trueValue() );
         }
     }
 }
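In this older format the "sharded" subdocument maps each collection namespace to its shard key metadata, which the loop above turns into CollectionInfo entries. A hedged sketch of the expected shape (values illustrative):

    // Approximate legacy shape: one entry per sharded collection,
    // each an object carrying "key" and "unique".
    BSONObj from = BSON("name" << "mydb"
                        << "partitioned" << true
                        << "primary" << "shard0000"
                        << "sharded" << BSON("mydb.users"
                                             << BSON("key" << BSON("_id" << 1)
                                                     << "unique" << false)));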
Example #20
        virtual bool run(OperationContext* txn,
                         const string& dbname,
                         BSONObj& cmdObj,
                         int,
                         string& errmsg,
                         BSONObjBuilder& result,
                         bool fromRepl) {

            const std::string ns = parseNs(dbname, cmdObj);
            Lock::GlobalWrite globalWriteLock;
            Client::Context ctx(ns);
            if (replSettings.usingReplSets()) {
                if (!theReplSet) {
                    errmsg = "no replication yet active";
                    return false;
                }
                if (theReplSet->isPrimary()) {
                    errmsg = "primaries cannot resync";
                    return false;
                }
                return theReplSet->resync(errmsg);
            }

            // below this comment pertains only to master/slave replication
            if ( cmdObj.getBoolField( "force" ) ) {
                if ( !waitForSyncToFinish( errmsg ) )
                    return false;
                replAllDead = "resync forced";
            }
            if ( !replAllDead ) {
                errmsg = "not dead, no need to resync";
                return false;
            }
            if ( !waitForSyncToFinish( errmsg ) )
                return false;

            ReplSource::forceResyncDead( txn, "client" );
            result.append( "info", "triggered resync for all sources" );
            return true;
        }
Example #21
/**
 * @brief getGeneratedCertList
 *      retrieve certificates list
 * @return
 */
QVariantList Database::getGeneratedCertList(){

    auto_ptr<DBClientCursor> cursor = con.query("ssldashboard.generatedcerts", Query());

    QVariantList listOfCerts;

    while (cursor->more()){

        QVariantMap certElement;

        BSONObj currentObj = cursor->next();

        certElement.insert(CERT_START_DATE           ,currentObj.getField(CERT_START_DATE).date().toString().data());
        certElement.insert(CERT_END_DATE             ,currentObj.getField(CERT_END_DATE).date().toString().data());
        certElement.insert(CERT_RECORD_DATE_FIELD    ,currentObj.getField(CERT_RECORD_DATE_FIELD).date().toString().data());
        certElement.insert(CERT_IS_CA                ,currentObj.getBoolField(CERT_IS_CA));
        certElement.insert(CERT_COMMON_NAME          ,currentObj.getStringField(CERT_COMMON_NAME));
        certElement.insert(CERT_SEQ_NUM              ,currentObj.getIntField(CERT_SEQ_NUM));
        certElement.insert(CERT_SIGN_BY_SERIAL       ,currentObj.getIntField(CERT_SIGN_BY_SERIAL));
        listOfCerts << certElement;
    }
    return listOfCerts;
}
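Each document consumed by the loop above is assumed to carry date, boolean, string, and integer fields under the CERT_* keys. A hedged sketch of one such record, with literal key names standing in for the CERT_* constants defined elsewhere:

    // Hypothetical ssldashboard.generatedcerts document; DATENOW appends the
    // current date (legacy BSON macro helper).
    BSONObj cert = BSON("startDate" << DATENOW
                        << "endDate" << DATENOW
                        << "recordDate" << DATENOW
                        << "isCA" << false
                        << "commonName" << "example.com"
                        << "seqNum" << 42
                        << "signBySerial" << 7);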
Example #22
    bool DBConfig::unserialize(const BSONObj& from){
        log(1) << "DBConfig unserialize: " << _name << " " << from << endl;
        assert( _name == from["_id"].String() );

        _shardingEnabled = from.getBoolField("partitioned");
        _primary.reset( from.getStringField("primary") );

        // this is a temporary migration thing
        BSONObj sharded = from.getObjectField( "sharded" );
        if ( sharded.isEmpty() )
             return false;
        
        BSONObjIterator i(sharded);
        while ( i.more() ){
            BSONElement e = i.next();
            uassert( 10182 ,  "sharded things have to be objects" , e.type() == Object );
            
            BSONObj c = e.embeddedObject();
            uassert( 10183 ,  "key has to be an object" , c["key"].type() == Object );
            
            _collections[e.fieldName()].shard( this , e.fieldName() , c["key"].Obj() , c["unique"].trueValue() );
        }
        return true;
    }
Example #23
        virtual bool run(OperationContext* txn, const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool fromRepl) {
            log() << "replSet replSetTest command received: " << cmdObj.toString() << rsLog;

            if( cmdObj.hasElement("forceInitialSyncFailure") ) {
                replSetForceInitialSyncFailure = (unsigned) cmdObj["forceInitialSyncFailure"].Number();
                return true;
            }

            Status status = getGlobalReplicationCoordinator()->checkReplEnabledForCommand(&result);
            if (!status.isOK())
                return appendCommandStatus(result, status);

            if( cmdObj.hasElement("blind") ) {
                replSetBlind = cmdObj.getBoolField("blind");
                return true;
            }

            if (cmdObj.hasElement("sethbmsg")) {
                sethbmsg(cmdObj["sethbmsg"].String());
                return true;
            }

            return false;
        }
Example #24
        bool run(const string& , BSONObj& cmdObj, string& errmsg, BSONObjBuilder& result, bool) {

            lastError.disableForCommand();
            ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

            bool authoritative = cmdObj.getBoolField( "authoritative" );

            string configdb = cmdObj["configdb"].valuestrsafe();
            {
                // configdb checking
                if ( configdb.size() == 0 ) {
                    errmsg = "no configdb";
                    return false;
                }

                if ( shardingState.enabled() ) {
                    if ( configdb != shardingState.getConfigServer() ) {
                        errmsg = "specified a different configdb!";
                        return false;
                    }
                }
                else {
                    if ( ! authoritative ) {
                        result.appendBool( "need_authoritative" , true );
                        errmsg = "first setShardVersion";
                        return false;
                    }
                    shardingState.enable( configdb );
                    configServer.init( configdb );
                }
            }

            if ( cmdObj["shard"].type() == String ) {
                shardingState.gotShardName( cmdObj["shard"].String() );
                shardingState.gotShardHost( cmdObj["shardHost"].String() );
            }

            {
                // setting up ids
                if ( cmdObj["serverID"].type() != jstOID ) {
                    // TODO: fix this
                    //errmsg = "need serverID to be an OID";
                    //return 0;
                }
                else {
                    OID clientId = cmdObj["serverID"].__oid();
                    if ( ! info->hasID() ) {
                        info->setID( clientId );
                    }
                    else if ( clientId != info->getID() ) {
                        errmsg = "server id has changed!";
                        return false;
                    }
                }
            }

            unsigned long long version = extractVersion( cmdObj["version"] , errmsg );

            if ( errmsg.size() ) {
                return false;
            }

            string ns = cmdObj["setShardVersion"].valuestrsafe();
            if ( ns.size() == 0 ) {
                errmsg = "need to speciy fully namespace";
                return false;
            }

            const ConfigVersion oldVersion = info->getVersion(ns);
            const ConfigVersion globalVersion = shardingState.getVersion(ns);

            if ( oldVersion > 0 && globalVersion == 0 ) {
                // this had been reset
                info->setVersion( ns , 0 );
            }

            if ( version == 0 && globalVersion == 0 ) {
                // this connection is cleaning itself
                info->setVersion( ns , 0 );
                return true;
            }

            if ( version == 0 && globalVersion > 0 ) {
                if ( ! authoritative ) {
                    result.appendBool( "need_authoritative" , true );
                    result.append( "ns" , ns );
                    result.appendTimestamp( "globalVersion" , globalVersion );
                    result.appendTimestamp( "oldVersion" , oldVersion );
                    errmsg = "dropping needs to be authoritative";
                    return false;
                }
                log() << "wiping data for: " << ns << endl;
                result.appendTimestamp( "beforeDrop" , globalVersion );
                // only setting global version on purpose
                // need clients to re-find meta-data
                shardingState.resetVersion( ns );
                info->setVersion( ns , 0 );
                return true;
            }

            if ( version < oldVersion ) {
                errmsg = "you already have a newer version of collection '" + ns + "'";
                result.append( "ns" , ns );
                result.appendTimestamp( "oldVersion" , oldVersion );
                result.appendTimestamp( "newVersion" , version );
                result.appendTimestamp( "globalVersion" , globalVersion );
                return false;
            }

            if ( version < globalVersion ) {
                while ( shardingState.inCriticalMigrateSection() ) {
                    dbtemprelease r;
                    sleepmillis(2);
                    OCCASIONALLY log() << "waiting till out of critical section" << endl;
                }
                errmsg = "going to older version for global for collection '" + ns + "'";
                result.append( "ns" , ns );
                result.appendTimestamp( "version" , version );
                result.appendTimestamp( "globalVersion" , globalVersion );
                return false;
            }

            if ( globalVersion == 0 && ! cmdObj.getBoolField( "authoritative" ) ) {
                // need authoritative for first look
                result.append( "ns" , ns );
                result.appendBool( "need_authoritative" , true );
                errmsg = "first time for collection '" + ns + "'";
                return false;
            }

            {
                dbtemprelease unlock;

                ShardChunkVersion currVersion = version;
                if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
                    errmsg = str::stream() << "client version differs from config's for colleciton '" << ns << "'";
                    result.append( "ns" , ns );
                    result.appendTimestamp( "version" , version );
                    result.appendTimestamp( "globalVersion" , currVersion );
                    return false;
                }
            }

            info->setVersion( ns , version );
            result.appendTimestamp( "oldVersion" , oldVersion );
            result.append( "ok" , 1 );

            return true;
        }
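Pulling the field reads in this handler together: a setShardVersion request names the target namespace as the command value and carries the config server, shard identity, mongos serverID, and the version to set. A hedged sketch built with BSONObjBuilder (all values are placeholders; assumes the legacy appendTimestamp(field, time, inc) overload):

    // Illustrative setShardVersion request assembled from the fields the
    // handler above reads; the OID and version are placeholders.
    OID serverID = OID::gen();
    BSONObjBuilder b;
    b.append("setShardVersion", "test.foo");
    b.append("configdb", "cfg1:27019");
    b.appendBool("authoritative", false);
    b.append("shard", "shard0000");
    b.append("shardHost", "shard0000:27018");
    b.appendOID("serverID", &serverID);
    b.appendTimestamp("version", 5, 0);  // assumption: (time, inc) overload
    BSONObj cmd = b.obj();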
Example #25
    /**
     * @return true if had to do something
     */
    bool checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative , int tryNumber ) {
        // TODO: cache, optimize, etc...

        WriteBackListener::init( conn );

        DBConfigPtr conf = grid.getDBConfig( ns );
        if ( ! conf )
            return false;

        unsigned long long officialSequenceNumber = 0;

        ChunkManagerPtr manager;
        const bool isSharded = conf->isSharded( ns );
        if ( isSharded ) {
            manager = conf->getChunkManager( ns , authoritative );
            officialSequenceNumber = manager->getSequenceNumber();
        }

        // has the ChunkManager been reloaded since the last time we updated the connection-level version?
        // (ie, last time we issued the setShardVersions below)
        unsigned long long sequenceNumber = connectionShardStatus.getSequence(&conn,ns);
        if ( sequenceNumber == officialSequenceNumber ) {
            return false;
        }


        ShardChunkVersion version = 0;
        if ( isSharded ) {
            version = manager->getVersion( Shard::make( conn.getServerAddress() ) );
        }

        log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
               << " my last seq: " << sequenceNumber << "  current: " << officialSequenceNumber
               << " version: " << version << " manager: " << manager.get()
               << endl;

        BSONObj result;
        if ( setShardVersion( conn , ns , version , authoritative , result ) ) {
            // success!
            LOG(1) << "      setShardVersion success: " << result << endl;
            connectionShardStatus.setSequence( &conn , ns , officialSequenceNumber );
            return true;
        }

        log(1) << "       setShardVersion failed!\n" << result << endl;

        if ( result.getBoolField( "need_authoritative" ) )
            massert( 10428 ,  "need_authoritative set but in authoritative mode already" , ! authoritative );

        if ( ! authoritative ) {
            checkShardVersion( conn , ns , 1 , tryNumber + 1 );
            return true;
        }

        if ( tryNumber < 4 ) {
            log(1) << "going to retry checkShardVersion" << endl;
            sleepmillis( 10 );
            checkShardVersion( conn , ns , 1 , tryNumber + 1 );
            return true;
        }

        log() << "     setShardVersion failed: " << result << endl;
        massert( 10429 , (string)"setShardVersion failed! " + result.jsonString() , 0 );
        return true;
    }
Example #26
    bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {

        // Steps
        // 1. check basic config
        // 2. extract params from command
        // 3. fast check
        // 4. slow check (LOCKS)

        // step 1

        lastError.disableForCommand();
        ShardedConnectionInfo* info = ShardedConnectionInfo::get( true );

        // make sure we have the mongos id for writebacks
        if ( ! checkMongosID( info , cmdObj["serverID"] , errmsg ) )
            return false;

        bool authoritative = cmdObj.getBoolField( "authoritative" );

        // check config server is ok or enable sharding
        if ( ! checkConfigOrInit( cmdObj["configdb"].valuestrsafe() , authoritative , errmsg , result ) )
            return false;

        // check shard name/hosts are correct
        if ( cmdObj["shard"].type() == String ) {
            shardingState.gotShardName( cmdObj["shard"].String() );
            shardingState.gotShardHost( cmdObj["shardHost"].String() );
        }


        // Handle initial shard connection
        if( cmdObj["version"].eoo() && cmdObj["init"].trueValue() ) {
            result.append( "initialized", true );
            return true;
        }

        // we can run on a slave up to here
        if ( ! isMaster( "admin" ) ) {
            result.append( "errmsg" , "not master" );
            result.append( "note" , "from post init in setShardVersion" );
            return false;
        }

        // step 2

        string ns = cmdObj["setShardVersion"].valuestrsafe();
        if ( ns.size() == 0 ) {
            errmsg = "need to specify namespace";
            return false;
        }

        const ConfigVersion version = ConfigVersion( extractVersion( cmdObj["version"] , errmsg ), OID() );
        if ( errmsg.size() )
            return false;

        // step 3

        const ConfigVersion oldVersion = info->getVersion(ns);
        const ConfigVersion globalVersion = shardingState.getVersion(ns);

        oldVersion.addToBSON( result, "oldVersion" );

        if ( globalVersion.isSet() && version.isSet() ) {
            // this means no reset is in progress on either side
            // so it's safe to make some assumptions

            if ( version.isEquivalentTo( globalVersion ) ) {
                // mongos and mongod agree!
                if ( ! oldVersion.isEquivalentTo( version ) ) {
                    if ( oldVersion < globalVersion ) {
                        info->setVersion( ns , version );
                    }
                    else if ( authoritative ) {
                        // this means there was a drop and our version is reset
                        info->setVersion( ns , version );
                    }
                    else {
                        result.append( "ns" , ns );
                        result.appendBool( "need_authoritative" , true );
                        errmsg = "verifying drop on '" + ns + "'";
                        return false;
                    }
                }
                return true;
            }

        }

        // step 4

        // this is because of a weird segfault I saw and I can't see why this should ever be set
        massert( 13647 , str::stream() << "context should be empty here, is: " << cc().getContext()->ns() , cc().getContext() == 0 );

        Lock::GlobalWrite setShardVersionLock; // TODO: can we get rid of this??

        if ( oldVersion.isSet() && ! globalVersion.isSet() ) {
            // this had been reset
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
        }

        if ( ! version.isSet() && ! globalVersion.isSet() ) {
            // this connection is cleaning itself
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
            return true;
        }

        if ( ! version.isSet() && globalVersion.isSet() ) {
            if ( ! authoritative ) {
                result.appendBool( "need_authoritative" , true );
                result.append( "ns" , ns );
                globalVersion.addToBSON( result, "globalVersion" );
                errmsg = "dropping needs to be authoritative";
                return false;
            }
            log() << "wiping data for: " << ns << endl;
            globalVersion.addToBSON( result, "beforeDrop" );
            // only setting global version on purpose
            // need clients to re-find meta-data
            shardingState.resetVersion( ns );
            info->setVersion( ns , ShardChunkVersion( 0, OID() ) );
            return true;
        }

        if ( version < oldVersion ) {
            errmsg = "this connection already had a newer version of collection '" + ns + "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "newVersion" );
            globalVersion.addToBSON( result, "globalVersion" );
            return false;
        }

        if ( version < globalVersion ) {
            while ( shardingState.inCriticalMigrateSection() ) {
                dbtemprelease r;
                sleepmillis(2);
                OCCASIONALLY log() << "waiting till out of critical section" << endl;
            }
            errmsg = "shard global version for collection is higher than trying to set to '" + ns + "'";
            result.append( "ns" , ns );
            version.addToBSON( result, "version" );
            globalVersion.addToBSON( result, "globalVersion" );
            result.appendBool( "reloadConfig" , true );
            return false;
        }

        if ( ! globalVersion.isSet() && ! authoritative ) {
            // Needed b/c when the last chunk is moved off a shard, the version gets reset to zero, which
            // should require a reload.
            // TODO: Maybe a more elegant way of doing this
            while ( shardingState.inCriticalMigrateSection() ) {
                dbtemprelease r;
                sleepmillis(2);
                OCCASIONALLY log() << "waiting till out of critical section for version reset" << endl;
            }

            // need authoritative for first look
            result.append( "ns" , ns );
            result.appendBool( "need_authoritative" , true );
            errmsg = "first time for collection '" + ns + "'";
            return false;
        }

        Timer relockTime;
        {
            dbtemprelease unlock;

            ShardChunkVersion currVersion = version;
            if ( ! shardingState.trySetVersion( ns , currVersion ) ) {
                errmsg = str::stream() << "client version differs from config's for collection '" << ns << "'";
                result.append( "ns" , ns );
                version.addToBSON( result, "version" );
                globalVersion.addToBSON( result, "globalVersion" );
                return false;
            }
        }
        if ( relockTime.millis() >= ( cmdLine.slowMS - 10 ) ) {
            log() << "setShardVersion - relocking slow: " << relockTime.millis() << endl;
        }

        info->setVersion( ns , version );
        return true;
    }
Example #27
    void Command::execCommandClientBasic(Command * c ,
                                         ClientBasic& client,
                                         int queryOptions,
                                         const char *ns,
                                         BSONObj& cmdObj,
                                         BSONObjBuilder& result,
                                         bool fromRepl ) {
        verify(c);

        std::string dbname = nsToDatabase(ns);

        // Access control checks
        if (!noauth) {
            std::vector<Privilege> privileges;
            c->addRequiredPrivileges(dbname, cmdObj, &privileges);
            AuthorizationManager* authManager = client.getAuthorizationManager();
            if (!authManager->checkAuthForPrivileges(privileges).isOK()) {
                result.append("note", str::stream() << "not authorized for command: " <<
                                    c->name << " on database " << dbname);
                appendCommandStatus(result, false, "unauthorized");
                return;
            }
        }
        if (c->adminOnly() && c->localHostOnlyIfNoAuth(cmdObj) && noauth &&
                !client.getIsLocalHostConnection()) {
            log() << "command denied: " << cmdObj.toString() << endl;
            appendCommandStatus(result,
                               false,
                               "unauthorized: this command must run from localhost when running db "
                               "without auth");
            return;
        }
        if (c->adminOnly() && !startsWith(ns, "admin.")) {
            log() << "command denied: " << cmdObj.toString() << endl;
            appendCommandStatus(result, false, "access denied - use admin db");
            return;
        }
        // End of access control checks

        if (cmdObj.getBoolField("help")) {
            stringstream help;
            help << "help for: " << c->name << " ";
            c->help( help );
            result.append( "help" , help.str() );
            result.append( "lockType" , c->locktype() );
            appendCommandStatus(result, true, "");
            return;
        }
        std::string errmsg;
        bool ok;
        try {
            ok = c->run( dbname , cmdObj, queryOptions, errmsg, result, false );
        }
        catch (DBException& e) {
            ok = false;
            int code = e.getCode();
            if (code == RecvStaleConfigCode) { // code for StaleConfigException
                throw;
            }

            stringstream ss;
            ss << "exception: " << e.what();
            errmsg = ss.str();
            result.append( "code" , code );
        }

        appendCommandStatus(result, ok, errmsg);
    }
Example #28
    void WriteBackListener::run() {
        int secsToSleep = 0;
        while ( ! inShutdown() ) {
            
            if ( ! Shard::isAShardNode( _addr ) ) {
                log(1) << _addr << " is not a shard node" << endl;
                sleepsecs( 60 );
                continue;
            }

            try {
                ScopedDbConnection conn( _addr );

                BSONObj result;

                {
                    BSONObjBuilder cmd;
                    cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
                    if ( ! conn->runCommand( "admin" , cmd.obj() , result ) ) {
                        log() <<  "writebacklisten command failed!  "  << result << endl;
                        conn.done();
                        continue;
                    }

                }

                log(1) << "writebacklisten result: " << result << endl;

                BSONObj data = result.getObjectField( "data" );
                if ( data.getBoolField( "writeBack" ) ) {
                    string ns = data["ns"].valuestrsafe();

                    ConnectionIdent cid( "" , 0 );
                    OID wid;
                    if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ) {
                        string s = "";
                        if ( data["instanceIdent"].type() == String )
                            s = data["instanceIdent"].String();
                        cid = ConnectionIdent( s , data["connectionId"].numberLong() );
                        wid = data["id"].OID();
                    }
                    else {
                        warning() << "mongos/mongod version mismatch (1.7.5 is the split)" << endl;
                    }

                    int len; // not used, but needed for next call
                    Message m( (void*)data["msg"].binData( len ) , false );
                    massert( 10427 ,  "invalid writeback message" , m.header()->valid() );

                    DBConfigPtr db = grid.getDBConfig( ns );
                    ShardChunkVersion needVersion( data["version"] );

                    LOG(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
                           << " mine : " << db->getChunkManager( ns )->getVersion().toString() << endl;// TODO change to log(3)

                    if ( logLevel ) log(1) << debugString( m ) << endl;

                    if ( needVersion.isSet() && needVersion <= db->getChunkManager( ns )->getVersion() ) {
                        // this means when the write went originally, the version was old
                        // if we're here, it means we've already updated the config, so don't need to do again
                        //db->getChunkManager( ns , true ); // SERVER-1349
                    }
                    else {
                        // we received a writeback object that was sent to a previous version of a shard
                        // the actual shard may not have the object the writeback operation is for
                        // we need to reload the chunk manager and get the new shard versions
                        db->getChunkManager( ns , true );
                    }

                    // do request and then call getLastError
                    // we have to call getLastError so we can return the right fields to the user if they decide to call getLastError

                    BSONObj gle;
                    try {
                        
                        Request r( m , 0 );
                        r.init();

                        ClientInfo * ci = r.getClientInfo();
                        if (!noauth) {
                            ci->getAuthenticationInfo()->authorize("admin", internalSecurity.user);
                        }
                        ci->noAutoSplit();

                        r.process();
                        
                        ci->newRequest(); // this so we flip prev and cur shards

                        BSONObjBuilder b;
                        if ( ! ci->getLastError( BSON( "getLastError" << 1 ) , b , true ) ) {
                            b.appendBool( "commandFailed" , true );
                        }
                        gle = b.obj();

                        ci->clearSinceLastGetError();
                    }
                    catch ( DBException& e ) {
                        error() << "error processing writeback: " << e << endl;
                        BSONObjBuilder b;
                        b.append( "err" , e.toString() );
                        e.getInfo().append( b );
                        gle = b.obj();
                    }

                    {
                        scoped_lock lk( _seenWritebacksLock );
                        WBStatus& s = _seenWritebacks[cid];
                        s.id = wid;
                        s.gle = gle;
                    }
                }
                else if ( result["noop"].trueValue() ) {
                    // no-op
                }
                else {
                    log() << "unknown writeBack result: " << result << endl;
                }

                conn.done();
                secsToSleep = 0;
                continue;
            }
            catch ( std::exception& e ) {

                if ( inShutdown() ) {
                    // we're shutting down, so just clean up
                    return;
                }

                log() << "WriteBackListener exception : " << e.what() << endl;

                // It's possible this shard was removed
                Shard::reloadShardInfo();
            }
            catch ( ... ) {
                log() << "WriteBackListener uncaught exception!" << endl;
            }
            secsToSleep++;
            sleepsecs(secsToSleep);
            if ( secsToSleep > 10 )
                secsToSleep = 0;
        }

        log() << "WriteBackListener exiting : address no longer in cluster " << _addr;

    }
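
The listener above records each processed writeback's getLastError result in _seenWritebacks, keyed by the originating connection, so a later getLastError from that client can be answered with the right fields. A minimal sketch of the consumer side, assuming the surrounding mongos context; the waitForWriteback name, its signature, and the polling loop are assumptions for illustration, not the original implementation:

    // Sketch only: poll the map filled in by the listener until the status
    // for this connection/writeback id appears, then hand back its GLE result.
    BSONObj WriteBackListener::waitForWriteback( const ConnectionIdent& cid , const OID& wid ) {
        while ( true ) {
            {
                scoped_lock lk( _seenWritebacksLock );
                map<ConnectionIdent,WBStatus>::const_iterator it = _seenWritebacks.find( cid );
                if ( it != _seenWritebacks.end() && it->second.id == wid )
                    return it->second.gle; // listener has processed this writeback
            }
            sleepmillis( 10 ); // not seen yet; poll and retry
        }
    }
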
Example no. 29
    /* called on a reconfig AND on initiate
       throws
       @param initial true when initiating
    */
    void checkMembersUpForConfigChange(const ReplSetConfig& cfg, bool initial) {
        int failures = 0, majority = 0;
        int me = 0;
        stringstream selfs;
        for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
            if( i->h.isSelf() ) {
                me++;
                if( me > 1 )
                    selfs << ',';
                selfs << i->h.toString();
                if( !i->potentiallyHot() ) {
                    uasserted(13420, "initiation and reconfiguration of a replica set must be sent to a node that can become primary");
                }
            }
            // every member's votes count toward the majority, not just our own
            majority += i->votes;
        }
        majority = (majority / 2) + 1;
        
        uassert(13278, "bad config: isSelf is true for multiple hosts: " + selfs.str(), me <= 1); // dups?
        if( me != 1 ) {
            stringstream ss;
            ss << "can't find self in the replset config";
            if( !cmdLine.isDefaultPort() ) ss << " my port: " << cmdLine.port;
            if( me != 0 ) ss << " found: " << me;
            uasserted(13279, ss.str());
        }

        for( vector<ReplSetConfig::MemberCfg>::const_iterator i = cfg.members.begin(); i != cfg.members.end(); i++ ) {
            // we know we're up
            if (i->h.isSelf()) {
                continue;
            }

            BSONObj res;
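            // Heartbeat probe: three replies are fatal here - a set-name
            // mismatch, a member that is already initiated (when initiating),
            // and a member reporting a config version at or above ours.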
            {
                bool ok = false;
                try {
                    int theirVersion = -1000;
                    ok = requestHeartbeat(cfg._id, "", i->h.toString(), res, -1, theirVersion, initial/*check if empty*/);
                    if( theirVersion >= cfg.version ) {
                        stringstream ss;
                        ss << "replSet member " << i->h.toString() << " has too new a config version (" << theirVersion << ") to reconfigure";
                        uasserted(13259, ss.str());
                    }
                }
                catch(DBException& e) {
                    log() << "replSet cmufcc requestHeartbeat " << i->h.toString() << " : " << e.toString() << rsLog;
                }
                catch(...) {
                    log() << "replSet cmufcc error exception in requestHeartbeat?" << rsLog;
                }
                if( res.getBoolField("mismatch") )
                    uasserted(13145, "set name does not match the set name host " + i->h.toString() + " expects");
                if( *res.getStringField("set") ) {
                    if( cfg.version <= 1 ) {
                        // this was meant to be an initiation; no one should be initiated already.
                        uasserted(13256, "member " + i->h.toString() + " is already initiated");
                    }
                    else {
                        // Assure no one has a newer config.
                        if( res["v"].Int() >= cfg.version ) {
                            uasserted(13341, "member " + i->h.toString() + " has a config version >= to the new cfg version; cannot change config");
                        }
                    }
                }
                if( !ok && !res["rs"].trueValue() ) {
                    if( !res.isEmpty() ) {
                        /* strange.  got a response, but not "ok". log it. */
                        log() << "replSet warning " << i->h.toString() << " replied: " << res.toString() << rsLog;
                    }

                    bool allowFailure = false;
                    failures += i->votes;
                    if( res.isEmpty() && !initial && failures >= majority ) {
                        const Member* m = theReplSet->findById( i->_id );
                        if( m ) {
                            assert( m->h().toString() == i->h.toString() );
                        }
                        // it's okay if the down member isn't part of the config,
                        // we might be adding a new member that isn't up yet
                        allowFailure = true;
                    }

                    if( !allowFailure ) {
                        string msg = string("need all members up to initiate, not ok : ") + i->h.toString();
                        if( !initial )
                            msg = string("need most members up to reconfigure, not ok : ") + i->h.toString();
                        uasserted(13144, msg);
                    }
                }
            }
            if( initial ) {
                bool hasData = res["hasData"].Bool();
                uassert(13311, "member " + i->h.toString() + " has data already, cannot initiate set.  All members except initiator must be empty.",
                        !hasData || i->h.isSelf());
            }
        }
    }
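
checkMembersUpForConfigChange is the up-front liveness and sanity pass run before a config is applied: every remote member must answer a heartbeat, no member may already be initiated (when initiating), and no member may hold a config version at or above the one being pushed. A rough sketch of how an initiate path could drive it; initiateSketch, startReplSet, and the ReplSetConfig constructor shown are placeholders or assumptions, not the real call sites:

    // Sketch only, under assumed helpers; the real replSetInitiate command
    // performs considerably more validation around this check.
    void initiateSketch( const BSONObj& configObj ) {
        ReplSetConfig cfg( configObj );   // assumed constructor signature
        log() << "replSet checking that all members are up for initiate" << rsLog;
        checkMembersUpForConfigChange( cfg, /*initial=*/true ); // throws on failure
        // reached only if every member answered and none was already initiated
        startReplSet( cfg );              // placeholder for the real start-up path
    }
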
Example no. 30
    void WriteBackListener::run() {

        int secsToSleep = 0;
        scoped_ptr<ChunkVersion> lastNeededVersion;
        int lastNeededCount = 0;
        bool needsToReloadShardInfo = false;
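        // lastNeededVersion/lastNeededCount throttle config reloads across
        // consecutive writebacks; needsToReloadShardInfo defers the call to
        // Shard::reloadShardInfo() to the top of the loop, because the catch
        // block below must not call anything that can throw.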

        while ( ! inShutdown() ) {

            if ( ! Shard::isAShardNode( _addr ) ) {
                LOG(1) << _addr << " is not a shard node" << endl;
                sleepsecs( 60 );
                continue;
            }

            try {
                if (needsToReloadShardInfo) {
                    // It's possible this shard was removed
                    Shard::reloadShardInfo();
                    needsToReloadShardInfo = false;
                }

                scoped_ptr<ScopedDbConnection> conn(
                        ScopedDbConnection::getInternalScopedDbConnection( _addr ) );

                BSONObj result;

                {
                    BSONObjBuilder cmd;
                    cmd.appendOID( "writebacklisten" , &serverID ); // Command will block for data
                    if ( ! conn->get()->runCommand( "admin" , cmd.obj() , result ) ) {
                        result = result.getOwned();
                        log() <<  "writebacklisten command failed!  "  << result << endl;
                        conn->done();
                        continue;
                    }

                }
                conn->done();

                LOG(1) << "writebacklisten result: " << result << endl;

                BSONObj data = result.getObjectField( "data" );
                if ( data.getBoolField( "writeBack" ) ) {
                    string ns = data["ns"].valuestrsafe();

                    ConnectionIdent cid( "" , 0 );
                    OID wid;
                    if ( data["connectionId"].isNumber() && data["id"].type() == jstOID ) {
                        string s = "";
                        if ( data["instanceIdent"].type() == String )
                            s = data["instanceIdent"].String();
                        cid = ConnectionIdent( s , data["connectionId"].numberLong() );
                        wid = data["id"].OID();
                    }
                    else {
                        warning() << "mongos/mongod version mismatch (1.7.5 is the split)" << endl;
                    }

                    int len; // not used, but needed for next call
                    Message msg( (void*)data["msg"].binData( len ) , false );
                    massert( 10427 ,  "invalid writeback message" , msg.header()->valid() );

                    DBConfigPtr db = grid.getDBConfig( ns );
                    ChunkVersion needVersion = ChunkVersion::fromBSON( data, "version" );

                    //
                    // TODO: Refactor the sharded strategy to correctly handle all sharding state changes itself,
                    // we can't rely on WBL to do this for us b/c anything could reset our state in-between.
                    // We should always reload here for efficiency when possible, but staleness is also caught in the
                    // loop below.
                    //

                    ChunkManagerPtr manager;
                    ShardPtr primary;
                    db->getChunkManagerOrPrimary( ns, manager, primary );

                    ChunkVersion currVersion;
                    if( manager ) currVersion = manager->getVersion();

                    LOG(1) << "connectionId: " << cid << " writebackId: " << wid << " needVersion : " << needVersion.toString()
                           << " mine : " << currVersion.toString() << endl;

                    LOG(1) << msg.toString() << endl;

                    //
                    // We should reload only if we need to update our version to be compatible *and* we
                    // haven't already done so.  This avoids lots of reloading when we remove/add a sharded collection
                    //

                    bool alreadyReloaded = lastNeededVersion &&
                                           lastNeededVersion->isEquivalentTo( needVersion );

                    if( alreadyReloaded ){

                        LOG(1) << "wbl already reloaded config information for version "
                               << needVersion << ", at version " << currVersion << endl;
                    }
                    else if( lastNeededVersion ) {

                        log() << "new version change detected to " << needVersion.toString()
                              << ", " << lastNeededCount << " writebacks processed at "
                              << lastNeededVersion->toString() << endl;

                        lastNeededCount = 0;
                    }

                    //
                    // Set our lastNeededVersion for next time
                    //

                    lastNeededVersion.reset( new ChunkVersion( needVersion ) );
                    lastNeededCount++;

                    //
                    // Determine if we should reload, if so, reload
                    //

                    bool shouldReload = ! needVersion.isWriteCompatibleWith( currVersion ) &&
                                        ! alreadyReloaded;

                    if( shouldReload && currVersion.isSet()
                                     && needVersion.isSet()
                                     && currVersion.hasCompatibleEpoch( needVersion ) )
                    {

                        //
                        // If we disagree about versions only, reload the chunk manager
                        //

                        db->getChunkManagerIfExists( ns, true );
                    }
                    else if( shouldReload ){

                        //
                        // If we disagree about anything else, reload the full db
                        //

                        warning() << "reloading config data for " << db->getName() << ", "
                                  << "wanted version " << needVersion.toString()
                                  << " but currently have version " << currVersion.toString() << endl;

                        db->reload();
                    }

                    // do request and then call getLastError
                    // we have to call getLastError so we can return the right fields to the user if they decide to call getLastError

                    BSONObj gle;
                    int attempts = 0;
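
                    // Retry loop: route the writeback, then collect getLastError.
                    // A stale-config failure reloads routing info and retries,
                    // giving up after four failed attempts.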
                    while ( true ) {
                        attempts++;

                        try {

                            Request r( msg , 0 );
                            r.init();

                            r.d().reservedField() |= Reserved_FromWriteback;

                            ClientInfo * ci = r.getClientInfo();
                            if (!noauth) {
                                ci->getAuthorizationManager()->grantInternalAuthorization(
                                        "_writebackListener");
                            }
                            ci->noAutoSplit();

                            r.process( attempts );

                            ci->newRequest(); // this so we flip prev and cur shards

                            BSONObjBuilder b;
                            string errmsg;
                            if ( ! ci->getLastError( "admin",
                                                     BSON( "getLastError" << 1 ),
                                                     b,
                                                     errmsg,
                                                     true ) )
                            {
                                b.appendBool( "commandFailed" , true );
                                if( ! b.hasField( "errmsg" ) ){

                                    b.append( "errmsg", errmsg );
                                    gle = b.obj();
                                }
                                else if( errmsg.size() > 0 ){

                                    // Rebuild GLE object with errmsg
                                    // TODO: Make this less clumsy by improving GLE interface
                                    gle = b.obj();

                                    if( gle["errmsg"].type() == String ){

                                        BSONObj gleNoErrmsg =
                                                gle.filterFieldsUndotted( BSON( "errmsg" << 1 ),
                                                                          false );
                                        BSONObjBuilder bb;
                                        bb.appendElements( gleNoErrmsg );
                                        bb.append( "errmsg", gle["errmsg"].String() +
                                                             " ::and:: " +
                                                             errmsg );
                                        gle = bb.obj().getOwned();
                                    }
                                }
                            }
                            else{
                                gle = b.obj();
                            }
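
                            // 9517 is the stale-config error code (RecvStaleConfigCode):
                            // the shard rejected the operation because the shard
                            // version we sent was stale.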

                            if ( gle["code"].numberInt() == 9517 ) {

                                log() << "new version change detected, "
                                      << lastNeededCount << " writebacks processed previously" << endl;

                                lastNeededVersion.reset();
                                lastNeededCount = 1;

                                log() << "writeback failed because of stale config, retrying attempts: " << attempts << endl;
                                LOG(1) << "writeback error : " << gle << endl;

                                //
                                // Bringing this in line with the similar retry logic elsewhere
                                //
                                // TODO: Reloading the chunk manager may not help if we dropped a
                                // collection, but we don't actually have that info in the writeback
                                // error
                                //

                                if( attempts <= 2 ){
                                    db->getChunkManagerIfExists( ns, true );
                                }
                                else{
                                    versionManager.forceRemoteCheckShardVersionCB( ns );
                                    sleepsecs( attempts - 1 );
                                }

                                uassert( 15884, str::stream()
                                         << "Could not reload chunk manager after "
                                         << attempts << " attempts.", attempts <= 4 );

                                continue;
                            }

                            ci->clearSinceLastGetError();
                        }
                        catch ( DBException& e ) {
                            error() << "error processing writeback: " << e << endl;
                            BSONObjBuilder b;
                            e.getInfo().append( b, "err", "code" );
                            gle = b.obj();
                        }

                        break;
                    }

                    {
                        scoped_lock lk( _seenWritebacksLock );
                        WBStatus& s = _seenWritebacks[cid];
                        s.id = wid;
                        s.gle = gle;
                    }
                }
                else if ( result["noop"].trueValue() ) {
                    // no-op
                }
                else {
                    log() << "unknown writeBack result: " << result << endl;
                }

                secsToSleep = 0;
                continue;
            }
            catch ( std::exception& e ) {
                // Attention! Do not call any method that would throw an exception
                // (or assert) in this block.

                if ( inShutdown() ) {
                    // we're shutting down, so just clean up
                    return;
                }

                log() << "WriteBackListener exception : " << e.what() << endl;

                needsToReloadShardInfo = true;
            }
            catch ( ... ) {
                log() << "WriteBackListener uncaught exception!" << endl;
            }
            secsToSleep++;
            sleepsecs(secsToSleep);
            if ( secsToSleep > 10 )
                secsToSleep = 0;
        }

        log() << "WriteBackListener exiting : address no longer in cluster " << _addr;

    }
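
WriteBackListener::run is written as the body of one long-lived background thread per shard host. A hedged sketch of how such listeners might be registered, assuming a WriteBackListener constructor taking the host address and a BackgroundJob-style go() starter; the startWriteBackListenerSketch helper and its registry map are illustrations, not the original init code:

    // Sketch only: start at most one listener thread per shard address.
    void startWriteBackListenerSketch( const string& addr ) {
        static map<string,WriteBackListener*> listeners;
        static mongo::mutex listenersLock( "writebacklisteners" );

        scoped_lock lk( listenersLock );
        if ( listeners.count( addr ) )
            return;                                        // already listening
        WriteBackListener* l = new WriteBackListener( addr ); // assumed ctor
        listeners[addr] = l;
        l->go(); // spawn the thread that runs WriteBackListener::run()
    }
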