Example #1
    /* db - database name
       path - db directory
    */
    /*static*/ void Database::closeDatabase( const char *db, const string& path ) {
        assertInWriteLock();
        
        Client::Context * ctx = cc().getContext();
        assert( ctx );
        assert( ctx->inDB( db , path ) );
        Database *database = ctx->db();
        assert( database->name == db );
        
        oplogCheckCloseDatabase( database ); // oplog caches some things, dirty its caches

        if( BackgroundOperation::inProgForDb(db) ) { 
            log() << "warning: bg op in prog during close db? " << db << endl;
        }

        /* important: kill all open cursors on the database */
        string prefix(db);
        prefix += '.';
        ClientCursor::invalidate(prefix.c_str());

        NamespaceDetailsTransient::clearForPrefix( prefix.c_str() );

        dbHolder.erase( db, path );
        ctx->clear();
        delete database; // closes files
    }
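
Every example on this page calls assertInWriteLock() before mutating database state. Its definition is not part of these snippets; conceptually it is a debug assertion that the caller already holds the global write lock. A minimal, self-contained sketch of the idea (the names and lock-state tracking here are hypothetical, not the actual MongoDB implementation):

    #include <cassert>

    // Hypothetical stand-in for the real lock bookkeeping, which MongoDB keeps
    // in its global database mutex.
    static bool writeLockHeldByThisThread = false;

    // Sketch of what assertInWriteLock() amounts to: abort in debug builds if
    // the caller forgot to acquire the global write lock first.
    inline void assertInWriteLock() {
        assert( writeLockHeldByThisThread );
    }
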
Example #2
    void profile( const Client& c , CurOp& currentOp ) {
        assertInWriteLock();

        Database *db = c.database();
        DEV assert( db );
        const char *ns = db->profileName.c_str();
        
        // build object
        profileBufBuilder.reset();
        BSONObjBuilder b(profileBufBuilder);
        b.appendDate("ts", jsTime());
        currentOp.debug().append( b );

        b.append("client", c.clientAddress() );

        if ( c.getAuthenticationInfo() )
            b.append( "user" , c.getAuthenticationInfo()->getUser( nsToDatabase( ns ) ) );

        BSONObj p = b.done();

        // write: not replicated
        NamespaceDetails *d = db->namespaceIndex.details(ns);
        if( d ) {
            int len = p.objsize();
            Record *r = theDataFileMgr.fast_oplog_insert(d, ns, len);
            memcpy(getDur().writingPtr(r->data, len), p.objdata(), len);
        }
        else { 
            static time_t last;
            if( time(0) > last+10 ) {
                log() << "profile: warning ns " << ns << " does not exist" << endl;
                last = time(0);
            }
        }
    }
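
profileBufBuilder is a file-scope buffer that is reused across calls, which is why the function starts with reset(); its declaration is outside this snippet. By analogy with the static bufbuilder in Example #8, it presumably looks roughly like the following (the initial size is a guess):

    static BufBuilder profileBufBuilder(1024); // reused for every profile entry; reset() before each build
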
Example #3
    /** write an op to the oplog that is already built.
        todo : make _logOpRS() call this so we don't repeat ourselves?
        */
    void _logOpObjRS(const BSONObj& op) {
        DEV assertInWriteLock();

        const OpTime ts = op["ts"]._opTime();
        long long h = op["h"].numberLong();

        {
            const char *logns = rsoplog;
            if ( rsOplogDetails == 0 ) {
                Client::Context ctx( logns , dbpath, 0, false);
                localDB = ctx.db();
                assert( localDB );
                rsOplogDetails = nsdetails(logns);
                massert(13389, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
            }
            Client::Context ctx( logns , localDB, false );
            {
                int len = op.objsize();
                Record *r = theDataFileMgr.fast_oplog_insert(rsOplogDetails, logns, len);
                memcpy(getDur().writingPtr(r->data, len), op.objdata(), len);
            }
            /* todo: now() has code to handle clock skew.  but if the skew server to server is large it will get unhappy.
                     this code (or code in now() maybe) should be improved.
                     */
            if( theReplSet ) {
                if( !(theReplSet->lastOpTimeWritten<ts) ) {
                    log() << "replSet error possible failover clock skew issue? " << theReplSet->lastOpTimeWritten.toString() << ' ' << endl;
                }
                theReplSet->lastOpTimeWritten = ts;
                theReplSet->lastH = h;
                ctx.getClient()->setLastOp( ts.asDate() );
            }
        }
    }
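
_logOpObjRS() expects an oplog document that already carries the ts and h fields, typically one received from another replica-set member. A hypothetical caller that hand-builds a no-op entry, mirroring the builder calls of Example #9, might look like this (illustration only; in practice the op comes off the wire):

    BSONObjBuilder b;
    b.appendTimestamp("ts", OpTime::now().asDate()); // read back via op["ts"]._opTime()
    b.append("h", (long long) 0);                    // read back via op["h"].numberLong()
    b.append("op", "n");                             // "n" = no-op
    b.append("ns", "");
    b.append("o", BSONObj());
    _logOpObjRS(b.done());
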
Example #4
    void profile( const Client& c , CurOp& currentOp, int millis) {
        assertInWriteLock();
        
        string info = currentOp.debug().str.str();

        profileBufBuilder.reset();
        BSONObjBuilder b(profileBufBuilder);
        b.appendDate("ts", jsTime());
        b.append("info", info);
        b.append("millis", (double) millis);
        if ( currentOp.getNS() )
            b.append( "ns" , currentOp.getNS() );
        b.append("client", c.clientAddress() );

        BSONObj p = b.done();

        // write: not replicated
        Database *db = c.database();
        const char *ns = db->profileName.c_str();
        NamespaceDetails *d = db->namespaceIndex.details(ns);
        if( d ) {
            int len = p.objsize();
            Record *r = theDataFileMgr.fast_oplog_insert(d, ns, len);
            memcpy(getDur().writingPtr(r->data, len), p.objdata(), len);
        }
        else { 
            static time_t last;
            if( time(0) > last+10 ) {
                log() << "profile: warning ns " << ns << " does not exist" << endl;
                last = time(0);
            }
        }
    }
Example #5
File: oplog.cpp  Project: nosh/mongo
/** @param fromRepl false if from ApplyOpsCmd
    @return true if this was an update that should have happened but the document DNE (does not exist); see replset initial sync code.
 */
bool applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
    assertInWriteLock();
    LOG(6) << "applying op: " << op << endl;
    bool failedUpdate = false;

    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);

    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();

    const char *ns = fields[1].valuestrsafe();
    NamespaceDetails *nsd = nsdetails(ns);

    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();

    if ( *opType == 'i' ) {
        opCounters->gotInsert();

        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert. if index already
            // exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id.  This will be very slow. */
                Timer t;
                updateObjects(ns, o, o, true, false, false, debug );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                /* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
                RARELY if (nsd && !nsd->capped) {
                    ensureHaveIdIndex(ns);    // otherwise updates will be slow
                }

                /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                          then.  very few upserts will not be inserts...
                          */
                BSONObjBuilder b;
                b.append(_id);
                updateObjects(ns, o, b.done(), true, false, false , debug );
            }
        }
    }
Example #6
    /* we write to local.oplog.$main:
         { ts : ..., op: ..., ns: ..., o: ... }
       ts: an OpTime timestamp
       op:
        "i" insert
        "u" update
        "d" delete
        "c" db cmd
        "db" declares presence of a database (ns is set to the db name + '.')
        "n" no op
       logNS - e.g. "local.oplog.$main"
       bb:
         if not null, specifies a boolean to pass along to the other side as b: param.
         used for "justOne" or "upsert" flags on 'd', 'u'
       first: true
         when set, indicates this is the first thing we have logged for this database.
         thus, the slave does not need to copy down all the data when it sees this.
    */
    static void _logOp(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb, const OpTime &ts ) {
        if ( strncmp(ns, "local.", 6) == 0 ){
            if ( strncmp(ns, "local.slaves", 12) == 0 ){
                resetSlaveCache();
            }
            return;
        }

        DEV assertInWriteLock();
        
        Client::Context context;
        
        /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
           instead we do a single copy to the destination position in the memory mapped file.
        */

        BSONObjBuilder b;
        b.appendTimestamp("ts", ts.asDate());
        b.append("op", opstr);
        b.append("ns", ns);
        if ( bb )
            b.appendBool("b", *bb);
        if ( o2 )
            b.append("o2", *o2);
        BSONObj partial = b.done();
        int posz = partial.objsize();
        int len = posz + obj.objsize() + 1 + 2 /*o:*/;

        Record *r;
        if ( strncmp( logNS, "local.", 6 ) == 0 ) { // For now, assume this is oplog main
            if ( localOplogMainDetails == 0 ) {
                Client::Context ctx("local.", dbpath, 0, false);
                localOplogDB = ctx.db();
                localOplogMainDetails = nsdetails(logNS);
            }
            Client::Context ctx( "" , localOplogDB, false );
            r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
        } else {
            Client::Context ctx( logNS, dbpath, 0, false );
            assert( nsdetails( logNS ) );
            r = theDataFileMgr.fast_oplog_insert( nsdetails( logNS ), logNS, len);
        }

        char *p = r->data;
        memcpy(p, partial.objdata(), posz);
        *((unsigned *)p) += obj.objsize() + 1 + 2;
        p += posz - 1;
        *p++ = (char) Object;
        *p++ = 'o';
        *p++ = 0;
        memcpy(p, obj.objdata(), obj.objsize());
        p += obj.objsize();
        *p = EOO;
        
        if ( logLevel >= 6 ) {
            BSONObj temp(r);
            log( 6 ) << "logging op:" << temp << endl;
        }
        
        context.getClient()->setLastOp( ts );
    }
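
Examples #8 and #9 call append_O_Obj(), which packages the byte-level splice done inline at the end of the function above: copy the partial document, patch its total length, append an o: sub-object element holding obj, and close with EOO. A sketch reconstructed from that inline code (the real helper may additionally route the write through the durability layer):

    // Reconstructed from the inline code in Example #6: splice { ..., o: obj }
    // into the preallocated oplog record, fixing up the BSON length in place.
    static void append_O_Obj(char *dst, const BSONObj& partial, const BSONObj& obj) {
        int posz = partial.objsize();
        memcpy(dst, partial.objdata(), posz);          // everything except o:
        *((unsigned *)dst) += obj.objsize() + 1 + 2;   // new total object size
        char *p = dst + posz - 1;                      // overwrite partial's EOO byte
        *p++ = (char) Object;                          // type byte: embedded document
        *p++ = 'o';                                    // field name "o"
        *p++ = 0;                                      //   (null terminator)
        memcpy(p, obj.objdata(), obj.objsize());       // the o: payload
        p += obj.objsize();
        *p = EOO;                                      // terminate the outer object
    }
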
Example #7
    void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
        assertInWriteLock();
        LOG(6) << "applying op: " << op << endl;

        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        const char *names[] = { "o", "ns", "op", "b" };
        BSONElement fields[4];
        op.getFields(4, names, fields);

        BSONObj o;
        if( fields[0].isABSONObj() )
            o = fields[0].embeddedObject();
            
        const char *ns = fields[1].valuestrsafe();

        // operation type -- see logOp() comments for types
        const char *opType = fields[2].valuestrsafe();

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                // updates aren't allowed for indexes -- so we will do a regular insert. if index already
                // exists, that is ok.
                theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                OpDebug debug;
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;
                    updateObjects(ns, o, o, true, false, false, debug );
                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    /* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
                    RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    BSONObjBuilder b;
                    b.append(_id);
                    updateObjects(ns, o, b.done(), true, false, false , debug );
                }
            }
        }
        else if ( *opType == 'u' ) {
            opCounters->gotUpdate();
            RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
            OpDebug debug;
            updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ fields[3].booleanSafe(), /*multi*/ false, /*logop*/ false , debug );
        }
        else if ( *opType == 'd' ) {
            opCounters->gotDelete();
            if ( opType[1] == 0 )
                deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
            else
                assert( opType[1] == 'b' ); // "db" advertisement
        }
        else if ( *opType == 'c' ) {
            opCounters->gotCommand();
            BufBuilder bb;
            BSONObjBuilder ob;
            _runCommands(ns, o, bb, ob, true, 0);
        }
        else if ( *opType == 'n' ) {
            // no op
        }
        else {
            throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
        }

    }
Example #8
    /* we write to local.oplog.$main:
         { ts : ..., op: ..., ns: ..., o: ... }
       ts: an OpTime timestamp
       op:
        "i" insert
        "u" update
        "d" delete
        "c" db cmd
        "db" declares presence of a database (ns is set to the db name + '.')
        "n" no op
       logNS - where to log it.  0/null means "local.oplog.$main".
       bb:
         if not null, specifies a boolean to pass along to the other side as b: param.
         used for "justOne" or "upsert" flags on 'd', 'u'
       first: true
         when set, indicates this is the first thing we have logged for this database.
         thus, the slave does not need to copy down all the data when it sees this.

       note this is used for single collection logging even when --replSet is enabled.
    */
    static void _logOpOld(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) {
        DEV assertInWriteLock();
        static BufBuilder bufbuilder(8*1024);

        if ( strncmp(ns, "local.", 6) == 0 ) {
            if ( strncmp(ns, "local.slaves", 12) == 0 ) {
                resetSlaveCache();
            }
            return;
        }

        const OpTime ts = OpTime::now();
        Client::Context context;

        /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
           instead we do a single copy to the destination position in the memory mapped file.
        */

        bufbuilder.reset();
        BSONObjBuilder b(bufbuilder);
        b.appendTimestamp("ts", ts.asDate());
        b.append("op", opstr);
        b.append("ns", ns);
        if ( bb )
            b.appendBool("b", *bb);
        if ( o2 )
            b.append("o2", *o2);
        BSONObj partial = b.done(); // partial is everything except the o:... part.

        int po_sz = partial.objsize();
        int len = po_sz + obj.objsize() + 1 + 2 /*o:*/;

        Record *r;
        if( logNS == 0 ) {
            logNS = "local.oplog.$main";
            if ( localOplogMainDetails == 0 ) {
                Client::Context ctx( logNS , dbpath, 0, false);
                localDB = ctx.db();
                assert( localDB );
                localOplogMainDetails = nsdetails(logNS);
                assert( localOplogMainDetails );
            }
            Client::Context ctx( logNS , localDB, false );
            r = theDataFileMgr.fast_oplog_insert(localOplogMainDetails, logNS, len);
        }
        else {
            Client::Context ctx( logNS, dbpath, 0, false );
            assert( nsdetails( logNS ) );
            // first we allocate the space, then we fill it below.
            r = theDataFileMgr.fast_oplog_insert( nsdetails( logNS ), logNS, len);
        }

        append_O_Obj(r->data, partial, obj);

        context.getClient()->setLastOp( ts.asDate() );

        if ( logLevel >= 6 ) {
            BSONObj temp(r);
            log( 6 ) << "logging op:" << temp << endl;
        }

    }
Example #9
    static void _logOpRS(const char *opstr, const char *ns, const char *logNS, const BSONObj& obj, BSONObj *o2, bool *bb ) {
        DEV assertInWriteLock();

        if ( strncmp(ns, "local.", 6) == 0 ) {
            if ( strncmp(ns, "local.slaves", 12) == 0 )
                resetSlaveCache();
            return;
        }

        const OpTime ts = OpTime::now();
        long long hashNew;
        if( theReplSet ) {
            massert(13312, "replSet error : logOp() but not primary?", theReplSet->box.getState().primary());
            hashNew = (theReplSet->lastH * 131 + ts.asLL()) * 17 + theReplSet->selfId();
        }
        else {
            // must be initiation
            assert( *ns == 0 );
            hashNew = 0;
        }

        /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
           instead we do a single copy to the destination position in the memory mapped file.
        */

        logopbufbuilder.reset();
        BSONObjBuilder b(logopbufbuilder);
        b.appendTimestamp("ts", ts.asDate());
        b.append("h", hashNew);
        b.append("op", opstr);
        b.append("ns", ns);
        if ( bb )
            b.appendBool("b", *bb);
        if ( o2 )
            b.append("o2", *o2);
        BSONObj partial = b.done();
        int posz = partial.objsize();
        int len = posz + obj.objsize() + 1 + 2 /*o:*/;

        Record *r;
        DEV assert( logNS == 0 );
        {
            const char *logns = rsoplog;
            if ( rsOplogDetails == 0 ) {
                Client::Context ctx( logns , dbpath, 0, false);
                localDB = ctx.db();
                assert( localDB );
                rsOplogDetails = nsdetails(logns);
                massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
            }
            Client::Context ctx( logns , localDB, false );
            r = theDataFileMgr.fast_oplog_insert(rsOplogDetails, logns, len);
            /* todo: now() has code to handle clock skew.  but if the skew server to server is large it will get unhappy.
                     this code (or code in now() maybe) should be improved.
                     */
            if( theReplSet ) {
                if( !(theReplSet->lastOpTimeWritten<ts) ) {
                    log() << "replSet ERROR possible failover clock skew issue? " << theReplSet->lastOpTimeWritten << ' ' << ts << rsLog;
                    log() << "replSet " << theReplSet->isPrimary() << rsLog;
                }
                theReplSet->lastOpTimeWritten = ts;
                theReplSet->lastH = hashNew;
                ctx.getClient()->setLastOp( ts.asDate() );
            }
        }

        append_O_Obj(r->data, partial, obj);

        if ( logLevel >= 6 ) {
            BSONObj temp(r);
            log( 6 ) << "logOp:" << temp << endl;
        }
    }
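
_logOpOld() (Example #8) and _logOpRS() (this example) share the same signature, which suggests a single entry point picks one of them up front depending on whether the server runs with --replSet. The wiring below is only a hypothetical sketch of that idea, not the actual source:

    // Hypothetical dispatch between the master/slave and replica-set oplog writers.
    static void (*logOpImpl)(const char *opstr, const char *ns, const char *logNS,
                             const BSONObj& obj, BSONObj *o2, bool *bb) = _logOpOld;

    void useReplSetOplog() { logOpImpl = _logOpRS; }     // e.g. when --replSet is active

    void logOpExample(const char *opstr, const char *ns, const BSONObj& obj) {
        logOpImpl(opstr, ns, 0, obj, 0, 0);              // logNS == 0 -> default oplog
    }
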
Example #10
    void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
        OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;

        if( logLevel >= 6 )
            log() << "applying op: " << op << endl;

        assertInWriteLock();

        OpDebug debug;
        BSONObj o = op.getObjectField("o");
        const char *ns = op.getStringField("ns");
        // operation type -- see logOp() comments for types
        const char *opType = op.getStringField("op");

        if ( *opType == 'i' ) {
            opCounters->gotInsert();

            const char *p = strchr(ns, '.');
            if ( p && strcmp(p, ".system.indexes") == 0 ) {
                // updates aren't allowed for indexes -- so we will do a regular insert. if index already
                // exists, that is ok.
                theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
            }
            else {
                // do upserts for inserts as we might get replayed more than once
                BSONElement _id;
                if( !o.getObjectID(_id) ) {
                    /* No _id.  This will be very slow. */
                    Timer t;
                    updateObjects(ns, o, o, true, false, false , debug );
                    if( t.millis() >= 2 ) {
                        RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                    }
                }
                else {
                    BSONObjBuilder b;
                    b.append(_id);

                    /* erh 10/16/2009 - this is probably not relevant any more since it's auto-created, but not worth removing */
                    RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                    /* todo : it may be better to do an insert here, and then catch the dup key exception and do update
                              then.  very few upserts will not be inserts...
                              */
                    updateObjects(ns, o, b.done(), true, false, false , debug );
                }
            }
        }
        else if ( *opType == 'u' ) {
            opCounters->gotUpdate();

            RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
            updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ op.getBoolField("b"), /*multi*/ false, /*logop*/ false , debug );
        }
        else if ( *opType == 'd' ) {
            opCounters->gotDelete();

            if ( opType[1] == 0 )
                deleteObjects(ns, o, op.getBoolField("b"));
            else
                assert( opType[1] == 'b' ); // "db" advertisement
        }
        else if ( *opType == 'n' ) {
            // no op
        }
        else if ( *opType == 'c' ) {
            opCounters->gotCommand();

            BufBuilder bb;
            BSONObjBuilder ob;
            _runCommands(ns, o, bb, ob, true, 0);
        }
        else {
            stringstream ss;
            ss << "unknown opType [" << opType << "]";
            throw MsgAssertionException( 13141 , ss.str() );
        }

    }
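
As a hypothetical illustration of the dispatch above, this is how a single insert op could be fed through applyOperation_inlock() (in the server the op actually comes from the oplog or from applyOps); the field names mirror the ones the function reads:

    // Illustration only: apply an insert of { _id: 1, x: 1 } into test.foo
    // as if it had been read from the oplog. Caller must hold the write lock.
    void applyOneInsertExample() {
        BSONObjBuilder b;
        b.append("op", "i");                        // operation type: insert
        b.append("ns", "test.foo");                 // target namespace
        b.append("o", BSON( "_id" << 1 << "x" << 1 ));
        applyOperation_inlock(b.done(), /*fromRepl*/ true);
    }
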