Code example #1
File: oplog.cpp Project: chmanie/mongo
    static void _logOpOld(OperationContext* txn,
                          const char *opstr,
                          const char *ns,
                          const char *logNS,
                          const BSONObj& obj,
                          BSONObj *o2,
                          bool *bb,
                          bool fromMigrate ) {
        Lock::DBWrite lk(txn->lockState(), "local");
        WriteUnitOfWork wunit(txn);
        static BufBuilder bufbuilder(8*1024); // todo there is likely a mutex on this constructor

        if ( strncmp(ns, "local.", 6) == 0 ) {
            if ( strncmp(ns, "local.slaves", 12) == 0 ) {
                resetSlaveCache();
            }
            return;
        }

        mutex::scoped_lock lk2(newOpMutex);

        OpTime ts(getNextGlobalOptime());
        newOptimeNotifier.notify_all();

        /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
           instead we do a single copy to the destination position in the memory mapped file.
        */

        bufbuilder.reset();
        BSONObjBuilder b(bufbuilder);
        b.appendTimestamp("ts", ts.asDate());
        b.append("op", opstr);
        b.append("ns", ns);
        if (fromMigrate) 
            b.appendBool("fromMigrate", true);
        if ( bb )
            b.appendBool("b", *bb);
        if ( o2 )
            b.append("o2", *o2);
        BSONObj partial = b.done(); // partial is everything except the o:... part.

        if( logNS == 0 ) {
            logNS = "local.oplog.$main";
        }

        if ( localOplogMainCollection == 0 ) {
            Client::Context ctx(txn, logNS);
            localDB = ctx.db();
            verify( localDB );
            localOplogMainCollection = localDB->getCollection(txn, logNS);
            verify( localOplogMainCollection );
        }

        Client::Context ctx(txn, logNS , localDB);
        OplogDocWriter writer( partial, obj );
        checkOplogInsert( localOplogMainCollection->insertDocument( txn, &writer, false ) );

        ctx.getClient()->setLastOp( ts );
        wunit.commit();
    }
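
For reference, a sketch of the shape of the document this function assembles: the "partial" object built above is combined with obj by OplogDocWriter, which supplies the o: payload. Field values below are illustrative, and the optional fields appear only when the corresponding arguments are set.

    // Approximate shape of the resulting master/slave oplog entry (illustrative values):
    //   ts: Timestamp(...)    // appendTimestamp("ts", ts.asDate())
    //   op: "i"               // opstr (e.g. i/u/d/c/n)
    //   ns: "test.coll"
    //   fromMigrate: true     // only when fromMigrate is set
    //   b: true               // only when bb is non-null
    //   o2: { ... }           // only when o2 is non-null
    //   o: { ... }            // obj, appended by OplogDocWriter after the partial object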
Code example #2
File: oplog.cpp Project: chmanie/mongo
    /** write an op to the oplog that is already built.
        todo : make _logOpRS() call this so we don't repeat ourself?
        */
    void _logOpObjRS(OperationContext* txn, const BSONObj& op) {
        Lock::DBWrite lk(txn->lockState(), "local");
        // XXX soon this needs to be part of an outer WUOW not its own.
        // We can't do this yet due to locking limitations.
        WriteUnitOfWork wunit(txn);

        const OpTime ts = op["ts"]._opTime();
        long long h = op["h"].numberLong();

        {
            if ( localOplogRSCollection == 0 ) {
                Client::Context ctx(txn, rsoplog);

                localDB = ctx.db();
                verify( localDB );
                localOplogRSCollection = localDB->getCollection(txn, rsoplog);
                massert(13389,
                        "local.oplog.rs missing. did you drop it? if so restart server",
                        localOplogRSCollection);
            }
            Client::Context ctx(txn, rsoplog, localDB);
            checkOplogInsert(localOplogRSCollection->insertDocument(txn, op, false));

            /* todo: now() has code to handle clock skew.  but if the skew server to server is large it will get unhappy.
                     this code (or code in now() maybe) should be improved.
                     */
            if( theReplSet ) {
                if( !(theReplSet->lastOpTimeWritten<ts) ) {
                    log() << "replication oplog stream went back in time. previous timestamp: "
                          << theReplSet->lastOpTimeWritten << " newest timestamp: " << ts
                          << ". attempting to sync directly from primary." << endl;
                    BSONObjBuilder result;
                    Status status =
                            theReplSet->forceSyncFrom(theReplSet->box.getPrimary()->fullName(),
                                                      &result);
                    if (!status.isOK()) {
                        log() << "Can't sync from primary: " << status;
                    }
                }
                theReplSet->lastOpTimeWritten = ts;
                theReplSet->lastH = h;
                ctx.getClient()->setLastOp( ts );

                BackgroundSync::notify();
            }
        }

        setNewOptime(ts);
        wunit.commit();
    }
Code example #3
File: oplog.cpp Project: AndrewCEmil/mongo
    /** write an op to the oplog that is already built.
        todo : make _logOpRS() call this so we don't repeat ourself?
        */
    void _logOpObjRS(const BSONObj& op) {
        OperationContextImpl txn;
        Lock::DBWrite lk(txn.lockState(), "local");

        const OpTime ts = op["ts"]._opTime();
        long long h = op["h"].numberLong();

        {
            if ( localOplogRSCollection == 0 ) {
                Client::Context ctx(rsoplog, storageGlobalParams.dbpath);
                localDB = ctx.db();
                verify( localDB );
                localOplogRSCollection = localDB->getCollection( &txn, rsoplog );
                massert(13389,
                        "local.oplog.rs missing. did you drop it? if so restart server",
                        localOplogRSCollection);
            }
            Client::Context ctx(rsoplog, localDB);
            checkOplogInsert( localOplogRSCollection->insertDocument( &txn, op, false ) );

            /* todo: now() has code to handle clock skew.  but if the skew server to server is large it will get unhappy.
                     this code (or code in now() maybe) should be improved.
                     */
            if( theReplSet ) {
                if( !(theReplSet->lastOpTimeWritten<ts) ) {
                    log() << "replication oplog stream went back in time. previous timestamp: "
                          << theReplSet->lastOpTimeWritten << " newest timestamp: " << ts
                          << ". attempting to sync directly from primary." << endl;
                    std::string errmsg;
                    BSONObjBuilder result;
                    if (!theReplSet->forceSyncFrom(theReplSet->box.getPrimary()->fullName(),
                                                   errmsg, result)) {
                        log() << "Can't sync from primary: " << errmsg << endl;
                    }
                }
                theReplSet->lastOpTimeWritten = ts;
                theReplSet->lastH = h;
                ctx.getClient()->setLastOp( ts );

                BackgroundSync::notify();
            }
        }

        setNewOptime(ts);
    }
Code example #4
File: oplog.cpp Project: TylerBrock/mongo
    /** write an op to the oplog that is already built.
        todo : make _logOpRS() call this so we don't repeat ourself?
        */
    OpTime _logOpObjRS(OperationContext* txn, const BSONObj& op) {
        Lock::DBLock lk(txn->lockState(), "local", newlm::MODE_X);
        // XXX soon this needs to be part of an outer WUOW not its own.
        // We can't do this yet due to locking limitations.
        WriteUnitOfWork wunit(txn);

        const OpTime ts = op["ts"]._opTime();
        long long hash = op["h"].numberLong();

        {
            if ( localOplogRSCollection == 0 ) {
                Client::Context ctx(txn, rsoplog);

                localDB = ctx.db();
                verify( localDB );
                localOplogRSCollection = localDB->getCollection(txn, rsoplog);
                massert(13389,
                        "local.oplog.rs missing. did you drop it? if so restart server",
                        localOplogRSCollection);
            }
            Client::Context ctx(txn, rsoplog, localDB);
            checkOplogInsert(localOplogRSCollection->insertDocument(txn, op, false));

            ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();
            OpTime myLastOptime = replCoord->getMyLastOptime();
            if (!(myLastOptime < ts)) {
                severe() << "replication oplog stream went back in time. previous timestamp: "
                         << myLastOptime << " newest timestamp: " << ts;
                fassertFailedNoTrace(18905);
            }
            
            BackgroundSync* bgsync = BackgroundSync::get();
            // Keep this up-to-date, in case we step up to primary.
            bgsync->setLastAppliedHash(hash);

            ctx.getClient()->setLastOp( ts );
            
            replCoord->setMyLastOptime(txn, ts);
            bgsync->notify();
        }

        setNewOptime(ts);
        wunit.commit();
        return ts;
    }
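
A side note on the went-back-in-time check above: the new entry's ts must compare strictly greater than this node's last written optime, otherwise the function fasserts. A minimal self-contained sketch of that comparison, modelling an optime as a (seconds, increment) pair purely for illustration:

    #include <cstdint>
    #include <iostream>
    #include <utility>

    // Stand-in for OpTime: ordered by seconds, then increment, which is what
    // std::pair's lexicographic operator< provides.
    using FakeOpTime = std::pair<uint32_t, uint32_t>;

    int main() {
        FakeOpTime myLastOptime{1400000000, 5};
        FakeOpTime ts{1400000000, 5};   // equal optime: not strictly greater

        // Mirrors: if (!(myLastOptime < ts)) fassertFailedNoTrace(18905);
        if (!(myLastOptime < ts)) {
            std::cout << "would fassert 18905: oplog stream went back in time" << std::endl;
        }
        return 0;
    }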
Code example #5
File: oplog.cpp Project: chmanie/mongo
    static void _logOpRS(OperationContext* txn,
                         const char *opstr,
                         const char *ns,
                         const char *logNS,
                         const BSONObj& obj,
                         BSONObj *o2,
                         bool *bb,
                         bool fromMigrate ) {
        Lock::DBWrite lk1(txn->lockState(), "local");
        WriteUnitOfWork wunit(txn);

        if ( strncmp(ns, "local.", 6) == 0 ) {
            if ( strncmp(ns, "local.slaves", 12) == 0 )
                resetSlaveCache();
            return;
        }

        mutex::scoped_lock lk2(newOpMutex);

        OpTime ts(getNextGlobalOptime());
        newOptimeNotifier.notify_all();

        long long hashNew;
        if( theReplSet ) {
            if (!theReplSet->box.getState().primary()) {
                log() << "replSet error : logOp() but not primary";
                fassertFailed(17405);
            }
            hashNew = (theReplSet->lastH * 131 + ts.asLL()) * 17 + theReplSet->selfId();
        }
        else {
            // must be initiation
            verify( *ns == 0 );
            hashNew = 0;
        }

        /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
           instead we do a single copy to the destination position in the memory mapped file.
        */

        logopbufbuilder.reset();
        BSONObjBuilder b(logopbufbuilder);
        b.appendTimestamp("ts", ts.asDate());
        b.append("h", hashNew);
        b.append("v", OPLOG_VERSION);
        b.append("op", opstr);
        b.append("ns", ns);
        if (fromMigrate) 
            b.appendBool("fromMigrate", true);
        if ( bb )
            b.appendBool("b", *bb);
        if ( o2 )
            b.append("o2", *o2);
        BSONObj partial = b.done();

        DEV verify( logNS == 0 ); // check this was never a master/slave master

        if ( localOplogRSCollection == 0 ) {
            Client::Context ctx(txn, rsoplog);
            localDB = ctx.db();
            verify( localDB );
            localOplogRSCollection = localDB->getCollection( txn, rsoplog );
            massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", localOplogRSCollection);
        }

        Client::Context ctx(txn, rsoplog, localDB);
        OplogDocWriter writer( partial, obj );
        checkOplogInsert( localOplogRSCollection->insertDocument( txn, &writer, false ) );

        /* todo: now() has code to handle clock skew.  but if the skew server to server is large it will get unhappy.
           this code (or code in now() maybe) should be improved.
        */
        if( theReplSet ) {
            if( !(theReplSet->lastOpTimeWritten<ts) ) {
                log() << "replication oplog stream went back in time. previous timestamp: "
                      << theReplSet->lastOpTimeWritten << " newest timestamp: " << ts
                      << ". attempting to sync directly from primary." << endl;
                BSONObjBuilder result;
                Status status = theReplSet->forceSyncFrom(theReplSet->box.getPrimary()->fullName(),
                                                          &result);
                if (!status.isOK()) {
                    log() << "Can't sync from primary: " << status;
                }
            }
            theReplSet->lastOpTimeWritten = ts;
            theReplSet->lastH = hashNew;
            ctx.getClient()->setLastOp( ts );
        }
        wunit.commit();

    }
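
The hash computation in this example chains each entry's hash to the previous one: (lastH * 131 + ts.asLL()) * 17 + selfId(). A small self-contained sketch of that arithmetic, with stand-in parameters for theReplSet->lastH, ts.asLL() and theReplSet->selfId(), and deliberately small illustrative values to keep the sketch free of signed overflow:

    #include <cstdint>
    #include <iostream>

    // Same arithmetic as: hashNew = (theReplSet->lastH * 131 + ts.asLL()) * 17 + theReplSet->selfId();
    int64_t advanceOplogHash(int64_t lastH, int64_t tsAsLL, int32_t selfId) {
        return (lastH * 131 + tsAsLL) * 17 + selfId;
    }

    int main() {
        int64_t h = 0;                              // the initiation case above starts at 0
        h = advanceOplogHash(h, 1400000000LL, 1);   // first entry
        h = advanceOplogHash(h, 1400000001LL, 1);   // second entry chains off the first
        std::cout << "chained hash: " << h << std::endl;
        return 0;
    }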
Code example #6
File: oplog.cpp Project: TylerBrock/mongo
    static void _logOpRS(OperationContext* txn,
                         const char *opstr,
                         const char *ns,
                         const char *logNS,
                         const BSONObj& obj,
                         BSONObj *o2,
                         bool *bb,
                         bool fromMigrate ) {
        Lock::DBLock lk1(txn->lockState(), "local", newlm::MODE_X);
        WriteUnitOfWork wunit(txn);

        if ( strncmp(ns, "local.", 6) == 0 ) {
            if ( strncmp(ns, "local.slaves", 12) == 0 )
                resetSlaveCache();
            return;
        }
        ReplicationCoordinator* replCoord = getGlobalReplicationCoordinator();

        mutex::scoped_lock lk2(newOpMutex);

        OpTime ts(getNextGlobalOptime());
        newOptimeNotifier.notify_all();

        long long hashNew = BackgroundSync::get()->getLastAppliedHash();

        // Check to make sure logOp() is legal at this point.
        if (*opstr == 'n') {
            // 'n' operations are always logged
            invariant(*ns == '\0');

            // 'n' operations do not advance the hash, since they are not rolled back
        }
        else {
            if (!replCoord->canAcceptWritesForDatabase(nsToDatabaseSubstring(ns))) {
                severe() << "replSet error : logOp() but can't accept write to collection " << ns;
                fassertFailed(17405);
            }

            // Advance the hash
            hashNew = (hashNew * 131 + ts.asLL()) * 17 + replCoord->getMyId();
        }


        /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
           instead we do a single copy to the destination position in the memory mapped file.
        */

        logopbufbuilder.reset();
        BSONObjBuilder b(logopbufbuilder);
        b.appendTimestamp("ts", ts.asDate());
        b.append("h", hashNew);
        b.append("v", OPLOG_VERSION);
        b.append("op", opstr);
        b.append("ns", ns);
        if (fromMigrate) 
            b.appendBool("fromMigrate", true);
        if ( bb )
            b.appendBool("b", *bb);
        if ( o2 )
            b.append("o2", *o2);
        BSONObj partial = b.done();

        DEV verify( logNS == 0 ); // check this was never a master/slave master

        if ( localOplogRSCollection == 0 ) {
            Client::Context ctx(txn, rsoplog);
            localDB = ctx.db();
            verify( localDB );
            localOplogRSCollection = localDB->getCollection( txn, rsoplog );
            massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", localOplogRSCollection);
        }

        Client::Context ctx(txn, rsoplog, localDB);
        OplogDocWriter writer( partial, obj );
        checkOplogInsert( localOplogRSCollection->insertDocument( txn, &writer, false ) );

        BackgroundSync::get()->setLastAppliedHash(hashNew);
        ctx.getClient()->setLastOp( ts );
        replCoord->setMyLastOptime(txn, ts);

        wunit.commit();

    }