// Test helper: builds an oplog-style document for the given operation and
// hands it to the background sync target.
void addOp(const string& op, BSONObj o, BSONObj* o2 = NULL,
           const char* coll = NULL, int version = 0) {
    OpTime ts;
    {
        Lock::GlobalWrite lk;
        ts = OpTime::_now();
    }

    BSONObjBuilder b;
    b.appendTimestamp("ts", ts.asLL());
    if (version != 0) {
        b.append("v", version);
    }
    b.append("op", op);
    b.append("o", o);
    if (o2) {
        b.append("o2", *o2);
    }
    if (coll) {
        b.append("ns", coll);
    }
    else {
        b.append("ns", ns());
    }
    _bgsync->addDoc(b.done());
}
// Test: multiInitialSyncApply is exercised three times -- a clean apply, an
// apply that fails on the first attempt, and an apply with retries disabled.
void run() {
    OpTime o;
    {
        mongo::mutex::scoped_lock lk2(OpTime::m);
        o = OpTime::now(lk2);
    }

    BSONObjBuilder b;
    b.append("ns", "dummy");
    b.appendTimestamp("ts", o.asLL());
    BSONObj obj = b.obj();

    MockInitialSync mock;

    // all three should succeed
    std::vector<BSONObj> ops;
    ops.push_back(obj);
    replset::multiInitialSyncApply(ops, &mock);

    mock.failOnStep = MockInitialSync::FAIL_FIRST_APPLY;
    replset::multiInitialSyncApply(ops, &mock);

    mock.retry = false;
    replset::multiInitialSyncApply(ops, &mock);

    drop();
}
// Test: an update op applied against a missing document succeeds once the
// syncer is allowed to insert the document on retry (insertOnRetry).
void run() {
    OpTime o = OpTime::_now();

    BSONObjBuilder b;
    b.appendTimestamp("ts", o.asLL());
    b.append("op", "u");
    b.append("o", BSON("$set" << BSON("x" << 456)));
    b.append("o2", BSON("_id" << 123));
    b.append("ns", ns());
    BSONObj obj = b.obj();

    SyncTest2 sync2;
    std::vector<BSONObj> ops;
    ops.push_back(obj);

    sync2.insertOnRetry = true;
    // succeeds
    multiInitialSyncApply(ops, &sync2);

    BSONObj fin = findOne();
    verify(fin["x"].Number() == 456);

    drop();
}
// Replica-set variant of logOp(): builds the oplog entry header, reserves space
// in local.oplog.rs, and writes the entry with a single copy of obj.
static void _logOpRS(const char *opstr, const char *ns, const char *logNS,
                     const BSONObj& obj, BSONObj *o2, bool *bb) {
    DEV assertInWriteLock();

    if ( strncmp(ns, "local.", 6) == 0 ) {
        if ( strncmp(ns, "local.slaves", 12) == 0 )
            resetSlaveCache();
        return;
    }

    const OpTime ts = OpTime::now();
    long long hashNew;
    if ( theReplSet ) {
        massert(13312, "replSet error : logOp() but not primary?",
                theReplSet->box.getState().primary());
        hashNew = (theReplSet->lastH * 131 + ts.asLL()) * 17 + theReplSet->selfId();
    }
    else {
        // must be initiation
        assert( *ns == 0 );
        hashNew = 0;
    }

    /* we jump through a bunch of hoops here to avoid copying the obj buffer twice --
       instead we do a single copy to the destination position in the memory mapped file.
    */
    logopbufbuilder.reset();
    BSONObjBuilder b(logopbufbuilder);
    b.appendTimestamp("ts", ts.asDate());
    b.append("h", hashNew);
    b.append("op", opstr);
    b.append("ns", ns);
    if ( bb )
        b.appendBool("b", *bb);
    if ( o2 )
        b.append("o2", *o2);
    BSONObj partial = b.done();
    int posz = partial.objsize();
    int len = posz + obj.objsize() + 1 + 2 /*o:*/;

    Record *r;
    DEV assert( logNS == 0 );
    {
        const char *logns = rsoplog;
        if ( rsOplogDetails == 0 ) {
            Client::Context ctx( logns , dbpath, 0, false );
            localDB = ctx.db();
            assert( localDB );
            rsOplogDetails = nsdetails(logns);
            massert(13347, "local.oplog.rs missing. did you drop it? if so restart server", rsOplogDetails);
        }
        Client::Context ctx( logns , localDB, false );
        r = theDataFileMgr.fast_oplog_insert(rsOplogDetails, logns, len);
        /* todo: now() has code to handle clock skew.  but if the skew server to server
                 is large it will get unhappy. this code (or code in now() maybe) should
                 be improved.
        */
        if( theReplSet ) {
            if( !(theReplSet->lastOpTimeWritten < ts) ) {
                log() << "replSet ERROR possible failover clock skew issue? "
                      << theReplSet->lastOpTimeWritten << ' ' << ts << rsLog;
                log() << "replSet " << theReplSet->isPrimary() << rsLog;
            }
            theReplSet->lastOpTimeWritten = ts;
            theReplSet->lastH = hashNew;
            ctx.getClient()->setLastOp( ts.asDate() );
        }
    }

    append_O_Obj(r->data, partial, obj);

    if ( logLevel >= 6 ) {
        BSONObj temp(r);
        log( 6 ) << "logOp:" << temp << endl;
    }
}
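The length computation above (len = posz + obj.objsize() + 1 + 2 /*o:*/) together with the final append_O_Obj call implies a single-copy layout: the partial entry minus its terminating EOO byte, followed by an embedded-document element named "o" holding obj, a new EOO, and a patched length prefix. The standalone sketch below illustrates that layout on raw BSON bytes; it is not MongoDB's append_O_Obj, and the helper name spliceOField and the hand-built byte arrays are invented for illustration.

#include <cstdio>
#include <cstring>

// Illustrative only: splice a pre-built "partial" entry and a separate document
// together as { ...partial..., o: obj }, copying each input buffer exactly once.
static void spliceOField(const unsigned char* partial, int partialSize,
                         const unsigned char* obj, int objSize,
                         unsigned char* dst) {
    int headerSize = partialSize - 1;               // drop the trailing EOO byte
    int totalSize  = partialSize + objSize + 1 + 2; // +1 type byte, +2 for "o\0" name

    memcpy(dst, partial, headerSize);               // copy partial minus its EOO

    // patch the little-endian int32 length prefix to the new total size
    dst[0] = (unsigned char)(totalSize & 0xff);
    dst[1] = (unsigned char)((totalSize >> 8) & 0xff);
    dst[2] = (unsigned char)((totalSize >> 16) & 0xff);
    dst[3] = (unsigned char)((totalSize >> 24) & 0xff);

    unsigned char* p = dst + headerSize;
    *p++ = 0x03;                                    // BSON element type: embedded document
    *p++ = 'o';                                     // field name "o"
    *p++ = 0;
    memcpy(p, obj, objSize);                        // copy the operation document
    p += objSize;
    *p = 0;                                         // new EOO terminator
}

int main() {
    // partial = { "h": long 0 } : length(4) + type(1) + "h\0"(2) + value(8) + EOO(1) = 16 bytes
    unsigned char partial[16] = {16,0,0,0, 0x12,'h',0, 0,0,0,0,0,0,0,0, 0};
    // obj = { } : length(4) + EOO(1) = 5 bytes
    unsigned char obj[5] = {5,0,0,0,0};

    unsigned char out[64];
    spliceOField(partial, sizeof(partial), obj, sizeof(obj), out);

    // expect 24 == 16 + 5 + 1 + 2, matching the len formula in _logOpRS
    printf("spliced size: %d\n", out[0] | (out[1] << 8) | (out[2] << 16) | (out[3] << 24));
    return 0;
}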