void msgassertedNoTrace(int msgid, const char *msg) {
    assertionCount.condrollover( ++assertionCount.warning );
    log() << "Assertion: " << msgid << ":" << msg << endl;
    lastAssert[2].set(msg, getDbContext().c_str(), "", 0);
    raiseError(msgid, msg && *msg ? msg : "massert failure");
    throw MsgAssertionException(msgid, msg);
}
NOINLINE_DECL void msgasserted(int msgid, const char* msg) {
    assertionCount.condrollover(++assertionCount.warning);
    log() << "Assertion: " << msgid << ":" << msg << endl;
    // breakpoint();
    logContext();
    throw MsgAssertionException(msgid, msg);
}
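These msgasserted/msgassertedNoTrace variants are normally reached through the massert macro rather than called directly (the NamespaceDetails::idx snippet further down uses massert this way). Below is a minimal sketch of that calling pattern, assuming the conventional expansion in which the condition is tested first and msgasserted fires only on failure; the caller name and assertion code are illustrative, not taken from the source.

// Hypothetical caller (validateConfig and code 10000 are illustrative only).
// massert(code, message, expression) evaluates the expression and, when it is
// false, calls msgasserted(code, message), which logs the assertion and throws
// MsgAssertionException(code, message).
void validateConfig(const BSONObj& config) {
    massert(10000, "config object must contain an _id field", config.hasField("_id"));
    // execution continues only when the field is present
}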
/* fetch a single object from collection ns that matches query
   set your db SavedContext first
*/
DiskLoc Helpers::findOne(const char *ns, const BSONObj &query, bool requireIndex) {
    MultiPlanScanner s( ns, query, BSONObj(), 0, !requireIndex );
    FindOne original( requireIndex );
    shared_ptr< FindOne > res = s.runOp( original );
    if ( ! res->complete() )
        throw MsgAssertionException( res->exception() );
    return res->loc();
}
virtual void _init() {
    if ( requireIndex_ && strcmp( qp().indexKey().firstElement().fieldName(), "$natural" ) == 0 )
        throw MsgAssertionException( 9011 , "Not an index cursor" );
    c_ = qp().newCursor();
    if ( !c_->ok() ) {
        setComplete();
    }
}
NOINLINE_DECL void msgasserted(int msgid, const char *msg) {
    assertionCount.condrollover( ++assertionCount.warning );
    tlog() << "Assertion: " << msgid << ":" << msg << endl;
    raiseError(msgid, msg && *msg ? msg : "massert failure");
    breakpoint();
    printStackTrace();
    throw MsgAssertionException(msgid, msg);
}
virtual void _init() {
    if ( qp().scanAndOrderRequired() ) {
        throw MsgAssertionException( OutOfOrderDocumentsAssertionCode, "order spec cannot be satisfied with index" );
    }
    _c = qp().newCursor();
    _capped = _c->capped();
    mayAdvance();
}
NOINLINE_DECL void msgasserted(int msgid, const char *msg) {
    assertionCount.condrollover( ++assertionCount.warning );
    tlog() << "Assertion: " << msgid << ":" << msg << endl;
    setLastError(msgid, msg && *msg ? msg : "massert failure");
    //breakpoint();
    logContext();
    throw MsgAssertionException(msgid, msg);
}
virtual void init() {
    if ( requireIndex_ && strcmp( qp().indexKey().firstElement().fieldName(), "$natural" ) == 0 )
        throw MsgAssertionException( 9011 , "Not an index cursor" );
    c_ = qp().newCursor();
    if ( !c_->ok() )
        setComplete();
    else
        matcher_.reset( new CoveredIndexMatcher( qp().query(), qp().indexKey() ) );
}
NOINLINE_DECL void msgassertedNoTraceWithLocation(int msgid, const char* msg, const char* file, unsigned line) {
    assertionCount.condrollover(++assertionCount.warning);
    log() << "Assertion: " << msgid << ":" << redact(msg) << ' ' << file << ' ' << dec << line << endl;
    throw MsgAssertionException(msgid, msg);
}
/* fetch a single object from collection ns that matches query
   set your db SavedContext first
*/
bool Helpers::findOne(const char *ns, const BSONObj &query, BSONObj& result, bool requireIndex) {
    MultiPlanScanner s( ns, query, BSONObj(), 0, !requireIndex );
    FindOne original( requireIndex );
    shared_ptr< FindOne > res = s.runOp( original );
    if ( ! res->complete() )
        throw MsgAssertionException( res->exception() );
    if ( res->one().isEmpty() )
        return false;
    result = res->one();
    return true;
}
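Both Helpers::findOne overloads above surface plan failures as a thrown MsgAssertionException, e.g. code 9011 ("Not an index cursor") when requireIndex is true and only a $natural plan is available. The following is a hedged sketch of a caller handling that case; findByQueryOrEmpty is a hypothetical helper, not part of the source.

// Hypothetical caller: converts the assertion into an empty result.
BSONObj findByQueryOrEmpty(const char* ns, const BSONObj& query) {
    BSONObj result;
    try {
        if (Helpers::findOne(ns, query, result, /*requireIndex*/ true))
            return result;
    }
    catch (const MsgAssertionException& e) {
        log() << "findOne failed on " << ns << ": " << e.what() << endl;
    }
    return BSONObj();
}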
const IndexDetails& NamespaceDetails::idx(int idxNo, bool missingExpected) const {
    if( idxNo < NIndexesBase ) {
        const IndexDetails& id = _indexes[idxNo];
        return id;
    }
    const Extra *e = extra();
    if ( ! e ) {
        if ( missingExpected )
            throw MsgAssertionException( 17421 , "Missing Extra" );
        massert(17422, "missing Extra", e);
    }
    int i = idxNo - NIndexesBase;
    if( i >= NIndexesExtra ) {
        e = e->next(this);
        if ( ! e ) {
            if ( missingExpected )
                throw MsgAssertionException( 17423 , "missing extra" );
            massert(17424, "missing Extra", e);
        }
        i -= NIndexesExtra;
    }
    return e->details[i];
}
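A short illustration of the missingExpected contract seen above: when missingExpected is true, a missing Extra block is reported by throwing MsgAssertionException directly (codes 17421/17423), whereas with false the massert calls fire instead. The hasIndexSlot helper below is hypothetical and only shows how a caller might rely on the throwing variant.

// Hypothetical caller: probe an index slot without tripping massert.
bool hasIndexSlot(const NamespaceDetails* d, int idxNo) {
    try {
        d->idx(idxNo, /*missingExpected*/ true);
        return true;
    }
    catch (const MsgAssertionException&) {
        return false;   // the Extra block for this slot is absent
    }
}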
void applyOperationFromOplog(const BSONObj& op) {
    LOG(6) << "applying op: " << op << endl;
    OpCounters* opCounters = &replOpCounters;
    const char *names[] = { KEY_STR_NS, KEY_STR_OP_NAME };
    BSONElement fields[2];
    op.getFields(2, names, fields);
    const char* ns = fields[0].valuestrsafe();
    const char* opType = fields[1].valuestrsafe();
    if (strcmp(opType, OP_STR_INSERT) == 0) {
        opCounters->gotInsert();
        runInsertFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_UPDATE) == 0) {
        opCounters->gotUpdate();
        runUpdateFromOplog(ns, op, false);
    }
    else if (strcmp(opType, OP_STR_DELETE) == 0) {
        opCounters->gotDelete();
        runDeleteFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_COMMAND) == 0) {
        opCounters->gotCommand();
        runCommandFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_COMMENT) == 0) {
        // no-op
    }
    else if (strcmp(opType, OP_STR_CAPPED_INSERT) == 0) {
        opCounters->gotInsert();
        runCappedInsertFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_CAPPED_DELETE) == 0) {
        opCounters->gotDelete();
        runCappedDeleteFromOplog(ns, op);
    }
    else {
        throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
    }
}
void rollbackOperationFromOplog(const BSONObj& op) {
    LOG(6) << "rolling back op: " << op << endl;
    const char *names[] = { KEY_STR_NS, KEY_STR_OP_NAME };
    BSONElement fields[2];
    op.getFields(2, names, fields);
    const char* ns = fields[0].valuestrsafe();
    const char* opType = fields[1].valuestrsafe();
    if (strcmp(opType, OP_STR_INSERT) == 0) {
        runRollbackInsertFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_UPDATE) == 0) {
        runUpdateFromOplog(ns, op, true);
    }
    else if (strcmp(opType, OP_STR_DELETE) == 0) {
        // the rollback of a delete is to do the insert
        runInsertFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_COMMAND) == 0) {
        rollbackCommandFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_COMMENT) == 0) {
        // no-op
    }
    else if (strcmp(opType, OP_STR_CAPPED_INSERT) == 0) {
        runCappedDeleteFromOplog(ns, op);
    }
    else if (strcmp(opType, OP_STR_CAPPED_DELETE) == 0) {
        runCappedInsertFromOplog(ns, op);
    }
    else {
        throw MsgAssertionException( 16795 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
    }
}
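The two oplog dispatchers above throw MsgAssertionException (codes 14825 and 16795) on an unknown opType, which lets a caller unwind whatever it has already applied. A minimal sketch of such a caller, assuming only the two functions shown here; applyBatch itself is hypothetical.

// Hypothetical batch applier: on failure, roll back the entries applied so far
// and rethrow so the caller still sees the assertion.
void applyBatch(const std::vector<BSONObj>& ops) {
    size_t applied = 0;
    try {
        for (; applied < ops.size(); ++applied) {
            applyOperationFromOplog(ops[applied]);
        }
    }
    catch (const MsgAssertionException& e) {
        log() << "apply failed: " << e.what() << ", rolling back " << applied << " ops" << endl;
        while (applied > 0) {
            rollbackOperationFromOplog(ops[--applied]);
        }
        throw;
    }
}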
void rethrowOnError( const shared_ptr< QueryOp > &op ) {
    // If all plans have erred out, assert.
    if ( op->error() ) {
        throw MsgAssertionException( op->exception() );
    }
}
void msgasserted(const char *msg) {
    log() << "Assertion: " << msg << '\n';
    lastAssert[2].set(msg, getDbContext().c_str(), "", 0);
    raiseError(msg && *msg ? msg : "massert failure");
    throw MsgAssertionException(msg);
}
void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;
    if( logLevel >= 6 )
        log() << "applying op: " << op << endl;
    assertInWriteLock();
    OpDebug debug;
    BSONObj o = op.getObjectField("o");
    const char *ns = op.getStringField("ns");
    // operation type -- see logOp() comments for types
    const char *opType = op.getStringField("op");
    if ( *opType == 'i' ) {
        opCounters->gotInsert();
        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert.
            // if index already exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id.  This will be very slow. */
                Timer t;
                updateObjects(ns, o, o, true, false, false , debug );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                BSONObjBuilder b;
                b.append(_id);

                /* erh 10/16/2009 - this is probably not relevant any more since its auto-created,
                   but not worth removing */
                RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                /* todo : it may be better to do an insert here, and then catch the dup key exception
                   and do update then. very few upserts will not be inserts... */
                updateObjects(ns, o, b.done(), true, false, false , debug );
            }
        }
    }
    else if ( *opType == 'u' ) {
        opCounters->gotUpdate();
        RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
        updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ op.getBoolField("b"), /*multi*/ false, /*logop*/ false , debug );
    }
    else if ( *opType == 'd' ) {
        opCounters->gotDelete();
        if ( opType[1] == 0 )
            deleteObjects(ns, o, op.getBoolField("b"));
        else
            assert( opType[1] == 'b' ); // "db" advertisement
    }
    else if ( *opType == 'n' ) {
        // no op
    }
    else if ( *opType == 'c' ) {
        opCounters->gotCommand();
        BufBuilder bb;
        BSONObjBuilder ob;
        _runCommands(ns, o, bb, ob, true, 0);
    }
    else {
        stringstream ss;
        ss << "unknown opType [" << opType << "]";
        throw MsgAssertionException( 13141 , ss.str() );
    }
}
void applyOperation_inlock(const BSONObj& op , bool fromRepl ) {
    assertInWriteLock();
    LOG(6) << "applying op: " << op << endl;
    OpCounters * opCounters = fromRepl ? &replOpCounters : &globalOpCounters;
    const char *names[] = { "o", "ns", "op", "b" };
    BSONElement fields[4];
    op.getFields(4, names, fields);
    BSONObj o;
    if( fields[0].isABSONObj() )
        o = fields[0].embeddedObject();
    const char *ns = fields[1].valuestrsafe();
    // operation type -- see logOp() comments for types
    const char *opType = fields[2].valuestrsafe();
    if ( *opType == 'i' ) {
        opCounters->gotInsert();
        const char *p = strchr(ns, '.');
        if ( p && strcmp(p, ".system.indexes") == 0 ) {
            // updates aren't allowed for indexes -- so we will do a regular insert.
            // if index already exists, that is ok.
            theDataFileMgr.insert(ns, (void*) o.objdata(), o.objsize());
        }
        else {
            // do upserts for inserts as we might get replayed more than once
            OpDebug debug;
            BSONElement _id;
            if( !o.getObjectID(_id) ) {
                /* No _id.  This will be very slow. */
                Timer t;
                updateObjects(ns, o, o, true, false, false, debug );
                if( t.millis() >= 2 ) {
                    RARELY OCCASIONALLY log() << "warning, repl doing slow updates (no _id field) for " << ns << endl;
                }
            }
            else {
                /* erh 10/16/2009 - this is probably not relevant any more since its auto-created,
                   but not worth removing */
                RARELY ensureHaveIdIndex(ns); // otherwise updates will be slow

                /* todo : it may be better to do an insert here, and then catch the dup key exception
                   and do update then. very few upserts will not be inserts... */
                BSONObjBuilder b;
                b.append(_id);
                updateObjects(ns, o, b.done(), true, false, false , debug );
            }
        }
    }
    else if ( *opType == 'u' ) {
        opCounters->gotUpdate();
        RARELY ensureHaveIdIndex(ns); // otherwise updates will be super slow
        OpDebug debug;
        updateObjects(ns, o, op.getObjectField("o2"), /*upsert*/ fields[3].booleanSafe(), /*multi*/ false, /*logop*/ false , debug );
    }
    else if ( *opType == 'd' ) {
        opCounters->gotDelete();
        if ( opType[1] == 0 )
            deleteObjects(ns, o, /*justOne*/ fields[3].booleanSafe());
        else
            assert( opType[1] == 'b' ); // "db" advertisement
    }
    else if ( *opType == 'c' ) {
        opCounters->gotCommand();
        BufBuilder bb;
        BSONObjBuilder ob;
        _runCommands(ns, o, bb, ob, true, 0);
    }
    else if ( *opType == 'n' ) {
        // no op
    }
    else {
        throw MsgAssertionException( 14825 , ErrorMsg("error in applyOperation : unknown opType ", *opType) );
    }
}
Status storeMongoShellOptions(const moe::Environment& params, const std::vector<std::string>& args) {
    if (params.count("quiet")) {
        mongo::serverGlobalParams.quiet = true;
    }
#ifdef MONGO_SSL
    Status ret = storeSSLClientOptions(params);
    if (!ret.isOK()) {
        return ret;
    }
#endif
    if (params.count("ipv6")) {
        mongo::enableIPv6();
    }
    if (params.count("verbose")) {
        logger::globalLogDomain()->setMinimumLoggedSeverity(logger::LogSeverity::Debug(1));
    }
    if (params.count("port")) {
        shellGlobalParams.port = params["port"].as<string>();
    }
    if (params.count("host")) {
        shellGlobalParams.dbhost = params["host"].as<string>();
    }
    if (params.count("eval")) {
        shellGlobalParams.script = params["eval"].as<string>();
    }
    if (params.count("username")) {
        shellGlobalParams.username = params["username"].as<string>();
    }
    if (params.count("password")) {
        shellGlobalParams.usingPassword = true;
        shellGlobalParams.password = params["password"].as<string>();
    }
    if (params.count("authenticationDatabase")) {
        shellGlobalParams.authenticationDatabase = params["authenticationDatabase"].as<string>();
    }
    if (params.count("authenticationMechanism")) {
        shellGlobalParams.authenticationMechanism = params["authenticationMechanism"].as<string>();
    }
    if (params.count("shell")) {
        shellGlobalParams.runShell = true;
    }
    if (params.count("nodb")) {
        shellGlobalParams.nodb = true;
    }
    if (params.count("norc")) {
        shellGlobalParams.norc = true;
    }
    if (params.count("files")) {
        shellGlobalParams.files = params["files"].as< vector<string> >();
    }
    if (params.count("nokillop")) {
        mongo::shell_utils::_nokillop = true;
    }
    if (params.count("autokillop")) {
        shellGlobalParams.autoKillOp = true;
    }
    if (params.count("useLegacyWriteOps")) {
        shellGlobalParams.writeMode = "legacy";
    }
    if (params.count("writeMode")) {
        std::string mode = params["writeMode"].as<string>();
        if (mode != "commands" && mode != "legacy" && mode != "compatibility") {
            throw MsgAssertionException(17396,
                                        mongoutils::str::stream() << "Unknown writeMode option: " << mode);
        }
        shellGlobalParams.writeMode = mode;
    }

    /* This is a bit confusing, here are the rules:
     *
     * if nodb is set then all positional parameters are files
     * otherwise the first positional parameter might be a dbaddress, but
     * only if one of these conditions is met:
     *   - it contains no '.' after the last appearance of '\' or '/'
     *   - it doesn't end in '.js' and it doesn't specify a path to an existing file
     */
    if (params.count("dbaddress")) {
        string dbaddress = params["dbaddress"].as<string>();
        if (shellGlobalParams.nodb) {
            shellGlobalParams.files.insert( shellGlobalParams.files.begin(), dbaddress );
        }
        else {
            string basename = dbaddress.substr( dbaddress.find_last_of( "/\\" ) + 1 );
            if (basename.find_first_of( '.' ) == string::npos ||
                (basename.find(".js", basename.size() - 3) == string::npos &&
                 !::mongo::shell_utils::fileExists(dbaddress))) {
                shellGlobalParams.url = dbaddress;
            }
            else {
                shellGlobalParams.files.insert( shellGlobalParams.files.begin(), dbaddress );
            }
        }
    }

    if ( shellGlobalParams.url == "*" ) {
        StringBuilder sb;
        sb << "ERROR: " << "\"*\" is an invalid db address";
        sb << getMongoShellHelp(args[0], moe::startupOptions);
        return Status(ErrorCodes::BadValue, sb.str());
    }

    return Status::OK();
}
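The dbaddress rules spelled out in the comment inside storeMongoShellOptions read more easily as a small predicate. The sketch below restates the same checks; looksLikeDbAddress is a hypothetical helper for illustration, not a function in the shell source.

// Hypothetical restatement of the rule above: treat the argument as a db
// address when its basename contains no '.', or when it neither ends in ".js"
// nor names an existing file.
static bool looksLikeDbAddress(const std::string& arg) {
    std::string basename = arg.substr(arg.find_last_of("/\\") + 1);
    if (basename.find_first_of('.') == std::string::npos)
        return true;
    bool endsInJs = basename.size() >= 3 &&
                    basename.compare(basename.size() - 3, 3, ".js") == 0;
    return !endsInJs && !::mongo::shell_utils::fileExists(arg);
}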
/** Forward an exception when the runner errs out. */
void QueryOptimizerCursorImpl::rethrowOnError( const shared_ptr< QueryPlanRunner > &runner ) {
    if ( runner->error() ) {
        throw MsgAssertionException( runner->exception() );
    }
}
void msgassertedNoTrace(int msgid, const char *msg) {
    assertionCount.condrollover( ++assertionCount.warning );
    log() << "Assertion: " << msgid << ":" << msg << endl;
    raiseError(msgid, msg && *msg ? msg : "massert failure");
    throw MsgAssertionException(msgid, msg);
}
NOINLINE_DECL void msgassertedNoTrace(int msgid, const char* msg) {
    assertionCount.condrollover(++assertionCount.warning);
    log() << "Assertion: " << msgid << ":" << msg << endl;
    throw MsgAssertionException(msgid, msg);
}
NOINLINE_DECL void msgassertedNoTrace(int msgid, const char *msg) {
    log() << "Assertion: " << msgid << ":" << msg << endl;
    throw MsgAssertionException(msgid, msg);
}
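Several of the call sites above (the shell options code, for instance) report failures as a Status rather than letting exceptions escape. Below is a hedged sketch of bridging the two styles, assuming only that MsgAssertionException exposes the standard what() accessor; tryRun and its use of ErrorCodes::BadValue are illustrative, not taken from the source.

// Hypothetical adapter: run a callable and convert an assertion into a Status.
Status tryRun(const std::function<void()>& work) {
    try {
        work();
        return Status::OK();
    }
    catch (const MsgAssertionException& e) {
        return Status(ErrorCodes::BadValue, e.what());
    }
}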