void testRegex() {
    BSONObjBuilder b;
    b.appendRegex("x", "foo");
    BSONObj o = b.done();

    BSONObjBuilder c;
    c.appendRegex("x", "goo");
    BSONObj p = c.done();

    // Regexes with different patterns compare unequal, and "foo" sorts before "goo".
    assert( !o.woEqual( p ) );
    assert( o.woCompare( p ) < 0 );

    {
        // An anchored pattern of plain characters yields its literal prefix.
        BSONObjBuilder b;
        b.appendRegex("r", "^foo");
        BSONObj o = b.done();
        assert( o.firstElement().simpleRegex() == "foo" );
    }
    {
        // '?' makes the very first character optional, so no prefix is extractable.
        BSONObjBuilder b;
        b.appendRegex("r", "^f?oo");
        BSONObj o = b.done();
        assert( o.firstElement().simpleRegex() == "" );
    }
    {
        // Only the characters before the optional 'z' form the prefix.
        BSONObjBuilder b;
        b.appendRegex("r", "^fz?oo");
        BSONObj o = b.done();
        assert( o.firstElement().simpleRegex() == "f" );
    }
}
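// The three assertions above pin down the prefix-extraction rule. Below is a
// minimal, hypothetical sketch of that rule (this is NOT MongoDB's simpleRegex(),
// which handles far more regex syntax): only a '^'-anchored pattern yields a
// prefix, and a character followed by '?' ends the prefix before that character.
#include <cassert>
#include <cctype>
#include <string>

std::string simplePrefix(const std::string& re) {
    if (re.empty() || re[0] != '^')
        return "";                                       // unanchored: no usable prefix
    std::string prefix;
    for (size_t i = 1; i < re.size(); ++i) {
        if (i + 1 < re.size() && re[i + 1] == '?')
            break;                                       // next char makes this one optional
        if (!isalnum(static_cast<unsigned char>(re[i])))
            break;                                       // stop at any metacharacter
        prefix += re[i];
    }
    return prefix;
}

int main() {
    assert(simplePrefix("^foo") == "foo");
    assert(simplePrefix("^f?oo") == "");
    assert(simplePrefix("^fz?oo") == "f");
}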
// static
void IndexBoundsBuilder::translateRegex(const RegexMatchExpression* rme,
                                        OrderedIntervalList* oilOut,
                                        bool* exact) {
    const string start = simpleRegex(rme->getString().c_str(), rme->getFlags().c_str(), exact);

    // QLOG() << "regex bounds start is " << start << endl;
    // Note that 'exact' is set by simpleRegex above.
    if (!start.empty()) {
        // A literal prefix was extracted: scan [start, start with last byte incremented).
        string end = start;
        end[end.size() - 1]++;
        oilOut->intervals.push_back(makeRangeInterval(start, end, true, false));
    }
    else {
        // No usable prefix: fall back to scanning the entire string type range.
        BSONObjBuilder bob;
        bob.appendMinForType("", String);
        bob.appendMaxForType("", String);
        BSONObj dataObj = bob.obj();
        verify(dataObj.isOwned());
        oilOut->intervals.push_back(makeRangeInterval(dataObj, true, false));
    }

    // Regexes are after strings.
    BSONObjBuilder bob;
    bob.appendRegex("", rme->getString(), rme->getFlags());
    oilOut->intervals.push_back(makePointInterval(bob.obj()));
}
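// Why incrementing the last byte works: [prefix, prefix-with-last-byte+1) is a
// half-open range containing exactly the strings that start with the prefix.
// A standalone illustration using plain std::string comparison, assuming a
// byte-wise collation:
#include <cassert>
#include <string>

int main() {
    std::string start = "foo";
    std::string end = start;
    end[end.size() - 1]++;                       // "foo" -> "fop"
    assert(end == "fop");
    assert(std::string("foo")    >= start && std::string("foo")    < end);
    assert(std::string("foobar") >= start && std::string("foobar") < end);
    assert(!(std::string("fop") < end));         // first string past the prefix range
}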
bool DBConfig::_load() {
    ScopedDbConnection conn( configServer.modelServer() );

    BSONObj o = conn->findOne( ShardNS::database , BSON( "_id" << _name ) );
    if ( o.isEmpty() ) {
        conn.done();
        return false;
    }

    unserialize( o );

    // Find all collection entries whose _id starts with "<dbname>." (dot escaped,
    // so the pattern can't accidentally match other database names).
    BSONObjBuilder b;
    b.appendRegex( "_id" , (string)"^" + _name + "\\." );

    auto_ptr<DBClientCursor> cursor = conn->query( ShardNS::collection , b.obj() );
    assert( cursor.get() );
    while ( cursor->more() ) {
        BSONObj o = cursor->next();
        _collections[o["_id"].String()] = CollectionInfo( o );
    }

    conn.done();
    return true;
}
TEST( MatchExpressionParserLeafTest, NotRegex1 ) {
    BSONObjBuilder b;
    b.appendRegex( "$not", "abc", "i" );
    BSONObj query = BSON( "x" << b.obj() );
    StatusWithMatchExpression result = MatchExpressionParser::parse( query );
    ASSERT_TRUE( result.isOK() );

    ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "abc" ) ) );
    ASSERT( !result.getValue()->matchesBSON( BSON( "x" << "ABC" ) ) );
    ASSERT( result.getValue()->matchesBSON( BSON( "x" << "AC" ) ) );
}
TEST( ExpressionParserArrayTest, AllBadRegexArg ) {
    // A pattern longer than the maximum allowed regex length must be rejected.
    string tooLargePattern( 50 * 1000, 'z' );
    BSONObjBuilder allArray;
    allArray.appendRegex( "0", tooLargePattern, "" );

    BSONObjBuilder operand;
    operand.appendArray( "$all", allArray.obj() );

    BSONObj query = BSON( "x" << operand.obj() );
    StatusWithExpression result = ExpressionParser::parse( query );
    ASSERT_FALSE( result.isOK() );
}
void ReplSource::syncToTailOfRemoteLog() {
    string _ns = ns();
    BSONObjBuilder b;
    if ( !only.empty() ) {
        b.appendRegex("ns", string("^") + only);
    }
    BSONObj last = oplogReader.findOne( _ns.c_str(), Query( b.done() ).sort( BSON( "$natural" << -1 ) ) );
    if ( !last.isEmpty() ) {
        BSONElement ts = last.getField( "ts" );
        massert( 10386 , "non Date ts found: " + last.toString(), ts.type() == Date || ts.type() == Timestamp );
        syncedTo = OpTime( ts.date() );
    }
}
void run() {
    Scope * s = globalScriptEngine->createScope();

    { // date
        BSONObj o;
        {
            BSONObjBuilder b;
            b.appendDate( "d" , 123456789 );
            o = b.obj();
        }
        s->setObject( "x" , o );

        s->invoke( "return x.d.getTime() != 12;" , BSONObj() );
        ASSERT_EQUALS( true, s->getBoolean( "return" ) );

        s->invoke( "z = x.d.getTime();" , BSONObj() );
        ASSERT_EQUALS( 123456789 , s->getNumber( "z" ) );

        s->invoke( "z = { z : x.d }" , BSONObj() );
        BSONObj out = s->getObject( "z" );
        ASSERT( out["z"].type() == Date );
    }

    { // regex
        BSONObj o;
        {
            BSONObjBuilder b;
            b.appendRegex( "r" , "^a" , "i" );
            o = b.obj();
        }
        s->setObject( "x" , o );

        s->invoke( "z = x.r.test( 'b' );" , BSONObj() );
        ASSERT_EQUALS( false , s->getBoolean( "z" ) );

        s->invoke( "z = x.r.test( 'a' );" , BSONObj() );
        ASSERT_EQUALS( true , s->getBoolean( "z" ) );

        s->invoke( "z = x.r.test( 'ba' );" , BSONObj() );
        ASSERT_EQUALS( false , s->getBoolean( "z" ) );

        s->invoke( "z = { a : x.r };" , BSONObj() );
        BSONObj out = s->getObject("z");
        ASSERT_EQUALS( (string)"^a" , out["a"].regex() );
        ASSERT_EQUALS( (string)"i" , out["a"].regexFlags() );
    }

    delete s;
}
TEST(MatchExpressionParserLeafTest, NotRegex1) {
    BSONObjBuilder b;
    b.appendRegex("$not", "abc", "i");
    BSONObj query = BSON("x" << b.obj());
    boost::intrusive_ptr<ExpressionContextForTest> expCtx(new ExpressionContextForTest());
    StatusWithMatchExpression result = MatchExpressionParser::parse(query, expCtx);
    ASSERT_TRUE(result.isOK());

    ASSERT(!result.getValue()->matchesBSON(BSON("x" << "abc")));
    ASSERT(!result.getValue()->matchesBSON(BSON("x" << "ABC")));
    ASSERT(result.getValue()->matchesBSON(BSON("x" << "AC")));
}
TEST(MatchExpressionParserLeafTest, NotRegex1) {
    BSONObjBuilder b;
    b.appendRegex("$not", "abc", "i");
    BSONObj query = BSON("x" << b.obj());
    const CollatorInterface* collator = nullptr;
    StatusWithMatchExpression result =
        MatchExpressionParser::parse(query, ExtensionsCallbackDisallowExtensions(), collator);
    ASSERT_TRUE(result.isOK());

    ASSERT(!result.getValue()->matchesBSON(BSON("x" << "abc")));
    ASSERT(!result.getValue()->matchesBSON(BSON("x" << "ABC")));
    ASSERT(result.getValue()->matchesBSON(BSON("x" << "AC")));
}
Status CatalogManagerReplicaSet::getCollections(OperationContext* txn,
                                                const std::string* dbName,
                                                std::vector<CollectionType>* collections,
                                                OpTime* opTime) {
    BSONObjBuilder b;
    if (dbName) {
        invariant(!dbName->empty());
        b.appendRegex(CollectionType::fullNs(),
                      string(str::stream() << "^" << pcrecpp::RE::QuoteMeta(*dbName) << "\\."));
    }

    auto configShard = grid.shardRegistry()->getShard(txn, "config");
    auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    auto findStatus = _exhaustiveFindOnConfig(readHost.getValue(),
                                              NamespaceString(CollectionType::ConfigNS),
                                              b.obj(),
                                              BSONObj(),
                                              boost::none);  // no limit
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docsOpTimePair = findStatus.getValue();

    for (const BSONObj& obj : docsOpTimePair.value) {
        const auto collectionResult = CollectionType::fromBSON(obj);
        if (!collectionResult.isOK()) {
            collections->clear();
            return {ErrorCodes::FailedToParse,
                    str::stream() << "error while parsing " << CollectionType::ConfigNS
                                  << " document: " << obj << " : "
                                  << collectionResult.getStatus().toString()};
        }

        collections->push_back(collectionResult.getValue());
    }

    if (opTime) {
        *opTime = docsOpTimePair.opTime;
    }

    return Status::OK();
}
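// The "^<db>\\." pattern above anchors on the database name and escapes the
// namespace dot. A minimal std::regex demonstration of why the escaping matters
// (illustrative only; the server itself uses pcrecpp, not std::regex):
#include <cassert>
#include <regex>

int main() {
    std::regex escaped("^test\\.");
    std::regex unescaped("^test.");                         // '.' matches any character
    assert( std::regex_search("test.users",  escaped));
    assert(!std::regex_search("tests.users", escaped));
    assert( std::regex_search("tests.users", unescaped));  // false positive
}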
Status CatalogManagerReplicaSet::_checkDbDoesNotExist(OperationContext* txn,
                                                      const string& dbName,
                                                      DatabaseType* db) {
    BSONObjBuilder queryBuilder;
    queryBuilder.appendRegex(
        DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");

    const auto configShard = grid.shardRegistry()->getShard(txn, "config");
    const auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    auto findStatus = _exhaustiveFindOnConfig(readHost.getValue(),
                                              NamespaceString(DatabaseType::ConfigNS),
                                              queryBuilder.obj(),
                                              BSONObj(),
                                              1);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docs = findStatus.getValue().value;
    if (docs.empty()) {
        return Status::OK();
    }

    BSONObj dbObj = docs.front();
    std::string actualDbName = dbObj[DatabaseType::name()].String();
    if (actualDbName == dbName) {
        if (db) {
            auto parseDBStatus = DatabaseType::fromBSON(dbObj);
            if (!parseDBStatus.isOK()) {
                return parseDBStatus.getStatus();
            }

            *db = parseDBStatus.getValue();
        }

        return Status(ErrorCodes::NamespaceExists,
                      str::stream() << "database " << dbName << " already exists");
    }

    return Status(ErrorCodes::DatabaseDifferCase,
                  str::stream() << "can't have 2 databases that just differ on case "
                                << " have: " << actualDbName
                                << " want to add: " << dbName);
}
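// What the "^...$" + "i" query above matches, in isolation. pcrecpp::RE::QuoteMeta
// escapes regex metacharacters; escapeRegex below is a hypothetical stand-in that
// plays the same role for this sketch.
#include <cassert>
#include <cctype>
#include <string>

std::string escapeRegex(const std::string& s) {
    std::string out;
    for (char c : s) {
        if (!isalnum(static_cast<unsigned char>(c)) && c != '_')
            out += '\\';                  // escape anything that could be a metacharacter
        out += c;
    }
    return out;
}

int main() {
    // Anchored on both ends and queried with the "i" flag: "^mydb$" also matches
    // "MyDB", which is exactly how a database differing only in case is detected.
    std::string pattern = "^" + escapeRegex("my.db") + "$";
    assert(pattern == "^my\\.db$");
}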
TEST( ExpressionParserArrayTest, AllRegex2 ) {
    BSONObjBuilder allArray;
    allArray.appendRegex( "0", "^a", "" );
    allArray.append( "1", "abc" );
    BSONObjBuilder all;
    all.appendArray( "$all", allArray.obj() );
    BSONObj query = BSON( "a" << all.obj() );

    StatusWithExpression result = ExpressionParser::parse( query );
    ASSERT_TRUE( result.isOK() );

    BSONObj notMatchFirst = BSON( "a" << "ax" );
    BSONObj matchesBoth = BSON( "a" << "abc" );

    ASSERT( !result.getValue()->matchesSingleElement( notMatchFirst[ "a" ] ) );
    ASSERT( result.getValue()->matchesSingleElement( matchesBoth[ "a" ] ) );
}
StatusWith<std::vector<CollectionType>> ShardingCatalogClientImpl::getCollections(
    OperationContext* opCtx,
    const std::string* dbName,
    OpTime* opTime,
    repl::ReadConcernLevel readConcernLevel) {
    BSONObjBuilder b;
    if (dbName) {
        invariant(!dbName->empty());
        b.appendRegex(CollectionType::fullNs(),
                      string(str::stream() << "^" << pcrecpp::RE::QuoteMeta(*dbName) << "\\."));
    }

    auto findStatus = _exhaustiveFindOnConfig(opCtx,
                                              kConfigReadSelector,
                                              readConcernLevel,
                                              CollectionType::ConfigNS,
                                              b.obj(),
                                              BSONObj(),
                                              boost::none);  // no limit
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docsOpTimePair = findStatus.getValue();

    std::vector<CollectionType> collections;
    for (const BSONObj& obj : docsOpTimePair.value) {
        const auto collectionResult = CollectionType::fromBSON(obj);
        if (!collectionResult.isOK()) {
            return {ErrorCodes::FailedToParse,
                    str::stream() << "error while parsing " << CollectionType::ConfigNS.ns()
                                  << " document: " << obj << " : "
                                  << collectionResult.getStatus().toString()};
        }

        collections.push_back(collectionResult.getValue());
    }

    if (opTime) {
        *opTime = docsOpTimePair.opTime;
    }

    return collections;
}
StatusWith<std::string> CatalogManagerReplicaSet::_generateNewShardName() const {
    const auto configShard = grid.shardRegistry()->getShard("config");
    const auto readHost = configShard->getTargeter()->findHost(kConfigReadSelector);
    if (!readHost.isOK()) {
        return readHost.getStatus();
    }

    BSONObjBuilder shardNameRegex;
    shardNameRegex.appendRegex(ShardType::name(), "^shard");

    auto findStatus = grid.shardRegistry()->exhaustiveFind(readHost.getValue(),
                                                           NamespaceString(ShardType::ConfigNS),
                                                           shardNameRegex.obj(),
                                                           BSON(ShardType::name() << -1),
                                                           1);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docs = findStatus.getValue();

    int count = 0;
    if (!docs.empty()) {
        const auto shardStatus = ShardType::fromBSON(docs.front());
        if (!shardStatus.isOK()) {
            return shardStatus.getStatus();
        }

        std::istringstream is(shardStatus.getValue().getName().substr(5));
        is >> count;
        count++;
    }

    // TODO fix so that we can have more than 10000 automatically generated shard names
    if (count < 9999) {
        std::stringstream ss;
        ss << "shard" << std::setfill('0') << std::setw(4) << count;
        return ss.str();
    }

    return Status(ErrorCodes::OperationFailed, "unable to generate new shard name");
}
StatusWith<std::string> ShardingCatalogManagerImpl::_generateNewShardName(OperationContext* txn) {
    BSONObjBuilder shardNameRegex;
    shardNameRegex.appendRegex(ShardType::name(), "^shard");

    auto findStatus = Grid::get(txn)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig(
        txn,
        kConfigReadSelector,
        repl::ReadConcernLevel::kMajorityReadConcern,
        NamespaceString(ShardType::ConfigNS),
        shardNameRegex.obj(),
        BSON(ShardType::name() << -1),
        1);
    if (!findStatus.isOK()) {
        return findStatus.getStatus();
    }

    const auto& docs = findStatus.getValue().docs;

    int count = 0;
    if (!docs.empty()) {
        const auto shardStatus = ShardType::fromBSON(docs.front());
        if (!shardStatus.isOK()) {
            return shardStatus.getStatus();
        }

        std::istringstream is(shardStatus.getValue().getName().substr(5));
        is >> count;
        count++;
    }

    // TODO fix so that we can have more than 10000 automatically generated shard names
    if (count < 9999) {
        std::stringstream ss;
        ss << "shard" << std::setfill('0') << std::setw(4) << count;
        return ss.str();
    }

    return Status(ErrorCodes::OperationFailed, "unable to generate new shard name");
}
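// How the next shard name is derived, in isolation: take the numeric suffix of
// the highest existing "shardNNNN" document (the query above matches "^shard",
// sorts descending, and limits to 1), increment it, and zero-pad to four digits.
// A self-contained sketch with an assumed highest name:
#include <cassert>
#include <iomanip>
#include <sstream>
#include <string>

int main() {
    std::string highest = "shard0042";         // hypothetical result of the config query
    int count = 0;
    std::istringstream is(highest.substr(5));  // strip the "shard" prefix
    is >> count;
    count++;                                   // next free number
    std::stringstream ss;
    ss << "shard" << std::setfill('0') << std::setw(4) << count;
    assert(ss.str() == "shard0043");
}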
// Convert a JS-style "/pattern/flags" literal string into a BSON regex field.
void appendRegex( BSONObjBuilder& b , const string& name , string s ) {
    assert( s[0] == '/' );
    s = s.substr(1);
    string::size_type end = s.rfind( '/' );  // rfind: a '/' inside the pattern is preserved
    b.appendRegex( name.c_str() , s.substr( 0 , end ).c_str() , s.substr( end + 1 ).c_str() );
}
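// The helper above relies on rfind: searching for the *last* '/' means a '/'
// appearing inside the pattern body stays part of the pattern rather than being
// mistaken for the flags delimiter. A standalone illustration of the split:
#include <cassert>
#include <string>

int main() {
    std::string s = "/^a/b/i";                  // hypothetical input: pattern "^a/b", flags "i"
    s = s.substr(1);                            // drop the leading '/'
    std::string::size_type end = s.rfind('/');
    std::string pattern = s.substr(0, end);
    std::string flags = s.substr(end + 1);
    assert(pattern == "^a/b");
    assert(flags == "i");
}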
void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name ,
                       const string sname , v8::Handle<v8::Value> value ) {
    if ( value->IsString() ) {
        b.append( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }

    if ( value->IsFunction() ) {
        b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }

    if ( value->IsNumber() ) {
        if ( value->IsInt32() )
            b.append( sname.c_str(), int( value->ToInt32()->Value() ) );
        else
            b.append( sname.c_str() , value->ToNumber()->Value() );
        return;
    }

    if ( value->IsArray() ) {
        BSONObj sub = v8ToMongo( value->ToObject() );
        b.appendArray( sname.c_str() , sub );
        return;
    }

    if ( value->IsDate() ) {
        b.appendDate( sname.c_str() , Date_t(v8::Date::Cast( *value )->NumberValue()) );
        return;
    }

    if ( value->IsExternal() )
        return;

    if ( value->IsObject() ) {
        // The user could potentially modify the fields of these special objects,
        // wreaking havoc when we attempt to reinterpret them. Not doing any validation
        // for now...
        Local< v8::Object > obj = value->ToObject();
        if ( obj->InternalFieldCount() && obj->GetInternalField( 0 )->IsNumber() ) {
            // NOTE Uint32's Value() gave me a linking error, so going with this instead
            switch( obj->GetInternalField( 0 )->ToInt32()->Value() ) {
            case Timestamp:
                b.appendTimestamp( sname.c_str(),
                                   Date_t( v8::Date::Cast( *obj->Get( v8::String::New( "time" ) ) )->NumberValue() ),
                                   obj->Get( v8::String::New( "i" ) )->ToInt32()->Value() );
                return;
            case MinKey:
                b.appendMinKey( sname.c_str() );
                return;
            case MaxKey:
                b.appendMaxKey( sname.c_str() );
                return;
            default:
                assert( "invalid internal field" == 0 );
            }
        }
        string s = toSTLString( value );
        if ( s.size() && s[0] == '/' ) {
            // "/pattern/flags" string form of a regex; rfind keeps any '/' inside
            // the pattern itself.
            s = s.substr( 1 );
            string r = s.substr( 0 , s.rfind( "/" ) );
            string o = s.substr( s.rfind( "/" ) + 1 );
            b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
        }
        else if ( value->ToObject()->GetPrototype()->IsObject() &&
                  value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( v8::String::New( "isObjectId" ) ) ) {
            OID oid;
            oid.init( toSTLString( value ) );
            b.appendOID( sname.c_str() , &oid );
        }
        else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__NumberLong" ) ).IsEmpty() ) {
            // TODO might be nice to potentially speed this up with an indexed internal
            // field, but I don't yet know how to use an ObjectTemplate with a
            // constructor.
            unsigned long long val =
                ( (unsigned long long)( value->ToObject()->Get( v8::String::New( "top" ) )->ToInt32()->Value() ) << 32 ) +
                (unsigned)( value->ToObject()->Get( v8::String::New( "bottom" ) )->ToInt32()->Value() );
            b.append( sname.c_str(), (long long)val );
        }
        else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__DBPointer" ) ).IsEmpty() ) {
            OID oid;
            oid.init( toSTLString( value->ToObject()->Get( v8::String::New( "id" ) ) ) );
            string ns = toSTLString( value->ToObject()->Get( v8::String::New( "ns" ) ) );
            b.appendDBRef( sname.c_str(), ns.c_str(), oid );
        }
        else if ( !value->ToObject()->GetHiddenValue( v8::String::New( "__BinData" ) ).IsEmpty() ) {
            int len = obj->Get( v8::String::New( "len" ) )->ToInt32()->Value();
            v8::String::Utf8Value data( obj->Get( v8::String::New( "data" ) ) );
            const char *dataArray = *data;
            assert( data.length() == len );
            b.appendBinData( sname.c_str(), len,
                             mongo::BinDataType( obj->Get( v8::String::New( "type" ) )->ToInt32()->Value() ),
                             dataArray );
        }
        else {
            BSONObj sub = v8ToMongo( value->ToObject() );
            b.append( sname.c_str() , sub );
        }
        return;
    }

    if ( value->IsBoolean() ) {
        b.appendBool( sname.c_str() , value->ToBoolean()->Value() );
        return;
    }
    else if ( value->IsUndefined() ) {
        b.appendUndefined( sname.c_str() );
        return;
    }
    else if ( value->IsNull() ) {
        b.appendNull( sname.c_str() );
        return;
    }

    cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
}
int run() {
    if (mongoFilesGlobalParams.command.size() == 0) {
        cerr << "ERROR: need command" << endl << endl;
        printHelp(cout);
        return -1;
    }

    GridFS g(conn(), toolGlobalParams.db);

    if (mongoFilesGlobalParams.command == "list") {
        BSONObjBuilder b;
        if (mongoFilesGlobalParams.gridFSFilename.size()) {
            // Anchored prefix match; QuoteMeta escapes regex metacharacters in the filename.
            b.appendRegex(
                "filename",
                (string)"^" + pcrecpp::RE::QuoteMeta(mongoFilesGlobalParams.gridFSFilename));
        }

        display(&g, b.obj());
        return 0;
    }

    if (mongoFilesGlobalParams.gridFSFilename.size() == 0) {
        cerr << "ERROR: need a filename" << endl << endl;
        printHelp(cout);
        return -1;
    }

    if (mongoFilesGlobalParams.command == "search") {
        // Unanchored: the argument is treated as a regex to search for.
        BSONObjBuilder b;
        b.appendRegex("filename", mongoFilesGlobalParams.gridFSFilename);
        display(&g, b.obj());
        return 0;
    }

    if (mongoFilesGlobalParams.command == "get") {
        GridFile f = g.findFile(mongoFilesGlobalParams.gridFSFilename);
        if (!f.exists()) {
            cerr << "ERROR: file not found" << endl;
            return -2;
        }

        f.write(mongoFilesGlobalParams.localFile);

        if (mongoFilesGlobalParams.localFile != "-") {
            toolInfoOutput() << "done write to: " << mongoFilesGlobalParams.localFile << std::endl;
        }

        return 0;
    }

    if (mongoFilesGlobalParams.command == "put") {
        BSONObj file = g.storeFile(mongoFilesGlobalParams.localFile,
                                   mongoFilesGlobalParams.gridFSFilename,
                                   mongoFilesGlobalParams.contentType);
        toolInfoOutput() << "added file: " << file << std::endl;

        if (mongoFilesGlobalParams.replace) {
            auto_ptr<DBClientCursor> cursor =
                conn().query(toolGlobalParams.db + ".fs.files",
                             BSON("filename" << mongoFilesGlobalParams.gridFSFilename
                                             << "_id" << NE << file["_id"]));
            while (cursor->more()) {
                BSONObj o = cursor->nextSafe();
                conn().remove(toolGlobalParams.db + ".fs.files", BSON("_id" << o["_id"]));
                conn().remove(toolGlobalParams.db + ".fs.chunks", BSON("_id" << o["_id"]));
                toolInfoOutput() << "removed file: " << o << std::endl;
            }
        }

        conn().getLastError();
        toolInfoOutput() << "done!" << std::endl;
        return 0;
    }

    if (mongoFilesGlobalParams.command == "delete") {
        g.removeFile(mongoFilesGlobalParams.gridFSFilename);
        conn().getLastError();
        toolInfoOutput() << "done!" << std::endl;
        return 0;
    }

    cerr << "ERROR: unknown command '" << mongoFilesGlobalParams.command << "'" << endl << endl;
    printHelp(cout);
    return -1;
}
/* slave: pull some data from the master's oplog
   note: not yet in db mutex at this point.
   @return -1 error
            0 ok, don't sleep
            1 ok, sleep
*/
int ReplSource::sync_pullOpLog(int& nApplied) {
    int okResultCode = 1;
    string ns = string("local.oplog.$") + sourceName();
    log(2) << "repl: sync_pullOpLog " << ns << " syncedTo:" << syncedTo.toStringLong() << '\n';

    bool tailing = true;
    oplogReader.tailCheck();

    bool initial = syncedTo.isNull();

    if ( !oplogReader.haveCursor() || initial ) {
        if ( initial ) {
            // Important to grab last oplog timestamp before listing databases.
            syncToTailOfRemoteLog();
            BSONObj info;
            bool ok = oplogReader.conn()->runCommand( "admin", BSON( "listDatabases" << 1 ), info );
            massert( 10389 , "Unable to get database list", ok );
            BSONObjIterator i( info.getField( "databases" ).embeddedObject() );
            while( i.moreWithEOO() ) {
                BSONElement e = i.next();
                if ( e.eoo() )
                    break;
                string name = e.embeddedObject().getField( "name" ).valuestr();
                if ( !e.embeddedObject().getBoolField( "empty" ) ) {
                    if ( name != "local" ) {
                        if ( only.empty() || only == name ) {
                            log( 2 ) << "adding to 'addDbNextPass': " << name << endl;
                            addDbNextPass.insert( name );
                        }
                    }
                }
            }
            dblock lk;
            save();
        }

        BSONObjBuilder q;
        q.appendDate("$gte", syncedTo.asDate());
        BSONObjBuilder query;
        query.append("ts", q.done());
        if ( !only.empty() ) {
            // note we may here skip a LOT of data table scanning, a lot of work for the master.
            query.appendRegex("ns", string("^") + only); // maybe append "\\." here?
        }
        BSONObj queryObj = query.done();
        // e.g. queryObj = { ts: { $gte: syncedTo } }
        oplogReader.tailingQuery(ns.c_str(), queryObj);
        tailing = false;
    }
    else {
        log(2) << "repl: tailing=true\n";
    }

    if( !oplogReader.haveCursor() ) {
        problem() << "repl: dbclient::query returns null (conn closed?)" << endl;
        oplogReader.resetConnection();
        return -1;
    }

    // show any deferred database creates from a previous pass
    {
        set<string>::iterator i = addDbNextPass.begin();
        if ( i != addDbNextPass.end() ) {
            BSONObjBuilder b;
            b.append("ns", *i + '.');
            b.append("op", "db");
            BSONObj op = b.done();
            sync_pullOpLog_applyOperation(op, false);
        }
    }

    if ( !oplogReader.more() ) {
        if ( tailing ) {
            log(2) << "repl: tailing & no new activity\n";
            if( oplogReader.awaitCapable() )
                okResultCode = 0; // don't sleep
        }
        else {
            log() << "repl: " << ns << " oplog is empty\n";
        }
        {
            dblock lk;
            save();
        }
        return okResultCode;
    }

    OpTime nextOpTime;
    {
        BSONObj op = oplogReader.next();
        BSONElement ts = op.getField("ts");
        if ( ts.type() != Date && ts.type() != Timestamp ) {
            string err = op.getStringField("$err");
            if ( !err.empty() ) {
                // 13051 is "tailable cursor requested on non capped collection"
                if (op.getIntField("code") == 13051) {
                    problem() << "trying to slave off of a non-master" << '\n';
                    massert( 13344 , "trying to slave off of a non-master", false );
                }
                else {
                    problem() << "repl: $err reading remote oplog: " + err << '\n';
                    massert( 10390 , "got $err reading remote oplog", false );
                }
            }
            else {
                problem() << "repl: bad object read from remote oplog: " << op.toString() << '\n';
                massert( 10391 , "repl: bad object read from remote oplog", false );
            }
        }

        nextOpTime = OpTime( ts.date() );
        log(2) << "repl: first op time received: " << nextOpTime.toString() << '\n';
        if ( initial ) {
            log(1) << "repl: initial run\n";
        }
        if( tailing ) {
            if( !( syncedTo < nextOpTime ) ) {
                log() << "repl ASSERTION failed : syncedTo < nextOpTime" << endl;
                log() << "repl syncTo: " << syncedTo.toStringLong() << endl;
                log() << "repl nextOpTime: " << nextOpTime.toStringLong() << endl;
                assert(false);
            }
            oplogReader.putBack( op ); // op will be processed in the loop below
            nextOpTime = OpTime(); // will reread the op below
        }
        else if ( nextOpTime != syncedTo ) { // didn't get what we queried for - error
            Nullstream& l = log();
            l << "repl: nextOpTime " << nextOpTime.toStringLong() << ' ';
            if ( nextOpTime < syncedTo )
                l << "<??";
            else
                l << ">";
            l << " syncedTo " << syncedTo.toStringLong() << '\n';
            log() << "repl: time diff: " << (nextOpTime.getSecs() - syncedTo.getSecs()) << "sec\n";
            log() << "repl: tailing: " << tailing << '\n';
            log() << "repl: data too stale, halting replication" << endl;
            replInfo = replAllDead = "data too stale halted replication";
            assert( syncedTo < nextOpTime );
            throw SyncException();
        }
        else {
            /* t == syncedTo, so the first op was applied previously or it is the first op
               of initial query and need not be applied. */
        }
    }

    // apply operations
    {
        int n = 0;
        time_t saveLast = time(0);
        while ( 1 ) {
            // we need "&& n" to assure we actually process at least one op to get a sync
            // point recorded in the first place.
            bool moreInitialSyncsPending = !addDbNextPass.empty() && n;

            if ( moreInitialSyncsPending || !oplogReader.more() ) {
                dblock lk;

                // NOTE aaron 2011-03-29 This block may be unnecessary, but I'm leaving it in
                // place to avoid changing timing behavior.
                {
                    dbtemprelease t;
                    if ( !moreInitialSyncsPending && oplogReader.more() ) {
                        continue;
                    }
                    // otherwise, break out of loop so we can set to completed or clone more dbs
                }

                if( oplogReader.awaitCapable() && tailing )
                    okResultCode = 0; // don't sleep

                syncedTo = nextOpTime;
                save(); // note how far we are synced up to now
                log() << "repl: applied " << n << " operations" << endl;
                nApplied = n;
                log() << "repl: end sync_pullOpLog syncedTo: " << syncedTo.toStringLong() << endl;
                break;
            }
            else {
            }

            OCCASIONALLY if( n > 0 && ( n > 100000 || time(0) - saveLast > 60 ) ) {
                // periodically note our progress, in case we are doing a lot of work and crash
                dblock lk;
                syncedTo = nextOpTime;
                // can't update local log ts since there are pending operations from our peer
                save();
                log() << "repl: checkpoint applied " << n << " operations" << endl;
                log() << "repl: syncedTo: " << syncedTo.toStringLong() << endl;
                saveLast = time(0);
                n = 0;
            }

            BSONObj op = oplogReader.next();

            unsigned b = replApplyBatchSize;
            bool justOne = b == 1;
            scoped_ptr<writelock> lk( justOne ? 0 : new writelock() );
            while( 1 ) {
                BSONElement ts = op.getField("ts");
                if( !( ts.type() == Date || ts.type() == Timestamp ) ) {
                    log() << "sync error: problem querying remote oplog record" << endl;
                    log() << "op: " << op.toString() << endl;
                    log() << "halting replication" << endl;
                    replInfo = replAllDead = "sync error: no ts found querying remote oplog record";
                    throw SyncException();
                }
                OpTime last = nextOpTime;
                nextOpTime = OpTime( ts.date() );
                if ( !( last < nextOpTime ) ) {
                    log() << "sync error: last applied optime at slave >= nextOpTime from master" << endl;
                    log() << " last: " << last.toStringLong() << endl;
                    log() << " nextOpTime: " << nextOpTime.toStringLong() << endl;
                    log() << " halting replication" << endl;
                    replInfo = replAllDead = "sync error last >= nextOpTime";
                    uassert( 10123 , "replication error last applied optime at slave >= nextOpTime from master", false );
                }
                if ( replSettings.slavedelay && ( unsigned( time( 0 ) ) < nextOpTime.getSecs() + replSettings.slavedelay ) ) {
                    assert( justOne );
                    oplogReader.putBack( op );
                    _sleepAdviceTime = nextOpTime.getSecs() + replSettings.slavedelay + 1;
                    dblock lk;
                    if ( n > 0 ) {
                        syncedTo = last;
                        save();
                    }
                    log() << "repl: applied " << n << " operations" << endl;
                    log() << "repl: syncedTo: " << syncedTo.toStringLong() << endl;
                    log() << "waiting until: " << _sleepAdviceTime << " to continue" << endl;
                    return okResultCode;
                }

                sync_pullOpLog_applyOperation(op, !justOne);
                n++;

                if( --b == 0 )
                    break;
                // if to here, we are doing multiple applications in a single write lock acquisition
                if( !oplogReader.moreInCurrentBatch() ) {
                    // break if no more in batch so we release lock while reading from the master
                    break;
                }
                op = oplogReader.next();

                getDur().commitIfNeeded();
            }
        }
    }

    return okResultCode;
}
int run() {
    string cmd = getParam( "command" );
    if ( cmd.size() == 0 ) {
        cerr << "ERROR: need command" << endl << endl;
        printHelp(cout);
        return -1;
    }

    GridFS g( conn() , _db );
    auth();

    string filename = getParam( "file" );

    if ( cmd == "list" ) {
        BSONObjBuilder b;
        if ( filename.size() )
            b.appendRegex( "filename" , ( (string)"^" + filename ).c_str() );
        display( &g , b.obj() );
        return 0;
    }

    if ( filename.size() == 0 ) {
        cerr << "ERROR: need a filename" << endl << endl;
        printHelp(cout);
        return -1;
    }

    if ( cmd == "search" ) {
        BSONObjBuilder b;
        b.appendRegex( "filename" , filename.c_str() );
        display( &g , b.obj() );
        return 0;
    }

    if ( cmd == "get" ) {
        GridFile f = g.findFile( filename );
        if ( ! f.exists() ) {
            cerr << "ERROR: file not found" << endl;
            return -2;
        }

        string out = getParam("local", f.getFilename());
        f.write( out );

        if (out != "-")
            cout << "done write to: " << out << endl;

        return 0;
    }

    if ( cmd == "put" ) {
        const string& infile = getParam("local", filename);
        const string& type = getParam("type", "");

        BSONObj file = g.storeFile(infile, filename, type);
        cout << "added file: " << file << endl;

        if (hasParam("replace")) {
            auto_ptr<DBClientCursor> cursor =
                conn().query(_db + ".fs.files",
                             BSON("filename" << filename << "_id" << NE << file["_id"]));
            while (cursor->more()) {
                BSONObj o = cursor->nextSafe();
                conn().remove(_db + ".fs.files", BSON("_id" << o["_id"]));
                conn().remove(_db + ".fs.chunks", BSON("_id" << o["_id"]));
                cout << "removed file: " << o << endl;
            }
        }

        conn().getLastError();
        cout << "done!";
        return 0;
    }

    if ( cmd == "delete" ) {
        g.removeFile(filename);
        conn().getLastError();
        cout << "done!";
        return 0;
    }

    cerr << "ERROR: unknown command '" << cmd << "'" << endl << endl;
    printHelp(cout);
    return -1;
}
void v8ToMongoElement( BSONObjBuilder & b , v8::Handle<v8::String> name ,
                       const string sname , v8::Handle<v8::Value> value ) {
    if ( value->IsString() ) {
        if ( sname == "$where" )
            b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        else
            b.append( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }

    if ( value->IsFunction() ) {
        b.appendCode( sname.c_str() , toSTLString( value ).c_str() );
        return;
    }

    if ( value->IsNumber() ) {
        b.append( sname.c_str() , value->ToNumber()->Value() );
        return;
    }

    if ( value->IsArray() ) {
        BSONObj sub = v8ToMongo( value->ToObject() );
        b.appendArray( sname.c_str() , sub );
        return;
    }

    if ( value->IsDate() ) {
        b.appendDate( sname.c_str() , (unsigned long long)(v8::Date::Cast( *value )->NumberValue()) );
        return;
    }

    if ( value->IsObject() ) {
        string s = toSTLString( value );
        if ( s.size() && s[0] == '/' ) {
            // "/pattern/flags" string form of a regex. Note this older version uses
            // find() rather than rfind(), so a '/' inside the pattern would truncate it.
            s = s.substr( 1 );
            string r = s.substr( 0 , s.find( "/" ) );
            string o = s.substr( s.find( "/" ) + 1 );
            b.appendRegex( sname.c_str() , r.c_str() , o.c_str() );
        }
        else if ( value->ToObject()->GetPrototype()->IsObject() &&
                  value->ToObject()->GetPrototype()->ToObject()->HasRealNamedProperty( String::New( "isObjectId" ) ) ) {
            OID oid;
            oid.init( toSTLString( value ) );
            b.appendOID( sname.c_str() , &oid );
        }
        else {
            BSONObj sub = v8ToMongo( value->ToObject() );
            b.append( sname.c_str() , sub );
        }
        return;
    }

    if ( value->IsBoolean() ) {
        b.appendBool( sname.c_str() , value->ToBoolean()->Value() );
        return;
    }
    else if ( value->IsUndefined() ) {
        return;
    }
    else if ( value->IsNull() ) {
        b.appendNull( sname.c_str() );
        return;
    }

    cout << "don't know how to convert to mongo field [" << name << "]\t" << value << endl;
}
DatabaseType ShardingCatalogManager::createDatabase(OperationContext* opCtx,
                                                    const std::string& dbName) {
    invariant(nsIsDbOnly(dbName));

    // The admin and config databases should never be explicitly created. They "just exist",
    // i.e. getDatabase will always return an entry for them.
    if (dbName == "admin" || dbName == "config") {
        uasserted(ErrorCodes::InvalidOptions,
                  str::stream() << "cannot manually create database '" << dbName << "'");
    }

    // Check if a database already exists with the same name (case sensitive), and if so,
    // return the existing entry.
    BSONObjBuilder queryBuilder;
    queryBuilder.appendRegex(
        DatabaseType::name(), (string) "^" + pcrecpp::RE::QuoteMeta(dbName) + "$", "i");

    auto docs = uassertStatusOK(Grid::get(opCtx)->catalogClient()->_exhaustiveFindOnConfig(
                                    opCtx,
                                    ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                    repl::ReadConcernLevel::kLocalReadConcern,
                                    DatabaseType::ConfigNS,
                                    queryBuilder.obj(),
                                    BSONObj(),
                                    1))
                    .value;

    if (!docs.empty()) {
        BSONObj dbObj = docs.front();
        std::string actualDbName = dbObj[DatabaseType::name()].String();

        uassert(ErrorCodes::DatabaseDifferCase,
                str::stream() << "can't have 2 databases that just differ on case "
                              << " have: " << actualDbName << " want to add: " << dbName,
                actualDbName == dbName);

        // We did a local read of the database entry above and found that the database already
        // exists. However, the data may not be majority committed (a previous createDatabase
        // attempt may have failed with a writeConcern error).
        // Since the current Client doesn't know the opTime of the last write to the database
        // entry, make it wait for the last opTime in the system when we wait for writeConcern.
        repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);

        return uassertStatusOK(DatabaseType::fromBSON(dbObj));
    }

    // The database does not exist. Pick a primary shard for the new database.
    const auto primaryShardId =
        uassertStatusOK(_selectShardForNewDatabase(opCtx, Grid::get(opCtx)->shardRegistry()));

    // Insert an entry for the new database into the sharding catalog.
    DatabaseType db(dbName, std::move(primaryShardId), false, databaseVersion::makeNew());

    log() << "Registering new database " << db << " in sharding catalog";

    uassertStatusOK(Grid::get(opCtx)->catalogClient()->insertConfigDocument(
        opCtx, DatabaseType::ConfigNS, db.toBSON(), ShardingCatalogClient::kLocalWriteConcern));

    return db;
}
bool _cleanupUpgradeState(const ConnectionString& configLoc,
                          const OID& lastUpgradeId,
                          string* errMsg) {
    string dummy;
    if (!errMsg) errMsg = &dummy;

    scoped_ptr<ScopedDbConnection> connPtr;

    string workingSuffix = genWorkingSuffix(lastUpgradeId);

    try {
        connPtr.reset(ScopedDbConnection::getInternalScopedDbConnection(configLoc, 30));
        ScopedDbConnection& conn = *connPtr;

        // Drop old upgrade collections on config server

        bool resultOk;
        BSONObj dropResult;

        resultOk = conn->dropCollection(CollectionType::ConfigNS + workingSuffix, &dropResult);

        if (!resultOk) {
            *errMsg = stream() << "could not drop collection "
                               << (CollectionType::ConfigNS + workingSuffix)
                               << causedBy(dropResult.toString());
            return false;
        }

        resultOk = conn->dropCollection(ChunkType::ConfigNS + workingSuffix, &dropResult);

        if (!resultOk) {
            *errMsg = stream() << "could not drop collection "
                               << (ChunkType::ConfigNS + workingSuffix)
                               << causedBy(dropResult.toString());
            return false;
        }

        // Force old locks taken by previous upgrade process on config server
        // This is safe because no previous upgrade process can be active while we hold the
        // upgrade lock.

        log() << "forcing upgrade locks of previous failed upgrade with id "
              << lastUpgradeId.toString() << endl;

        // Explicit builder needed b/c of regex
        BSONObjBuilder lockQueryB;
        lockQueryB.appendRegex(LocksType::why(),
                               pcrecpp::RE::QuoteMeta("(" + lastUpgradeId.toString() + ")"));

        conn->update(LocksType::ConfigNS,
                     lockQueryB.obj(),
                     BSON("$set" << BSON(LocksType::state(0))),
                     false, true); // multi
        _checkGLE(conn);
    }
    catch (const DBException& e) {
        *errMsg = stream() << "could not drop collections during cleanup of upgrade "
                           << lastUpgradeId << causedBy(e);
        return false;
    }

    connPtr->done();
    return true;
}