void logStartup() {
    BSONObjBuilder toLog;
    stringstream id;
    id << getHostNameCached() << "-" << jsTime();
    toLog.append( "_id", id.str() );
    toLog.append( "hostname", getHostNameCached() );
    toLog.appendTimeT( "startTime", time(0) );
    toLog.append( "startTimeLocal", dateToCtimeString(curTimeMillis64()) );
    toLog.append( "cmdLine", serverGlobalParams.parsedOpts );
    toLog.append( "pid", ProcessId::getCurrent().asLongLong() );

    BSONObjBuilder buildinfo( toLog.subobjStart("buildinfo") );
    appendBuildInfo(buildinfo);
    buildinfo.doneFast();

    BSONObj o = toLog.obj();

    Lock::GlobalWrite lk;
    DBDirectClient c;
    const char* name = "local.startup_log";
    c.createCollection( name, 10 * 1024 * 1024, true );
    c.insert( name, o );
}
void run() {
    // Insert some documents.
    int32_t nDocs = 1000;
    for( int32_t i = 0; i < nDocs; ++i ) {
        _client.insert( _ns, BSON( "a" << ( i / 4 ) ) );
    }
    // Find the documents that are dups.
    set<DiskLoc> dups;
    int32_t last = -1;
    for( boost::shared_ptr<Cursor> cursor = theDataFileMgr.findAll( _ns );
         cursor->ok();
         cursor->advance() ) {
        int32_t currA = cursor->current()[ "a" ].Int();
        if ( currA == last ) {
            dups.insert( cursor->currLoc() );
        }
        last = currA;
    }
    // Check the expected number of dups: each 'a' value appears four times, and the
    // scan flags every repeat after the first, i.e. 3/4 of the documents.
    ASSERT_EQUALS( static_cast<uint32_t>( nDocs / 4 * 3 ), dups.size() );
    // Drop the dups.
    BtreeBasedBuilder::doDropDups( _ns, nsdetails( _ns ), dups, true );
    // Check that the expected number of documents remain.
    ASSERT_EQUALS( static_cast<uint32_t>( nDocs / 4 ), _client.count( _ns ) );
}
void DocumentSourceCursor::sourceToBson( BSONObjBuilder *pBuilder, bool explain ) const {
    /* this has no analog in the BSON world, so only allow it for explain */
    if (explain) {
        pBuilder->append("query", *pQuery);
        if (pSort.get()) {
            pBuilder->append("sort", *pSort);
        }

        // construct query for explain
        BSONObjBuilder queryBuilder;
        queryBuilder.append("$query", *pQuery);
        if (pSort.get())
            queryBuilder.append("$orderby", *pSort);
        queryBuilder.append("$explain", 1);
        Query query(queryBuilder.obj());

        DBDirectClient directClient;
        BSONObj explainResult(directClient.findOne(ns, query));
        pBuilder->append("cursor", explainResult);
    }
}
void run() {
    Client::initThread( "slaveTracking" );
    DBDirectClient db;
    while ( ! inShutdown() ) {
        sleepsecs( 1 );
        if ( ! _dirty )
            continue;

        writelock lk(NS);

        list< pair<BSONObj,BSONObj> > todo;
        {
            scoped_lock mylk(_mutex);
            for ( map<Ident,Info>::iterator i = _slaves.begin(); i != _slaves.end(); i++ ) {
                BSONObjBuilder temp;
                temp.appendTimestamp( "syncedTo", i->second.loc[0].asDate() );
                todo.push_back( pair<BSONObj,BSONObj>( i->first.obj.getOwned(),
                                                       BSON( "$set" << temp.obj() ).getOwned() ) );
            }
            _slaves.clear();
        }

        for ( list< pair<BSONObj,BSONObj> >::iterator i = todo.begin(); i != todo.end(); i++ ) {
            db.update( NS, i->first, i->second, true );
        }

        _dirty = false;
    }
}
void run() {
    dblock lk;
    const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeGap";
    {
        DBDirectClient c;
        for( int i = 0; i < 10; ++i )
            c.insert( ns, BSON( "a" << i ) );
        for( int i = 100; i < 110; ++i )
            c.insert( ns, BSON( "a" << i ) );
        ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
    }
    int v[] = { -50, 2, 40, 60, 109, 200 };
    boost::shared_ptr< FieldRangeVector > frv( vec( v, 6 ) );
    Client::Context ctx( ns );
    scoped_ptr<BtreeCursor> _c( BtreeCursor::make( nsdetails( ns ),
                                                   1,
                                                   nsdetails( ns )->idx(1),
                                                   frv,
                                                   1 ) );
    BtreeCursor &c = *_c.get();
    ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
    // Only the ranges [ -50, 2 ] and [ 109, 200 ] intersect the data,
    // yielding keys 0, 1, 2 and 109; [ 40, 60 ] falls in the gap.
    double expected[] = { 0, 1, 2, 109 };
    for( int i = 0; i < 4; ++i ) {
        ASSERT( c.ok() );
        ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
        c.advance();
    }
    ASSERT( !c.ok() );
}
TEST(DBHelperTests, FindDiskLocsNoIndex) {
    DBDirectClient client;
    client.remove( ns, BSONObj() );
    client.insert( ns, BSON( "_id" << OID::gen() ) );

    long long maxSizeBytes = 1024 * 1024 * 1024;
    set<DiskLoc> locs;
    long long numDocsFound;
    long long estSizeBytes;
    {
        Lock::DBRead lk( ns );
        Client::Context ctx( ns );

        // search invalid index range
        KeyRange range( ns,
                        BSON( "badIndex" << 0 ),
                        BSON( "badIndex" << 10 ),
                        BSON( "badIndex" << 1 ) );
        Status result = Helpers::getLocsInRange( range,
                                                 maxSizeBytes,
                                                 &locs,
                                                 &numDocsFound,
                                                 &estSizeBytes );

        // Make sure we get the right error code
        ASSERT_EQUALS( result.code(), ErrorCodes::IndexNotFound );
        ASSERT_EQUALS( static_cast<long long>( locs.size() ), 0 );
        ASSERT_EQUALS( numDocsFound, 0 );
        ASSERT_EQUALS( estSizeBytes, 0 );
    }
}
Status AuthzManagerExternalStateMongod::insert(
        const NamespaceString& collectionName,
        const BSONObj& document,
        const BSONObj& writeConcern) {
    try {
        DBDirectClient client;
        {
            Client::GodScope gs;
            // TODO(spencer): Once we're no longer fully rebuilding the user cache on every
            // change to user data we should remove the global lock and uncomment the
            // WriteContext below
            Lock::GlobalWrite w;
            // Client::WriteContext ctx(userNS);
            client.insert(collectionName, document);
        }

        // Handle write concern
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string errstr = client.getLastErrorString(res);
        if (errstr.empty()) {
            return Status::OK();
        }
        if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
            return Status(ErrorCodes::DuplicateKey, errstr);
        }
        return Status(ErrorCodes::UnknownError, errstr);
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
void doDBUpgrade( const string& dbName, DataFileHeader* h ) {
    static DBDirectClient db;

    if ( h->version == 4 && h->versionMinor == 4 ) {
        verify( PDFILE_VERSION == 4 );
        verify( PDFILE_VERSION_MINOR_22_AND_OLDER == 5 );

        list<string> colls = db.getCollectionNames( dbName );
        for ( list<string>::iterator i = colls.begin(); i != colls.end(); i++ ) {
            string c = *i;
            log() << "\t upgrading collection:" << c << endl;
            BSONObj out;
            bool ok = db.runCommand( dbName,
                                     BSON( "reIndex" << c.substr( dbName.size() + 1 ) ),
                                     out );
            if ( ! ok ) {
                log() << "\t\t reindex failed: " << out << endl;
                fassertFailed( 17393 );
            }
        }

        getDur().writingInt(h->versionMinor) = 5;
        return;
    }

    // do this in the general case
    fassert( 17401, repairDatabase( dbName ) );
}
Status AuthzManagerExternalStateMongod::insert(
        const NamespaceString& collectionName,
        const BSONObj& document,
        const BSONObj& writeConcern) {
    try {
        DBDirectClient client;
        client.insert(collectionName, document);

        // Handle write concern
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string errstr = client.getLastErrorString(res);
        if (errstr.empty()) {
            return Status::OK();
        }
        if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
            return Status(ErrorCodes::DuplicateKey, errstr);
        }
        return Status(ErrorCodes::UnknownError, errstr);
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
Status AuthzManagerExternalStateMongod::remove(
        const NamespaceString& collectionName,
        const BSONObj& query,
        const BSONObj& writeConcern,
        int* numRemoved) {
    try {
        DBDirectClient client;
        client.remove(collectionName, query);

        // Handle write concern
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string errstr = client.getLastErrorString(res);
        if (!errstr.empty()) {
            return Status(ErrorCodes::UnknownError, errstr);
        }

        *numRemoved = res["n"].numberInt();
        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
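// Usage sketch (assumption, not from the original source): driving remove() above
// with a write concern whose fields are merged into the getLastError command. The
// namespace, query, and write concern values are illustrative only.
void exampleRemove(AuthzManagerExternalStateMongod* externalState) {
    int numRemoved = 0;
    Status status = externalState->remove(NamespaceString("admin.system.users"),
                                          BSON("user" << "bob"),
                                          BSON("w" << "majority" << "wtimeout" << 30 * 1000),
                                          &numRemoved);
    if (status.isOK()) {
        log() << "removed " << numRemoved << " matching documents" << endl;
    }
}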
void run() {
    // Recreate the collection as capped, without an _id index.
    _client.dropCollection( _ns );
    BSONObj info;
    ASSERT( _client.runCommand( "unittests",
                                BSON( "create" << "indexupdate" <<
                                      "capped" << true <<
                                      "size" << ( 10 * 1024 ) <<
                                      "autoIndexId" << false ),
                                info ) );
    // Insert some documents.
    int32_t nDocs = 1000;
    for( int32_t i = 0; i < nDocs; ++i ) {
        _client.insert( _ns, BSON( "_id" << i ) );
    }
    // Initialize curop.
    cc().curop()->reset();
    // Request an interrupt.
    killCurrentOp.killAll();
    BSONObj indexInfo = BSON( "key" << BSON( "_id" << 1 ) << "ns" << _ns << "name" << "_id" );
    // The call is not interrupted because mayInterrupt == false.
    theDataFileMgr.insertWithObjMod( "unittests.system.indexes", indexInfo, false );
    // The new index is listed in system.indexes because the index build succeeded.
    ASSERT_EQUALS( 1U, _client.count( "unittests.system.indexes", BSON( "ns" << _ns ) ) );
}
void run() {
    dblock lk;
    const char *ns = "unittests.cursortests.BtreeCursorTests.MultiRangeGap";
    {
        DBDirectClient c;
        for( int i = 0; i < 10; ++i )
            c.insert( ns, BSON( "a" << i ) );
        for( int i = 100; i < 110; ++i )
            c.insert( ns, BSON( "a" << i ) );
        ASSERT( c.ensureIndex( ns, BSON( "a" << 1 ) ) );
    }
    BoundList b;
    b.push_back( pair< BSONObj, BSONObj >( BSON( "" << -50 ), BSON( "" << 2 ) ) );
    b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 40 ), BSON( "" << 60 ) ) );
    b.push_back( pair< BSONObj, BSONObj >( BSON( "" << 109 ), BSON( "" << 200 ) ) );
    Client::Context ctx( ns );
    BtreeCursor c( nsdetails( ns ), 1, nsdetails( ns )->idx(1), b, 1 );
    ASSERT_EQUALS( "BtreeCursor a_1 multi", c.toString() );
    double expected[] = { 0, 1, 2, 109 };
    for( int i = 0; i < 4; ++i ) {
        ASSERT( c.ok() );
        ASSERT_EQUALS( expected[ i ], c.currKey().firstElement().number() );
        c.advance();
    }
    ASSERT( !c.ok() );
}
Status AuthzManagerExternalStateMongod::removePrivilegeDocuments(const string& dbname,
                                                                 const BSONObj& query) {
    try {
        string userNS = dbname + ".system.users";
        DBDirectClient client;
        {
            Client::GodScope gs;
            // TODO(spencer): Once we're no longer fully rebuilding the user cache on every
            // change to user data we should remove the global lock and uncomment the
            // WriteContext below
            Lock::GlobalWrite w;
            // Client::WriteContext ctx(userNS);
            client.remove(userNS, query);
        }

        // 30 second timeout for w:majority
        BSONObj res = client.getLastErrorDetailed(false, false, -1, 30*1000);
        string errstr = client.getLastErrorString(res);
        if (!errstr.empty()) {
            return Status(ErrorCodes::UserModificationFailed, errstr);
        }

        int numUpdated = res["n"].numberInt();
        if (numUpdated == 0) {
            return Status(ErrorCodes::UserNotFound,
                          mongoutils::str::stream() << "No users found on database \""
                                                    << dbname << "\" matching query: "
                                                    << query.toString());
        }
        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
void ReplSetImpl::assumePrimary() {
    LOG(2) << "replSet assuming primary" << endl;
    verify( iAmPotentiallyHot() );

    // Wait for replication to stop and buffer to be consumed
    LOG(1) << "replSet waiting for replication to finish before becoming primary" << endl;
    replset::BackgroundSync::get()->stopReplicationAndFlushBuffer();

    // Lock here to prevent stepping down & becoming primary from getting interleaved
    Lock::GlobalWrite lk;

    // Make sure that new OpTimes are higher than existing ones even with clock skew
    DBDirectClient c;
    BSONObj lastOp = c.findOne( "local.oplog.rs",
                                Query().sort(reverseNaturalObj),
                                NULL,
                                QueryOption_SlaveOk );
    if ( !lastOp.isEmpty() ) {
        OpTime::setLast( lastOp[ "ts" ].date() );
    }

    // Generate new election unique id
    elect.setElectionId(OID::gen());
    changeState(MemberState::RS_PRIMARY);

    // This must be done after becoming primary but before releasing the write lock. This adds
    // the dropCollection entries for every temp collection to the opLog since we want it to be
    // replicated to secondaries.
    dropAllTempCollections();
}
Status AuthzManagerExternalStateMongod::remove(
        const NamespaceString& collectionName,
        const BSONObj& query,
        const BSONObj& writeConcern,
        int* numRemoved) {
    try {
        DBDirectClient client;
        {
            Client::GodScope gs;
            // TODO(spencer): Once we're no longer fully rebuilding the user cache on every
            // change to user data we should remove the global lock and uncomment the
            // WriteContext below
            Lock::GlobalWrite w;
            // Client::WriteContext ctx(userNS);
            client.remove(collectionName, query);
        }

        // Handle write concern
        BSONObjBuilder gleBuilder;
        gleBuilder.append("getLastError", 1);
        gleBuilder.appendElements(writeConcern);
        BSONObj res;
        client.runCommand("admin", gleBuilder.done(), res);
        string errstr = client.getLastErrorString(res);
        if (!errstr.empty()) {
            return Status(ErrorCodes::UnknownError, errstr);
        }

        *numRemoved = res["n"].numberInt();
        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
bool doDBUpgrade( const string& dbName, string& errmsg, DataFileHeader* h ) {
    // Note: errmsg must be taken by reference; passing it by value would discard the
    // "reindex failed" message assigned below before it ever reached the caller.
    static DBDirectClient db;

    if ( h->version == 4 && h->versionMinor == 4 ) {
        verify( PDFILE_VERSION == 4 );
        verify( PDFILE_VERSION_MINOR == 5 );

        list<string> colls = db.getCollectionNames( dbName );
        for ( list<string>::iterator i = colls.begin(); i != colls.end(); i++ ) {
            string c = *i;
            log() << "\t upgrading collection:" << c << endl;
            BSONObj out;
            bool ok = db.runCommand( dbName,
                                     BSON( "reIndex" << c.substr( dbName.size() + 1 ) ),
                                     out );
            if ( ! ok ) {
                errmsg = "reindex failed";
                log() << "\t\t reindex failed: " << out << endl;
                return false;
            }
        }

        h->versionMinor = 5;
        return true;
    }

    // do this in the general case
    return repairDatabase( dbName.c_str(), errmsg );
}
Status AuthzManagerExternalStateMongod::insertPrivilegeDocument(const string& dbname,
                                                                const BSONObj& userObj) {
    try {
        string userNS = dbname + ".system.users";
        DBDirectClient client;
        {
            Client::GodScope gs;
            // TODO(spencer): Once we're no longer fully rebuilding the user cache on every
            // change to user data we should remove the global lock and uncomment the
            // WriteContext below
            Lock::GlobalWrite w;
            // Client::WriteContext ctx(userNS);
            client.insert(userNS, userObj);
        }

        // 30 second timeout for w:majority
        BSONObj res = client.getLastErrorDetailed(false, false, -1, 30*1000);
        string errstr = client.getLastErrorString(res);
        if (errstr.empty()) {
            return Status::OK();
        }
        if (res.hasField("code") && res["code"].Int() == ASSERT_ID_DUPKEY) {
            return Status(ErrorCodes::DuplicateKey,
                          mongoutils::str::stream() << "User \"" << userObj["user"].String()
                                                    << "\" already exists on database \""
                                                    << dbname << "\"");
        }
        return Status(ErrorCodes::UserModificationFailed, errstr);
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
Status AuthzManagerExternalStateMongod::createIndex(
        const NamespaceString& collectionName,
        const BSONObj& pattern,
        bool unique,
        const BSONObj& writeConcern) {
    DBDirectClient client;
    try {
        if (client.ensureIndex(collectionName.ns(), pattern, unique)) {
            BSONObjBuilder gleBuilder;
            gleBuilder.append("getLastError", 1);
            gleBuilder.appendElements(writeConcern);
            BSONObj res;
            client.runCommand("admin", gleBuilder.done(), res);
            string errstr = client.getLastErrorString(res);
            if (!errstr.empty()) {
                return Status(ErrorCodes::UnknownError, errstr);
            }
        }
        return Status::OK();
    } catch (const DBException& ex) {
        return ex.toStatus();
    }
}
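// Usage sketch (assumption, not from the original source): ensuring a unique index
// of the shape the user collections rely on. The namespace, key pattern, and write
// concern values here are illustrative only.
void exampleCreateIndex(AuthzManagerExternalStateMongod* externalState) {
    Status status = externalState->createIndex(NamespaceString("admin.system.users"),
                                               BSON("user" << 1 << "userSource" << 1),
                                               true /* unique */,
                                               BSON("w" << 1));
    if (!status.isOK()) {
        log() << "index build failed: " << status.reason() << endl;
    }
}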
Status AuthzManagerExternalStateMongod::updatePrivilegeDocument(
        const UserName& user, const BSONObj& updateObj) const {
    string userNS = mongoutils::str::stream() << user.getDB() << ".system.users";
    Client::GodScope gs;
    Client::WriteContext ctx(userNS);

    DBDirectClient client;
    client.update(userNS,
                  QUERY("user" << user.getUser() << "userSource" << BSONNULL),
                  updateObj);

    // 30 second timeout for w:majority
    BSONObj res = client.getLastErrorDetailed(false, false, -1, 30*1000);
    string err = client.getLastErrorString(res);
    if (!err.empty()) {
        return Status(ErrorCodes::UserModificationFailed, err);
    }

    int numUpdated = res["n"].numberInt();
    dassert(numUpdated <= 1 && numUpdated >= 0);
    if (numUpdated == 0) {
        return Status(ErrorCodes::UserNotFound,
                      mongoutils::str::stream() << "User " << user.getFullName()
                                                << " not found");
    }
    return Status::OK();
}
/**
 * Checks if this server was started without --replset but has a config in local.system.replset
 * (meaning that this is probably a replica set member started in stand-alone mode).
 *
 * @returns the number of documents in local.system.replset, or 0 if this server was started
 *          with --replset.
 */
unsigned long long checkIfReplMissingFromCommandLine() {
    // A global write lock is taken so the count below works: files cannot be opened
    // while only read-locked.
    Lock::GlobalWrite lk;
    if (!replSettings.usingReplSets()) {
        DBDirectClient c;
        return c.count("local.system.replset");
    }
    return 0;
}
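// Usage sketch (assumption, not from the original source): how a startup path might
// act on the count returned above. The wrapper name and warning text are
// illustrative; only checkIfReplMissingFromCommandLine() comes from this file.
void warnIfReplMissingFromCommandLine() {
    unsigned long long replSetDocs = checkIfReplMissingFromCommandLine();
    if (replSetDocs != 0) {
        log() << "** WARNING: mongod started without --replSet yet "
              << replSetDocs << " documents are present in local.system.replset"
              << endl;
    }
}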
void run() {
    _client.insert("unittests.system.indexes",
                   BSON("name" << "x" <<
                        "ns" << _ns <<
                        "key" << BSON("y" << 1 << "x" << 1)));
    // Cannot create a different index with the same name as the existing one.
    ASSERT_NOT_EQUALS(_client.getLastError(), "");
}
void run() {
    _client.insert("unittests.system.indexes",
                   BSON("name" << "x" <<
                        "ns" << _ns <<
                        "key" << BSON("x" << 1 << "y" << 1)));
    // It is okay to create an index with exactly the same specs as an existing one:
    // the request is ignored, but no error is raised.
    ASSERT_EQUALS(_client.getLastError(), "");
}
void run() {
    _client.insert("unittests.system.indexes",
                   BSON("name" << "x" <<
                        "ns" << _ns <<
                        "unique" << true <<
                        "key" << BSON("x" << 1 << "y" << 1)));
    // Cannot create an index with the same key spec but an option that differs
    // from the existing one.
    ASSERT_NOT_EQUALS(_client.getLastError(), "");
}
void run() {
    client.insert( _a, BSON( "a" << "17" ) );
    {
        BSONObj fromA = client.findOne( _a, BSONObj() );
        assert( fromA.valid() );
        BSONObjBuilder b;
        b.append( "b", 18 );
        b.appendDBRef( "c", "dbref.a", fromA["_id"].__oid() );
        client.insert( _b, b.obj() );
    }

    ASSERT( client.eval( "unittest",
            "x = db.dbref.b.findOne(); assert.eq( 17, x.c.fetch().a, 'ref working' );" ) );

    // BSON DBRef <=> JS DBPointer
    ASSERT( client.eval( "unittest",
            "x = db.dbref.b.findOne(); db.dbref.b.drop(); "
            "x.c = new DBPointer( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
    ASSERT_EQUALS( DBRef, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );

    // BSON Object <=> JS DBRef
    ASSERT( client.eval( "unittest",
            "x = db.dbref.b.findOne(); db.dbref.b.drop(); "
            "x.c = new DBRef( x.c.ns, x.c.id ); db.dbref.b.insert( x );" ) );
    ASSERT_EQUALS( Object, client.findOne( "unittest.dbref.b", "" )[ "c" ].type() );
    ASSERT_EQUALS( string( "dbref.a" ),
                   client.findOne( "unittest.dbref.b", "" )[ "c" ].embeddedObject()
                       .getStringField( "$ref" ) );
}
void clearTmpCollections() {
    vector<string> toDelete;

    DBDirectClient cli;
    auto_ptr<DBClientCursor> c = cli.query( "local.system.namespaces",
                                            Query( fromjson( "{name:/^local.temp./}" ) ) );
    while( c->more() )
        toDelete.push_back( c->next().getStringField( "name" ) );

    for( vector<string>::iterator i = toDelete.begin(); i != toDelete.end(); ++i ) {
        log() << "Dropping old temporary collection: " << *i << endl;
        cli.dropCollection( *i );
    }
}
void run() {
    if( !globalScriptEngine->utf8Ok() ) {
        log() << "warning: utf8 not supported" << endl;
        return;
    }
    string utf8ObjSpec = "{'_id':'\\u0001\\u007f\\u07ff\\uffff'}";
    BSONObj utf8Obj = fromjson( utf8ObjSpec );
    client.insert( ns(), utf8Obj );
    client.eval( "unittest",
                 "v = db.jstests.utf8check.findOne(); "
                 "db.jstests.utf8check.remove( {} ); "
                 "db.jstests.utf8check.insert( v );" );
    check( utf8Obj, client.findOne( ns(), BSONObj() ) );
}
void run() {
    _client.insert("unittests.system.indexes",
                   BSON("name" << "super2" <<
                        "ns" << _ns <<
                        "unique" << 1 <<
                        "dropDups" << true <<
                        "sparse" << true <<
                        "expireAfterSeconds" << 2400 <<
                        "key" << BSON("superIdx" << "2d")));
    // Same name and key spec as the existing index, but expireAfterSeconds differs,
    // so the insert must fail.
    ASSERT_NOT_EQUALS(_client.getLastError(), "");
}
Status AuthzManagerExternalStateMongod::query(
        const NamespaceString& collectionName,
        const BSONObj& query,
        const BSONObj& projection,
        const boost::function<void(const BSONObj&)>& resultProcessor) {
    try {
        DBDirectClient client;
        client.query(resultProcessor, collectionName.ns(), query, &projection);
        return Status::OK();
    } catch (const DBException& e) {
        return e.toStatus();
    }
}
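// Usage sketch (assumption, not from the original source): collecting every matching
// document through the resultProcessor callback above. The collectInto() helper and
// the query values are illustrative only.
static void collectInto(vector<BSONObj>* out, const BSONObj& doc) {
    // Copy the document: the reference handed to the callback need not outlive the call.
    out->push_back(doc.getOwned());
}

void exampleQuery(AuthzManagerExternalStateMongod* externalState) {
    vector<BSONObj> users;
    Status status = externalState->query(NamespaceString("admin.system.users"),
                                         BSON("roles" << "readWrite"), // query
                                         BSONObj(),                    // no projection
                                         boost::bind(&collectInto, &users, _1));
    if (!status.isOK()) {
        log() << "user query failed: " << status.reason() << endl;
    }
}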
void run() {
    // Exactly the same specs as the existing index, only specified in a
    // different order than the original.
    _client.insert("unittests.system.indexes",
                   BSON("name" << "super2" <<
                        "ns" << _ns <<
                        "expireAfterSeconds" << 3600 <<
                        "sparse" << true <<
                        "unique" << 1 <<
                        "dropDups" << true <<
                        "key" << BSON("superIdx" << "2d")));
    ASSERT_EQUALS(_client.getLastError(), "");
}
TEST(DBHelperTests, FindDiskLocs) {
    DBDirectClient client;
    OperationContextImpl txn;

    // Some unique tag we can use to make sure we're pulling back the right data
    OID tag = OID::gen();
    client.remove( ns, BSONObj() );

    int numDocsInserted = 10;
    for ( int i = 0; i < numDocsInserted; ++i ) {
        client.insert( ns, BSON( "_id" << i << "tag" << tag ) );
    }

    long long maxSizeBytes = 1024 * 1024 * 1024;
    set<DiskLoc> locs;
    long long numDocsFound;
    long long estSizeBytes;
    {
        // search _id range (0, 10)
        Lock::DBRead lk( txn.lockState(), ns );
        KeyRange range( ns,
                        BSON( "_id" << 0 ),
                        BSON( "_id" << numDocsInserted ),
                        BSON( "_id" << 1 ) );
        Status result = Helpers::getLocsInRange( &txn,
                                                 range,
                                                 maxSizeBytes,
                                                 &locs,
                                                 &numDocsFound,
                                                 &estSizeBytes );

        ASSERT_EQUALS( result, Status::OK() );
        ASSERT_EQUALS( numDocsFound, numDocsInserted );
        ASSERT_NOT_EQUALS( estSizeBytes, 0 );
        ASSERT_LESS_THAN( estSizeBytes, maxSizeBytes );

        Database* db = dbHolder().get( &txn,
                                       nsToDatabase( range.ns ),
                                       storageGlobalParams.dbpath );
        const Collection* collection = db->getCollection( &txn, ns );

        // Make sure all the disklocs actually correspond to the right info
        for ( set<DiskLoc>::const_iterator it = locs.begin(); it != locs.end(); ++it ) {
            const BSONObj obj = collection->docFor( *it );
            ASSERT_EQUALS( obj["tag"].OID(), tag );
        }
    }
}