// Snapshots every "Machine" ad in the raw-ads collection into the machine
// stats samples collection, one sample document per machine ad, each stamped
// with the caller-supplied sample time.
// @param ops  ODS wrapper owning the mongodb connection (m_db_conn)
// @param ts   sample timestamp written as the "ts" field of every record
void plumage::stats::processMachineStats(ODSMongodbOps* ops, Date_t& ts) {
    dprintf(D_FULLDEBUG, "ODSCollectorPlugin::processMachineStats() called...\n");
    DBClientConnection* conn = ops->m_db_conn;
    // index raw ads by ad type so the "Machine" query below stays cheap
    conn->ensureIndex(DB_RAW_ADS, BSON( ATTR_MY_TYPE << 1 ));
    auto_ptr<DBClientCursor> cursor = conn->query(DB_RAW_ADS, QUERY( ATTR_MY_TYPE << "Machine" ) );
    // sample-collection indexes: newest-first timestamp, machine, name
    conn->ensureIndex(DB_STATS_SAMPLES_MACH, BSON( "ts" << -1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_MACH, BSON( "m" << 1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_MACH, BSON( "n" << 1 ));
    while( cursor->more() ) {
        BSONObj p = cursor->next();
        // write record to machine samples
        // NOTE(review): the DATE/STRING/INTEGER/DOUBLE macros appear to copy
        // the named attribute out of the current ad 'p' into builder 'bob'
        // under the short key — confirm against the macro definitions; the
        // macros rely on these exact local names, so do not rename bob/p/ts.
        BSONObjBuilder bob;
        DATE(ts,ts);
        STRING(m,ATTR_MACHINE);
        STRING(n,ATTR_NAME);
        STRING(ar,ATTR_ARCH);
        STRING(os,ATTR_OPSYS);
        STRING(req,ATTR_REQUIREMENTS);
        INTEGER(ki,ATTR_KEYBOARD_IDLE);
        DOUBLE(la,ATTR_LOAD_AVG);
        STRING(st,ATTR_STATE);
        INTEGER(cpu,ATTR_CPUS);
        INTEGER(mem,ATTR_MEMORY);
        // TODO: these might be moved to another collection
        // STRING(gjid,ATTR_GLOBAL_JOB_ID);
        // STRING(ru,ATTR_REMOTE_USER);
        // STRING(ag,ATTR_ACCOUNTING_GROUP);
        conn->insert(DB_STATS_SAMPLES_MACH,bob.obj());
    }
}
// Snapshots every "Scheduler" ad in the raw-ads collection into the scheduler
// stats samples collection, one sample document per scheduler ad, stamped with
// the caller-supplied sample time.
// @param ops  ODS wrapper owning the mongodb connection (m_db_conn)
// @param ts   sample timestamp written as the "ts" field of every record
void plumage::stats::processSchedulerStats(ODSMongodbOps* ops, Date_t& ts) {
    dprintf(D_FULLDEBUG, "ODSCollectorPlugin::processSchedulerStats() called...\n");
    DBClientConnection* conn = ops->m_db_conn;
    // index raw ads by ad type so the "Scheduler" query below stays cheap
    conn->ensureIndex(DB_RAW_ADS, BSON( ATTR_MY_TYPE << 1 ));
    auto_ptr<DBClientCursor> cursor = conn->query(DB_RAW_ADS, QUERY( ATTR_MY_TYPE << "Scheduler" ) );
    // sample-collection indexes: newest-first timestamp, scheduler name
    conn->ensureIndex(DB_STATS_SAMPLES_SCHED, BSON( "ts" << -1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_SCHED, BSON( "n" << 1 ));
    while( cursor->more() ) {
        BSONObj p = cursor->next();
        // write record to scheduler samples
        // NOTE(review): DATE/STRING/INTEGER presumably read from ad 'p' and
        // append the short-named field to 'bob' — confirm against the macro
        // definitions; the macros rely on these exact local names.
        BSONObjBuilder bob;
        DATE(ts,ts);
        STRING(n,ATTR_NAME);
        INTEGER(mjr,ATTR_MAX_JOBS_RUNNING);
        INTEGER(nu,ATTR_NUM_USERS);
        INTEGER(tja,ATTR_TOTAL_JOB_ADS);
        INTEGER(trun,ATTR_TOTAL_RUNNING_JOBS);
        INTEGER(thj,ATTR_TOTAL_HELD_JOBS);
        INTEGER(tij,ATTR_TOTAL_IDLE_JOBS);
        INTEGER(trem,ATTR_TOTAL_REMOVED_JOBS);
        INTEGER(tsr,ATTR_TOTAL_SCHEDULER_RUNNING_JOBS);
        INTEGER(tsi,ATTR_TOTAL_SCHEDULER_IDLE_JOBS);
        INTEGER(tlr,ATTR_TOTAL_LOCAL_RUNNING_JOBS);
        INTEGER(tli,ATTR_TOTAL_LOCAL_IDLE_JOBS);
        INTEGER(tfj,ATTR_TOTAL_FLOCKED_JOBS);
        conn->insert(DB_STATS_SAMPLES_SCHED,bob.obj());
    }
}
int toolMain( int argc, char* argv[], char* envp[] ) { mongo::runGlobalInitializersOrDie(argc, argv, envp); if( parseCmdLineOptions( argc, argv) ) return 1; BSONObj nestedDoc = BSON("Firstname" << "David" << "Lastname" << "Smith" << "Address" << BSON( "Street" << "5th Av" << "City" << "New York" ) ); std::vector<std::string> list; list.push_back("mongo new york city"); list.push_back("mongo rome"); list.push_back("mongo dublin"); list.push_back("mongo seoul"); list.push_back("mongo barcelona"); list.push_back("mongo madrid"); list.push_back("mongo chicago"); list.push_back("mongo amsterdam"); list.push_back("mongo delhi"); list.push_back("mongo beijing"); BSONObj args = BSONObjBuilder() .append( "_id", 0 ) .append( "blob", "MongoDB is an open source document-oriented database " "system designed with scalability and developer." ) .append( "nestedDoc", nestedDoc ) .append( "list", list ) .append( "counter", 0 ).obj(); const int numDocsPerDB = static_cast<int>( globalDocGenOption.dbSize * 1024 * 1024 / args.objsize() ); cout << "numDocsPerDB:" << numDocsPerDB << endl; try { DBClientConnection conn; conn.connect( globalDocGenOption.hostname ); cout << "successfully connected to the host" << endl; for( int i=0; i < globalDocGenOption.numdbs; ++i ) { scoped_ptr<DocumentGenerator> docGen( DocumentGenerator::makeDocumentGenerator(args) ); cout << "populating database " << globalDocGenOption.prefix << i << endl; long long j = 0; string ns = mongoutils::str::stream() << globalDocGenOption.prefix << i << ".sampledata"; while( j != numDocsPerDB ) { BSONObj doc = docGen->createDocument(); conn.insert( ns, doc ); ++j; } BSONObj blobIndex = BSON("blob" << 1); conn.ensureIndex(ns, blobIndex); BSONObj listIndex = BSON("list" << 1); conn.ensureIndex(ns, listIndex); } } catch( DBException &e ) { cout << "caught " << e.what() << endl; } return 0; }
void run() { DBClientConnection c; c.connect("localhost"); //"192.168.58.1"); cout << "connected ok" << endl; BSONObj p = BSON( "name" << "Joe" << "age" << 33 ); c.insert("tutorial.persons", p); p = BSON( "name" << "Jane" << "age" << 40 ); c.insert("tutorial.persons", p); p = BSON( "name" << "Abe" << "age" << 33 ); c.insert("tutorial.persons", p); p = BSON( "name" << "Samantha" << "age" << 21 << "city" << "Los Angeles" << "state" << "CA" ); c.insert("tutorial.persons", p); c.ensureIndex("tutorial.persons", fromjson("{age:1}")); cout << "count:" << c.count("tutorial.persons") << endl; auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", BSONObj()); while( cursor->more() ) { cout << cursor->next().toString() << endl; } cout << "\nprintifage:\n"; printIfAge(c, 33); }
int main() { try { cout << "connecting to localhost..." << endl; DBClientConnection c; c.connect("localhost"); cout << "connected ok" << endl; unsigned long long count = c.count("test.foo"); cout << "count of exiting documents in collection test.foo : " << count << endl; bo o = BSON( "hello" << "world" ); c.insert("test.foo", o); string e = c.getLastError(); if( !e.empty() ) { cout << "insert #1 failed: " << e << endl; } // make an index with a unique key constraint c.ensureIndex("test.foo", BSON("hello"<<1), /*unique*/true); c.insert("test.foo", o); // will cause a dup key error on "hello" field cout << "we expect a dup key error here:" << endl; cout << " " << c.getLastErrorDetailed().toString() << endl; } catch(DBException& e) { cout << "caught DBException " << e.toString() << endl; return 1; } return 0; }
int main( int argc, const char **argv ) { const char *port = "27017"; if ( argc != 1 ) { if ( argc != 3 ) throw -12; port = argv[ 2 ]; } DBClientConnection conn; string errmsg; if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) { cout << "couldn't connect : " << errmsg << endl; throw -11; } const char * ns = "test.second"; conn.remove( ns , BSONObj() ); conn.insert( ns , BSON( "name" << "eliot" << "num" << 17 ) ); conn.insert( ns , BSON( "name" << "sara" << "num" << 24 ) ); auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() ); cout << "using cursor" << endl; while ( cursor->more() ) { BSONObj obj = cursor->next(); cout << "\t" << obj.jsonString() << endl; } conn.ensureIndex( ns , BSON( "name" << 1 << "num" << -1 ) ); }
// Snapshots every "Submitter" ad in the raw-ads collection into the submitter
// stats samples collection, one sample document per submitter ad, stamped with
// the caller-supplied sample time.
// @param ops  ODS wrapper owning the mongodb connection (m_db_conn)
// @param ts   sample timestamp written as the "ts" field of every record
void plumage::stats::processSubmitterStats(ODSMongodbOps* ops, Date_t& ts) {
    dprintf(D_FULLDEBUG, "ODSCollectorPlugin::processSubmitterStats called...\n");
    DBClientConnection* conn = ops->m_db_conn;
    // index raw ads by ad type so the "Submitter" query below stays cheap
    conn->ensureIndex(DB_RAW_ADS, BSON( ATTR_MY_TYPE << 1 ));
    auto_ptr<DBClientCursor> cursor = conn->query(DB_RAW_ADS, QUERY( ATTR_MY_TYPE << "Submitter" ) );
    // sample-collection indexes: newest-first timestamp, submitter name
    conn->ensureIndex(DB_STATS_SAMPLES_SUB, BSON( "ts" << -1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_SUB, BSON( "sn" << 1 ));
    while( cursor->more() ) {
        BSONObj p = cursor->next();
        // write record to submitter samples
        // NOTE(review): DATE/STRING/INTEGER presumably read from ad 'p' and
        // append the short-named field to 'bob' — confirm against the macro
        // definitions; the macros rely on these exact local names.
        BSONObjBuilder bob;
        DATE(ts,ts);
        STRING(sn,ATTR_NAME);
        STRING(ma,ATTR_MACHINE);
        INTEGER(jr,ATTR_RUNNING_JOBS);
        // TODO: weird...HeldJobs isn't always there in the raw submitter ad
        int h = p.getIntField(ATTR_HELD_JOBS);
        // clamp a missing/negative value to zero before storing
        h = (h>0) ? h : 0;
        bob.append("jh",h);
        INTEGER(ji,ATTR_IDLE_JOBS);
        conn->insert(DB_STATS_SAMPLES_SUB,bob.obj());
    }
}
int run() { Status status = client::initialize(); if ( !status.isOK() ) { std::cout << "failed to initialize the client driver: " << status.toString() << endl; return EXIT_FAILURE; } DBClientConnection c; c.connect("localhost"); //"192.168.58.1"); cout << "connected ok" << endl; BSONObj p = BSON( "name" << "Joe" << "age" << 33 ); c.insert("tutorial.persons", p); p = BSON( "name" << "Jane" << "age" << 40 ); c.insert("tutorial.persons", p); p = BSON( "name" << "Abe" << "age" << 33 ); c.insert("tutorial.persons", p); p = BSON( "name" << "Methuselah" << "age" << BSONNULL); c.insert("tutorial.persons", p); p = BSON( "name" << "Samantha" << "age" << 21 << "city" << "Los Angeles" << "state" << "CA" ); c.insert("tutorial.persons", p); c.ensureIndex("tutorial.persons", fromjson("{age:1}")); cout << "count:" << c.count("tutorial.persons") << endl; std::auto_ptr<DBClientCursor> cursor = c.query("tutorial.persons", BSONObj()); if (!cursor.get()) { cout << "query failure" << endl; return EXIT_FAILURE; } while( cursor->more() ) { cout << cursor->next().toString() << endl; } cout << "\nprintifage:\n"; return printIfAge(c, 33); }
// End-to-end smoke test of the legacy C++ driver against a local mongod:
// exercises CRUD, cursors, replace/upsert updates, ensureIndex semantics,
// query hints, validate, timestamp advancement, killCursors vs lastError,
// and database/collection listing. All checks use assert(); throws int
// error codes (-12 bad argument count, -11 connection failure).
int main( int argc, const char **argv ) {
    const char *port = "27017";
    if ( argc != 1 ) {
        if ( argc != 3 )
            throw -12;
        port = argv[ 2 ];
    }

    DBClientConnection conn;
    string errmsg;
    if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
        cout << "couldn't connect : " << errmsg << endl;
        throw -11;
    }

    const char * ns = "test.test1";

    conn.dropCollection(ns); // clean up old data from any previous tests

    conn.remove( ns, BSONObj() );
    assert( conn.findOne( ns , BSONObj() ).isEmpty() );

    // test insert
    conn.insert( ns ,BSON( "name" << "eliot" << "num" << 1 ) );
    assert( ! conn.findOne( ns , BSONObj() ).isEmpty() );

    // test remove
    conn.remove( ns, BSONObj() );
    assert( conn.findOne( ns , BSONObj() ).isEmpty() );

    // insert, findOne testing
    conn.insert( ns , BSON( "name" << "eliot" << "num" << 1 ) );
    {
        BSONObj res = conn.findOne( ns , BSONObj() );
        assert( strstr( res.getStringField( "name" ) , "eliot" ) );
        assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
        assert( 1 == res.getIntField( "num" ) );
    }

    // cursor: all documents, a matching filter, and a non-matching filter
    conn.insert( ns ,BSON( "name" << "sara" << "num" << 2 ) );
    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        assert( count == 2 );
    }
    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 1 ) );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        assert( count == 1 );
    }
    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 3 ) );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        assert( count == 0 );
    }

    // update
    {
        BSONObj res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );

        BSONObj after = BSONObjBuilder().appendElements( res ).append( "name2" , "h" ).obj();

        // update with a non-matching selector is a no-op
        conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after );
        res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        assert( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
        assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );

        // matching selector replaces the document with 'after'
        conn.update( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() , after );
        res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        assert( strstr( res.getStringField( "name" ) , "eliot" ) );
        assert( strstr( res.getStringField( "name2" ) , "h" ) );
        assert( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );

        // upsert
        conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after , 1 );
        assert( ! conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() ).isEmpty() );
    }

    {
        // ensure index: returns true when created, false when it already exists
        assert( conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
        assert( ! conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
    }

    {
        // hint related tests
        assert( conn.findOne(ns, "{}")["name"].str() == "sara" );
        assert( conn.findOne(ns, "{ name : 'eliot' }")["name"].str() == "eliot" );
        assert( conn.getLastError() == "" );

        // nonexistent index test
        bool asserted = false;
        try {
            conn.findOne(ns, Query("{name:\"eliot\"}").hint("{foo:1}"));
        }
        catch ( ... ){
            asserted = true;
        }
        assert( asserted );

        //existing index
        assert( conn.findOne(ns, Query("{name:'eliot'}").hint("{name:1}")).hasElement("name") );

        // run validate
        assert( conn.validate( ns ) );
    }

    {
        // timestamp test: the server must advance ts monotonically on update
        const char * tsns = "test.tstest1";
        conn.dropCollection( tsns );
        {
            mongo::BSONObjBuilder b;
            b.appendTimestamp( "ts" );
            conn.insert( tsns , b.obj() );
        }
        mongo::BSONObj out = conn.findOne( tsns , mongo::BSONObj() );
        Date_t oldTime = out["ts"].timestampTime();
        unsigned int oldInc = out["ts"].timestampInc();
        {
            mongo::BSONObjBuilder b1;
            b1.append( out["_id"] );
            mongo::BSONObjBuilder b2;
            b2.append( out["_id"] );
            b2.appendTimestamp( "ts" );
            conn.update( tsns , b1.obj() , b2.obj() );
        }
        BSONObj found = conn.findOne( tsns , mongo::BSONObj() );
        cout << "old: " << out << "\nnew: " << found << endl;
        assert( ( oldTime < found["ts"].timestampTime() ) ||
                ( oldTime == found["ts"].timestampTime() && oldInc < found["ts"].timestampInc() ) );
    }

    {
        // check that killcursors doesn't affect last error
        assert( conn.getLastError().empty() );

        BufBuilder b;
        b.appendNum( (int)0 ); // reserved
        b.appendNum( (int)-1 ); // invalid # of cursors triggers exception
        b.appendNum( (int)-1 ); // bogus cursor id

        Message m;
        m.setData( dbKillCursors, b.buf(), b.len() );

        // say() is protected in DBClientConnection, so get superclass
        static_cast< DBConnector* >( &conn )->say( m );

        assert( conn.getLastError().empty() );
    }

    {
        // enumerate databases and the collections of "test"
        list<string> l = conn.getDatabaseNames();
        for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ){
            cout << "db name : " << *i << endl;
        }

        l = conn.getCollectionNames( "test" );
        for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ){
            cout << "coll name : " << *i << endl;
        }
    }

    cout << "client test finished!" << endl;
}
int main(int argc, char *argv[]) { try { cout << "mongoperf" << endl; if( argc > 1 ) { cout << "\n" "usage:\n" "\n" " mongoperf < myjsonconfigfile\n" "\n" " {\n" " nThreads:<n>, // number of threads\n" " fileSizeMB:<n>, // test file size\n" " sleepMicros:<n>, // pause for sleepMicros/nThreads between each operation\n" " mmf:<bool>, // if true do i/o's via memory mapped files\n" " r:<bool>, // do reads\n" " w:<bool> // do writes\n" " }\n" "\n" "most fields are optional.\n" "non-mmf io is direct io (no caching). use a large file size to test making the heads\n" " move significantly and to avoid i/o coalescing\n" "mmf io uses caching (the file system cache).\n" "\n" << endl; return 0; } cout << "use -h for help" << endl; char input[1024]; memset(input, 0, sizeof(input)); cin.read(input, 1000); if( *input == 0 ) { cout << "error no options found on stdin for mongoperf" << endl; return 2; } string s = input; str::stripTrailing(s, "\n\r\0x1a"); try { options = fromjson(s); } catch(...) { cout << s << endl; cout << "couldn't parse json options" << endl; return -1; } cout << "options:\n" << options.toString() << endl; go(); #if 0 cout << "connecting to localhost..." << endl; DBClientConnection c; c.connect("localhost"); cout << "connected ok" << endl; unsigned long long count = c.count("test.foo"); cout << "count of exiting documents in collection test.foo : " << count << endl; bo o = BSON( "hello" << "world" ); c.insert("test.foo", o); string e = c.getLastError(); if( !e.empty() ) { cout << "insert #1 failed: " << e << endl; } // make an index with a unique key constraint c.ensureIndex("test.foo", BSON("hello"<<1), /*unique*/true); c.insert("test.foo", o); // will cause a dup key error on "hello" field cout << "we expect a dup key error here:" << endl; cout << " " << c.getLastErrorDetailed().toString() << endl; #endif } catch(DBException& e) { cout << "caught DBException " << e.toString() << endl; return 1; } return 0; }
// End-to-end smoke test of the C++ driver (tokumx-era variant): exercises
// CRUD, cursors, replace/upsert updates, ensureIndex, a 5-second TTL index
// (includes a 66-second sleep waiting for the TTL pass), query hints,
// validate, timestamp advancement, killCursors vs lastError, db/collection
// listing, mapReduce output modes, and eval() timeouts. Checks use verify().
int main( int argc, const char **argv ) {
    const char *port = "27017";
    if ( argc != 1 ) {
        if ( argc != 3 ) {
            std::cout << "need to pass port as second param" << endl;
            return EXIT_FAILURE;
        }
        port = argv[ 2 ];
    }

    DBClientConnection conn;
    string errmsg;
    if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
        cout << "couldn't connect : " << errmsg << endl;
        return EXIT_FAILURE;
    }

    const char * ns = "test.test1";

    conn.dropCollection(ns); // clean up old data from any previous tests

    conn.remove( ns, BSONObj() );
    verify( conn.findOne( ns , BSONObj() ).isEmpty() );

    // test insert
    conn.insert( ns ,BSON( "name" << "eliot" << "num" << 1 ) );
    verify( ! conn.findOne( ns , BSONObj() ).isEmpty() );

    // test remove
    conn.remove( ns, BSONObj() );
    verify( conn.findOne( ns , BSONObj() ).isEmpty() );

    // insert, findOne testing
    conn.insert( ns , BSON( "name" << "eliot" << "num" << 1 ) );
    {
        BSONObj res = conn.findOne( ns , BSONObj() );
        verify( strstr( res.getStringField( "name" ) , "eliot" ) );
        verify( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
        verify( 1 == res.getIntField( "num" ) );
    }

    // cursor: all documents, a matching filter, and a non-matching filter
    conn.insert( ns ,BSON( "name" << "sara" << "num" << 2 ) );
    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        verify( count == 2 );
    }
    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 1 ) );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        verify( count == 1 );
    }
    {
        auto_ptr<DBClientCursor> cursor = conn.query( ns , BSON( "num" << 3 ) );
        int count = 0;
        while ( cursor->more() ) {
            count++;
            BSONObj obj = cursor->next();
        }
        verify( count == 0 );
    }

    // update
    {
        BSONObj res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        verify( ! strstr( res.getStringField( "name2" ) , "eliot" ) );

        BSONObj after = BSONObjBuilder().appendElements( res ).append( "name2" , "h" ).obj();

        // update with a non-matching selector is a no-op
        conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after );
        res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        verify( ! strstr( res.getStringField( "name2" ) , "eliot" ) );
        verify( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );

        // matching selector replaces the document with 'after'
        conn.update( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() , after );
        res = conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() );
        verify( strstr( res.getStringField( "name" ) , "eliot" ) );
        verify( strstr( res.getStringField( "name2" ) , "h" ) );
        verify( conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() ).isEmpty() );

        // upsert
        conn.update( ns , BSONObjBuilder().append( "name" , "eliot2" ).obj() , after , 1 );
        verify( ! conn.findOne( ns , BSONObjBuilder().append( "name" , "eliot" ).obj() ).isEmpty() );
    }

    {
        // ensure index: returns true when created, false when it already exists
        verify( conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
        verify( ! conn.ensureIndex( ns , BSON( "name" << 1 ) ) );
    }

    {
        // 5 second TTL index
        const char * ttlns = "test.ttltest1";
        conn.dropCollection( ttlns );
        {
            mongo::BSONObjBuilder b;
            b.appendTimeT("ttltime", time(0));
            b.append("name", "foo");
            conn.insert(ttlns, b.obj());
        }
        conn.ensureIndex(ttlns, BSON("ttltime" << 1), false, false, "", true, false, -1, 5);
        verify(!conn.findOne(ttlns, BSONObjBuilder().append("name", "foo").obj()).isEmpty());
        // Sleep 66 seconds, 60 seconds for the TTL loop, 5 seconds for the TTL and 1 to ensure
        sleepsecs(66);
        verify(conn.findOne(ttlns, BSONObjBuilder().append("name", "foo").obj()).isEmpty());
    }

    {
        // hint related tests
        // tokumx doesn't reorder documents just because you updated one, what even is that
        verify( conn.findOne(ns, "{}")["name"].str() == "eliot" );
        verify( conn.findOne(ns, "{ name : 'sara' }")["name"].str() == "sara" );
        verify( conn.getLastError() == "" );

        // nonexistent index test
        bool asserted = false;
        try {
            conn.findOne(ns, Query("{name:\"eliot\"}").hint("{foo:1}"));
        }
        catch ( ... ) {
            asserted = true;
        }
        verify( asserted );

        //existing index
        verify( conn.findOne(ns, Query("{name:'eliot'}").hint("{name:1}")).hasElement("name") );

        // run validate
        verify( conn.validate( ns ) );
    }

    {
        // timestamp test: the server must advance ts monotonically on update
        const char * tsns = "test.tstest1";
        conn.dropCollection( tsns );
        {
            mongo::BSONObjBuilder b;
            b.appendTimestamp( "ts" );
            conn.insert( tsns , b.obj() );
        }
        mongo::BSONObj out = conn.findOne( tsns , mongo::BSONObj() );
        Date_t oldTime = out["ts"].timestampTime();
        unsigned int oldInc = out["ts"].timestampInc();
        {
            mongo::BSONObjBuilder b1;
            b1.append( out["_id"] );
            mongo::BSONObjBuilder b2;
            b2.append( out["_id"] );
            b2.appendTimestamp( "ts" );
            conn.update( tsns , b1.obj() , b2.obj() );
        }
        BSONObj found = conn.findOne( tsns , mongo::BSONObj() );
        cout << "old: " << out << "\nnew: " << found << endl;
        verify( ( oldTime < found["ts"].timestampTime() ) ||
                ( oldTime == found["ts"].timestampTime() && oldInc < found["ts"].timestampInc() ) );
    }

    {
        // check that killcursors doesn't affect last error
        verify( conn.getLastError().empty() );

        BufBuilder b;
        b.appendNum( (int)0 ); // reserved
        b.appendNum( (int)-1 ); // invalid # of cursors triggers exception
        b.appendNum( (int)-1 ); // bogus cursor id

        Message m;
        m.setData( dbKillCursors, b.buf(), b.len() );

        // say() is protected in DBClientConnection, so get superclass
        static_cast< DBConnector* >( &conn )->say( m );

        verify( conn.getLastError().empty() );
    }

    {
        // enumerate databases and the collections of "test"
        list<string> l = conn.getDatabaseNames();
        for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
            cout << "db name : " << *i << endl;
        }

        l = conn.getCollectionNames( "test" );
        for ( list<string>::iterator i = l.begin(); i != l.end(); i++ ) {
            cout << "coll name : " << *i << endl;
        }
    }

    {
        //Map Reduce (this mostly just tests that it compiles with all output types)
        const string ns = "test.mr";
        conn.insert(ns, BSON("a" << 1));
        conn.insert(ns, BSON("a" << 1));

        const char* map = "function() { emit(this.a, 1); }";
        const char* reduce = "function(key, values) { return Array.sum(values); }";

        const string outcoll = ns + ".out";

        BSONObj out;
        out = conn.mapreduce(ns, map, reduce, BSONObj()); // default to inline
        //MONGO_PRINT(out);
        out = conn.mapreduce(ns, map, reduce, BSONObj(), outcoll);
        //MONGO_PRINT(out);
        out = conn.mapreduce(ns, map, reduce, BSONObj(), outcoll.c_str());
        //MONGO_PRINT(out);
        out = conn.mapreduce(ns, map, reduce, BSONObj(), BSON("reduce" << outcoll));
        //MONGO_PRINT(out);
    }

    {
        // test timeouts: a 2-second-timeout connection must fail a 5s eval
        DBClientConnection conn( true , 0 , 2 );
        if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) {
            cout << "couldn't connect : " << errmsg << endl;
            throw -11;
        }
        conn.insert( "test.totest" , BSON( "x" << 1 ) );
        BSONObj res;

        bool gotError = false;
        verify( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
        try {
            conn.eval( "test" , "sleep(5000); return db.totest.findOne().x" , res );
        }
        catch ( std::exception& e ) {
            gotError = true;
            log() << e.what() << endl;
        }
        verify( gotError );
        // sleep so the server isn't locked anymore
        sleepsecs( 4 );

        verify( conn.eval( "test" , "return db.totest.findOne().x" , res ) );
    }

    cout << "client test finished!" << endl;
    return EXIT_SUCCESS;
}
// liberally cribbed from user_prio.cpp
// Explodes the negotiator/accountant ClassAd — which packs one submitter per
// "<Attr>%d" attribute group (Name1, Priority1, ...) — into individual sample
// documents in the accountant stats collection. Submitters whose
// LastUsageTime is older than ODS_ACCOUNTANT_LAST_USAGE seconds (default 24h)
// are skipped, unless the collection is still empty.
// @param ad   the accountant ClassAd containing NumSubmittors attr groups
// @param ops  ODS wrapper owning the mongodb connection (m_db_conn)
// @param ts   sample timestamp written as the "ts" field of every record
void plumage::stats::processAccountantStats(ClassAd* ad, ODSMongodbOps* ops, Date_t& ts) {
    // attr%d holders...sadly reverting back to MyString for convenience of formatstr
    MyString attrName, attrPrio, attrResUsed, attrWtResUsed, attrFactor, attrBeginUsage, attrAccUsage;
    MyString attrLastUsage, attrAcctGroup, attrIsAcctGroup;
    MyString attrConfigQuota, attrEffectiveQuota, attrSubtreeQuota, attrSurplusPolicy;

    // values
    string name, acctGroup, surplusPolicy;
    float priority, factor, wtResUsed, configQuota, effectiveQuota, subtreeQuota, accUsage = -1;
    int resUsed, beginUsage, lastUsage;
    resUsed = beginUsage = lastUsage = 0;
    bool isAcctGroup;

    DBClientConnection* conn = ops->m_db_conn;
    // sample-collection indexes: newest-first timestamp/last-usage, name
    conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "ts" << -1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "lu" << -1 ));
    conn->ensureIndex(DB_STATS_SAMPLES_ACCOUNTANT, BSON( "n" << 1 ));

    unsigned long long acct_count = conn->count(DB_STATS_SAMPLES_ACCOUNTANT);

    // eventhough the Accountant doesn't forget
    // we don't care about stale submitters (default: last 24 hours)
    int cfg_last_usage = param_integer("ODS_ACCOUNTANT_LAST_USAGE", 60*60*24);
    int minLastUsageTime = time(0)-cfg_last_usage;

    int numElem = -1;
    ad->LookupInteger( "NumSubmittors", numElem );  // attr groups are 1-based

    for( int i=1; i<=numElem; i++) {
        priority=0;
        isAcctGroup = false;

        // skip stale records unless we have none
        attrLastUsage.formatstr("LastUsageTime%d", i );
        ad->LookupInteger ( attrLastUsage.Value(), lastUsage );
        if (lastUsage < minLastUsageTime && acct_count > 0)
            continue;

        // parse the horrid classad
        attrName.formatstr("Name%d", i );
        attrPrio.formatstr("Priority%d", i );
        attrResUsed.formatstr("ResourcesUsed%d", i );
        attrWtResUsed.formatstr("WeightedResourcesUsed%d", i );
        attrFactor.formatstr("PriorityFactor%d", i );
        attrBeginUsage.formatstr("BeginUsageTime%d", i );
        attrAccUsage.formatstr("WeightedAccumulatedUsage%d", i );
        attrAcctGroup.formatstr("AccountingGroup%d", i);
        attrIsAcctGroup.formatstr("IsAccountingGroup%d", i);
        attrConfigQuota.formatstr("ConfigQuota%d", i);
        attrEffectiveQuota.formatstr("EffectiveQuota%d", i);
        attrSubtreeQuota.formatstr("SubtreeQuota%d", i);
        attrSurplusPolicy.formatstr("SurplusPolicy%d", i);

        ad->LookupString ( attrName.Value(), name );
        ad->LookupFloat ( attrPrio.Value(), priority );
        ad->LookupFloat ( attrFactor.Value(), factor );
        ad->LookupFloat ( attrAccUsage.Value(), accUsage );
        ad->LookupInteger ( attrBeginUsage.Value(), beginUsage );
        ad->LookupInteger ( attrResUsed.Value(), resUsed );
        ad->LookupBool ( attrIsAcctGroup.Value(), isAcctGroup);
        ad->LookupFloat ( attrConfigQuota.Value(), configQuota );
        ad->LookupFloat ( attrEffectiveQuota.Value(), effectiveQuota );
        ad->LookupFloat ( attrSubtreeQuota.Value(), subtreeQuota );
        ad->LookupString ( attrSurplusPolicy.Value(), surplusPolicy );

        // weighted usage falls back to the unweighted count when absent
        if( !ad->LookupFloat( attrWtResUsed.Value(), wtResUsed ) ) {
            wtResUsed = resUsed;
        }
        if (!ad->LookupString(attrAcctGroup.Value(), acctGroup)) {
            acctGroup = "<none>";
        }

        BSONObjBuilder bob;
        bob.appendDate("ts",ts);
        bob.append("n",name);
        bob.append("ag",acctGroup);
        bob.appendAsNumber("prio",formatReal(priority));
        bob.appendAsNumber("fac",formatReal(factor));
        bob.append("ru",resUsed);
        bob.append("wru",wtResUsed);
        // condor timestamps need massaging when going in the db
        bob.appendDate("bu",static_cast<unsigned long long>(beginUsage)*1000);
        bob.appendDate("lu",static_cast<unsigned long long>(lastUsage)*1000);
        bob.appendAsNumber("au",formatReal(accUsage));
        bob.appendAsNumber("cq",formatReal(configQuota));
        bob.appendAsNumber("eq",formatReal(effectiveQuota));
        bob.appendAsNumber("sq",formatReal(subtreeQuota));
        if (!surplusPolicy.empty()) bob.append("sp",surplusPolicy);

        conn->insert(DB_STATS_SAMPLES_ACCOUNTANT,bob.obj());
    }
}