int main( int argc, const char **argv ) { const char *port = "27017"; if ( argc != 1 ) { if ( argc != 3 ) throw -12; port = argv[ 2 ]; } DBClientConnection conn; string errmsg; if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) { cout << "couldn't connect : " << errmsg << endl; throw -11; } const char * ns = "test.second"; conn.remove( ns , BSONObj() ); conn.insert( ns , BSON( "name" << "eliot" << "num" << 17 ) ); conn.insert( ns , BSON( "name" << "sara" << "num" << 24 ) ); auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() ); cout << "using cursor" << endl; while ( cursor->more() ) { BSONObj obj = cursor->next(); cout << "\t" << obj.jsonString() << endl; } conn.ensureIndex( ns , BSON( "name" << 1 << "num" << -1 ) ); }
// throws DBException void buildAnIndex(const std::string& ns, NamespaceDetails* d, IndexDetails& idx, bool mayInterrupt) { BSONObj idxInfo = idx.info.obj(); MONGO_TLOG(0) << "build index on: " << ns << " properties: " << idxInfo.jsonString() << endl; Timer t; unsigned long long n; verify( Lock::isWriteLocked(ns) ); if( inDBRepair || !idxInfo["background"].trueValue() ) { int idxNo = IndexBuildsInProgress::get(ns.c_str(), idx.info.obj()["name"].valuestr()); n = BtreeBasedBuilder::fastBuildIndex(ns.c_str(), d, idx, mayInterrupt, idxNo); verify( !idx.head.isNull() ); } else { BackgroundIndexBuildJob j(ns.c_str()); n = j.go(ns, d, idx); } MONGO_TLOG(0) << "build index done. scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl; }
// findOne with sync-cluster semantics.  Plain reads delegate to the base
// class.  A ".$cmd" namespace whose command is classified as a write
// (lockType > 0) must run on every node: fsync-prepare first, issue the
// command everywhere, verify getLastError, and require an OK reply from
// each node before returning the first response.
BSONObj SyncClusterConnection::findOne(const string &ns, Query query, const BSONObj *fieldsToReturn, int queryOptions) {

    if ( ns.find( ".$cmd" ) != string::npos ){
        // The command name is the first field of the query object.
        string cmdName = query.obj.firstElement().fieldName();

        int lockType = _lockType( cmdName );

        if ( lockType > 0 ){ // write $cmd
            string errmsg;
            if ( ! prepare( errmsg ) )
                throw UserException( 13104 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg );

            // Run the command on every node; getOwned() keeps each reply
            // valid after the underlying buffer is recycled.
            vector<BSONObj> all;
            for ( size_t i=0; i<_conns.size(); i++ ){
                all.push_back( _conns[i]->findOne( ns , query , 0 , queryOptions ).getOwned() );
            }

            _checkLast();

            // Any node that did not report OK fails the whole operation.
            for ( size_t i=0; i<all.size(); i++ ){
                BSONObj temp = all[i];
                if ( isOk( temp ) )
                    continue;
                stringstream ss;
                ss << "write $cmd failed on a shard: " << temp.jsonString();
                ss << " " << _conns[i]->toString();
                throw UserException( 13105 , ss.str() );
            }

            return all[0];
        }
    }

    return DBClientBase::findOne( ns , query , fieldsToReturn , queryOptions );
}
// throws DBException void buildAnIndex( Collection* collection, IndexDetails& idx, bool mayInterrupt ) { string ns = collection->ns().ns(); // our copy BSONObj idxInfo = idx.info.obj(); MONGO_TLOG(0) << "build index on: " << ns << " properties: " << idxInfo.jsonString() << endl; audit::logCreateIndex( currentClient.get(), &idxInfo, idx.indexName(), ns ); Timer t; unsigned long long n; verify( Lock::isWriteLocked( ns ) ); if( inDBRepair || !idxInfo["background"].trueValue() ) { int idxNo = collection->details()->findIndexByName( idx.info.obj()["name"].valuestr(), true ); verify( idxNo >= 0 ); n = BtreeBasedBuilder::fastBuildIndex( ns.c_str(), collection->details(), idx, mayInterrupt, idxNo ); verify( !idx.head.isNull() ); } else { BackgroundIndexBuildJob j( ns ); n = j.go( collection, idx ); } MONGO_TLOG(0) << "build index done. scanned " << n << " total records. " << t.millis() / 1000.0 << " secs" << endl; }
// v8 binding for collection insert: expects two JS arguments, the second
// being the document to store (GETNS resolves the namespace).  A fresh
// ObjectId is assigned to "_id" when the caller did not supply one.
// Returns the inserted object; raises a JS exception on socket error.
v8::Handle<v8::Value> mongoInsert(const v8::Arguments& args){
    jsassert( args.Length() == 2 , "insert needs 2 args" );
    jsassert( args[1]->IsObject() , "have to insert an object" );

    DBClientConnection * conn = getConnection( args );
    GETNS;

    v8::Handle<v8::Object> doc = args[1]->ToObject();

    // Auto-generate an _id so every stored document has one.
    if ( doc->Has( String::New( "_id" ) ) == false ){
        v8::Handle<v8::Value> ctorArgs[1];
        doc->Set( String::New( "_id" ) , getObjectIdCons()->NewInstance( 0 , ctorArgs ) );
    }

    BSONObj toSave = v8ToMongo( doc );
    DDD( "want to save : " << toSave.jsonString() );

    try {
        conn->insert( ns , toSave );
    }
    catch ( ... ){
        // Surface transport failures to the JS caller as an exception.
        return v8::ThrowException( v8::String::New( "socket error on insert" ) );
    }

    return args[1];
}
// Batched insert across all nodes of the sync cluster.
// Every document must already carry an _id so the write is identical on
// each node.  A single fsync "prepare" runs up front, then each document
// is inserted with its own getLastError per node; a final fsync'ed
// getLastError (_checkLast) validates the whole batch.
void SyncClusterConnection::insert( const string &ns, const vector< BSONObj >& v , int flags) {

    if (v.size() == 1){
        // Single-document batches reuse the scalar overload.
        insert(ns, v[0], flags);
        return;
    }

    // Pre-validate so the batch is rejected before any write happens.
    for (vector<BSONObj>::const_iterator it = v.begin(); it != v.end(); ++it ) {
        BSONObj obj = *it;
        if ( obj["_id"].type() == EOO ) {
            string assertMsg = "SyncClusterConnection::insert (batched) obj misses an _id: ";
            uasserted( 16743, assertMsg + obj.jsonString() );
        }
    }

    // fsync all connections before starting the batch.
    string errmsg;
    if ( ! prepare( errmsg ) ) {
        string assertMsg = "SyncClusterConnection::insert (batched) prepare failed: ";
        throw UserException( 16744, assertMsg + errmsg );
    }

    // We still want one getlasterror per document, even if they're batched.
    for ( size_t i=0; i<_conns.size(); i++ ) {
        for ( vector<BSONObj>::const_iterator it = v.begin(); it != v.end(); ++it ) {
            _conns[i]->insert( ns, *it, flags );
            _conns[i]->getLastErrorDetailed();
        }
    }

    // We issue a final getlasterror, but this time with an fsync.
    _checkLast();
}
inline int test_second(){ boost::scoped_ptr<DBClientBase> conn(mongo_wrapper::instance().connect()); const char * ns = "test.second"; conn->remove(ns, BSONObj()); conn->insert(ns, BSON("name" << "eliot" << "num" << 17)); conn->insert(ns, BSON("name" << "sara" << "num" << 24)); std::auto_ptr<DBClientCursor> cursor = conn->query(ns, BSONObj()); if (!cursor.get()) { cout << "query failure" << endl; return EXIT_FAILURE; } cout << "using cursor" << endl; while (cursor->more()) { BSONObj obj = cursor->next(); cout << "\t" << obj.jsonString() << endl; } conn->createIndex(ns, BSON("name" << 1 << "num" << -1)); /////////////////////////////////////////////// cout << "now using $where" << endl; Query q = Query("{}").where("this.name == name", BSON("name" << "sara")); cursor = conn->query(ns, q); if (!cursor.get()) { cout << "query failure" << endl; return EXIT_FAILURE; } int num = 0; while (cursor->more()) { BSONObj obj = cursor->next(); cout << "\t" << obj.jsonString() << endl; num++; } verify(num == 1); return EXIT_SUCCESS; }
int main( int argc, const char **argv ) { const char *port = "27017"; if ( argc != 1 ) { if ( argc != 3 ) throw -12; port = argv[ 2 ]; } DBClientConnection conn; string errmsg; if ( ! conn.connect( string( "127.0.0.1:" ) + port , errmsg ) ) { cout << "couldn't connect : " << errmsg << endl; throw -11; } const char * ns = "test.where"; conn.remove( ns , BSONObj() ); conn.insert( ns , BSON( "name" << "eliot" << "num" << 17 ) ); conn.insert( ns , BSON( "name" << "sara" << "num" << 24 ) ); auto_ptr<DBClientCursor> cursor = conn.query( ns , BSONObj() ); while ( cursor->more() ) { BSONObj obj = cursor->next(); cout << "\t" << obj.jsonString() << endl; } cout << "now using $where" << endl; Query q = Query("{}").where("this.name == name" , BSON( "name" << "sara" )); cursor = conn.query( ns , q ); int num = 0; while ( cursor->more() ) { BSONObj obj = cursor->next(); cout << "\t" << obj.jsonString() << endl; num++; } MONGO_verify( num == 1 ); }
// Seeds the demo collection with one document per GeoJSON geometry type
// (Point, LineString, MultiLineString, MultiPolygon, GeometryCollection),
// printing several of the generated BSON representations along the way,
// then creates a 2dsphere index on the shared location field.
void insertGeoData(DBClientBase* conn) {
    // Four points spread around the globe.
    Point p1(BSON("type" << "Point" << "coordinates" << BSON_ARRAY(-5.0 << -5.0)));
    Point p2(BSON("type" << "Point" << "coordinates" << BSON_ARRAY(100.0 << 0.0)));
    Point p3(BSON("type" << "Point" << "coordinates" << BSON_ARRAY(20.0 << 30.0)));
    Point p4(BSON("type" << "Point" << "coordinates" << BSON_ARRAY(50.0 << 50.0)));
    cout << p4.toBSON().jsonString() << endl;

    // One line, reused twice to form a MultiLineString.
    BSONObj lineBson = BSON("type" << "LineString" <<
                            "coordinates" << BSON_ARRAY(BSON_ARRAY(0.0 << 10.0) <<
                                                        BSON_ARRAY(100.0 << 10.0)));
    LineString line(lineBson);

    std::vector<LineString> lineStrings;
    lineStrings.push_back(line);
    lineStrings.push_back(line);
    MultiLineString mls(lineStrings);

    // NOTE(review): constructed but never inserted below — presumably just
    // exercising the constructor; confirm whether it was meant to be stored.
    geo::coords2dgeographic::Polygon poly(lineStrings);

    // A single-polygon MultiPolygon whose outer ring closes on itself.
    BSONObj mpBson = BSON("type" << "MultiPolygon" <<
                          "coordinates" << BSON_ARRAY(
                              BSON_ARRAY(
                                  BSON_ARRAY(
                                      BSON_ARRAY(0.0 << 10.0) <<
                                      BSON_ARRAY(100.0 << 10.0) <<
                                      BSON_ARRAY(5.0 << 5.0) <<
                                      BSON_ARRAY(0.0 << 10.0)))));
    cout << "MULTIPOLYGON BSON:" << endl;
    cout << mpBson.jsonString() << endl;
    MultiPolygon mp(mpBson);
    cout << mp.toBSON().jsonString() << endl;

    // Heterogeneous collection: two points plus the line.
    BSONObj gcolBson = BSON("type" << "GeometryCollection" <<
                            "geometries" << BSON_ARRAY(p1.toBSON() << p2.toBSON() << line.toBSON()));
    GeometryCollection gcol(gcolBson);
    cout << "GEO COLLECTION BSON:" << endl;
    cout << gcol.toBSON() << endl;
    const vector<const GeoObj*>& geoms = gcol.getGeometries();
    for (size_t i = 0; i < geoms.size(); ++i) {
        cout << geoms[i]->getType() << endl;
    }

    // Store one document per geometry under the shared location field.
    conn->insert(kDbCollectionName, BSON(kLocField << p1.toBSON()));
    conn->insert(kDbCollectionName, BSON(kLocField << p2.toBSON()));
    conn->insert(kDbCollectionName, BSON(kLocField << p3.toBSON()));
    conn->insert(kDbCollectionName, BSON(kLocField << p4.toBSON()));
    conn->insert(kDbCollectionName, BSON(kLocField << line.toBSON()));
    conn->insert(kDbCollectionName, BSON(kLocField << mls.toBSON()));
    conn->insert(kDbCollectionName, BSON(kLocField << mp.toBSON()));
    conn->insert(kDbCollectionName, BSON(kLocField << gcol.toBSON()));

    // Geo queries require the 2dsphere index.
    conn->createIndex(kDbCollectionName, fromjson("{loc:\"2dsphere\"}"));

    cout << "Coordinates p1 toBSON().toString():" << endl;
    cout << p1.getCoordinates().toBSON().toString() << endl;
    cout << "MultiLineString mls toBSON().jsonString():" << endl;
    cout << mls.toBSON().jsonString() << endl << endl;
}
// Ensures `conn` has been told the current shard version for `ns` before a
// versioned operation is sent over it.  A per-(connection, ns) sequence
// number is compared with the ChunkManager's sequence number so
// setShardVersion is only re-issued after a chunk reload.  On rejection,
// retries once in authoritative mode; a second failure massert's.
void checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative ){
    // TODO: cache, optimize, etc...

    // Make sure writebacks for this connection can be picked up.
    WriteBackListener::init( conn );

    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf )
        return;

    ShardChunkVersion version = 0;
    unsigned long long officialSequenceNumber = 0;

    ChunkManagerPtr manager;
    const bool isSharded = conf->isSharded( ns );
    if ( isSharded ){
        manager = conf->getChunkManager( ns , authoritative );
        officialSequenceNumber = manager->getSequenceNumber();
    }

    // What sequence did we last sync this connection to?  (reference into
    // the shared map — updated in place on success below)
    unsigned long long & sequenceNumber = checkShardVersionLastSequence[ make_pair(&conn,ns) ];
    if ( sequenceNumber == officialSequenceNumber )
        return;  // already up to date, nothing to send

    if ( isSharded ){
        // Version this particular shard should be at.
        version = manager->getVersion( Shard::make( conn.getServerAddress() ) );
    }

    log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
           << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
           << " version: " << version << " manager: " << manager.get() << endl;

    BSONObj result;
    if ( setShardVersion( conn , ns , version , authoritative , result ) ){
        // success!
        log(1) << " setShardVersion success!" << endl;
        sequenceNumber = officialSequenceNumber;
        dassert( sequenceNumber == checkShardVersionLastSequence[ make_pair(&conn,ns) ] );
        return;
    }

    log(1) << " setShardVersion failed!\n" << result << endl;

    if ( result.getBoolField( "need_authoritative" ) )
        massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );

    if ( ! authoritative ){
        // Retry once, this time authoritatively (forces a config reload).
        checkShardVersion( conn , ns , 1 );
        return;
    }

    log() << " setShardVersion failed: " << result << endl;
    massert( 10429 , (string)"setShardVersion failed! " + result.jsonString() , 0 );
}
void run() { ASSERT( db.createCollection(ns()) ); BSONObjBuilder cmd; cmd.appendSymbol("dropIndexes", nsColl()); // Use Symbol for SERVER-16260 cmd.append("index", "*"); BSONObj result; bool ok = db.runCommand(nsDb(), cmd.obj(), result); log() << result.jsonString(); ASSERT(ok); }
// Emits one received document according to the configured output mode:
// JSON prints TenGen-flavored JSON, DEBUG routes to the debug dumper, and
// any other mode is reported as an error on stderr.
virtual void gotObject( const BSONObj& o ) {
    if ( _type == JSON ) {
        cout << o.jsonString( TenGen ) << endl;
    }
    else if ( _type == DEBUG ) {
        debug(o);
    }
    else {
        cerr << "bad type? : " << _type << endl;
    }
}
// Issues a "create" command for the current collection (_curcoll) using
// the options captured in `cmdObj` (typically from metadata.json).  The
// command object is rebuilt so the "create" field names the target
// collection and Undefined-typed fields are dropped.  If the collection
// already exists the create is skipped, with a warning when the existing
// options differ from the metadata.
void createCollectionWithOptions(BSONObj cmdObj) {
    // Create a new cmdObj to skip undefined fields and fix collection name
    BSONObjBuilder bo;

    // Add a "create" field if it doesn't exist
    if (!cmdObj.hasField("create")) {
        bo.append("create", _curcoll);
    }

    BSONObjIterator i(cmdObj);
    while ( i.more() ) {
        BSONElement e = i.next();

        // Replace the "create" field with the name of the collection we are actually creating
        if (strcmp(e.fieldName(), "create") == 0) {
            bo.append("create", _curcoll);
        }
        else {
            if (e.type() == Undefined) {
                log() << _curns << ": skipping undefined field: " << e.fieldName() << endl;
            }
            else {
                bo.append(e);
            }
        }
    }

    cmdObj = bo.obj();

    // Look up the existing namespace entry (options only) to decide
    // whether the collection needs to be created at all.
    BSONObj fields = BSON("options" << 1);
    scoped_ptr<DBClientCursor> cursor(conn().query(_curdb + ".system.namespaces", Query(BSON("name" << _curns)), 0, 0, &fields));

    bool createColl = true;
    if (cursor->more()) {
        createColl = false;
        BSONObj obj = cursor->next();
        if (!obj.hasField("options") || !optionsSame(cmdObj, obj["options"].Obj())) {
            log() << "WARNING: collection " << _curns << " exists with different options than are in the metadata.json file and not using --drop. Options in the metadata file will be ignored." << endl;
        }
    }

    if (!createColl) {
        return;
    }

    BSONObj info;
    if (!conn().runCommand(_curdb, cmdObj, info)) {
        uasserted(15936, "Creating collection " + _curns + " failed. Errmsg: " + info["errmsg"].String());
    }
    else {
        log() << "\tCreated collection " << _curns << " with options: " << cmdObj.jsonString() << endl;
    }
}
void run() { ASSERT( db.createCollection(ns()) ); { BSONObjBuilder cmd; cmd.appendSymbol("touch", nsColl()); // Use Symbol for SERVER-16260 cmd.append("data", true); cmd.append("index", true); BSONObj result; bool ok = db.runCommand(nsDb(), cmd.obj(), result); log() << result.jsonString(); ASSERT(ok || result["code"].Int() == ErrorCodes::CommandNotSupported); } }
void SyncClusterConnection::insert( const string &ns, BSONObj obj , int flags) { uassert(13119, (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString(), NamespaceString(ns).coll == "system.indexes" || obj["_id"].type()); string errmsg; if ( ! prepare( errmsg ) ) throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg ); for ( size_t i=0; i<_conns.size(); i++ ) { _conns[i]->insert( ns , obj , flags); } _checkLast(); }
// Issues a "create" command for the current collection (_curcoll) using
// the options in `obj` (typically parsed from metadata.json).  The object
// is rebuilt so "create" names the target collection and Undefined fields
// are dropped.  If the collection already exists, creation is skipped and
// a warning is logged when the stored options differ from the metadata.
void createCollectionWithOptions(BSONObj obj) {
    BSONObjIterator i(obj);

    // Rebuild obj as a command object for the "create" command.
    // - {create: <name>} comes first, where <name> is the new name for the collection
    // - elements with type Undefined get skipped over
    BSONObjBuilder bo;
    bo.append("create", _curcoll);
    while (i.more()) {
        BSONElement e = i.next();

        // The original "create" value is replaced by _curcoll above.
        if (strcmp(e.fieldName(), "create") == 0) {
            continue;
        }

        if (e.type() == Undefined) {
            log() << _curns << ": skipping undefined field: " << e.fieldName() << endl;
            continue;
        }

        bo.append(e);
    }
    obj = bo.obj();

    // Fetch the existing namespace entry's options (if any) so we can
    // skip creation and warn on option mismatches.
    BSONObj fields = BSON("options" << 1);
    scoped_ptr<DBClientCursor> cursor(conn().query(_curdb + ".system.namespaces", Query(BSON("name" << _curns)), 0, 0, &fields));

    bool createColl = true;
    if (cursor->more()) {
        createColl = false;
        BSONObj nsObj = cursor->next();
        if (!nsObj.hasField("options") || !optionsSame(obj, nsObj["options"].Obj())) {
            log() << "WARNING: collection " << _curns << " exists with different options than are in the metadata.json file and not using --drop. Options in the metadata file will be ignored." << endl;
        }
    }

    if (!createColl) {
        return;
    }

    BSONObj info;
    if (!conn().runCommand(_curdb, obj, info)) {
        uasserted(15936, "Creating collection " + _curns + " failed. Errmsg: " + info["errmsg"].String());
    }
    else {
        log() << "\tCreated collection " << _curns << " with options: " << obj.jsonString() << endl;
    }
}
int main( int argc, const char **argv ) { const char *port = "27017"; if ( argc != 1 ) { if ( argc != 3 ) { cout << "need to pass port as second param" << endl; return EXIT_FAILURE; } port = argv[ 2 ]; } Status status = client::initialize(); if ( !status.isOK() ) { std::cout << "failed to initialize the client driver: " << status.toString() << endl; return EXIT_FAILURE; } ScopedDbConnection conn(string( "127.0.0.1:" ) + port); const char * ns = "test.second"; conn->remove( ns , BSONObj() ); conn->insert( ns , BSON( "name" << "eliot" << "num" << 17 ) ); conn->insert( ns , BSON( "name" << "sara" << "num" << 24 ) ); std::auto_ptr<DBClientCursor> cursor = conn->query( ns , BSONObj() ); if (!cursor.get()) { cout << "query failure" << endl; return EXIT_FAILURE; } cout << "using cursor" << endl; while ( cursor->more() ) { BSONObj obj = cursor->next(); cout << "\t" << obj.jsonString() << endl; } conn->ensureIndex( ns , BSON( "name" << 1 << "num" << -1 ) ); conn.done(); return EXIT_SUCCESS; }
void run() { // Subset of geo_haystack1.js int n = 0; for (int x = 0; x < 20; x++) { for (int y = 0; y < 20; y++) { db.insert(ns(), BSON("_id" << n << "loc" << BSON_ARRAY(x << y) << "z" << n % 5)); n++; } } // Build geoHaystack index. Can's use db.ensureIndex, no way to pass "bucketSize". // So run createIndexes command instead. // // Shell example: // t.ensureIndex( { loc : "geoHaystack" , z : 1 }, { bucketSize : .7 } ); { BSONObjBuilder cmd; cmd.append("createIndexes", nsColl()); cmd.append("indexes", BSON_ARRAY( BSON("key" << BSON("loc" << "geoHaystack" << "z" << 1.0) << "name" << "loc_geoHaystack_z_1" << "bucketSize" << static_cast<double>(0.7)) )); BSONObj result; ASSERT( db.runCommand(nsDb(), cmd.obj(), result) ); } { BSONObjBuilder cmd; cmd.appendSymbol("geoSearch", nsColl()); // Use Symbol for SERVER-16260 cmd.append("near", BSON_ARRAY(7 << 8)); cmd.append("maxDistance", 3); cmd.append("search", BSON("z" << 3)); BSONObj result; bool ok = db.runCommand(nsDb(), cmd.obj(), result); log() << result.jsonString(); ASSERT(ok); } }
// Looks up the payment with the given hex ObjectId string and returns it
// serialized as JSON.  Returns an empty string when the payment does not
// exist or the query could not be executed.
// NOTE: OID(id) requires a well-formed 24-char hex string.
string GetPaymentJson(string id) {
    auto_ptr<DBClientCursor> cursor =
        db.query(PAYMENTS_COLLECTION_NAMESPASE, MONGO_QUERY("_id" << OID(id)));

    // BUGFIX: query() returns a null cursor on connection/transport
    // failure; the old code dereferenced it unconditionally.
    if (cursor.get() != NULL && cursor->more()) {
        BSONObj payment = cursor->next();
        return payment.jsonString();
    }

    return "";
}
// v8 binding for collection remove: expects two JS arguments, the second
// being the filter template (GETNS resolves the namespace).  Removes all
// matching documents; raises a JS exception on socket failure.
// Always yields undefined.
v8::Handle<v8::Value> mongoRemove(const v8::Arguments& args){
    jsassert( args.Length() == 2 , "remove needs 2 args" );
    jsassert( args[1]->IsObject() , "have to remove an object template" );

    DBClientConnection * conn = getConnection( args );
    GETNS;

    v8::Handle<v8::Object> pattern = args[1]->ToObject();
    BSONObj filter = v8ToMongo( pattern );
    DDD( "want to remove : " << filter.jsonString() );

    try {
        conn->remove( ns , filter );
    }
    catch ( ... ){
        // Surface transport failures to the JS caller as an exception.
        return v8::ThrowException( v8::String::New( "socket error on remove" ) );
    }

    return v8::Undefined();
}
void run() { ASSERT( db.createCollection(ns()) ); { BSONObjBuilder b; b.genOID(); b.append("name", "Tom"); b.append("rating", 0); db.insert(ns(), b.obj()); } BSONObjBuilder cmd; cmd.appendSymbol("findAndModify", nsColl()); // Use Symbol for SERVER-16260 cmd.append("update", BSON("$inc" << BSON("score" << 1)) ); cmd.append("new", true); BSONObj result; bool ok = db.runCommand(nsDb(), cmd.obj(), result); log() << result.jsonString(); ASSERT(ok); // TODO(kangas) test that Tom's score is 1 }
// Fetches the stored value for (ns, key) as JSON.
// Looks up a single document keyed by "key" in the resolved collection,
// parses its JSON form, and returns the embedded "object" field.
// Returns an empty JSON object when the key is absent; throws
// std::runtime_error on a driver error or corrupted (unparsable) data.
Json::Value mongo_storage_t::get(const std::string& ns, const std::string& key) {
    Json::Reader reader;
    Json::Value result(Json::objectValue);
    BSONObj object;

    try {
        ScopedDbConnection connection(m_url);
        object = connection->findOne(resolve(ns), BSON("key" << key));
        // Return the connection to the pool; intentionally skipped when
        // findOne throws so a bad connection is not reused.
        connection.done();
    } catch(const DBException& e) {
        // Normalize driver exceptions to a std exception for callers.
        throw std::runtime_error(e.what());
    }

    if(!object.isEmpty()) {
        if(reader.parse(object.jsonString(), result)) {
            return result["object"];
        } else {
            throw std::runtime_error("corrupted data in '" + ns + "'");
        }
    }

    return result;
}
// Serves a REST read over `ns` for the embedded HTTP server.
// Recognized params: html, skip, limit/count, one, and filter_<field>
// (numeric-looking values match as numbers, everything else as strings).
// "one=t..." returns a single document (404 when absent).  Output is HTML
// or a hand-built JSON envelope, with output caps of 4MB (html) / 50MB
// (json).  Returns true when responding in HTML mode.
bool handleRESTQuery( OperationContext* txn,
                      const std::string& ns,
                      const std::string& action,
                      BSONObj & params,
                      int & responseCode,
                      stringstream & out ) {
    Timer t;

    int html = _getOption( params["html"] , 0 );
    int skip = _getOption( params["skip"] , 0 );
    int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new

    int one = 0;
    if ( params["one"].type() == String && tolower( params["one"].valuestr()[0] ) == 't' ) {
        num = 1;
        one = 1;
    }

    // Build the filter from filter_<field> parameters.
    BSONObjBuilder queryBuilder;

    BSONObjIterator i(params);
    while ( i.more() ) {
        BSONElement e = i.next();
        string name = e.fieldName();
        if ( name.find( "filter_" ) != 0 )
            continue;

        string field = name.substr(7);
        const char * val = e.valuestr();

        char * temp;

        // TODO: this is how i guess if something is a number. pretty lame right now
        double number = strtod( val , &temp );
        if ( temp != val )
            queryBuilder.append( field , number );
        else
            queryBuilder.append( field , val );
    }

    BSONObj query = queryBuilder.obj();

    DBDirectClient db(txn);
    auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip );
    uassert( 13085 , "query failed for dbwebserver" , cursor.get() );

    // Single-document mode: emit one JSON object or a 404.
    if ( one ) {
        if ( cursor->more() ) {
            BSONObj obj = cursor->next();
            out << obj.jsonString(Strict,html?1:0) << '\n';
        }
        else {
            responseCode = 404;
        }
        return html != 0;
    }

    // Multi-document mode: open the HTML page or the JSON envelope.
    if( html ) {
        string title = string("query ") + ns;
        out << start(title)
            << p(title)
            << "<pre>";
    }
    else {
        out << "{\n";
        out << " \"offset\" : " << skip << ",\n";
        out << " \"rows\": [\n";
    }

    int howMany = 0;
    while ( cursor->more() ) {
        // JSON rows are comma-separated; HTML rows are not.
        if ( howMany++ && html == 0 )
            out << " ,\n";
        BSONObj obj = cursor->next();

        if( html ) {
            if( out.tellp() > 4 * 1024 * 1024 ) {
                out << "Stopping output: more than 4MB returned and in html mode\n";
                break;
            }
            out << obj.jsonString(Strict, html?1:0) << "\n\n";
        }
        else {
            if( out.tellp() > 50 * 1024 * 1024 ) // 50MB limit - we are using ram
                break;
            out << " " << obj.jsonString();
        }
    }

    // Close the page / envelope with summary metadata.
    if( html ) {
        out << "</pre>\n";
        if( howMany == 0 )
            out << p("Collection is empty");
        out << _end();
    }
    else {
        out << "\n ],\n\n";
        out << " \"total_rows\" : " << howMany << " ,\n";
        out << " \"query\" : " << query.jsonString() << " ,\n";
        out << " \"millis\" : " << t.millis() << '\n';
        out << "}\n";
    }

    return html != 0;
}
// mongoexport main loop (legacy version): streams every document matched
// by --query from the target namespace to stdout or --out, either as
// one JSON document per line or as CSV restricted to --fields.
// Returns 0 on success, non-zero on setup errors.
int run(){
    string ns;
    const bool csv = hasParam( "csv" );
    ostream *outPtr = &cout;
    string outfile = getParam( "out" );

    // Redirect output to --out, creating parent directories as needed.
    auto_ptr<ofstream> fileStream;
    if ( hasParam( "out" ) ){
        size_t idx = outfile.rfind( "/" );
        if ( idx != string::npos ){
            string dir = outfile.substr( 0 , idx + 1 );
            create_directories( dir );
        }
        ofstream * s = new ofstream( outfile.c_str() , ios_base::out );
        fileStream.reset( s );
        outPtr = s;
        if ( ! s->good() ){
            cerr << "couldn't open [" << outfile << "]" << endl;
            return -1;
        }
    }
    ostream &out = *outPtr;

    BSONObj * fieldsToReturn = 0;
    BSONObj realFieldsToReturn;

    try {
        ns = getNS();
    }
    catch (...) {
        printHelp(cerr);
        return 1;
    }

    auth();

    if ( hasParam( "fields" ) ){
        needFields();
        fieldsToReturn = &_fieldsObj;
    }

    if ( csv && _fields.size() == 0 ){
        cerr << "csv mode requires a field list" << endl;
        return -1;
    }

    // Snapshot the query; allow secondary reads and keep the cursor alive.
    auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , ((Query)(getParam( "query" , "" ))).snapshot() , 0 , 0 , fieldsToReturn , QueryOption_SlaveOk | QueryOption_NoCursorTimeout );

    // CSV header row.
    if ( csv ){
        for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ){
            if ( i != _fields.begin() )
                out << ",";
            out << *i;
        }
        out << endl;
    }

    long long num = 0;
    while ( cursor->more() ) {
        num++;
        BSONObj obj = cursor->next();

        if ( csv ){
            // One column per requested (possibly dotted) field; missing
            // fields produce an empty cell.
            for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ){
                if ( i != _fields.begin() )
                    out << ",";
                const BSONElement & e = obj.getFieldDotted(i->c_str());
                if ( ! e.eoo() ){
                    out << e.jsonString( Strict , false );
                }
            }
            out << endl;
        }
        else {
            out << obj.jsonString() << endl;
        }
    }

    cerr << "exported " << num << " records" << endl;

    return 0;
}
/**
 * Ensures `conn` has been sent the current shard version for `ns` before a
 * versioned operation goes over it.  The per-connection sequence number in
 * connectionShardStatus is compared against the ChunkManager's sequence
 * number; setShardVersion is only (re-)issued when they differ.  On
 * rejection the call retries authoritatively, up to 4 attempts, before
 * massert'ing.
 * @return true if had to do something
 */
bool checkShardVersion( DBClientBase& conn , const string& ns , bool authoritative , int tryNumber ) {
    // TODO: cache, optimize, etc...

    // Make sure writebacks for this connection can be picked up.
    WriteBackListener::init( conn );

    DBConfigPtr conf = grid.getDBConfig( ns );
    if ( ! conf )
        return false;

    unsigned long long officialSequenceNumber = 0;

    ChunkManagerPtr manager;
    const bool isSharded = conf->isSharded( ns );
    if ( isSharded ) {
        manager = conf->getChunkManager( ns , authoritative );
        officialSequenceNumber = manager->getSequenceNumber();
    }

    // has the ChunkManager been reloaded since the last time we updated the connection-level version?
    // (ie, last time we issued the setShardVersions below)
    unsigned long long sequenceNumber = connectionShardStatus.getSequence(&conn,ns);
    if ( sequenceNumber == officialSequenceNumber ) {
        return false;
    }

    // Version this particular shard should be at.
    ShardChunkVersion version = 0;
    if ( isSharded ) {
        version = manager->getVersion( Shard::make( conn.getServerAddress() ) );
    }

    log(2) << " have to set shard version for conn: " << &conn << " ns:" << ns
           << " my last seq: " << sequenceNumber << " current: " << officialSequenceNumber
           << " version: " << version << " manager: " << manager.get() << endl;

    BSONObj result;
    if ( setShardVersion( conn , ns , version , authoritative , result ) ) {
        // success!
        LOG(1) << " setShardVersion success: " << result << endl;
        connectionShardStatus.setSequence( &conn , ns , officialSequenceNumber );
        return true;
    }

    log(1) << " setShardVersion failed!\n" << result << endl;

    if ( result.getBoolField( "need_authoritative" ) )
        massert( 10428 , "need_authoritative set but in authoritative mode already" , ! authoritative );

    if ( ! authoritative ) {
        // First failure: retry immediately, this time authoritatively.
        checkShardVersion( conn , ns , 1 , tryNumber + 1 );
        return true;
    }

    if ( tryNumber < 4 ) {
        // Transient failure even in authoritative mode: brief backoff, retry.
        log(1) << "going to retry checkShardVersion" << endl;
        sleepmillis( 10 );
        checkShardVersion( conn , ns , 1 , tryNumber + 1 );
        return true;
    }

    log() << " setShardVersion failed: " << result << endl;
    massert( 10429 , (string)"setShardVersion failed! " + result.jsonString() , 0 );

    return true;
}
void SyncClusterConnection::insert( const string &ns, BSONObj obj ){ uassert( 13119 , (string)"SyncClusterConnection::insert obj has to have an _id: " + obj.jsonString() , ns.find( ".system.indexes" ) != string::npos || obj["_id"].type() ); string errmsg; if ( ! prepare( errmsg ) ) throw UserException( 8003 , (string)"SyncClusterConnection::insert prepare failed: " + errmsg ); for ( size_t i=0; i<_conns.size(); i++ ){ _conns[i]->insert( ns , obj ); } _checkLast(); }
// mongoexport main loop: exports the documents matched by --query from the
// target namespace as line-per-document JSON, a JSON array (--jsonArray),
// or CSV (--csv, requires --fields).  Output goes to stdout or --out.
// Returns 0 on success, non-zero on setup errors.
int run() {
    string ns;
    const bool csv = hasParam( "csv" );
    const bool jsonArray = hasParam( "jsonArray" );
    ostream *outPtr = &cout;
    string outfile = getParam( "out" );

    // Redirect output to --out, creating parent directories as needed.
    auto_ptr<ofstream> fileStream;
    if ( hasParam( "out" ) ) {
        size_t idx = outfile.rfind( "/" );
        if ( idx != string::npos ) {
            string dir = outfile.substr( 0 , idx + 1 );
            boost::filesystem::create_directories( dir );
        }
        ofstream * s = new ofstream( outfile.c_str() , ios_base::out );
        fileStream.reset( s );
        outPtr = s;
        if ( ! s->good() ) {
            cerr << "couldn't open [" << outfile << "]" << endl;
            return -1;
        }
    }
    ostream &out = *outPtr;

    BSONObj * fieldsToReturn = 0;
    BSONObj realFieldsToReturn;

    try {
        ns = getNS();
    }
    catch (...) {
        printHelp(cerr);
        return 1;
    }

    auth();

    if ( hasParam( "fields" ) || csv ) {
        needFields();

        // we can't use just _fieldsObj since we support everything getFieldDotted does
        // Project only the deduplicated top-level prefix of each
        // (possibly dotted) field.
        set<string> seen;

        BSONObjBuilder b;
        BSONObjIterator i( _fieldsObj );
        while ( i.more() ){
            BSONElement e = i.next();
            string f = str::before( e.fieldName() , '.' );
            if ( seen.insert( f ).second )
                b.append( f , 1 );
        }

        realFieldsToReturn = b.obj();
        fieldsToReturn = &realFieldsToReturn;
    }

    if ( csv && _fields.size() == 0 ) {
        cerr << "csv mode requires a field list" << endl;
        return -1;
    }

    Query q( getParam( "query" , "" ) );
    // Snapshot unfiltered server-side scans unless reading data files
    // directly or a table scan was explicitly forced.
    if ( q.getFilter().isEmpty() && !hasParam("dbpath") && !hasParam("forceTableScan") )
        q.snapshot();

    bool slaveOk = _params["slaveOk"].as<bool>();

    auto_ptr<DBClientCursor> cursor = conn().query( ns.c_str() , q , 0 , 0 , fieldsToReturn , ( slaveOk ? QueryOption_SlaveOk : 0 ) | QueryOption_NoCursorTimeout );

    // CSV header row.
    if ( csv ) {
        for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
            if ( i != _fields.begin() )
                out << ",";
            out << *i;
        }
        out << endl;
    }

    if (jsonArray)
        out << '[';

    long long num = 0;
    while ( cursor->more() ) {
        num++;
        BSONObj obj = cursor->next();

        if ( csv ) {
            // One column per requested field; absent fields become empty cells.
            for ( vector<string>::iterator i=_fields.begin(); i != _fields.end(); i++ ) {
                if ( i != _fields.begin() )
                    out << ",";
                const BSONElement & e = obj.getFieldDotted(i->c_str());
                if ( ! e.eoo() ) {
                    out << csvString(e);
                }
            }
            out << endl;
        }
        else {
            // JSON-array mode separates documents with commas instead of newlines.
            if (jsonArray && num != 1)
                out << ',';

            out << obj.jsonString();

            if (!jsonArray)
                out << endl;
        }
    }

    if (jsonArray)
        out << ']' << endl;

    cerr << "exported " << num << " records" << endl;

    return 0;
}
// mongorestore entry point: verifies write access and target topology,
// parses the --oplogReplay / --oplogLimit options (including a sanity
// check against the newest entry in local.oplog.rs), restores the dump
// rooted at --dir via drillDown(), and finally replays oplog.bson when
// requested.  Returns EXIT_CLEAN on success, -1 on any precondition
// failure.
virtual int doRun() {

    // authenticate
    enum Auth::Level authLevel = Auth::NONE;
    auth("", &authLevel);
    uassert(15935, "user does not have write access", authLevel == Auth::WRITE);

    boost::filesystem::path root = getParam("dir");

    // check if we're actually talking to a machine that can write
    if (!isMaster()) {
        return -1;
    }

    if (isMongos() && _db == "" && exists(root / "config")) {
        log() << "Cannot do a full restore on a sharded system" << endl;
        return -1;
    }

    _drop = hasParam( "drop" );
    _keepIndexVersion = hasParam("keepIndexVersion");
    _restoreOptions = !hasParam("noOptionsRestore");
    _restoreIndexes = !hasParam("noIndexRestore");
    _w = getParam( "w" , 1 );

    bool doOplog = hasParam( "oplogReplay" );

    if (doOplog) {
        // fail early if errors

        if (_db != "") {
            log() << "Can only replay oplog on full restore" << endl;
            return -1;
        }

        if ( ! exists(root / "oplog.bson") ) {
            log() << "No oplog file to replay. Make sure you run mongodump with --oplog." << endl;
            return -1;
        }

        // Oplog replay requires server >= 1.7.4.
        BSONObj out;
        if (! conn().simpleCommand("admin", &out, "buildinfo")) {
            log() << "buildinfo command failed: " << out["errmsg"].String() << endl;
            return -1;
        }

        StringData version = out["version"].valuestr();
        if (versionCmp(version, "1.7.4-pre-") < 0) {
            log() << "Can only replay oplog to server version >= 1.7.4" << endl;
            return -1;
        }

        // --oplogLimit is "<secs>[:<inc>]"; parse into _oplogLimitTS.
        string oplogLimit = getParam( "oplogLimit", "" );
        string oplogInc = "0";

        if(!oplogLimit.empty()) {
            size_t i = oplogLimit.find_first_of(':');
            if ( i != string::npos ) {
                if ( i + 1 < oplogLimit.length() ) {
                    oplogInc = oplogLimit.substr(i + 1);
                }

                oplogLimit = oplogLimit.substr(0, i);
            }

            try {
                _oplogLimitTS.reset(new OpTime(
                    boost::lexical_cast<unsigned long>(oplogLimit.c_str()),
                    boost::lexical_cast<unsigned long>(oplogInc.c_str())));
            } catch( const boost::bad_lexical_cast& error) {
                log() << "Could not parse oplogLimit into Timestamp from values ( "
                      << oplogLimit << " , " << oplogInc << " )"
                      << endl;
                return -1;
            }

            if (!oplogLimit.empty()) {
                // Only for a replica set as master will have no-op entries so we would need to
                // skip them all to find the real op
                scoped_ptr<DBClientCursor> cursor(
                        conn().query("local.oplog.rs", Query().sort(BSON("$natural" << -1)),
                                     1 /*return first*/));
                OpTime tsOptime;
                // get newest oplog entry and make sure it is older than the limit to apply.
                if (cursor->more()) {
                    tsOptime = cursor->next().getField("ts")._opTime();
                    if (tsOptime > *_oplogLimitTS.get()) {
                        log() << "The oplogLimit is not newer than"
                              << " the last oplog entry on the server."
                              << endl;
                        return -1;
                    }
                }

                // Replay window: newer than the server's newest entry (if
                // any) and strictly older than the limit.
                BSONObjBuilder tsRestrictBldr;
                if (!tsOptime.isNull())
                    tsRestrictBldr.appendTimestamp("$gt", tsOptime.asDate());
                tsRestrictBldr.appendTimestamp("$lt", _oplogLimitTS->asDate());

                BSONObj query = BSON("ts" << tsRestrictBldr.obj());

                if (!tsOptime.isNull()) {
                    log() << "Latest oplog entry on the server is " << tsOptime.getSecs()
                          << ":" << tsOptime.getInc() << endl;
                    log() << "Only applying oplog entries matching this criteria: "
                          << query.jsonString() << endl;
                }
                _opmatcher.reset(new Matcher(query));
            }
        }
    }

    /* If _db is not "" then the user specified a db name to restore as.
     *
     * In that case we better be given either a root directory that
     * contains only .bson files or a single .bson file (a db).
     *
     * In the case where a collection name is specified we better be
     * given either a root directory that contains only a single
     * .bson file, or a single .bson file itself (a collection).
     */
    drillDown(root, _db != "", _coll != "", !(_oplogLimitTS.get() == NULL), true);

    // should this happen for oplog replay as well?
    conn().getLastError(_db == "" ? "admin" : _db);

    if (doOplog) {
        log() << "\t Replaying oplog" << endl;
        _curns = OPLOG_SENTINEL;
        processFile( root / "oplog.bson" );
        log() << "Applied " << _oplogEntryApplies << " oplog entries out of "
              << _oplogEntryApplies + _oplogEntrySkips << " (" << _oplogEntrySkips
              << " skipped)." << endl;
    }
    return EXIT_CLEAN;
}
void run() { if ( _token.size() == 0 && _name.size() == 0 ) { log(1) << "mms not configured" << endl; return; } if ( _token.size() == 0 ) { log() << "no token for mms - not running" << endl; return; } if ( _name.size() == 0 ) { log() << "no name for mms - not running" << endl; return; } log() << "mms monitor staring... token:" << _token << " name:" << _name << " interval: " << _secsToSleep << endl; Client::initThread( "mms" ); Client& c = cc(); // TODO: using direct client is bad, but easy for now while ( ! inShutdown() ) { sleepsecs( _secsToSleep ); try { stringstream url; url << _baseurl << "?" << "token=" << _token << "&" << "name=" << _name << "&" << "ts=" << time(0) ; BSONObjBuilder bb; // duplicated so the post has everything bb.append( "token" , _token ); bb.append( "name" , _name ); bb.appendDate( "ts" , jsTime() ); // any commands _add( bb , "buildinfo" ); _add( bb , "serverStatus" ); BSONObj postData = bb.obj(); log(1) << "mms url: " << url.str() << "\n\t post: " << postData << endl;; HttpClient c; HttpClient::Result r; int rc = c.post( url.str() , postData.jsonString() , &r ); log(1) << "\t response code: " << rc << endl; if ( rc != 200 ) { log() << "mms error response code:" << rc << endl; log(1) << "mms error body:" << r.getEntireResponse() << endl; } } catch ( std::exception& e ) { log() << "mms exception: " << e.what() << endl; } } c.shutdown(); }
void handleRESTQuery( string ns , string action , map<string,string> & params , int & responseCode , stringstream & out ) { Timer t; int skip = _getOption( params["skip"] , 0 ); int num = _getOption( params["limit"] , _getOption( params["count" ] , 1000 ) ); // count is old, limit is new int one = 0; if ( params["one"].size() > 0 && tolower( params["one"][0] ) == 't' ) { num = 1; one = 1; } BSONObjBuilder queryBuilder; for ( map<string,string>::iterator i = params.begin(); i != params.end(); i++ ) { if ( ! i->first.find( "filter_" ) == 0 ) continue; const char * field = i->first.substr( 7 ).c_str(); const char * val = i->second.c_str(); char * temp; // TODO: this is how i guess if something is a number. pretty lame right now double number = strtod( val , &temp ); if ( temp != val ) queryBuilder.append( field , number ); else queryBuilder.append( field , val ); } BSONObj query = queryBuilder.obj(); auto_ptr<DBClientCursor> cursor = db.query( ns.c_str() , query, num , skip ); if ( one ) { if ( cursor->more() ) { BSONObj obj = cursor->next(); out << obj.jsonString() << "\n"; } else { responseCode = 404; } return; } out << "{\n"; out << " \"offset\" : " << skip << ",\n"; out << " \"rows\": [\n"; int howMany = 0; while ( cursor->more() ) { if ( howMany++ ) out << " ,\n"; BSONObj obj = cursor->next(); out << " " << obj.jsonString(); } out << "\n ],\n\n"; out << " \"total_rows\" : " << howMany << " ,\n"; out << " \"query\" : " << query.jsonString() << " ,\n"; out << " \"millis\" : " << t.millis() << "\n"; out << "}\n"; }