bool setShardVersion(DBClientBase& conn,
                     const string& ns,
                     const string& configServerPrimary,
                     ChunkVersion version,
                     ChunkManager* manager,
                     bool authoritative,
                     BSONObj& result) {
    BSONObjBuilder cmdBuilder;
    cmdBuilder.append("setShardVersion", ns);
    cmdBuilder.append("configdb", configServerPrimary);

    Shard s = Shard::make(conn.getServerAddress());
    cmdBuilder.append("shard", s.getName());
    cmdBuilder.append("shardHost", s.getConnString());

    if (ns.size() > 0) {
        version.addToBSON(cmdBuilder);
    } else {
        cmdBuilder.append("init", true);
    }

    if (authoritative) {
        cmdBuilder.appendBool("authoritative", 1);
    }

    BSONObj cmd = cmdBuilder.obj();

    LOG(1) << " setShardVersion " << s.getName() << " " << conn.getServerAddress() << " " << ns
           << " " << cmd
           << (manager ? string(str::stream() << " " << manager->getSequenceNumber()) : "");

    return conn.runCommand("admin", cmd, result, 0);
}
bool LastError::appendSelf(BSONObjBuilder& b, bool blankErr) const {
    if (!_valid) {
        if (blankErr)
            b.appendNull("err");
        b.append("n", 0);
        return false;
    }

    if (_msg.empty()) {
        if (blankErr) {
            b.appendNull("err");
        }
    } else {
        b.append("err", _msg);
    }

    if (_code) {
        b.append("code", _code);
        b.append("codeName", ErrorCodes::errorString(ErrorCodes::Error(_code)));
    }
    if (_updatedExisting != NotUpdate)
        b.appendBool("updatedExisting", _updatedExisting == True);
    if (!_upsertedId.isEmpty()) {
        b.append(_upsertedId[kUpsertedFieldName]);
    }
    b.appendNumber("n", _nObjects);
    return !_msg.empty();
}
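// Illustrative only (not from the original source): rough shapes of the
// getLastError-style documents appendSelf produces. Field values are hypothetical.
//   no error recorded:  { err: null, n: 0 }
//   with an error:      { err: "E11000 duplicate key", code: 11000,
//                         codeName: "DuplicateKey", n: 0 }
//   after an update:    { err: null, updatedExisting: true, n: 1 }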
bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result,
         bool fromRepl ) {
    result << "version" << versionString << "gitVersion" << gitVersion() << "sysInfo" << sysInfo();
    result << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
    result.appendBool( "debug" , debug );
    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
    return true;
}
bool DBClientWithCommands::ensureIndex( const string &ns , BSONObj keys , bool unique, const string & name ) {
    BSONObjBuilder toSave;
    toSave.append( "ns" , ns );
    toSave.append( "key" , keys );

    string cacheKey(ns);
    cacheKey += "--";

    if ( name != "" ) {
        toSave.append( "name" , name );
        cacheKey += name;
    }
    else {
        string nn = genIndexName( keys );
        toSave.append( "name" , nn );
        cacheKey += nn;
    }

    if ( unique )
        toSave.appendBool( "unique", unique );

    if ( _seenIndexes.count( cacheKey ) )
        return false;

    _seenIndexes.insert( cacheKey );
    insert( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , toSave.obj() );
    return true;
}
BSONObj listFiles(const BSONObj& args) {
    uassert( "need to specify 1 argument to listFiles" , args.nFields() == 1 );

    BSONObjBuilder lst;

    string rootname = args.firstElement().valuestrsafe();
    path root( rootname );

    directory_iterator end;
    directory_iterator i( root );

    int num = 0;
    while ( i != end ) {
        path p = *i;

        BSONObjBuilder b;
        b << "name" << p.string();
        b.appendBool( "isDirectory", is_directory( p ) );

        stringstream ss;
        ss << num;
        string name = ss.str();
        lst.append( name.c_str(), b.done() );

        num++;
        i++;
    }

    BSONObjBuilder ret;
    ret.appendArray( "", lst.done() );
    return ret.obj();
}
bool checkConfigOrInit( const string& configdb , bool authoritative , string& errmsg , BSONObjBuilder& result , bool locked=false ) const {
    if ( configdb.size() == 0 ) {
        errmsg = "no configdb";
        return false;
    }

    if ( shardingState.enabled() ) {
        if ( configdb == shardingState.getConfigServer() )
            return true;

        result.append( "configdb" , BSON( "stored" << shardingState.getConfigServer() <<
                                          "given" << configdb ) );
        errmsg = "specified a different configdb!";
        return false;
    }

    if ( ! authoritative ) {
        result.appendBool( "need_authoritative" , true );
        errmsg = "first setShardVersion";
        return false;
    }

    if ( locked ) {
        shardingState.enable( configdb );
        configServer.init( configdb );
        return true;
    }

    // Not yet locked: take the database lock, then retry with locked=true.
    dblock lk;
    return checkConfigOrInit( configdb , authoritative , errmsg , result , true );
}
void KVCatalog::FeatureTracker::putInfo(OperationContext* opCtx, const FeatureBits& versionInfo) {
    BSONObjBuilder bob;
    bob.appendBool(kIsFeatureDocumentFieldName, true);
    // We intentionally include the "ns" field with a null value in the feature document to prevent
    // older versions that do 'obj["ns"].String()' from starting up. This way only versions that
    // are aware of the feature document's existence can successfully start up.
    bob.appendNull(kNamespaceFieldName);
    bob.append(kNonRepairableFeaturesFieldName,
               static_cast<long long>(versionInfo.nonRepairableFeatures));
    bob.append(kRepairableFeaturesFieldName,
               static_cast<long long>(versionInfo.repairableFeatures));
    BSONObj obj = bob.done();

    if (_rid.isNull()) {
        // This is the first time a feature is being marked as in-use or not in-use, so we must
        // insert the feature document rather than update it.
        const bool enforceQuota = false;
        // TODO SERVER-30638: using timestamp 0 for these inserts
        auto rid = _catalog->_rs->insertRecord(
            opCtx, obj.objdata(), obj.objsize(), Timestamp(), enforceQuota);
        fassert(40113, rid.getStatus());
        _rid = rid.getValue();
    } else {
        const bool enforceQuota = false;
        UpdateNotifier* notifier = nullptr;
        auto status = _catalog->_rs->updateRecord(
            opCtx, _rid, obj.objdata(), obj.objsize(), enforceQuota, notifier);
        fassert(40114, status);
    }
}
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
    BSONElement e = cmdObj.firstElement();
    if ( e.type() != jstOID ) {
        errmsg = "need oid as first value";
        return false;
    }

    // get the command issuer's (a mongos) serverID
    const OID id = e.__oid();

    // the command issuer is blocked awaiting a response;
    // we want to return at least every 5 minutes so sockets don't time out
    BSONObj z;
    if ( writeBackManager.getWritebackQueue(id.str())->queue.blockingPop( z, 5 * 60 /* 5 minutes */ ) ) {
        MONGO_LOG(1) << "WriteBackCommand got : " << z << endl;
        result.append( "data" , z );
    }
    else {
        result.appendBool( "noop" , true );
    }

#ifdef _DEBUG
    // Sleep a short amount of time usually
    int sleepFor = rand() % 10;
    sleepmillis( sleepFor );

    // Sleep a longer amount of time every once in a while
    int sleepLong = rand() % 50;
    if ( sleepLong == 0 )
        sleepsecs( 2 );
#endif

    return true;
}
bool Database::setProfilingLevel( int newLevel , string& errmsg ) {
    if ( profile == newLevel )
        return true;

    if ( newLevel < 0 || newLevel > 2 ) {
        errmsg = "profiling level has to be >= 0 and <= 2";
        return false;
    }

    if ( newLevel == 0 ) {
        profile = 0;
        return true;
    }

    assert( cc().database() == this );

    if ( ! namespaceIndex.details( profileName.c_str() ) ) {
        log() << "creating profile collection: " << profileName << endl;
        BSONObjBuilder spec;
        spec.appendBool( "capped", true );
        spec.append( "size", 131072.0 );
        if ( ! userCreateNS( profileName.c_str(), spec.done(), errmsg ,
                             false /* we don't replicate profile messages */ ) ) {
            return false;
        }
    }

    profile = newLevel;
    return true;
}
bool run(const string& , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result, bool) {
    string ns = cmdObj["getShardVersion"].valuestrsafe();
    if ( ns.size() == 0 ) {
        errmsg = "need to specify full namespace";
        return false;
    }

    result.append( "configServer" , shardingState.getConfigServer() );
    result.appendTimestamp( "global" , shardingState.getVersion(ns).toLong() );

    ShardedConnectionInfo* info = ShardedConnectionInfo::get( false );
    result.appendBool( "inShardedMode" , info != 0 );
    if ( info )
        result.appendTimestamp( "mine" , info->getVersion(ns).toLong() );
    else
        result.appendTimestamp( "mine" , 0 );

    if ( cmdObj["fullMetadata"].trueValue() ) {
        CollectionMetadataPtr metadata = shardingState.getCollectionMetadata( ns );
        if ( metadata )
            result.append( "metadata", metadata->toBSON() );
        else
            result.append( "metadata", BSONObj() );
    }

    return true;
}
bool checkConfigOrInit( const string& configdb , bool authoritative , string& errmsg , BSONObjBuilder& result , bool locked=false ) const {
    if ( configdb.size() == 0 ) {
        errmsg = "no configdb";
        return false;
    }

    if ( shardingState.enabled() ) {
        if ( configdb == shardingState.getConfigServer() )
            return true;

        result.append( "configdb" , BSON( "stored" << shardingState.getConfigServer() <<
                                          "given" << configdb ) );
        errmsg = str::stream() << "mongos specified a different config database string : "
                               << "stored : " << shardingState.getConfigServer()
                               << " vs given : " << configdb;
        return false;
    }

    if ( ! authoritative ) {
        result.appendBool( "need_authoritative" , true );
        errmsg = "first setShardVersion";
        return false;
    }

    if ( locked ) {
        ShardedConnectionInfo::addHook();
        shardingState.enable( configdb );
        configServer.init( configdb );
        return true;
    }

    // Not yet locked: take the global write lock, then retry with locked=true.
    Lock::GlobalWrite lk;
    return checkConfigOrInit( configdb , authoritative , errmsg , result , true );
}
void DBConfig::serialize(BSONObjBuilder& to) {
    to.append("name", _name);
    to.appendBool("partitioned", _shardingEnabled );
    to.append("primary", _primary );

    if ( _sharded.size() > 0 ) {
        BSONObjBuilder a;
        for ( map<string,CollectionInfo>::reverse_iterator i=_sharded.rbegin(); i != _sharded.rend(); i++) {
            BSONObjBuilder temp;
            temp.append( "key" , i->second.key.key() );
            temp.appendBool( "unique" , i->second.unique );
            a.append( i->first.c_str() , temp.obj() );
        }
        to.append( "sharded" , a.obj() );
    }
}
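// Illustrative only: the shape of the document serialize() produces for a
// database with one sharded collection (names, shard, and key are hypothetical).
//   { name: "test", partitioned: true, primary: "shard0000",
//     sharded: { "test.foo": { key: { x: 1 }, unique: false } } }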
void Chunk::pickMedianKey(BSONObj& medianKey) const {
    // Ask the mongod holding this chunk to figure out the split points.
    ScopedDbConnection conn(_getShardConnectionString());
    BSONObj result;

    BSONObjBuilder cmd;
    cmd.append("splitVector", _manager->getns());
    cmd.append("keyPattern", _manager->getShardKeyPattern().toBSON());
    cmd.append("min", getMin());
    cmd.append("max", getMax());
    cmd.appendBool("force", true);
    BSONObj cmdObj = cmd.obj();

    if (!conn->runCommand("admin", cmdObj, result)) {
        conn.done();

        ostringstream os;
        os << "splitVector command (median key) failed: " << result;
        uassert(13503, os.str(), 0);
    }

    BSONObjIterator it(result.getObjectField("splitKeys"));
    if (it.more()) {
        medianKey = it.next().Obj().getOwned();
    }

    conn.done();
}
void Helpers::ensureIndex(const char *ns, BSONObj keyPattern, bool unique, const char *name) {
    NamespaceDetails *d = nsdetails(ns);
    if( d == 0 )
        return;

    {
        NamespaceDetails::IndexIterator i = d->ii();
        while( i.more() ) {
            if( i.next().keyPattern().woCompare(keyPattern) == 0 )
                return;
        }
    }

    if( d->nIndexes >= NamespaceDetails::NIndexesMax ) {
        problem() << "Helper::ensureIndex fails, MaxIndexes exceeded " << ns << '\n';
        return;
    }

    string system_indexes = cc().database()->name + ".system.indexes";

    BSONObjBuilder b;
    b.append("name", name);
    b.append("ns", ns);
    b.append("key", keyPattern);
    b.appendBool("unique", unique);
    BSONObj o = b.done();

    theDataFileMgr.insert(system_indexes.c_str(), o.objdata(), o.objsize());
}
bool handlePossibleShardedMessage( Message &m, DbResponse &dbresponse ) {
    if ( shardConfigServer.empty() ) {
        return false;
    }

    int op = m.data->operation();
    if ( op < 2000 || op >= 3000 )
        return false;

    const char *ns = m.data->_data + 4;
    string errmsg;
    if ( shardVersionOk( ns , errmsg ) ) {
        return false;
    }

    log() << "shardVersionOk failed ns:" << ns << " " << errmsg << endl;

    if ( doesOpGetAResponse( op ) ) {
        BufBuilder b( 32768 );
        b.skip( sizeof( QueryResult ) );
        {
            BSONObj obj = BSON( "$err" << errmsg );
            b.append( obj.objdata() , obj.objsize() );
        }

        QueryResult *qr = (QueryResult*)b.buf();
        qr->_resultFlags() = QueryResult::ResultFlag_ErrSet | QueryResult::ResultFlag_ShardConfigStale;
        qr->len = b.len();
        qr->setOperation( opReply );
        qr->cursorId = 0;
        qr->startingFrom = 0;
        qr->nReturned = 1;
        b.decouple();

        Message * resp = new Message();
        resp->setData( qr , true );

        dbresponse.response = resp;
        dbresponse.responseTo = m.data->id;
        return true;
    }

    OID * clientID = clientServerIds.get();
    massert( 10422 , "write with bad shard config and no server id!" , clientID );

    log() << "got write with an old config - writing back" << endl;

    BSONObjBuilder b;
    b.appendBool( "writeBack" , true );
    b.append( "ns" , ns );
    b.appendBinData( "msg" , m.data->len , bdtCustom , (char*)(m.data) );
    log() << "writing back msg with len: " << m.data->len << " op: " << m.data->_operation << endl;
    clientQueues[clientID->str()]->push( b.obj() );

    return true;
}
void append( BSONObjBuilder& b , string name , jsval val , BSONType oldType = EOO , int depth=0 ) {
    //cout << "name: " << name << "\t" << typeString( val ) << " oldType: " << oldType << endl;
    switch ( JS_TypeOfValue( _context , val ) ) {

    case JSTYPE_VOID:
        b.appendUndefined( name.c_str() );
        break;
    case JSTYPE_NULL:
        b.appendNull( name.c_str() );
        break;

    case JSTYPE_NUMBER: {
        double d = toNumber( val );
        if ( oldType == NumberInt && ((int)d) == d )
            b.append( name.c_str() , (int)d );
        else
            b.append( name.c_str() , d );
        break;
    }
    case JSTYPE_STRING:
        b.append( name.c_str() , toString( val ) );
        break;
    case JSTYPE_BOOLEAN:
        b.appendBool( name.c_str() , toBoolean( val ) );
        break;

    case JSTYPE_OBJECT: {
        JSObject * o = JSVAL_TO_OBJECT( val );
        if ( ! o || o == JSVAL_NULL ) {
            b.appendNull( name.c_str() );
        }
        else if ( ! appendSpecialDBObject( this , b , name , val , o ) ) {
            BSONObj sub = toObject( o , depth );
            if ( JS_IsArrayObject( _context , o ) ) {
                b.appendArray( name.c_str() , sub );
            }
            else {
                b.append( name.c_str() , sub );
            }
        }
        break;
    }

    case JSTYPE_FUNCTION: {
        string s = toString(val);
        if ( s[0] == '/' ) {
            appendRegex( b , name , s );
        }
        else {
            b.appendCode( name.c_str() , getFunctionCode( val ).c_str() );
        }
        break;
    }

    default:
        uassert( 10217 , (string)"can't append field. name:" + name + " type: " + typeString( val ) , 0 );
    }
}
void a() {
    BSONObjBuilder b;
    b << "_id" << "abc";
    b.appendBool( "partitioned" , true );
    b << "primary" << "myserver";

    DBConfig c;
    testInOut( c , b.obj() );
}
Status createIndex(OperationContext* opCtx, StringData ns, const BSONObj& keys, bool unique) {
    BSONObjBuilder specBuilder;
    specBuilder.append("name", DBClientBase::genIndexName(keys));
    specBuilder.append("ns", ns);
    specBuilder.append("key", keys);
    specBuilder.append("v", static_cast<int>(kIndexVersion));
    if (unique) {
        specBuilder.appendBool("unique", true);
    }
    return createIndexFromSpec(opCtx, ns, specBuilder.done());
}
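// A minimal usage sketch (not from the original source; "test.users" and the
// key pattern are hypothetical): build a unique index on { email: 1 } through
// the createIndex helper above.
//   Status s = createIndex(opCtx, "test.users", BSON("email" << 1), /*unique=*/true);
//   if (!s.isOK()) {
//       return s;  // propagate the index-creation failure
//   }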
virtual bool _split( BSONObjBuilder& result , string& errmsg , const string& ns ,
                     ChunkManagerPtr manager , ChunkPtr old , BSONObj middle ) {
    result << "shardinfo" << old->toString();

    result.appendBool( "auto" , middle.isEmpty() );

    if ( middle.isEmpty() )
        middle = old->pickSplitPoint();

    result.append( "middle" , middle );

    return true;
}
void DBConfig::serialize(BSONObjBuilder& to) {
    to.append("name", _name);
    to.appendBool("partitioned", _partitioned );
    to.append("primary", _primary );

    if ( _sharded.size() > 0 ) {
        BSONObjBuilder a;
        for ( map<string,ShardKeyPattern>::reverse_iterator i=_sharded.rbegin(); i != _sharded.rend(); i++) {
            a.append( i->first.c_str() , i->second.key() );
        }
        to.append( "sharded" , a.obj() );
    }
}
bool VersionManager::initShardVersionCB( DBClientBase * conn_in, BSONObj& result ) {
    WriteBackListener::init( *conn_in );

    DBClientBase* conn = getVersionable( conn_in );
    verify( conn ); // errors thrown above

    BSONObjBuilder cmdBuilder;
    cmdBuilder.append( "setShardVersion" , "" );
    cmdBuilder.appendBool( "init", true );
    cmdBuilder.append( "configdb" , configServer.modelServer() );
    cmdBuilder.appendOID( "serverID" , &serverID );
    cmdBuilder.appendBool( "authoritative" , true );

    BSONObj cmd = cmdBuilder.obj();

    LOG(1) << "initializing shard connection to " << conn->toString() << endl;
    LOG(2) << "initial sharding settings : " << cmd << endl;

    bool ok = conn->runCommand( "admin", cmd, result, 0,
                                &AuthenticationTable::getInternalSecurityAuthenticationTable() );

    // HACK for backwards compatibility with v1.8.x, v2.0.0 and v2.0.1.
    // The result is false, but the command will still have initialized the serverID and configdb.
    if ( ! ok && ! result["errmsg"].eoo() &&
         ( result["errmsg"].String() == "need to specify namespace" /* 2.0.1/2 */ ||
           result["errmsg"].String() == "need to speciy namespace" /* 1.8 */ ) ) {
        ok = true;
    }

    LOG(3) << "initial sharding result : " << result << endl;

    return ok;
}
bool run(OperationContext* txn,
         const string& dbname,
         BSONObj& jsobj,
         int,
         string& errmsg,
         BSONObjBuilder& result) {
    vector<string> dbNames;
    StorageEngine* storageEngine = getGlobalServiceContext()->getGlobalStorageEngine();
    {
        ScopedTransaction transaction(txn, MODE_IS);
        Lock::GlobalLock lk(txn->lockState(), MODE_IS, UINT_MAX);
        storageEngine->listDatabases(&dbNames);
    }

    vector<BSONObj> dbInfos;

    set<string> seen;
    intmax_t totalSize = 0;
    for (vector<string>::iterator i = dbNames.begin(); i != dbNames.end(); ++i) {
        const string& dbname = *i;

        BSONObjBuilder b;
        b.append("name", dbname);

        {
            ScopedTransaction transaction(txn, MODE_IS);
            Lock::DBLock dbLock(txn->lockState(), dbname, MODE_IS);

            Database* db = dbHolder().get(txn, dbname);
            if (!db)
                continue;

            const DatabaseCatalogEntry* entry = db->getDatabaseCatalogEntry();
            invariant(entry);

            int64_t size = entry->sizeOnDisk(txn);
            b.append("sizeOnDisk", static_cast<double>(size));
            totalSize += size;

            b.appendBool("empty", entry->isEmpty());
        }

        dbInfos.push_back(b.obj());
        seen.insert(i->c_str());
    }

    result.append("databases", dbInfos);
    result.append("totalSize", double(totalSize));

    return true;
}
bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg, BSONObjBuilder& result,
         bool fromRepl ) {
    string ns = dbname + "." + cmdObj.firstElement().valuestrsafe();
    NamespaceString ns_string(ns);

    const bool full = cmdObj["full"].trueValue();
    const bool scanData = full || cmdObj["scandata"].trueValue();

    if ( !ns_string.isNormal() && full ) {
        errmsg = "Can only run full validate on a regular collection";
        return false;
    }

    if (!serverGlobalParams.quiet) {
        MONGO_TLOG(0) << "CMD: validate " << ns << endl;
    }

    Client::ReadContext ctx(ns_string.ns());

    Database* db = cc().database();
    if ( !db ) {
        errmsg = "database not found";
        return false;
    }

    Collection* collection = db->getCollection( ns );
    if ( !collection ) {
        errmsg = "collection not found";
        return false;
    }

    result.append( "ns", ns );

    ValidateResults results;
    Status status = collection->validate( full, scanData, &results, &result );
    if ( !status.isOK() )
        return appendCommandStatus( result, status );

    result.appendBool("valid", results.valid);
    result.append("errors", results.errors);

    if ( !full ) {
        result.append("warning",
                      "Some checks omitted for speed. use {full:true} option to do more thorough scan.");
    }

    if ( !results.valid ) {
        result.append("advice", "ns corrupt. See http://dochub.mongodb.org/core/data-recovery");
    }

    return true;
}
BSONObj removeFile(const BSONObj& args) {
    BSONElement e = oneArg(args);

    bool found = false;

    path root( args.firstElement().valuestrsafe() );
    if ( boost::filesystem::exists( root ) ) {
        found = true;
        boost::filesystem::remove_all( root );
    }

    BSONObjBuilder b;
    b.appendBool( "removed" , found );
    return b.obj();
}
void appendBuildInfo(BSONObjBuilder& result) {
    result << "version" << versionString
           << "gitVersion" << gitVersion()
           << "sysInfo" << sysInfo()
           << "loaderFlags" << loaderFlags()
           << "compilerFlags" << compilerFlags()
           << "allocator" << allocator()
           << "versionArray" << versionArray
           << "javascriptEngine" << compiledJSEngine()
/*TODO: add this back once the module system is in place -- maybe once we do something like
  serverstatus with callbacks*/
//         << "interpreterVersion" << globalScriptEngine->getInterpreterVersionString()
           << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );

    result.appendBool( "debug" , debug );
    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
}
void DBConfig::CollectionInfo::save( const string& ns , DBClientBase* conn ) {
    BSONObj key = BSON( "_id" << ns );

    BSONObjBuilder val;
    val.append( "_id" , ns );
    val.appendDate( "lastmod" , time(0) );
    val.appendBool( "dropped" , _dropped );
    if ( _cm )
        _cm->getInfo( val );

    conn->update( ShardNS::collection , key , val.obj() , true );
    string err = conn->getLastError();
    uassert( 13473 , (string)"failed to save collection (" + ns + "): " + err , err.size() == 0 );

    _dirty = false;
}
BSONObj buildOpMergeChunk(const ChunkType& mergedChunk) {
    BSONObjBuilder opB;

    // Op basics
    opB.append("op", "u");
    opB.appendBool("b", false);  // no upserting
    opB.append("ns", ChunkType::ConfigNS);

    // New object
    opB.append("o", mergedChunk.toBSON());

    // Query object
    opB.append("o2", BSON(ChunkType::name(mergedChunk.getName())));

    return opB.obj();
}
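// Illustrative only: a rough sketch of the applyOps-style update entry this
// helper builds (the namespace value, chunk name, and body are hypothetical).
//   { op: "u", b: false, ns: "config.chunks",
//     o:  { _id: "test.foo-x_10", ns: "test.foo", min: { x: 10 }, max: { x: 30 }, ... },
//     o2: { _id: "test.foo-x_10" } }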
bool run(const std::string& dbname,
         BSONObj& jsobj,
         int, // options
         std::string& errmsg,
         BSONObjBuilder& result,
         bool fromRepl) {
    result << "version" << versionString
           << "gitVersion" << gitVersion()
           << "sysInfo" << sysInfo()
           << "versionArray" << versionArray
           << "interpreterVersion" << globalScriptEngine->getInterpreterVersionString()
           << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
    result.appendBool( "debug" , debug );
    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
    return true;
}
bool DBClientBase::ensureIndex( const string &ns , BSONObj keys , bool unique, const string & name ) {
    BSONObjBuilder toSave;
    toSave.append( "ns" , ns );
    toSave.append( "key" , keys );

    string cacheKey(ns);
    cacheKey += "--";

    if ( name != "" ) {
        toSave.append( "name" , name );
        cacheKey += name;
    }
    else {
        // No name supplied: derive one from the key pattern,
        // e.g. { a: 1, b: -1 } becomes "a_1_b_-1".
        stringstream ss;

        bool first = 1;
        for ( BSONObjIterator i(keys); i.more(); ) {
            BSONElement f = i.next();

            if ( first )
                first = 0;
            else
                ss << "_";

            ss << f.fieldName() << "_";
            if ( f.type() == NumberInt )
                ss << (int)(f.number() );
            else if ( f.type() == NumberDouble )
                ss << f.number();
        }

        toSave.append( "name" , ss.str() );
        cacheKey += ss.str();
    }

    if ( unique )
        toSave.appendBool( "unique", unique );

    if ( _seenIndexes.count( cacheKey ) )
        return false;

    _seenIndexes.insert( cacheKey );
    insert( Namespace( ns.c_str() ).getSisterNS( "system.indexes" ).c_str() , toSave.obj() );
    return true;
}
BSONObj listFiles(const BSONObj& _args) {
    static BSONObj cd = BSON( "0" << "." );
    BSONObj args = _args.isEmpty() ? cd : _args;

    uassert( 10257 , "need to specify 1 argument to listFiles" , args.nFields() == 1 );

    BSONObjBuilder lst;

    string rootname = args.firstElement().valuestrsafe();
    path root( rootname );
    stringstream ss;
    ss << "listFiles: no such directory: " << rootname;
    string msg = ss.str();
    uassert( 12581, msg.c_str(), boost::filesystem::exists( root ) );

    directory_iterator end;
    directory_iterator i( root );

    int num = 0;
    while ( i != end ) {
        path p = *i;

        BSONObjBuilder b;
        b << "name" << p.string();
        b.appendBool( "isDirectory", is_directory( p ) );
        if ( ! is_directory( p ) ) {
            try {
                b.append( "size" , (double)file_size( p ) );
            }
            catch ( ... ) {
                i++;
                continue;
            }
        }

        stringstream ss;
        ss << num;
        string name = ss.str();
        lst.append( name, b.done() );

        num++;
        i++;
    }

    BSONObjBuilder ret;
    ret.appendArray( "", lst.done() );
    return ret.obj();
}
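// Illustrative only: a possible result of listFiles(BSON("0" << "/tmp"))
// (paths and sizes are hypothetical).
//   { "": [ { name: "/tmp/a.txt", isDirectory: false, size: 12.0 },
//           { name: "/tmp/sub",   isDirectory: true } ] }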