void DBConnectionPool::appendInfo( BSONObjBuilder& b ) {
    int avail = 0;
    long long created = 0;
    map<ConnectionString::ConnectionType,long long> createdByType;

    BSONObjBuilder bb( b.subobjStart( "hosts" ) );
    {
        scoped_lock lk( _mutex );
        for ( PoolMap::iterator i=_pools.begin(); i!=_pools.end(); ++i ) {
            if ( i->second.numCreated() == 0 )
                continue;

            string s = str::stream() << i->first.ident << "::" << i->first.timeout;

            BSONObjBuilder temp( bb.subobjStart( s ) );
            temp.append( "available" , i->second.numAvailable() );
            temp.appendNumber( "created" , i->second.numCreated() );
            temp.done();

            avail += i->second.numAvailable();
            created += i->second.numCreated();

            long long& x = createdByType[i->second.type()];
            x += i->second.numCreated();
        }
    }
    bb.done();

    // Always report all replica sets being tracked
    set<string> replicaSets = ReplicaSetMonitor::getAllTrackedSets();

    BSONObjBuilder setBuilder( b.subobjStart( "replicaSets" ) );
    for ( set<string>::iterator i=replicaSets.begin(); i!=replicaSets.end(); ++i ) {
        string rs = *i;
        ReplicaSetMonitorPtr m = ReplicaSetMonitor::get( rs );
        if ( ! m ) {
            warning() << "no monitor for set: " << rs << endl;
            continue;
        }

        BSONObjBuilder temp( setBuilder.subobjStart( rs ) );
        m->appendInfo( temp );
        temp.done();
    }
    setBuilder.done();

    {
        BSONObjBuilder temp( bb.subobjStart( "createdByType" ) );
        for ( map<ConnectionString::ConnectionType,long long>::iterator i=createdByType.begin(); i!=createdByType.end(); ++i ) {
            temp.appendNumber( ConnectionString::typeToString( i->first ) , i->second );
        }
        temp.done();
    }

    b.append( "totalAvailable" , avail );
    b.appendNumber( "totalCreated" , created );
}
void DocumentSourceSort::addToBsonArray(BSONArrayBuilder *pBuilder, bool explain) const {
    if (explain) { // always one obj for combined $sort + $limit
        BSONObjBuilder sortObj (pBuilder->subobjStart());
        BSONObjBuilder insides (sortObj.subobjStart(sortName));
        BSONObjBuilder sortKey (insides.subobjStart("sortKey"));
        sortKeyToBson(&sortKey, false);
        sortKey.doneFast();

        if (explain && limitSrc) {
            insides.appendNumber("limit", limitSrc->getLimit());
        }
        insides.doneFast();
        sortObj.doneFast();
    }
    else { // one obj for $sort + maybe one obj for $limit
        {
            BSONObjBuilder sortObj (pBuilder->subobjStart());
            BSONObjBuilder insides (sortObj.subobjStart(sortName));
            sortKeyToBson(&insides, false);
            insides.doneFast();
            sortObj.doneFast();
        }

        if (limitSrc) {
            limitSrc->addToBsonArray(pBuilder, explain);
        }
    }
}
void OpDebug::append(const CurOp& curop,
                     const SingleThreadedLockStats& lockStats,
                     BSONObjBuilder& b) const {
    const size_t maxElementSize = 50 * 1024;

    b.append( "op" , iscommand ? "command" : opToString( op ) );
    b.append( "ns" , ns.toString() );

    if (!query.isEmpty()) {
        appendAsObjOrString(iscommand ? "command" : "query", query, maxElementSize, &b);
    }
    else if (!iscommand && curop.haveQuery()) {
        appendAsObjOrString("query", curop.query(), maxElementSize, &b);
    }

    if (!updateobj.isEmpty()) {
        appendAsObjOrString("updateobj", updateobj, maxElementSize, &b);
    }

    const bool moved = (nmoved >= 1);

    OPDEBUG_APPEND_NUMBER( cursorid );
    OPDEBUG_APPEND_NUMBER( ntoreturn );
    OPDEBUG_APPEND_NUMBER( ntoskip );
    OPDEBUG_APPEND_BOOL( exhaust );

    OPDEBUG_APPEND_NUMBER( nscanned );
    OPDEBUG_APPEND_NUMBER( nscannedObjects );
    OPDEBUG_APPEND_BOOL( idhack );
    OPDEBUG_APPEND_BOOL( scanAndOrder );
    OPDEBUG_APPEND_BOOL( moved );
    OPDEBUG_APPEND_NUMBER( nmoved );
    OPDEBUG_APPEND_NUMBER( nMatched );
    OPDEBUG_APPEND_NUMBER( nModified );
    OPDEBUG_APPEND_NUMBER( ninserted );
    OPDEBUG_APPEND_NUMBER( ndeleted );
    OPDEBUG_APPEND_BOOL( fastmod );
    OPDEBUG_APPEND_BOOL( fastmodinsert );
    OPDEBUG_APPEND_BOOL( upsert );
    OPDEBUG_APPEND_BOOL( cursorExhausted );
    OPDEBUG_APPEND_NUMBER( keyUpdates );
    OPDEBUG_APPEND_NUMBER( writeConflicts );
    b.appendNumber("numYield", curop.numYields());

    {
        BSONObjBuilder locks(b.subobjStart("locks"));
        lockStats.report(&locks);
    }

    if (!exceptionInfo.empty()) {
        exceptionInfo.append(b, "exception", "exceptionCode");
    }

    OPDEBUG_APPEND_NUMBER( nreturned );
    OPDEBUG_APPEND_NUMBER( responseLength );
    b.append( "millis" , executionTime );

    execStats.append(b, "execStats");
}
void CursorCache::appendInfo(BSONObjBuilder& result) const {
    stdx::lock_guard<stdx::mutex> lk(_mutex);
    result.append("sharded", static_cast<int>(cursorStatsMultiTarget.get()));
    result.appendNumber("shardedEver", _shardedTotal);
    result.append("refs", static_cast<int>(cursorStatsSingleTarget.get()));
    result.append("totalOpen", static_cast<int>(cursorStatsTotalOpen.get()));
}
virtual bool run(const string& , BSONObj& cmdObj, int, string& errmsg,
                 BSONObjBuilder& result, bool /*fromRepl*/) {
    /* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
       authenticated.
    */
    if ( cmdObj["forShell"].trueValue() )
        lastError.disableForCommand();

    appendReplicationInfo(result, 0);

    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
    result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes);
    result.appendDate("localTime", jsTime());

    result.append("maxWireVersion", maxWireVersion);
    result.append("minWireVersion", minWireVersion);
    return true;
}
bool run(const string& dbname, BSONObj& jsobj, string& errmsg, BSONObjBuilder& result, bool fromRepl ) {
    result << "version" << versionString
           << "gitVersion" << gitVersion()
           << "sysInfo" << sysInfo();
    result << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
    result.appendBool( "debug" , debug );
    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
    return true;
}
virtual bool run(const string& dbname , BSONObj& cmdObj, int, string& errmsg,
                 BSONObjBuilder& result, bool) {
    string p = cmdObj.firstElement().String();
    if ( p == "*" ) {
        vector<string> names;
        RamLog::getNames( names );

        BSONArrayBuilder arr;
        for ( unsigned i=0; i<names.size(); i++ ) {
            arr.append( names[i] );
        }

        result.appendArray( "names" , arr.arr() );
    }
    else {
        RamLog* ramlog = RamLog::getIfExists(p);
        if ( ! ramlog ) {
            errmsg = str::stream() << "no RamLog named: " << p;
            return false;
        }
        RamLog::LineIterator rl(ramlog);

        result.appendNumber( "totalLinesWritten", rl.getTotalLinesWritten() );

        BSONArrayBuilder arr( result.subarrayStart( "log" ) );
        while (rl.more())
            arr.append(rl.next());
        arr.done();
    }
    return true;
}
BSONObj IndexCounters::generateSection(const BSONElement& configElement) const {
    if ( ! _memSupported ) {
        return BSON( "note" << "not supported on this platform" );
    }

    BSONObjBuilder bb;
    bb.appendNumber( "accesses" , _btreeAccesses );
    bb.appendNumber( "hits" , _btreeMemHits );
    bb.appendNumber( "misses" , _btreeMemMisses );

    bb.append( "resets" , _resets );
    bb.append( "missRatio" , (_btreeAccesses ? (_btreeMemMisses / (double)_btreeAccesses) : 0) );

    return bb.obj();
}
void CursorCache::appendInfo( BSONObjBuilder& result ) const {
    scoped_lock lk( _mutex );
    result.append( "sharded" , (int)_cursors.size() );
    result.appendNumber( "shardedEver" , _shardedTotal );
    result.append( "refs" , (int)_refs.size() );
    result.append( "totalOpen" , (int)(_cursors.size() + _refs.size() ) );
}
bool LastError::appendSelf(BSONObjBuilder& b, bool blankErr) const {
    if (!_valid) {
        if (blankErr)
            b.appendNull("err");
        b.append("n", 0);
        return false;
    }

    if (_msg.empty()) {
        if (blankErr) {
            b.appendNull("err");
        }
    } else {
        b.append("err", _msg);
    }

    if (_code) {
        b.append("code", _code);
        b.append("codeName", ErrorCodes::errorString(ErrorCodes::Error(_code)));
    }
    if (_updatedExisting != NotUpdate)
        b.appendBool("updatedExisting", _updatedExisting == True);
    if (!_upsertedId.isEmpty()) {
        b.append(_upsertedId[kUpsertedFieldName]);
    }
    b.appendNumber("n", _nObjects);

    return !_msg.empty();
}
BSONObj createBSONMetadataDocument(const BSONObj& metadata, Date_t date) {
    BSONObjBuilder builder;
    builder.appendDate(kFTDCIdField, date);
    builder.appendNumber(kFTDCTypeField, static_cast<int>(FTDCType::kMetadata));
    builder.appendObject(kFTDCDocField, metadata.objdata(), metadata.objsize());

    return builder.obj();
}
bool run(OperationContext* txn, const string& dbname , BSONObj& jsobj, int, string& errmsg,
         BSONObjBuilder& result, bool /*fromRepl*/) {
    DBDirectClient db(txn);

    BSONElement e = jsobj.firstElement();
    string toDeleteNs = dbname + '.' + e.valuestr();

    LOG(0) << "CMD: reIndex " << toDeleteNs << endl;

    Lock::DBWrite dbXLock(txn->lockState(), dbname);
    WriteUnitOfWork wunit(txn->recoveryUnit());
    Client::Context ctx(txn, toDeleteNs);

    Collection* collection = ctx.db()->getCollection( txn, toDeleteNs );

    if ( !collection ) {
        errmsg = "ns not found";
        return false;
    }

    BackgroundOperation::assertNoBgOpInProgForNs( toDeleteNs );

    std::vector<BSONObj> indexesInProg = stopIndexBuilds(txn, ctx.db(), jsobj);

    vector<BSONObj> all;
    {
        vector<string> indexNames;
        collection->getCatalogEntry()->getAllIndexes( &indexNames );
        for ( size_t i = 0; i < indexNames.size(); i++ ) {
            const string& name = indexNames[i];
            BSONObj spec = collection->getCatalogEntry()->getIndexSpec( name );
            all.push_back( spec.getOwned() );
        }
    }

    result.appendNumber( "nIndexesWas", all.size() );

    Status s = collection->getIndexCatalog()->dropAllIndexes(txn, true);
    if ( !s.isOK() ) {
        errmsg = "dropIndexes failed";
        return appendCommandStatus( result, s );
    }

    for ( size_t i = 0; i < all.size(); i++ ) {
        BSONObj o = all[i];
        LOG(1) << "reIndex ns: " << toDeleteNs << " index: " << o << endl;
        Status s = collection->getIndexCatalog()->createIndex(txn, o, false);
        if ( !s.isOK() )
            return appendCommandStatus( result, s );
    }

    result.append( "nIndexes", (int)all.size() );
    result.append( "indexes", all );

    IndexBuilder::restoreIndexes(indexesInProg);

    wunit.commit();
    return true;
}
void ProcessInfo::getExtraInfo( BSONObjBuilder& info ) {
    // [dm] i don't think mallinfo works. (64 bit.)  ??
    struct mallinfo malloc_info = mallinfo(); // structure has same name as function that returns it. (see malloc.h)
    info.append("heap_usage_bytes", malloc_info.uordblks/*main arena*/ + malloc_info.hblkhd/*mmap blocks*/);
    // docs claim hblkhd is included in uordblks but it isn't

    LinuxProc p(_pid);
    info.appendNumber("page_faults", static_cast<long long>(p._maj_flt) );
}
BSONObj CollectionBulkLoaderImpl::Stats::toBSON() const {
    BSONObjBuilder bob;
    bob.appendDate("startBuildingIndexes", startBuildingIndexes);
    bob.appendDate("endBuildingIndexes", endBuildingIndexes);
    auto indexElapsed = endBuildingIndexes - startBuildingIndexes;
    long long indexElapsedMillis = duration_cast<Milliseconds>(indexElapsed).count();
    bob.appendNumber("indexElapsedMillis", indexElapsedMillis);

    return bob.obj();
}
BSONObj CurOp::info() {
    BSONObjBuilder b;
    b.append("opid", _opNum);
    bool a = _active && _start;
    b.append("active", a);

    if( a ) {
        b.append("secs_running", elapsedSeconds() );
    }

    b.append( "op" , opToString( _op ) );
    b.append("ns", _ns);

    if (_op == dbInsert) {
        _query.append(b, "insert");
    }
    else {
        _query.append(b , "query");
    }

    if( !_remote.empty() ) {
        b.append("client", _remote.toString());
    }

    if ( _client ) {
        b.append( "desc" , _client->desc() );
        if ( _client->_threadId.size() )
            b.append( "threadId" , _client->_threadId );
        if ( _client->_connectionId )
            b.appendNumber( "connectionId" , _client->_connectionId );
        _client->_ls.reportState(b);
    }

    if ( ! _message.empty() ) {
        if ( _progressMeter.isActive() ) {
            StringBuilder buf;
            buf << _message.toString() << " " << _progressMeter.toString();
            b.append( "msg" , buf.str() );
            BSONObjBuilder sub( b.subobjStart( "progress" ) );
            sub.appendNumber( "done" , (long long)_progressMeter.done() );
            sub.appendNumber( "total" , (long long)_progressMeter.total() );
            sub.done();
        }
        else {
            b.append( "msg" , _message.toString() );
        }
    }

    if( killPending() )
        b.append("killPending", true);

    b.append( "numYields" , _numYields );
    b.append( "lockStats" , _lockStat.report() );

    return b.obj();
}
BSONObj createBSONMetricChunkDocument(ConstDataRange buf, Date_t date) {
    BSONObjBuilder builder;
    builder.appendDate(kFTDCIdField, date);
    builder.appendNumber(kFTDCTypeField, static_cast<int>(FTDCType::kMetricChunk));
    builder.appendBinData(kFTDCDataField, buf.length(), BinDataType::BinDataGeneral, buf.data());

    return builder.obj();
}
BSONObj CurOp::infoNoauth() {
    BSONObjBuilder b;
    b.append("opid", _opNum);
    bool a = _active && _start;
    b.append("active", a);

    if ( _lockType ) {
        char str[2];
        str[0] = _lockType;
        str[1] = 0;
        b.append("lockType" , str);
    }
    b.append("waitingForLock" , _waitingForLock );

    if( a ) {
        b.append("secs_running", elapsedSeconds() );
    }

    b.append( "op" , opToString( _op ) );
    b.append("ns", _ns);

    _query.append( b , "query" );

    if( !_remote.empty() ) {
        b.append("client", _remote.toString());
    }

    if ( _client ) {
        b.append( "desc" , _client->desc() );
        if ( _client->_threadId.size() )
            b.append( "threadId" , _client->_threadId );
        if ( _client->_connectionId )
            b.appendNumber( "connectionId" , _client->_connectionId );
    }

    if ( ! _message.empty() ) {
        if ( _progressMeter.isActive() ) {
            StringBuilder buf;
            buf << _message.toString() << " " << _progressMeter.toString();
            b.append( "msg" , buf.str() );
            BSONObjBuilder sub( b.subobjStart( "progress" ) );
            sub.appendNumber( "done" , (long long)_progressMeter.done() );
            sub.appendNumber( "total" , (long long)_progressMeter.total() );
            sub.done();
        }
        else {
            b.append( "msg" , _message.toString() );
        }
    }

    if( killed() )
        b.append("killed", true);

    b.append( "numYields" , _numYields );

    return b.obj();
}
BSONObj ClusteredCursor::explain(){
    BSONObjBuilder b;
    b.append( "clusteredType" , type() );

    long long nscanned = 0;
    long long nscannedObjects = 0;
    long long n = 0;
    long long millis = 0;
    double numExplains = 0;

    map<string,list<BSONObj> > out;
    {
        _explain( out );

        BSONObjBuilder x( b.subobjStart( "shards" ) );
        for ( map<string,list<BSONObj> >::iterator i=out.begin(); i!=out.end(); ++i ){
            string shard = i->first;
            list<BSONObj> l = i->second;
            BSONArrayBuilder y( x.subarrayStart( shard.c_str() ) );
            for ( list<BSONObj>::iterator j=l.begin(); j!=l.end(); ++j ){
                BSONObj temp = *j;
                y.append( temp );

                nscanned += temp["nscanned"].numberLong();
                nscannedObjects += temp["nscannedObjects"].numberLong();
                n += temp["n"].numberLong();
                millis += temp["millis"].numberLong();
                numExplains++;
            }
            y.done();
        }
        x.done();
    }

    b.appendNumber( "nscanned" , nscanned );
    b.appendNumber( "nscannedObjects" , nscannedObjects );
    b.appendNumber( "n" , n );
    b.appendNumber( "millisTotal" , millis );
    b.append( "millisAvg" , (int)((double)millis / numExplains ) );
    b.append( "numQueries" , (int)numExplains );
    b.append( "numShards" , (int)out.size() );

    return b.obj();
}
BSONObj BatchedCommandResponse::toBSON() const {
    BSONObjBuilder builder;

    if (_isOkSet)
        builder.append(ok(), _ok);

    if (_isErrCodeSet)
        builder.append(errCode(), _errCode);

    if (_isErrMessageSet)
        builder.append(errMessage(), _errMessage);

    if (_isNModifiedSet)
        builder.appendNumber(nModified(), _nModified);
    if (_isNSet)
        builder.appendNumber(n(), _n);

    if (_upsertDetails.get()) {
        BSONArrayBuilder upsertedBuilder(builder.subarrayStart(upsertDetails()));
        for (std::vector<BatchedUpsertDetail*>::const_iterator it = _upsertDetails->begin();
             it != _upsertDetails->end();
             ++it) {
            BSONObj upsertedDetailsDocument = (*it)->toBSON();
            upsertedBuilder.append(upsertedDetailsDocument);
        }
        upsertedBuilder.done();
    }

    if (_isLastOpSet)
        builder.append(lastOp(), _lastOp);
    if (_isElectionIdSet)
        builder.appendOID(electionId(), const_cast<OID*>(&_electionId));

    if (_writeErrorDetails.get()) {
        BSONArrayBuilder errDetailsBuilder(builder.subarrayStart(writeErrors()));
        for (std::vector<WriteErrorDetail*>::const_iterator it = _writeErrorDetails->begin();
             it != _writeErrorDetails->end();
             ++it) {
            BSONObj errDetailsDocument = (*it)->toBSON();
            errDetailsBuilder.append(errDetailsDocument);
        }
        errDetailsBuilder.done();
    }

    if (_wcErrDetails.get()) {
        builder.append(writeConcernError(), _wcErrDetails->toBSON());
    }

    return builder.obj();
}
void ClientCursor::appendStats( BSONObjBuilder& result ) {
    recursive_scoped_lock lock(ccmutex);
    result.appendNumber("totalOpen", clientCursorsById.size() );
    result.appendNumber("clientCursors_size", (int) numCursors());
    result.appendNumber("timedOut" , numberTimedOut);
    unsigned pinned = 0;
    unsigned notimeout = 0;
    for ( CCById::iterator i = clientCursorsById.begin(); i != clientCursorsById.end(); i++ ) {
        unsigned p = i->second->_pinValue;
        if( p >= 100 )
            pinned++;
        else if( p > 0 )
            notimeout++;
    }
    if( pinned )
        result.append("pinned", pinned);
    if( notimeout )
        result.append("totalNoTimeout", notimeout);
}
void Client::reportState(BSONObjBuilder& builder) {
    builder.append("desc", desc());
    if (_threadId.size()) {
        builder.append("threadId", _threadId);
    }
    if (_connectionId) {
        builder.appendNumber("connectionId", _connectionId);
    }
}
void Client::reportState(BSONObjBuilder& builder) {
    builder.append("desc", desc());

    if (_connectionId) {
        builder.appendNumber("connectionId", _connectionId);
    }

    if (hasRemote()) {
        builder.append("client", getRemote().toString());
    }
}
void Client::reportState(BSONObjBuilder& builder) {
    builder.append("desc", desc());

    std::stringstream ss;
    ss << _threadId;
    builder.append("threadId", ss.str());

    if (_connectionId) {
        builder.appendNumber("connectionId", _connectionId);
    }
}
void V8CpuProfiler::traverseDepthFirst(const v8::CpuProfileNode* cpuProfileNode,
                                       BSONArrayBuilder& arrayBuilder) {
    if (cpuProfileNode == NULL)
        return;

    BSONObjBuilder frameObjBuilder;
    frameObjBuilder.append("Function",
                           *v8::String::Utf8Value(cpuProfileNode->GetFunctionName()));
    frameObjBuilder.append("Source",
                           *v8::String::Utf8Value(cpuProfileNode->GetScriptResourceName()));
    frameObjBuilder.appendNumber("Line", cpuProfileNode->GetLineNumber());
    frameObjBuilder.appendNumber("SelfTime", cpuProfileNode->GetSelfTime());
    frameObjBuilder.appendNumber("TotalTime", cpuProfileNode->GetTotalTime());

    if (cpuProfileNode->GetChildrenCount()) {
        BSONArrayBuilder subArrayBuilder(frameObjBuilder.subarrayStart("Children"));
        for (int i = 0; i < cpuProfileNode->GetChildrenCount(); ++i) {
            traverseDepthFirst(cpuProfileNode->GetChild(i), subArrayBuilder);
        }
        subArrayBuilder.done();
    }

    arrayBuilder << frameObjBuilder.obj();
}
Status Collection::validate( OperationContext* txn,
                             bool full, bool scanData,
                             ValidateResults* results, BSONObjBuilder* output ){
    MyValidateAdaptor adaptor;
    Status status = _recordStore->validate( txn, full, scanData, &adaptor, results, output );
    if ( !status.isOK() )
        return status;

    { // indexes
        output->append("nIndexes", _indexCatalog.numIndexesReady( txn ) );
        int idxn = 0;
        try  {
            // Only applicable when 'full' validation is requested.
            boost::scoped_ptr<BSONObjBuilder> indexDetails(full ? new BSONObjBuilder() : NULL);
            BSONObjBuilder indexes; // not using subObjStart to be exception safe
            IndexCatalog::IndexIterator i = _indexCatalog.getIndexIterator(txn, false);
            while( i.more() ) {
                const IndexDescriptor* descriptor = i.next();
                log(LogComponent::kIndex) << "validating index " << descriptor->indexNamespace() << endl;
                IndexAccessMethod* iam = _indexCatalog.getIndex( descriptor );
                invariant( iam );

                boost::scoped_ptr<BSONObjBuilder> bob(
                    indexDetails.get() ? new BSONObjBuilder(
                        indexDetails->subobjStart(descriptor->indexNamespace())) :
                    NULL);

                int64_t keys;
                iam->validate(txn, full, &keys, bob.get());
                indexes.appendNumber(descriptor->indexNamespace(),
                                     static_cast<long long>(keys));

                idxn++;
            }

            output->append("keysPerIndex", indexes.done());
            if (indexDetails.get()) {
                output->append("indexDetails", indexDetails->done());
            }
        }
        catch ( DBException& exc ) {
            string err = str::stream() <<
                "exception during index validate idxn "<<
                BSONObjBuilder::numStr(idxn) <<
                ": " << exc.toString();
            results->errors.push_back( err );
            results->valid = false;
        }
    }

    return Status::OK();
}
virtual bool run(OperationContext* txn,
                 const string&,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result) {
    /* currently request to arbiter is (somewhat arbitrarily) an ismaster request that is not
       authenticated.
    */
    if (cmdObj["forShell"].trueValue())
        LastError::get(txn->getClient()).disable();

    appendReplicationInfo(txn, result, 0);

    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
    result.appendNumber("maxMessageSizeBytes", MaxMessageSizeBytes);
    result.appendNumber("maxWriteBatchSize", BatchedCommandRequest::kMaxWriteBatchSize);
    result.appendDate("localTime", jsTime());

    result.append("maxWireVersion", maxWireVersion);
    result.append("minWireVersion", minWireVersion);
    return true;
}
virtual bool run(OperationContext* txn,
                 const string& dbname,
                 BSONObj& cmdObj,
                 int,
                 string& errmsg,
                 BSONObjBuilder& result,
                 bool fromRepl) {
    CountRequest request;
    Status parseStatus = parseRequest(dbname, cmdObj, &request);
    if (!parseStatus.isOK()) {
        return appendCommandStatus(result, parseStatus);
    }

    AutoGetCollectionForRead ctx(txn, request.ns);
    Collection* collection = ctx.getCollection();

    // Prevent chunks from being cleaned up during yields - this allows us to only check the
    // version on initial entry into count.
    RangePreserver preserver(collection);

    PlanExecutor* rawExec;
    Status getExecStatus = getExecutorCount(txn,
                                            collection,
                                            request,
                                            PlanExecutor::YIELD_AUTO,
                                            &rawExec);
    if (!getExecStatus.isOK()) {
        return appendCommandStatus(result, getExecStatus);
    }

    scoped_ptr<PlanExecutor> exec(rawExec);

    // Store the plan summary string in CurOp.
    if (NULL != txn->getCurOp()) {
        txn->getCurOp()->debug().planSummary = Explain::getPlanSummary(exec.get());
    }

    Status execPlanStatus = exec->executePlan();
    if (!execPlanStatus.isOK()) {
        return appendCommandStatus(result, execPlanStatus);
    }

    // Plan is done executing. We just need to pull the count out of the root stage.
    invariant(STAGE_COUNT == exec->getRootStage()->stageType());
    CountStage* countStage = static_cast<CountStage*>(exec->getRootStage());
    const CountStats* countStats =
        static_cast<const CountStats*>(countStage->getSpecificStats());

    result.appendNumber("n", countStats->nCounted);
    return true;
}
void appendBuildInfo(BSONObjBuilder& result) {
    result << "version" << versionString
           << "gitVersion" << gitVersion()
           << "sysInfo" << sysInfo()
           << "loaderFlags" << loaderFlags()
           << "compilerFlags" << compilerFlags()
           << "allocator" << allocator()
           << "versionArray" << versionArray
           << "javascriptEngine" << compiledJSEngine()
/*TODO: add this back once the module system is in place -- maybe once we do something like
        serverstatus with callbacks*/
//         << "interpreterVersion" << globalScriptEngine->getInterpreterVersionString()
           << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );

    result.appendBool( "debug" , debug );
    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
}
void DBConnectionPool::appendInfo(BSONObjBuilder& b) {
    int avail = 0;
    long long created = 0;
    map<ConnectionString::ConnectionType, long long> createdByType;

    BSONObjBuilder bb(b.subobjStart("hosts"));
    {
        stdx::lock_guard<stdx::mutex> lk(_mutex);

        for (PoolMap::iterator i = _pools.begin(); i != _pools.end(); ++i) {
            if (i->second.numCreated() == 0)
                continue;

            string s = str::stream() << i->first.ident << "::" << i->first.timeout;

            BSONObjBuilder temp(bb.subobjStart(s));
            temp.append("available", i->second.numAvailable());
            temp.appendNumber("created", i->second.numCreated());
            temp.done();

            avail += i->second.numAvailable();
            created += i->second.numCreated();

            long long& x = createdByType[i->second.type()];
            x += i->second.numCreated();
        }
    }
    bb.done();

    // Always report all replica sets being tracked
    BSONObjBuilder setBuilder(b.subobjStart("replicaSets"));
    globalRSMonitorManager.report(&setBuilder);
    setBuilder.done();

    {
        BSONObjBuilder temp(bb.subobjStart("createdByType"));
        for (map<ConnectionString::ConnectionType, long long>::iterator i = createdByType.begin();
             i != createdByType.end();
             ++i) {
            temp.appendNumber(ConnectionString::typeToString(i->first), i->second);
        }
        temp.done();
    }

    b.append("totalAvailable", avail);
    b.appendNumber("totalCreated", created);
}
bool run(const std::string& dbname,
         BSONObj& jsobj,
         int,  // options
         std::string& errmsg,
         BSONObjBuilder& result,
         bool fromRepl) {
    result << "version" << versionString
           << "gitVersion" << gitVersion()
           << "sysInfo" << sysInfo()
           << "versionArray" << versionArray
           << "interpreterVersion" << globalScriptEngine->getInterpreterVersionString()
           << "bits" << ( sizeof( int* ) == 4 ? 32 : 64 );
    result.appendBool( "debug" , debug );
    result.appendNumber("maxBsonObjectSize", BSONObjMaxUserSize);
    return true;
}
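// The snippets above all use the same BSONObjBuilder pattern: appendNumber() for
// long long counters, append() for small ints / strings / bools, subobjStart() plus
// done() for nested documents, and obj() to finalize the root. The function below is
// a minimal, self-contained sketch of that pattern; the function name and field names
// ("totalCreated", "progress", etc.) are hypothetical illustrations, not code from the
// MongoDB source, and only builder calls already shown above are used.
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {

BSONObj buildExampleStats() {
    BSONObjBuilder b;
    b.append("desc", "example");              // string field
    b.appendNumber("totalCreated", 12345LL);  // 64-bit counter

    {
        // Nested sub-document; it must be done() before the parent is used again.
        BSONObjBuilder sub(b.subobjStart("progress"));
        sub.appendNumber("done", 10LL);
        sub.appendNumber("total", 100LL);
        sub.done();
    }

    return b.obj();  // finalizes the buffer and returns the owned document
}

}  // namespace mongo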