// Aggregate all WC errors for the whole batch into a single error
static void combineWCErrors( const vector<ShardError*>& wcResponses,
                             BatchedErrorDetail* error ) {

    // Special case, pass through details of single error for better usability
    if ( wcResponses.size() == 1 ) {
        wcResponses.front()->error.cloneTo( error );
        return;
    }

    error->setErrCode( ErrorCodes::WriteConcernFailed );

    // Generate the multi-error message below
    stringstream msg;
    msg << "multiple errors reported : ";

    BSONArrayBuilder errB;
    for ( vector<ShardError*>::const_iterator it = wcResponses.begin();
          it != wcResponses.end(); ++it ) {
        const ShardError* wcError = *it;
        if ( it != wcResponses.begin() ) msg << " :: and :: ";
        msg << wcError->error.getErrMessage();
        errB.append( wcError->error.getErrInfo() );
    }

    error->setErrInfo( BSON( "info" << errB.arr() ) );
    error->setErrMessage( msg.str() );
}
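// A minimal sketch (not from the batch-write sources above) of the same
// aggregation idiom: fold per-item messages into one BSON array and attach it
// as a single "errInfo" payload. Assumes the mongo BSON builders and the
// using-declarations visible in the surrounding snippets; names are illustrative.
static BSONObj combineMessages( const vector<string>& messages ) {
    stringstream msg;
    msg << "multiple errors reported : ";

    BSONArrayBuilder errB;
    for ( size_t i = 0; i < messages.size(); ++i ) {
        if ( i > 0 ) msg << " :: and :: ";
        msg << messages[i];
        errB.append( messages[i] );
    }

    // Nested BSON() macros compose the envelope in one expression
    return BSON( "errmsg" << msg.str() << "errInfo" << BSON( "info" << errB.arr() ) );
}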
virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
                 BSONObjBuilder& result, bool) {
    string p = cmdObj.firstElement().String();
    if ( p == "*" ) {
        vector<string> names;
        RamLog::getNames( names );

        BSONArrayBuilder arr;
        for ( unsigned i=0; i<names.size(); i++ ) {
            arr.append( names[i] );
        }

        result.appendArray( "names" , arr.arr() );
    }
    else {
        RamLog* ramlog = RamLog::getIfExists(p);
        if ( ! ramlog ) {
            errmsg = str::stream() << "no RamLog named: " << p;
            return false;
        }
        RamLog::LineIterator rl(ramlog);

        result.appendNumber( "totalLinesWritten", rl.getTotalLinesWritten() );

        BSONArrayBuilder arr( result.subarrayStart( "log" ) );
        while (rl.more())
            arr.append(rl.next());
        arr.done();
    }
    return true;
}
Status runApplyOpsCmd(OperationContext* txn,
                      const std::vector<ChunkType>& chunksToMerge,
                      const ChunkVersion& currShardVersion,
                      const ChunkVersion& newMergedVersion) {
    BSONArrayBuilder updatesB;

    // The chunk we'll be "expanding" is the first chunk
    const ChunkType& firstChunk = chunksToMerge.front();

    // Fill in details not tracked by metadata
    ChunkType mergedChunk(firstChunk);
    mergedChunk.setName(ChunkType::genID(firstChunk.getNS(), firstChunk.getMin()));
    mergedChunk.setMax(chunksToMerge.back().getMax());
    mergedChunk.setVersion(newMergedVersion);

    updatesB.append(buildOpMergeChunk(mergedChunk));

    // Don't remove chunk we're expanding
    for (size_t i = 1; i < chunksToMerge.size(); ++i) {
        ChunkType chunkToMerge(chunksToMerge[i]);
        chunkToMerge.setName(ChunkType::genID(chunkToMerge.getNS(), chunkToMerge.getMin()));
        updatesB.append(buildOpRemoveChunk(chunkToMerge));
    }

    BSONArray preCond = buildOpPrecond(firstChunk.getNS(), firstChunk.getShard(), currShardVersion);

    return grid.catalogManager(txn)->applyChunkOpsDeprecated(
        txn, updatesB.arr(), preCond, firstChunk.getNS(), newMergedVersion);
}
void writeMetadataFile( const string coll, boost::filesystem::path outputFile,
                        map<string, BSONObj> options, multimap<string, BSONObj> indexes ) {
    toolInfoLog() << "\tMetadata for " << coll
                  << " to " << outputFile.string() << std::endl;

    bool hasOptions = options.count(coll) > 0;
    bool hasIndexes = indexes.count(coll) > 0;

    BSONObjBuilder metadata;

    if (hasOptions) {
        metadata << "options" << options.find(coll)->second;
    }

    if (hasIndexes) {
        BSONArrayBuilder indexesOutput (metadata.subarrayStart("indexes"));

        // I'd kill for C++11 auto here...
        const pair<multimap<string, BSONObj>::iterator, multimap<string, BSONObj>::iterator>
            range = indexes.equal_range(coll);

        for (multimap<string, BSONObj>::iterator it=range.first; it!=range.second; ++it) {
            indexesOutput << it->second;
        }

        indexesOutput.done();
    }

    ofstream file (outputFile.string().c_str());
    uassert(15933, "Couldn't open file: " + outputFile.string(), file.is_open());

    file << metadata.done().jsonString();
}
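// Sketch of the subarrayStart() pattern used above, under the assumed mongo
// BSON API: the child BSONArrayBuilder writes directly into the parent's
// buffer, so it must be done() before the parent builder is used again.
BSONObj buildWithSubarray( const vector<BSONObj>& indexSpecs ) {
    BSONObjBuilder metadata;

    BSONArrayBuilder indexesOutput( metadata.subarrayStart( "indexes" ) );
    for ( vector<BSONObj>::const_iterator it = indexSpecs.begin();
          it != indexSpecs.end(); ++it ) {
        indexesOutput << *it;
    }
    indexesOutput.done(); // close the subarray before finishing 'metadata'

    return metadata.obj();
}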
mongo::BSONArray MockRemoteDBServer::query(MockRemoteDBServer::InstanceID id,
                                           const string& ns,
                                           mongo::Query query,
                                           int nToReturn,
                                           int nToSkip,
                                           const BSONObj* fieldsToReturn,
                                           int queryOptions,
                                           int batchSize) {
    checkIfUp(id);

    if (_delayMilliSec > 0) {
        mongo::sleepmillis(_delayMilliSec);
    }

    checkIfUp(id);

    scoped_spinlock sLock(_lock);
    _queryCount++;

    const vector<BSONObj>& coll = _dataMgr[ns];
    BSONArrayBuilder result;
    for (vector<BSONObj>::const_iterator iter = coll.begin(); iter != coll.end(); ++iter) {
        result.append(iter->copy());
    }

    return BSONArray(result.obj());
}
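// Small sketch of the two ways to finalize an array builder, assuming the
// mongo BSON API: arr() yields a BSONArray directly, while obj() yields the
// same bytes as a BSONObj that callers may wrap, as the mock query above does.
BSONArray finalizeExample() {
    BSONArrayBuilder result;
    result.append(1);
    result.append(2);
    return BSONArray(result.obj()); // equivalently: return result.arr();
}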
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg,
         BSONObjBuilder& result, bool /*fromRepl*/) {
    Client::ReadContext ctx( txn, dbname );
    const Database* d = ctx.ctx().db();
    const DatabaseCatalogEntry* dbEntry = d->getDatabaseCatalogEntry();

    list<string> names;
    dbEntry->getCollectionNamespaces( &names );

    BSONArrayBuilder arr;
    for ( list<string>::const_iterator i = names.begin(); i != names.end(); ++i ) {
        string ns = *i;

        BSONObjBuilder b;
        b.append( "name", nsToCollectionSubstring( ns ) );

        CollectionOptions options =
            dbEntry->getCollectionCatalogEntry( txn, ns )->getCollectionOptions(txn);
        b.append( "options", options.toBSON() );

        arr.append( b.obj() );
    }

    result.append( "collections", arr.arr() );

    return true;
}
void NetworkTestEnv::onFindWithMetadataCommand(OnFindCommandWithMetadataFunction func) {
    onCommandWithMetadata([&func](const RemoteCommandRequest& request) -> RemoteCommandResponse {
        const auto& resultStatus = func(request);

        if (!resultStatus.isOK()) {
            return resultStatus.getStatus();
        }

        std::vector<BSONObj> result;
        BSONObj metadata;
        std::tie(result, metadata) = resultStatus.getValue();

        BSONArrayBuilder arr;
        for (const auto& obj : result) {
            arr.append(obj);
        }

        const NamespaceString nss =
            NamespaceString(request.dbname, request.cmdObj.firstElement().String());
        BSONObjBuilder resultBuilder;
        appendCursorResponseObject(0LL, nss.toString(), arr.arr(), &resultBuilder);

        return RemoteCommandResponse(resultBuilder.obj(), metadata, Milliseconds(1));
    });
}
void CMISProductNotificationAPI::Convert2JSON(CNotificationModel* pData, BSONObj &boRecord)
{
    //{"data":[{"request_code":"RP130314/004","operation_department":"BO6"}],"source":"SDK"}
    BSONArrayBuilder babElement;
    BSONObjBuilder bobProductInfo;
    map<string, string>::iterator mit;
    map<string, string> mapAPIField;
    mapAPIField["department_alias"] = "operation_department";
    mapAPIField["request_code"] = "request_code";

    BSONObj boTemp = *pData;
    for (mit = mapAPIField.begin(); mit != mapAPIField.end(); mit++)
    {
        if (boTemp.hasField(mit->first)){
            bobProductInfo.append(mit->second, boTemp.getStringField(mit->first.c_str()));
        }
        else{
            bobProductInfo.append(mit->second, "");
        }
    }
    babElement << bobProductInfo.obj();

    boRecord = BSON( "data" << babElement.arr() << "source" << "SDK" );
}
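// Stream-style miniature of the conversion above (field values are placeholders;
// assumes the mongo BSON API only): operator<< appends both plain values and
// finished sub-objects, and BSON() composes the final envelope.
BSONObj buildNotificationEnvelope() {
    BSONArrayBuilder babElement;
    babElement << BSON( "request_code" << "RP000000/000"
                        << "operation_department" << "BO0" );
    return BSON( "data" << babElement.arr() << "source" << "SDK" );
}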
int64_t RecordStoreV1Base::storageSize(OperationContext* txn,
                                       BSONObjBuilder* extraInfo,
                                       int level) const {
    BSONArrayBuilder extentInfo;

    int64_t total = 0;
    int n = 0;

    DiskLoc cur = _details->firstExtent(txn);

    while (!cur.isNull()) {
        Extent* e = _extentManager->getExtent(cur);

        total += e->length;
        n++;

        if (extraInfo && level > 0) {
            // field name fixed: was "loc: ", which produced a field literally named "loc: "
            extentInfo.append(BSON("len" << e->length << "loc" << e->myLoc.toBSONObj()));
        }
        cur = e->xnext;
    }

    if (extraInfo) {
        extraInfo->append("numExtents", n);
        if (level > 0)
            extraInfo->append("extents", extentInfo.arr());
    }

    return total;
}
BSONArray CAdminUserGroupsProductsController::GetListUserGroupsProducts(
    auto_ptr<DBClientCursor>& ptrCursor, const string &strProductCode)
{
    BSONArrayBuilder baGroupProduct;
    BSONObj boRecord;
    if (FindOperatingUserGroupByProduct(ptrCursor, strProductCode))
    {
        try
        {
            while(ptrCursor->more())
            {
                boRecord = ptrCursor->nextSafe();
                baGroupProduct << boRecord["user_group_id"];
            }
        }
        catch(exception& ex)
        {
            stringstream strErrorMess;
            string strLog;
            strErrorMess << ex.what() << "][" << __FILE__ << "|" << __LINE__ ;
            strLog = CUtilities::FormatLog(ERROR_MSG, "CAdminUserGroupsProductsController",
                                           "GetListUserGroupsProducts",
                                           "Exception:" + strErrorMess.str());
            CUtilities::WriteErrorLog(ERROR_MSG, strLog);
        }
    }
    return baGroupProduct.arr();
}
void ShardingMongodTestFixture::setUp() {
    ServiceContextMongoDTest::setUp();
    auto serviceContext = getServiceContext();
    _opCtx = cc().makeOperationContext();

    // Set up this node as part of a replica set.
    repl::ReplSettings replSettings;
    replSettings.setReplSetString(ConnectionString::forReplicaSet(_setName, _servers).toString());
    auto replCoordPtr = makeReplicationCoordinator(replSettings);
    _replCoord = replCoordPtr.get();

    BSONArrayBuilder serversBob;
    for (size_t i = 0; i < _servers.size(); ++i) {
        serversBob.append(BSON("host" << _servers[i].toString() << "_id" << static_cast<int>(i)));
    }
    repl::ReplicaSetConfig replSetConfig;
    replSetConfig.initialize(BSON("_id" << _setName << "protocolVersion" << 1 << "version" << 3
                                        << "members" << serversBob.arr()));
    replCoordPtr->setGetConfigReturnValue(replSetConfig);

    repl::ReplicationCoordinator::set(serviceContext, std::move(replCoordPtr));

    serviceContext->setOpObserver(stdx::make_unique<OpObserverImpl>());
    repl::setOplogCollectionName();
    repl::createOplog(_opCtx.get());
}
virtual bool run(const string& dbname, BSONObj& cmdObj, int, string& errmsg,
                 BSONObjBuilder& result, bool) {
    string p = cmdObj.firstElement().String();
    if ( p == "*" ) {
        vector<string> names;
        RamLog::getNames( names );

        BSONArrayBuilder arr;
        for ( unsigned i=0; i<names.size(); i++ ) {
            arr.append( names[i] );
        }

        result.appendArray( "names" , arr.arr() );
    }
    else {
        RamLog* rl = RamLog::get( p );
        if ( ! rl ) {
            errmsg = str::stream() << "no RamLog named: " << p;
            return false;
        }

        result.appendNumber( "totalLinesWritten", rl->getTotalLinesWritten() );

        vector<const char*> lines;
        rl->get( lines );

        BSONArrayBuilder arr( result.subarrayStart( "log" ) );
        for ( unsigned i=0; i<lines.size(); i++ )
            arr.append( lines[i] );
        arr.done();
    }
    return true;
}
BSONObj getErrorLabels(const OperationSessionInfoFromClient& sessionOptions,
                       const std::string& commandName,
                       ErrorCodes::Error code,
                       bool hasWriteConcernError) {
    BSONArrayBuilder labelArray;

    // Note that we only apply the TransientTxnError label if the "autocommit" field is present in
    // the session options. When present, "autocommit" will always be false, so we don't check its
    // value.
    if (sessionOptions.getAutocommit() &&
        isTransientTransactionError(code,
                                    hasWriteConcernError,
                                    commandName == "commitTransaction" ||
                                        commandName == "coordinateCommitTransaction")) {
        // An error code for which isTransientTransactionError() is true indicates a transaction
        // failure with no persistent side effects.
        labelArray << txn::TransientTxnErrorFieldName;
    }

    if (ErrorCodes::isNonResumableChangeStreamError(code)) {
        labelArray << "NonResumableChangeStreamError";
    }

    return (labelArray.arrSize() > 0) ? BSON("errorLabels" << labelArray.arr()) : BSONObj();
}
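// Minimal sketch of the arrSize() guard above (assumed mongo BSON API; names
// are illustrative): emit the wrapper object only when at least one label was
// actually appended, otherwise return an empty BSONObj.
BSONObj labelsOrEmpty(const std::vector<std::string>& labels) {
    BSONArrayBuilder labelArray;
    for (const auto& label : labels) {
        labelArray << label;
    }
    return (labelArray.arrSize() > 0) ? BSON("errorLabels" << labelArray.arr()) : BSONObj();
}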
void Pipeline::toBson(BSONObjBuilder *pBuilder) const {
    /* create an array out of the pipeline operations */
    BSONArrayBuilder arrayBuilder;
    for(SourceContainer::const_iterator iter(sources.begin()),
            listEnd(sources.end()); iter != listEnd; ++iter) {
        intrusive_ptr<DocumentSource> pSource(*iter);
        pSource->addToBsonArray(&arrayBuilder);
    }

    /* add the top-level items to the command */
    pBuilder->append(commandName, getCollectionName());
    pBuilder->append(pipelineName, arrayBuilder.arr());

    if (explain) {
        pBuilder->append(explainName, explain);
    }

    bool btemp;
    if ((btemp = getSplitMongodPipeline())) {
        pBuilder->append(splitMongodPipelineName, btemp);
    }

    if ((btemp = pCtx->getInRouter())) {
        pBuilder->append(fromRouterName, btemp);
    }
}
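// Sketch of the "each element serializes itself into a shared array" pattern
// above, with a hypothetical Stage type standing in for DocumentSource
// (assumed mongo BSON API; names are illustrative, not the real interface).
struct Stage {
    std::string name;
    void addToBsonArray(BSONArrayBuilder* pBuilder) const {
        // each stage appends its own representation to the shared builder
        pBuilder->append(BSON("stage" << name));
    }
};

BSONArray serializeStages(const std::vector<Stage>& stages) {
    BSONArrayBuilder arrayBuilder;
    for (std::vector<Stage>::const_iterator iter = stages.begin();
         iter != stages.end(); ++iter) {
        iter->addToBsonArray(&arrayBuilder);
    }
    return arrayBuilder.arr();
}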
void DocumentSource::writeString(stringstream &ss) const {
    BSONArrayBuilder bab;
    addToBsonArray(&bab);
    BSONArray ba(bab.arr());
    ss << ba.toString(/* isArray */true);
    // our toString should use standard string types.....
}
bool WriteBatchExecutor::executeBatch(const WriteBatch& writeBatch,
                                      string* errMsg,
                                      BSONObjBuilder* result) {
    Timer commandTimer;

    BSONArrayBuilder resultsArray;
    bool batchSuccess = applyWriteBatch(writeBatch, &resultsArray);
    result->append("resultsBatchSuccess", batchSuccess);
    result->append("results", resultsArray.arr());

    BSONObjBuilder writeConcernResults;
    Timer writeConcernTimer;

    // TODO Define final layout for write commands result object.

    // bool writeConcernSuccess = waitForWriteConcern(writeBatch.getWriteConcern(),
    //                                                writeConcernResults,
    //                                                !batchSuccess,
    //                                                *errMsg);
    // if (!writeConcernSuccess) {
    //     return false;
    // }
    //
    // const char *writeConcernErrField = writeConcernResults.asTempObj().getStringField("err");
    // // TODO Should consider changing following existing strange behavior with GLE?
    // // - {w:2} specified with batch where any op fails skips replication wait, yields success
    // bool writeConcernFulfilled = !writeConcernErrField || strlen(writeConcernErrField) == 0;
    // writeConcernResults.append("micros", static_cast<long long>(writeConcernTimer.micros()));
    // writeConcernResults.append("ok", writeConcernFulfilled);
    // result->append("writeConcernResults", writeConcernResults.obj());

    result->append("micros", static_cast<long long>(commandTimer.micros()));

    return true;
}
bool run(OperationContext* txn, const string& dbname, BSONObj& jsobj, int, string& errmsg,
         BSONObjBuilder& result, bool /*fromRepl*/) {
    ScopedTransaction scopedXact(txn, MODE_IS);
    AutoGetDb autoDb(txn, dbname, MODE_S);

    const Database* d = autoDb.getDb();
    const DatabaseCatalogEntry* dbEntry = NULL;

    list<string> names;
    if ( d ) {
        dbEntry = d->getDatabaseCatalogEntry();
        dbEntry->getCollectionNamespaces( &names );
        names.sort();
    }

    scoped_ptr<MatchExpression> matcher;
    if ( jsobj["filter"].isABSONObj() ) {
        StatusWithMatchExpression parsed =
            MatchExpressionParser::parse( jsobj["filter"].Obj() );
        if ( !parsed.isOK() ) {
            return appendCommandStatus( result, parsed.getStatus() );
        }
        matcher.reset( parsed.getValue() );
    }

    BSONArrayBuilder arr;
    for ( list<string>::const_iterator i = names.begin(); i != names.end(); ++i ) {
        string ns = *i;

        StringData collection = nsToCollectionSubstring( ns );
        if ( collection == "system.namespaces" ) {
            continue;
        }

        BSONObjBuilder b;
        b.append( "name", collection );

        CollectionOptions options =
            dbEntry->getCollectionCatalogEntry( txn, ns )->getCollectionOptions(txn);
        b.append( "options", options.toBSON() );

        BSONObj maybe = b.obj();
        if ( matcher && !matcher->matchesBSON( maybe ) ) {
            continue;
        }

        arr.append( maybe );
    }

    result.append( "collections", arr.arr() );

    return true;
}
bo ReplSetConfig::asBson() const {
    bob b;
    b.append("_id", _id).append("version", version);

    BSONArrayBuilder a;
    for( unsigned i = 0; i < members.size(); i++ )
        a.append( members[i].asBson() );
    b.append("members", a.arr());

    if( !ho.isDefault() || !getLastErrorDefaults.isEmpty() || !rules.empty()) {
        bob settings;
        if( !rules.empty() ) {
            bob modes;
            for (map<string,TagRule*>::const_iterator it = rules.begin(); it != rules.end(); it++) {
                bob clauses;
                vector<TagClause*> r = (*it).second->clauses;
                for (vector<TagClause*>::iterator it2 = r.begin(); it2 < r.end(); it2++) {
                    clauses << (*it2)->name << (*it2)->target;
                }
                modes << (*it).first << clauses.obj();
            }
            settings << "getLastErrorModes" << modes.obj();
        }
        if( !getLastErrorDefaults.isEmpty() )
            settings << "getLastErrorDefaults" << getLastErrorDefaults;
        b << "settings" << settings.obj();
    }

    return b.obj();
}
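// Miniature of the nested-builder composition above, using the same bo/bob
// typedefs (assumed mongo BSON API; values are placeholders): each inner
// builder is finished with obj() before being streamed into the next level up,
// mirroring how clauses, modes, and settings are assembled.
bo nestedSettingsExample() {
    bob b;
    b.append("_id", "rs0");

    bob settings;
    bob modes;
    modes << "dc" << BSON("rack" << 2);          // innermost object first
    settings << "getLastErrorModes" << modes.obj(); // then stream it upward
    b << "settings" << settings.obj();

    return b.obj();
}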
Status AuthzManagerExternalStateMongos::getRolesDescription(OperationContext* opCtx,
                                                            const std::vector<RoleName>& roles,
                                                            PrivilegeFormat showPrivileges,
                                                            BSONObj* result) {
    BSONArrayBuilder rolesInfoCmdArray;

    for (const RoleName& roleName : roles) {
        rolesInfoCmdArray << BSON(AuthorizationManager::ROLE_NAME_FIELD_NAME
                                  << roleName.getRole()
                                  << AuthorizationManager::ROLE_DB_FIELD_NAME
                                  << roleName.getDB());
    }

    BSONObjBuilder rolesInfoCmd;
    rolesInfoCmd.append("rolesInfo", rolesInfoCmdArray.arr());
    addShowPrivilegesToBuilder(&rolesInfoCmd, showPrivileges);

    BSONObjBuilder builder;
    const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand(
        opCtx, "admin", rolesInfoCmd.obj(), &builder);
    BSONObj cmdResult = builder.obj();
    if (!ok) {
        return getStatusFromCommandResult(cmdResult);
    }

    std::vector<BSONElement> foundRoles = cmdResult[rolesFieldName(showPrivileges)].Array();
    if (foundRoles.size() == 0) {
        return Status(ErrorCodes::RoleNotFound, "Roles not found");
    }

    *result = foundRoles[0].Obj().getOwned();
    return Status::OK();
}
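// Sketch of building a command object whose argument is an array of
// subdocuments, as above. Field names beyond "rolesInfo" are illustrative
// stand-ins for the constants used in the snippet; assumes the mongo BSON API.
BSONObj buildRolesInfoCmd(const std::vector<std::pair<std::string, std::string>>& roles) {
    BSONArrayBuilder rolesInfoCmdArray;
    for (const auto& role : roles) {
        // one { role, db } subdocument per requested role
        rolesInfoCmdArray << BSON("role" << role.first << "db" << role.second);
    }

    BSONObjBuilder rolesInfoCmd;
    rolesInfoCmd.append("rolesInfo", rolesInfoCmdArray.arr());
    return rolesInfoCmd.obj();
}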
void BitTestMatchExpression::toBSON(BSONObjBuilder* out) const {
    string opString = "";

    switch (matchType()) {
        case BITS_ALL_SET:
            opString = "$bitsAllSet";
            break;
        case BITS_ALL_CLEAR:
            opString = "$bitsAllClear";
            break;
        case BITS_ANY_SET:
            opString = "$bitsAnySet";
            break;
        case BITS_ANY_CLEAR:
            opString = "$bitsAnyClear";
            break;
        default:
            invariant(false);
    }

    BSONArrayBuilder arrBob;
    for (auto bitPosition : _bitPositions) {
        arrBob.append(bitPosition);
    }
    arrBob.doneFast();

    out->append(path(), BSON(opString << arrBob.arr()));
}
// Aggregate a bunch of errors for a single op together
static void combineOpErrors(const vector<ChildWriteOp const*>& errOps, WriteErrorDetail* error) {
    // Special case single response
    if (errOps.size() == 1) {
        errOps.front()->error->cloneTo(error);
        return;
    }

    // Generate the multi-error message below
    stringstream msg;
    msg << "multiple errors for op : ";

    BSONArrayBuilder errB;
    for (vector<ChildWriteOp const*>::const_iterator it = errOps.begin(); it != errOps.end();
         ++it) {
        const ChildWriteOp* errOp = *it;
        if (it != errOps.begin())
            msg << " :: and :: ";
        msg << errOp->error->toStatus().reason();
        errB.append(errOp->error->toBSON());
    }

    error->setErrInfo(BSON("causedBy" << errB.arr()));
    error->setIndex(errOps.front()->error->getIndex());
    error->setStatus({ErrorCodes::MultipleErrorsOccurred, msg.str()});
}
void CAggregationSwitchModel::AccessRack2RackInfo(string strAccessRack)
{
    BSONArrayBuilder arrBuilderRackInfo;
    string strToken = "";
    string::size_type iIndex = 0; // size_type avoids a signed/unsigned mismatch with length()
    while (iIndex < strAccessRack.length())
    {
        switch (strAccessRack[iIndex])
        {
        case ',':
        case ';':
            arrBuilderRackInfo << strToken;
            strToken = "";
            break;
        default:
            strToken += strAccessRack[iIndex];
        }
        iIndex++;
    }
    if (strToken != "")
    {
        arrBuilderRackInfo << strToken;
    }

    // Assign rack info
    m_barrRackInfo = arrBuilderRackInfo.arr();
    m_mapFieldSet[FIELD_RACK_INFO] = true;
}
void Pipeline::writeExplainMongos(BSONObjBuilder &result) const {

    /*
      For now, this should be a BSON source array.
      In future, we might have a more clever way of getting this, when we
      have more interleaved fetching between shards. The DocumentSource
      interface will have to change to accommodate that.
    */
    DocumentSourceBsonArray *pSourceBsonArray =
        dynamic_cast<DocumentSourceBsonArray *>(sources.front().get());
    verify(pSourceBsonArray);

    BSONArrayBuilder shardOpArray; // where we'll put the pipeline ops
    for(bool hasDocument = !pSourceBsonArray->eof(); hasDocument;
            hasDocument = pSourceBsonArray->advance()) {
        Document pDocument = pSourceBsonArray->getCurrent();
        BSONObjBuilder opBuilder;
        pDocument->toBson(&opBuilder);
        shardOpArray.append(opBuilder.obj());
    }

    BSONArrayBuilder mongosOpArray; // where we'll put the pipeline ops
    writeExplainOps(&mongosOpArray);

    // now we combine the shard pipelines with the one here
    result.append(serverPipelineName, shardOpArray.arr());
    result.append(mongosPipelineName, mongosOpArray.arr());
}
static void handleCursorCommand(CursorId id, BSONObj& cmdObj, BSONObjBuilder& result) {
    BSONElement batchSizeElem = cmdObj.getFieldDotted("cursor.batchSize");
    const long long batchSize = batchSizeElem.isNumber()
                              ? batchSizeElem.numberLong()
                              : 101; // same as query

    ClientCursorPin pin(id);
    ClientCursor* cursor = pin.c();

    massert(16958, "Cursor shouldn't have been deleted", cursor);
    verify(cursor->isAggCursor);

    PipelineRunner* runner = dynamic_cast<PipelineRunner*>(cursor->getRunner());
    verify(runner);
    try {
        const string cursorNs = cursor->ns(); // we need this after cursor may have been deleted

        // can't use result BSONObjBuilder directly since it won't handle exceptions correctly.
        BSONArrayBuilder resultsArray;
        const int byteLimit = MaxBytesToReturnToClientAtOnce;
        BSONObj next;
        for (int objCount = 0; objCount < batchSize; objCount++) {
            // The initial getNext() on a PipelineRunner may be very expensive so we don't do it
            // when batchSize is 0 since that indicates a desire for a fast return.
            if (runner->getNext(&next, NULL) != Runner::RUNNER_ADVANCED) {
                pin.deleteUnderlying();
                id = 0;
                cursor = NULL; // make it an obvious error to use cursor after this point
                break;
            }

            if (resultsArray.len() + next.objsize() > byteLimit) {
                // too big. next will be the first doc in the second batch
                runner->pushBack(next);
                break;
            }

            resultsArray.append(next);
        }

        if (cursor) {
            // If a time limit was set on the pipeline, remaining time is "rolled over" to the
            // cursor (for use by future getmore ops).
            cursor->setLeftoverMaxTimeMicros( cc().curop()->getRemainingMaxTimeMicros() );
        }

        BSONObjBuilder cursorObj(result.subobjStart("cursor"));
        cursorObj.append("id", id);
        cursorObj.append("ns", cursorNs);
        cursorObj.append("firstBatch", resultsArray.arr());
        cursorObj.done();
    }
    catch (...) {
        // Clean up cursor on way out of scope.
        pin.deleteUnderlying();
        throw;
    }
}
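// Sketch of the byte-budget batching loop above (limit and names assumed, not
// taken from the snippet): stop filling once the serialized array would exceed
// the response budget, leaving the offending document for the next batch.
BSONArray fillBatch(const std::vector<BSONObj>& docs, int byteLimit) {
    BSONArrayBuilder resultsArray;
    for (size_t i = 0; i < docs.size(); i++) {
        if (resultsArray.len() + docs[i].objsize() > byteLimit) {
            break; // docs[i] becomes the first doc of the next batch
        }
        resultsArray.append(docs[i]);
    }
    return resultsArray.arr();
}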
void Node::writeJointNames (const sm::JointState& m)
{
    BSONArrayBuilder b;
    BOOST_FOREACH (const string& name, m.name)
        b.append(name);
    BSONObj s = BSON("names" << b.arr());
    conn_->update(joint_name_coll_, mongo::fromjson("{}"), s, 1);
}
void Pipeline::writeExplainShard(BSONObjBuilder &result) const {
    BSONArrayBuilder opArray; // where we'll put the pipeline ops

    // next, add the pipeline operators
    writeExplainOps(&opArray);

    result.appendArray(serverPipelineName, opArray.arr());
}
BSONArray equalityFields() const {
    BSONArrayBuilder ret;
    for( set<string>::const_iterator i = _equalityFields.begin();
         i != _equalityFields.end(); ++i ) {
        ret << *i;
    }
    return ret.arr();
}
BSONArrayBuilder DocumentStructureEnumerator::_getArrayBuilderFromArr(BSONArray arr) {
    BSONArrayBuilder arrBuilder;
    for (auto elem : arr) {
        arrBuilder.append(elem);
    }
    return arrBuilder;
}
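// Hypothetical follow-on illustrating why the helper above returns a builder
// rather than a finished array: a builder returned by value is still open
// (this relies on BSONArrayBuilder being movable, as the helper's own
// return-by-value does), so the caller can keep appending before finalizing.
BSONArrayBuilder startArrayWithZero() {
    BSONArrayBuilder b;
    b.append(0);
    return b; // moved out, builder stays open for the caller
}

BSONArray extendedArray() {
    BSONArrayBuilder b = startArrayWithZero();
    b.append(1);
    return b.arr(); // [ 0, 1 ]
}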
/**
 * Validates that the roles array described by rolesElement is valid.
 * Also returns a new roles array (via the modifiedRolesArray output param) where any roles
 * from the input array that were listed as strings have been expanded to a full role document.
 */
Status _validateAndModifyRolesArray(const BSONElement& rolesElement,
                                    const std::string& dbname,
                                    AuthorizationManager* authzManager,
                                    BSONArray* modifiedRolesArray) {
    BSONArrayBuilder rolesBuilder;

    for (BSONObjIterator it(rolesElement.Obj()); it.more(); it.next()) {
        BSONElement element = *it;
        if (element.type() == String) {
            RoleName roleName(element.String(), dbname);
            if (!authzManager->roleExists(roleName)) {
                return Status(ErrorCodes::RoleNotFound,
                              mongoutils::str::stream() << roleName.toString()
                                      << " does not name an existing role");
            }

            rolesBuilder.append(BSON("name" << element.String() << "source" << dbname
                                            << "hasRole" << true << "canDelegate" << false));
        } else if (element.type() == Object) {
            // Check that the role object is valid
            V2PrivilegeDocumentParser parser;
            BSONObj roleObj = element.Obj();
            Status status = parser.checkValidRoleObject(roleObj);
            if (!status.isOK()) {
                return status;
            }

            // Check that the role actually exists
            std::string roleNameString;
            std::string roleSource;
            status = bsonExtractStringField(roleObj, "name", &roleNameString);
            if (!status.isOK()) {
                return status;
            }
            status = bsonExtractStringField(roleObj, "source", &roleSource);
            if (!status.isOK()) {
                return status;
            }

            RoleName roleName(roleNameString, roleSource);
            if (!authzManager->roleExists(roleName)) {
                return Status(ErrorCodes::RoleNotFound,
                              mongoutils::str::stream() << roleName.toString()
                                      << " does not name an existing role");
            }

            rolesBuilder.append(element);
        } else {
            return Status(ErrorCodes::UnsupportedFormat,
                          "Values in 'roles' array must be sub-documents or strings");
        }
    }

    *modifiedRolesArray = rolesBuilder.arr();
    return Status::OK();
}
BSONArray docs() const {
    auto_ptr<DBClientCursor> cursor =
        client.query( ns, Query().hint( BSON( "_id" << 1 ) ) );
    BSONArrayBuilder bab;
    while( cursor->more() ) {
        bab << cursor->next();
    }
    return bab.arr();
}