/**
 * Computes the privileges a batched write command (insert/update/delete) requires and checks them
 * against 'authzSession'.
 *
 * Returns Status::OK() when the session holds every required privilege, ErrorCodes::Unauthorized
 * otherwise. For inserts into system.indexes, may also return the error status produced while
 * determining the namespace being indexed.
 */
Status checkAuthForWriteCommand(AuthorizationSession* authzSession,
                                BatchedCommandRequest::BatchType cmdType,
                                const OpMsgRequest& request) {
    std::vector<Privilege> required;
    ActionSet nssActions;

    // Bypassing document validation is itself a privileged action on the command namespace.
    if (shouldBypassDocumentValidationForCommand(request.body)) {
        nssActions.addAction(ActionType::bypassDocumentValidation);
    }

    NamespaceString targetNss;
    switch (cmdType) {
        case BatchedCommandRequest::BatchType_Insert: {
            auto op = Insert::parse(IDLParserErrorContext("insert"), request);
            targetNss = op.getNamespace();
            if (op.getNamespace().isSystemDotIndexes()) {
                // Special-case indexes until we have a command. The privilege is createIndex on
                // the collection being indexed, not insert on system.indexes.
                const auto swNssToIndex = getIndexedNss(op.getDocuments());
                if (!swNssToIndex.isOK()) {
                    return swNssToIndex.getStatus();
                }
                required.push_back(
                    Privilege(ResourcePattern::forExactNamespace(swNssToIndex.getValue()),
                              ActionType::createIndex));
            } else {
                nssActions.addAction(ActionType::insert);
            }
            break;
        }
        case BatchedCommandRequest::BatchType_Update: {
            auto op = Update::parse(IDLParserErrorContext("update"), request);
            targetNss = op.getNamespace();
            nssActions.addAction(ActionType::update);
            // Upsert also requires insert privs.
            if (containsUpserts(op.getUpdates())) {
                nssActions.addAction(ActionType::insert);
            }
            break;
        }
        default: {
            fassert(17251, cmdType == BatchedCommandRequest::BatchType_Delete);
            auto op = Delete::parse(IDLParserErrorContext("delete"), request);
            targetNss = op.getNamespace();
            nssActions.addAction(ActionType::remove);
            break;
        }
    }

    if (!nssActions.empty()) {
        required.emplace_back(ResourcePattern::forExactNamespace(targetNss), nssActions);
    }

    return authzSession->isAuthorizedForPrivileges(required)
        ? Status::OK()
        : Status(ErrorCodes::Unauthorized, "unauthorized");
}
/**
 * Kills sessions tracked by the SessionCatalog. When 'singleSessionDoc' is provided, kills only
 * the session identified by that document's '_id'; otherwise kills every session matching the
 * kill-all pattern for this operation.
 */
void MongoDSessionCatalog::invalidateSessions(OperationContext* opCtx,
                                              boost::optional<BSONObj> singleSessionDoc) {
    const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
    if (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet) {
        // On a replica set, direct writes against the transactions table must not themselves be
        // performed inside a session/transaction.
        uassert(40528,
                str::stream() << "Direct writes against "
                              << NamespaceString::kSessionTransactionsTableNamespace.ns()
                              << " cannot be performed using a transaction or on a session.",
                !opCtx->getLogicalSessionId());
    }

    const auto catalog = SessionCatalog::get(opCtx);

    // The use of shared_ptr here is in order to work around the limitation of stdx::function that
    // the functor must be copyable.
    auto killTokens = std::make_shared<std::vector<SessionCatalog::KillToken>>();

    if (!singleSessionDoc) {
        // No specific document supplied: kill every session matched by the kill-all pattern.
        SessionKiller::Matcher matcher(
            KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(opCtx)});
        catalog->scanSessions(matcher, [&killTokens](const ObservableSession& session) {
            killTokens->emplace_back(session.kill());
        });
    } else {
        // Kill only the session identified by the supplied document's '_id'.
        auto lsid = LogicalSessionId::parse(IDLParserErrorContext("lsid"),
                                            singleSessionDoc->getField("_id").Obj());
        killTokens->emplace_back(catalog->killSession(lsid));
    }

    killSessionTokensFunction(opCtx, killTokens);
}
/**
 * Runs the 'profile' command: applies the requested profiling level, reports the previous
 * settings ('was', 'slowms', 'sampleRate') to the caller, then applies any new 'slowms' /
 * 'sampleRate' values from the request.
 */
bool ProfileCmdBase::run(OperationContext* opCtx,
                         const std::string& dbName,
                         const BSONObj& cmdObj,
                         BSONObjBuilder& result) {
    const auto request = ProfileCmdRequest::parse(IDLParserErrorContext("profile"), cmdObj);

    // Delegate to _applyProfilingLevel to set the profiling level appropriately whether we are on
    // mongoD or mongoS.
    const int previousLevel = _applyProfilingLevel(opCtx, dbName, request.getCommandParameter());

    // Report the settings as they were *before* this command took effect.
    result.append("was", previousLevel);
    result.append("slowms", serverGlobalParams.slowMS);
    result.append("sampleRate", serverGlobalParams.sampleRate);

    if (const auto slowms = request.getSlowms()) {
        serverGlobalParams.slowMS = *slowms;
    }
    if (const auto sampleRate = request.getSampleRate()) {
        uassert(ErrorCodes::BadValue,
                "'sampleRate' must be between 0.0 and 1.0 inclusive",
                *sampleRate >= 0.0 && *sampleRate <= 1.0);
        serverGlobalParams.sampleRate = *sampleRate;
    }

    return true;
}
// Constructs an OplogEntry from its raw BSON form: takes ownership of the buffer, eagerly parses
// the IDL-defined fields, and caches the command type for command entries.
OplogEntry::OplogEntry(BSONObj rawInput) : raw(std::move(rawInput)) {
    // Ensure 'raw' owns its backing buffer; getOwned() copies only if it is not already owned.
    raw = raw.getOwned();
    parseProtected(IDLParserErrorContext("OplogEntryBase"), raw);
    // Parse command type from 'o' and 'o2' fields.
    if (isCommand()) {
        _commandType = parseCommandType(getObject());
    }
}
/**
 * Parses a $mergeCursors stage specification. The argument must be an object, which is
 * de-serialized via the AsyncResultsMergerParams IDL type.
 */
boost::intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
    BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& expCtx) {
    uassert(17026,
            "$mergeCursors stage expected an object as argument",
            elem.type() == BSONType::Object);

    // Keep an owned copy of the spec so its lifetime is independent of 'elem'.
    auto ownedSpec = elem.embeddedObject().getOwned();
    auto params = AsyncResultsMergerParams::parse(IDLParserErrorContext(kStageName), ownedSpec);

    auto executor = Grid::get(expCtx->opCtx)->getExecutorPool()->getArbitraryExecutor();
    return new DocumentSourceMergeCursors(executor, std::move(params), expCtx, std::move(ownedSpec));
}
/**
 * Reads the persisted rollback-progress document and returns the 'applyUntil' optime recorded in
 * it.
 *
 * Returns the storage lookup error if the document cannot be found/read, or the parse error (as a
 * Status) if the document is malformed.
 */
StatusWith<OpTime> ReplicationProcess::getRollbackProgress(OperationContext* opCtx) {
    auto documentResult =
        _storageInterface->findById(opCtx, kRollbackProgressNamespace, kRollbackProgressIdKey);
    if (!documentResult.isOK()) {
        return documentResult.getStatus();
    }

    try {
        // Parse and return inside the try block. This avoids a default-constructed
        // RollbackProgress plus a separate assignment, and keeps the object scoped to where it is
        // valid.
        const auto rollbackProgress =
            RollbackProgress::parse(IDLParserErrorContext("RollbackProgress"),
                                    documentResult.getValue());
        return rollbackProgress.getApplyUntil();
    } catch (...) {
        // IDL parsing throws on malformed documents; surface that as a Status to the caller.
        return exceptionToStatus();
    }
}
/**
 * Factory function for producing DbCheckRun's from command objects.
 */
std::unique_ptr<DbCheckRun> getRun(OperationContext* opCtx,
                                   const std::string& dbName,
                                   const BSONObj& obj) {
    // Get rid of generic command fields so the IDL parsers only see dbCheck-specific ones.
    BSONObjBuilder stripped;
    for (const auto& elem : obj) {
        if (isGenericArgument(elem.fieldNameStringData())) {
            continue;
        }
        stripped.append(elem);
    }
    const BSONObj toParse = stripped.obj();

    // A string 'dbCheck' argument selects the per-collection form; anything else is the
    // database-wide form.
    if (toParse["dbCheck"].type() != BSONType::String) {
        return fullDatabaseRun(
            opCtx, dbName, DbCheckAllInvocation::parse(IDLParserErrorContext(""), toParse));
    }
    return singleCollectionRun(
        opCtx, dbName, DbCheckSingleInvocation::parse(IDLParserErrorContext(""), toParse));
}
// Builds the migration source for session/transaction records. Snapshots config.transactions via
// a local query, builds an oplog iterator per session that has performed a write, then writes a
// no-op oplog message and waits for it to satisfy majority write concern before returning.
SessionCatalogMigrationSource::SessionCatalogMigrationSource(OperationContext* opCtx,
                                                             NamespaceString ns)
    : _ns(std::move(ns)),
      _rollbackIdAtInit(repl::ReplicationProcess::get(opCtx)->getRollbackID()) {
    // Exclude entries for transaction.
    Query query;
    // Sort is not needed for correctness. This is just for making it easier to write deterministic
    // tests.
    query.sort(BSON("_id" << 1));

    DBDirectClient client(opCtx);
    auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace, query);

    while (cursor->more()) {
        auto nextSession = SessionTxnRecord::parse(
            IDLParserErrorContext("Session migration cloning"), cursor->next());
        // Only sessions with a recorded last write need an oplog iterator.
        if (!nextSession.getLastWriteOpTime().isNull()) {
            _sessionOplogIterators.push_back(
                stdx::make_unique<SessionOplogIterator>(std::move(nextSession), _rollbackIdAtInit));
        }
    }

    {
        // Write a no-op message into the oplog under an intent-exclusive oplog lock; this acts as
        // a marker/barrier for the start of session migration cloning.
        AutoGetCollection autoColl(opCtx, NamespaceString::kRsOplogNamespace, MODE_IX);
        writeConflictRetry(
            opCtx,
            "session migration initialization majority commit barrier",
            NamespaceString::kRsOplogNamespace.ns(),
            [&] {
                const auto message = BSON("sessionMigrateCloneStart" << _ns.ns());
                WriteUnitOfWork wuow(opCtx);
                opCtx->getClient()->getServiceContext()->getOpObserver()->onInternalOpMessage(
                    opCtx, _ns, {}, {}, message);
                wuow.commit();
            });
    }

    // Block until the no-op write above is majority committed.
    auto opTimeToWait = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp();
    WriteConcernResult result;
    WriteConcernOptions majority(
        WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, 0);
    uassertStatusOK(waitForWriteConcern(opCtx, opTimeToWait, majority, &result));
}
// Rebuilds prepared-transaction state after startup recovery: for every config.transactions entry
// in the 'prepared' state, looks up the corresponding prepareTransaction oplog entry and
// re-applies it on a fresh operation context.
void ReplicationRecoveryImpl::_reconstructPreparedTransactions(OperationContext* opCtx) {
    DBDirectClient client(opCtx);
    const auto cursor = client.query(NamespaceString::kSessionTransactionsTableNamespace,
                                     {BSON("state"
                                           << "prepared")});

    // Iterate over each entry in the transactions table that has a prepared transaction.
    while (cursor->more()) {
        const auto txnRecordObj = cursor->next();
        const auto txnRecord = SessionTxnRecord::parse(
            IDLParserErrorContext("recovering prepared transaction"), txnRecordObj);

        invariant(txnRecord.getState() == DurableTxnStateEnum::kPrepared);

        // Get the prepareTransaction oplog entry corresponding to this transactions table entry.
        invariant(!opCtx->recoveryUnit()->getPointInTimeReadTimestamp());
        const auto prepareOpTime = txnRecord.getLastWriteOpTime();
        invariant(!prepareOpTime.isNull());
        TransactionHistoryIterator iter(prepareOpTime);
        invariant(iter.hasNext());
        const auto prepareOplogEntry = iter.next(opCtx);

        {
            // Make a new opCtx so that we can set the lsid when applying the prepare transaction
            // oplog entry.
            auto newClient =
                opCtx->getServiceContext()->makeClient("reconstruct-prepared-transactions");
            AlternativeClientRegion acr(newClient);
            const auto newOpCtx = cc().makeOperationContext();

            // The re-application must not itself be replicated.
            repl::UnreplicatedWritesBlock uwb(newOpCtx.get());

            // Snapshot transaction can never conflict with the PBWM lock.
            newOpCtx->lockState()->setShouldConflictWithSecondaryBatchApplication(false);

            // TODO: SERVER-40177 This should be removed once it is guaranteed operations applied on
            // recovering nodes cannot encounter unnecessary prepare conflicts.
            newOpCtx->recoveryUnit()->setIgnorePrepared(true);

            // Checks out the session, applies the operations and prepares the transactions.
            uassertStatusOK(applyRecoveredPrepareTransaction(newOpCtx.get(), prepareOplogEntry));
        }
    }
}
/**
 * Reads the free-monitoring state document from the server configuration collection.
 *
 * Returns boost::none if the document (or the collection) does not exist yet; throws on any other
 * storage error.
 */
boost::optional<FreeMonStorageState> FreeMonStorage::read(OperationContext* opCtx) {
    const BSONObj idDoc = BSON("_id" << kFreeMonDocIdKey);
    const BSONElement idElement = idDoc.firstElement();

    auto storageInterface = repl::StorageInterface::get(opCtx);

    AutoGetCollectionForRead autoRead(opCtx, NamespaceString::kServerConfigurationNamespace);

    auto swDoc = storageInterface->findById(
        opCtx, NamespaceString::kServerConfigurationNamespace, idElement);
    if (!swDoc.isOK()) {
        const auto& status = swDoc.getStatus();
        // A missing key or collection simply means nothing has been stored yet.
        if (status == ErrorCodes::NoSuchKey || status == ErrorCodes::NamespaceNotFound) {
            return {};
        }
        // Any other failure is unexpected: throw.
        uassertStatusOK(status);
    }

    return FreeMonStorageState::parse(IDLParserErrorContext("FreeMonStorage"), swDoc.getValue());
}
/**
 * Authorization check for the 'profile' command. A purely read-only request (negative level, no
 * 'slowms'/'sampleRate') is allowed with just find rights on system.profile; any modifying
 * request requires the 'enableProfiler' action on the database.
 */
Status ProfileCmdBase::checkAuthForCommand(Client* client,
                                           const std::string& dbName,
                                           const BSONObj& cmdObj) const {
    AuthorizationSession* authzSession = AuthorizationSession::get(client);
    auto request = ProfileCmdRequest::parse(IDLParserErrorContext("profile"), cmdObj);

    const bool isReadOnlyRequest =
        request.getCommandParameter() < 0 && !request.getSlowms() && !request.getSampleRate();
    if (isReadOnlyRequest) {
        // If the user just wants to view the current values of 'slowms' and 'sampleRate', they
        // only need read rights on system.profile, even if they can't change the profiling level.
        if (authzSession->isAuthorizedForActionsOnResource(
                ResourcePattern::forExactNamespace({dbName, "system.profile"}),
                ActionType::find)) {
            return Status::OK();
        }
    }

    if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbName),
                                                       ActionType::enableProfiler)) {
        return Status::OK();
    }
    return Status(ErrorCodes::Unauthorized, "unauthorized");
}
/**
 * Parses a $mergeCursors stage specification. Supports two formats: the modern object form,
 * de-serialized via the AsyncResultsMergerParams IDL type, and the legacy array-of-cursors form
 * produced by pre-4.0 mongos processes.
 */
boost::intrusive_ptr<DocumentSource> DocumentSourceMergeCursors::createFromBson(
    BSONElement elem, const boost::intrusive_ptr<ExpressionContext>& expCtx) {
    if (elem.type() == BSONType::Object) {
        // This is the modern serialization format. We de-serialize using the IDL.
        auto ownedObj = elem.embeddedObject().getOwned();
        auto armParams =
            AsyncResultsMergerParams::parse(IDLParserErrorContext(kStageName), ownedObj);
        return new DocumentSourceMergeCursors(
            Grid::get(expCtx->opCtx)->getExecutorPool()->getArbitraryExecutor(),
            std::move(armParams),
            expCtx,
            std::move(ownedObj));
    }

    // This is the old serialization format which can still be generated by mongos processes
    // older than 4.0.
    // TODO SERVER-34009 Remove support for this format.
    uassert(17026,
            "$mergeCursors stage expected either an array or an object as argument",
            elem.type() == BSONType::Array);
    const auto serializedRemotes = elem.Array();
    uassert(50729,
            "$mergeCursors stage expected array with at least one entry",
            serializedRemotes.size() > 0);

    boost::optional<NamespaceString> nss;
    std::vector<RemoteCursor> remotes;
    for (auto&& cursor : serializedRemotes) {
        BSONElement nsElem;
        BSONElement hostElem;
        BSONElement idElem;
        uassert(17027,
                "$mergeCursors stage requires each cursor in array to be an object",
                cursor.type() == BSONType::Object);
        // Pick out the 'ns', 'host', and 'id' fields; reject anything else.
        for (auto&& cursorElem : cursor.Obj()) {
            const auto fieldName = cursorElem.fieldNameStringData();
            if (fieldName == "ns"_sd) {
                nsElem = cursorElem;
            } else if (fieldName == "host"_sd) {
                hostElem = cursorElem;
            } else if (fieldName == "id"_sd) {
                idElem = cursorElem;
            } else {
                uasserted(50730,
                          str::stream() << "Unrecognized option " << fieldName
                                        << " within cursor provided to $mergeCursors: "
                                        << cursor);
            }
        }
        uassert(
            50731,
            "$mergeCursors stage requires \'ns\' field with type string for each cursor in array",
            nsElem.type() == BSONType::String);

        // We require each cursor to have the same namespace. This isn't a fundamental limit of the
        // system, but needs to be true due to the implementation of AsyncResultsMerger, which
        // tracks one namespace for all cursors.
        uassert(50720,
                "$mergeCursors requires each cursor to have the same namespace",
                !nss || nss->ns() == nsElem.valueStringData());
        nss = NamespaceString(nsElem.String());

        uassert(
            50721,
            "$mergeCursors stage requires \'host\' field with type string for each cursor in array",
            hostElem.type() == BSONType::String);
        auto host = uassertStatusOK(HostAndPort::parse(hostElem.valueStringData()));
        uassert(50722,
                "$mergeCursors stage requires \'id\' field with type long for each cursor in array",
                idElem.type() == BSONType::NumberLong);
        auto cursorId = idElem.Long();

        // We are assuming that none of the cursors have been iterated at all, and so will not have
        // any data in the initial batch.
        // TODO SERVER-33323 We use a fake shard id because the AsyncResultsMerger won't use it for
        // anything, and finding the real one is non-trivial.
        RemoteCursor remoteCursor;
        remoteCursor.setShardId(ShardId("fakeShardIdForMergeCursors"));
        remoteCursor.setHostAndPort(std::move(host));
        std::vector<BSONObj> emptyBatch;
        remoteCursor.setCursorResponse(CursorResponse{*nss, cursorId, emptyBatch});
        remotes.push_back(std::move(remoteCursor));
    }

    // We know there is at least one cursor in 'serializedRemotes', and we require each cursor to
    // have a 'ns' field.
    invariant(nss);

    AsyncResultsMergerParams params;
    params.setRemotes(std::move(remotes));
    params.setNss(*nss);
    return new DocumentSourceMergeCursors(
        Grid::get(expCtx->opCtx)->getExecutorPool()->getArbitraryExecutor(),
        std::move(params),
        expCtx,
        elem.embeddedObject().getOwned());
}