Status doSaslStart(const Client* client,
                   SaslAuthenticationSession* session,
                   const std::string& db,
                   const BSONObj& cmdObj,
                   BSONObjBuilder* result) {
    bool autoAuthorize = false;
    Status status = bsonExtractBooleanFieldWithDefault(
        cmdObj, saslCommandAutoAuthorizeFieldName, autoAuthorizeDefault, &autoAuthorize);
    if (!status.isOK())
        return status;

    std::string mechanism;
    status = extractMechanism(cmdObj, &mechanism);
    if (!status.isOK())
        return status;

    if (!sequenceContains(saslGlobalParams.authenticationMechanisms, mechanism) &&
        mechanism != "SCRAM-SHA-1") {
        // Always allow SCRAM-SHA-1 to pass to the first sasl step since we need to
        // handle internal user authentication, SERVER-16534
        result->append(saslCommandMechanismListFieldName,
                       saslGlobalParams.authenticationMechanisms);
        return Status(ErrorCodes::BadValue,
                      mongoutils::str::stream() << "Unsupported mechanism " << mechanism);
    }

    status = session->start(db,
                            mechanism,
                            saslGlobalParams.serviceName,
                            saslGlobalParams.hostName,
                            1,
                            autoAuthorize);
    if (!status.isOK())
        return status;

    return doSaslStep(client, session, cmdObj, result);
}
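// The cmdObj consumed by doSaslStart() above is the saslStart command document sent by the
// client. Below is a hedged sketch of its shape; the field names ("saslStart", "mechanism",
// "payload", "autoAuthorize") follow the usual SASL command convention assumed here, and the
// empty payload is a placeholder rather than a real first SCRAM message.
#include "mongo/bson/bsonobjbuilder.h"

mongo::BSONObj makeExampleSaslStartCmd() {
    mongo::BSONObjBuilder b;
    b.append("saslStart", 1);
    b.append("mechanism", "SCRAM-SHA-1");
    // Mechanism-specific first client message, carried as binary data on the wire.
    b.appendBinData("payload", 0, mongo::BinDataGeneral, "");
    // Legacy flag read via bsonExtractBooleanFieldWithDefault() in doSaslStart().
    b.append("autoAuthorize", true);
    return b.obj();
}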
bool ReplicaSetTagMatch::update(const ReplicaSetTag& tag) {
    const std::vector<BoundTagValue>::iterator iter = std::find_if(
        _boundTagValues.begin(),
        _boundTagValues.end(),
        stdx::bind(std::equal_to<int32_t>(),
                   tag.getKeyIndex(),
                   stdx::bind(&BoundTagValue::getKeyIndex, stdx::placeholders::_1)));
    if (iter != _boundTagValues.end()) {
        if (!sequenceContains(iter->boundValues, tag.getValueIndex())) {
            iter->boundValues.push_back(tag.getValueIndex());
        }
    }
    return isSatisfied();
}
Status doSaslStart(SaslAuthenticationSession* session,
                   const std::string& db,
                   const BSONObj& cmdObj,
                   BSONObjBuilder* result) {
    bool autoAuthorize = false;
    Status status = bsonExtractBooleanFieldWithDefault(
        cmdObj, saslCommandAutoAuthorizeFieldName, autoAuthorizeDefault, &autoAuthorize);
    if (!status.isOK())
        return status;

    std::string mechanism;
    status = extractMechanism(cmdObj, &mechanism);
    if (!status.isOK())
        return status;

    if (!sequenceContains(saslGlobalParams.authenticationMechanisms, mechanism)) {
        result->append(saslCommandMechanismListFieldName,
                       saslGlobalParams.authenticationMechanisms);
        return Status(ErrorCodes::BadValue,
                      mongoutils::str::stream() << "Unsupported mechanism " << mechanism);
    }

    status = session->start(db,
                            mechanism,
                            saslGlobalParams.serviceName,
                            saslGlobalParams.hostName,
                            1,
                            autoAuthorize);
    if (!status.isOK())
        return status;

    return doSaslStep(session, cmdObj, result);
}
bool ReplicationCoordinatorExternalStateMock::isSelf(const HostAndPort& host,
                                                     ServiceContext* const ctx) {
    return sequenceContains(_selfHosts, host);
}
bool SASLServerMechanismRegistry::_mechanismSupportedByConfig(StringData mechName) const {
    return sequenceContains(_enabledMechanisms, mechName);
}
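// Each of the snippets in this listing relies on the sequenceContains() helper to test whether
// a value appears in a container (enabled SASL mechanisms, bound replica set tag values, the
// mock's self-host list). A minimal sketch of such a helper follows; it captures the
// linear-scan, operator== semantics assumed above but is not necessarily the exact
// implementation in the MongoDB tree.
#include <algorithm>
#include <iterator>

template <typename Sequence, typename Item>
bool sequenceContains(const Sequence& sequence, const Item& item) {
    // std::find returns the end iterator when no element compares equal to `item`.
    return std::find(std::begin(sequence), std::end(sequence), item) != std::end(sequence);
}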
ExitCode _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    initWireSpec();

    auto serviceContext = getGlobalServiceContext();
    serviceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));

    auto opObserverRegistry = stdx::make_unique<OpObserverRegistry>();
    opObserverRegistry->addObserver(stdx::make_unique<OpObserverShardingImpl>());
    opObserverRegistry->addObserver(stdx::make_unique<UUIDCatalogObserver>());

    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
        opObserverRegistry->addObserver(stdx::make_unique<ShardServerOpObserver>());
    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
        opObserverRegistry->addObserver(stdx::make_unique<ConfigServerOpObserver>());
    }

    setupFreeMonitoringOpObserver(opObserverRegistry.get());

    serviceContext->setOpObserver(std::move(opObserverRegistry));

    DBDirectClientFactory::get(serviceContext).registerImplementation([](OperationContext* opCtx) {
        return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
    });

    const repl::ReplSettings& replSettings =
        repl::ReplicationCoordinator::get(serviceContext)->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;

        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

#if defined(_WIN32)
    VersionInfoInterface::instance().logTargetMinOS();
#endif

    logProcessDetails();

    serviceContext->setServiceEntryPoint(
        stdx::make_unique<ServiceEntryPointMongod>(serviceContext));

    if (!storageGlobalParams.repair) {
        auto tl = transport::TransportLayerManager::createWithConfig(&serverGlobalParams,
                                                                     serviceContext);
        auto res = tl->setup();
        if (!res.isOK()) {
            error() << "Failed to set up listener: " << res;
            return EXIT_NET_ERROR;
        }
        serviceContext->setTransportLayer(std::move(tl));
    }

    // Set up the periodic runner for background job execution. This is required to be running
    // before the storage engine is initialized.
    auto runner = makePeriodicRunner(serviceContext);
    runner->startup();
    serviceContext->setPeriodicRunner(std::move(runner));

    initializeStorageEngine(serviceContext, StorageEngineInitFlags::kNone);

#ifdef MONGO_CONFIG_WIREDTIGER_ENABLED
    if (EncryptionHooks::get(serviceContext)->restartRequired()) {
        exitCleanly(EXIT_CLEAN);
    }
#endif

    // Warn if we detect configurations for multiple registered storage engines in the same
    // configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        for (auto&& e : storageElement.Obj()) {
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (isRegisteredStorageEngine(serviceContext, e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    // Disallow running a storage engine that doesn't support capped collections with --profile
    if (!serviceContext->getStorageEngine()->supportsCappedCollections() &&
        serverGlobalParams.defaultProfile != 0) {
        log() << "Running " << storageGlobalParams.engine << " with profiling is not supported. "
              << "Make sure you are not using --profile.";
        exitCleanly(EXIT_BADOPTIONS);
    }

    // Disallow running WiredTiger with --nojournal in a replica set
    if (storageGlobalParams.engine == "wiredTiger" && !storageGlobalParams.dur &&
        replSettings.usingReplSets()) {
        log() << "Running wiredTiger without journaling in a replica set is not "
              << "supported. Make sure you are not using --nojournal and that "
              << "storage.journal.enabled is not set to 'false'.";
        exitCleanly(EXIT_BADOPTIONS);
    }

    logMongodStartupWarnings(storageGlobalParams, serverGlobalParams, serviceContext);

#ifdef MONGO_CONFIG_SSL
    if (sslGlobalParams.sslAllowInvalidCertificates &&
        ((serverGlobalParams.clusterAuthMode.load() == ServerGlobalParams::ClusterAuthMode_x509) ||
         sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-X509"))) {
        log() << "** WARNING: While invalid X509 certificates may be used to" << startupWarningsLog;
        log() << "** connect to this server, they will not be considered" << startupWarningsLog;
        log() << "** permissible for authentication." << startupWarningsLog;
        log() << startupWarningsLog;
    }
#endif

    {
        std::stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    initializeSNMP();

    if (!storageGlobalParams.readOnly) {
        boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
    }

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
    }

    auto startupOpCtx = serviceContext->makeOperationContext(&cc());

    bool canCallFCVSetIfCleanStartup =
        !storageGlobalParams.readOnly && (storageGlobalParams.engine != "devnull");
    if (canCallFCVSetIfCleanStartup && !replSettings.usingReplSets()) {
        Lock::GlobalWrite lk(startupOpCtx.get());
        FeatureCompatibilityVersion::setIfCleanStartup(
            startupOpCtx.get(), repl::StorageInterface::get(serviceContext));
    }

    auto swNonLocalDatabases = repairDatabasesAndCheckVersion(startupOpCtx.get());
    if (!swNonLocalDatabases.isOK()) {
        // SERVER-31611 introduced a return value to `repairDatabasesAndCheckVersion`. Previously,
        // a failing condition would fassert. SERVER-31611 covers a case where the binary (3.6) is
        // refusing to start up because it refuses acknowledgement of FCV 3.2 and requires the
        // user to start up with an older binary. Thus shutting down the server must leave the
        // datafiles in a state that the older binary can start up. This requires going through a
        // clean shutdown.
        //
        // The invariant is *not* a statement that `repairDatabasesAndCheckVersion` must return
        // `MustDowngrade`. Instead, it is meant as a guardrail to protect future developers from
        // accidentally buying into this behavior. New errors that are returned from the method
        // may or may not want to go through a clean shutdown, and they likely won't want the
        // program to return an exit code of `EXIT_NEED_DOWNGRADE`.
        severe(LogComponent::kControl) << "** IMPORTANT: "
                                       << swNonLocalDatabases.getStatus().reason();
        invariant(swNonLocalDatabases == ErrorCodes::MustDowngrade);
        exitCleanly(EXIT_NEED_DOWNGRADE);
    }

    // Assert that the in-memory featureCompatibilityVersion parameter has been explicitly set. If
    // we are part of a replica set and are started up with no data files, we do not set the
    // featureCompatibilityVersion until a primary is chosen. For this case, we expect the
    // in-memory featureCompatibilityVersion parameter to still be uninitialized until after
    // startup.
    if (canCallFCVSetIfCleanStartup &&
        (!replSettings.usingReplSets() || swNonLocalDatabases.getValue())) {
        invariant(serverGlobalParams.featureCompatibility.isVersionInitialized());
    }

    if (storageGlobalParams.upgrade) {
        log() << "finished checking dbs";
        exitCleanly(EXIT_CLEAN);
    }

    // Start up health log writer thread.
    HealthLog::get(startupOpCtx.get()).startup();

    auto const globalAuthzManager = AuthorizationManager::get(serviceContext);
    uassertStatusOK(globalAuthzManager->initialize(startupOpCtx.get()));

    // This is for security on certain platforms (nonce generation)
    srand((unsigned)(curTimeMicros64()) ^ (unsigned(uintptr_t(&startupOpCtx))));

    if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
        Status status = verifySystemIndexes(startupOpCtx.get());
        if (!status.isOK()) {
            log() << redact(status);
            if (status == ErrorCodes::AuthSchemaIncompatible) {
                exitCleanly(EXIT_NEED_UPGRADE);
            } else if (status == ErrorCodes::NotMaster) {
                // Try creating the indexes if we become master. If we do not become master,
                // the master will create the indexes and we will replicate them.
            } else {
                quickExit(EXIT_FAILURE);
            }
        }

        // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
        int foundSchemaVersion;
        status =
            globalAuthzManager->getAuthorizationVersion(startupOpCtx.get(), &foundSchemaVersion);
        if (!status.isOK()) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but startup could not verify schema version: " << status;
            log() << "To manually repair the 'authSchema' document in the admin.system.version "
                     "collection, start up with --setParameter "
                     "startupAuthSchemaValidation=false to disable validation.";
            exitCleanly(EXIT_NEED_UPGRADE);
        }

        if (foundSchemaVersion <= AuthorizationManager::schemaVersion26Final) {
            log() << "This server is using MONGODB-CR, an authentication mechanism which "
                  << "has been removed from MongoDB 4.0. In order to upgrade the auth schema, "
                  << "first downgrade MongoDB binaries to version 3.6 and then run the "
                  << "authSchemaUpgrade command. "
                  << "See http://dochub.mongodb.org/core/3.0-upgrade-to-scram-sha-1";
            exitCleanly(EXIT_NEED_UPGRADE);
        }
    } else if (globalAuthzManager->isAuthEnabled()) {
        error() << "Auth must be disabled when starting without auth schema validation";
        exitCleanly(EXIT_BADOPTIONS);
    } else {
        // If authSchemaValidation is disabled and server is running without auth,
        // warn the user and continue startup without authSchema metadata checks.
        log() << startupWarningsLog;
        log() << "** WARNING: Startup auth schema validation checks are disabled for the "
                 "database."
              << startupWarningsLog;
        log() << "** This mode should only be used to manually repair corrupted auth "
                 "data."
              << startupWarningsLog;
    }

    // This function may take the global lock.
    auto shardingInitialized = ShardingInitializationMongoD::get(startupOpCtx.get())
                                   ->initializeShardingAwarenessIfNeeded(startupOpCtx.get());
    if (shardingInitialized) {
        waitForShardRegistryReload(startupOpCtx.get()).transitional_ignore();
    }

    auto storageEngine = serviceContext->getStorageEngine();
    invariant(storageEngine);
    BackupCursorHooks::initialize(serviceContext, storageEngine);

    if (!storageGlobalParams.readOnly) {
        if (storageEngine->supportsCappedCollections()) {
            logStartup(startupOpCtx.get());
        }

        startMongoDFTDC();

        startFreeMonitoring(serviceContext);

        restartInProgressIndexesFromLastShutdown(startupOpCtx.get());

        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
            // Note: For replica sets, ShardingStateRecovery happens on transition to primary.
            if (!repl::ReplicationCoordinator::get(startupOpCtx.get())->isReplEnabled()) {
                if (ShardingState::get(startupOpCtx.get())->enabled()) {
                    uassertStatusOK(ShardingStateRecovery::recover(startupOpCtx.get()));
                }
            }
        } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
            initializeGlobalShardingStateForMongoD(startupOpCtx.get(),
                                                   ConnectionString::forLocal(),
                                                   kDistLockProcessIdForConfigServer);

            Balancer::create(startupOpCtx->getServiceContext());

            ShardingCatalogManager::create(
                startupOpCtx->getServiceContext(),
                makeShardingTaskExecutor(executor::makeNetworkInterface("AddShard-TaskExecutor")));

            Grid::get(startupOpCtx.get())->setShardingInitialized();
        } else if (replSettings.usingReplSets()) {  // standalone replica set
            auto keysCollectionClient = stdx::make_unique<KeysCollectionClientDirect>();
            auto keyManager = std::make_shared<KeysCollectionManager>(
                KeysCollectionManager::kKeyManagerPurposeString,
                std::move(keysCollectionClient),
                Seconds(KeysRotationIntervalSec));
            keyManager->startMonitoring(startupOpCtx->getServiceContext());

            LogicalTimeValidator::set(startupOpCtx->getServiceContext(),
                                      stdx::make_unique<LogicalTimeValidator>(keyManager));
        }

        repl::ReplicationCoordinator::get(startupOpCtx.get())->startup(startupOpCtx.get());
        const unsigned long long missingRepl =
            checkIfReplMissingFromCommandLine(startupOpCtx.get());
        if (missingRepl) {
            log() << startupWarningsLog;
            log() << "** WARNING: mongod started without --replSet yet " << missingRepl
                  << " documents are present in local.system.replset." << startupWarningsLog;
            log() << "** Database contents may appear inconsistent with the oplog and may "
                     "appear to not contain"
                  << startupWarningsLog;
            log() << "** writes that were visible when this node was running as part of a "
                     "replica set."
                  << startupWarningsLog;
            log() << "** Restart with --replSet unless you are doing maintenance and no "
                     "other clients are connected."
                  << startupWarningsLog;
            log() << "** The TTL collection monitor will not start because of this."
                  << startupWarningsLog;
            log() << "** ";
            log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
            log() << startupWarningsLog;
        } else {
            startTTLBackgroundJob();
        }

        if (replSettings.usingReplSets() || !internalValidateFeaturesAsMaster) {
            serverGlobalParams.validateFeaturesAsMaster.store(false);
        }
    }

    startClientCursorMonitor();

    PeriodicTask::startRunningPeriodicTasks();

    SessionKiller::set(serviceContext,
                       std::make_shared<SessionKiller>(serviceContext, killSessionsLocal));

    // Start up a background task to periodically check for and kill expired transactions; and a
    // background task to periodically check for and decrease cache pressure by decreasing the
    // target size setting for the storage engine's window of available snapshots.
    //
    // Only do this on storage engines supporting snapshot reads, which hold resources we wish to
    // release periodically in order to avoid storage cache pressure build up.
    if (storageEngine->supportsReadConcernSnapshot()) {
        startPeriodicThreadToAbortExpiredTransactions(serviceContext);
        startPeriodicThreadToDecreaseSnapshotHistoryCachePressure(serviceContext);
    }

    // Set up the logical session cache
    LogicalSessionCacheServer kind = LogicalSessionCacheServer::kStandalone;
    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
        kind = LogicalSessionCacheServer::kSharded;
    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
        kind = LogicalSessionCacheServer::kConfigServer;
    } else if (replSettings.usingReplSets()) {
        kind = LogicalSessionCacheServer::kReplicaSet;
    }

    auto sessionCache = makeLogicalSessionCacheD(kind);
    LogicalSessionCache::set(serviceContext, std::move(sessionCache));

    // MessageServer::run will return when exit code closes its socket and we don't need the
    // operation context anymore
    startupOpCtx.reset();

    auto start = serviceContext->getServiceExecutor()->start();
    if (!start.isOK()) {
        error() << "Failed to start the service executor: " << start;
        return EXIT_NET_ERROR;
    }

    start = serviceContext->getServiceEntryPoint()->start();
    if (!start.isOK()) {
        error() << "Failed to start the service entry point: " << start;
        return EXIT_NET_ERROR;
    }

    if (!storageGlobalParams.repair) {
        start = serviceContext->getTransportLayer()->start();
        if (!start.isOK()) {
            error() << "Failed to start the listener: " << start.toString();
            return EXIT_NET_ERROR;
        }
    }

    serviceContext->notifyStartupComplete();

#ifndef _WIN32
    mongo::signalForkSuccess();
#else
    if (ntservice::shouldStartService()) {
        ntservice::reportStatus(SERVICE_RUNNING);
        log() << "Service running";
    }
#endif

    if (MONGO_FAIL_POINT(shutdownAtStartup)) {
        log() << "starting clean exit via failpoint";
        exitCleanly(EXIT_CLEAN);
    }

    MONGO_IDLE_THREAD_BLOCK;
    return waitForShutdown();
}
template <typename Policy>
StatusWith<std::tuple<bool, std::string>> SaslSCRAMServerMechanism<Policy>::_firstStep(
    OperationContext* opCtx, StringData inputData) {
    const auto badCount = [](int got) {
        return Status(ErrorCodes::BadValue,
                      str::stream()
                          << "Incorrect number of arguments for first SCRAM client message, got "
                          << got << " expected at least 3");
    };

    /**
     * gs2-cbind-flag := ("p=" cb-name) / 'y' / 'n'
     * gs2-header := gs2-cbind-flag ',' [ authzid ] ','
     * reserved-mext := "m=" 1*(value-char)
     * client-first-message-bare := [reserved-mext ','] username ',' nonce [',' extensions]
     * client-first-message := gs2-header client-first-message-bare
     */
    const auto gs2_cbind_comma = inputData.find(',');
    if (gs2_cbind_comma == std::string::npos) {
        return badCount(1);
    }
    const auto gs2_cbind_flag = inputData.substr(0, gs2_cbind_comma);
    if (gs2_cbind_flag.startsWith("p=")) {
        return Status(ErrorCodes::BadValue, "Server does not support channel binding");
    }

    if ((gs2_cbind_flag != "y") && (gs2_cbind_flag != "n")) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Incorrect SCRAM client message prefix: " << gs2_cbind_flag);
    }

    const auto gs2_header_comma = inputData.find(',', gs2_cbind_comma + 1);
    if (gs2_header_comma == std::string::npos) {
        return badCount(2);
    }
    auto authzId = inputData.substr(gs2_cbind_comma + 1, gs2_header_comma - (gs2_cbind_comma + 1));
    if (authzId.size()) {
        if (authzId.startsWith("a=")) {
            authzId = authzId.substr(2);
        } else {
            return Status(ErrorCodes::BadValue,
                          str::stream() << "Incorrect SCRAM authzid: " << authzId);
        }
    }

    const auto client_first_message_bare = inputData.substr(gs2_header_comma + 1);
    if (client_first_message_bare.startsWith("m=")) {
        return Status(ErrorCodes::BadValue, "SCRAM mandatory extensions are not supported");
    }

    /* StringSplitter::split() will ignore consecutive delimiters.
     * e.g. "foo,,bar" => {"foo","bar"}
     * This makes our implementation of SCRAM *slightly* more generous
     * in what it will accept than the standard calls for.
     *
     * This does not impact _authMessage, as it's composed from the raw
     * string input, rather than the output of the split operation.
     */
    const auto input = StringSplitter::split(client_first_message_bare.toString(), ",");
    if (input.size() < 2) {
        // gs2-header is not included in this count, so add it back in.
        return badCount(input.size() + 2);
    }

    if (!str::startsWith(input[0], "n=") || input[0].size() < 3) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Invalid SCRAM user name: " << input[0]);
    }
    ServerMechanismBase::_principalName = input[0].substr(2);
    decodeSCRAMUsername(ServerMechanismBase::_principalName);

    if (!authzId.empty() && ServerMechanismBase::_principalName != authzId) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "SCRAM user name " << ServerMechanismBase::_principalName
                                    << " does not match authzid " << authzId);
    }

    if (!str::startsWith(input[1], "r=") || input[1].size() < 6) {
        return Status(ErrorCodes::BadValue,
                      str::stream() << "Invalid SCRAM client nonce: " << input[1]);
    }
    const auto clientNonce = input[1].substr(2);

    // SERVER-16534, SCRAM-SHA-1 must be enabled for authenticating the internal user, so that
    // cluster members may communicate with each other. Hence ignore disabled auth mechanism
    // for the internal user.
    UserName user(ServerMechanismBase::ServerMechanismBase::_principalName,
                  ServerMechanismBase::getAuthenticationDatabase());
    if (!sequenceContains(saslGlobalParams.authenticationMechanisms, "SCRAM-SHA-1") &&
        user != internalSecurity.user->getName()) {
        return Status(ErrorCodes::BadValue, "SCRAM-SHA-1 authentication is disabled");
    }

    // The authentication database is also the source database for the user.
    User* userObj;
    auto authManager = AuthorizationManager::get(opCtx->getServiceContext());

    Status status = authManager->acquireUser(opCtx, user, &userObj);
    if (!status.isOK()) {
        return status;
    }

    User::CredentialData credentials = userObj->getCredentials();
    UserName userName = userObj->getName();

    authManager->releaseUser(userObj);

    _scramCredentials = credentials.scram<HashBlock>();

    if (!_scramCredentials.isValid()) {
        // Check for authentication attempts of the __system user on
        // systems started without a keyfile.
        if (userName == internalSecurity.user->getName()) {
            return Status(ErrorCodes::AuthenticationFailed,
                          "It is not possible to authenticate as the __system user "
                          "on servers started without a --keyFile parameter");
        } else {
            return Status(ErrorCodes::AuthenticationFailed,
                          "Unable to perform SCRAM authentication for a user with missing "
                          "or invalid SCRAM credentials");
        }
    }

    _secrets = scram::Secrets<HashBlock>("",
                                         base64::decode(_scramCredentials.storedKey),
                                         base64::decode(_scramCredentials.serverKey));

    // Generate server-first-message
    // Create text-based nonce as base64 encoding of a binary blob of length multiple of 3
    const int nonceLenQWords = 3;
    uint64_t binaryNonce[nonceLenQWords];

    std::unique_ptr<SecureRandom> sr(SecureRandom::create());

    binaryNonce[0] = sr->nextInt64();
    binaryNonce[1] = sr->nextInt64();
    binaryNonce[2] = sr->nextInt64();

    _nonce =
        clientNonce + base64::encode(reinterpret_cast<char*>(binaryNonce), sizeof(binaryNonce));

    StringBuilder sb;
    sb << "r=" << _nonce << ",s=" << _scramCredentials.salt
       << ",i=" << _scramCredentials.iterationCount;
    std::string outputData = sb.str();

    // add client-first-message-bare and server-first-message to _authMessage
    _authMessage = client_first_message_bare.toString() + "," + outputData;

    return std::make_tuple(false, std::move(outputData));
}
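// For reference, a standalone sketch of the client-first-message split performed by _firstStep
// above, using only the standard library. The sample user name and nonce are illustrative
// values in the RFC 5802 message shape; the variable names here are not part of the MongoDB
// tree.
#include <cassert>
#include <string>

int main() {
    // gs2-header "n,," (no channel binding, empty authzid) followed by
    // client-first-message-bare "n=<user>,r=<client nonce>".
    const std::string clientFirstMessage = "n,,n=alice,r=fyko+d2lbbFgONRv9qkxdawL";

    const auto gs2CbindComma = clientFirstMessage.find(',');
    const auto gs2HeaderComma = clientFirstMessage.find(',', gs2CbindComma + 1);

    const std::string gs2CbindFlag = clientFirstMessage.substr(0, gs2CbindComma);
    const std::string clientFirstMessageBare = clientFirstMessage.substr(gs2HeaderComma + 1);

    assert(gs2CbindFlag == "n");  // channel binding not offered
    assert(clientFirstMessageBare == "n=alice,r=fyko+d2lbbFgONRv9qkxdawL");

    // _firstStep replies with "r=<client nonce + server nonce>,s=<salt>,i=<iterations>" and
    // seeds _authMessage with client-first-message-bare "," server-first-message, which later
    // SCRAM steps extend per RFC 5802 before verifying the client proof.
    return 0;
}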