static ExitCode _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    _initWireSpec();
    auto globalServiceContext = getGlobalServiceContext();

    globalServiceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
    globalServiceContext->setOpObserver(stdx::make_unique<OpObserver>());

    DBDirectClientFactory::get(globalServiceContext)
        .registerImplementation([](OperationContext* txn) {
            return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
        });

    const repl::ReplSettings& replSettings = repl::getGlobalReplicationCoordinator()->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if (replSettings.isMaster())
            l << " master=" << replSettings.isMaster();
        if (replSettings.isSlave())
            l << " slave=" << (int)replSettings.isSlave();

        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

#if defined(_WIN32)
    printTargetMinOS();
#endif

    logProcessDetails();

    checked_cast<ServiceContextMongoD*>(getGlobalServiceContext())->createLockFile();

    transport::TransportLayerLegacy::Options options;
    options.port = listenPort;
    options.ipList = serverGlobalParams.bind_ip;

    // Create, start, and attach the TL
    auto transportLayer = stdx::make_unique<transport::TransportLayerLegacy>(
        options,
        std::make_shared<ServiceEntryPointMongod>(getGlobalServiceContext()->getTransportLayer()));
    auto res = transportLayer->setup();
    if (!res.isOK()) {
        error() << "Failed to set up listener: " << res.toString();
        return EXIT_NET_ERROR;
    }

    std::shared_ptr<DbWebServer> dbWebServer;
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        dbWebServer.reset(new DbWebServer(serverGlobalParams.bind_ip,
                                          serverGlobalParams.port + 1000,
                                          getGlobalServiceContext(),
                                          new RestAdminAccess()));
        if (!dbWebServer->setupSockets()) {
            error() << "Failed to set up sockets for HTTP interface during startup.";
            return EXIT_NET_ERROR;
        }
    }

    getGlobalServiceContext()->initializeGlobalStorageEngine();

#ifdef MONGO_CONFIG_WIREDTIGER_ENABLED
    if (WiredTigerCustomizationHooks::get(getGlobalServiceContext())->restartRequired()) {
        exitCleanly(EXIT_CLEAN);
    }
#endif

    // Warn if we detect configurations for multiple registered storage engines in
    // the same configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        BSONObj storageParamsObj = storageElement.Obj();
        BSONObjIterator i = storageParamsObj.begin();
        while (i.more()) {
            BSONElement e = i.next();
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    if (!getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager()) {
        if (moe::startupOptionsParsed.count("replication.enableMajorityReadConcern") &&
            moe::startupOptionsParsed["replication.enableMajorityReadConcern"].as<bool>()) {
            // Note: we are intentionally only erroring if the user explicitly requested that we
            // enable majority read concern. We do not error if they are implicitly enabled for
            // CSRS because a required step in the upgrade procedure can involve an mmapv1 node in
            // the CSRS in the REMOVED state. This is handled by the TopologyCoordinator.
            invariant(replSettings.isMajorityReadConcernEnabled());
            severe() << "Majority read concern requires a storage engine that supports"
                     << " snapshots, such as wiredTiger. " << storageGlobalParams.engine
                     << " does not support snapshots.";
            exitCleanly(EXIT_BADOPTIONS);
        }
    }

    logMongodStartupWarnings(storageGlobalParams, serverGlobalParams);

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    if (!storageGlobalParams.readOnly) {
        boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
    }

    if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalRecoverOnly)
        return EXIT_NET_ERROR;

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
    }

    auto startupOpCtx = getGlobalServiceContext()->makeOperationContext(&cc());

    repairDatabasesAndCheckVersion(startupOpCtx.get());

    if (storageGlobalParams.upgrade) {
        log() << "finished checking dbs" << endl;
        exitCleanly(EXIT_CLEAN);
    }

    uassertStatusOK(getGlobalAuthorizationManager()->initialize(startupOpCtx.get()));

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned)(curTimeMicros64() ^ startupSrandTimer.micros()));

    // The snapshot thread provides historical collection level and lock statistics for use
    // by the web interface. Only needed when HTTP is enabled.
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        statsSnapshotThread.go();

        invariant(dbWebServer);
        stdx::thread web(stdx::bind(&webServerListenThread, dbWebServer));
        web.detach();
    }

    {
#ifndef _WIN32
        mongo::signalForkSuccess();
#endif
        AuthorizationManager* globalAuthzManager = getGlobalAuthorizationManager();
        if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
            Status status = authindex::verifySystemIndexes(startupOpCtx.get());
            if (!status.isOK()) {
                log() << status.reason();
                exitCleanly(EXIT_NEED_UPGRADE);
            }

            // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
            int foundSchemaVersion;
            status = globalAuthzManager->getAuthorizationVersion(startupOpCtx.get(),
                                                                 &foundSchemaVersion);
            if (!status.isOK()) {
                log() << "Auth schema version is incompatible: "
                      << "User and role management commands require auth data to have "
                      << "at least schema version " << AuthorizationManager::schemaVersion26Final
                      << " but startup could not verify schema version: " << status.toString()
                      << endl;
                exitCleanly(EXIT_NEED_UPGRADE);
            }
            if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
                log() << "Auth schema version is incompatible: "
                      << "User and role management commands require auth data to have "
                      << "at least schema version " << AuthorizationManager::schemaVersion26Final
                      << " but found " << foundSchemaVersion << ". In order to upgrade "
                      << "the auth schema, first downgrade MongoDB binaries to version "
                      << "2.6 and then run the authSchemaUpgrade command." << endl;
                exitCleanly(EXIT_NEED_UPGRADE);
            }
        } else if (globalAuthzManager->isAuthEnabled()) {
            error() << "Auth must be disabled when starting without auth schema validation";
            exitCleanly(EXIT_BADOPTIONS);
        } else {
            // If authSchemaValidation is disabled and server is running without auth,
            // warn the user and continue startup without authSchema metadata checks.
            log() << startupWarningsLog;
            log() << "** WARNING: Startup auth schema validation checks are disabled for the "
                     "database."
                  << startupWarningsLog;
            log() << "** This mode should only be used to manually repair corrupted auth "
                     "data."
                  << startupWarningsLog;
        }

        if (!storageGlobalParams.readOnly) {
            logStartup(startupOpCtx.get());

            getDeleter()->startWorkers();

            restartInProgressIndexesFromLastShutdown(startupOpCtx.get());

            repl::getGlobalReplicationCoordinator()->startup(startupOpCtx.get());

            const unsigned long long missingRepl =
                checkIfReplMissingFromCommandLine(startupOpCtx.get());
            if (missingRepl) {
                log() << startupWarningsLog;
                log() << "** WARNING: mongod started without --replSet yet " << missingRepl
                      << " documents are present in local.system.replset" << startupWarningsLog;
                log() << "** Restart with --replSet unless you are doing maintenance and "
                      << " no other clients are connected." << startupWarningsLog;
                log() << "** The TTL collection monitor will not start because of this."
                      << startupWarningsLog;
                log() << "** ";
                log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
                log() << startupWarningsLog;
            } else {
                startTTLBackgroundJob();
            }
        }
    }

    startClientCursorMonitor();

    PeriodicTask::startRunningPeriodicTasks();

    HostnameCanonicalizationWorker::start(getGlobalServiceContext());

    uassertStatusOK(ShardingState::get(startupOpCtx.get())
                        ->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));

    if (!storageGlobalParams.readOnly) {
        startFTDC();

        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
            // Note: For replica sets, ShardingStateRecovery happens on transition to primary.
            if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
                uassertStatusOK(ShardingStateRecovery::recover(startupOpCtx.get()));
            }
        } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
            uassertStatusOK(
                initializeGlobalShardingStateForMongod(startupOpCtx.get(),
                                                       ConnectionString::forLocal(),
                                                       kDistLockProcessIdForConfigServer));
            Balancer::create(startupOpCtx->getServiceContext());
        }
    }

    // MessageServer::run will return when exit code closes its socket and we don't need the
    // operation context anymore
    startupOpCtx.reset();

    auto start = getGlobalServiceContext()->addAndStartTransportLayer(std::move(transportLayer));
    if (!start.isOK()) {
        error() << "Failed to start the listener: " << start.toString();
        return EXIT_NET_ERROR;
    }

    return waitForShutdown();
}
bool HostAndPort::isSelf() const {
    int _p = port();
    int p = _p == -1 ? ServerGlobalParams::DefaultDBPort : _p;
    string host = str::stream() << this->host() << ":" << p;

    {
        // check cache for this host
        // debatably something _could_ change, but I'm not sure right now (erh 10/14/2010)
        scoped_lock lk( isSelfCommand._cacheLock );
        map<string,bool>::const_iterator i = isSelfCommand._cache.find( host );
        if ( i != isSelfCommand._cache.end() )
            return i->second;
    }

#if !defined(_WIN32) && !defined(__sunos__)
    // on linux and os x we can do a quick check for an ip match
    // no need for ip match if the ports do not match
    if (p == serverGlobalParams.port) {
        const vector<string> myaddrs = getMyAddrs();
        const vector<string> addrs = getAllIPs(_host);

        for (vector<string>::const_iterator i=myaddrs.begin(), iend=myaddrs.end(); i!=iend; ++i) {
            for (vector<string>::const_iterator j=addrs.begin(), jend=addrs.end(); j!=jend; ++j) {
                string a = *i;
                string b = *j;

                if ( a == b ||
                     ( str::startsWith( a , "127." ) &&
                       str::startsWith( b , "127." ) )  // 127. is all loopback
                   ) {
                    // add to cache
                    scoped_lock lk( isSelfCommand._cacheLock );
                    isSelfCommand._cache[host] = true;
                    return true;
                }
            }
        }
    }
#endif

    if ( ! Listener::getTimeTracker() ) {
        // this ensures we are actually running a server
        // this may return true later, so may want to retry
        return false;
    }

    try {
        isSelfCommand.init();

        DBClientConnection conn;
        string errmsg;
        if ( ! conn.connect( host , errmsg ) ) {
            // should this go in the cache?
            return false;
        }

        if (getGlobalAuthorizationManager()->isAuthEnabled() && isInternalAuthSet()) {
            if (!authenticateInternalUser(&conn)) {
                return false;
            }
        }

        BSONObj out;
        bool ok = conn.simpleCommand( "admin" , &out , "_isSelf" );
        bool me = ok && out["id"].type() == jstOID && isSelfCommand._id == out["id"].OID();

        // add to cache
        scoped_lock lk( isSelfCommand._cacheLock );
        isSelfCommand._cache[host] = me;

        return me;
    }
    catch ( std::exception& e ) {
        warning() << "couldn't check isSelf (" << host << ") " << e.what() << endl;
    }

    return false;
}
// TODO: The bulk of the implementation of this will need to change once we're using the
// new v2 authorization storage format.
bool run(const string& dbname,
         BSONObj& cmdObj,
         int options,
         string& errmsg,
         BSONObjBuilder& result,
         bool fromRepl) {
    std::string userName;
    std::string password;
    std::string userSource;  // TODO: remove this.
    bool readOnly;           // TODO: remove this.
    BSONElement extraData;
    BSONElement roles;

    if (cmdObj.hasField("pwd") && cmdObj.hasField("userSource")) {
        errmsg = "User objects can't have both 'pwd' and 'userSource'";
        return false;
    }
    if (!cmdObj.hasField("pwd") && !cmdObj.hasField("userSource")) {
        errmsg = "User objects must have one of 'pwd' and 'userSource'";
        return false;
    }
    if (cmdObj.hasField("roles") && cmdObj.hasField("readOnly")) {
        errmsg = "User objects can't have both 'roles' and 'readOnly'";
        return false;
    }

    Status status = bsonExtractStringField(cmdObj, "user", &userName);
    if (!status.isOK()) {
        addStatus(Status(ErrorCodes::UserModificationFailed, "\"user\" string not specified"),
                  result);
        return false;
    }

    status = bsonExtractStringFieldWithDefault(cmdObj, "pwd", "", &password);
    if (!status.isOK()) {
        addStatus(Status(ErrorCodes::UserModificationFailed, "Invalid \"pwd\" string"), result);
        return false;
    }

    status = bsonExtractStringFieldWithDefault(cmdObj, "userSource", "", &userSource);
    if (!status.isOK()) {
        addStatus(Status(ErrorCodes::UserModificationFailed, "Invalid \"userSource\" string"),
                  result);
        return false;
    }

    status = bsonExtractBooleanFieldWithDefault(cmdObj, "readOnly", false, &readOnly);
    if (!status.isOK()) {
        addStatus(Status(ErrorCodes::UserModificationFailed, "Invalid \"readOnly\" boolean"),
                  result);
        return false;
    }

    if (cmdObj.hasField("extraData")) {
        status = bsonExtractField(cmdObj, "extraData", &extraData);
        if (!status.isOK()) {
            addStatus(Status(ErrorCodes::UserModificationFailed, "Invalid \"extraData\" object"),
                      result);
            return false;
        }
    }

    if (cmdObj.hasField("roles")) {
        status = bsonExtractField(cmdObj, "roles", &roles);
        if (!status.isOK()) {
            addStatus(Status(ErrorCodes::UserModificationFailed, "Invalid \"roles\" array"),
                      result);
            return false;
        }
    }

    BSONObjBuilder userObjBuilder;
    userObjBuilder.append("user", userName);
    if (cmdObj.hasField("pwd")) {
        // TODO: hash password once we're receiving plaintext passwords here.
        userObjBuilder.append("pwd", password);
    }

    if (cmdObj.hasField("userSource")) {
        userObjBuilder.append("userSource", userSource);
    }

    if (cmdObj.hasField("readOnly")) {
        userObjBuilder.append("readOnly", readOnly);
    }

    if (cmdObj.hasField("extraData")) {
        userObjBuilder.append("extraData", extraData);
    }

    if (cmdObj.hasField("roles")) {
        userObjBuilder.append(roles);
    }

    status = getGlobalAuthorizationManager()->insertPrivilegeDocument(dbname,
                                                                      userObjBuilder.obj());
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }

    return true;
}
void _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    bool is32bit = sizeof(int*) == 4;

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log();
        l << "MongoDB starting : pid=" << pid
          << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if( replSettings.master ) l << " master=" << replSettings.master;
        if( replSettings.slave ) l << " slave=" << (int) replSettings.slave;
        l << ( is32bit ? " 32" : " 64" ) << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log() << "_DEBUG build (which is slower)" << endl;

    logStartupWarnings();

#if defined(_WIN32)
    printTargetMinOS();
#endif

    logProcessDetails();

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO check non-journal subdirs if using directory-per-db
    checkReadAhead(storageGlobalParams.dbpath);

    acquirePathLock(mongodGlobalParams.repair);
    boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");

    FileAllocator::get()->start();

    // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    MONGO_ASSERT_ON_EXCEPTION_WITH_MSG( clearTmpFiles(), "clear tmp files" );

    dur::startup();

    if (storageGlobalParams.durOptions & StorageGlobalParams::DurRecoverOnly)
        return;

    unsigned long long missingRepl = checkIfReplMissingFromCommandLine();
    if (missingRepl) {
        log() << startupWarningsLog;
        log() << "** WARNING: mongod started without --replSet yet " << missingRepl
              << " documents are present in local.system.replset" << startupWarningsLog;
        log() << "** Restart with --replSet unless you are doing maintenance and no"
              << " other clients are connected." << startupWarningsLog;
        log() << "** The TTL collection monitor will not start because of this."
              << startupWarningsLog;
        log() << "** For more info see http://dochub.mongodb.org/core/ttlcollections"
              << startupWarningsLog;
        log() << startupWarningsLog;
    }

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
        globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback );
        globalScriptEngine->setGetCurrentOpIdCallback( jsGetCurrentOpIdCallback );
    }

    // On replica set members we only clear temp collections on DBs other than "local" during
    // promotion to primary. On pure slaves, they are only cleared when the oplog tells them to.
    // The local DB is special because it is not replicated. See SERVER-10927 for more details.
    const bool shouldClearNonLocalTmpCollections =
        !(missingRepl || replSettings.usingReplSets() || replSettings.slave == SimpleSlave);
    repairDatabasesAndCheckVersion(shouldClearNonLocalTmpCollections);

    if (mongodGlobalParams.upgrade)
        return;

    uassertStatusOK(getGlobalAuthorizationManager()->initialize());

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned) (curTimeMicros() ^ startupSrandTimer.micros()));

    snapshotThread.go();
    d.clientCursorMonitor.go();
    PeriodicTask::startRunningPeriodicTasks();

    if (missingRepl) {
        // a warning was logged earlier
    }
    else {
        startTTLBackgroundJob();
    }

#ifndef _WIN32
    mongo::signalForkSuccess();
#endif

    if(getGlobalAuthorizationManager()->isAuthEnabled()) {
        // open admin db in case we need to use it later. TODO this is not the right way to
        // resolve this.
        Client::WriteContext c("admin", storageGlobalParams.dbpath);
    }

    getDeleter()->startWorkers();

    // Starts a background thread that rebuilds all incomplete indices.
    indexRebuilder.go();

    listen(listenPort);

    // listen() will return when exit code closes its socket.
    exitCleanly(EXIT_NET_ERROR);
}
Status CmdAuthenticate::_authenticateCR(const UserName& user, const BSONObj& cmdObj) {

    if (user == internalSecurity.user->getName() &&
        serverGlobalParams.clusterAuthMode.load() == ServerGlobalParams::ClusterAuthMode_x509) {
        return Status(ErrorCodes::AuthenticationFailed,
                      "Mechanism x509 is required for internal cluster authentication");
    }

    if (_isCRAuthDisabled) {
        // SERVER-8461, MONGODB-CR must be enabled for authenticating the internal user, so that
        // cluster members may communicate with each other.
        if (user != internalSecurity.user->getName()) {
            return Status(ErrorCodes::BadValue, _nonceAuthenticationDisabledMessage);
        }
    }

    string key = cmdObj.getStringField("key");
    string received_nonce = cmdObj.getStringField("nonce");

    if( user.getUser().empty() || key.empty() || received_nonce.empty() ) {
        sleepmillis(10);
        return Status(ErrorCodes::ProtocolError,
                      "field missing/wrong type in received authenticate command");
    }

    stringstream digestBuilder;

    {
        ClientBasic *client = ClientBasic::getCurrent();
        boost::scoped_ptr<AuthenticationSession> session;
        client->swapAuthenticationSession(session);
        if (!session || session->getType() != AuthenticationSession::SESSION_TYPE_MONGO) {
            sleepmillis(30);
            return Status(ErrorCodes::ProtocolError, "No pending nonce");
        }
        else {
            nonce64 nonce = static_cast<MongoAuthenticationSession*>(session.get())->getNonce();
            digestBuilder << hex << nonce;
            if (digestBuilder.str() != received_nonce) {
                sleepmillis(30);
                return Status(ErrorCodes::AuthenticationFailed, "Received wrong nonce.");
            }
        }
    }

    User* userObj;
    Status status = getGlobalAuthorizationManager()->acquireUser(user, &userObj);
    if (!status.isOK()) {
        // Failure to find the privilege document indicates no-such-user, a fact that we do not
        // wish to reveal to the client. So, we return AuthenticationFailed rather than passing
        // through the returned status.
        return Status(ErrorCodes::AuthenticationFailed, status.toString());
    }
    string pwd = userObj->getCredentials().password;
    getGlobalAuthorizationManager()->releaseUser(userObj);

    md5digest d;
    {
        digestBuilder << user.getUser() << pwd;
        string done = digestBuilder.str();

        md5_state_t st;
        md5_init(&st);
        md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
        md5_finish(&st, d);
    }

    string computed = digestToString( d );

    if ( key != computed ) {
        return Status(ErrorCodes::AuthenticationFailed, "key mismatch");
    }

    AuthorizationSession* authorizationSession =
        ClientBasic::getCurrent()->getAuthorizationSession();
    status = authorizationSession->addAndAuthorizeUser(user);
    if (!status.isOK()) {
        return status;
    }

    return Status::OK();
}
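For context, here is a minimal standalone sketch of the client-side computation that the server-side check above mirrors. Under the legacy MONGODB-CR scheme the stored "pwd" credential is md5hex(username + ":mongo:" + password), and the "key" the client sends back is md5hex(nonceHex + username + pwd), which is exactly what the digestBuilder recomputes. This is illustrative only, not MongoDB's client code; it assumes OpenSSL's MD5() is available, and the helper names are hypothetical.

// Illustrative sketch (not part of the original file): client-side MONGODB-CR key
// computation. Build with -lcrypto; MD5() is OpenSSL's one-shot digest function.
#include <openssl/md5.h>
#include <cstdio>
#include <string>

static std::string md5hex(const std::string& input) {
    unsigned char digest[MD5_DIGEST_LENGTH];
    MD5(reinterpret_cast<const unsigned char*>(input.data()), input.size(), digest);
    char buf[2 * MD5_DIGEST_LENGTH + 1];
    for (int i = 0; i < MD5_DIGEST_LENGTH; ++i) {
        std::snprintf(buf + 2 * i, 3, "%02x", static_cast<unsigned>(digest[i]));
    }
    return std::string(buf, 2 * MD5_DIGEST_LENGTH);
}

// 'nonceHex' is the hex nonce string obtained via getnonce. The return value is what a
// client would send as "key", and what the server recomputes from its stored credential.
std::string computeCRKey(const std::string& nonceHex,
                         const std::string& username,
                         const std::string& cleartextPassword) {
    // Stored credential: md5hex(username + ":mongo:" + password), as in the CR scheme.
    const std::string pwdDigest = md5hex(username + ":mongo:" + cleartextPassword);
    // Proof of possession: md5hex(nonceHex + username + pwdDigest).
    return md5hex(nonceHex + username + pwdDigest);
}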
void TaskRunner::_runTasks() {
    Client* client = nullptr;
    ServiceContext::UniqueOperationContext txn;

    while (Task task = _waitForNextTask()) {
        if (!txn) {
            if (!client) {
                // We initialize cc() because ServiceContextMongoD::_newOpCtx() expects cc()
                // to be equal to the client used to create the operation context.
                Client::initThreadIfNotAlready();
                client = &cc();
                if (getGlobalAuthorizationManager()->isAuthEnabled()) {
                    AuthorizationSession::get(client)->grantInternalAuthorization();
                }
            }
            txn = client->makeOperationContext();
        }

        NextAction nextAction = runSingleTask(task, txn.get(), Status::OK());

        if (nextAction != NextAction::kKeepOperationContext) {
            txn.reset();
        }

        if (nextAction == NextAction::kCancel) {
            break;
        }
        // Release thread back to pool after disposing if no scheduled tasks in queue.
        if (nextAction == NextAction::kDisposeOperationContext ||
            nextAction == NextAction::kInvalid) {
            stdx::lock_guard<stdx::mutex> lk(_mutex);
            if (_tasks.empty()) {
                _finishRunTasks_inlock();
                return;
            }
        }
    }
    txn.reset();

    std::list<Task> tasks;
    UniqueLock lk{_mutex};

    auto cancelTasks = [&]() {
        tasks.swap(_tasks);
        lk.unlock();
        // Cancel remaining tasks with a CallbackCanceled status.
        for (auto task : tasks) {
            runSingleTask(task,
                          nullptr,
                          Status(ErrorCodes::CallbackCanceled,
                                 "this task has been canceled by a previously invoked task"));
        }
        tasks.clear();
    };
    cancelTasks();

    lk.lock();
    _finishRunTasks_inlock();
}
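For illustration, a minimal sketch of how a caller might hand work to the loop above. It assumes TaskRunner exposes a schedule() method and that Task is a callable of the same shape runSingleTask() uses here, i.e. (OperationContext*, const Status&) returning TaskRunner::NextAction; treat the method name and header path as assumptions, not a definitive API reference.

// Illustrative sketch only (not part of the original file). Assumes the TaskRunner
// declarations are visible, e.g. via an include such as "mongo/db/repl/task_runner.h".
void scheduleExampleTask(TaskRunner* runner) {
    runner->schedule([](OperationContext* txn, const Status& status) {
        if (!status.isOK()) {
            // The runner is cancelling queued tasks ('txn' may be null in that path,
            // as in the cancelTasks lambda above); do no work.
            return TaskRunner::NextAction::kCancel;
        }
        // ... perform work that needs 'txn' here ...
        // Returning kDisposeOperationContext lets the runner destroy the operation
        // context and, once the queue is empty, return the thread to the pool.
        return TaskRunner::NextAction::kDisposeOperationContext;
    });
}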
Status storeMongodOptions(const moe::Environment& params, const std::vector<std::string>& args) {
    Status ret = storeServerOptions(params, args);
    if (!ret.isOK()) {
        std::cerr << "Error storing command line: " << ret.toString() << std::endl;
        ::_exit(EXIT_BADOPTIONS);
    }

    if (params.count("dbpath")) {
        storageGlobalParams.dbpath = params["dbpath"].as<string>();
        if (params.count("fork") && storageGlobalParams.dbpath[0] != '/') {
            // we need to change dbpath if we fork since we change
            // cwd to "/"
            // fork only exists on *nix
            // so '/' is safe
            storageGlobalParams.dbpath = serverGlobalParams.cwd + "/" + storageGlobalParams.dbpath;
        }
    }
#ifdef _WIN32
    if (storageGlobalParams.dbpath.size() > 1 &&
        storageGlobalParams.dbpath[storageGlobalParams.dbpath.size()-1] == '/') {
        // size() check is for the unlikely possibility of --dbpath "/"
        storageGlobalParams.dbpath =
            storageGlobalParams.dbpath.erase(storageGlobalParams.dbpath.size()-1);
    }
#endif

    if ( params.count("slowms")) {
        serverGlobalParams.slowMS = params["slowms"].as<int>();
    }

    if ( params.count("syncdelay")) {
        storageGlobalParams.syncdelay = params["syncdelay"].as<double>();
    }

    if (params.count("directoryperdb")) {
        storageGlobalParams.directoryperdb = true;
    }
    if (params.count("cpu")) {
        serverGlobalParams.cpu = true;
    }
    if (params.count("noauth")) {
        getGlobalAuthorizationManager()->setAuthEnabled(false);
    }
    if (params.count("auth")) {
        getGlobalAuthorizationManager()->setAuthEnabled(true);
    }
    if (params.count("quota")) {
        storageGlobalParams.quota = true;
    }
    if (params.count("quotaFiles")) {
        storageGlobalParams.quota = true;
        storageGlobalParams.quotaFiles = params["quotaFiles"].as<int>() - 1;
    }

    if ((params.count("nodur") || params.count("nojournal")) &&
        (params.count("dur") || params.count("journal"))) {
        std::cerr << "Can't specify both --journal and --nojournal options." << std::endl;
        ::_exit(EXIT_BADOPTIONS);
    }

    if (params.count("nodur") || params.count("nojournal")) {
        storageGlobalParams.dur = false;
    }

    if (params.count("dur") || params.count("journal")) {
        storageGlobalParams.dur = true;
    }

    if (params.count("durOptions")) {
        storageGlobalParams.durOptions = params["durOptions"].as<int>();
    }
    if( params.count("journalCommitInterval") ) {
        // don't check if dur is false here as many will just use the default, and will default
        // to off on win32. ie no point making life a little more complex by giving an error on
        // a dev environment.
        storageGlobalParams.journalCommitInterval =
            params["journalCommitInterval"].as<unsigned>();
        if (storageGlobalParams.journalCommitInterval <= 1 ||
            storageGlobalParams.journalCommitInterval > 300) {
            std::cerr << "--journalCommitInterval out of allowed range (0-300ms)" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
    }
    if (params.count("journalOptions")) {
        storageGlobalParams.durOptions = params["journalOptions"].as<int>();
    }
    if (params.count("nohints")) {
        storageGlobalParams.useHints = false;
    }
    if (params.count("nopreallocj")) {
        storageGlobalParams.preallocj = false;
    }
    if (params.count("httpinterface")) {
        if (params.count("nohttpinterface")) {
            std::cerr << "can't have both --httpinterface and --nohttpinterface" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        serverGlobalParams.isHttpInterfaceEnabled = true;
    }
    // SERVER-10019 Enabling rest/jsonp without --httpinterface should break in the future
    if (params.count("rest")) {
        if (params.count("nohttpinterface")) {
            log() << "** WARNING: Should not specify both --rest and --nohttpinterface"
                  << startupWarningsLog;
        }
        else if (!params.count("httpinterface")) {
            log() << "** WARNING: --rest is specified without --httpinterface,"
                  << startupWarningsLog;
            log() << "** enabling http interface" << startupWarningsLog;
            serverGlobalParams.isHttpInterfaceEnabled = true;
        }
        serverGlobalParams.rest = true;
    }
    if (params.count("jsonp")) {
        if (params.count("nohttpinterface")) {
            log() << "** WARNING: Should not specify both --jsonp and --nohttpinterface"
                  << startupWarningsLog;
        }
        else if (!params.count("httpinterface")) {
            log() << "** WARNING --jsonp is specified without --httpinterface,"
                  << startupWarningsLog;
            log() << "** enabling http interface" << startupWarningsLog;
            serverGlobalParams.isHttpInterfaceEnabled = true;
        }
        serverGlobalParams.jsonp = true;
    }
    if (params.count("noscripting")) {
        mongodGlobalParams.scriptingEnabled = false;
    }
    if (params.count("noprealloc")) {
        storageGlobalParams.prealloc = false;
        cout << "note: noprealloc may hurt performance in many applications" << endl;
    }
    if (params.count("smallfiles")) {
        storageGlobalParams.smallfiles = true;
    }
    if (params.count("diaglog")) {
        int x = params["diaglog"].as<int>();
        if ( x < 0 || x > 7 ) {
            std::cerr << "can't interpret --diaglog setting" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        _diaglog.setLevel(x);
    }

    if ((params.count("dur") || params.count("journal")) && params.count("repair")) {
        std::cerr << "Can't specify both --journal and --repair options." << std::endl;
        ::_exit(EXIT_BADOPTIONS);
    }

    if (params.count("repair")) {
        Record::MemoryTrackingEnabled = false;
        mongodGlobalParams.upgrade = 1; // --repair implies --upgrade
        mongodGlobalParams.repair = 1;
        storageGlobalParams.dur = false;
    }
    if (params.count("upgrade")) {
        Record::MemoryTrackingEnabled = false;
        mongodGlobalParams.upgrade = 1;
    }
    if (params.count("notablescan")) {
        storageGlobalParams.noTableScan = true;
    }
    if (params.count("master")) {
        replSettings.master = true;
    }
    if (params.count("slave")) {
        replSettings.slave = SimpleSlave;
    }
    if (params.count("slavedelay")) {
        replSettings.slavedelay = params["slavedelay"].as<int>();
    }
    if (params.count("fastsync")) {
        replSettings.fastsync = true;
    }
    if (params.count("autoresync")) {
        replSettings.autoresync = true;
        if( params.count("replSet") ) {
            std::cerr << "--autoresync is not used with --replSet\nsee "
                      << "http://dochub.mongodb.org/core/resyncingaverystalereplicasetmember"
                      << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
    }
    if (params.count("source")) {
        /* specifies what the source in local.sources should be */
        replSettings.source = params["source"].as<string>().c_str();
    }
    if( params.count("pretouch") ) {
        replSettings.pretouch = params["pretouch"].as<int>();
    }
    if (params.count("replSet")) {
        if (params.count("slavedelay")) {
            std::cerr << "--slavedelay cannot be used with --replSet" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        else if (params.count("only")) {
            std::cerr << "--only cannot be used with --replSet" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        /* seed list of hosts for the repl set */
        replSettings.replSet = params["replSet"].as<string>().c_str();
    }
    if (params.count("replIndexPrefetch")) {
        replSettings.rsIndexPrefetch = params["replIndexPrefetch"].as<std::string>();
    }
    if (params.count("noIndexBuildRetry")) {
        serverGlobalParams.indexBuildRetry = false;
    }
    if (params.count("only")) {
        replSettings.only = params["only"].as<string>().c_str();
    }
    if( params.count("nssize") ) {
        int x = params["nssize"].as<int>();
        if (x <= 0 || x > (0x7fffffff/1024/1024)) {
            std::cerr << "bad --nssize arg" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        storageGlobalParams.lenForNewNsFiles = x * 1024 * 1024;
        verify(storageGlobalParams.lenForNewNsFiles > 0);
    }
    if (params.count("oplogSize")) {
        long long x = params["oplogSize"].as<int>();
        if (x <= 0) {
            std::cerr << "bad --oplogSize arg" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        // note a small size such as x==1 is ok for an arbiter.
        if( x > 1000 && sizeof(void*) == 4 ) {
            StringBuilder sb;
            std::cerr << "--oplogSize of " << x
                      << "MB is too big for 32 bit version. Use 64 bit build instead."
                      << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        replSettings.oplogSize = x * 1024 * 1024;
        verify(replSettings.oplogSize > 0);
    }
    if (params.count("cacheSize")) {
        long x = params["cacheSize"].as<long>();
        if (x <= 0) {
            std::cerr << "bad --cacheSize arg" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        std::cerr << "--cacheSize option not currently supported" << std::endl;
        ::_exit(EXIT_BADOPTIONS);
    }
    if (!params.count("port")) {
        if( params.count("configsvr") ) {
            serverGlobalParams.port = ServerGlobalParams::ConfigServerPort;
        }
        if( params.count("shardsvr") ) {
            if( params.count("configsvr") ) {
                std::cerr << "can't do --shardsvr and --configsvr at the same time" << std::endl;
                ::_exit(EXIT_BADOPTIONS);
            }
            serverGlobalParams.port = ServerGlobalParams::ShardServerPort;
        }
    }
    else {
        if (serverGlobalParams.port <= 0 || serverGlobalParams.port > 65535) {
            std::cerr << "bad --port number" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
    }
    if ( params.count("configsvr" ) ) {
        serverGlobalParams.configsvr = true;
        storageGlobalParams.smallfiles = true; // config server implies small files
        if (replSettings.usingReplSets() || replSettings.master || replSettings.slave) {
            std::cerr << "replication should not be enabled on a config server" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
        if (!params.count("nodur") && !params.count("nojournal"))
            storageGlobalParams.dur = true;
        if (!params.count("dbpath"))
            storageGlobalParams.dbpath = "/data/configdb";
        replSettings.master = true;
        if (!params.count("oplogSize"))
            replSettings.oplogSize = 5 * 1024 * 1024;
    }
    if ( params.count( "profile" ) ) {
        serverGlobalParams.defaultProfile = params["profile"].as<int>();
    }
    if (params.count("ipv6")) {
        enableIPv6();
    }

    if (params.count("noMoveParanoia") && params.count("moveParanoia")) {
        std::cerr << "The moveParanoia and noMoveParanoia flags cannot both be set; "
                  << "please use only one of them." << std::endl;
        ::_exit(EXIT_BADOPTIONS);
    }

    if (params.count("noMoveParanoia"))
        serverGlobalParams.moveParanoia = false;

    if (params.count("moveParanoia"))
        serverGlobalParams.moveParanoia = true;

    if (params.count("pairwith") || params.count("arbiter") || params.count("opIdMem")) {
        std::cerr << "****\n"
                  << "Replica Pairs have been deprecated. Invalid options: --pairwith, "
                  << "--arbiter, and/or --opIdMem\n"
                  << "<http://dochub.mongodb.org/core/replicapairs>\n"
                  << "****" << std::endl;
        ::_exit(EXIT_BADOPTIONS);
    }

    // needs to be after things like --configsvr parsing, thus here.
    if (params.count("repairpath")) {
        storageGlobalParams.repairpath = params["repairpath"].as<string>();
        if (!storageGlobalParams.repairpath.size()) {
            std::cerr << "repairpath is empty" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }

        if (storageGlobalParams.dur &&
            !str::startsWith(storageGlobalParams.repairpath, storageGlobalParams.dbpath)) {
            std::cerr << "You must use a --repairpath that is a subdirectory of "
                      << "--dbpath when using journaling" << std::endl;
            ::_exit(EXIT_BADOPTIONS);
        }
    }
    else {
        storageGlobalParams.repairpath = storageGlobalParams.dbpath;
    }

    if (replSettings.pretouch)
        log() << "--pretouch " << replSettings.pretouch << endl;

    if (sizeof(void*) == 4 && !(params.count("nodur") || params.count("nojournal") ||
                                params.count("dur") || params.count("journal"))) {
        // trying to make this stand out more like startup warnings
        log() << endl;
        warning() << "32-bit servers don't have journaling enabled by default. "
                  << "Please use --journal if you want durability." << endl;
        log() << endl;
    }

    return Status::OK();
}
bool isSelf(const HostAndPort& hostAndPort) {
    // Fastpath: check if the host&port in question is bound to one
    // of the interfaces on this machine.
    // No need for ip match if the ports do not match
    if (hostAndPort.port() == serverGlobalParams.port) {
        std::vector<std::string> myAddrs = serverGlobalParams.bind_ip.empty() ?
            getBoundAddrs(IPv6Enabled()) :
            std::vector<std::string>();

        if (!serverGlobalParams.bind_ip.empty()) {
            boost::split(myAddrs, serverGlobalParams.bind_ip, boost::is_any_of(", "));
        }

        const std::vector<std::string> hostAddrs = getAddrsForHost(hostAndPort.host(),
                                                                   hostAndPort.port(),
                                                                   IPv6Enabled());

        for (std::vector<std::string>::const_iterator i = myAddrs.begin();
             i != myAddrs.end(); ++i) {
            for (std::vector<std::string>::const_iterator j = hostAddrs.begin();
                 j != hostAddrs.end(); ++j) {
                if (*i == *j) {
                    return true;
                }
            }
        }
    }

    // Ensure that the server is up and ready to accept incoming network requests.
    const Listener* listener = Listener::getTimeTracker();
    if (!listener) {
        return false;
    }
    listener->waitUntilListening();

    try {
        DBClientConnection conn;
        std::string errmsg;
        conn.setSoTimeout(30); // 30 second timeout
        if (!conn.connect(hostAndPort, errmsg)) {
            return false;
        }

        if (getGlobalAuthorizationManager()->isAuthEnabled() && isInternalAuthSet()) {
            if (!authenticateInternalUser(&conn)) {
                return false;
            }
        }
        BSONObj out;
        bool ok = conn.simpleCommand("admin", &out, "_isSelf");
        bool me = ok && out["id"].type() == jstOID && instanceId == out["id"].OID();

        return me;
    }
    catch (const std::exception& e) {
        warning() << "couldn't check isSelf (" << hostAndPort << ") " << e.what() << std::endl;
    }

    return false;
}
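The fast path above reduces to an address-set intersection: the node treats the candidate as "itself" if any locally bound address matches any address the candidate host name resolves to. A minimal standalone restatement is sketched below; it is illustrative only, and 'myAddrs'/'hostAddrs' stand in for the results of getBoundAddrs() and getAddrsForHost() used in the function above.

// Illustrative sketch (not part of the original file): the address-intersection check
// performed by the fast path, factored out as a standalone helper.
#include <algorithm>
#include <string>
#include <vector>

bool anyAddressMatches(const std::vector<std::string>& myAddrs,
                       const std::vector<std::string>& hostAddrs) {
    for (const std::string& mine : myAddrs) {
        // Return true as soon as one locally bound address appears in the remote host's
        // resolved address list.
        if (std::find(hostAddrs.begin(), hostAddrs.end(), mine) != hostAddrs.end()) {
            return true;
        }
    }
    return false;
}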
bool run(const string& dbname,
         BSONObj& cmdObj,
         int options,
         string& errmsg,
         BSONObjBuilder& result,
         bool fromRepl) {
    AuthorizationManager* authzManager = getGlobalAuthorizationManager();
    AuthzDocumentsUpdateGuard updateGuard(authzManager);
    if (!updateGuard.tryLock("Grant privileges to role")) {
        addStatus(Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."),
                  result);
        return false;
    }

    RoleName roleName;
    PrivilegeVector privilegesToAdd;
    BSONObj writeConcern;
    Status status = auth::parseAndValidateRolePrivilegeManipulationCommands(
            cmdObj,
            "grantPrivilegesToRole",
            dbname,
            &roleName,
            &privilegesToAdd,
            &writeConcern);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }

    if (!authzManager->roleExists(roleName)) {
        addStatus(Status(ErrorCodes::RoleNotFound,
                         mongoutils::str::stream() << roleName.getFullName()
                                 << " does not name an existing role"),
                  result);
        return false;
    }

    if (authzManager->isBuiltinRole(roleName)) {
        addStatus(Status(ErrorCodes::InvalidRoleModification,
                         mongoutils::str::stream() << roleName.getFullName()
                                 << " is a built-in role and cannot be modified."),
                  result);
        return false;
    }

    PrivilegeVector privileges = authzManager->getDirectPrivilegesForRole(roleName);
    for (PrivilegeVector::iterator it = privilegesToAdd.begin();
            it != privilegesToAdd.end(); ++it) {
        Privilege::addPrivilegeToPrivilegeVector(&privileges, *it);
    }

    // Build up update modifier object to $set privileges.
    mutablebson::Document updateObj;
    mutablebson::Element setElement = updateObj.makeElementObject("$set");
    status = updateObj.root().pushBack(setElement);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }
    mutablebson::Element privilegesElement = updateObj.makeElementArray("privileges");
    status = setElement.pushBack(privilegesElement);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }
    status = authzManager->getBSONForPrivileges(privileges, privilegesElement);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }

    BSONObjBuilder updateBSONBuilder;
    updateObj.writeTo(&updateBSONBuilder);
    status = authzManager->updateRoleDocument(
            roleName,
            updateBSONBuilder.done(),
            writeConcern);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }

    return true;
}
bool run(const string& dbname,
         BSONObj& cmdObj,
         int options,
         string& errmsg,
         BSONObjBuilder& result,
         bool fromRepl) {
    AuthorizationManager* authzManager = getGlobalAuthorizationManager();
    AuthzDocumentsUpdateGuard updateGuard(authzManager);
    if (!updateGuard.tryLock("Revoke role delegation from user")) {
        addStatus(Status(ErrorCodes::LockBusy, "Could not lock auth data update lock."),
                  result);
        return false;
    }

    UserName userName;
    std::vector<RoleName> roles;
    BSONObj writeConcern;
    Status status = auth::parseUserRoleManipulationCommand(cmdObj,
                                                           "revokeDelegateRolesFromUser",
                                                           dbname,
                                                           authzManager,
                                                           &userName,
                                                           &roles,
                                                           &writeConcern);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }

    User::RoleDataMap userRoles;
    status = getCurrentUserRoles(authzManager, userName, &userRoles);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }

    for (vector<RoleName>::iterator it = roles.begin(); it != roles.end(); ++it) {
        RoleName& roleName = *it;
        User::RoleDataMap::iterator roleDataIt = userRoles.find(roleName);
        if (roleDataIt == userRoles.end()) {
            continue; // User already doesn't have the role, nothing to do
        }
        User::RoleData& role = roleDataIt->second;
        if (role.hasRole) {
            // If the user still has the role, need to leave it in the roles array
            role.canDelegate = false;
        } else {
            // If the user doesn't have the role, and now can't delegate it either, remove
            // the role from that user's roles array entirely
            userRoles.erase(roleDataIt);
        }
    }

    BSONArray newRolesBSONArray = rolesToBSONArray(userRoles);
    status = authzManager->updatePrivilegeDocument(
            userName, BSON("$set" << BSON("roles" << newRolesBSONArray)), writeConcern);

    // Must invalidate even on bad status - what if the write succeeded but the GLE failed?
    authzManager->invalidateUserByName(userName);
    if (!status.isOK()) {
        addStatus(status, result);
        return false;
    }

    return true;
}
bool CmdAuthenticate::authenticateCR(const string& dbname,
                                     BSONObj& cmdObj,
                                     string& errmsg,
                                     BSONObjBuilder& result) {

    string user = cmdObj.getStringField("user");

    if (!_areNonceAuthenticateCommandsEnabled) {
        // SERVER-8461, MONGODB-CR must be enabled for authenticating the internal user, so that
        // cluster members may communicate with each other.
        if (dbname != StringData("local", StringData::LiteralTag()) ||
            user != internalSecurity.user) {
            errmsg = _nonceAuthenticateCommandsDisabledMessage;
            result.append(saslCommandCodeFieldName, ErrorCodes::AuthenticationFailed);
            return false;
        }
    }

    string key = cmdObj.getStringField("key");
    string received_nonce = cmdObj.getStringField("nonce");

    if( user.empty() || key.empty() || received_nonce.empty() ) {
        log() << "field missing/wrong type in received authenticate command " << dbname << endl;
        errmsg = "auth fails";
        sleepmillis(10);
        result.append(saslCommandCodeFieldName, ErrorCodes::AuthenticationFailed);
        return false;
    }

    stringstream digestBuilder;

    {
        bool reject = false;
        ClientBasic *client = ClientBasic::getCurrent();
        AuthenticationSession *session = client->getAuthenticationSession();
        if (!session || session->getType() != AuthenticationSession::SESSION_TYPE_MONGO) {
            reject = true;
            LOG(1) << "auth: No pending nonce" << endl;
        }
        else {
            nonce64 nonce = static_cast<MongoAuthenticationSession*>(session)->getNonce();
            digestBuilder << hex << nonce;
            reject = digestBuilder.str() != received_nonce;
            if ( reject ) {
                LOG(1) << "auth: Authentication failed for " << dbname << '$' << user << endl;
            }
        }
        client->resetAuthenticationSession(NULL);

        if ( reject ) {
            log() << "auth: bad nonce received or getnonce not called. could be a driver bug or a "
                     "security attack. db:" << dbname << endl;
            errmsg = "auth fails";
            sleepmillis(30);
            result.append(saslCommandCodeFieldName, ErrorCodes::AuthenticationFailed);
            return false;
        }
    }

    BSONObj userObj;
    string pwd;
    Status status = getGlobalAuthorizationManager()->getPrivilegeDocument(
            dbname, UserName(user, dbname), &userObj);
    if (!status.isOK()) {
        log() << status.reason() << std::endl;
        errmsg = "auth fails";
        result.append(saslCommandCodeFieldName, ErrorCodes::AuthenticationFailed);
        return false;
    }
    pwd = userObj["pwd"].String();

    md5digest d;
    {
        digestBuilder << user << pwd;
        string done = digestBuilder.str();

        md5_state_t st;
        md5_init(&st);
        md5_append(&st, (const md5_byte_t *) done.c_str(), done.size());
        md5_finish(&st, d);
    }

    string computed = digestToString( d );

    if ( key != computed ) {
        log() << "auth: key mismatch " << user << ", ns:" << dbname << endl;
        errmsg = "auth fails";
        result.append(saslCommandCodeFieldName, ErrorCodes::AuthenticationFailed);
        return false;
    }

    AuthorizationSession* authorizationSession =
        ClientBasic::getCurrent()->getAuthorizationSession();
    Principal* principal = new Principal(UserName(user, dbname));
    principal->setImplicitPrivilegeAcquisition(true);
    authorizationSession->addAuthorizedPrincipal(principal);

    result.append( "dbname" , dbname );
    result.append( "user" , user );
    return true;
}
bool isSelf(const HostAndPort& hostAndPort) {
    // Fastpath: check if the host&port in question is bound to one
    // of the interfaces on this machine.
    // No need for ip match if the ports do not match
    if (hostAndPort.port() == serverGlobalParams.port) {
        std::vector<std::string> myAddrs = serverGlobalParams.bind_ip.empty()
            ? getBoundAddrs(IPv6Enabled())
            : std::vector<std::string>();

        if (!serverGlobalParams.bind_ip.empty()) {
            boost::split(myAddrs, serverGlobalParams.bind_ip, boost::is_any_of(", "));
        }

        const std::vector<std::string> hostAddrs =
            getAddrsForHost(hostAndPort.host(), hostAndPort.port(), IPv6Enabled());

        for (std::vector<std::string>::const_iterator i = myAddrs.begin();
             i != myAddrs.end(); ++i) {
            for (std::vector<std::string>::const_iterator j = hostAddrs.begin();
                 j != hostAddrs.end(); ++j) {
                if (*i == *j) {
                    return true;
                }
            }
        }
    }

    // Ensure that the server is up and ready to accept incoming network requests.
    const Listener* listener = Listener::getTimeTracker();
    if (!listener) {
        return false;
    }
    listener->waitUntilListening();

    try {
        DBClientConnection conn;
        conn.setSoTimeout(30);  // 30 second timeout

        // We need to avoid the isMaster call triggered by a normal connect, which would
        // cause a deadlock. 'isSelf' is called by the Replication Coordinator when validating
        // a replica set configuration document, but the 'isMaster' command requires a lock on
        // the replication coordinator to execute. As such we call 'connectSocketOnly', which
        // does not call 'isMaster'.
        if (!conn.connectSocketOnly(hostAndPort).isOK()) {
            return false;
        }

        if (getGlobalAuthorizationManager()->isAuthEnabled() && isInternalAuthSet()) {
            if (!conn.authenticateInternalUser()) {
                return false;
            }
        }
        BSONObj out;
        bool ok = conn.simpleCommand("admin", &out, "_isSelf");
        bool me = ok && out["id"].type() == jstOID && instanceId == out["id"].OID();

        return me;
    } catch (const std::exception& e) {
        warning() << "couldn't check isSelf (" << hostAndPort << ") " << e.what() << std::endl;
    }

    return false;
}
ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection(
    const HostAndPort& target, Date_t now, Milliseconds timeout) {
    stdx::unique_lock<stdx::mutex> lk(_mutex);

    // Clean up connections on stale/unused hosts
    _cleanUpStaleHosts_inlock(now);

    for (HostConnectionMap::iterator hostConns;
         (hostConns = _connections.find(target)) != _connections.end();) {
        // Clean up the requested host to remove stale/unused connections
        _cleanUpOlderThan_inlock(now, &hostConns->second);

        if (hostConns->second.empty()) {
            // prevent host from causing unnecessary cleanups
            _lastUsedHosts[hostConns->first] = kNeverTooStale;
            break;
        }

        _inUseConnections.splice(
            _inUseConnections.begin(), hostConns->second, hostConns->second.begin());

        const ConnectionList::iterator candidate = _inUseConnections.begin();
        lk.unlock();

        try {
            if (candidate->conn->isStillConnected()) {
                // setSoTimeout takes a double representing the number of seconds for send and
                // receive timeouts. Thus, we must express 'timeout' in milliseconds and divide by
                // 1000.0 to get the number of seconds with a fractional part.
                candidate->conn->setSoTimeout(durationCount<Milliseconds>(timeout) / 1000.0);
                return candidate;
            }
        } catch (...) {
            lk.lock();
            _destroyConnection_inlock(&_inUseConnections, candidate);
            throw;
        }

        lk.lock();
        _destroyConnection_inlock(&_inUseConnections, candidate);
    }

    // No idle connection in the pool; make a new one.
    lk.unlock();

    std::unique_ptr<DBClientConnection> conn;
    if (_hook) {
        conn.reset(new DBClientConnection(
            false,  // auto reconnect
            0,      // socket timeout
            [this, target](const executor::RemoteCommandResponse& isMasterReply) {
                return _hook->validateHost(target, isMasterReply);
            }));
    } else {
        conn.reset(new DBClientConnection());
    }

    // setSoTimeout takes a double representing the number of seconds for send and receive
    // timeouts. Thus, we must express 'timeout' in milliseconds and divide by 1000.0 to get
    // the number of seconds with a fractional part.
    conn->setSoTimeout(durationCount<Milliseconds>(timeout) / 1000.0);

    uassertStatusOK(conn->connect(target));
    conn->port().tag |= _messagingPortTags;

    if (getGlobalAuthorizationManager()->isAuthEnabled()) {
        uassert(ErrorCodes::AuthenticationFailed,
                "Missing credentials for authenticating as internal user",
                isInternalAuthSet());
        conn->auth(getInternalUserAuthParamsWithFallback());
    }

    if (_hook) {
        auto postConnectRequest = uassertStatusOK(_hook->makeRequest(target));

        // We might not have a postConnectRequest
        if (postConnectRequest != boost::none) {
            auto start = Date_t::now();
            auto reply =
                conn->runCommandWithMetadata(postConnectRequest->dbname,
                                             postConnectRequest->cmdObj.firstElementFieldName(),
                                             postConnectRequest->metadata,
                                             postConnectRequest->cmdObj);

            auto rcr = executor::RemoteCommandResponse(reply->getCommandReply().getOwned(),
                                                       reply->getMetadata().getOwned(),
                                                       Date_t::now() - start);

            uassertStatusOK(_hook->handleReply(target, std::move(rcr)));
        }
    }

    lk.lock();
    return _inUseConnections.insert(_inUseConnections.begin(), ConnectionInfo(conn.release(), now));
}