// The above signals will be processed by this thread only, in order to
// ensure the db and log mutexes aren't held.
void interruptThread() {
    int x;
    sigwait( &asyncSignals, &x );
    log() << "got kill or ctrl c or hup signal " << x << " (" << strsignal( x )
          << "), will terminate after current cmd ends" << endl;
    Client::initThread( "interruptThread" );
    exitCleanly( EXIT_KILL );
}
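For context, a minimal sketch (an assumption, not code from this file) of how the asyncSignals set waited on above is typically prepared: the signals are blocked in the spawning thread before interruptThread() is started, so only the dedicated thread dequeues them via sigwait(). The setup function name is hypothetical.

#include <signal.h>
#include <pthread.h>

static sigset_t asyncSignals;  // assumed to be the same set interruptThread() waits on

static void setupAsyncSignalSet() {
    sigemptyset(&asyncSignals);
    sigaddset(&asyncSignals, SIGINT);   // ctrl-c
    sigaddset(&asyncSignals, SIGTERM);  // kill
    sigaddset(&asyncSignals, SIGHUP);   // hup
    // Block these signals in the calling thread (and any thread it spawns later)
    // so the dedicated thread is the only one that dequeues them with sigwait().
    pthread_sigmask(SIG_BLOCK, &asyncSignals, nullptr);
}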
int runDbTests(int argc, char** argv) {
    frameworkGlobalParams.perfHist = 1;
    frameworkGlobalParams.seed = time( 0 );
    frameworkGlobalParams.runsPerTest = 1;

    Client::initThread("testsuite");

    srand( (unsigned) frameworkGlobalParams.seed );
    printGitVersion();
    printOpenSSLVersion();
    printSysInfo();

    getGlobalEnvironment()->setGlobalStorageEngine(storageGlobalParams.engine);

    TestWatchDog twd;
    twd.go();

    int ret = ::mongo::unittest::Suite::run(frameworkGlobalParams.suites,
                                            frameworkGlobalParams.filter,
                                            frameworkGlobalParams.runsPerTest);

    cc().shutdown();
    exitCleanly( (ExitCode)ret ); // so everything shuts down cleanly
    return ret;
}
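As a usage note, a minimal sketch of a test case that Suite::run() above would discover and execute, assuming the usual TEST/ASSERT_EQUALS macros from the mongo unittest framework; the suite and test names here are hypothetical.

#include "mongo/unittest/unittest.h"

namespace {

// A trivial test case; Suite::run() picks it up by suite name ("ExampleSuite").
TEST(ExampleSuite, AdditionWorks) {
    ASSERT_EQUALS(4, 2 + 2);
}

}  // namespace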
void threadRun( MessagingPort * inPort) {
    TicketHolderReleaser connTicketReleaser( &connTicketHolder );
    setThreadName( "conn" );

    assert( inPort );
    inPort->setLogLevel(1);
    scoped_ptr<MessagingPort> p( inPort );

    p->postFork();

    string otherSide;

    Message m;
    try {
        LastError * le = new LastError();
        lastError.reset( le ); // lastError now has ownership

        otherSide = p->remoteString();

        handler->connected( p.get() );

        while ( ! inShutdown() ) {
            m.reset();
            p->clearCounters();

            if ( ! p->recv(m) ) {
                if( !cmdLine.quiet )
                    log() << "end connection " << otherSide << endl;
                p->shutdown();
                break;
            }

            handler->process( m , p.get() , le );
            networkCounter.hit( p->getBytesIn() , p->getBytesOut() );
        }
    }
    catch ( AssertionException& e ) {
        log() << "AssertionException handling request, closing client connection: " << e << endl;
        p->shutdown();
    }
    catch ( SocketException& e ) {
        log() << "SocketException handling request, closing client connection: " << e << endl;
        p->shutdown();
    }
    catch ( const ClockSkewException & ) {
        log() << "ClockSkewException - shutting down" << endl;
        exitCleanly( EXIT_CLOCK_SKEW );
    }
    catch ( std::exception &e ) {
        error() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
        dbexit( EXIT_UNCAUGHT );
    }
    catch ( ... ) {
        error() << "Uncaught exception, terminating" << endl;
        dbexit( EXIT_UNCAUGHT );
    }

    handler->disconnected( p.get() );
}
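To illustrate the callback contract threadRun() relies on, a hypothetical no-op handler. The base-class name and exact signatures are assumptions inferred from the connected/process/disconnected calls above and from the process() override shown later in this section; they are not copied from the source tree.

// Hypothetical sketch, not MongoDB code.
class NoOpMessageHandler : public MessageHandler {
public:
    virtual void connected( AbstractMessagingPort* p ) {
        // Called once per connection, before the receive loop starts.
        log() << "client connected" << endl;
    }
    virtual void process( Message& m , AbstractMessagingPort* p , LastError * le ) {
        // A real handler assembles a DbResponse here; this sketch ignores the message.
    }
    virtual void disconnected( AbstractMessagingPort* p ) {
        // Called after the receive loop exits, even on exceptions.
        log() << "client disconnected" << endl;
    }
};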
void eventProcessingThread() {
    std::string eventName = getShutdownSignalName(ProcessId::getCurrent().asUInt32());

    HANDLE event = CreateEventA(NULL, TRUE, FALSE, eventName.c_str());
    if (event == NULL) {
        warning() << "eventProcessingThread CreateEvent failed: " << errnoWithDescription();
        return;
    }

    ON_BLOCK_EXIT(CloseHandle, event);

    int returnCode = WaitForSingleObject(event, INFINITE);
    if (returnCode != WAIT_OBJECT_0) {
        if (returnCode == WAIT_FAILED) {
            warning() << "eventProcessingThread WaitForSingleObject failed: "
                      << errnoWithDescription();
            return;
        } else {
            warning() << "eventProcessingThread WaitForSingleObject failed: "
                      << errnoWithDescription(returnCode);
            return;
        }
    }

    Client::initThread("eventTerminate");
    log() << "shutdown event signaled, will terminate after current cmd ends";
    exitCleanly(EXIT_CLEAN);
}
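The signalling side is not shown here. Below is a minimal sketch (an assumption, using plain Win32 calls; the function name is hypothetical) of how another process could request shutdown by opening the same named event and setting it, which releases the WaitForSingleObject call above.

#include <windows.h>
#include <string>

// Illustrative only: open the named shutdown event of a running process and set it.
bool requestShutdownByEvent(const std::string& eventName) {
    HANDLE event = OpenEventA(EVENT_MODIFY_STATE, FALSE, eventName.c_str());
    if (event == NULL) {
        return false;  // no such event: process not running, or name mismatch
    }
    BOOL ok = SetEvent(event);  // wakes eventProcessingThread()
    CloseHandle(event);
    return ok == TRUE;
}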
static void serviceShutdown( const char* controlCodeName ) {
    Client::initThread( "serviceShutdown" );
    log() << "got " << controlCodeName << " request from Windows Service Controller, "
          << ( inShutdown() ? "already in shutdown" : "will terminate after current cmd ends" )
          << endl;
    ServiceController::reportStatus( SERVICE_STOP_PENDING );
    if ( ! inShutdown() ) {
        exitCleanly( EXIT_WINDOWS_SERVICE_STOP );
        ServiceController::reportStatus( SERVICE_STOPPED );
    }
}
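A hedged sketch of how serviceShutdown() might be reached: a Windows service control handler that maps stop/shutdown control codes onto it. The handler name and registration point are illustrative assumptions, not the actual ntservice wiring.

#include <windows.h>

// Hypothetical control handler; a real one is registered from ServiceMain, e.g.
//   RegisterServiceCtrlHandlerA("MongoDB", exampleServiceCtrlHandler);
static void WINAPI exampleServiceCtrlHandler(DWORD controlCode) {
    switch (controlCode) {
        case SERVICE_CONTROL_STOP:
            serviceShutdown("SERVICE_CONTROL_STOP");
            break;
        case SERVICE_CONTROL_SHUTDOWN:
            serviceShutdown("SERVICE_CONTROL_SHUTDOWN");
            break;
        default:
            break;  // other control codes are ignored in this sketch
    }
}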
virtual void process( Message& m , AbstractMessagingPort* port , LastError * le) {
    while ( true ) {
        if ( inShutdown() ) {
            log() << "got request after shutdown()" << endl;
            break;
        }

        lastError.startRequest( m , le );

        DbResponse dbresponse;
        try {
            assembleResponse( m, dbresponse, port->remote() );
        }
        catch ( const ClockSkewException & ) {
            log() << "ClockSkewException - shutting down" << endl;
            exitCleanly( EXIT_CLOCK_SKEW );
        }

        if ( dbresponse.response ) {
            port->reply(m, *dbresponse.response, dbresponse.responseTo);
            if( dbresponse.exhaust ) {
                MsgData *header = dbresponse.response->header();
                QueryResult *qr = (QueryResult *) header;
                long long cursorid = qr->cursorId;
                if( cursorid ) {
                    verify( dbresponse.exhaust && *dbresponse.exhaust != 0 );
                    string ns = dbresponse.exhaust; // before reset() frees it...
                    m.reset();
                    BufBuilder b(512);
                    b.appendNum((int) 0 /*size set later in appendData()*/);
                    b.appendNum(header->id);
                    b.appendNum(header->responseTo);
                    b.appendNum((int) dbGetMore);
                    b.appendNum((int) 0); // reserved
                    b.appendStr(ns);
                    b.appendNum((int) 0); // ntoreturn
                    b.appendNum(cursorid);
                    m.appendData(b.buf(), b.len());
                    b.decouple();
                    DEV log() << "exhaust=true sending more" << endl;
                    beNice();
                    continue; // this goes back to the top of the loop
                }
            }
        }
        break;
    }
}
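For reference, the dbGetMore body assembled above follows the standard OP_GET_MORE layout: reserved int32, namespace cstring, numberToReturn, cursorID. A plain-STL sketch of the same body, illustrative only; it writes fields in host byte order, whereas the real BufBuilder path emits little-endian.

#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

// Build an OP_GET_MORE body (everything after the 16-byte message header).
std::vector<char> buildGetMoreBody(const std::string& ns, int64_t cursorId) {
    std::vector<char> out;
    auto appendBytes = [&out](const void* p, std::size_t n) {
        const char* c = static_cast<const char*>(p);
        out.insert(out.end(), c, c + n);
    };
    int32_t zero = 0;       // reserved, always 0
    int32_t ntoreturn = 0;  // 0: let the server pick the batch size
    appendBytes(&zero, sizeof(zero));
    appendBytes(ns.c_str(), ns.size() + 1);  // cstring, including the trailing NUL
    appendBytes(&ntoreturn, sizeof(ntoreturn));
    appendBytes(&cursorId, sizeof(cursorId));
    return out;
}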
int runDbTests(int argc, char** argv) {
    frameworkGlobalParams.perfHist = 1;
    frameworkGlobalParams.seed = time(0);
    frameworkGlobalParams.runsPerTest = 1;

    registerShutdownTask([] {
        // We drop the scope cache because leak sanitizer can't see across the
        // thread we use for proxying MozJS requests. Dropping the cache cleans up
        // the memory and makes leak sanitizer happy.
        ScriptEngine::dropScopeCache();

        // We may be shut down before we have a global storage
        // engine.
        if (!getGlobalServiceContext()->getGlobalStorageEngine())
            return;

        getGlobalServiceContext()->shutdownGlobalStorageEngineCleanly();
    });

    Client::initThread("testsuite");

    auto globalServiceContext = getGlobalServiceContext();

    // DBTests run as if in the database, so allow them to create direct clients.
    DBDirectClientFactory::get(globalServiceContext)
        .registerImplementation([](OperationContext* opCtx) {
            return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
        });

    srand((unsigned)frameworkGlobalParams.seed);

    checked_cast<ServiceContextMongoD*>(globalServiceContext)->createLockFile();
    globalServiceContext->initializeGlobalStorageEngine();

    auto registry = stdx::make_unique<OpObserverRegistry>();
    registry->addObserver(stdx::make_unique<UUIDCatalogObserver>());
    globalServiceContext->setOpObserver(std::move(registry));

    int ret = unittest::Suite::run(frameworkGlobalParams.suites,
                                   frameworkGlobalParams.filter,
                                   frameworkGlobalParams.runsPerTest);

    // So everything shuts down cleanly
    exitCleanly((ExitCode)ret);
    return ret;
}
int runDbTests(int argc, char** argv) {
    frameworkGlobalParams.perfHist = 1;
    frameworkGlobalParams.seed = time(0);
    frameworkGlobalParams.runsPerTest = 1;

    Client::initThread("testsuite");

    srand((unsigned)frameworkGlobalParams.seed);
    printBuildInfo();

    getGlobalServiceContext()->initializeGlobalStorageEngine();

    {
        auto txn = cc().makeOperationContext();

        // Initialize the sharding state so we can run sharding tests in isolation
        auto connectHook = stdx::make_unique<CustomConnectHook>(txn.get());
        ConnectionString::setConnectionHook(connectHook.get());
        ON_BLOCK_EXIT([] { ConnectionString::setConnectionHook(nullptr); });

        ShardingState::get(txn.get())->initialize(txn.get(), "$dummy:10000");
    }

    // Note: ShardingState::initialize also initializes the distLockMgr.
    {
        auto txn = cc().makeOperationContext();
        auto distLockMgr = dynamic_cast<LegacyDistLockManager*>(
            grid.forwardingCatalogManager()->getDistLockManager());
        if (distLockMgr) {
            distLockMgr->enablePinger(false);
        }
    }

    int ret = unittest::Suite::run(frameworkGlobalParams.suites,
                                   frameworkGlobalParams.filter,
                                   frameworkGlobalParams.runsPerTest);

    // So everything shuts down cleanly
    exitCleanly((ExitCode)ret);
    return ret;
}
int runDbTests(int argc, char** argv) {
    frameworkGlobalParams.perfHist = 1;
    frameworkGlobalParams.seed = time(0);
    frameworkGlobalParams.runsPerTest = 1;

    Client::initThread("testsuite");

    srand((unsigned)frameworkGlobalParams.seed);
    printGitVersion();
    printOpenSSLVersion();

    getGlobalServiceContext()->initializeGlobalStorageEngine();

    {
        auto txn = cc().makeOperationContext();

        // Initialize the sharding state so we can run sharding tests in isolation
        ShardingState::get(getGlobalServiceContext())->initialize(txn.get(), "$dummy:10000");
    }

    // Note: ShardingState::initialize also initializes the distLockMgr.
    {
        auto txn = cc().makeOperationContext();
        auto distLockMgr = dynamic_cast<LegacyDistLockManager*>(
            grid.catalogManager(txn.get())->getDistLockManager());
        if (distLockMgr) {
            distLockMgr->enablePinger(false);
        }
    }

    int ret = unittest::Suite::run(frameworkGlobalParams.suites,
                                   frameworkGlobalParams.filter,
                                   frameworkGlobalParams.runsPerTest);

    // So everything shuts down cleanly
    exitCleanly((ExitCode)ret);
    return ret;
}
ExitCode _initAndListen(int listenPort) {
    Client::initThread("initandlisten");
    _initWireSpec();
    auto globalServiceContext = getGlobalServiceContext();

    globalServiceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));
    globalServiceContext->setOpObserver(stdx::make_unique<OpObserver>());

    DBDirectClientFactory::get(globalServiceContext)
        .registerImplementation([](OperationContext* txn) {
            return std::unique_ptr<DBClientBase>(new DBDirectClient(txn));
        });

    const repl::ReplSettings& replSettings =
        repl::getGlobalReplicationCoordinator()->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if (replSettings.isMaster())
            l << " master=" << replSettings.isMaster();
        if (replSettings.isSlave())
            l << " slave=" << (int)replSettings.isSlave();

        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

#if defined(_WIN32)
    VersionInfoInterface::instance().logTargetMinOS();
#endif

    logProcessDetails();

    checked_cast<ServiceContextMongoD*>(getGlobalServiceContext())->createLockFile();

    transport::TransportLayerLegacy::Options options;
    options.port = listenPort;
    options.ipList = serverGlobalParams.bind_ip;

    auto sep =
        stdx::make_unique<ServiceEntryPointMongod>(getGlobalServiceContext()->getTransportLayer());
    auto sepPtr = sep.get();

    getGlobalServiceContext()->setServiceEntryPoint(std::move(sep));

    // Create, start, and attach the TL
    auto transportLayer = stdx::make_unique<transport::TransportLayerLegacy>(options, sepPtr);
    auto res = transportLayer->setup();
    if (!res.isOK()) {
        error() << "Failed to set up listener: " << res;
        return EXIT_NET_ERROR;
    }

    std::shared_ptr<DbWebServer> dbWebServer;
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        dbWebServer.reset(new DbWebServer(serverGlobalParams.bind_ip,
                                          serverGlobalParams.port + 1000,
                                          getGlobalServiceContext(),
                                          new RestAdminAccess()));
        if (!dbWebServer->setupSockets()) {
            error() << "Failed to set up sockets for HTTP interface during startup.";
            return EXIT_NET_ERROR;
        }
    }

    getGlobalServiceContext()->initializeGlobalStorageEngine();

#ifdef MONGO_CONFIG_WIREDTIGER_ENABLED
    if (WiredTigerCustomizationHooks::get(getGlobalServiceContext())->restartRequired()) {
        exitCleanly(EXIT_CLEAN);
    }
#endif

    // Warn if we detect configurations for multiple registered storage engines in
    // the same configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        BSONObj storageParamsObj = storageElement.Obj();
        BSONObjIterator i = storageParamsObj.begin();
        while (i.more()) {
            BSONElement e = i.next();
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (getGlobalServiceContext()->isRegisteredStorageEngine(e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    if (!getGlobalServiceContext()->getGlobalStorageEngine()->getSnapshotManager()) {
        if (moe::startupOptionsParsed.count("replication.enableMajorityReadConcern") &&
            moe::startupOptionsParsed["replication.enableMajorityReadConcern"].as<bool>()) {
            // Note: we are intentionally only erroring if the user explicitly requested that we
            // enable majority read concern. We do not error if they are implicitly enabled for
            // CSRS because a required step in the upgrade procedure can involve an mmapv1 node in
            // the CSRS in the REMOVED state. This is handled by the TopologyCoordinator.
            invariant(replSettings.isMajorityReadConcernEnabled());
            severe() << "Majority read concern requires a storage engine that supports"
                     << " snapshots, such as wiredTiger. " << storageGlobalParams.engine
                     << " does not support snapshots.";
            exitCleanly(EXIT_BADOPTIONS);
        }
    }

    logMongodStartupWarnings(storageGlobalParams, serverGlobalParams);

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    if (!storageGlobalParams.readOnly) {
        boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
    }

    if (mmapv1GlobalOptions.journalOptions & MMAPV1Options::JournalRecoverOnly)
        return EXIT_NET_ERROR;

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
    }

    auto startupOpCtx = getGlobalServiceContext()->makeOperationContext(&cc());

    repairDatabasesAndCheckVersion(startupOpCtx.get());

    if (storageGlobalParams.upgrade) {
        log() << "finished checking dbs";
        exitCleanly(EXIT_CLEAN);
    }

    uassertStatusOK(getGlobalAuthorizationManager()->initialize(startupOpCtx.get()));

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned)(curTimeMicros64() ^ startupSrandTimer.micros()));

    // The snapshot thread provides historical collection level and lock statistics for use
    // by the web interface. Only needed when HTTP is enabled.
    if (serverGlobalParams.isHttpInterfaceEnabled) {
        statsSnapshotThread.go();

        invariant(dbWebServer);
        stdx::thread web(stdx::bind(&webServerListenThread, dbWebServer));
        web.detach();
    }

#ifndef _WIN32
    mongo::signalForkSuccess();
#endif

    AuthorizationManager* globalAuthzManager = getGlobalAuthorizationManager();
    if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
        Status status = authindex::verifySystemIndexes(startupOpCtx.get());
        if (!status.isOK()) {
            log() << redact(status);
            exitCleanly(EXIT_NEED_UPGRADE);
        }

        // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
        int foundSchemaVersion;
        status =
            globalAuthzManager->getAuthorizationVersion(startupOpCtx.get(), &foundSchemaVersion);
        if (!status.isOK()) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but startup could not verify schema version: " << status;
            exitCleanly(EXIT_NEED_UPGRADE);
        }
        if (foundSchemaVersion < AuthorizationManager::schemaVersion26Final) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but found " << foundSchemaVersion << ". In order to upgrade "
                  << "the auth schema, first downgrade MongoDB binaries to version "
                  << "2.6 and then run the authSchemaUpgrade command.";
            exitCleanly(EXIT_NEED_UPGRADE);
        }
    } else if (globalAuthzManager->isAuthEnabled()) {
        error() << "Auth must be disabled when starting without auth schema validation";
        exitCleanly(EXIT_BADOPTIONS);
    } else {
        // If authSchemaValidation is disabled and server is running without auth,
        // warn the user and continue startup without authSchema metadata checks.
        log() << startupWarningsLog;
        log() << "** WARNING: Startup auth schema validation checks are disabled for the "
                 "database."
              << startupWarningsLog;
        log() << "** This mode should only be used to manually repair corrupted auth "
                 "data."
              << startupWarningsLog;
    }

    auto shardingInitialized =
        uassertStatusOK(ShardingState::get(startupOpCtx.get())
                            ->initializeShardingAwarenessIfNeeded(startupOpCtx.get()));
    if (shardingInitialized) {
        reloadShardRegistryUntilSuccess(startupOpCtx.get());
    }

    if (!storageGlobalParams.readOnly) {
        logStartup(startupOpCtx.get());

        startFTDC();

        getDeleter()->startWorkers();

        restartInProgressIndexesFromLastShutdown(startupOpCtx.get());

        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
            // Note: For replica sets, ShardingStateRecovery happens on transition to primary.
            if (!repl::getGlobalReplicationCoordinator()->isReplEnabled()) {
                uassertStatusOK(ShardingStateRecovery::recover(startupOpCtx.get()));
            }
        } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
            uassertStatusOK(
                initializeGlobalShardingStateForMongod(startupOpCtx.get(),
                                                       ConnectionString::forLocal(),
                                                       kDistLockProcessIdForConfigServer));
            Balancer::create(startupOpCtx->getServiceContext());
        }

        repl::getGlobalReplicationCoordinator()->startup(startupOpCtx.get());

        const unsigned long long missingRepl =
            checkIfReplMissingFromCommandLine(startupOpCtx.get());
        if (missingRepl) {
            log() << startupWarningsLog;
            log() << "** WARNING: mongod started without --replSet yet " << missingRepl
                  << " documents are present in local.system.replset" << startupWarningsLog;
            log() << "** Restart with --replSet unless you are doing maintenance and "
                  << " no other clients are connected." << startupWarningsLog;
            log() << "** The TTL collection monitor will not start because of this."
                  << startupWarningsLog;
            log() << "** ";
            log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
            log() << startupWarningsLog;
        } else {
            startTTLBackgroundJob();
        }

        if (!replSettings.usingReplSets() && !replSettings.isSlave() &&
            storageGlobalParams.engine != "devnull") {
            ScopedTransaction transaction(startupOpCtx.get(), MODE_X);
            Lock::GlobalWrite lk(startupOpCtx.get()->lockState());
            FeatureCompatibilityVersion::setIfCleanStartup(
                startupOpCtx.get(), repl::StorageInterface::get(getGlobalServiceContext()));
        }
    }

    startClientCursorMonitor();

    PeriodicTask::startRunningPeriodicTasks();

    // MessageServer::run will return when exit code closes its socket and we don't need the
    // operation context anymore
    startupOpCtx.reset();

    auto start = getGlobalServiceContext()->addAndStartTransportLayer(std::move(transportLayer));
    if (!start.isOK()) {
        error() << "Failed to start the listener: " << start.toString();
        return EXIT_NET_ERROR;
    }

    return waitForShutdown();
}
ExitCode _initAndListen(int listenPort) {
    Client::initThread("initandlisten");

    initWireSpec();

    auto serviceContext = getGlobalServiceContext();

    serviceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10)));

    auto opObserverRegistry = stdx::make_unique<OpObserverRegistry>();
    opObserverRegistry->addObserver(stdx::make_unique<OpObserverShardingImpl>());
    opObserverRegistry->addObserver(stdx::make_unique<UUIDCatalogObserver>());

    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
        opObserverRegistry->addObserver(stdx::make_unique<ShardServerOpObserver>());
    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
        opObserverRegistry->addObserver(stdx::make_unique<ConfigServerOpObserver>());
    }

    setupFreeMonitoringOpObserver(opObserverRegistry.get());

    serviceContext->setOpObserver(std::move(opObserverRegistry));

    DBDirectClientFactory::get(serviceContext).registerImplementation([](OperationContext* opCtx) {
        return std::unique_ptr<DBClientBase>(new DBDirectClient(opCtx));
    });

    const repl::ReplSettings& replSettings =
        repl::ReplicationCoordinator::get(serviceContext)->getSettings();

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log(LogComponent::kControl);
        l << "MongoDB starting : pid=" << pid << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;

        const bool is32bit = sizeof(int*) == 4;
        l << (is32bit ? " 32" : " 64") << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log(LogComponent::kControl) << "DEBUG build (which is slower)" << endl;

#if defined(_WIN32)
    VersionInfoInterface::instance().logTargetMinOS();
#endif

    logProcessDetails();

    serviceContext->setServiceEntryPoint(
        stdx::make_unique<ServiceEntryPointMongod>(serviceContext));

    if (!storageGlobalParams.repair) {
        auto tl = transport::TransportLayerManager::createWithConfig(&serverGlobalParams,
                                                                     serviceContext);
        auto res = tl->setup();
        if (!res.isOK()) {
            error() << "Failed to set up listener: " << res;
            return EXIT_NET_ERROR;
        }
        serviceContext->setTransportLayer(std::move(tl));
    }

    // Set up the periodic runner for background job execution. This is required to be running
    // before the storage engine is initialized.
    auto runner = makePeriodicRunner(serviceContext);
    runner->startup();
    serviceContext->setPeriodicRunner(std::move(runner));

    initializeStorageEngine(serviceContext, StorageEngineInitFlags::kNone);

#ifdef MONGO_CONFIG_WIREDTIGER_ENABLED
    if (EncryptionHooks::get(serviceContext)->restartRequired()) {
        exitCleanly(EXIT_CLEAN);
    }
#endif

    // Warn if we detect configurations for multiple registered storage engines in the same
    // configuration file/environment.
    if (serverGlobalParams.parsedOpts.hasField("storage")) {
        BSONElement storageElement = serverGlobalParams.parsedOpts.getField("storage");
        invariant(storageElement.isABSONObj());
        for (auto&& e : storageElement.Obj()) {
            // Ignore if field name under "storage" matches current storage engine.
            if (storageGlobalParams.engine == e.fieldName()) {
                continue;
            }

            // Warn if field name matches non-active registered storage engine.
            if (isRegisteredStorageEngine(serviceContext, e.fieldName())) {
                warning() << "Detected configuration for non-active storage engine "
                          << e.fieldName() << " when current storage engine is "
                          << storageGlobalParams.engine;
            }
        }
    }

    // Disallow running a storage engine that doesn't support capped collections with --profile
    if (!serviceContext->getStorageEngine()->supportsCappedCollections() &&
        serverGlobalParams.defaultProfile != 0) {
        log() << "Running " << storageGlobalParams.engine << " with profiling is not supported. "
              << "Make sure you are not using --profile.";
        exitCleanly(EXIT_BADOPTIONS);
    }

    // Disallow running WiredTiger with --nojournal in a replica set
    if (storageGlobalParams.engine == "wiredTiger" && !storageGlobalParams.dur &&
        replSettings.usingReplSets()) {
        log() << "Running wiredTiger without journaling in a replica set is not "
              << "supported. Make sure you are not using --nojournal and that "
              << "storage.journal.enabled is not set to 'false'.";
        exitCleanly(EXIT_BADOPTIONS);
    }

    logMongodStartupWarnings(storageGlobalParams, serverGlobalParams, serviceContext);

#ifdef MONGO_CONFIG_SSL
    if (sslGlobalParams.sslAllowInvalidCertificates &&
        ((serverGlobalParams.clusterAuthMode.load() == ServerGlobalParams::ClusterAuthMode_x509) ||
         sequenceContains(saslGlobalParams.authenticationMechanisms, "MONGODB-X509"))) {
        log() << "** WARNING: While invalid X509 certificates may be used to" << startupWarningsLog;
        log() << "** connect to this server, they will not be considered" << startupWarningsLog;
        log() << "** permissible for authentication." << startupWarningsLog;
        log() << startupWarningsLog;
    }
#endif

    {
        std::stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }

    initializeSNMP();

    if (!storageGlobalParams.readOnly) {
        boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");
    }

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
    }

    auto startupOpCtx = serviceContext->makeOperationContext(&cc());

    bool canCallFCVSetIfCleanStartup =
        !storageGlobalParams.readOnly && (storageGlobalParams.engine != "devnull");
    if (canCallFCVSetIfCleanStartup && !replSettings.usingReplSets()) {
        Lock::GlobalWrite lk(startupOpCtx.get());
        FeatureCompatibilityVersion::setIfCleanStartup(
            startupOpCtx.get(), repl::StorageInterface::get(serviceContext));
    }

    auto swNonLocalDatabases = repairDatabasesAndCheckVersion(startupOpCtx.get());
    if (!swNonLocalDatabases.isOK()) {
        // SERVER-31611 introduced a return value to `repairDatabasesAndCheckVersion`. Previously,
        // a failing condition would fassert. SERVER-31611 covers a case where the binary (3.6) is
        // refusing to start up because it refuses acknowledgement of FCV 3.2 and requires the
        // user to start up with an older binary. Thus shutting down the server must leave the
        // datafiles in a state that the older binary can start up. This requires going through a
        // clean shutdown.
        //
        // The invariant is *not* a statement that `repairDatabasesAndCheckVersion` must return
        // `MustDowngrade`. Instead, it is meant as a guardrail to protect future developers from
        // accidentally buying into this behavior. New errors that are returned from the method
        // may or may not want to go through a clean shutdown, and they likely won't want the
        // program to return an exit code of `EXIT_NEED_DOWNGRADE`.
        severe(LogComponent::kControl) << "** IMPORTANT: "
                                       << swNonLocalDatabases.getStatus().reason();
        invariant(swNonLocalDatabases == ErrorCodes::MustDowngrade);
        exitCleanly(EXIT_NEED_DOWNGRADE);
    }

    // Assert that the in-memory featureCompatibilityVersion parameter has been explicitly set. If
    // we are part of a replica set and are started up with no data files, we do not set the
    // featureCompatibilityVersion until a primary is chosen. For this case, we expect the in-memory
    // featureCompatibilityVersion parameter to still be uninitialized until after startup.
    if (canCallFCVSetIfCleanStartup &&
        (!replSettings.usingReplSets() || swNonLocalDatabases.getValue())) {
        invariant(serverGlobalParams.featureCompatibility.isVersionInitialized());
    }

    if (storageGlobalParams.upgrade) {
        log() << "finished checking dbs";
        exitCleanly(EXIT_CLEAN);
    }

    // Start up health log writer thread.
    HealthLog::get(startupOpCtx.get()).startup();

    auto const globalAuthzManager = AuthorizationManager::get(serviceContext);
    uassertStatusOK(globalAuthzManager->initialize(startupOpCtx.get()));

    // This is for security on certain platforms (nonce generation)
    srand((unsigned)(curTimeMicros64()) ^ (unsigned(uintptr_t(&startupOpCtx))));

    if (globalAuthzManager->shouldValidateAuthSchemaOnStartup()) {
        Status status = verifySystemIndexes(startupOpCtx.get());
        if (!status.isOK()) {
            log() << redact(status);
            if (status == ErrorCodes::AuthSchemaIncompatible) {
                exitCleanly(EXIT_NEED_UPGRADE);
            } else if (status == ErrorCodes::NotMaster) {
                // Try creating the indexes if we become master. If we do not become master,
                // the master will create the indexes and we will replicate them.
            } else {
                quickExit(EXIT_FAILURE);
            }
        }

        // SERVER-14090: Verify that auth schema version is schemaVersion26Final.
        int foundSchemaVersion;
        status =
            globalAuthzManager->getAuthorizationVersion(startupOpCtx.get(), &foundSchemaVersion);
        if (!status.isOK()) {
            log() << "Auth schema version is incompatible: "
                  << "User and role management commands require auth data to have "
                  << "at least schema version " << AuthorizationManager::schemaVersion26Final
                  << " but startup could not verify schema version: " << status;
            log() << "To manually repair the 'authSchema' document in the admin.system.version "
                     "collection, start up with --setParameter "
                     "startupAuthSchemaValidation=false to disable validation.";
            exitCleanly(EXIT_NEED_UPGRADE);
        }

        if (foundSchemaVersion <= AuthorizationManager::schemaVersion26Final) {
            log() << "This server is using MONGODB-CR, an authentication mechanism which "
                  << "has been removed from MongoDB 4.0. In order to upgrade the auth schema, "
                  << "first downgrade MongoDB binaries to version 3.6 and then run the "
                  << "authSchemaUpgrade command. "
                  << "See http://dochub.mongodb.org/core/3.0-upgrade-to-scram-sha-1";
            exitCleanly(EXIT_NEED_UPGRADE);
        }
    } else if (globalAuthzManager->isAuthEnabled()) {
        error() << "Auth must be disabled when starting without auth schema validation";
        exitCleanly(EXIT_BADOPTIONS);
    } else {
        // If authSchemaValidation is disabled and server is running without auth,
        // warn the user and continue startup without authSchema metadata checks.
        log() << startupWarningsLog;
        log() << "** WARNING: Startup auth schema validation checks are disabled for the "
                 "database."
              << startupWarningsLog;
        log() << "** This mode should only be used to manually repair corrupted auth "
                 "data."
              << startupWarningsLog;
    }

    // This function may take the global lock.
    auto shardingInitialized = ShardingInitializationMongoD::get(startupOpCtx.get())
                                   ->initializeShardingAwarenessIfNeeded(startupOpCtx.get());
    if (shardingInitialized) {
        waitForShardRegistryReload(startupOpCtx.get()).transitional_ignore();
    }

    auto storageEngine = serviceContext->getStorageEngine();
    invariant(storageEngine);
    BackupCursorHooks::initialize(serviceContext, storageEngine);

    if (!storageGlobalParams.readOnly) {
        if (storageEngine->supportsCappedCollections()) {
            logStartup(startupOpCtx.get());
        }

        startMongoDFTDC();

        startFreeMonitoring(serviceContext);

        restartInProgressIndexesFromLastShutdown(startupOpCtx.get());

        if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
            // Note: For replica sets, ShardingStateRecovery happens on transition to primary.
            if (!repl::ReplicationCoordinator::get(startupOpCtx.get())->isReplEnabled()) {
                if (ShardingState::get(startupOpCtx.get())->enabled()) {
                    uassertStatusOK(ShardingStateRecovery::recover(startupOpCtx.get()));
                }
            }
        } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
            initializeGlobalShardingStateForMongoD(startupOpCtx.get(),
                                                   ConnectionString::forLocal(),
                                                   kDistLockProcessIdForConfigServer);

            Balancer::create(startupOpCtx->getServiceContext());

            ShardingCatalogManager::create(
                startupOpCtx->getServiceContext(),
                makeShardingTaskExecutor(executor::makeNetworkInterface("AddShard-TaskExecutor")));

            Grid::get(startupOpCtx.get())->setShardingInitialized();
        } else if (replSettings.usingReplSets()) {  // standalone replica set
            auto keysCollectionClient = stdx::make_unique<KeysCollectionClientDirect>();
            auto keyManager = std::make_shared<KeysCollectionManager>(
                KeysCollectionManager::kKeyManagerPurposeString,
                std::move(keysCollectionClient),
                Seconds(KeysRotationIntervalSec));
            keyManager->startMonitoring(startupOpCtx->getServiceContext());

            LogicalTimeValidator::set(startupOpCtx->getServiceContext(),
                                      stdx::make_unique<LogicalTimeValidator>(keyManager));
        }

        repl::ReplicationCoordinator::get(startupOpCtx.get())->startup(startupOpCtx.get());

        const unsigned long long missingRepl =
            checkIfReplMissingFromCommandLine(startupOpCtx.get());
        if (missingRepl) {
            log() << startupWarningsLog;
            log() << "** WARNING: mongod started without --replSet yet " << missingRepl
                  << " documents are present in local.system.replset." << startupWarningsLog;
            log() << "** Database contents may appear inconsistent with the oplog and may "
                     "appear to not contain"
                  << startupWarningsLog;
            log() << "** writes that were visible when this node was running as part of a "
                     "replica set."
                  << startupWarningsLog;
            log() << "** Restart with --replSet unless you are doing maintenance and no "
                     "other clients are connected."
                  << startupWarningsLog;
            log() << "** The TTL collection monitor will not start because of this."
                  << startupWarningsLog;
            log() << "** ";
            log() << " For more info see http://dochub.mongodb.org/core/ttlcollections";
            log() << startupWarningsLog;
        } else {
            startTTLBackgroundJob();
        }

        if (replSettings.usingReplSets() || !internalValidateFeaturesAsMaster) {
            serverGlobalParams.validateFeaturesAsMaster.store(false);
        }
    }

    startClientCursorMonitor();

    PeriodicTask::startRunningPeriodicTasks();

    SessionKiller::set(serviceContext,
                       std::make_shared<SessionKiller>(serviceContext, killSessionsLocal));

    // Start up a background task to periodically check for and kill expired transactions; and a
    // background task to periodically check for and decrease cache pressure by decreasing the
    // target size setting for the storage engine's window of available snapshots.
    //
    // Only do this on storage engines supporting snapshot reads, which hold resources we wish to
    // release periodically in order to avoid storage cache pressure build up.
    if (storageEngine->supportsReadConcernSnapshot()) {
        startPeriodicThreadToAbortExpiredTransactions(serviceContext);
        startPeriodicThreadToDecreaseSnapshotHistoryCachePressure(serviceContext);
    }

    // Set up the logical session cache
    LogicalSessionCacheServer kind = LogicalSessionCacheServer::kStandalone;
    if (serverGlobalParams.clusterRole == ClusterRole::ShardServer) {
        kind = LogicalSessionCacheServer::kSharded;
    } else if (serverGlobalParams.clusterRole == ClusterRole::ConfigServer) {
        kind = LogicalSessionCacheServer::kConfigServer;
    } else if (replSettings.usingReplSets()) {
        kind = LogicalSessionCacheServer::kReplicaSet;
    }

    auto sessionCache = makeLogicalSessionCacheD(kind);
    LogicalSessionCache::set(serviceContext, std::move(sessionCache));

    // MessageServer::run will return when exit code closes its socket and we don't need the
    // operation context anymore
    startupOpCtx.reset();

    auto start = serviceContext->getServiceExecutor()->start();
    if (!start.isOK()) {
        error() << "Failed to start the service executor: " << start;
        return EXIT_NET_ERROR;
    }

    start = serviceContext->getServiceEntryPoint()->start();
    if (!start.isOK()) {
        error() << "Failed to start the service entry point: " << start;
        return EXIT_NET_ERROR;
    }

    if (!storageGlobalParams.repair) {
        start = serviceContext->getTransportLayer()->start();
        if (!start.isOK()) {
            error() << "Failed to start the listener: " << start.toString();
            return EXIT_NET_ERROR;
        }
    }

    serviceContext->notifyStartupComplete();

#ifndef _WIN32
    mongo::signalForkSuccess();
#else
    if (ntservice::shouldStartService()) {
        ntservice::reportStatus(SERVICE_RUNNING);
        log() << "Service running";
    }
#endif

    if (MONGO_FAIL_POINT(shutdownAtStartup)) {
        log() << "starting clean exit via failpoint";
        exitCleanly(EXIT_CLEAN);
    }

    MONGO_IDLE_THREAD_BLOCK;
    return waitForShutdown();
}
void _initAndListen(int listenPort ) {
    Client::initThread("initandlisten");

    bool is32bit = sizeof(int*) == 4;

    {
        ProcessId pid = ProcessId::getCurrent();
        LogstreamBuilder l = log();
        l << "MongoDB starting : pid=" << pid
          << " port=" << serverGlobalParams.port
          << " dbpath=" << storageGlobalParams.dbpath;
        if( replSettings.master ) l << " master=" << replSettings.master;
        if( replSettings.slave )  l << " slave=" << (int) replSettings.slave;
        l << ( is32bit ? " 32" : " 64" ) << "-bit host=" << getHostNameCached() << endl;
    }

    DEV log() << "_DEBUG build (which is slower)" << endl;
    logStartupWarnings();
#if defined(_WIN32)
    printTargetMinOS();
#endif
    logProcessDetails();

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert(10296, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath));
    }
    {
        stringstream ss;
        ss << "repairpath (" << storageGlobalParams.repairpath << ") does not exist";
        uassert(12590, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.repairpath));
    }

    // TODO check non-journal subdirs if using directory-per-db
    checkReadAhead(storageGlobalParams.dbpath);

    acquirePathLock(mongodGlobalParams.repair);
    boost::filesystem::remove_all(storageGlobalParams.dbpath + "/_tmp/");

    FileAllocator::get()->start();

    // TODO: This should go into a MONGO_INITIALIZER once we have figured out the correct
    // dependencies.
    if (snmpInit) {
        snmpInit();
    }

    MONGO_ASSERT_ON_EXCEPTION_WITH_MSG( clearTmpFiles(), "clear tmp files" );

    dur::startup();

    if (storageGlobalParams.durOptions & StorageGlobalParams::DurRecoverOnly)
        return;

    unsigned long long missingRepl = checkIfReplMissingFromCommandLine();
    if (missingRepl) {
        log() << startupWarningsLog;
        log() << "** WARNING: mongod started without --replSet yet " << missingRepl
              << " documents are present in local.system.replset" << startupWarningsLog;
        log() << "** Restart with --replSet unless you are doing maintenance and no"
              << " other clients are connected." << startupWarningsLog;
        log() << "** The TTL collection monitor will not start because of this."
              << startupWarningsLog;
        log() << "** For more info see http://dochub.mongodb.org/core/ttlcollections"
              << startupWarningsLog;
        log() << startupWarningsLog;
    }

    if (mongodGlobalParams.scriptingEnabled) {
        ScriptEngine::setup();
        globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback );
        globalScriptEngine->setGetCurrentOpIdCallback( jsGetCurrentOpIdCallback );
    }

    // On replica set members we only clear temp collections on DBs other than "local" during
    // promotion to primary. On pure slaves, they are only cleared when the oplog tells them to.
    // The local DB is special because it is not replicated. See SERVER-10927 for more details.
    const bool shouldClearNonLocalTmpCollections =
        !(missingRepl || replSettings.usingReplSets() || replSettings.slave == SimpleSlave);
    repairDatabasesAndCheckVersion(shouldClearNonLocalTmpCollections);

    if (mongodGlobalParams.upgrade)
        return;

    uassertStatusOK(getGlobalAuthorizationManager()->initialize());

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned) (curTimeMicros() ^ startupSrandTimer.micros()));

    snapshotThread.go();
    d.clientCursorMonitor.go();
    PeriodicTask::startRunningPeriodicTasks();
    if (missingRepl) {
        // a warning was logged earlier
    }
    else {
        startTTLBackgroundJob();
    }

#ifndef _WIN32
    mongo::signalForkSuccess();
#endif

    if(getGlobalAuthorizationManager()->isAuthEnabled()) {
        // open admin db in case we need to use it later. TODO this is not the right way to
        // resolve this.
        Client::WriteContext c("admin", storageGlobalParams.dbpath);
    }

    authindex::configureSystemIndexes("admin");

    getDeleter()->startWorkers();

    // Starts a background thread that rebuilds all incomplete indices.
    indexRebuilder.go();

    listen(listenPort);

    // listen() will return when exit code closes its socket.
    exitCleanly(EXIT_NET_ERROR);
}
void _initAndListen(int listenPort, const char *appserverLoc = NULL) {
    bool is32bit = sizeof(int*) == 4;

    {
#if !defined(_WIN32)
        pid_t pid = getpid();
#else
        DWORD pid=GetCurrentProcessId();
#endif
        Nullstream& l = log();
        l << "MongoDB starting : pid=" << pid << " port=" << cmdLine.port << " dbpath=" << dbpath;
        if( replSettings.master ) l << " master=" << replSettings.master;
        if( replSettings.slave )  l << " slave=" << (int) replSettings.slave;
        l << ( is32bit ? " 32" : " 64" ) << "-bit " << endl;
    }
    DEV log() << "_DEBUG build (which is slower)" << endl;
    show_warnings();
    log() << mongodVersion() << endl;
    printGitVersion();
    printSysInfo();

    {
        stringstream ss;
        ss << "dbpath (" << dbpath << ") does not exist";
        uassert( 10296 , ss.str().c_str(), boost::filesystem::exists( dbpath ) );
    }
    {
        stringstream ss;
        ss << "repairpath (" << repairpath << ") does not exist";
        uassert( 12590 , ss.str().c_str(), boost::filesystem::exists( repairpath ) );
    }

    acquirePathLock();
    remove_all( dbpath + "/_tmp/" );

    theFileAllocator().start();

    BOOST_CHECK_EXCEPTION( clearTmpFiles() );

    Client::initThread("initandlisten");

    _diaglog.init();

    clearTmpCollections();

    Module::initAll();

#if 0
    {
        stringstream indexpath;
        indexpath << dbpath << "/indexes.dat";
        RecCache::tempStore.init(indexpath.str().c_str(), BucketSize);
    }
#endif

    if ( useJNI ) {
        ScriptEngine::setup();
        globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback );
    }

    repairDatabases();

    /* we didn't want to pre-open all files for the repair check above. for regular
       operation we do for read/write lock concurrency reasons.
    */
    Database::_openAllFiles = true;

    if ( shouldRepairDatabases )
        return;

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned) (curTimeMicros() ^ startupSrandTimer.micros()));

    snapshotThread.go();
    clientCursorMonitor.go();

    if( !cmdLine._replSet.empty() ) {
        replSet = true;
        ReplSetCmdline *replSetCmdline = new ReplSetCmdline(cmdLine._replSet);
        boost::thread t( boost::bind( &startReplSets, replSetCmdline) );
    }

    listen(listenPort);

    // listen() will return when exit code closes its socket.
    exitCleanly(EXIT_NET_ERROR);
}
/* we create one thread for each connection from an app server database.
   app server will open a pool of threads.
   todo: one day, asio...
*/
void connThread( MessagingPort * inPort ) {
    TicketHolderReleaser connTicketReleaser( &connTicketHolder );

    /* todo: move to Client object */
    LastError *le = new LastError();
    lastError.reset(le);

    inPort->_logLevel = 1;
    auto_ptr<MessagingPort> dbMsgPort( inPort );
    Client& c = Client::initThread("conn", inPort);

    try {

        c.getAuthenticationInfo()->isLocalHost = dbMsgPort->farEnd.isLocalHost();

        Message m;
        while ( 1 ) {

            if ( !dbMsgPort->recv(m) ) {
                if( !cmdLine.quiet )
                    log() << "end connection " << dbMsgPort->farEnd.toString() << endl;
                dbMsgPort->shutdown();
                break;
            }
sendmore:
            if ( inShutdown() ) {
                log() << "got request after shutdown()" << endl;
                break;
            }

            lastError.startRequest( m , le );

            DbResponse dbresponse;
            if ( !assembleResponse( m, dbresponse, dbMsgPort->farEnd ) ) {
                log() << curTimeMillis() % 10000 << " end msg " << dbMsgPort->farEnd.toString() << endl;
                /* todo: we may not wish to allow this, even on localhost: very low priv accounts could stop us. */
                if ( dbMsgPort->farEnd.isLocalHost() ) {
                    dbMsgPort->shutdown();
                    sleepmillis(50);
                    problem() << "exiting end msg" << endl;
                    dbexit(EXIT_CLEAN);
                }
                else {
                    log() << " (not from localhost, ignoring end msg)" << endl;
                }
            }

            if ( dbresponse.response ) {
                dbMsgPort->reply(m, *dbresponse.response, dbresponse.responseTo);
                if( dbresponse.exhaust ) {
                    MsgData *header = dbresponse.response->header();
                    QueryResult *qr = (QueryResult *) header;
                    long long cursorid = qr->cursorId;
                    if( cursorid ) {
                        assert( dbresponse.exhaust && *dbresponse.exhaust != 0 );
                        string ns = dbresponse.exhaust; // before reset() frees it...
                        m.reset();
                        BufBuilder b(512);
                        b.appendNum((int) 0 /*size set later in appendData()*/);
                        b.appendNum(header->id);
                        b.appendNum(header->responseTo);
                        b.appendNum((int) dbGetMore);
                        b.appendNum((int) 0); // reserved
                        b.appendStr(ns);
                        b.appendNum((int) 0); // ntoreturn
                        b.appendNum(cursorid);
                        m.appendData(b.buf(), b.len());
                        b.decouple();
                        DEV log() << "exhaust=true sending more" << endl;
                        beNice();
                        goto sendmore;
                    }
                }
            }

            m.reset();
        }

    }
    catch ( AssertionException& e ) {
        log() << "AssertionException in connThread, closing client connection" << endl;
        log() << ' ' << e.what() << endl;
        dbMsgPort->shutdown();
    }
    catch ( SocketException& ) {
        problem() << "SocketException in connThread, closing client connection" << endl;
        dbMsgPort->shutdown();
    }
    catch ( const ClockSkewException & ) {
        exitCleanly( EXIT_CLOCK_SKEW );
    }
    catch ( std::exception &e ) {
        problem() << "Uncaught std::exception: " << e.what() << ", terminating" << endl;
        dbexit( EXIT_UNCAUGHT );
    }
    catch ( ... ) {
        problem() << "Uncaught exception, terminating" << endl;
        dbexit( EXIT_UNCAUGHT );
    }

    // thread ending...
    {
        Client * c = currentClient.get();
        if( c ) c->shutdown();
    }
    globalScriptEngine->threadDone();
}
int Tool::main( int argc , char ** argv, char ** envp ) {
    setupSignalHandlers(true);

    static StaticObserver staticObserver;

    mongo::runGlobalInitializersOrDie(argc, argv, envp);

    // hide password from ps output
    for (int i=0; i < (argc-1); ++i) {
        if (!strcmp(argv[i], "-p") || !strcmp(argv[i], "--password")) {
            char* arg = argv[i+1];
            while (*arg) {
                *arg++ = 'x';
            }
        }
    }

    if (!toolGlobalParams.useDirectClient) {
        if (toolGlobalParams.noconnection) {
            // do nothing
        }
        else {
            string errmsg;

            ConnectionString cs = ConnectionString::parse(toolGlobalParams.connectionString,
                                                          errmsg);
            if ( ! cs.isValid() ) {
                toolError() << "invalid hostname [" << toolGlobalParams.connectionString << "] "
                            << errmsg << std::endl;
                ::_exit(-1);
            }

            _conn = cs.connect( errmsg );
            if ( ! _conn ) {
                toolError() << "couldn't connect to [" << toolGlobalParams.connectionString
                            << "] " << errmsg << std::endl;
                ::_exit(-1);
            }

            toolInfoOutput() << "connected to: " << toolGlobalParams.connectionString
                             << std::endl;
        }
    }
    else {
        verify( lastError.get( true ) );

        Client::initThread("tools");
        storageGlobalParams.dbpath = toolGlobalParams.dbpath;
        try {
            getGlobalEnvironment()->setGlobalStorageEngine(storageGlobalParams.engine);
        }
        catch (const DBException& ex) {
            if (ex.getCode() == ErrorCodes::DBPathInUse) {
                toolError() << std::endl << "If you are running a mongod on the same "
                               "path you should connect to that instead of direct data "
                               "file access" << std::endl << std::endl;
            }
            else {
                toolError() << "Failed to initialize storage engine: " << ex.toString();
            }
            dbexit( EXIT_FS );
            ::_exit(EXIT_FAILURE);
        }

        _txn.reset(new OperationContextImpl());
        _conn = new DBDirectClient(_txn.get());
    }

    int ret = -1;
    try {
        if (!toolGlobalParams.useDirectClient && !toolGlobalParams.noconnection)
            auth();
        ret = run();
    }
    catch ( DBException& e ) {
        toolError() << "assertion: " << e.toString() << std::endl;
        ret = -1;
    }
    catch(const boost::filesystem::filesystem_error &fse) {
        /*
          https://jira.mongodb.org/browse/SERVER-2904

          Simple tools that don't access the database, such as
          bsondump, aren't throwing DBExceptions, but are throwing
          boost exceptions.

          The currently available set of error codes don't seem to match
          boost documentation.  boost::filesystem::not_found_error
          (from http://www.boost.org/doc/libs/1_31_0/libs/filesystem/doc/exception.htm)
          doesn't seem to exist in our headers.  Also, fse.code() isn't
          boost::system::errc::no_such_file_or_directory when this
          happens, as you would expect.  And, determined from
          experimentation that the command-line argument gets turned into
          "\\?" instead of "/?" !!!
        */
#if defined(_WIN32)
        if (/*(fse.code() == boost::system::errc::no_such_file_or_directory) &&*/
            (fse.path1() == "\\?"))
            printHelp(cerr);
        else
#endif // _WIN32
            toolError() << "error: " << fse.what() << std::endl;

        ret = -1;
    }

    if ( currentClient.get() )
        currentClient.get()->shutdown();

    if (toolGlobalParams.useDirectClient) {
        exitCleanly(EXIT_CLEAN);
    }

    fflush(stdout);
    fflush(stderr);

    ::_exit(ret);
}
void _initAndListen(int listenPort ) {

    Client::initThread("initandlisten");

    Database::_openAllFiles = false;

    Logstream::get().addGlobalTee( new RamLog("global") );

    bool is32bit = sizeof(int*) == 4;

    {
#if !defined(_WIN32)
        pid_t pid = getpid();
#else
        DWORD pid=GetCurrentProcessId();
#endif
        Nullstream& l = log();
        l << "MongoDB starting : pid=" << pid << " port=" << cmdLine.port << " dbpath=" << dbpath;
        if( replSettings.master ) l << " master=" << replSettings.master;
        if( replSettings.slave )  l << " slave=" << (int) replSettings.slave;
        l << ( is32bit ? " 32" : " 64" ) << "-bit host=" << getHostNameCached() << endl;
    }
    DEV log() << "_DEBUG build (which is slower)" << endl;
    show_warnings();
    log() << mongodVersion() << endl;
    printGitVersion();
    printSysInfo();
    printCommandLineOpts();

    {
        stringstream ss;
        ss << endl;
        ss << "*********************************************************************" << endl;
        ss << " ERROR: dbpath (" << dbpath << ") does not exist." << endl;
        ss << " Create this directory or give existing directory in --dbpath." << endl;
        ss << " See http://www.mongodb.org/display/DOCS/Starting+and+Stopping+Mongo" << endl;
        ss << "*********************************************************************" << endl;
        uassert( 10296 , ss.str().c_str(), boost::filesystem::exists( dbpath ) );
    }
    {
        stringstream ss;
        ss << "repairpath (" << repairpath << ") does not exist";
        uassert( 12590 , ss.str().c_str(), boost::filesystem::exists( repairpath ) );
    }

    acquirePathLock(forceRepair);
    boost::filesystem::remove_all( dbpath + "/_tmp/" );

    FileAllocator::get()->start();

    MONGO_ASSERT_ON_EXCEPTION_WITH_MSG( clearTmpFiles(), "clear tmp files" );

    dur::startup();

    if( cmdLine.durOptions & CmdLine::DurRecoverOnly )
        return;

    // comes after getDur().startup() because this reads from the database
    clearTmpCollections();

    checkIfReplMissingFromCommandLine();

    Module::initAll();

    if ( scriptingEnabled ) {
        ScriptEngine::setup();
        globalScriptEngine->setCheckInterruptCallback( jsInterruptCallback );
        globalScriptEngine->setGetInterruptSpecCallback( jsGetInterruptSpecCallback );
    }

    repairDatabasesAndCheckVersion();

    /* we didn't want to pre-open all files for the repair check above. for regular
       operation we do for read/write lock concurrency reasons.
    */
    Database::_openAllFiles = true;

    if ( shouldRepairDatabases )
        return;

    /* this is for security on certain platforms (nonce generation) */
    srand((unsigned) (curTimeMicros() ^ startupSrandTimer.micros()));

    snapshotThread.go();
    d.clientCursorMonitor.go();
    PeriodicTask::theRunner->go();
    startTTLBackgroundJob();

#ifndef _WIN32
    CmdLine::launchOk();
#endif

    if( !noauth ) {
        // open admin db in case we need to use it later. TODO this is not the right way to
        // resolve this.
        Client::WriteContext c("admin",dbpath,false);
    }

    listen(listenPort);

    // listen() will return when exit code closes its socket.
    exitCleanly(EXIT_NET_ERROR);
}
void ctrlCTerminate() {
    log() << "got kill or ctrl-c signal, will terminate after current cmd ends" << endl;
    Client::initThread( "ctrlCTerminate" );
    exitCleanly( EXIT_KILL );
}