/**
 * Returns the directory in which buffered uploads are stored.
 * If no directory was explicitly configured, a generation-specific
 * default inside the server instance directory is used.
 */
string getUploadBufferDir(const ServerInstanceDir::GenerationPtr &generation) const {
	if (uploadBufferDir == NULL) {
		// Not configured; fall back to the per-generation default.
		return generation->getPath() + "/buffered_uploads";
	}
	return uploadBufferDir;
}
/**
 * Creates an AccountsDatabase populated with the default accounts,
 * i.e. the "_passenger-status" account used by the passenger-status
 * administration command. The account's randomly generated password is
 * written to a file inside the given generation directory; its
 * permissions depend on whether user switching is enabled.
 *
 * @throws NonExistentUserException The default user does not exist.
 * @throws NonExistentGroupException The default group does not exist.
 */
AccountsDatabasePtr
AccountsDatabase::createDefault(const ServerInstanceDir::GenerationPtr &generation,
                                bool userSwitching, const string &defaultUser,
                                const string &defaultGroup)
{
	AccountsDatabasePtr result(new AccountsDatabase());
	RandomGenerator randomGenerator;
	string statusPassword = randomGenerator.generateByteString(MESSAGE_SERVER_MAX_PASSWORD_SIZE);
	struct passwd *userEntry;
	struct group *groupEntry;
	uid_t uid;
	gid_t gid;

	userEntry = getpwnam(defaultUser.c_str());
	if (userEntry == NULL) {
		throw NonExistentUserException("Default user '" + defaultUser +
			"' does not exist.");
	}
	uid = userEntry->pw_uid;

	groupEntry = getgrnam(defaultGroup.c_str());
	if (groupEntry == NULL) {
		throw NonExistentGroupException("Default group '" + defaultGroup +
			"' does not exist.");
	}
	gid = groupEntry->gr_gid;

	// An account for the 'passenger-status' command. Its password is only
	// readable by root, or (if user switching is turned off) only by the
	// web server's user.
	result->add("_passenger-status", statusPassword, false,
		Account::INSPECT_BASIC_INFO |
		Account::INSPECT_SENSITIVE_INFO |
		Account::INSPECT_BACKTRACES);

	string passwordFile = generation->getPath() + "/passenger-status-password.txt";
	if (geteuid() == 0 && !userSwitching) {
		// Running as root without user switching: make the file owned
		// by, and readable only by, the default user/group.
		createFile(passwordFile, statusPassword, S_IRUSR, uid, gid);
	} else {
		createFile(passwordFile, statusPassword, S_IRUSR | S_IWUSR);
	}
	return result;
}
// Test fixture constructor: starts a real ApplicationPool behind a
// MessageServer listening on a Unix socket, then opens two client
// connections (pool and pool2) so tests can exercise the server from
// multiple clients simultaneously.
ApplicationPool_Server_PoolTest() {
	createServerInstanceDirAndGeneration(serverInstanceDir, generation);
	socketFilename = generation->getPath() + "/socket";
	// A single test account that the client connections authenticate with.
	accountsDatabase = ptr(new AccountsDatabase());
	accountsDatabase->add("test", "12345", false);
	messageServer = ptr(new MessageServer(socketFilename, accountsDatabase));
	realPool = ptr(new ApplicationPool::Pool("../helper-scripts/passenger-spawn-server", generation));
	// Expose the real pool to clients through the message server.
	poolServer = ptr(new ApplicationPool::Server(realPool));
	messageServer->addHandler(poolServer);
	// Run the server's main loop in a background thread; it must be
	// accepting connections before newPoolConnection() below is called.
	serverThread = ptr(new oxt::thread(
		boost::bind(&MessageServer::mainLoop, messageServer.get())
	));
	pool = newPoolConnection();
	pool2 = newPoolConnection();
}
/*
 * Watchdog entry point: reads configuration from the agent options,
 * creates the server instance directory and a new generation, starts and
 * supervises the agent watchers, and shuts everything down once the web
 * server exits. Returns 0 on graceful shutdown, 1 on error.
 */
int main(int argc, char *argv[]) {
	disableOomKiller();

	agentsOptions = initializeAgent(argc, argv, "PassengerWatchdog");
	// Cache frequently used option values in globals.
	logLevel      = agentsOptions.getInt("log_level");
	webServerPid  = agentsOptions.getPid("web_server_pid");
	tempDir       = agentsOptions.get("temp_dir");
	userSwitching = agentsOptions.getBool("user_switching");
	defaultUser   = agentsOptions.get("default_user");
	defaultGroup  = agentsOptions.get("default_group");
	webServerWorkerUid = agentsOptions.getUid("web_server_worker_uid");
	webServerWorkerGid = agentsOptions.getGid("web_server_worker_gid");
	passengerRoot = agentsOptions.get("passenger_root");
	rubyCommand   = agentsOptions.get("ruby");
	maxPoolSize        = agentsOptions.getInt("max_pool_size");
	maxInstancesPerApp = agentsOptions.getInt("max_instances_per_app");
	poolIdleTime       = agentsOptions.getInt("pool_idle_time");
	serializedPrestartURLs = agentsOptions.get("prestart_urls");

	try {
		randomGenerator = new RandomGenerator();
		errorEvent = new EventFd();

		MessageChannel feedbackChannel(FEEDBACK_FD);
		serverInstanceDir.reset(new ServerInstanceDir(webServerPid, tempDir));
		generation = serverInstanceDir->newGeneration(userSwitching, defaultUser,
			defaultGroup, webServerWorkerUid, webServerWorkerGid);
		// Propagate the computed locations to the agents we're about to spawn.
		agentsOptions.set("server_instance_dir", serverInstanceDir->getPath());
		agentsOptions.setInt("generation_number", generation->getNumber());

		ServerInstanceDirToucher serverInstanceDirToucher;
		ResourceLocator resourceLocator(passengerRoot);
		if (agentsOptions.get("analytics_server", false).empty()) {
			// Using local, server instance specific logging agent.
			loggingAgentAddress  = "unix:" + generation->getPath() + "/logging.socket";
			loggingAgentPassword = randomGenerator->generateAsciiString(64);
		} else {
			// Using remote logging agent.
			loggingAgentAddress = agentsOptions.get("analytics_server");
		}

		HelperAgentWatcher helperAgentWatcher(resourceLocator);
		LoggingAgentWatcher loggingAgentWatcher(resourceLocator);

		vector<AgentWatcher *> watchers;
		vector<AgentWatcher *>::iterator it;
		watchers.push_back(&helperAgentWatcher);
		if (agentsOptions.get("analytics_server", false).empty()) {
			// Only supervise a logging agent when a local one is used.
			watchers.push_back(&loggingAgentWatcher);
		}

		// Start all agents; on failure, report to the web server over the
		// feedback channel and tear down whatever was already started.
		for (it = watchers.begin(); it != watchers.end(); it++) {
			try {
				(*it)->start();
			} catch (const std::exception &e) {
				feedbackChannel.write("Watchdog startup error",
					e.what(), NULL);
				forceAllAgentsShutdown(watchers);
				return 1;
			}
			// Allow other exceptions to propagate and crash the watchdog.
		}
		for (it = watchers.begin(); it != watchers.end(); it++) {
			try {
				(*it)->startWatching();
			} catch (const std::exception &e) {
				feedbackChannel.write("Watchdog startup error",
					e.what(), NULL);
				forceAllAgentsShutdown(watchers);
				return 1;
			}
			// Allow other exceptions to propagate and crash the watchdog.
		}

		// Tell the web server where the server instance directory lives.
		feedbackChannel.write("Basic startup info",
			serverInstanceDir->getPath().c_str(),
			toString(generation->getNumber()).c_str(),
			NULL);
		for (it = watchers.begin(); it != watchers.end(); it++) {
			(*it)->sendStartupInfo(feedbackChannel);
		}
		feedbackChannel.write("All agents started", NULL);

		this_thread::disable_interruption di;
		this_thread::disable_syscall_interruption dsi;
		bool exitGracefully = waitForStarterProcessOrWatchers(watchers);
		AgentWatcher::stopWatching(watchers);
		if (exitGracefully) {
			/* Fork a child process which cleans up all the agent processes in
			 * the background and exit this watchdog process so that we don't block
			 * the web server. */
			cleanupAgentsInBackground(watchers);
			return 0;
		} else {
			P_DEBUG("Web server did not exit gracefully, forcing shutdown of all service processes...");
			forceAllAgentsShutdown(watchers);
			return 1;
		}
	} catch (const tracable_exception &e) {
		P_ERROR(e.what() << "\n" << e.backtrace());
		return 1;
	} catch (const std::exception &e) {
		P_ERROR(e.what());
		return 1;
	}
}
static void cleanupAgentsInBackground(vector<AgentWatcher *> &watchers) { this_thread::disable_interruption di; this_thread::disable_syscall_interruption dsi; pid_t pid; int e; pid = fork(); if (pid == 0) { // Child vector<AgentWatcher *>::const_iterator it; Timer timer(false); fd_set fds, fds2; int max, agentProcessesDone; unsigned long long deadline = 30000; // miliseconds // Wait until all agent processes have exited. max = 0; FD_ZERO(&fds); for (it = watchers.begin(); it != watchers.end(); it++) { FD_SET((*it)->getFeedbackFd(), &fds); if ((*it)->getFeedbackFd() > max) { max = (*it)->getFeedbackFd(); } } timer.start(); agentProcessesDone = 0; while (agentProcessesDone != -1 && agentProcessesDone < (int) watchers.size() && timer.elapsed() < deadline) { struct timeval timeout; #ifdef FD_COPY FD_COPY(&fds, &fds2); #else FD_ZERO(&fds2); for (it = watchers.begin(); it != watchers.end(); it++) { FD_SET((*it)->getFeedbackFd(), &fds2); } #endif timeout.tv_sec = 0; timeout.tv_usec = 10000; agentProcessesDone = syscalls::select(max + 1, &fds2, NULL, NULL, &timeout); if (agentProcessesDone > 0 && timer.elapsed() < deadline) { usleep(10000); } } if (agentProcessesDone == -1 || timer.elapsed() >= deadline) { // An error occurred or we've waited long enough. Kill all the // processes. P_WARN("Some Phusion Passenger agent processes did not exit " << "in time, forcefully shutting down all."); for (it = watchers.begin(); it != watchers.end(); it++) { (*it)->forceShutdown(); } } else { P_DEBUG("All Phusion Passenger agent processes have exited."); } // Now clean up the server instance directory. delete generation.get(); delete serverInstanceDir.get(); _exit(0); } else if (pid == -1) { // Error e = errno; throw SystemException("fork() failed", errno); } else { // Parent // Let child process handle cleanup. serverInstanceDir->detach(); generation->detach(); } }
/**
 * HelperAgent server constructor. Binds the admin and request sockets,
 * sets up the accounts database and message server, drops root
 * privileges when user switching is disabled, constructs the
 * application pool and request handler, reports readiness over
 * feedbackFd, and finally kicks off web app prestarting in a
 * background thread.
 */
Server(FileDescriptor feedbackFd, const AgentOptions &_options)
	: options(_options),
	  requestLoop(true),
	  serverInstanceDir(_options.serverInstanceDir, false),
	  resourceLocator(options.passengerRoot)
{
	TRACE_POINT();
	this->feedbackFd = feedbackFd;

	UPDATE_TRACE_POINT();
	generation = serverInstanceDir.getGeneration(options.generationNumber);
	startListening();
	// Accounts for the admin tools and for the web server's exit command.
	accountsDatabase = boost::make_shared<AccountsDatabase>();
	accountsDatabase->add("_passenger-status", options.adminToolStatusPassword, false,
		Account::INSPECT_BASIC_INFO |
		Account::INSPECT_SENSITIVE_INFO |
		Account::INSPECT_BACKTRACES |
		Account::INSPECT_REQUESTS);
	accountsDatabase->add("_web_server", options.exitPassword, false, Account::EXIT);
	messageServer = boost::make_shared<MessageServer>(
		parseUnixSocketAddress(options.adminSocketAddress), accountsDatabase);

	createFile(generation->getPath() + "/helper_agent.pid",
		toString(getpid()), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

	// Privileged setup (socket binding, pid file) is done; drop root now
	// if user switching is disabled.
	if (geteuid() == 0 && !options.userSwitching) {
		lowerPrivilege(options.defaultUser, options.defaultGroup);
	}

	UPDATE_TRACE_POINT();
	randomGenerator = boost::make_shared<RandomGenerator>();
	// Check whether /dev/urandom is actually random.
	// https://code.google.com/p/phusion-passenger/issues/detail?id=516
	if (randomGenerator->generateByteString(16) == randomGenerator->generateByteString(16)) {
		throw RuntimeException("Your random number device, /dev/urandom, appears to be broken. "
			"It doesn't seem to be returning random data. Please fix this.");
	}

	UPDATE_TRACE_POINT();
	loggerFactory = boost::make_shared<UnionStation::LoggerFactory>(
		options.loggingAgentAddress, "logging", options.loggingAgentPassword);
	spawnerFactory = boost::make_shared<SpawnerFactory>(poolLoop.safe,
		resourceLocator, generation,
		boost::make_shared<SpawnerConfig>(randomGenerator));
	pool = boost::make_shared<Pool>(poolLoop.safe.get(), spawnerFactory,
		loggerFactory, randomGenerator);
	pool->initialize();
	pool->setMax(options.maxPoolSize);
	//pool->setMaxPerApp(maxInstancesPerApp);
	// NOTE(review): the * 1000000 factor presumably converts seconds to
	// microseconds — confirm against the Pool API.
	pool->setMaxIdleTime(options.poolIdleTime * 1000000);

	requestHandler = boost::make_shared<RequestHandler>(requestLoop.safe,
		requestSocket, pool, options);

	messageServer->addHandler(boost::make_shared<RemoteController>(requestHandler, pool));
	messageServer->addHandler(ptr(new ExitHandler(exitEvent)));

	// Invoke onSigquit() whenever SIGQUIT is received on the request loop.
	sigquitWatcher.set(requestLoop.loop);
	sigquitWatcher.set(SIGQUIT);
	sigquitWatcher.set<Server, &Server::onSigquit>(this);
	sigquitWatcher.start();

	UPDATE_TRACE_POINT();
	// Tell the watchdog that we're initialized and where our sockets live.
	writeArrayMessage(feedbackFd,
		"initialized",
		getRequestSocketFilename().c_str(),
		messageServer->getSocketFilename().c_str(),
		NULL);

	boost::function<void ()> func = boost::bind(prestartWebApps,
		resourceLocator,
		options.defaultRubyCommand,
		options.prestartUrls
	);
	prestarterThread = ptr(new oxt::thread(
		boost::bind(runAndPrintExceptions, func, true)
	));
}
/** Returns the filename of the socket on which HTTP requests are received. */
string getRequestSocketFilename() const {
	string filename = generation->getPath();
	filename.append("/request.socket");
	return filename;
}
/**
 * HelperAgent server constructor. Binds the sockets, sets up the default
 * accounts database and message server, drops root privileges when user
 * switching is disabled, constructs the application pool and request
 * handler, reports readiness over feedbackFd, and kicks off web app
 * prestarting in a background thread.
 */
Server(FileDescriptor feedbackFd, const AgentOptions &_options)
	: options(_options),
	  requestLoop(true),
	  serverInstanceDir(options.webServerPid, options.tempDir, false),
	  resourceLocator(options.passengerRoot)
{
	TRACE_POINT();
	this->feedbackFd = feedbackFd;

	UPDATE_TRACE_POINT();
	generation = serverInstanceDir.getGeneration(options.generationNumber);
	startListening();
	// Default accounts plus the web server's exit account.
	accountsDatabase = AccountsDatabase::createDefault(generation,
		options.userSwitching, options.defaultUser, options.defaultGroup);
	accountsDatabase->add("_web_server", options.messageSocketPassword, false, Account::EXIT);
	messageServer = ptr(new MessageServer(generation->getPath() + "/socket", accountsDatabase));

	createFile(generation->getPath() + "/helper_agent.pid",
		toString(getpid()), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

	// Privileged setup (socket binding, pid file) is done; drop root now
	// if user switching is disabled.
	if (geteuid() == 0 && !options.userSwitching) {
		lowerPrivilege(options.defaultUser, options.defaultGroup);
	}

	UPDATE_TRACE_POINT();
	loggerFactory = make_shared<UnionStation::LoggerFactory>(
		options.loggingAgentAddress, "logging", options.loggingAgentPassword);
	randomGenerator = make_shared<RandomGenerator>();
	spawnerFactory = make_shared<SpawnerFactory>(poolLoop.safe,
		resourceLocator, generation, randomGenerator);
	pool = make_shared<Pool>(poolLoop.safe.get(), spawnerFactory,
		loggerFactory, randomGenerator);
	pool->setMax(options.maxPoolSize);
	//pool->setMaxPerApp(maxInstancesPerApp);
	// NOTE(review): the * 1000000 factor presumably converts seconds to
	// microseconds — confirm against the Pool API.
	pool->setMaxIdleTime(options.poolIdleTime * 1000000);

	messageServer->addHandler(make_shared<RemoteController>(pool));
	messageServer->addHandler(make_shared<BacktracesServer>());
	messageServer->addHandler(ptr(new ExitHandler(exitEvent)));

	requestHandler = make_shared<RequestHandler>(requestLoop.safe,
		requestSocket, pool, options);

	// Invoke onSigquit() whenever SIGQUIT is received on the request loop.
	sigquitWatcher.set(requestLoop.loop);
	sigquitWatcher.set(SIGQUIT);
	sigquitWatcher.set<Server, &Server::onSigquit>(this);
	sigquitWatcher.start();

	UPDATE_TRACE_POINT();
	// Tell the watchdog that we're initialized and where our sockets live.
	writeArrayMessage(feedbackFd,
		"initialized",
		getRequestSocketFilename().c_str(),
		messageServer->getSocketFilename().c_str(),
		NULL);

	function<void ()> func = boost::bind(prestartWebApps,
		resourceLocator,
		options.prestartUrls
	);
	prestarterThread = ptr(new oxt::thread(
		boost::bind(runAndPrintExceptions, func, true)
	));
}
/**
 * Helper server constructor (Apache variant). Sets up the accounts
 * database and message server on the generation's socket, drops root
 * privileges when user switching is disabled, constructs the
 * application pool, registers the message server handlers, reports
 * readiness over the feedback channel, and kicks off web app
 * prestarting in a background thread.
 */
Server(FileDescriptor feedbackFd, pid_t webServerPid, const string &tempDir,
	bool userSwitching, const string &defaultUser, const string &defaultGroup,
	const string &passengerRoot, const string &rubyCommand,
	unsigned int generationNumber, unsigned int maxPoolSize,
	unsigned int maxInstancesPerApp, unsigned int poolIdleTime,
	const VariantMap &options)
	: serverInstanceDir(webServerPid, tempDir, false),
	  resourceLocator(passengerRoot)
{
	TRACE_POINT();
	string messageSocketPassword;
	string loggingAgentPassword;

	this->feedbackFd = feedbackFd;
	feedbackChannel = MessageChannel(feedbackFd);

	UPDATE_TRACE_POINT();
	messageSocketPassword = Base64::decode(options.get("message_socket_password"));
	loggingAgentPassword = options.get("logging_agent_password");
	generation = serverInstanceDir.getGeneration(generationNumber);
	// Default accounts plus the web server's control account.
	accountsDatabase = AccountsDatabase::createDefault(generation,
		userSwitching, defaultUser, defaultGroup);
	accountsDatabase->add("_web_server", messageSocketPassword, false,
		Account::GET | Account::DETACH | Account::SET_PARAMETERS | Account::EXIT);
	messageServer = ptr(new MessageServer(generation->getPath() + "/socket", accountsDatabase));

	createFile(generation->getPath() + "/helper_server.pid",
		toString(getpid()), S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

	// Privileged setup is done; drop root now if user switching is disabled.
	if (geteuid() == 0 && !userSwitching) {
		lowerPrivilege(defaultUser, defaultGroup);
	}

	UPDATE_TRACE_POINT();
	analyticsLogger = ptr(new AnalyticsLogger(options.get("logging_agent_address"),
		"logging", loggingAgentPassword));

	pool = ptr(new ApplicationPool::Pool(
		resourceLocator.getSpawnServerFilename(), generation,
		accountsDatabase, rubyCommand,
		analyticsLogger,
		options.getInt("log_level"),
		options.get("debug_log_file", false)
	));
	pool->setMax(maxPoolSize);
	pool->setMaxPerApp(maxInstancesPerApp);
	pool->setMaxIdleTime(poolIdleTime);

	messageServer->addHandler(ptr(new TimerUpdateHandler(exitTimer)));
	messageServer->addHandler(ptr(new ApplicationPool::Server(pool)));
	messageServer->addHandler(ptr(new BacktracesServer()));
	messageServer->addHandler(ptr(new ExitHandler(exitEvent)));

	UPDATE_TRACE_POINT();
	// Tell the watchdog that we're initialized.
	feedbackChannel.write("initialized",
		"",  // Request socket filename; not available in the Apache helper server.
		messageServer->getSocketFilename().c_str(),
		NULL);

	prestarterThread = ptr(new oxt::thread(
		boost::bind(prestartWebApps, resourceLocator, options.get("prestart_urls"))
	));
}
/*
 * Watchdog entry point: applies option defaults, creates the server
 * instance directory and a new generation, starts and supervises the
 * agent watchers, and shuts everything down once the web server exits.
 * Returns 0 on graceful shutdown, 1 on error.
 */
int main(int argc, char *argv[]) {
	/*
	 * Some Apache installations (like on OS X) redirect stdout to /dev/null,
	 * so that only stderr is redirected to the log file. We therefore
	 * forcefully redirect stdout to stderr so that everything ends up in the
	 * same place.
	 */
	dup2(2, 1);

	/*
	 * Most operating systems overcommit memory. We *know* that this watchdog process
	 * doesn't use much memory; on OS X it uses about 200 KB of private RSS. If the
	 * watchdog is killed by the system's Out-Of-Memory Killer then it's all over:
	 * the system administrator will have to restart the web server for Phusion
	 * Passenger to be usable again. So here we disable Linux's OOM killer
	 * for this watchdog. Note that the OOM score is inherited by child processes
	 * so we need to restore it after each fork().
	 */
	oldOomScore = setOomScoreNeverKill();

	agentsOptions = initializeAgent(argc, argv, "PassengerWatchdog");
	agentsOptions
		.setDefaultInt ("log_level", DEFAULT_LOG_LEVEL)
		.setDefault    ("temp_dir", getSystemTempDir())
		.setDefaultBool("user_switching", true)
		.setDefault    ("default_user", DEFAULT_WEB_APP_USER)
		.setDefaultUid ("web_server_worker_uid", getuid())
		.setDefaultGid ("web_server_worker_gid", getgid())
		.setDefault    ("ruby", DEFAULT_RUBY)
		.setDefault    ("python", DEFAULT_PYTHON)
		.setDefaultInt ("max_pool_size", DEFAULT_MAX_POOL_SIZE)
		.setDefaultInt ("max_instances_per_app", DEFAULT_MAX_INSTANCES_PER_APP)
		.setDefaultInt ("pool_idle_time", DEFAULT_POOL_IDLE_TIME);

	P_DEBUG("Starting Watchdog...");

	try {
		TRACE_POINT();
		// Required options
		passengerRoot = agentsOptions.get("passenger_root");
		webServerPid  = agentsOptions.getPid("web_server_pid");

		// Optional options
		UPDATE_TRACE_POINT();
		tempDir       = agentsOptions.get("temp_dir");
		userSwitching = agentsOptions.getBool("user_switching");
		defaultUser   = agentsOptions.get("default_user");
		if (!agentsOptions.has("default_group")) {
			// No explicit default group: derive one from the default user.
			agentsOptions.set("default_group",
				inferDefaultGroup(defaultUser));
		}
		defaultGroup       = agentsOptions.get("default_group");
		webServerWorkerUid = agentsOptions.getUid("web_server_worker_uid");
		webServerWorkerGid = agentsOptions.getGid("web_server_worker_gid");

		UPDATE_TRACE_POINT();
		randomGenerator = new RandomGenerator();
		errorEvent = new EventFd();

		UPDATE_TRACE_POINT();
		serverInstanceDir.reset(new ServerInstanceDir(webServerPid, tempDir));
		generation = serverInstanceDir->newGeneration(userSwitching, defaultUser,
			defaultGroup, webServerWorkerUid, webServerWorkerGid);
		// Propagate the computed locations to the agents we're about to spawn.
		agentsOptions.set("server_instance_dir", serverInstanceDir->getPath());
		agentsOptions.setInt("generation_number", generation->getNumber());

		UPDATE_TRACE_POINT();
		ServerInstanceDirToucher serverInstanceDirToucher;
		ResourceLocator resourceLocator(passengerRoot);
		if (agentsOptions.get("analytics_server", false).empty()) {
			// Using local, server instance specific logging agent.
			loggingAgentAddress  = "unix:" + generation->getPath() + "/logging.socket";
			loggingAgentPassword = randomGenerator->generateAsciiString(64);
		} else {
			// Using remote logging agent.
			loggingAgentAddress = agentsOptions.get("analytics_server");
		}

		UPDATE_TRACE_POINT();
		shared_ptr<HelperAgentWatcher> helperAgentWatcher = make_shared<HelperAgentWatcher>(resourceLocator);
		shared_ptr<LoggingAgentWatcher> loggingAgentWatcher = make_shared<LoggingAgentWatcher>(resourceLocator);

		UPDATE_TRACE_POINT();
		vector<AgentWatcherPtr> watchers;
		vector<AgentWatcherPtr>::iterator it;
		watchers.push_back(helperAgentWatcher);
		if (agentsOptions.get("analytics_server", false).empty()) {
			// Only supervise a logging agent when a local one is used.
			watchers.push_back(loggingAgentWatcher);
		}

		// Start all agents; on failure, report to the web server over the
		// feedback fd and tear down whatever was already started.
		UPDATE_TRACE_POINT();
		for (it = watchers.begin(); it != watchers.end(); it++) {
			try {
				(*it)->start();
			} catch (const std::exception &e) {
				writeArrayMessage(FEEDBACK_FD,
					"Watchdog startup error",
					e.what(),
					NULL);
				forceAllAgentsShutdown(watchers);
				return 1;
			}
			// Allow other exceptions to propagate and crash the watchdog.
		}

		UPDATE_TRACE_POINT();
		for (it = watchers.begin(); it != watchers.end(); it++) {
			try {
				(*it)->startWatching();
			} catch (const std::exception &e) {
				writeArrayMessage(FEEDBACK_FD,
					"Watchdog startup error",
					e.what(),
					NULL);
				forceAllAgentsShutdown(watchers);
				return 1;
			}
			// Allow other exceptions to propagate and crash the watchdog.
		}

		UPDATE_TRACE_POINT();
		// Tell the web server where the server instance directory lives.
		writeArrayMessage(FEEDBACK_FD,
			"Basic startup info",
			serverInstanceDir->getPath().c_str(),
			toString(generation->getNumber()).c_str(),
			NULL);

		UPDATE_TRACE_POINT();
		for (it = watchers.begin(); it != watchers.end(); it++) {
			(*it)->sendStartupInfo(FEEDBACK_FD);
		}

		UPDATE_TRACE_POINT();
		writeArrayMessage(FEEDBACK_FD, "All agents started", NULL);
		P_DEBUG("All Phusion Passenger agents started!");

		this_thread::disable_interruption di;
		this_thread::disable_syscall_interruption dsi;

		UPDATE_TRACE_POINT();
		bool exitGracefully = waitForStarterProcessOrWatchers(watchers);
		if (exitGracefully) {
			/* Fork a child process which cleans up all the agent processes in
			 * the background and exit this watchdog process so that we don't block
			 * the web server. */
			P_DEBUG("Web server exited gracefully; gracefully shutting down all agents...");
		} else {
			P_DEBUG("Web server did not exit gracefully, forcing shutdown of all agents...");
		}
		UPDATE_TRACE_POINT();
		AgentWatcher::stopWatching(watchers);
		if (exitGracefully) {
			UPDATE_TRACE_POINT();
			cleanupAgentsInBackground(watchers);
			return 0;
		} else {
			UPDATE_TRACE_POINT();
			forceAllAgentsShutdown(watchers);
			return 1;
		}
	} catch (const tracable_exception &e) {
		P_ERROR(e.what() << "\n" << e.backtrace());
		return 1;
	} catch (const std::exception &e) {
		P_ERROR(e.what());
		return 1;
	}
}