Example #1
int main(int argc, const char *const *argv, const char *const *env)
{
	const char *db_filename = "last_message.db";
	int dying = 0;
	sqlite3 *sql = NULL;
	lmSQL lmdb = {0};

	apr_socket_t *acc = NULL;
	apr_pollset_t *pollset = NULL;

	APR_DO_OR_DIE(apr_app_initialize(&argc, &argv, &env));
	atexit(&apr_terminate);

	apr_pool_t *pool;
	APR_DO_OR_DIE(apr_pool_create(&pool, NULL));

	if (argc > 2) {
		return print_usage();
	}
	if (argc == 2) {
		if (strcmp(argv[1], "--help") == 0) {
			return print_usage();
		}
		db_filename = argv[1];
	}
	int err;
	if ((err = sqlite3_open(db_filename, &sql)) != SQLITE_OK) {
		fprintf(stderr, "Can't open DB (%s): %s.\n", db_filename,
		        sqlite3_errstr(err));
		return 1;
	}

	int rc;
	char *rc_msg;
	const char *CREATE_MESSAGE_TABLES =
	    "CREATE TABLE IF NOT EXISTS messages ("
	    "  msgid INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,"
	    "  name BLOB NOT NULL,"
	    "  left INTEGER NOT NULL,"
	    "  message BLOB NOT NULL"
	    ");";
	rc = sqlite3_exec(sql, CREATE_MESSAGE_TABLES, NULL, NULL, &rc_msg);
	if (rc != SQLITE_OK) {
		FAIL("Can't create 'messages' table: %s.\n", rc_msg);
		sqlite3_close(sql);
		return 1;
	}
	const char *CREATE_MESSAGE_SEEN = "CREATE TABLE IF NOT EXISTS seen ("
	                                  "  name BLOB PRIMARY KEY NOT NULL,"
	                                  "  msgid INTEGER  NOT NULL"
	                                  ");";
	rc = sqlite3_exec(sql, CREATE_MESSAGE_SEEN, NULL, NULL, &rc_msg);
	if (rc != SQLITE_OK) {
		FAIL("Can't create 'seen' table: %s.\n", rc_msg);
		sqlite3_close(sql);
		return 1;
	}

	lmdb.sql = sql;

#define LM_SQLITE_PREP(thing, statement)                                       \
	do {                                                                   \
		int prep =                                                     \
		    sqlite3_prepare_v2(sql, (statement), -1, (thing), NULL);   \
		if (prep != SQLITE_OK) {                                       \
			FAIL("SQL compilation error: (%s) while compiling "    \
			     "(%s).\n",                                        \
			     sqlite3_errmsg(sql), statement);                  \
			goto die;                                              \
		}                                                              \
	} while (0)

	LM_SQLITE_PREP(&lmdb.put, "INSERT INTO messages (name, left, message) "
	                          "VALUES (?, ?, ?);");
	LM_SQLITE_PREP(&lmdb.get, "SELECT msgid, left, message "
	                          "FROM messages "
	                          "WHERE name = ? "
	                          "ORDER BY msgid ASC;");
	LM_SQLITE_PREP(&lmdb.seen_add, "INSERT INTO seen (name, msgid) "
	                               "VALUES (?, ?);");
	LM_SQLITE_PREP(&lmdb.seen_del, "DELETE FROM seen "
	                               "WHERE name = ?;");
	LM_SQLITE_PREP(&lmdb.seen_get, "SELECT msgid FROM seen "
	                               "WHERE name = ?;");
	LM_SQLITE_PREP(&lmdb.drop, "DELETE FROM messages "
	                           "WHERE msgid <= ? "
	                           "  AND name = ?;");

	APR_DO_OR_DIE(apr_socket_create(&acc, APR_INET, SOCK_STREAM, 0, pool));
	APR_DO_OR_DIE(apr_socket_opt_set(acc, APR_SO_REUSEADDR, 1));
	apr_sockaddr_t *l_addr;
	APR_DO_OR_DIE(
	    apr_sockaddr_info_get(&l_addr, NULL, APR_INET, 1066, 0, pool));
	APR_DO_OR_DIE(apr_socket_bind(acc, l_addr));
	APR_DO_OR_DIE(apr_socket_listen(acc, 8));

	apr_pollfd_t apr_accept_desc;
	memset(&apr_accept_desc, 0, sizeof apr_accept_desc);
	apr_accept_desc.p = pool;
	apr_accept_desc.desc_type = APR_POLL_SOCKET;
	apr_accept_desc.desc.s = acc;
	apr_accept_desc.reqevents = APR_POLLIN;
	apr_accept_desc.client_data = NULL;

	APR_DO_OR_DIE(apr_pollset_create(&pollset, 256, pool, 0));
	APR_DO_OR_DIE(apr_pollset_add(pollset, &apr_accept_desc));

	apr_signal(SIGTERM, shutdown_on_signal);
	apr_signal(SIGINT, shutdown_on_signal);

	apr_int32_t signalled_len = 0;
	const apr_pollfd_t *signalled = NULL;
	apr_status_t poll_err = 0;
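	/* The pollset is polled at the bottom of the loop, so each iteration
	 * first handles the events returned by the previous
	 * apr_pollset_poll() call (none on the first pass). */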
	for (;;) {
		if (global_shutting_down) {
			goto goodnight;
		}
		if (!APR_STATUS_IS_EINTR(poll_err)) {
			APR_DO_OR_DIE(poll_err);
		}
		for (apr_int32_t i = 0; i < signalled_len; i++) {
			const apr_pollfd_t *s = signalled + i;
			if (s->desc.s == acc) {
				DEBUG("accept\n");
				do_client_accept(acc, pollset, pool, &lmdb);
			} else {
				do_client_state_machine(s, pollset);
			}
		}
		poll_err =
		    apr_pollset_poll(pollset, -1, &signalled_len, &signalled);
	}

	if (0) {
	goodnight:
		fprintf(stderr, "Goodnight!\n");
	}

	if (0) {
	die:
		dying = 1;
	}

	sqlite3_finalize(lmdb.put);
	lmdb.put = 0;
	sqlite3_finalize(lmdb.get);
	lmdb.get = 0;
	sqlite3_finalize(lmdb.seen_add);
	lmdb.seen_add = 0;
	sqlite3_finalize(lmdb.seen_del);
	lmdb.seen_del = 0;
	sqlite3_finalize(lmdb.seen_get);
	lmdb.seen_get = 0;
	sqlite3_finalize(lmdb.drop);
	lmdb.drop = 0;

	if (sqlite3_close(sql) != SQLITE_OK) {
		fprintf(stderr, "Error closing DB (%s): %s.\n", db_filename,
		        sqlite3_errmsg(sql));
		return 1;
	}

	apr_pollset_destroy(pollset);
	apr_socket_close(acc);

	return dying;
}
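
The example above relies on a few helpers that it does not define (lmSQL, APR_DO_OR_DIE, FAIL, DEBUG, print_usage, shutdown_on_signal, do_client_accept, do_client_state_machine). A rough, hypothetical sketch of the first few, assuming conventional definitions; the real project may define them differently:

/* Hypothetical sketch, not part of the original example. */
typedef struct lmSQL {
	sqlite3 *sql;
	sqlite3_stmt *put, *get, *seen_add, *seen_del, *seen_get, *drop;
} lmSQL;

#define FAIL(...) fprintf(stderr, __VA_ARGS__)
#define DEBUG(...) fprintf(stderr, __VA_ARGS__)

/* Abort with a readable message if an APR call fails. */
#define APR_DO_OR_DIE(expr)                                                    \
	do {                                                                   \
		apr_status_t do_or_die_rv_ = (expr);                           \
		if (do_or_die_rv_ != APR_SUCCESS) {                            \
			char do_or_die_buf_[256];                              \
			fprintf(stderr, "%s failed: %s\n", #expr,              \
			        apr_strerror(do_or_die_rv_, do_or_die_buf_,    \
			                     sizeof do_or_die_buf_));          \
			exit(EXIT_FAILURE);                                    \
		}                                                              \
	} while (0)
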
Example #2
int main(int argc, char *argv[])
{
	char errbuf[ERRBUFLEN + 1];
	apr_pool_t *pool;
	apr_proc_t proc;
	dynalogin_session_t *h;
	apr_status_t res;

	apr_sockaddr_t *sa;
	apr_socket_t *socket, *socket_new;

	char *cfg_filename;
	char *bind_address;
	int bind_port;
	int qlen = 32;
	int cfg_detach = 1;
	int ret;

	int done = 0;

	apr_hash_t *config;

	if(apr_initialize() != APR_SUCCESS)
	{
		fprintf(stderr, "apr_initialize failed\n");
		return 1;
	}

	/* Just return an error if a client closes a socket */
	apr_signal_block(SIGPIPE);

	openlog(argv[0], LOG_PID, LOG_AUTHPRIV);

	if((res = apr_pool_create(&pool, NULL)) != APR_SUCCESS)
	{
		syslog(LOG_ERR, "failed to create root pool: %s",
				apr_strerror(res, errbuf, ERRBUFLEN));
		return 1;
	}

	if(argc == 2)
		cfg_filename = argv[1];
	else
		cfg_filename = apr_psprintf(pool, "%s%c%s",
			SYSCONFDIR, DIR_SEP, DEFAULT_CONFIG_FILENAME);
	if(cfg_filename == NULL)
	{
		syslog(LOG_ERR, "apr_psprintf failed to create filename: %s",
				apr_strerror(res, errbuf, ERRBUFLEN));
		return 1;
	}
	/* Read config */
	if(dynalogin_read_config_from_file(&config, cfg_filename, pool)
			!= DYNALOGIN_SUCCESS)
	{
		syslog(LOG_ERR, "failed to read config file %s",
				cfg_filename);
		return 1;
	}

	/* Set up DYNALOGIN session (threadsafe?) */
	if(dynalogin_init(&h, pool, config) != DYNALOGIN_SUCCESS)
	{
		syslog(LOG_ERR, "failed to init dynalogin stack");
		return 1;
	}

	/* Daemonize? */
	GET_INT_PARAM(cfg_detach, config, DYNALOGIND_PARAM_DETACH)
	if((res=apr_proc_detach(cfg_detach)) != APR_SUCCESS)
	{
		syslog(LOG_ERR, "failed to detach: %s",
				apr_strerror(res, errbuf, ERRBUFLEN));
		return 1;
	}

	/* Create socket for clients */
	if((res=apr_socket_create(&socket,
			APR_INET, SOCK_STREAM, APR_PROTO_TCP, pool))!=APR_SUCCESS)
	{
		syslog(LOG_ERR, "failed to create listening socket: %s",
				apr_strerror(res, errbuf, ERRBUFLEN));
		return 1;
	}
	GET_STRING_PARAM_DEF(bind_address, config, DYNALOGIND_PARAM_BIND_ADDR, DEFAULT_BIND_ADDR)
	GET_INT_PARAM_DEF(bind_port, config, DYNALOGIND_PARAM_BIND_PORT, DEFAULT_BIND_PORT)
	if((res=apr_sockaddr_info_get(&sa, bind_address, APR_UNSPEC,
			bind_port, APR_IPV4_ADDR_OK, pool))!=APR_SUCCESS)
	{
		syslog(LOG_ERR, "failed to resolve bind address: %s",
				apr_strerror(res, errbuf, ERRBUFLEN));
		apr_socket_close(socket);
		return 1;
	}
	if((res=apr_socket_opt_set(socket, APR_SO_REUSEADDR, 1))!=APR_SUCCESS)
	{
		syslog(LOG_ERR, "failed to set APR_SO_REUSEADDR: %s",
			apr_strerror(res, errbuf, ERRBUFLEN));
		apr_socket_close(socket);
		return 1;
	}
	if((res=apr_socket_bind(socket, sa))!=APR_SUCCESS)
	{
		syslog(LOG_ERR, "failed to bind: %s",
				apr_strerror(res, errbuf, ERRBUFLEN));
		apr_socket_close(socket);
		return 1;
	}

	GET_STRING_PARAM_DEF(tls_cert, config, DYNALOGIND_PARAM_TLS_CERT, NULL)
	GET_STRING_PARAM_DEF(tls_key, config, DYNALOGIND_PARAM_TLS_KEY, NULL)
	if(tls_cert != NULL && tls_key != NULL)
	{
		gnutls_global_init ();

		gnutls_certificate_allocate_credentials (&x509_cred);

		ret = gnutls_certificate_set_x509_key_file (x509_cred, tls_cert, tls_key,
				GNUTLS_X509_FMT_PEM);
		if (ret < 0)
		{
			syslog(LOG_ERR, "No certificate or key were found");
			return 1;
		}

		generate_dh_params ();

		gnutls_priority_init (&priority_cache, "PERFORMANCE:%SERVER_PRECEDENCE", NULL);

		gnutls_certificate_set_dh_params (x509_cred, dh_params);
	}
	else if(tls_cert != NULL && tls_key == NULL)
	{
		syslog(LOG_ERR, "%s specified, but %s not specified", DYNALOGIND_PARAM_TLS_CERT,
				DYNALOGIND_PARAM_TLS_KEY);
		return 1;
	}
	else if(tls_key != NULL && tls_cert == NULL)
	{
		syslog(LOG_ERR, "%s specified, but %s not specified", DYNALOGIND_PARAM_TLS_KEY,
				DYNALOGIND_PARAM_TLS_CERT);
		return 1;
	}

	/* Main loop */
	while(done != 1)
	{
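		/* Note: setting up the listen backlog once before the loop
		 * would suffice; repeating apr_socket_listen() here is
		 * redundant but harmless. */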
		if((res=apr_socket_listen(socket, qlen))!=APR_SUCCESS)
		{
			syslog(LOG_ERR, "failed apr_socket_listen: %s",
					apr_strerror(res, errbuf, ERRBUFLEN));
			apr_socket_close(socket);
			return 1;
		}

		if((res=apr_socket_accept(&socket_new, socket, pool))!=APR_SUCCESS)
		{
			syslog(LOG_ERR, "failed to accept incoming connection: %s",
					apr_strerror(res, errbuf, ERRBUFLEN));
			apr_socket_close(socket);
			return 1;
		}
		syslog(LOG_INFO, "new incoming connection");
		if((res=handle_new_client(socket_new, pool, h))!=APR_SUCCESS)
		{
			syslog(LOG_ERR, "failed to handle incoming connection: %s",
					apr_strerror(res, errbuf, ERRBUFLEN));
			apr_socket_close(socket);
			return 1;
		}
	}
	apr_socket_close(socket);
	return 0;
}
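
Example #2 also references TLS globals and a generate_dh_params() helper that are defined elsewhere in dynalogind. A hypothetical sketch of what they might look like, using only documented GnuTLS calls (the real definitions may differ, e.g. in parameter size):

/* Hypothetical sketch, not the real dynalogind code. */
static gnutls_certificate_credentials_t x509_cred;
static gnutls_priority_t priority_cache;
static gnutls_dh_params_t dh_params;

static int generate_dh_params(void)
{
	/* 2048-bit DH parameters as an assumed, reasonable default. */
	int ret = gnutls_dh_params_init(&dh_params);
	if (ret < 0)
		return ret;
	return gnutls_dh_params_generate2(dh_params, 2048);
}
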
Example #3
void LLPluginProcessParent::idle(void)
{
	bool idle_again;

	do
	{
		// process queued messages
		mIncomingQueueMutex.lock();
		while(!mIncomingQueue.empty())
		{
			LLPluginMessage message = mIncomingQueue.front();
			mIncomingQueue.pop();
			mIncomingQueueMutex.unlock();
				
			receiveMessage(message);
			
			mIncomingQueueMutex.lock();
		}

		mIncomingQueueMutex.unlock();
		
		// Give time to network processing
		if(mMessagePipe)
		{
			// Drain any queued outgoing messages
			mMessagePipe->pumpOutput();
			
			// Only do input processing here if this instance isn't in a pollset.
			if(!mPolledInput)
			{
				mMessagePipe->pumpInput();
			}
		}
		
		if(mState <= STATE_RUNNING)
		{
			if(APR_STATUS_IS_EOF(mSocketError))
			{
				// Plugin socket was closed.  This covers both normal plugin termination and plugin crashes.
				errorState();
			}
			else if(mSocketError != APR_SUCCESS)
			{
				// The socket is in an error state -- the plugin is gone.
				LL_WARNS("Plugin") << "Socket hit an error state (" << mSocketError << ")" << LL_ENDL;
				errorState();
			}
		}	
		
		// If a state needs to go directly to another state (as a performance enhancement), it can set idle_again to true after calling setState().
		// USE THIS CAREFULLY, since it can starve other code.  Specifically make sure there's no way to get into a closed cycle and never return.
		// When in doubt, don't do it.
		idle_again = false;
		switch(mState)
		{
			case STATE_UNINITIALIZED:
			break;

			case STATE_INITIALIZED:
			{
	
				apr_status_t status = APR_SUCCESS;
				apr_sockaddr_t* addr = NULL;
				mListenSocket = LLSocket::create(LLSocket::STREAM_TCP);
				mBoundPort = 0;
				
				// This code is based on parts of LLSocket::create() in lliosocket.cpp.
				
				status = apr_sockaddr_info_get(
					&addr,
					"127.0.0.1",
					APR_INET,
					0,	// port 0 = ephemeral ("find me a port")
					0,
					LLAPRRootPool::get()());
					
				if(ll_apr_warn_status(status))
				{
					killSockets();
					errorState();
					break;
				}

				// This allows us to reuse the address on quick down/up. This is unlikely to create problems.
				ll_apr_warn_status(apr_socket_opt_set(mListenSocket->getSocket(), APR_SO_REUSEADDR, 1));
				
				status = apr_socket_bind(mListenSocket->getSocket(), addr);
				if(ll_apr_warn_status(status))
				{
					killSockets();
					errorState();
					break;
				}

				// Get the actual port the socket was bound to
				{
					apr_sockaddr_t* bound_addr = NULL;
					if(ll_apr_warn_status(apr_socket_addr_get(&bound_addr, APR_LOCAL, mListenSocket->getSocket())))
					{
						killSockets();
						errorState();
						break;
					}
					mBoundPort = bound_addr->port;	

					if(mBoundPort == 0)
					{
						LL_WARNS("Plugin") << "Bound port number unknown, bailing out." << LL_ENDL;
						
						killSockets();
						errorState();
						break;
					}
				}
				
				LL_DEBUGS("Plugin") << "Bound tcp socket to port: " << addr->port << LL_ENDL;

				// Make the listen socket non-blocking
				status = apr_socket_opt_set(mListenSocket->getSocket(), APR_SO_NONBLOCK, 1);
				if(ll_apr_warn_status(status))
				{
					killSockets();
					errorState();
					break;
				}

				status = apr_socket_timeout_set(mListenSocket->getSocket(), 0);
				if(ll_apr_warn_status(status))
				{
					killSockets();
					errorState();
					break;
				}
				
				// If it's a stream based socket, we need to tell the OS
				// to keep a queue of incoming connections for ACCEPT.
				status = apr_socket_listen(
					mListenSocket->getSocket(),
					10); // FIXME: Magic number for queue size
					
				if(ll_apr_warn_status(status))
				{
					killSockets();
					errorState();
					break;
				}
				
				// If we got here, we're listening.
				setState(STATE_LISTENING);
			}
			break;
			
			case STATE_LISTENING:
			{
				// Launch the plugin process.
				
				// Only argument to the launcher is the port number we're listening on
				std::stringstream stream;
				stream << mBoundPort;
				mProcess.addArgument(stream.str());
				if(mProcess.launch() != 0)
				{
					errorState();
				}
				else
				{
					// Set PluginAttachDebuggerToPlugins to TRUE to use this. You might also want to set DebugPluginDisableTimeout to TRUE.
					if(mDebug)
					{
						// If we're set to debug, start up a gdb instance in a new terminal window and have it attach to the plugin process and continue.
						std::stringstream cmd;

#if LL_DARWIN
						// The command we're constructing would look like this on the command line:
						// osascript -e 'tell application "Terminal"' -e 'set win to do script "gdb -pid 12345"' -e 'do script "continue" in win' -e 'end tell'
						mDebugger.setExecutable("/usr/bin/osascript");
						mDebugger.addArgument("-e");
						mDebugger.addArgument("tell application \"Terminal\"");
						mDebugger.addArgument("-e");
						cmd << "set win to do script \"gdb -pid " << mProcess.getProcessID() << "\"";
						mDebugger.addArgument(cmd.str());
						mDebugger.addArgument("-e");
						mDebugger.addArgument("do script \"continue\" in win");
						mDebugger.addArgument("-e");
						mDebugger.addArgument("end tell");
						mDebugger.launch();
#elif LL_LINUX
						// The command we're constructing would look like this on the command line:
						// /usr/bin/xterm -geometry 160x24-0+0 -e /usr/bin/gdb -n /proc/12345/exe 12345
						// Note that most terminals demand that all arguments to the process that is
						// started with -e are passed as arguments to the terminal: there are no quotes
						// around '/usr/bin/gdb -n /proc/12345/exe 12345'. This is the case for xterm,
						// uxterm, konsole etc. The exception might be gnome-terminal.
						//
						// The constructed command can be changed by setting the following environment
						// variables, for example:
						//
						// export LL_DEBUG_GDB_PATH=/usr/bin/gdb
						// export LL_DEBUG_TERMINAL_COMMAND='/usr/bin/gnome-terminal --geometry=165x24-0+0 -e "%s"'
						//
						// Or, as second example, if you are running the viewer on host 'A', and you want
						// to open the gdb terminal on the X display of host 'B', you would run on host B:
						// 'ssh -X A' (and then start the viewer, or just leave the terminal open), and
						// then use:
						//
						// export LL_DEBUG_TERMINAL_COMMAND="/usr/bin/uxterm -fs 9 -fa 'DejaVu Sans Mono' -display localhost:10 -geometry 209x31+0-50 -e %s"
						//
						// which would open the terminal on B (no quotes around the %s, since this uses uxterm!).
						// For a list of available strings to pass to the -fa, run in a terminal: fc-list :scalable=true:spacing=mono: family

						char const* env;
						std::string terminal_command = (env = getenv("LL_DEBUG_TERMINAL_COMMAND")) ? env : "/usr/bin/xterm -geometry 160x24+0+0 -e %s";
						char const* const gdb_path = (env = getenv("LL_DEBUG_GDB_PATH")) ? env : "/usr/bin/gdb";
						cmd << gdb_path << " -n /proc/" << mProcess.getProcessID() << "/exe " << mProcess.getProcessID();
						std::string::size_type pos = terminal_command.find("%s");
						if (pos != std::string::npos)
						{
							terminal_command.replace(pos, 2, cmd.str());
						}

						typedef boost::tokenizer<boost::escaped_list_separator<char>,
							std::basic_string<char>::const_iterator,
							std::basic_string<char> > tokenizerT;

						tokenizerT tok(terminal_command.begin(), terminal_command.end(),
							boost::escaped_list_separator<char>("\\", " ", "'\""));
						std::vector<std::basic_string<char> > tokens;
						for (tokenizerT::iterator cur_token(tok.begin()), end_token(tok.end());
							cur_token != end_token; ++cur_token)
						{
							if (!cur_token->empty())
								tokens.push_back(*cur_token);
						}
						std::vector<std::string>::iterator token = tokens.begin();
						mDebugger.setExecutable(*token);
						while (++token != tokens.end())
						{
							mDebugger.addArgument(*token);
						}
						mDebugger.launch();
#endif
					}
					
					// This will allow us to time out if the process never starts.
					mHeartbeat.start();
					mHeartbeat.setTimerExpirySec(mPluginLaunchTimeout);
					setState(STATE_LAUNCHED);
				}
			}
			break;

			case STATE_LAUNCHED:
				// waiting for the plugin to connect
				if(pluginLockedUpOrQuit())
				{
					errorState();
				}
				else
				{
					// Check for the incoming connection.
					if(accept())
					{
						// Stop listening on the server port
						mListenSocket.reset();
						setState(STATE_CONNECTED);
					}
				}
			break;
			
			case STATE_CONNECTED:
				// waiting for hello message from the plugin

				if(pluginLockedUpOrQuit())
				{
					errorState();
				}
			break;

			case STATE_HELLO:
				LL_DEBUGS("Plugin") << "received hello message" << LL_ENDL;
				
				// Send the message to load the plugin
				{
					LLPluginMessage message(LLPLUGIN_MESSAGE_CLASS_INTERNAL, "load_plugin");
					message.setValue("file", mPluginFile);
					message.setValue("dir", mPluginDir);
					sendMessage(message);
				}

				setState(STATE_LOADING);
			break;
			
			case STATE_LOADING:
				// The load_plugin_response message will kick us from here into STATE_RUNNING
				if(pluginLockedUpOrQuit())
				{
					errorState();
				}
			break;
			
			case STATE_RUNNING:
				if(pluginLockedUpOrQuit())
				{
					errorState();
				}
			break;
			
			case STATE_EXITING:
				if(!mProcess.isRunning())
				{
					setState(STATE_CLEANUP);
				}
				else if(pluginLockedUp())
				{
					LL_WARNS("Plugin") << "timeout in exiting state, bailing out" << LL_ENDL;
					errorState();
				}
			break;

			case STATE_LAUNCH_FAILURE:
				if(mOwner != NULL)
				{
					mOwner->pluginLaunchFailed();
				}
				setState(STATE_CLEANUP);
			break;

			case STATE_ERROR:
				if(mOwner != NULL)
				{
					mOwner->pluginDied();
				}
				setState(STATE_CLEANUP);
			break;
			
			case STATE_CLEANUP:
				mProcess.kill();
				killSockets();
				setState(STATE_DONE);
			break;
			
			
			case STATE_DONE:
				// just sit here.
			break;
			
		}
	
	} while (idle_again);
}
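
The STATE_INITIALIZED branch above uses a common APR pattern: bind to port 0 so the kernel assigns an ephemeral port, then read the chosen port back with apr_socket_addr_get(). A standalone sketch of that pattern, without the viewer's LLSocket and ll_apr_warn_status wrappers (the helper name is illustrative):

/* Hypothetical sketch: bind a TCP socket to an ephemeral loopback port and
 * report which port the kernel picked. */
#include <apr_network_io.h>

static apr_status_t bind_ephemeral(apr_socket_t **out_sock,
                                   apr_port_t *out_port, apr_pool_t *pool)
{
	apr_sockaddr_t *addr = NULL;
	apr_sockaddr_t *bound = NULL;
	apr_socket_t *sock = NULL;
	apr_status_t rv;

	/* Port 0 means "any free port". */
	rv = apr_sockaddr_info_get(&addr, "127.0.0.1", APR_INET, 0, 0, pool);
	if (rv != APR_SUCCESS)
		return rv;
	rv = apr_socket_create(&sock, addr->family, SOCK_STREAM,
	                       APR_PROTO_TCP, pool);
	if (rv != APR_SUCCESS)
		return rv;
	rv = apr_socket_bind(sock, addr);
	if (rv != APR_SUCCESS)
		return rv;
	/* Ask APR for the local address the socket actually ended up with. */
	rv = apr_socket_addr_get(&bound, APR_LOCAL, sock);
	if (rv != APR_SUCCESS)
		return rv;
	*out_port = bound->port;
	*out_sock = sock;
	return APR_SUCCESS;
}
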
Example #4
/*
 * On success, leave *EXIT_CODE untouched and return SVN_NO_ERROR. On error,
 * either return an error to be displayed, or set *EXIT_CODE to non-zero and
 * return SVN_NO_ERROR.
 */
static svn_error_t *
sub_main(int *exit_code, int argc, const char *argv[], apr_pool_t *pool)
{
  enum run_mode run_mode = run_mode_unspecified;
  svn_boolean_t foreground = FALSE;
  apr_socket_t *sock;
  apr_sockaddr_t *sa;
  svn_error_t *err;
  apr_getopt_t *os;
  int opt;
  serve_params_t params;
  const char *arg;
  apr_status_t status;
#ifndef WIN32
  apr_proc_t proc;
#endif
  svn_boolean_t is_multi_threaded;
  enum connection_handling_mode handling_mode = CONNECTION_DEFAULT;
  svn_boolean_t cache_fulltexts = TRUE;
  svn_boolean_t cache_nodeprops = TRUE;
  svn_boolean_t cache_txdeltas = TRUE;
  svn_boolean_t cache_revprops = FALSE;
  svn_boolean_t use_block_read = FALSE;
  apr_uint16_t port = SVN_RA_SVN_PORT;
  const char *host = NULL;
  int family = APR_INET;
  apr_int32_t sockaddr_info_flags = 0;
#if APR_HAVE_IPV6
  svn_boolean_t prefer_v6 = FALSE;
#endif
  svn_boolean_t quiet = FALSE;
  svn_boolean_t is_version = FALSE;
  int mode_opt_count = 0;
  int handling_opt_count = 0;
  const char *config_filename = NULL;
  const char *pid_filename = NULL;
  const char *log_filename = NULL;
  svn_node_kind_t kind;
  apr_size_t min_thread_count = THREADPOOL_MIN_SIZE;
  apr_size_t max_thread_count = THREADPOOL_MAX_SIZE;
#ifdef SVN_HAVE_SASL
  SVN_ERR(cyrus_init(pool));
#endif

  /* Check library versions */
  SVN_ERR(check_lib_versions());

  /* Initialize the FS library. */
  SVN_ERR(svn_fs_initialize(pool));

  /* Initialize the efficient Authz support. */
  SVN_ERR(svn_repos_authz_initialize(pool));

  SVN_ERR(svn_cmdline__getopt_init(&os, argc, argv, pool));

  params.root = "/";
  params.tunnel = FALSE;
  params.tunnel_user = NULL;
  params.read_only = FALSE;
  params.base = NULL;
  params.cfg = NULL;
  params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_DEFAULT;
  params.logger = NULL;
  params.config_pool = NULL;
  params.fs_config = NULL;
  params.vhost = FALSE;
  params.username_case = CASE_ASIS;
  params.memory_cache_size = (apr_uint64_t)-1;
  params.zero_copy_limit = 0;
  params.error_check_interval = 4096;
  params.max_request_size = MAX_REQUEST_SIZE * 0x100000;
  params.max_response_size = 0;

  while (1)
    {
      status = apr_getopt_long(os, svnserve__options, &opt, &arg);
      if (APR_STATUS_IS_EOF(status))
        break;
      if (status != APR_SUCCESS)
        {
          usage(argv[0], pool);
          *exit_code = EXIT_FAILURE;
          return SVN_NO_ERROR;
        }
      switch (opt)
        {
        case '6':
#if APR_HAVE_IPV6
          prefer_v6 = TRUE;
#endif
          /* ### Maybe error here if we don't have IPV6 support? */
          break;

        case 'h':
          help(pool);
          return SVN_NO_ERROR;

        case 'q':
          quiet = TRUE;
          break;

        case SVNSERVE_OPT_VERSION:
          is_version = TRUE;
          break;

        case 'd':
          if (run_mode != run_mode_daemon)
            {
              run_mode = run_mode_daemon;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_FOREGROUND:
          foreground = TRUE;
          break;

        case SVNSERVE_OPT_SINGLE_CONN:
          handling_mode = connection_mode_single;
          handling_opt_count++;
          break;

        case 'i':
          if (run_mode != run_mode_inetd)
            {
              run_mode = run_mode_inetd;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_LISTEN_PORT:
          {
            apr_uint64_t val;

            err = svn_cstring_strtoui64(&val, arg, 0, APR_UINT16_MAX, 10);
            if (err)
              return svn_error_createf(SVN_ERR_CL_ARG_PARSING_ERROR, err,
                                       _("Invalid port '%s'"), arg);
            port = (apr_uint16_t)val;
          }
          break;

        case SVNSERVE_OPT_LISTEN_HOST:
          host = arg;
          break;

        case 't':
          if (run_mode != run_mode_tunnel)
            {
              run_mode = run_mode_tunnel;
              mode_opt_count++;
            }
          break;

        case SVNSERVE_OPT_TUNNEL_USER:
          params.tunnel_user = arg;
          break;

        case 'X':
          if (run_mode != run_mode_listen_once)
            {
              run_mode = run_mode_listen_once;
              mode_opt_count++;
            }
          break;

        case 'r':
          SVN_ERR(svn_utf_cstring_to_utf8(&params.root, arg, pool));

          SVN_ERR(svn_io_check_resolved_path(params.root, &kind, pool));
          if (kind != svn_node_dir)
            {
              return svn_error_createf(SVN_ERR_ILLEGAL_TARGET, NULL,
                       _("Root path '%s' does not exist "
                         "or is not a directory"), params.root);
            }

          params.root = svn_dirent_internal_style(params.root, pool);
          SVN_ERR(svn_dirent_get_absolute(&params.root, params.root, pool));
          break;

        case 'R':
          params.read_only = TRUE;
          break;

        case 'T':
          handling_mode = connection_mode_thread;
          handling_opt_count++;
          break;

        case 'c':
          params.compression_level = atoi(arg);
          if (params.compression_level < SVN_DELTA_COMPRESSION_LEVEL_NONE)
            params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_NONE;
          if (params.compression_level > SVN_DELTA_COMPRESSION_LEVEL_MAX)
            params.compression_level = SVN_DELTA_COMPRESSION_LEVEL_MAX;
          break;

        case 'M':
          {
            apr_uint64_t sz_val;
            SVN_ERR(svn_cstring_atoui64(&sz_val, arg));

            params.memory_cache_size = 0x100000 * sz_val;
          }
          break;

        case SVNSERVE_OPT_CACHE_TXDELTAS:
          cache_txdeltas = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CACHE_FULLTEXTS:
          cache_fulltexts = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CACHE_REVPROPS:
          cache_revprops = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CACHE_NODEPROPS:
          cache_nodeprops = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_BLOCK_READ:
          use_block_read = svn_tristate__from_word(arg) == svn_tristate_true;
          break;

        case SVNSERVE_OPT_CLIENT_SPEED:
          {
            apr_size_t bandwidth = (apr_size_t)apr_strtoi64(arg, NULL, 0);

            /* for slower clients, don't try anything fancy */
            if (bandwidth >= 1000)
              {
                /* block other clients for at most 1 ms (at full bandwidth).
                   Note that the send buffer is 16kB anyways. */
                params.zero_copy_limit = bandwidth * 120;

                /* check for aborted connections at the same rate */
                params.error_check_interval = bandwidth * 120;
              }
          }
          break;

        case SVNSERVE_OPT_MAX_REQUEST:
          params.max_request_size = 0x100000 * apr_strtoi64(arg, NULL, 0);
          break;

        case SVNSERVE_OPT_MAX_RESPONSE:
          params.max_response_size = 0x100000 * apr_strtoi64(arg, NULL, 0);
          break;

        case SVNSERVE_OPT_MIN_THREADS:
          min_thread_count = (apr_size_t)apr_strtoi64(arg, NULL, 0);
          break;

        case SVNSERVE_OPT_MAX_THREADS:
          max_thread_count = (apr_size_t)apr_strtoi64(arg, NULL, 0);
          break;

#ifdef WIN32
        case SVNSERVE_OPT_SERVICE:
          if (run_mode != run_mode_service)
            {
              run_mode = run_mode_service;
              mode_opt_count++;
            }
          break;
#endif

        case SVNSERVE_OPT_CONFIG_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&config_filename, arg, pool));
          config_filename = svn_dirent_internal_style(config_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&config_filename, config_filename,
                                          pool));
          break;

        case SVNSERVE_OPT_PID_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&pid_filename, arg, pool));
          pid_filename = svn_dirent_internal_style(pid_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&pid_filename, pid_filename, pool));
          break;

        case SVNSERVE_OPT_VIRTUAL_HOST:
          params.vhost = TRUE;
          break;

        case SVNSERVE_OPT_LOG_FILE:
          SVN_ERR(svn_utf_cstring_to_utf8(&log_filename, arg, pool));
          log_filename = svn_dirent_internal_style(log_filename, pool);
          SVN_ERR(svn_dirent_get_absolute(&log_filename, log_filename, pool));
          break;

        }
    }

  if (is_version)
    {
      SVN_ERR(version(quiet, pool));
      return SVN_NO_ERROR;
    }

  if (os->ind != argc)
    {
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  if (mode_opt_count != 1)
    {
      svn_error_clear(svn_cmdline_fputs(
#ifdef WIN32
                      _("You must specify exactly one of -d, -i, -t, "
                        "--service or -X.\n"),
#else
                      _("You must specify exactly one of -d, -i, -t or -X.\n"),
#endif
                       stderr, pool));
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  if (handling_opt_count > 1)
    {
      svn_error_clear(svn_cmdline_fputs(
                      _("You may only specify one of -T or --single-thread\n"),
                      stderr, pool));
      usage(argv[0], pool);
      *exit_code = EXIT_FAILURE;
      return SVN_NO_ERROR;
    }

  /* construct object pools */
  is_multi_threaded = handling_mode == connection_mode_thread;
  params.fs_config = apr_hash_make(pool);
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_DELTAS,
                cache_txdeltas ? "1" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_FULLTEXTS,
                cache_fulltexts ? "1" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_NODEPROPS,
                cache_nodeprops ? "1" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_CACHE_REVPROPS,
                cache_revprops ? "2" :"0");
  svn_hash_sets(params.fs_config, SVN_FS_CONFIG_FSFS_BLOCK_READ,
                use_block_read ? "1" :"0");

  SVN_ERR(svn_repos__config_pool_create(&params.config_pool,
                                        is_multi_threaded,
                                        pool));

  /* If a configuration file is specified, load it and any referenced
   * password and authorization files. */
  if (config_filename)
    {
      params.base = svn_dirent_dirname(config_filename, pool);

      SVN_ERR(svn_repos__config_pool_get(&params.cfg,
                                         params.config_pool,
                                         config_filename,
                                         TRUE, /* must_exist */
                                         NULL,
                                         pool));
    }

  if (log_filename)
    SVN_ERR(logger__create(&params.logger, log_filename, pool));
  else if (run_mode == run_mode_listen_once)
    SVN_ERR(logger__create_for_stderr(&params.logger, pool));

  if (params.tunnel_user && run_mode != run_mode_tunnel)
    {
      return svn_error_create(SVN_ERR_CL_ARG_PARSING_ERROR, NULL,
               _("Option --tunnel-user is only valid in tunnel mode"));
    }

  if (run_mode == run_mode_inetd || run_mode == run_mode_tunnel)
    {
      apr_pool_t *connection_pool;
      svn_ra_svn_conn_t *conn;
      svn_stream_t *stdin_stream;
      svn_stream_t *stdout_stream;

      params.tunnel = (run_mode == run_mode_tunnel);
      apr_pool_cleanup_register(pool, pool, apr_pool_cleanup_null,
                                redirect_stdout);

      /* We are an interactive server, i.e. can't use APR buffering on
       * stdin. */
      SVN_ERR(svn_stream_for_stdin2(&stdin_stream, FALSE, pool));
      SVN_ERR(svn_stream_for_stdout(&stdout_stream, pool));

      /* Use a subpool for the connection to ensure that if SASL is used
       * the pool cleanup handlers that call sasl_dispose() (connection_pool)
       * and sasl_done() (pool) are run in the right order. See issue #3664. */
      connection_pool = svn_pool_create(pool);
      conn = svn_ra_svn_create_conn5(NULL, stdin_stream, stdout_stream,
                                     params.compression_level,
                                     params.zero_copy_limit,
                                     params.error_check_interval,
                                     params.max_request_size,
                                     params.max_response_size,
                                     connection_pool);
      err = serve(conn, &params, connection_pool);
      svn_pool_destroy(connection_pool);

      return err;
    }

#ifdef WIN32
  /* If svnserve needs to run as a Win32 service, then we need to
     coordinate with the Service Control Manager (SCM) before
     continuing.  This function call registers the svnserve.exe
     process with the SCM, waits for the "start" command from the SCM
     (which will come very quickly), and confirms that those steps
     succeeded.

     After this call succeeds, the service is free to run.  At some
     point in the future, the SCM will send a message to the service,
     requesting that it stop.  This is translated into a call to
     winservice_notify_stop().  The service is then responsible for
     cleanly terminating.

     We need to do this before actually starting the service logic
     (opening files, sockets, etc.) because the SCM wants you to
     connect *first*, then do your service-specific logic.  If the
     service process takes too long to connect to the SCM, then the
     SCM will decide that the service is busted, and will give up on
     it.
     */
  if (run_mode == run_mode_service)
    {
      err = winservice_start();
      if (err)
        {
          svn_handle_error2(err, stderr, FALSE, "svnserve: ");

          /* This is the most common error.  It means the user started
             svnserve from a shell, and specified the --service
             argument.  svnserve cannot be started, as a service, in
             this way.  The --service argument is only valid if
             svnserve is started by the SCM. */
          if (err->apr_err ==
              APR_FROM_OS_ERROR(ERROR_FAILED_SERVICE_CONTROLLER_CONNECT))
            {
              svn_error_clear(svn_cmdline_fprintf(stderr, pool,
                  _("svnserve: The --service flag is only valid if the"
                    " process is started by the Service Control Manager.\n")));
            }

          svn_error_clear(err);
          *exit_code = EXIT_FAILURE;
          return SVN_NO_ERROR;
        }

      /* The service is now in the "starting" state.  Before the SCM will
         consider the service "started", this thread must call the
         winservice_running() function. */
    }
#endif /* WIN32 */

  /* Make sure we have IPV6 support first before giving apr_sockaddr_info_get
     APR_UNSPEC, because it may give us back an IPV6 address even if we can't
     create IPV6 sockets. */

#if APR_HAVE_IPV6
#ifdef MAX_SECS_TO_LINGER
  /* ### old APR interface */
  status = apr_socket_create(&sock, APR_INET6, SOCK_STREAM, pool);
#else
  status = apr_socket_create(&sock, APR_INET6, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
#endif
  if (status == APR_SUCCESS)
    {
      apr_socket_close(sock);
      family = APR_UNSPEC;

      if (prefer_v6)
        {
          if (host == NULL)
            host = "::";
          sockaddr_info_flags = APR_IPV6_ADDR_OK;
        }
      else
        {
          if (host == NULL)
            host = "0.0.0.0";
          sockaddr_info_flags = APR_IPV4_ADDR_OK;
        }
    }
#endif

  status = apr_sockaddr_info_get(&sa, host, family, port,
                                 sockaddr_info_flags, pool);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't get address info"));
    }


#ifdef MAX_SECS_TO_LINGER
  /* ### old APR interface */
  status = apr_socket_create(&sock, sa->family, SOCK_STREAM, pool);
#else
  status = apr_socket_create(&sock, sa->family, SOCK_STREAM, APR_PROTO_TCP,
                             pool);
#endif
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't create server socket"));
    }

  /* Prevents "socket in use" errors when server is killed and quickly
   * restarted. */
  status = apr_socket_opt_set(sock, APR_SO_REUSEADDR, 1);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't set options on server socket"));
    }

  status = apr_socket_bind(sock, sa);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't bind server socket"));
    }

  status = apr_socket_listen(sock, ACCEPT_BACKLOG);
  if (status)
    {
      return svn_error_wrap_apr(status, _("Can't listen on server socket"));
    }

#if APR_HAS_FORK
  if (run_mode != run_mode_listen_once && !foreground)
    /* ### ignoring errors... */
    apr_proc_detach(APR_PROC_DETACH_DAEMONIZE);

  apr_signal(SIGCHLD, sigchld_handler);
#endif

#ifdef SIGPIPE
  /* Disable SIGPIPE generation for the platforms that have it. */
  apr_signal(SIGPIPE, SIG_IGN);
#endif

#ifdef SIGXFSZ
  /* Disable SIGXFSZ generation for the platforms that have it, otherwise
   * working with large files when compiled against an APR that doesn't have
   * large file support will crash the program, which is uncool. */
  apr_signal(SIGXFSZ, SIG_IGN);
#endif

  if (pid_filename)
    SVN_ERR(write_pid_file(pid_filename, pool));

#ifdef WIN32
  status = apr_os_sock_get(&winservice_svnserve_accept_socket, sock);
  if (status)
    winservice_svnserve_accept_socket = INVALID_SOCKET;

  /* At this point, the service is "running".  Notify the SCM. */
  if (run_mode == run_mode_service)
    winservice_running();
#endif

  /* Configure FS caches for maximum efficiency with svnserve.
   * For pre-forked (i.e. multi-processed) mode of operation,
   * keep the per-process caches smaller than the default.
   * Also, apply the respective command line parameters, if given. */
  {
    svn_cache_config_t settings = *svn_cache_config_get();

    if (params.memory_cache_size != -1)
      settings.cache_size = params.memory_cache_size;

    settings.single_threaded = TRUE;
    if (handling_mode == connection_mode_thread)
      {
#if APR_HAS_THREADS
        settings.single_threaded = FALSE;
#else
        /* No requests will be processed at all
         * (see "switch (handling_mode)" code further down).
         * But if they were, some other synchronization code
         * would need to take care of securing integrity of
         * APR-based structures. That would include our caches.
         */
#endif
      }

    svn_cache_config_set(&settings);
  }

#if APR_HAS_THREADS
  SVN_ERR(svn_root_pools__create(&connection_pools));

  if (handling_mode == connection_mode_thread)
    {
      /* create the thread pool with a valid range of threads */
      if (max_thread_count < 1)
        max_thread_count = 1;
      if (min_thread_count > max_thread_count)
        min_thread_count = max_thread_count;

      status = apr_thread_pool_create(&threads,
                                      min_thread_count,
                                      max_thread_count,
                                      pool);
      if (status)
        {
          return svn_error_wrap_apr(status, _("Can't create thread pool"));
        }

      /* let idle threads linger for a while in case more requests are
         coming in */
      apr_thread_pool_idle_wait_set(threads, THREADPOOL_THREAD_IDLE_LIMIT);

      /* don't queue requests unless we reached the worker thread limit */
      apr_thread_pool_threshold_set(threads, 0);
    }
  else
    {
      threads = NULL;
    }
#endif

  while (1)
    {
      connection_t *connection = NULL;
      SVN_ERR(accept_connection(&connection, sock, &params, handling_mode,
                                pool));
      if (run_mode == run_mode_listen_once)
        {
          err = serve_socket(connection, connection->pool);
          close_connection(connection);
          return err;
        }

      switch (handling_mode)
        {
        case connection_mode_fork:
#if APR_HAS_FORK
          status = apr_proc_fork(&proc, connection->pool);
          if (status == APR_INCHILD)
            {
              /* the child won't listen on the main server's socket */
              apr_socket_close(sock);

              /* serve_socket() logs any error it returns, so ignore it. */
              svn_error_clear(serve_socket(connection, connection->pool));
              close_connection(connection);
              return SVN_NO_ERROR;
            }
          else if (status != APR_INPARENT)
            {
              err = svn_error_wrap_apr(status, "apr_proc_fork");
              logger__log_error(params.logger, err, NULL, NULL);
              svn_error_clear(err);
            }
#endif
          break;

        case connection_mode_thread:
          /* Create a detached thread for each connection.  That's not a
             particularly sophisticated strategy for a threaded server, it's
             little different from forking one process per connection. */
#if APR_HAS_THREADS
          attach_connection(connection);

          status = apr_thread_pool_push(threads, serve_thread, connection,
                                        0, NULL);
          if (status)
            {
              return svn_error_wrap_apr(status, _("Can't push task"));
            }
#endif
          break;

        case connection_mode_single:
          /* Serve one connection at a time. */
          /* serve_socket() logs any error it returns, so ignore it. */
          svn_error_clear(serve_socket(connection, connection->pool));
        }

      close_connection(connection);
    }

  /* NOTREACHED */
}
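
A sub_main() with the contract described in the comment above (leave *exit_code untouched on success, set it and return SVN_NO_ERROR for an already-reported failure, or return an error to be displayed) is normally driven by a thin main() wrapper. A sketch along the lines of the usual svn_cmdline pattern, not necessarily identical to svnserve's actual main():

/* Hypothetical sketch of the wrapper that would call sub_main(). */
int
main(int argc, const char *argv[])
{
  apr_pool_t *pool;
  int exit_code = EXIT_SUCCESS;
  svn_error_t *err;

  /* Standard command-line initialization. */
  if (svn_cmdline_init("svnserve", stderr) != EXIT_SUCCESS)
    return EXIT_FAILURE;

  pool = svn_pool_create(NULL);

  err = sub_main(&exit_code, argc, argv, pool);
  if (err)
    exit_code = svn_cmdline_handle_exit_error(err, NULL, "svnserve: ");

  svn_pool_destroy(pool);
  return exit_code;
}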