bool w_start_listener(const char *path) { struct sockaddr_un un; pthread_t thr; pthread_attr_t attr; pthread_mutexattr_t mattr; struct sigaction sa; sigset_t sigset; #ifdef HAVE_LIBGIMLI_H volatile struct gimli_heartbeat *hb = NULL; #endif struct timeval tv; void *ignored; int n_clients = 0; listener_thread = pthread_self(); pthread_mutexattr_init(&mattr); pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&w_client_lock, &mattr); pthread_mutexattr_destroy(&mattr); #ifdef HAVE_LIBGIMLI_H hb = gimli_heartbeat_attach(); #endif #if defined(HAVE_KQUEUE) || defined(HAVE_FSEVENTS) { struct rlimit limit; int mib[2] = { CTL_KERN, #ifdef KERN_MAXFILESPERPROC KERN_MAXFILESPERPROC #else KERN_MAXFILES #endif }; int maxperproc; size_t len; len = sizeof(maxperproc); sysctl(mib, 2, &maxperproc, &len, NULL, 0); getrlimit(RLIMIT_NOFILE, &limit); w_log(W_LOG_ERR, "file limit is %" PRIu64 " kern.maxfilesperproc=%i\n", limit.rlim_cur, maxperproc); if (limit.rlim_cur != RLIM_INFINITY && maxperproc > 0 && limit.rlim_cur < (rlim_t)maxperproc) { limit.rlim_cur = maxperproc; if (setrlimit(RLIMIT_NOFILE, &limit)) { w_log(W_LOG_ERR, "failed to raise limit to %" PRIu64 " (%s).\n", limit.rlim_cur, strerror(errno)); } else { w_log(W_LOG_ERR, "raised file limit to %" PRIu64 "\n", limit.rlim_cur); } } getrlimit(RLIMIT_NOFILE, &limit); #ifndef HAVE_FSEVENTS if (limit.rlim_cur < 10240) { w_log(W_LOG_ERR, "Your file descriptor limit is very low (%" PRIu64 "), " "please consult the watchman docs on raising the limits\n", limit.rlim_cur); } #endif } #endif proc_pid = (int)getpid(); if (gettimeofday(&tv, NULL) == -1) { w_log(W_LOG_ERR, "gettimeofday failed: %s\n", strerror(errno)); return false; } proc_start_time = (uint64_t)tv.tv_sec; if (strlen(path) >= sizeof(un.sun_path) - 1) { w_log(W_LOG_ERR, "%s: path is too long\n", path); return false; } signal(SIGPIPE, SIG_IGN); /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without restarting * syscalls */ memset(&sa, 
0, sizeof(sa)); sa.sa_handler = wakeme; sa.sa_flags = 0; sigaction(SIGUSR1, &sa, NULL); sigaction(SIGCHLD, &sa, NULL); // Block SIGCHLD everywhere sigemptyset(&sigset); sigaddset(&sigset, SIGCHLD); sigprocmask(SIG_BLOCK, &sigset, NULL); pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); listener_fd = socket(PF_LOCAL, SOCK_STREAM, 0); if (listener_fd == -1) { w_log(W_LOG_ERR, "socket: %s\n", strerror(errno)); return false; } un.sun_family = PF_LOCAL; strcpy(un.sun_path, path); unlink(path); if (bind(listener_fd, (struct sockaddr*)&un, sizeof(un)) != 0) { w_log(W_LOG_ERR, "bind(%s): %s\n", path, strerror(errno)); close(listener_fd); return false; } if (listen(listener_fd, 200) != 0) { w_log(W_LOG_ERR, "listen(%s): %s\n", path, strerror(errno)); close(listener_fd); return false; } w_set_cloexec(listener_fd); if (pthread_create(&reaper_thread, NULL, child_reaper, NULL)) { w_log(W_LOG_FATAL, "pthread_create(reaper): %s\n", strerror(errno)); return false; } if (!clients) { clients = w_ht_new(2, &client_hash_funcs); } w_state_load(); #ifdef HAVE_LIBGIMLI_H if (hb) { gimli_heartbeat_set(hb, GIMLI_HB_RUNNING); } #endif w_set_nonblock(listener_fd); // Now run the dispatch while (!stopping) { int client_fd; struct watchman_client *client; struct pollfd pfd; int bufsize; #ifdef HAVE_LIBGIMLI_H if (hb) { gimli_heartbeat_set(hb, GIMLI_HB_RUNNING); } #endif pfd.events = POLLIN; pfd.fd = listener_fd; if (poll(&pfd, 1, 10000) < 1 || (pfd.revents & POLLIN) == 0) { continue; } #ifdef HAVE_ACCEPT4 client_fd = accept4(listener_fd, NULL, 0, SOCK_CLOEXEC); #else client_fd = accept(listener_fd, NULL, 0); #endif if (client_fd == -1) { continue; } w_set_cloexec(client_fd); bufsize = WATCHMAN_IO_BUF_SIZE; setsockopt(client_fd, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize)); client = calloc(1, sizeof(*client)); client->fd = client_fd; w_log(W_LOG_DBG, "accepted client %p fd=%d\n", client, client_fd); if (!w_json_buffer_init(&client->reader)) { // FIXME: 
error handling } if (!w_json_buffer_init(&client->writer)) { // FIXME: error handling } if (pipe(client->ping)) { // FIXME: error handling } client->subscriptions = w_ht_new(2, &subscription_hash_funcs); w_set_cloexec(client->ping[0]); w_set_nonblock(client->ping[0]); w_set_cloexec(client->ping[1]); w_set_nonblock(client->ping[1]); pthread_mutex_lock(&w_client_lock); w_ht_set(clients, client->fd, w_ht_ptr_val(client)); pthread_mutex_unlock(&w_client_lock); // Start a thread for the client. // We used to use libevent for this, but we have // a low volume of concurrent clients and the json // parse/encode APIs are not easily used in a non-blocking // server architecture. if (pthread_create(&thr, &attr, client_thread, client)) { // It didn't work out, sorry! pthread_mutex_lock(&w_client_lock); w_ht_del(clients, client->fd); pthread_mutex_unlock(&w_client_lock); } } pthread_attr_destroy(&attr); /* close out some resources to persuade valgrind to run clean */ close(listener_fd); listener_fd = -1; // Wait for clients, waking any sleeping clients up in the process do { w_ht_iter_t iter; pthread_mutex_lock(&w_client_lock); n_clients = w_ht_size(clients); if (w_ht_first(clients, &iter)) do { struct watchman_client *client = w_ht_val_ptr(iter.value); ignore_result(write(client->ping[1], "a", 1)); } while (w_ht_next(clients, &iter)); pthread_mutex_unlock(&w_client_lock); w_log(W_LOG_ERR, "waiting for %d clients to terminate\n", n_clients); usleep(2000); } while (n_clients > 0); w_root_free_watched_roots(); pthread_join(reaper_thread, &ignored); cfg_shutdown(); return true; }
static void run_service(void) { int fd; bool res; #ifndef _WIN32 // Before we redirect stdin/stdout to the log files, move any inetd-provided // socket to a different descriptor number. if (inetd_style) { if (!w_listener_prep_inetd()) { return; } } #endif // redirect std{in,out,err} fd = open("/dev/null", O_RDONLY); if (fd != -1) { ignore_result(dup2(fd, STDIN_FILENO)); close(fd); } fd = open(log_name, O_WRONLY|O_APPEND|O_CREAT, 0600); if (fd != -1) { ignore_result(dup2(fd, STDOUT_FILENO)); ignore_result(dup2(fd, STDERR_FILENO)); close(fd); } if (!lock_pidfile()) { return; } #ifndef _WIN32 /* we are the child, let's set things up */ ignore_result(chdir("/")); #endif w_set_thread_name("listener"); { char hostname[256]; gethostname(hostname, sizeof(hostname)); hostname[sizeof(hostname) - 1] = '\0'; w_log(W_LOG_ERR, "Watchman %s %s starting up on %s\n", PACKAGE_VERSION, #ifdef WATCHMAN_BUILD_INFO WATCHMAN_BUILD_INFO, #else "<no build info set>", #endif hostname); } #ifndef _WIN32 // Block SIGCHLD by default; we only want it to be delivered // to the reaper thread and only when it is ready to reap. // This MUST happen before we spawn any threads so that they // can pick up our default blocked signal mask. { sigset_t sigset; sigemptyset(&sigset); sigaddset(&sigset, SIGCHLD); sigprocmask(SIG_BLOCK, &sigset, NULL); } #endif watchman_watcher_init(); w_clockspec_init(); // Start the reaper before we load any state; the state may // have triggers associated with it which may spawn processes w_start_reaper(); w_state_load(); res = w_start_listener(sock_name); w_root_free_watched_roots(); if (res) { exit(0); } exit(1); }
/* Start the watchman listener service and run its accept loop until
 * shutdown.  On unix this listens on the domain socket created by
 * get_listener_socket(path); on Windows it runs named_pipe_accept_loop(path).
 * Returns false on a fatal setup error, true after a clean shutdown. */
bool w_start_listener(const char *path)
{
  pthread_mutexattr_t mattr;
#ifndef _WIN32
  struct sigaction sa;
  sigset_t sigset;
#endif
  void *ignored;
#ifdef HAVE_LIBGIMLI_H
  volatile struct gimli_heartbeat *hb = NULL;
#endif
  struct timeval tv;
  int n_clients = 0;

  listener_thread = pthread_self();

  /* Recursive mutex: the client lock may be re-acquired on the same thread */
  pthread_mutexattr_init(&mattr);
  pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&w_client_lock, &mattr);
  pthread_mutexattr_destroy(&mattr);

#ifdef HAVE_LIBGIMLI_H
  hb = gimli_heartbeat_attach();
#endif

#if defined(HAVE_KQUEUE) || defined(HAVE_FSEVENTS)
  /* kqueue/fsevents use one fd per watched item; try to raise RLIMIT_NOFILE
   * toward the kernel per-process maximum.  OpenBSD has no
   * KERN_MAXFILESPERPROC sysctl, so rlim_max is used as the ceiling there. */
  {
    struct rlimit limit;
# ifndef __OpenBSD__
    int mib[2] = { CTL_KERN,
#  ifdef KERN_MAXFILESPERPROC
      KERN_MAXFILESPERPROC
#  else
      KERN_MAXFILES
#  endif
    };
# endif
    int maxperproc;

    getrlimit(RLIMIT_NOFILE, &limit);

# ifndef __OpenBSD__
    size_t len;

    len = sizeof(maxperproc);
    sysctl(mib, 2, &maxperproc, &len, NULL, 0);
    w_log(W_LOG_ERR, "file limit is %" PRIu64
        " kern.maxfilesperproc=%i\n",
        limit.rlim_cur, maxperproc);
# else
    maxperproc = limit.rlim_max;
    w_log(W_LOG_ERR, "openfiles-cur is %" PRIu64
        " openfiles-max=%i\n",
        limit.rlim_cur, maxperproc);
# endif

    if (limit.rlim_cur != RLIM_INFINITY &&
        maxperproc > 0 &&
        limit.rlim_cur < (rlim_t)maxperproc) {
      limit.rlim_cur = maxperproc;

      if (setrlimit(RLIMIT_NOFILE, &limit)) {
        w_log(W_LOG_ERR,
            "failed to raise limit to %" PRIu64 " (%s).\n",
            limit.rlim_cur, strerror(errno));
      } else {
        w_log(W_LOG_ERR,
            "raised file limit to %" PRIu64 "\n",
            limit.rlim_cur);
      }
    }

    getrlimit(RLIMIT_NOFILE, &limit);
#ifndef HAVE_FSEVENTS
    if (limit.rlim_cur < 10240) {
      w_log(W_LOG_ERR,
          "Your file descriptor limit is very low (%" PRIu64 "), "
          "please consult the watchman docs on raising the limits\n",
          limit.rlim_cur);
    }
#endif
  }
#endif

  proc_pid = (int)getpid();
  if (gettimeofday(&tv, NULL) == -1) {
    w_log(W_LOG_ERR, "gettimeofday failed: %s\n", strerror(errno));
    return false;
  }
  proc_start_time = (uint64_t)tv.tv_sec;

#ifndef _WIN32
  signal(SIGPIPE, SIG_IGN);

  /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without restarting
   * syscalls */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = wakeme;
  sa.sa_flags = 0;
  sigaction(SIGUSR1, &sa, NULL);
  sigaction(SIGCHLD, &sa, NULL);

  // Block SIGCHLD everywhere; only the reaper thread unblocks it
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGCHLD);
  sigprocmask(SIG_BLOCK, &sigset, NULL);

  listener_fd = get_listener_socket(path);
  if (listener_fd == -1) {
    return false;
  }
  w_set_cloexec(listener_fd);
#endif

  if (pthread_create(&reaper_thread, NULL, child_reaper, NULL)) {
    w_log(W_LOG_FATAL, "pthread_create(reaper): %s\n", strerror(errno));
    return false;
  }

  if (!clients) {
    clients = w_ht_new(2, &client_hash_funcs);
  }

  w_state_load();

#ifdef HAVE_LIBGIMLI_H
  /* Under gimli supervision the monitor handles fatal signals; otherwise
   * install our own handlers */
  if (hb) {
    gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
  } else {
    w_setup_signal_handlers();
  }
#else
  w_setup_signal_handlers();
#endif
  /* NOTE(review): on _WIN32 listener_fd is never assigned above — presumably
   * this call is a no-op or benign there; verify against the Win32 build */
  w_set_nonblock(listener_fd);

  // Now run the dispatch
#ifndef _WIN32
  accept_loop();
#else
  named_pipe_accept_loop(path);
#endif

#ifndef _WIN32
  /* close out some resources to persuade valgrind to run clean */
  close(listener_fd);
  listener_fd = -1;
#endif

  // Wait for clients, waking any sleeping clients up in the process
  do {
    w_ht_iter_t iter;

    pthread_mutex_lock(&w_client_lock);
    n_clients = w_ht_size(clients);

    if (w_ht_first(clients, &iter)) do {
      struct watchman_client *client = w_ht_val_ptr(iter.value);
      /* nudge the client thread so it notices the shutdown */
      w_event_set(client->ping);
    } while (w_ht_next(clients, &iter));

    pthread_mutex_unlock(&w_client_lock);

    w_log(W_LOG_ERR, "waiting for %d clients to terminate\n", n_clients);
    usleep(2000);
  } while (n_clients > 0);

  w_root_free_watched_roots();

  pthread_join(reaper_thread, &ignored);
  cfg_shutdown();

  return true;
}
bool w_start_listener(const char *path) { struct sockaddr_un un; pthread_t thr; pthread_attr_t attr; pthread_mutexattr_t mattr; struct sigaction sa; sigset_t sigset; #ifdef HAVE_LIBGIMLI_H volatile struct gimli_heartbeat *hb = NULL; #endif pthread_mutexattr_init(&mattr); pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE); pthread_mutex_init(&w_client_lock, &mattr); pthread_mutexattr_destroy(&mattr); #ifdef HAVE_LIBGIMLI_H hb = gimli_heartbeat_attach(); #endif #ifdef HAVE_KQUEUE { struct rlimit limit; int mib[2] = { CTL_KERN, #ifdef KERN_MAXFILESPERPROC KERN_MAXFILESPERPROC #else KERN_MAXFILES #endif }; int maxperproc; size_t len; len = sizeof(maxperproc); sysctl(mib, 2, &maxperproc, &len, NULL, 0); getrlimit(RLIMIT_NOFILE, &limit); w_log(W_LOG_ERR, "file limit is %" PRIu64 " kern.maxfilesperproc=%i\n", limit.rlim_cur, maxperproc); if (limit.rlim_cur != RLIM_INFINITY && maxperproc > 0 && limit.rlim_cur < (rlim_t)maxperproc) { limit.rlim_cur = maxperproc; if (setrlimit(RLIMIT_NOFILE, &limit)) { w_log(W_LOG_ERR, "failed to raise limit to %" PRIu64 " (%s).\n", limit.rlim_cur, strerror(errno)); } else { w_log(W_LOG_ERR, "raised file limit to %" PRIu64 "\n", limit.rlim_cur); } } getrlimit(RLIMIT_NOFILE, &limit); if (limit.rlim_cur < 10240) { w_log(W_LOG_ERR, "Your file descriptor limit is very low (%" PRIu64 "), " "please consult the watchman docs on raising the limits\n", limit.rlim_cur); } } #endif if (strlen(path) >= sizeof(un.sun_path) - 1) { w_log(W_LOG_ERR, "%s: path is too long\n", path); return false; } signal(SIGPIPE, SIG_IGN); /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without restarting * syscalls */ memset(&sa, 0, sizeof(sa)); sa.sa_handler = wakeme; sa.sa_flags = 0; sigaction(SIGUSR1, &sa, NULL); sigaction(SIGCHLD, &sa, NULL); // Block SIGCHLD everywhere sigemptyset(&sigset); sigaddset(&sigset, SIGCHLD); sigprocmask(SIG_BLOCK, &sigset, NULL); pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); 
listener_fd = socket(PF_LOCAL, SOCK_STREAM, 0); if (listener_fd == -1) { w_log(W_LOG_ERR, "socket: %s\n", strerror(errno)); return false; } un.sun_family = PF_LOCAL; strcpy(un.sun_path, path); if (bind(listener_fd, (struct sockaddr*)&un, sizeof(un)) != 0) { w_log(W_LOG_ERR, "bind(%s): %s\n", path, strerror(errno)); close(listener_fd); return false; } if (listen(listener_fd, 200) != 0) { w_log(W_LOG_ERR, "listen(%s): %s\n", path, strerror(errno)); close(listener_fd); return false; } w_set_cloexec(listener_fd); if (pthread_create(&reaper_thread, NULL, child_reaper, NULL)) { w_log(W_LOG_FATAL, "pthread_create(reaper): %s\n", strerror(errno)); return false; } if (!clients) { clients = w_ht_new(2, &client_hash_funcs); } // Wire up the command handlers register_commands(commands); w_state_load(); #ifdef HAVE_LIBGIMLI_H if (hb) { gimli_heartbeat_set(hb, GIMLI_HB_RUNNING); } w_set_nonblock(listener_fd); #endif // Now run the dispatch while (true) { int client_fd; struct watchman_client *client; struct pollfd pfd; int bufsize; #ifdef HAVE_LIBGIMLI_H if (hb) { gimli_heartbeat_set(hb, GIMLI_HB_RUNNING); } #endif pfd.events = POLLIN; pfd.fd = listener_fd; poll(&pfd, 1, 10000); client_fd = accept(listener_fd, NULL, 0); if (client_fd == -1) { continue; } w_set_cloexec(client_fd); bufsize = WATCHMAN_IO_BUF_SIZE; setsockopt(client_fd, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize)); client = calloc(1, sizeof(*client)); client->fd = client_fd; if (!w_json_buffer_init(&client->reader)) { // FIXME: error handling } if (!w_json_buffer_init(&client->writer)) { // FIXME: error handling } if (pipe(client->ping)) { // FIXME: error handling } client->subscriptions = w_ht_new(2, &subscription_hash_funcs); w_set_cloexec(client->ping[0]); w_set_nonblock(client->ping[0]); w_set_cloexec(client->ping[1]); w_set_nonblock(client->ping[1]); pthread_mutex_lock(&w_client_lock); w_ht_set(clients, client->fd, (w_ht_val_t)client); pthread_mutex_unlock(&w_client_lock); // Start a thread for the client. 
// We used to use libevent for this, but we have // a low volume of concurrent clients and the json // parse/encode APIs are not easily used in a non-blocking // server architecture. if (pthread_create(&thr, &attr, client_thread, client)) { // It didn't work out, sorry! pthread_mutex_lock(&w_client_lock); w_ht_del(clients, client->fd); pthread_mutex_unlock(&w_client_lock); } } pthread_attr_destroy(&attr); return true; }
static void run_service(void) { int fd; bool res; #ifndef _WIN32 // Before we redirect stdin/stdout to the log files, move any inetd-provided // socket to a different descriptor number. if (inetd_style) { if (!w_listener_prep_inetd()) { return; } } #endif // redirect std{in,out,err} fd = open("/dev/null", O_RDONLY); if (fd != -1) { ignore_result(dup2(fd, STDIN_FILENO)); close(fd); } fd = open(log_name, O_WRONLY|O_APPEND|O_CREAT, 0600); if (fd != -1) { ignore_result(dup2(fd, STDOUT_FILENO)); ignore_result(dup2(fd, STDERR_FILENO)); close(fd); } if (!lock_pidfile()) { return; } #ifndef _WIN32 /* we are the child, let's set things up */ ignore_result(chdir("/")); #endif w_set_thread_name("listener"); { char hostname[256]; gethostname(hostname, sizeof(hostname)); hostname[sizeof(hostname) - 1] = '\0'; w_log(W_LOG_ERR, "Watchman %s %s starting up on %s\n", PACKAGE_VERSION, #ifdef WATCHMAN_BUILD_INFO WATCHMAN_BUILD_INFO, #else "<no build info set>", #endif hostname); } watchman_watcher_init(); w_clockspec_init(); w_state_load(); w_start_reaper(); res = w_start_listener(sock_name); w_root_free_watched_roots(); if (res) { exit(0); } exit(1); }