/* Walk `dir`, handing every file directly inside it to the query
 * engine, then descend into child dirs until `depth` levels have
 * been consumed.  Returns false as soon as w_query_process_file
 * (or a recursive call) asks us to stop. */
static bool dir_generator(
    w_query *query,
    w_root_t *root,
    struct w_query_ctx *ctx,
    struct watchman_dir *dir,
    uint32_t depth)
{
  w_ht_iter_t iter;

  // Files that live directly in this dir
  if (w_ht_first(dir->files, &iter)) {
    do {
      struct watchman_file *f = w_ht_val_ptr(iter.value);
      if (!w_query_process_file(query, ctx, f)) {
        return false;
      }
    } while (w_ht_next(dir->files, &iter));
  }

  // Recurse into children, unless the depth budget is exhausted
  if (depth > 0 && w_ht_first(dir->dirs, &iter)) {
    do {
      struct watchman_dir *kid = w_ht_val_ptr(iter.value);
      if (!dir_generator(query, root, ctx, kid, depth - 1)) {
        return false;
      }
    } while (w_ht_next(dir->dirs, &iter));
  }

  return true;
}
// The ignore logic is to stop recursion of grandchildren or later // generations than an ignored dir. We allow the direct children // of an ignored dir, but no further down. bool w_is_ignored(w_root_t *root, const char *path, uint32_t pathlen) { w_ht_iter_t i; if (w_ht_first(root->ignore_dirs, &i)) do { w_string_t *ign = w_ht_val_ptr(i.value); if (pathlen < ign->len) { continue; } if (memcmp(ign->buf, path, ign->len) == 0) { if (ign->len == pathlen) { // Exact match return true; } if (path[ign->len] == WATCHMAN_DIR_SEP) { // prefix match return true; } } } while (w_ht_next(root->ignore_dirs, &i)); if (w_ht_first(root->ignore_vcs, &i)) do { w_string_t *ign = w_ht_val_ptr(i.value); if (pathlen <= ign->len) { continue; } if (memcmp(ign->buf, path, ign->len) == 0) { // prefix matches, but it isn't a parent if (path[ign->len] != WATCHMAN_DIR_SEP) { continue; } // If we find any '/' in the remainder of the path, then we should // ignore it. Otherwise we allow it. path += ign->len + 1; pathlen -= ign->len + 1; if (memchr(path, WATCHMAN_DIR_SEP, pathlen)) { return true; } } } while (w_ht_next(root->ignore_vcs, &i)); return false; }
void w_log_to_clients(int level, const char *buf) { json_t *json = NULL; w_ht_iter_t iter; if (!clients) { return; } pthread_mutex_lock(&w_client_lock); if (w_ht_first(clients, &iter)) do { struct watchman_client *client = w_ht_val_ptr(iter.value); if (client->log_level != W_LOG_OFF && client->log_level >= level) { json = make_response(); if (json) { set_prop(json, "log", json_string_nocheck(buf)); if (!enqueue_response(client, json, true)) { json_decref(json); } } } } while (w_ht_next(clients, &iter)); pthread_mutex_unlock(&w_client_lock); }
/* debug-show-cursors: report the clock value recorded against each
 * named cursor on the resolved root. */
static void cmd_debug_show_cursors(struct watchman_client *client,
    json_t *args)
{
  w_root_t *root;
  json_t *resp, *cursor_obj;
  w_ht_iter_t iter;

  /* resolve the root */
  if (json_array_size(args) != 2) {
    send_error_response(client,
        "wrong number of arguments for 'debug-show-cursors'");
    return;
  }

  root = resolve_root_or_err(client, args, 1, false);
  if (!root) {
    return;
  }

  resp = make_response();

  w_root_lock(root);
  cursor_obj = json_object_of_size(w_ht_size(root->cursors));
  if (w_ht_first(root->cursors, &iter)) {
    do {
      w_string_t *name = w_ht_val_ptr(iter.key);
      set_prop(cursor_obj, name->buf, json_integer(iter.value));
    } while (w_ht_next(root->cursors, &iter));
  }
  w_root_unlock(root);

  set_prop(resp, "cursors", cursor_obj);
  send_and_dispose_response(client, resp);
  w_root_delref(root);
}
/* Return true if at least one connected client is subscribed to
 * log messages at `level` or chattier. */
bool w_should_log_to_clients(int level)
{
  w_ht_iter_t iter;
  bool interested = false;

  pthread_mutex_lock(&w_client_lock);

  if (!clients) {
    pthread_mutex_unlock(&w_client_lock);
    return false;
  }

  if (w_ht_first(clients, &iter)) {
    do {
      struct watchman_client *c = w_ht_val_ptr(iter.value);
      if (c->log_level != W_LOG_OFF && c->log_level >= level) {
        interested = true;
        break; // one taker is enough
      }
    } while (w_ht_next(clients, &iter));
  }

  pthread_mutex_unlock(&w_client_lock);

  return interested;
}
/* Drain pending inotify events from the watch descriptor and feed
 * each one to process_inotify_event().
 *
 * Returns false if the read was interrupted (EINTR) or the root was
 * cancelled mid-stream; returns true once the buffer is drained and
 * stale pending_move entries have been aged out.
 *
 * NOTE(review): any read error other than EINTR goes through
 * W_LOG_FATAL (presumably aborts the process — confirm).  If
 * state->infd is non-blocking, EAGAIN would also land on that fatal
 * path — verify against the caller's polling contract. */
static bool inot_root_consume_notify(watchman_global_watcher_t watcher,
    w_root_t *root, struct watchman_pending_collection *coll)
{
  struct inot_root_state *state = root->watch;
  struct inotify_event *ine;
  char *iptr;
  int n;
  struct timeval now;
  unused_parameter(watcher);

  // Fill ibuf with as many packed inotify_event records as will fit
  n = read(state->infd, &state->ibuf, sizeof(state->ibuf));
  if (n == -1) {
    if (errno == EINTR) {
      return false;
    }
    w_log(W_LOG_FATAL, "read(%d, %zu): error %s\n",
        state->infd, sizeof(state->ibuf), strerror(errno));
  }
  w_log(W_LOG_DBG, "inotify read: returned %d.\n", n);
  gettimeofday(&now, NULL);

  // Records are variable length: a fixed header followed by ine->len
  // bytes of name, so the cursor advances by sizeof(*ine) + ine->len
  for (iptr = state->ibuf; iptr < state->ibuf + n;
      iptr = iptr + sizeof(*ine) + ine->len) {
    ine = (struct inotify_event*)iptr;

    process_inotify_event(root, coll, ine, now);

    if (root->cancelled) {
      // Root went away underneath us; abandon the rest of the buffer
      return false;
    }
  }

  // It is possible that we can accumulate a set of pending_move
  // structs in move_map.  This happens when a directory is moved
  // outside of the watched tree; we get the MOVE_FROM but never
  // get the MOVE_TO with the same cookie.  To avoid leaking these,
  // we'll age out the move_map after processing a full set of
  // inotify events.  We age out rather than delete all because
  // the MOVE_TO may yet be waiting to read in another go around.
  // We allow a somewhat arbitrary but practical grace period to
  // observe the corresponding MOVE_TO.
  if (w_ht_size(state->move_map) > 0) {
    w_ht_iter_t iter;
    if (w_ht_first(state->move_map, &iter)) do {
      struct pending_move *pending = w_ht_val_ptr(iter.value);
      if (now.tv_sec - pending->created > 5 /* seconds */) {
        w_log(W_LOG_DBG,
            "deleting pending move %s (moved outside of watch?)\n",
            pending->name->buf);
        // deleting during iteration; w_ht_iter_del takes the iterator
        // so iteration can continue safely (same idiom used elsewhere
        // in this file)
        w_ht_iter_del(state->move_map, &iter);
      }
    } while (w_ht_next(state->move_map, &iter));
  }

  return true;
}
/* Return a JSON array containing the names of all registered
 * capabilities.
 *
 * Fix: guard the do/while with the w_ht_first() result.  The
 * original entered the loop body unconditionally, dereferencing an
 * uninitialized iterator when the capabilities table was empty;
 * every other iteration in this file uses the guarded
 * `if (w_ht_first(...)) do { ... } while (w_ht_next(...))` idiom. */
json_t *w_capability_get_list(void)
{
  json_t *arr = json_array_of_size(w_ht_size(capabilities));
  w_ht_iter_t iter;

  if (w_ht_first(capabilities, &iter)) {
    do {
      w_string_t *name = w_ht_val_ptr(iter.key);
      json_array_append(arr, w_string_to_json(name));
    } while (w_ht_next(capabilities, &iter));
  }

  return arr;
}
/* Notify every client holding a subscription on `root` that the
 * subscription is cancelled, then remove those subscriptions. */
void w_cancel_subscriptions_for_root(w_root_t *root)
{
  w_ht_iter_t client_iter;

  pthread_mutex_lock(&w_client_lock);
  if (w_ht_first(clients, &client_iter)) do {
    struct watchman_user_client *client = w_ht_val_ptr(client_iter.value);
    w_ht_iter_t sub_iter;

    if (w_ht_first(client->subscriptions, &sub_iter)) do {
      struct watchman_client_subscription *sub =
          w_ht_val_ptr(sub_iter.value);

      if (sub->root != root) {
        continue;
      }

      json_t *response = make_response();
      w_log(W_LOG_ERR,
          "Cancel subscription %.*s for client:stm=%p due to "
          "root cancellation\n",
          sub->name->len, sub->name->buf, client->client.stm);

      set_prop(response, "root", w_string_to_json(root->root_path));
      set_prop(response, "subscription", w_string_to_json(sub->name));
      set_prop(response, "unilateral", json_true());
      set_prop(response, "canceled", json_true());
      if (!enqueue_response(&client->client, response, true)) {
        w_log(W_LOG_DBG, "failed to queue sub cancellation\n");
        json_decref(response);
      }

      // delete while iterating; the iterator-aware delete keeps the
      // subsequent w_ht_next valid
      w_ht_iter_del(client->subscriptions, &sub_iter);
    } while (w_ht_next(client->subscriptions, &sub_iter));
  } while (w_ht_next(clients, &client_iter));
  pthread_mutex_unlock(&w_client_lock);
}
/* Counting variant of the recursive dir walker: feeds files to the
 * query engine, recurses to `depth` levels, and reports the number
 * of files visited through *num_walked (set even on early stop). */
static bool dir_generator(
    w_query *query,
    w_root_t *root,
    struct w_query_ctx *ctx,
    struct watchman_dir *dir,
    uint32_t depth,
    int64_t *num_walked)
{
  w_ht_iter_t iter;
  int64_t walked = 0;
  bool ok = true;

  // Files directly inside this dir; counted even when processing
  // tells us to stop
  if (w_ht_first(dir->files, &iter)) {
    do {
      struct watchman_file *file = w_ht_val_ptr(iter.value);
      ++walked;
      if (!w_query_process_file(query, ctx, file)) {
        ok = false;
        goto done;
      }
    } while (w_ht_next(dir->files, &iter));
  }

  // Recurse; a child's partial count is folded in even if it failed
  if (depth > 0 && w_ht_first(dir->dirs, &iter)) {
    do {
      struct watchman_dir *child = w_ht_val_ptr(iter.value);
      int64_t child_walked = 0;

      ok = dir_generator(query, root, ctx, child, depth - 1,
          &child_walked);
      walked += child_walked;
      if (!ok) {
        goto done;
      }
    } while (w_ht_next(dir->dirs, &iter));
  }

done:
  *num_walked = walked;
  return ok;
}
void w_mark_dead(pid_t pid) { w_root_t *root = NULL; w_ht_iter_t iter; pthread_mutex_lock(&spawn_lock); root = lookup_running_pid(pid); if (!root) { pthread_mutex_unlock(&spawn_lock); return; } delete_running_pid(pid); pthread_mutex_unlock(&spawn_lock); w_log(W_LOG_DBG, "mark_dead: %.*s child pid %d\n", root->root_path->len, root->root_path->buf, (int)pid); /* now walk the cmds and try to find our match */ w_root_lock(root); /* walk the list of triggers, and run their rules */ if (w_ht_first(root->commands, &iter)) do { struct watchman_trigger_command *cmd; cmd = w_ht_val_ptr(iter.value); if (cmd->current_proc != pid) { w_log(W_LOG_DBG, "mark_dead: is [%.*s] %d == %d\n", cmd->triggername->len, cmd->triggername->buf, (int)cmd->current_proc, (int)pid); continue; } /* first mark the process as dead */ cmd->current_proc = 0; if (root->cancelled) { w_log(W_LOG_DBG, "mark_dead: root was cancelled\n"); break; } w_assess_trigger(root, cmd); break; } while (w_ht_next(root->commands, &iter)); w_root_unlock(root); w_root_delref(root); }
void print_command_list_for_help(FILE *where) { uint32_t i = 0, n = w_ht_size(command_funcs); struct watchman_command_handler_def **defs; w_ht_iter_t iter; defs = calloc(n, sizeof(*defs)); if (w_ht_first(command_funcs, &iter)) do { defs[i++] = w_ht_val_ptr(iter.value); } while (w_ht_next(command_funcs, &iter)); qsort(defs, n, sizeof(*defs), compare_def); fprintf(where, "\n\nAvailable commands:\n\n"); for (i = 0; i < n; i++) { fprintf(where, " %s\n", defs[i]->name); } }
/* Wait up to `timeoutms` for any tracked child process to exit.
 * On success, returns true with the child's pid in *pid; the child
 * is removed from child_procs and its handle is closed.
 *
 * Fix: the collection loop now stops at MAXIMUM_WAIT_OBJECTS.  The
 * original wrote past the ends of handles[]/pids[] whenever more
 * than MAXIMUM_WAIT_OBJECTS children were being tracked (and
 * WaitForMultipleObjectsEx cannot take more than that anyway). */
BOOL w_wait_for_any_child(DWORD timeoutms, DWORD *pid)
{
  HANDLE handles[MAXIMUM_WAIT_OBJECTS];
  DWORD pids[MAXIMUM_WAIT_OBJECTS];
  int i = 0;
  w_ht_iter_t iter;
  DWORD res;

  *pid = 0;

  pthread_mutex_lock(&child_proc_lock);
  if (child_procs && w_ht_first(child_procs, &iter)) do {
    HANDLE proc = w_ht_val_ptr(iter.value);
    if (i == MAXIMUM_WAIT_OBJECTS) {
      // Can't pass more than this to a single wait call; the
      // remainder will be picked up on a subsequent invocation.
      break;
    }
    pids[i] = (DWORD)iter.key;
    handles[i++] = proc;
  } while (w_ht_next(child_procs, &iter));
  pthread_mutex_unlock(&w_client_lock == &w_client_lock ?
      &child_proc_lock : &child_proc_lock);

  if (i == 0) {
    return false;
  }

  w_log(W_LOG_DBG, "w_wait_for_any_child: waiting for %d handles\n", i);
  res = WaitForMultipleObjectsEx(i, handles, false, timeoutms, true);

  if (res == WAIT_FAILED) {
    errno = map_win32_err(GetLastError());
    return false;
  }

  // Map the wait result back to an index into our arrays
  if (res < WAIT_OBJECT_0 + i) {
    i = res - WAIT_OBJECT_0;
  } else if (res >= WAIT_ABANDONED_0 && res < WAIT_ABANDONED_0 + i) {
    i = res - WAIT_ABANDONED_0;
  } else {
    // timeout or APC completion; nothing exited
    return false;
  }

  pthread_mutex_lock(&child_proc_lock);
  w_ht_del(child_procs, pids[i]);
  pthread_mutex_unlock(&child_proc_lock);

  CloseHandle(handles[i]);
  *pid = pids[i];
  return true;
}
/* First-generation listener main loop (unix domain socket only).
 * Binds and listens on `path`, then accepts clients in a poll loop,
 * spawning one detached thread per client, until `stopping` is set.
 * Returns false on any setup failure; returns true only after all
 * client threads have drained at shutdown.
 *
 * NOTE(review): the per-client calloc() and the
 * w_json_buffer_init()/pipe() calls are unchecked (see FIXMEs). */
bool w_start_listener(const char *path)
{
  struct sockaddr_un un;
  pthread_t thr;
  pthread_attr_t attr;
  pthread_mutexattr_t mattr;
  struct sigaction sa;
  sigset_t sigset;
#ifdef HAVE_LIBGIMLI_H
  volatile struct gimli_heartbeat *hb = NULL;
#endif
  struct timeval tv;
  void *ignored;
  int n_clients = 0;

  listener_thread = pthread_self();

  // w_client_lock is made recursive so the same thread may
  // re-acquire it in nested code paths
  pthread_mutexattr_init(&mattr);
  pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&w_client_lock, &mattr);
  pthread_mutexattr_destroy(&mattr);

#ifdef HAVE_LIBGIMLI_H
  hb = gimli_heartbeat_attach();
#endif

#if defined(HAVE_KQUEUE) || defined(HAVE_FSEVENTS)
  {
    struct rlimit limit;
    int mib[2] = { CTL_KERN,
#ifdef KERN_MAXFILESPERPROC
      KERN_MAXFILESPERPROC
#else
      KERN_MAXFILES
#endif
    };
    int maxperproc;
    size_t len;

    len = sizeof(maxperproc);
    sysctl(mib, 2, &maxperproc, &len, NULL, 0);
    getrlimit(RLIMIT_NOFILE, &limit);
    w_log(W_LOG_ERR, "file limit is %" PRIu64
        " kern.maxfilesperproc=%i\n",
        limit.rlim_cur, maxperproc);

    // Try to raise the soft fd limit up to the kernel per-process cap
    if (limit.rlim_cur != RLIM_INFINITY &&
        maxperproc > 0 &&
        limit.rlim_cur < (rlim_t)maxperproc) {
      limit.rlim_cur = maxperproc;

      if (setrlimit(RLIMIT_NOFILE, &limit)) {
        w_log(W_LOG_ERR,
          "failed to raise limit to %" PRIu64 " (%s).\n",
          limit.rlim_cur, strerror(errno));
      } else {
        w_log(W_LOG_ERR,
            "raised file limit to %" PRIu64 "\n",
            limit.rlim_cur);
      }
    }

    getrlimit(RLIMIT_NOFILE, &limit);
#ifndef HAVE_FSEVENTS
    if (limit.rlim_cur < 10240) {
      w_log(W_LOG_ERR,
          "Your file descriptor limit is very low (%" PRIu64 "), "
          "please consult the watchman docs on raising the limits\n",
          limit.rlim_cur);
    }
#endif
  }
#endif

  proc_pid = (int)getpid();
  if (gettimeofday(&tv, NULL) == -1) {
    w_log(W_LOG_ERR, "gettimeofday failed: %s\n", strerror(errno));
    return false;
  }
  proc_start_time = (uint64_t)tv.tv_sec;

  // Ensure the path fits in sun_path before the strcpy below
  if (strlen(path) >= sizeof(un.sun_path) - 1) {
    w_log(W_LOG_ERR, "%s: path is too long\n", path);
    return false;
  }

  signal(SIGPIPE, SIG_IGN);

  /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without
   * restarting syscalls */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = wakeme;
  sa.sa_flags = 0;
  sigaction(SIGUSR1, &sa, NULL);
  sigaction(SIGCHLD, &sa, NULL);

  // Block SIGCHLD everywhere
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGCHLD);
  sigprocmask(SIG_BLOCK, &sigset, NULL);

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  listener_fd = socket(PF_LOCAL, SOCK_STREAM, 0);
  if (listener_fd == -1) {
    w_log(W_LOG_ERR, "socket: %s\n", strerror(errno));
    return false;
  }

  un.sun_family = PF_LOCAL;
  strcpy(un.sun_path, path); // length validated above
  unlink(path);

  if (bind(listener_fd, (struct sockaddr*)&un, sizeof(un)) != 0) {
    w_log(W_LOG_ERR, "bind(%s): %s\n", path, strerror(errno));
    close(listener_fd);
    return false;
  }

  if (listen(listener_fd, 200) != 0) {
    w_log(W_LOG_ERR, "listen(%s): %s\n", path, strerror(errno));
    close(listener_fd);
    return false;
  }

  w_set_cloexec(listener_fd);

  if (pthread_create(&reaper_thread, NULL, child_reaper, NULL)) {
    w_log(W_LOG_FATAL, "pthread_create(reaper): %s\n", strerror(errno));
    return false;
  }

  if (!clients) {
    clients = w_ht_new(2, &client_hash_funcs);
  }

  w_state_load();

#ifdef HAVE_LIBGIMLI_H
  if (hb) {
    gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
  }
#endif
  w_set_nonblock(listener_fd);

  // Now run the dispatch
  while (!stopping) {
    int client_fd;
    struct watchman_client *client;
    struct pollfd pfd;
    int bufsize;

#ifdef HAVE_LIBGIMLI_H
    if (hb) {
      gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
    }
#endif

    // Poll with a 10s timeout so the gimli heartbeat above keeps
    // firing even when no clients connect
    pfd.events = POLLIN;
    pfd.fd = listener_fd;
    if (poll(&pfd, 1, 10000) < 1 || (pfd.revents & POLLIN) == 0) {
      continue;
    }

#ifdef HAVE_ACCEPT4
    client_fd = accept4(listener_fd, NULL, 0, SOCK_CLOEXEC);
#else
    client_fd = accept(listener_fd, NULL, 0);
#endif
    if (client_fd == -1) {
      continue;
    }
    w_set_cloexec(client_fd);
    bufsize = WATCHMAN_IO_BUF_SIZE;
    setsockopt(client_fd, SOL_SOCKET, SO_SNDBUF,
        &bufsize, sizeof(bufsize));

    client = calloc(1, sizeof(*client)); // NOTE(review): unchecked
    client->fd = client_fd;
    w_log(W_LOG_DBG, "accepted client %p fd=%d\n", client, client_fd);

    if (!w_json_buffer_init(&client->reader)) {
      // FIXME: error handling
    }
    if (!w_json_buffer_init(&client->writer)) {
      // FIXME: error handling
    }
    if (pipe(client->ping)) {
      // FIXME: error handling
    }
    client->subscriptions = w_ht_new(2, &subscription_hash_funcs);
    w_set_cloexec(client->ping[0]);
    w_set_nonblock(client->ping[0]);
    w_set_cloexec(client->ping[1]);
    w_set_nonblock(client->ping[1]);

    pthread_mutex_lock(&w_client_lock);
    w_ht_set(clients, client->fd, w_ht_ptr_val(client));
    pthread_mutex_unlock(&w_client_lock);

    // Start a thread for the client.
    // We used to use libevent for this, but we have
    // a low volume of concurrent clients and the json
    // parse/encode APIs are not easily used in a non-blocking
    // server architecture.
    if (pthread_create(&thr, &attr, client_thread, client)) {
      // It didn't work out, sorry!
      pthread_mutex_lock(&w_client_lock);
      w_ht_del(clients, client->fd);
      pthread_mutex_unlock(&w_client_lock);
    }
  }

  pthread_attr_destroy(&attr);

  /* close out some resources to persuade valgrind to run clean */
  close(listener_fd);
  listener_fd = -1;

  // Wait for clients, waking any sleeping clients up in the process
  do {
    w_ht_iter_t iter;

    pthread_mutex_lock(&w_client_lock);
    n_clients = w_ht_size(clients);

    if (w_ht_first(clients, &iter)) do {
      struct watchman_client *client = w_ht_val_ptr(iter.value);
      // a single byte on the ping pipe wakes the client thread
      ignore_result(write(client->ping[1], "a", 1));
    } while (w_ht_next(clients, &iter));

    pthread_mutex_unlock(&w_client_lock);

    w_log(W_LOG_ERR, "waiting for %d clients to terminate\n", n_clients);
    usleep(2000);
  } while (n_clients > 0);

  w_root_free_watched_roots();

  pthread_join(reaper_thread, &ignored);
  cfg_shutdown();

  return true;
}
/* Second-generation listener entry point.  Socket creation has moved
 * into get_listener_socket() and client acceptance into accept_loop()
 * (named_pipe_accept_loop() on Windows); OpenBSD gets its own rlimit
 * probing since it lacks KERN_MAXFILESPERPROC. */
bool w_start_listener(const char *path)
{
  pthread_mutexattr_t mattr;
#ifndef _WIN32
  struct sigaction sa;
  sigset_t sigset;
#endif
  void *ignored;
#ifdef HAVE_LIBGIMLI_H
  volatile struct gimli_heartbeat *hb = NULL;
#endif
  struct timeval tv;
  int n_clients = 0;

  listener_thread = pthread_self();

  // w_client_lock is recursive so a thread may safely re-enter it
  pthread_mutexattr_init(&mattr);
  pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&w_client_lock, &mattr);
  pthread_mutexattr_destroy(&mattr);

#ifdef HAVE_LIBGIMLI_H
  hb = gimli_heartbeat_attach();
#endif

#if defined(HAVE_KQUEUE) || defined(HAVE_FSEVENTS)
  {
    struct rlimit limit;
# ifndef __OpenBSD__
    int mib[2] = { CTL_KERN,
# ifdef KERN_MAXFILESPERPROC
      KERN_MAXFILESPERPROC
# else
      KERN_MAXFILES
# endif
    };
# endif
    int maxperproc;

    getrlimit(RLIMIT_NOFILE, &limit);

# ifndef __OpenBSD__
    size_t len;
    len = sizeof(maxperproc);
    sysctl(mib, 2, &maxperproc, &len, NULL, 0);
    w_log(W_LOG_ERR, "file limit is %" PRIu64
        " kern.maxfilesperproc=%i\n",
        limit.rlim_cur, maxperproc);
# else
    // OpenBSD: no sysctl equivalent; fall back to the hard limit
    maxperproc = limit.rlim_max;
    w_log(W_LOG_ERR, "openfiles-cur is %" PRIu64
        " openfiles-max=%i\n",
        limit.rlim_cur, maxperproc);
# endif

    // Try to raise the soft fd limit up to the per-process cap
    if (limit.rlim_cur != RLIM_INFINITY &&
        maxperproc > 0 &&
        limit.rlim_cur < (rlim_t)maxperproc) {
      limit.rlim_cur = maxperproc;

      if (setrlimit(RLIMIT_NOFILE, &limit)) {
        w_log(W_LOG_ERR,
          "failed to raise limit to %" PRIu64 " (%s).\n",
          limit.rlim_cur, strerror(errno));
      } else {
        w_log(W_LOG_ERR,
            "raised file limit to %" PRIu64 "\n",
            limit.rlim_cur);
      }
    }

    getrlimit(RLIMIT_NOFILE, &limit);
#ifndef HAVE_FSEVENTS
    if (limit.rlim_cur < 10240) {
      w_log(W_LOG_ERR,
          "Your file descriptor limit is very low (%" PRIu64 "), "
          "please consult the watchman docs on raising the limits\n",
          limit.rlim_cur);
    }
#endif
  }
#endif

  proc_pid = (int)getpid();
  if (gettimeofday(&tv, NULL) == -1) {
    w_log(W_LOG_ERR, "gettimeofday failed: %s\n", strerror(errno));
    return false;
  }
  proc_start_time = (uint64_t)tv.tv_sec;

#ifndef _WIN32
  signal(SIGPIPE, SIG_IGN);

  /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without
   * restarting syscalls */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = wakeme;
  sa.sa_flags = 0;
  sigaction(SIGUSR1, &sa, NULL);
  sigaction(SIGCHLD, &sa, NULL);

  // Block SIGCHLD everywhere
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGCHLD);
  sigprocmask(SIG_BLOCK, &sigset, NULL);

  listener_fd = get_listener_socket(path);
  if (listener_fd == -1) {
    return false;
  }
  w_set_cloexec(listener_fd);
#endif

  if (pthread_create(&reaper_thread, NULL, child_reaper, NULL)) {
    w_log(W_LOG_FATAL, "pthread_create(reaper): %s\n", strerror(errno));
    return false;
  }

  if (!clients) {
    clients = w_ht_new(2, &client_hash_funcs);
  }

  w_state_load();

#ifdef HAVE_LIBGIMLI_H
  // Under gimli supervision the monitor handles fatal signals
  if (hb) {
    gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
  } else {
    w_setup_signal_handlers();
  }
#else
  w_setup_signal_handlers();
#endif
  w_set_nonblock(listener_fd);

  // Now run the dispatch
#ifndef _WIN32
  accept_loop();
#else
  named_pipe_accept_loop(path);
#endif

#ifndef _WIN32
  /* close out some resources to persuade valgrind to run clean */
  close(listener_fd);
  listener_fd = -1;
#endif

  // Wait for clients, waking any sleeping clients up in the process
  do {
    w_ht_iter_t iter;

    pthread_mutex_lock(&w_client_lock);
    n_clients = w_ht_size(clients);

    if (w_ht_first(clients, &iter)) do {
      struct watchman_client *client = w_ht_val_ptr(iter.value);
      w_event_set(client->ping);
    } while (w_ht_next(clients, &iter));

    pthread_mutex_unlock(&w_client_lock);

    w_log(W_LOG_ERR, "waiting for %d clients to terminate\n", n_clients);
    usleep(2000);
  } while (n_clients > 0);

  w_root_free_watched_roots();

  pthread_join(reaper_thread, &ignored);
  cfg_shutdown();

  return true;
}
/* Third-generation listener entry point.  Compared to the earlier
 * variants: `hb` is now file-scope (assigned without a local decl),
 * the client table uses default hash funcs, and shutdown backs off
 * exponentially, eventually interrupting stuck client threads with
 * SIGUSR1.  NOTE(review): reaper_thread is joined but its creation
 * is not visible in this block — presumably started elsewhere now;
 * confirm. */
bool w_start_listener(const char *path)
{
#ifndef _WIN32
  struct sigaction sa;
  sigset_t sigset;
#endif
  void *ignored;

  listener_thread = pthread_self();

#ifdef HAVE_LIBGIMLI_H
  hb = gimli_heartbeat_attach();
#endif

#if defined(HAVE_KQUEUE) || defined(HAVE_FSEVENTS)
  {
    struct rlimit limit;
# ifndef __OpenBSD__
    int mib[2] = { CTL_KERN,
# ifdef KERN_MAXFILESPERPROC
      KERN_MAXFILESPERPROC
# else
      KERN_MAXFILES
# endif
    };
# endif
    int maxperproc;

    getrlimit(RLIMIT_NOFILE, &limit);

# ifndef __OpenBSD__
    {
      size_t len;
      len = sizeof(maxperproc);
      sysctl(mib, 2, &maxperproc, &len, NULL, 0);
      w_log(W_LOG_ERR, "file limit is %" PRIu64
          " kern.maxfilesperproc=%i\n",
          limit.rlim_cur, maxperproc);
    }
# else
    // OpenBSD: no sysctl equivalent; fall back to the hard limit
    maxperproc = limit.rlim_max;
    w_log(W_LOG_ERR, "openfiles-cur is %" PRIu64
        " openfiles-max=%i\n",
        limit.rlim_cur, maxperproc);
# endif

    // Try to raise the soft fd limit up to the per-process cap
    if (limit.rlim_cur != RLIM_INFINITY &&
        maxperproc > 0 &&
        limit.rlim_cur < (rlim_t)maxperproc) {
      limit.rlim_cur = maxperproc;

      if (setrlimit(RLIMIT_NOFILE, &limit)) {
        w_log(W_LOG_ERR,
          "failed to raise limit to %" PRIu64 " (%s).\n",
          limit.rlim_cur, strerror(errno));
      } else {
        w_log(W_LOG_ERR,
            "raised file limit to %" PRIu64 "\n",
            limit.rlim_cur);
      }
    }

    getrlimit(RLIMIT_NOFILE, &limit);
#ifndef HAVE_FSEVENTS
    if (limit.rlim_cur < 10240) {
      w_log(W_LOG_ERR,
          "Your file descriptor limit is very low (%" PRIu64 "), "
          "please consult the watchman docs on raising the limits\n",
          limit.rlim_cur);
    }
#endif
  }
#endif

#ifndef _WIN32
  signal(SIGPIPE, SIG_IGN);

  /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without
   * restarting syscalls */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = wakeme;
  sa.sa_flags = 0;
  sigaction(SIGUSR1, &sa, NULL);
  sigaction(SIGCHLD, &sa, NULL);

  // Block SIGCHLD everywhere
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGCHLD);
  sigprocmask(SIG_BLOCK, &sigset, NULL);

  listener_fd = get_listener_socket(path);
  if (listener_fd == -1) {
    return false;
  }
  w_set_cloexec(listener_fd);
#endif

  if (!clients) {
    clients = w_ht_new(2, NULL);
  }

#ifdef HAVE_LIBGIMLI_H
  // Under gimli supervision the monitor handles fatal signals
  if (hb) {
    gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
  } else {
    w_setup_signal_handlers();
  }
#else
  w_setup_signal_handlers();
#endif
  w_set_nonblock(listener_fd);

  // Now run the dispatch
#ifndef _WIN32
  accept_loop();
#else
  named_pipe_accept_loop(path);
#endif

#ifndef _WIN32
  /* close out some resources to persuade valgrind to run clean */
  close(listener_fd);
  listener_fd = -1;
#endif

  // Wait for clients, waking any sleeping clients up in the process
  {
    int interval = 2000;
    int last_count = 0, n_clients = 0;
    const int max_interval = 1000000; // 1 second

    // NOTE(review): last_count is never updated, so the "waiting"
    // log fires on every pass while clients remain — confirm intent.
    do {
      w_ht_iter_t iter;

      pthread_mutex_lock(&w_client_lock);
      n_clients = w_ht_size(clients);

      if (w_ht_first(clients, &iter)) do {
        struct watchman_client *client = w_ht_val_ptr(iter.value);
        w_event_set(client->ping);
#ifndef _WIN32
        // If we've been waiting around for a while, interrupt
        // the client thread; it may be blocked on a write
        if (interval >= max_interval) {
          pthread_kill(client->thread_handle, SIGUSR1);
        }
#endif
      } while (w_ht_next(clients, &iter));

      pthread_mutex_unlock(&w_client_lock);

      if (n_clients != last_count) {
        w_log(W_LOG_ERR, "waiting for %d clients to terminate\n",
            n_clients);
      }

      usleep(interval);
      interval = MIN(interval * 2, max_interval); // exponential backoff
    } while (n_clients > 0);
  }

  pthread_join(reaper_thread, &ignored);
  cfg_shutdown();

  return true;
}