// Look up the boolean config value `name`; returns `defval` when the
// key is absent.  A present but non-boolean value is a fatal
// configuration error.
bool cfg_get_bool(w_root_t *root, const char *name, bool defval) {
  json_t *val = cfg_get_json(root, name);

  if (!val) {
    return defval;
  }
  if (!json_is_boolean(val)) {
    w_log(W_LOG_FATAL, "Expected config value %s to be a boolean\n", name);
  }
  return json_is_true(val);
}
// Forward one PDU from `stm` to stdout.  If the wire encoding already
// matches the requested output encoding the bytes are streamed through
// verbatim; otherwise the PDU is parsed and re-serialized.
bool w_json_buffer_passthru(w_jbuffer_t *jr,
    enum w_pdu_type output_pdu,
    w_jbuffer_t *output_pdu_buf,
    w_stm_t stm) {
  json_t *response;
  json_error_t jerr;
  bool ok;

  if (!read_and_detect_pdu(jr, stm, &jerr)) {
    w_log(W_LOG_ERR, "failed to identify PDU: %s\n", jerr.text);
    return false;
  }

  if (jr->pdu_type == output_pdu) {
    // Same encoding on both sides: copy straight through without a
    // decode/encode round trip
    if (stream_pdu(jr, stm, &jerr)) {
      return true;
    }
    w_log(W_LOG_ERR, "stream_pdu: %s\n", jerr.text);
    return false;
  }

  // Encodings differ: fully parse, then re-serialize in the requested
  // output format
  response = read_pdu_into_json(jr, stm, &jerr);
  if (!response) {
    w_log(W_LOG_ERR, "failed to parse response: %s\n", jerr.text);
    return false;
  }

  w_json_buffer_reset(output_pdu_buf);
  ok = w_ser_write_pdu(output_pdu, output_pdu_buf, w_stm_stdout(), response);

  json_decref(response);
  return ok;
}
// Attempt to load <path>/.watchmanconfig.  Returns a null ref when the
// file is absent or unreadable; parse failures are logged and also
// yield a null ref.
static json_ref load_root_config(const char* path) {
  char cfgfilename[WATCHMAN_NAME_MAX];
  json_error_t err;

  snprintf(cfgfilename, sizeof(cfgfilename), "%s/.watchmanconfig", path);

  if (!w_path_exists(cfgfilename)) {
    // A missing config is perfectly normal; only complain about other
    // access failures
    if (errno != ENOENT) {
      w_log(W_LOG_ERR, "%s is not accessible: %s\n",
            cfgfilename, strerror(errno));
    }
    return nullptr;
  }

  auto parsed = json_load_file(cfgfilename, 0, &err);
  if (!parsed) {
    w_log(W_LOG_ERR, "failed to parse json from %s: %s\n",
          cfgfilename, err.text);
  }
  return parsed;
}
// Evaluate a trigger's query against the root and spawn the trigger
// command when files matched.  Called from the settle handler, so the
// view is already in sync with the filesystem.
void w_assess_trigger(w_root_t *root, struct watchman_trigger_command *cmd) {
  w_query_res res;
  struct w_clockspec *since_spec = cmd->query->since_spec;

  if (since_spec && since_spec->tag == w_cs_clock) {
    w_log(W_LOG_DBG, "running trigger rules! since %" PRIu32 "\n",
        since_spec->clock.ticks);
  } else {
    w_log(W_LOG_DBG, "running trigger rules!\n");
  }

  // Triggers never need to sync explicitly; we are only dispatched
  // at settle points which are by definition sync'd to the present time
  cmd->query->sync_timeout = 0;
  if (!w_query_execute(cmd->query, root, &res, trigger_generator, cmd)) {
    // Fix: this message previously lacked its trailing newline, running
    // it together with the next log entry
    w_log(W_LOG_ERR, "error running trigger query: %s\n", res.errmsg);
    w_query_result_free(&res);
    return;
  }

  w_log(W_LOG_DBG, "trigger generated %" PRIu32 " results\n",
      res.num_results);

  // create a new spec that will be used the next time
  cmd->query->since_spec = w_clockspec_new_clock(res.root_number, res.ticks);

  if (res.num_results) {
    // spawn_command may still need the prior spec; free it only after
    spawn_command(root, cmd, &res, since_spec);
  }

  if (since_spec) {
    w_clockspec_free(since_spec);
    since_spec = NULL;
  }

  w_query_result_free(&res);
}
// Notify every client subscribed to `root` that the root is being
// cancelled, then drop those subscriptions.  Iterates all clients and
// their subscription tables under w_client_lock.
void w_cancel_subscriptions_for_root(w_root_t *root) {
  w_ht_iter_t iter;

  pthread_mutex_lock(&w_client_lock);
  if (w_ht_first(clients, &iter)) {
    do {
      struct watchman_user_client *client = w_ht_val_ptr(iter.value);
      w_ht_iter_t citer;

      if (w_ht_first(client->subscriptions, &citer)) {
        do {
          struct watchman_client_subscription *sub = w_ht_val_ptr(citer.value);

          if (sub->root == root) {
            // Build a unilateral "canceled" PDU for this subscriber
            json_t *response = make_response();

            w_log(W_LOG_ERR,
                "Cancel subscription %.*s for client:stm=%p due to "
                "root cancellation\n",
                sub->name->len, sub->name->buf, client->client.stm);

            set_prop(response, "root", w_string_to_json(root->root_path));
            set_prop(response, "subscription", w_string_to_json(sub->name));
            set_prop(response, "unilateral", json_true());
            set_prop(response, "canceled", json_true());

            // enqueue_response takes ownership on success; on failure
            // we must release the response ourselves
            if (!enqueue_response(&client->client, response, true)) {
              w_log(W_LOG_DBG, "failed to queue sub cancellation\n");
              json_decref(response);
            }

            // NOTE(review): deleting via the iterator mid-loop assumes
            // w_ht_iter_del keeps citer valid for the following
            // w_ht_next — matches usage elsewhere in the codebase, but
            // worth confirming against the hash table contract
            w_ht_iter_del(client->subscriptions, &citer);
          }
        } while (w_ht_next(client->subscriptions, &citer));
      }
    } while (w_ht_next(clients, &iter));
  }
  pthread_mutex_unlock(&w_client_lock);
}
// Begin watching directory `dir` (at `path`) via Solaris event ports.
// Opens the directory (refusing to traverse symlinks), records its
// timestamps in the file object, and associates it with the port.
// Returns an open DIR handle for the caller to enumerate, or NULL.
static DIR *portfs_root_start_watch_dir(watchman_global_watcher_t watcher,
    w_root_t *root, struct watchman_dir *dir, struct timeval now,
    const char *path) {
  struct portfs_root_state *state = root->watch;
  DIR *osdir;
  struct stat st;
  unused_parameter(watcher);

  osdir = opendir_nofollow(path);
  if (!osdir) {
    handle_open_errno(root, dir, now, "opendir", errno, NULL);
    return NULL;
  }

  if (fstat(dirfd(osdir), &st) == -1) {
    // whaaa?
    w_log(W_LOG_ERR, "fstat on opened dir %s failed: %s\n", path,
        strerror(errno));
    w_root_schedule_recrawl(root, "fstat failed");
    closedir(osdir);
    return NULL;
  }

  // Fix: st.st_atim was previously assigned to fo_mtime (and then
  // immediately overwritten by st.st_mtim), leaving fo_atime
  // uninitialized.  port_associate compares these timestamps to decide
  // whether the object changed, so fo_atime must be populated.
  dir->port_file.fo_atime = st.st_atim;
  dir->port_file.fo_mtime = st.st_mtim;
  dir->port_file.fo_ctime = st.st_ctim;
  dir->port_file.fo_name = (char*)dir->path->buf;

  errno = 0;
  if (port_associate(state->port_fd, PORT_SOURCE_FILE,
      (uintptr_t)&dir->port_file, WATCHMAN_PORT_EVENTS,
      SET_DIR_BIT(dir))) {
    // Association failure is logged but not fatal; the directory can
    // still be enumerated by the caller
    w_log(W_LOG_ERR, "port_associate %s %s\n",
        dir->port_file.fo_name, strerror(errno));
  }

  return osdir;
}
// Connect to the server and send `cmd`, relaying responses to stdout.
// A NULL cmd merely probes that a server is listening.  Returns false
// with errno set on failure.
static bool try_command(json_t *cmd, int timeout) {
  w_jbuffer_t req_buf, res_buf;
  bool res_buf_live = false;
  bool ok = false;
  int saved_errno = 0;
  w_stm_t client = w_stm_connect(sock_name, timeout * 1000);

  if (client == NULL) {
    return false;
  }

  if (!cmd) {
    // Probe only: a successful connect is all we wanted to know
    w_stm_close(client);
    return true;
  }

  w_json_buffer_init(&req_buf);

  // Send command
  if (!w_ser_write_pdu(server_pdu, &buffer_alias(req_buf), client, cmd)) {
    saved_errno = errno;
    w_log(W_LOG_ERR, "error sending PDU to server\n");
    goto done;
  }

  w_json_buffer_reset(&req_buf);
  w_json_buffer_init(&res_buf);
  res_buf_live = true;

  // In persistent mode we keep relaying PDUs until the stream fails
  do {
    if (!w_json_buffer_passthru(&req_buf, output_pdu, &res_buf, client)) {
      saved_errno = errno;
      goto done;
    }
  } while (persistent);

  ok = true;

done:
  w_json_buffer_free(&req_buf);
  if (res_buf_live) {
    w_json_buffer_free(&res_buf);
  }
  w_stm_close(client);
  if (!ok) {
    // Report the original failure, not whatever cleanup set
    errno = saved_errno;
  }
  return ok;
}
// Look up the integer config value `name`; returns `defval` when the
// key is absent.  A present but non-integer value is a fatal
// configuration error.
json_int_t cfg_get_int(w_root_t *root, const char *name, json_int_t defval) {
  json_t *val = cfg_get_json(root, name);

  if (!val) {
    return defval;
  }
  if (!json_is_integer(val)) {
    w_log(W_LOG_FATAL, "Expected config value %s to be an integer\n", name);
  }
  return json_integer_value(val);
}
// Look up the string config value `name`; returns `defval` when the
// key is absent.  A present but non-string value is a fatal
// configuration error.
const char *cfg_get_string(w_root_t *root, const char *name,
    const char *defval) {
  json_t *val = cfg_get_json(root, name);

  if (!val) {
    return defval;
  }
  if (!json_is_string(val)) {
    w_log(W_LOG_FATAL, "Expected config value %s to be a string\n", name);
  }
  return json_string_value(val);
}
// Determine the current user's name: try $USER / $LOGNAME first, then
// fall back to a platform account lookup.  Fatal (or abort) when the
// user cannot be determined.
static const char *compute_user_name(void) {
  const char *user = get_env_with_fallback("USER", "LOGNAME", NULL);
#ifdef _WIN32
  // static so the returned pointer remains valid after we return
  static char user_buf[256];
#endif

  if (!user) {
#ifdef _WIN32
    DWORD size = sizeof(user_buf);
    if (GetUserName(user_buf, &size)) {
      // NOTE(review): GetUserName reports the copied length including
      // the NUL terminator; if the name ever exactly fills the buffer,
      // user_buf[size] writes one past the end — verify this bound
      user_buf[size] = 0;
      user = user_buf;
    } else {
      w_log(W_LOG_FATAL, "GetUserName failed: %s. I don't know who you are\n",
          win32_strerror(GetLastError()));
    }
#else
    uid_t uid = getuid();
    struct passwd *pw;

    pw = getpwuid(uid);
    if (!pw) {
      // W_LOG_FATAL terminates; pw is not dereferenced on this path
      w_log(W_LOG_FATAL, "getpwuid(%d) failed: %s. I don't know who you are\n",
          uid, strerror(errno));
    }

    user = pw->pw_name;
#endif

    if (!user) {
      w_log(W_LOG_ERR, "watchman requires that you set $USER in your env\n");
      abort();
    }
  }

  return user;
}
// Fatal signal handler: decode si_code for the common hardware faults
// into a human readable reason, then log at FATAL level (which
// terminates the process).
// NOTE(review): w_log/strsignal are not async-signal-safe; presumably
// acceptable here because we are terminating anyway — confirm.
static void crash_handler(int signo, siginfo_t *si, void *ucontext) {
  const char *reason = "";
  unused_parameter(ucontext);

  if (si) {
    switch (si->si_signo) {
      case SIGILL:
        switch (si->si_code) {
          case ILL_ILLOPC: reason = "illegal opcode"; break;
          case ILL_ILLOPN: reason = "illegal operand"; break;
          case ILL_ILLADR: reason = "illegal addressing mode"; break;
          case ILL_ILLTRP: reason = "illegal trap"; break;
          case ILL_PRVOPC: reason = "privileged opcode"; break;
          case ILL_PRVREG: reason = "privileged register"; break;
          case ILL_COPROC: reason = "co-processor error"; break;
          case ILL_BADSTK: reason = "internal stack error"; break;
        }
        break;
      case SIGFPE:
        switch (si->si_code) {
          case FPE_INTDIV: reason = "integer divide by zero"; break;
          case FPE_INTOVF: reason = "integer overflow"; break;
          case FPE_FLTDIV: reason = "floating point divide by zero"; break;
          case FPE_FLTOVF: reason = "floating point overflow"; break;
          case FPE_FLTUND: reason = "floating point underflow"; break;
          case FPE_FLTRES: reason = "floating point inexact result"; break;
          case FPE_FLTINV: reason = "invalid floating point operation"; break;
          case FPE_FLTSUB: reason = "subscript out of range"; break;
        }
        break;
      case SIGSEGV:
        switch (si->si_code) {
          case SEGV_MAPERR: reason = "address not mapped to object"; break;
          case SEGV_ACCERR: reason = "invalid permissions for mapped object";
            break;
        }
        break;
#ifdef SIGBUS
      case SIGBUS:
        switch (si->si_code) {
          case BUS_ADRALN: reason = "invalid address alignment"; break;
          case BUS_ADRERR: reason = "non-existent physical address"; break;
        }
        break;
#endif
    }
  }

  // Include the faulting value/address when siginfo is available
  w_log(W_LOG_FATAL, "Terminating due to signal %d %s. %s (%p)\n",
      signo, strsignal(signo), reason,
      si ? si->si_value.sival_ptr : NULL);
}
// Allocate and register a client for the accepted stream `stm`, then
// start a detached thread to service it.  Returns the client, or NULL
// on failure (allocation or thread creation).
// NOTE(review): on failure the caller retains ownership of `stm`; the
// existing calloc-failure path already behaved this way — confirm the
// caller handles a NULL return.
static struct watchman_client *make_new_client(w_stm_t stm) {
  struct watchman_client *client;
  pthread_attr_t attr;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  client = calloc(1, derived_client_size);
  if (!client) {
    pthread_attr_destroy(&attr);
    return NULL;
  }
  client->stm = stm;
  w_log(W_LOG_DBG, "accepted client:stm=%p\n", client->stm);

  // Fix: the init failures below were previously marked "FIXME: error
  // handling" and fell through with partially-initialized state; now
  // each unwinds what was already set up and fails the accept.
  if (!w_json_buffer_init(&client->reader)) {
    w_log(W_LOG_ERR, "failed to init client reader buffer\n");
    goto fail_reader;
  }
  if (!w_json_buffer_init(&client->writer)) {
    w_log(W_LOG_ERR, "failed to init client writer buffer\n");
    goto fail_writer;
  }
  client->ping = w_event_make();
  if (!client->ping) {
    w_log(W_LOG_ERR, "failed to allocate client ping event\n");
    goto fail_ping;
  }

  derived_client_ctor(client);

  pthread_mutex_lock(&w_client_lock);
  w_ht_set(clients, w_ht_ptr_val(client), w_ht_ptr_val(client));
  pthread_mutex_unlock(&w_client_lock);

  // Start a thread for the client.
  // We used to use libevent for this, but we have
  // a low volume of concurrent clients and the json
  // parse/encode APIs are not easily used in a non-blocking
  // server architecture.
  if (pthread_create(&client->thread_handle, &attr, client_thread, client)) {
    // It didn't work out, sorry!
    pthread_mutex_lock(&w_client_lock);
    w_ht_del(clients, w_ht_ptr_val(client));
    pthread_mutex_unlock(&w_client_lock);
    client_delete(client);
    client = NULL;
  }

  pthread_attr_destroy(&attr);
  return client;

fail_ping:
  w_json_buffer_free(&client->writer);
fail_writer:
  w_json_buffer_free(&client->reader);
fail_reader:
  free(client);
  pthread_attr_destroy(&attr);
  return NULL;
}
// Listener-thread main loop: poll the listening socket, accept new
// connections, wrap each fd in a stream and hand it to
// make_new_client().  Runs until the global `stopping` flag is set.
static void accept_loop() {
  while (!stopping) {
    int client_fd;
    struct pollfd pfd;
    int bufsize;
    w_stm_t stm;

#ifdef HAVE_LIBGIMLI_H
    // Let the gimli supervisor know we are alive
    if (hb) {
      gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
    }
#endif

    pfd.events = POLLIN;
    pfd.fd = listener_fd;
    // Wake up at least once a minute even when idle
    if (poll(&pfd, 1, 60000) < 1 || (pfd.revents & POLLIN) == 0) {
      if (stopping) {
        break;
      }
      // Timed out, or error.
      // Arrange to sanity check that we're working
      w_check_my_sock();
      continue;
    }

#ifdef HAVE_ACCEPT4
    // accept4 sets CLOEXEC atomically where available
    client_fd = accept4(listener_fd, NULL, 0, SOCK_CLOEXEC);
#else
    client_fd = accept(listener_fd, NULL, 0);
#endif
    if (client_fd == -1) {
      continue;
    }
    w_set_cloexec(client_fd);
    bufsize = WATCHMAN_IO_BUF_SIZE;
    // Best effort; an oversized send buffer is a nicety, not a must
    setsockopt(client_fd, SOL_SOCKET, SO_SNDBUF,
        (void*)&bufsize, sizeof(bufsize));

    stm = w_stm_fdopen(client_fd);
    if (!stm) {
      w_log(W_LOG_ERR, "Failed to allocate stm for fd: %s\n",
          strerror(errno));
      close(client_fd);
      continue;
    }
    make_new_client(stm);
  }
}
// Connect to the unix domain socket at `path`, retrying roughly every
// 10ms for up to `timeoutms` while the server is still coming up
// (ECONNREFUSED/ENOENT).  Returns NULL with errno set on failure.
w_stm_t w_stm_connect_unix(const char *path, int timeoutms) {
  struct sockaddr_un un;
  const int max_attempts = timeoutms / 10;
  int attempts = 0;
  int bufsize = WATCHMAN_IO_BUF_SIZE;
  int fd;
  w_stm_t stm;

  if (strlen(path) >= sizeof(un.sun_path) - 1) {
    w_log(W_LOG_ERR, "w_stm_connect_unix(%s) path is too long\n", path);
    errno = E2BIG;
    return NULL;
  }

  fd = socket(PF_LOCAL, SOCK_STREAM, 0);
  if (fd == -1) {
    return NULL;
  }

  memset(&un, 0, sizeof(un));
  un.sun_family = PF_LOCAL;
  // Length was validated above, so a plain copy is safe here
  strcpy(un.sun_path, path);

  while (connect(fd, (struct sockaddr *)&un, sizeof(un))) {
    int err = errno;

    // The server may still be starting; keep knocking for a while
    if ((err == ECONNREFUSED || err == ENOENT) &&
        attempts++ < max_attempts) {
      usleep(10000);
      continue;
    }

    close(fd);
    return NULL;
  }

  setsockopt(fd, SOL_SOCKET, SO_RCVBUF, (void*)&bufsize, sizeof(bufsize));

  stm = w_stm_fdopen(fd);
  if (!stm) {
    close(fd);
  }
  return stm;
}
void send_error_response(struct watchman_client *client, const char *fmt, ...) { char buf[WATCHMAN_NAME_MAX]; va_list ap; json_t *resp = make_response(); va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); w_log(W_LOG_DBG, "send_error_response: fd=%d %s\n", client->fd, buf); set_prop(resp, "error", json_string_nocheck(buf)); send_and_dispose_response(client, resp); }
// Handle the "shutdown-server" command: acknowledge the request, flag
// the server to stop, and wake the listener so it notices promptly.
static void cmd_shutdown(
    struct watchman_client *client,
    json_t *args)
{
  json_t *resp = make_response();

  unused_parameter(args);
  w_log(W_LOG_ERR, "shutdown-server was requested, exiting!\n");

  // Build the acknowledgement before tearing things down
  set_prop(resp, "shutdown-server", json_true());

  // Signal the accept loop to stop, and interrupt its poll() so it
  // does not wait out the full timeout
  stopping = true;
  pthread_kill(listener_thread, SIGUSR1);

  send_and_dispose_response(client, resp);
}
// Map a command-line encoding name onto a PDU type.  A NULL name
// leaves *pdu untouched; an unrecognized name is a usage error.
static void parse_encoding(const char *enc, enum w_pdu_type *pdu) {
  if (!enc) {
    return;
  }

  if (strcmp(enc, "json") == 0) {
    *pdu = is_json_compact;
  } else if (strcmp(enc, "bser") == 0) {
    *pdu = is_bser;
  } else {
    w_log(W_LOG_ERR, "Invalid encoding '%s', use one of json or bser\n", enc);
    exit(EX_USAGE);
  }
}
/* must be called with root and client locked */
// Evaluate the subscription against the root and, if it produced any
// results, queue them for delivery to the client.  On queue failure
// the response is released here.
void w_run_subscription_rules(
    struct watchman_client *client,
    struct watchman_client_subscription *sub,
    w_root_t *root)
{
  json_t *response = build_subscription_results(sub, root);

  // enqueue_response takes ownership on success
  if (response && !enqueue_response(client, response, true)) {
    w_log(W_LOG_DBG, "failed to queue sub response\n");
    json_decref(response);
  }
}
// Run the watchman service: detach stdio from the console, redirect
// output to the log file, announce startup, then block in the listener
// until shutdown.  Never returns; exits the process.
static void run_service(void) {
  int fd;
  bool res;

  // redirect std{in,out,err}
  fd = open("/dev/null", O_RDONLY);
  if (fd != -1) {
    ignore_result(dup2(fd, STDIN_FILENO));
    close(fd);
  }
  fd = open(log_name, O_WRONLY|O_APPEND|O_CREAT, 0600);
  if (fd != -1) {
    ignore_result(dup2(fd, STDOUT_FILENO));
    ignore_result(dup2(fd, STDERR_FILENO));
    close(fd);
  }

#ifndef _WIN32
  /* we are the child, let's set things up */
  ignore_result(chdir("/"));
#endif

  w_set_thread_name("listener");
  {
    char hostname[256];
    gethostname(hostname, sizeof(hostname));
    // gethostname may not NUL-terminate on truncation; force it
    hostname[sizeof(hostname) - 1] = '\0';
    w_log(W_LOG_ERR, "Watchman %s %s starting up on %s\n",
        PACKAGE_VERSION,
#ifdef WATCHMAN_BUILD_INFO
        WATCHMAN_BUILD_INFO,
#else
        "<no build info set>",
#endif
        hostname);
  }

  watchman_watcher_init();
  // Blocks for the lifetime of the service
  res = w_start_listener(sock_name);
  watchman_watcher_dtor();

  if (res) {
    exit(0);
  }
  exit(1);
}
/**
 * Compute a permission mask from the config entry `name`.  The entry,
 * when present, must be an object containing the boolean keys 'group'
 * and 'others'.  The owner always gets read+write; execute is added
 * according to execute_bits.
 */
mode_t cfg_get_perms(const char* name, bool write_bits, bool execute_bits) {
  mode_t perms = S_IRUSR | S_IWUSR | (execute_bits ? S_IXUSR : 0);
  auto val = cfg_get_json(name);

  if (val) {
    if (!val.isObject()) {
      w_log(W_LOG_FATAL, "Expected config value %s to be an object\n", name);
    }

    perms |= get_group_perm(name, val, write_bits, execute_bits);
    perms |= get_others_perm(name, val, write_bits, execute_bits);
  }

  return perms;
}
// Spin up the FSEvents processing thread for `root` and block until it
// signals readiness via fse_cond.  Returns false (with the failure
// already logged) if the thread failed to start or reported a failure
// reason during initialization.
bool FSEventsWatcher::start(const std::shared_ptr<w_root_t>& root) {
  // Spin up the fsevents processing thread; it owns a ref on the root
  auto self = std::dynamic_pointer_cast<FSEventsWatcher>(shared_from_this());

  try {
    // Acquire the mutex so thread initialization waits until we release it
    auto wlock = items_.wlock();

    std::thread thread([self, root]() {
      try {
        self->FSEventsThread(root);
      } catch (const std::exception& e) {
        watchman::log(watchman::ERR, "uncaught exception: ", e.what());
        root->cancel();
      }

      // Ensure that we signal the condition variable before we
      // finish this thread.  That ensures that we don't get stuck
      // waiting in FSEventsWatcher::start if something unexpected happens.
      self->fse_cond.notify_one();
    });

    // We have to detach because the readChangesThread may wind up
    // being the last thread to reference the watcher state and
    // cannot join itself.
    thread.detach();

    // Allow thread init to proceed; wait for its signal
    fse_cond.wait(wlock.getUniqueLock());

    if (root->failure_reason) {
      w_log(W_LOG_ERR, "failed to start fsevents thread: %s\n",
          root->failure_reason.c_str());
      return false;
    }

    return true;
  } catch (const std::exception& e) {
    // std::thread construction may throw (e.g. resource exhaustion)
    watchman::log(
        watchman::ERR, "failed to start fsevents thread: ", e.what(), "\n");
    return false;
  }
}
int main(int argc, char **argv) { bool ran; json_t *cmd; parse_cmdline(&argc, &argv); if (foreground) { run_service(); return 0; } cmd = build_command(argc, argv); preprocess_command(cmd, output_pdu); ran = try_command(cmd, 0); if (!ran && should_start(errno)) { if (no_spawn) { if (!no_local) { ran = try_client_mode_command(cmd, !no_pretty); } } else { #ifdef USE_GIMLI spawn_via_gimli(); #elif defined(__APPLE__) spawn_via_launchd(); #else daemonize(); #endif ran = try_command(cmd, 10); } } json_decref(cmd); if (ran) { return 0; } if (!no_spawn) { w_log(W_LOG_ERR, "unable to talk to your watchman!\n"); } return 1; }
// APC completion routine for WriteFileEx on a win_handle stream:
// account for the completed bytes, surface errors to the waiter, and
// kick off the next pending write.
static void CALLBACK write_completed(DWORD err, DWORD bytes,
    LPOVERLAPPED olap) {
  // Reverse engineer our handle from the olap pointer
  struct overlapped_op *op = (void*)olap;
  struct win_handle *h = op->h;
  struct write_buf *wbuf = op->wbuf;

  stream_debug("WriteFileEx: completion callback invoked: bytes=%d %s\n",
      (int)bytes, win32_strerror(err));

  EnterCriticalSection(&h->mtx);
  if (h->write_pending == op) {
    h->write_pending = NULL;
  }

  if (err == 0) {
    wbuf->cursor += bytes;
    wbuf->len -= bytes;

    if (wbuf->len == 0) {
      // Consumed this buffer
      free(wbuf);
    } else {
      // NOTE(review): overlapped writes are expected to complete in
      // full; a short write indicates a broken invariant, hence FATAL
      w_log(W_LOG_FATAL, "WriteFileEx: short write: %d written, %d remain\n",
          bytes, wbuf->len);
    }
  } else {
    // Record the failure and wake anyone waiting on the handle
    stream_debug("WriteFilex: completion: failed: %s\n",
        win32_strerror(err));
    h->errcode = err;
    h->error_pending = true;
    SetEvent(h->waitable);
  }

  // Send whatever else we have waiting to go
  initiate_write(h);

  LeaveCriticalSection(&h->mtx);

  // Free the prior struct after possibly initiating another write
  // to minimize the chance of the same address being reused and
  // confusing the completion status
  free(op);
}
// Wait up to `timeoutms` for any tracked child process to exit.  On
// success, stores its pid in *pid, removes it from child_procs, closes
// its handle and returns true.  Returns false on timeout, wait
// failure, or when no children are being tracked.
// NOTE(review): the snapshot below assumes child_procs holds at most
// MAXIMUM_WAIT_OBJECTS entries — verify that invariant at insert time.
BOOL w_wait_for_any_child(DWORD timeoutms, DWORD *pid) {
  HANDLE handles[MAXIMUM_WAIT_OBJECTS];
  DWORD pids[MAXIMUM_WAIT_OBJECTS];
  int i = 0;
  w_ht_iter_t iter;
  DWORD res;

  *pid = 0;

  // Snapshot the (handle, pid) pairs under the lock so the wait
  // itself runs lock-free
  pthread_mutex_lock(&child_proc_lock);
  if (child_procs && w_ht_first(child_procs, &iter)) do {
    HANDLE proc = w_ht_val_ptr(iter.value);
    pids[i] = (DWORD)iter.key;
    handles[i++] = proc;
  } while (w_ht_next(child_procs, &iter));
  pthread_mutex_unlock(&child_proc_lock);

  if (i == 0) {
    return false;
  }

  w_log(W_LOG_DBG, "w_wait_for_any_child: waiting for %d handles\n", i);
  // Alertable wait so queued APCs can run while we block
  res = WaitForMultipleObjectsEx(i, handles, false, timeoutms, true);

  if (res == WAIT_FAILED) {
    errno = map_win32_err(GetLastError());
    return false;
  }

  // Map the wait result back onto an index into handles/pids
  if (res < WAIT_OBJECT_0 + i) {
    i = res - WAIT_OBJECT_0;
  } else if (res >= WAIT_ABANDONED_0 && res < WAIT_ABANDONED_0 + i) {
    i = res - WAIT_ABANDONED_0;
  } else {
    // Timeout or APC completion: no child exited
    return false;
  }

  pthread_mutex_lock(&child_proc_lock);
  w_ht_del(child_procs, pids[i]);
  pthread_mutex_unlock(&child_proc_lock);

  CloseHandle(handles[i]);
  *pid = pids[i];
  return true;
}
// Begin watching directory `dir` (at `path`) via inotify.  Returns an
// open directory handle for the caller to enumerate, or NULL with
// errno set on failure.
static struct watchman_dir_handle *inot_root_start_watch_dir(
    watchman_global_watcher_t watcher, w_root_t *root,
    struct watchman_dir *dir, struct timeval now, const char *path) {
  struct inot_root_state *state = root->watch;
  struct watchman_dir_handle *osdir = NULL;
  int newwd, err;
  unused_parameter(watcher);

  // Carry out our very strict opendir first to ensure that we're not
  // traversing symlinks in the context of this root
  osdir = w_dir_open(path);
  if (!osdir) {
    handle_open_errno(root, dir, now, "opendir", errno, NULL);
    return NULL;
  }

  // The directory might be different since the last time we looked at it, so
  // call inotify_add_watch unconditionally.
  newwd = inotify_add_watch(state->infd, path, WATCHMAN_INOTIFY_MASK);
  if (newwd == -1) {
    // Preserve the inotify errno across the cleanup calls below so the
    // caller sees the original failure
    err = errno;
    if (errno == ENOSPC || errno == ENOMEM) {
      // Limits exceeded, no recovery from our perspective
      set_poison_state(root, dir->path, now, "inotify-add-watch", errno,
          inot_strerror(errno));
    } else {
      handle_open_errno(root, dir, now, "inotify_add_watch", errno,
          inot_strerror(errno));
    }
    w_dir_close(osdir);
    errno = err;
    return NULL;
  }

  // record mapping
  pthread_mutex_lock(&state->lock);
  w_ht_replace(state->wd_to_name, newwd, w_ht_ptr_val(dir->path));
  pthread_mutex_unlock(&state->lock);
  w_log(W_LOG_DBG, "adding %d -> %s mapping\n", newwd, path);

  return osdir;
}
// Copy a single PDU from `stm` (buffered through jr) to the output
// without re-encoding it.  The PDU type must already have been
// detected into jr->pdu_type.
static bool stream_pdu(w_jbuffer_t *jr, w_stm_t stm, json_error_t *jerr) {
  switch (jr->pdu_type) {
    case is_json_compact:
    case is_json_pretty:
      // Text PDUs are newline-delimited
      return stream_until_newline(jr, stm);
    case is_bser:
      {
        json_int_t len;
        // Skip the two BSER header bytes, then read the declared
        // payload length and stream exactly that many bytes.
        // NOTE(review): assumes rpos currently points at the header
        // left in place by the PDU detection — confirm against
        // read_and_detect_pdu
        jr->rpos += 2;
        if (!w_bser_decode_pdu_len(jr, stm, &len, jerr)) {
          return false;
        }
        return stream_n_bytes(jr, stm, len, jerr);
      }
    default:
      // Unreachable for well-formed input; FATAL terminates
      w_log(W_LOG_FATAL, "not streaming for pdu type %d\n", jr->pdu_type);
      return false;
  }
}
// Client entry point: parse arguments, run the service in the
// foreground when requested (removing any stale socket first),
// otherwise send the command to the server, spawning the daemon if no
// server is running.
int main(int argc, char **argv) {
  bool ran;
  json_t *cmd;

  parse_cmdline(&argc, &argv);

  if (foreground) {
    // Remove a stale socket left by a previous instance
    unlink(sock_name);
    run_service();
    return 0;
  }

  cmd = build_command(argc, argv);

  // First attempt assumes a server is already up (no retry window)
  ran = try_command(cmd, 0);
  if (!ran && should_start(errno)) {
    if (no_spawn) {
      ran = try_client_mode_command(cmd, !no_pretty);
    } else {
      unlink(sock_name);
#ifdef USE_GIMLI
      spawn_via_gimli();
#else
      daemonize();
#endif
      // Retry with a timeout so the freshly spawned daemon can come up
      ran = try_command(cmd, 10);
    }
  }

  json_decref(cmd);

  if (ran) {
    return 0;
  }

  if (!no_spawn) {
    w_log(W_LOG_ERR, "unable to talk to your watchman!\n");
  }
  return 1;
}
// Tear down a client: run the derived destructor, drain any queued
// responses, release the I/O buffers and ping event, then shut down
// and close the stream before freeing the struct itself.
static void client_delete(struct watchman_client *client) {
  w_log(W_LOG_DBG, "client_delete %p\n", client);
  derived_client_dtor(client);

  // Drain and free the pending response queue
  while (client->head) {
    struct watchman_client_response *pending = client->head;
    client->head = pending->next;
    json_decref(pending->json);
    free(pending);
  }

  w_json_buffer_free(&client->reader);
  w_json_buffer_free(&client->writer);
  w_event_destroy(client->ping);
  w_stm_shutdown(client->stm);
  w_stm_close(client->stm);
  free(client);
}
// Query generator for subscriptions: walks the root's time-ordered
// file list from most recent backwards, feeding each file newer than
// the subscription's `since` boundary into the query engine.  Writes
// the number of files examined into *num_walked.
static bool subscription_generator(
    w_query *query,
    w_root_t *root,
    struct w_query_ctx *ctx,
    void *gendata,
    int64_t *num_walked)
{
  struct watchman_file *f;
  struct watchman_client_subscription *sub = gendata;
  int64_t n = 0;
  bool result = true;

  w_log(W_LOG_DBG, "running subscription %s %p\n",
      sub->name->buf, sub);

  // Walk back in time until we hit the boundary
  for (f = root->latest_file; f; f = f->next) {
    ++n;
    // Timestamp boundary is exclusive at equality; clock boundary is
    // inclusive (<=) — files at the recorded tick were already reported
    if (ctx->since.is_timestamp &&
        f->otime.timestamp < ctx->since.timestamp) {
      break;
    }
    if (!ctx->since.is_timestamp &&
        f->otime.ticks <= ctx->since.clock.ticks) {
      break;
    }
    // Honor the subscription's relative_root scoping, if any
    if (!w_query_file_matches_relative_root(ctx, f)) {
      continue;
    }
    if (!w_query_process_file(query, ctx, f)) {
      result = false;
      goto done;
    }
  }

done:
  *num_walked = n;
  return result;
}
// This is the iterator callback we use to prune out obsoleted leaves. // We need to compare the prefix to make sure that we don't delete // a sibling node by mistake (see commentary on the is_path_prefix // function for more on that). int PendingCollectionBase::iterContext::operator()( const w_string& key, std::shared_ptr<watchman_pending_fs>& p) { if (!p) { // It was removed; update the tree to reflect this coll.tree_.erase(key); // Stop iteration: we deleted something and invalidated the iterators. return 1; } if ((p->flags & W_PENDING_CRAWL_ONLY) == 0 && key.size() > root.size() && is_path_prefix( (const char*)key.data(), key.size(), root.data(), root.size()) && !watchman::CookieSync::isPossiblyACookie(p->path)) { w_log( W_LOG_DBG, "delete_kids: removing (%d) %.*s from pending because it is " "obsoleted by (%d) %.*s\n", int(p->path.size()), int(p->path.size()), p->path.data(), int(root.size()), int(root.size()), root.data()); // Unlink the child from the pending index. coll.unlinkItem(p); // Remove it from the art tree. coll.tree_.erase(key); // Stop iteration because we just invalidated the iterator state // by modifying the tree mid-iteration. return 1; } return 0; }