// Walk backwards through source-control history starting at lookupCommitId
// and return info for the first commit that has a saved state present on
// local disk.  When no candidate commit has one, the returned result carries
// an empty commit id and an "error" entry in savedStateInfo.
SavedStateInterface::SavedStateResult
LocalSavedStateInterface::getMostRecentSavedStateImpl(
    w_string_piece lookupCommitId) const {
  const auto candidateCommits =
      scm_->getCommitsPriorToAndIncluding(lookupCommitId, maxCommits_);

  for (auto& candidate : candidateCommits) {
    auto statePath = getLocalPath(candidate);

    // We could return a path that no longer exists if the path is removed
    // (for example by saved state GC) after we check that the path exists
    // here, but before the client reads the state. We've explicitly chosen to
    // return the state without additional safety guarantees, and leave it to
    // the client to ensure GC happens only after states are no longer likely
    // to be used.
    if (!w_path_exists(statePath.c_str())) {
      continue;
    }

    log(DBG, "Found saved state for commit ", candidate, "\n");
    SavedStateInterface::SavedStateResult found;
    found.commitId = candidate;
    found.savedStateInfo = json_object(
        {{"local-path", w_string_to_json(statePath)},
         {"commit-id", w_string_to_json(candidate)}});
    return found;
  }

  // Nothing usable was found among the candidate commits.
  SavedStateInterface::SavedStateResult notFound;
  notFound.commitId = w_string();
  notFound.savedStateInfo = json_object(
      {{"error", w_string_to_json("No suitable saved state found")}});
  return notFound;
}
// Convert a query field list into a JSON array holding the name of each
// field, preserving the original ordering.
json_ref field_list_to_json_name_array(const w_query_field_list& fieldList) {
  auto names = json_array_of_size(fieldList.size());
  for (const auto& field : fieldList) {
    // json_array_append_new steals the reference produced by
    // w_string_to_json, so no extra decref is needed.
    json_array_append_new(names, w_string_to_json(field->name));
  }
  return names;
}
// Build and return a JSON array listing the names of all registered
// capabilities.  The caller owns the returned reference.
json_t *w_capability_get_list(void) {
  json_t *arr = json_array_of_size(w_ht_size(capabilities));
  w_ht_iter_t iter;

  // Bug fix: the original used a bare do/while, which executed the loop body
  // even when w_ht_first() failed (empty table), reading an uninitialized
  // iterator.  Guarding with the w_ht_first() result makes an empty
  // capabilities table yield an empty array instead.
  if (w_ht_first(capabilities, &iter)) {
    do {
      w_string_t *name = w_ht_val_ptr(iter.key);
      json_array_append(arr, w_string_to_json(name));
    } while (w_ht_next(capabilities, &iter));
  }

  return arr;
}
void watchman_perf_sample::add_root_meta( const std::shared_ptr<w_root_t>& root) { // Note: if the root lock isn't held, we may read inaccurate numbers for // some of these properties. We're ok with that, and don't want to force // the root lock to be re-acquired just for this. auto meta = json_object( {{"path", w_string_to_json(root->root_path)}, {"recrawl_count", json_integer(root->recrawlInfo.rlock()->recrawlCount)}, {"case_sensitive", json_boolean(root->case_sensitive)}}); // During recrawl, the view may be re-assigned. Protect against // reading a nullptr. auto view = root->inner.view; if (view) { auto position = view->getMostRecentRootNumberAndTickValue(); meta.set({{"number", json_integer(position.rootNumber)}, {"ticks", json_integer(position.ticks)}, {"watcher", w_string_to_json(view->getName())}}); } add_meta("root", std::move(meta)); }
// Cancel every client subscription that references `root`.  Each affected
// client is sent a unilateral response with "canceled": true, and the
// subscription entry is removed from the client's table.  Called when a
// root is being cancelled/removed.
void w_cancel_subscriptions_for_root(w_root_t *root) {
  w_ht_iter_t iter;

  // Hold the global client lock so the clients table cannot change while
  // we walk it.
  pthread_mutex_lock(&w_client_lock);
  if (w_ht_first(clients, &iter)) {
    do {
      struct watchman_user_client *client = w_ht_val_ptr(iter.value);
      w_ht_iter_t citer;

      if (w_ht_first(client->subscriptions, &citer)) {
        do {
          struct watchman_client_subscription *sub = w_ht_val_ptr(citer.value);

          if (sub->root == root) {
            json_t *response = make_response();

            w_log(W_LOG_ERR,
                "Cancel subscription %.*s for client:stm=%p due to "
                "root cancellation\n",
                sub->name->len, sub->name->buf, client->client.stm);

            set_prop(response, "root", w_string_to_json(root->root_path));
            set_prop(response, "subscription", w_string_to_json(sub->name));
            // Mark the PDU as unilateral so the client knows this is not a
            // reply to one of its own requests.
            set_prop(response, "unilateral", json_true());
            set_prop(response, "canceled", json_true());

            if (!enqueue_response(&client->client, response, true)) {
              w_log(W_LOG_DBG, "failed to queue sub cancellation\n");
              // enqueue_response did not take ownership on failure, so
              // release our reference here to avoid leaking it.
              json_decref(response);
            }

            // NOTE(review): deleting through the iterator while iterating —
            // presumably w_ht_iter_del keeps `citer` valid for the following
            // w_ht_next; confirm against the w_ht implementation.
            w_ht_iter_del(client->subscriptions, &citer);
          }
        } while (w_ht_next(client->subscriptions, &citer));
      }
    } while (w_ht_next(clients, &iter));
  }
  pthread_mutex_unlock(&w_client_lock);
}
// Attach identifying metadata about `root` to the perf sample under the
// "root" key (path, recrawl count, root number, ticks, case sensitivity
// and watcher name).
void w_perf_add_root_meta(w_perf_t *perf, w_root_t *root) {
  // Note: if the root lock isn't held, we may read inaccurate numbers for
  // some of these properties. We're ok with that, and don't want to force
  // the root lock to be re-acquired just for this.

  // The funky comments at the end of the line force clang-format to keep the
  // elements on lines of their own.
  // Format string: o = steal json ref, i = int, b = bool, u = w_string.
  w_perf_add_meta(perf, "root",
                  json_pack("{s:o, s:i, s:i, s:i, s:b, s:u}", //
                            "path", w_string_to_json(root->root_path), //
                            "recrawl_count", root->recrawl_count, //
                            "number", root->number, //
                            "ticks", root->ticks, //
                            "case_sensitive", root->case_sensitive, //
                            "watcher", root->watcher_ops->name //
                            ));
}
// Produce a JSON snapshot of the publisher's internal state for diagnostic
// commands: the next serial number, the live subscribers (serial + info
// string), and the currently buffered items (serial + payload).
json_ref Publisher::getDebugInfo() const {
  auto info = json_object();
  auto locked = state_.rlock();

  info.set("next_serial", json_integer(locked->nextSerial));

  auto subsJson = json_array();
  {
    auto& arr = subsJson.array();
    for (auto& weakSub : locked->subscribers) {
      auto strong = weakSub.lock();
      if (!strong) {
        // Dead subscriber; it will be cleaned up the next time enqueue
        // is called, so just skip it here.
        continue;
      }
      arr.emplace_back(json_object(
          {{"serial", json_integer(strong->getSerial())},
           {"info", w_string_to_json(strong->getInfo())}}));
    }
  }
  info.set("subscribers", std::move(subsJson));

  auto itemsJson = json_array();
  {
    auto& arr = itemsJson.array();
    for (auto& entry : locked->items) {
      arr.emplace_back(json_object(
          {{"serial", json_integer(entry->serial)},
           {"payload", entry->payload}}));
    }
  }
  info.set("items", std::move(itemsJson));

  return info;
}
/* trigger /root triggername [watch patterns] -- cmd to run
 * Sets up a trigger so that we can execute a command when a change
 * is detected */
static void cmd_trigger(struct watchman_client *client, json_t *args) {
  w_root_t *root;
  struct watchman_trigger_command *cmd, *old;
  json_t *resp;
  json_t *trig;
  char *errmsg = NULL;
  bool need_save = true;

  // Takes a ref on the root; released at `done`.
  root = resolve_root_or_err(client, args, 1, true);
  if (!root) {
    return;
  }

  if (json_array_size(args) < 3) {
    send_error_response(client, "not enough arguments");
    goto done;
  }

  trig = json_array_get(args, 2);
  if (json_is_string(trig)) {
    // Old-style command line form: translate to the modern definition.
    // build_legacy_trigger returns a new reference (or NULL on error).
    trig = build_legacy_trigger(root, client, args);
    if (!trig) {
      goto done;
    }
  } else {
    // Add a ref so that we don't need to conditionally decref later
    // for the legacy case later
    json_incref(trig);
  }

  // On failure, errmsg is populated and must be freed at `done`.
  cmd = w_build_trigger_from_def(root, trig, &errmsg);
  json_decref(trig);

  if (!cmd) {
    send_error_response(client, "%s", errmsg);
    goto done;
  }

  resp = make_response();
  set_prop(resp, "triggerid", w_string_to_json(cmd->triggername));

  w_root_lock(root, "trigger-add");

  old = w_ht_val_ptr(w_ht_get(root->commands,
          w_ht_ptr_val(cmd->triggername)));
  if (old && json_equal(cmd->definition, old->definition)) {
    // Same definition: we don't and shouldn't touch things, so that we
    // preserve the associated trigger clock and don't cause the trigger
    // to re-run immediately
    set_prop(resp, "disposition", json_string_nocheck("already_defined"));
    w_trigger_command_free(cmd);
    cmd = NULL;
    need_save = false;
  } else {
    set_prop(resp, "disposition", json_string_nocheck(
          old ? "replaced" : "created"));
    // The commands table takes ownership of cmd (replacing any `old`).
    w_ht_replace(root->commands, w_ht_ptr_val(cmd->triggername),
        w_ht_ptr_val(cmd));
    // Force the trigger to be eligible to run now
    root->ticks++;
    root->pending_trigger_tick = root->ticks;
  }
  w_root_unlock(root);

  // Persist the updated trigger set unless nothing changed.
  if (need_save) {
    w_state_save();
  }

  send_and_dispose_response(client, resp);

done:
  if (errmsg) {
    free(errmsg);
  }
  w_root_delref(root);
}
// Handler for the "flush-subscriptions" command:
//   ["flush-subscriptions", root, {"sync_timeout": ms,
//                                  "subscriptions": [names...]?}]
// Syncs the root up to the present time and then immediately evaluates the
// named subscriptions (or all of this client's subscriptions on the root),
// reporting each as synced / no_sync_needed / dropped.
static void cmd_flush_subscriptions(
    struct watchman_client* clientbase,
    const json_ref& args) {
  auto client = (watchman_user_client*)clientbase;

  int sync_timeout;
  json_ref subs(nullptr);

  if (json_array_size(args) == 3) {
    auto& sync_timeout_obj = args.at(2).get("sync_timeout");
    // "subscriptions" is optional; when absent we flush all subscriptions
    // this client holds on the root.
    subs = args.at(2).get_default("subscriptions", nullptr);
    if (!json_is_integer(sync_timeout_obj)) {
      send_error_response(client, "'sync_timeout' must be an integer");
      return;
    }
    sync_timeout = json_integer_value(sync_timeout_obj);
  } else {
    send_error_response(
        client, "wrong number of arguments to 'flush-subscriptions'");
    return;
  }

  auto root = resolve_root_or_err(client, args, 1, false);
  if (!root) {
    return;
  }

  // Validate the requested names up front and collect the set to sync,
  // so that we fail fast before doing any work.
  std::vector<w_string> subs_to_sync;
  if (subs) {
    if (!json_is_array(subs)) {
      send_error_response(
          client,
          "expected 'subscriptions' to be an array of subscription names");
      return;
    }

    for (auto& sub_name : subs.array()) {
      if (!json_is_string(sub_name)) {
        send_error_response(
            client,
            "expected 'subscriptions' to be an array of subscription names");
        return;
      }

      auto& sub_name_str = json_to_w_string(sub_name);
      auto sub_iter = client->subscriptions.find(sub_name_str);
      if (sub_iter == client->subscriptions.end()) {
        send_error_response(
            client,
            "this client does not have a subscription named '%s'",
            sub_name_str.c_str());
        return;
      }

      auto& sub = sub_iter->second;
      if (sub->root != root) {
        send_error_response(
            client,
            "subscription '%s' is on root '%s' different from command root "
            "'%s'",
            sub_name_str.c_str(),
            sub->root->root_path.c_str(),
            root->root_path.c_str());
        return;
      }

      subs_to_sync.push_back(sub_name_str);
    }
  } else {
    // Look for all subscriptions matching this root.
    for (auto& sub_iter : client->subscriptions) {
      if (sub_iter.second->root == root) {
        subs_to_sync.push_back(sub_iter.first);
      }
    }
  }

  // Bring the view up to date before evaluating any subscription.
  if (!root->syncToNow(std::chrono::milliseconds(sync_timeout))) {
    send_error_response(client, "sync_timeout expired");
    return;
  }

  auto resp = make_response();
  auto synced = json_array();
  auto no_sync_needed = json_array();
  auto dropped = json_array();

  for (auto& sub_name_str : subs_to_sync) {
    auto sub_iter = client->subscriptions.find(sub_name_str);
    auto& sub = sub_iter->second;

    sub_action action;
    w_string policy_name;
    std::tie(action, policy_name) = get_subscription_action(sub.get(), root);

    if (action == sub_action::drop) {
      // An asserted state says this subscription's notifications should be
      // dropped: fast-forward its clock past the current position.
      auto position = root->view()->getMostRecentRootNumberAndTickValue();
      sub->last_sub_tick = position.ticks;
      sub->query->since_spec = watchman::make_unique<ClockSpec>(position);
      watchman::log(
          watchman::DBG,
          "(flush-subscriptions) dropping subscription notifications for ",
          sub->name,
          " until state ",
          policy_name,
          " is vacated. Advanced ticks to ",
          sub->last_sub_tick,
          "\n");
      json_array_append(dropped, w_string_to_json(sub_name_str));
    } else {
      // flush-subscriptions means that we _should NOT defer_ notifications. So
      // ignore defer and defer_vcs.
      ClockSpec out_position;
      watchman::log(
          watchman::DBG,
          "(flush-subscriptions) executing subscription ",
          sub->name,
          "\n");
      auto sub_result = sub->buildSubscriptionResults(root, out_position);
      if (sub_result) {
        // The subscription PDU itself is sent before the command response.
        send_and_dispose_response(client, std::move(sub_result));
        json_array_append(synced, w_string_to_json(sub_name_str));
      } else {
        json_array_append(no_sync_needed, w_string_to_json(sub_name_str));
      }
    }
  }

  resp.set({{"synced", std::move(synced)},
            {"no_sync_needed", std::move(no_sync_needed)},
            {"dropped", std::move(dropped)}});

  add_root_warnings_to_response(resp, root);
  send_and_dispose_response(client, std::move(resp));
}
// Execute this subscription's query against `root` and, when there are
// results, build the unilateral "subscription" response PDU.  Returns
// nullptr when there is nothing to send (no results, or the query failed).
// `position` receives the clock at the start of the query whenever the
// query executes successfully.
json_ref watchman_client_subscription::buildSubscriptionResults(
    const std::shared_ptr<w_root_t>& root,
    ClockSpec& position) {
  auto since_spec = query->since_spec.get();

  if (since_spec && since_spec->tag == w_cs_clock) {
    watchman::log(
        watchman::DBG,
        "running subscription ",
        name,
        " rules since ",
        since_spec->clock.position.ticks,
        "\n");
  } else {
    watchman::log(
        watchman::DBG, "running subscription ", name, " rules (no since)\n");
  }

  // Subscriptions never need to sync explicitly; we are only dispatched
  // at settle points which are by definition sync'd to the present time
  query->sync_timeout = std::chrono::milliseconds(0);
  // We're called by the io thread, so there's little chance that the root
  // could be legitimately blocked by something else. That means that we
  // can use a short lock_timeout
  query->lock_timeout =
      uint32_t(root->config.getInt("subscription_lock_timeout_ms", 100));
  w_log(W_LOG_DBG, "running subscription %s %p\n", name.c_str(), this);

  try {
    auto res = w_query_execute(query.get(), root, time_generator);

    w_log(
        W_LOG_DBG,
        "subscription %s generated %" PRIu32 " results\n",
        name.c_str(),
        uint32_t(res.resultsArray.array().size()));

    position = res.clockAtStartOfQuery;

    if (res.resultsArray.array().empty()) {
      // No changes to report, but still advance our tick bookkeeping so
      // we don't re-evaluate the same window next time.
      updateSubscriptionTicks(&res);
      return nullptr;
    }

    auto response = make_response();

    // It is way too much of a hassle to try to recreate the clock value if
    // it's not a relative clock spec, and it's only going to happen on the
    // first run anyway, so just skip doing that entirely.
    if (since_spec && since_spec->tag == w_cs_clock) {
      response.set("since", since_spec->toJson());
    }

    updateSubscriptionTicks(&res);

    response.set({{"is_fresh_instance", json_boolean(res.is_fresh_instance)},
                  {"clock", res.clockAtStartOfQuery.toJson()},
                  {"files", std::move(res.resultsArray)},
                  {"root", w_string_to_json(root->root_path)},
                  {"subscription", w_string_to_json(name)},
                  {"unilateral", json_true()}});

    return response;
  } catch (const QueryExecError& e) {
    // Query failure is logged but deliberately does not tear down the
    // subscription; the caller treats nullptr as "nothing to send".
    watchman::log(
        watchman::ERR,
        "error running subscription ",
        name,
        " query: ",
        e.what());
    return nullptr;
  }
}
// Legacy C implementation: run the subscription's query against `root` and
// build the "subscription" response PDU.  Returns NULL when there is
// nothing to send (no results, or the query failed).  The caller owns the
// returned reference.
static json_t *build_subscription_results(
    struct watchman_client_subscription *sub,
    w_root_t *root) {
  w_query_res res;
  json_t *response;
  json_t *file_list;
  char clockbuf[128];
  struct w_clockspec *since_spec = sub->query->since_spec;

  if (since_spec && since_spec->tag == w_cs_clock) {
    w_log(W_LOG_DBG, "running subscription %s rules since %" PRIu32 "\n",
        sub->name->buf, since_spec->clock.ticks);
  } else {
    w_log(W_LOG_DBG, "running subscription %s rules (no since)\n",
        sub->name->buf);
  }

  // Subscriptions never need to sync explicitly; we are only dispatched
  // at settle points which are by definition sync'd to the present time
  sub->query->sync_timeout = 0;
  if (!w_query_execute(sub->query, root, &res, subscription_generator, sub)) {
    w_log(W_LOG_ERR, "error running subscription %s query: %s",
        sub->name->buf, res.errmsg);
    w_query_result_free(&res);
    return NULL;
  }

  w_log(W_LOG_DBG, "subscription %s generated %" PRIu32 " results\n",
      sub->name->buf, res.num_results);

  if (res.num_results == 0) {
    // Nothing to report, but still advance the subscription's tick
    // bookkeeping so the same window isn't re-evaluated next time.
    update_subscription_ticks(sub, &res);
    w_query_result_free(&res);
    return NULL;
  }

  file_list = w_query_results_to_json(&sub->field_list,
      res.num_results, res.results);
  w_query_result_free(&res);

  response = make_response();

  // It is way too much of a hassle to try to recreate the clock value if it's
  // not a relative clock spec, and it's only going to happen on the first run
  // anyway, so just skip doing that entirely.
  if (since_spec && since_spec->tag == w_cs_clock &&
      clock_id_string(since_spec->clock.root_number, since_spec->clock.ticks,
          clockbuf, sizeof(clockbuf))) {
    set_prop(response, "since", json_string_nocheck(clockbuf));
  }
  if (clock_id_string(res.root_number, res.ticks,
      clockbuf, sizeof(clockbuf))) {
    set_prop(response, "clock", json_string_nocheck(clockbuf));
  }
  update_subscription_ticks(sub, &res);

  set_prop(response, "is_fresh_instance",
      json_boolean(res.is_fresh_instance));
  // set_prop takes ownership of file_list.
  set_prop(response, "files", file_list);
  set_prop(response, "root", w_string_to_json(root->root_path));
  set_prop(response, "subscription", w_string_to_json(sub->name));

  return response;
}
// Field renderer for "symlink_target": the recorded target as a JSON
// string, or JSON null when no target is recorded for the file.
static json_ref make_symlink(const struct watchman_rule_match* match) {
  const auto& target = match->file->symlink_target;
  if (!target) {
    return json_null();
  }
  return w_string_to_json(target);
}
// Field renderer for "name": the file's root-relative name as a JSON
// string value.
static json_ref make_name(const struct watchman_rule_match* match) {
  const auto& relativeName = match->relname;
  return w_string_to_json(relativeName);
}
static void spawn_command( const std::shared_ptr<w_root_t>& root, struct watchman_trigger_command* cmd, w_query_res* res, struct w_clockspec* since_spec) { char **envp = NULL; uint32_t i = 0; int ret; char **argv = NULL; uint32_t env_size; posix_spawn_file_actions_t actions; posix_spawnattr_t attr; #ifndef _WIN32 sigset_t mask; #endif long arg_max; size_t argspace_remaining; bool file_overflow = false; int result_log_level; w_string_t *working_dir = NULL; #ifdef _WIN32 arg_max = 32*1024; #else arg_max = sysconf(_SC_ARG_MAX); #endif if (arg_max <= 0) { argspace_remaining = UINT_MAX; } else { argspace_remaining = (uint32_t)arg_max; } // Allow some misc working overhead argspace_remaining -= 32; // Record an overflow before we call prepare_stdin(), which mutates // and resizes the results to fit the specified limit. if (cmd->max_files_stdin > 0 && res->resultsArray.array().size() > cmd->max_files_stdin) { file_overflow = true; } auto stdin_file = prepare_stdin(cmd, res); if (!stdin_file) { w_log( W_LOG_ERR, "trigger %s:%s %s\n", root->root_path.c_str(), cmd->triggername.c_str(), strerror(errno)); return; } // Assumption: that only one thread will be executing on a given // cmd instance so that mutation of cmd->envht is safe. // This is guaranteed in the current architecture. // It is way too much of a hassle to try to recreate the clock value if it's // not a relative clock spec, and it's only going to happen on the first run // anyway, so just skip doing that entirely. 
if (since_spec && since_spec->tag == w_cs_clock) { w_envp_set_cstring( cmd->envht, "WATCHMAN_SINCE", since_spec->clock.position.toClockString().c_str()); } else { w_envp_unset(cmd->envht, "WATCHMAN_SINCE"); } w_envp_set_cstring( cmd->envht, "WATCHMAN_CLOCK", res->clockAtStartOfQuery.toClockString().c_str()); if (cmd->query->relative_root) { w_envp_set(cmd->envht, "WATCHMAN_RELATIVE_ROOT", cmd->query->relative_root); } else { w_envp_unset(cmd->envht, "WATCHMAN_RELATIVE_ROOT"); } // Compute args auto args = json_deep_copy(cmd->command); if (cmd->append_files) { // Measure how much space the base args take up for (i = 0; i < json_array_size(args); i++) { const char *ele = json_string_value(json_array_get(args, i)); argspace_remaining -= strlen(ele) + 1 + sizeof(char*); } // Dry run with env to compute space envp = w_envp_make_from_ht(cmd->envht, &env_size); free(envp); envp = NULL; argspace_remaining -= env_size; for (const auto& item : res->dedupedFileNames) { // also: NUL terminator and entry in argv uint32_t size = item.size() + 1 + sizeof(char*); if (argspace_remaining < size) { file_overflow = true; break; } argspace_remaining -= size; json_array_append_new(args, w_string_to_json(item)); } } argv = w_argv_copy_from_json(args, 0); args = nullptr; w_envp_set_bool(cmd->envht, "WATCHMAN_FILES_OVERFLOW", file_overflow); envp = w_envp_make_from_ht(cmd->envht, &env_size); posix_spawnattr_init(&attr); #ifndef _WIN32 sigemptyset(&mask); posix_spawnattr_setsigmask(&attr, &mask); #endif posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETSIGMASK| #ifdef POSIX_SPAWN_CLOEXEC_DEFAULT // Darwin: close everything except what we put in file actions POSIX_SPAWN_CLOEXEC_DEFAULT| #endif POSIX_SPAWN_SETPGROUP); posix_spawn_file_actions_init(&actions); #ifndef _WIN32 posix_spawn_file_actions_adddup2( &actions, stdin_file->getFileDescriptor(), STDIN_FILENO); #else posix_spawn_file_actions_adddup2_handle_np( &actions, stdin_file->getWindowsHandle(), STDIN_FILENO); #endif if 
(cmd->stdout_name) { posix_spawn_file_actions_addopen(&actions, STDOUT_FILENO, cmd->stdout_name, cmd->stdout_flags, 0666); } else { posix_spawn_file_actions_adddup2(&actions, STDOUT_FILENO, STDOUT_FILENO); } if (cmd->stderr_name) { posix_spawn_file_actions_addopen(&actions, STDERR_FILENO, cmd->stderr_name, cmd->stderr_flags, 0666); } else { posix_spawn_file_actions_adddup2(&actions, STDERR_FILENO, STDERR_FILENO); } // Figure out the appropriate cwd { const char *cwd = NULL; working_dir = NULL; if (cmd->query->relative_root) { working_dir = cmd->query->relative_root; } else { working_dir = root->root_path; } w_string_addref(working_dir); json_unpack(cmd->definition, "{s:s}", "chdir", &cwd); if (cwd) { w_string_t *cwd_str = w_string_new_typed(cwd, W_STRING_BYTE); if (w_is_path_absolute_cstr(cwd)) { w_string_delref(working_dir); working_dir = cwd_str; } else { w_string_t *joined; joined = w_string_path_cat(working_dir, cwd_str); w_string_delref(cwd_str); w_string_delref(working_dir); working_dir = joined; } } w_log(W_LOG_DBG, "using %.*s for working dir\n", working_dir->len, working_dir->buf); } #ifndef _WIN32 // This mutex is present to avoid fighting over the cwd when multiple // triggers run at the same time. It doesn't coordinate with all // possible chdir() calls, but this is the only place that we do this // in the watchman server process. static std::mutex cwdMutex; { std::unique_lock<std::mutex> lock(cwdMutex); ignore_result(chdir(working_dir->buf)); #else posix_spawnattr_setcwd_np(&attr, working_dir->buf); #endif w_string_delref(working_dir); working_dir = nullptr; ret = posix_spawnp(&cmd->current_proc, argv[0], &actions, &attr, argv, envp); if (ret != 0) { // On Darwin (at least), posix_spawn can fail but will still populate the // pid. 
Since we use the pid to gate future spawns, we need to ensure // that we clear out the pid on failure, otherwise the trigger would be // effectively disabled for the rest of the watch lifetime cmd->current_proc = 0; } #ifndef _WIN32 ignore_result(chdir("/")); } #endif // If failed, we want to make sure we log enough info to figure out why result_log_level = res == 0 ? W_LOG_DBG : W_LOG_ERR; w_log(result_log_level, "posix_spawnp: %s\n", cmd->triggername.c_str()); for (i = 0; argv[i]; i++) { w_log(result_log_level, "argv[%d] %s\n", i, argv[i]); } for (i = 0; envp[i]; i++) { w_log(result_log_level, "envp[%d] %s\n", i, envp[i]); } w_log( result_log_level, "trigger %s:%s pid=%d ret=%d %s\n", root->root_path.c_str(), cmd->triggername.c_str(), (int)cmd->current_proc, ret, strerror(ret)); free(argv); free(envp); posix_spawnattr_destroy(&attr); posix_spawn_file_actions_destroy(&actions); }