Example 1
static bool kqueue_root_start_watch_file(watchman_global_watcher_t watcher,
      w_root_t *root, struct watchman_file *file) {
  struct kqueue_root_state *state = root->watch;
  struct kevent k;
  w_ht_val_t fdval;
  int fd;
  w_string_t *full_name;
  unused_parameter(watcher);

  full_name = w_string_path_cat(file->parent->path, file->name);
  pthread_mutex_lock(&state->lock);
  if (w_ht_lookup(state->name_to_fd, w_ht_ptr_val(full_name), &fdval, false)) {
    // Already watching it
    pthread_mutex_unlock(&state->lock);
    return true;
  }
  pthread_mutex_unlock(&state->lock);

  w_log(W_LOG_DBG, "watch_file(%s)\n", full_name->buf);

  fd = open(full_name->buf, O_EVTONLY|O_CLOEXEC);
  if (fd == -1) {
    w_log(W_LOG_ERR, "failed to open %s O_EVTONLY: %s\n",
        full_name->buf, strerror(errno));
    w_string_delref(full_name);
    return false;
  }

  memset(&k, 0, sizeof(k));
  EV_SET(&k, fd, EVFILT_VNODE, EV_ADD|EV_CLEAR,
      NOTE_WRITE|NOTE_DELETE|NOTE_EXTEND|NOTE_RENAME|NOTE_ATTRIB,
      0, full_name);

  pthread_mutex_lock(&state->lock);
  w_ht_replace(state->name_to_fd, w_ht_ptr_val(full_name), fd);
  w_ht_replace(state->fd_to_name, fd, w_ht_ptr_val(full_name));
  pthread_mutex_unlock(&state->lock);

  if (kevent(state->kq_fd, &k, 1, NULL, 0, 0)) {
    w_log(W_LOG_DBG, "kevent EV_ADD file %s failed: %s",
        full_name->buf, strerror(errno));
    close(fd);
    pthread_mutex_lock(&state->lock);
    w_ht_del(state->name_to_fd, w_ht_ptr_val(full_name));
    w_ht_del(state->fd_to_name, fd);
    pthread_mutex_unlock(&state->lock);
  } else {
    w_log(W_LOG_DBG, "kevent file %s -> %d\n", full_name->buf, fd);
  }
  w_string_delref(full_name);

  return true;
}
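For context, the kevent registration above is the standard BSD vnode-watch recipe. A minimal standalone sketch of the same pattern, decoupled from the watchman bookkeeping (error handling abbreviated; O_EVTONLY is Darwin-only, so the portable O_RDONLY stands in here):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

// Watch a single file for vnode changes and report the first event.
static int watch_one_file(const char *path) {
  struct kevent change, event;
  int kq = kqueue();
  int fd = open(path, O_RDONLY);

  if (kq == -1 || fd == -1) {
    return -1;
  }

  EV_SET(&change, fd, EVFILT_VNODE, EV_ADD|EV_CLEAR,
      NOTE_WRITE|NOTE_DELETE|NOTE_EXTEND|NOTE_RENAME|NOTE_ATTRIB,
      0, NULL);
  if (kevent(kq, &change, 1, NULL, 0, NULL) == -1) {
    return -1;
  }

  // Block until the kernel reports a change on the vnode
  if (kevent(kq, NULL, 0, &event, 1, NULL) == 1) {
    printf("fd=%d fflags=0x%x\n", (int)event.ident, (unsigned)event.fflags);
  }
  close(fd);
  close(kq);
  return 0;
}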
Example 2
// Caller must hold spawn_lock
static void delete_running_pid(pid_t pid)
{
  if (!running_kids) {
    return;
  }
  w_ht_del(running_kids, pid);
}
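The comment above records a locking contract that the function itself cannot enforce. A hypothetical call site honoring it, assuming spawn_lock is the pthread_mutex_t that guards running_kids:

  // Caller takes spawn_lock around the map mutation, per the contract
  pthread_mutex_lock(&spawn_lock);
  delete_running_pid(pid);
  pthread_mutex_unlock(&spawn_lock);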
Example 3
static void cmd_shutdown(
    struct watchman_client *client,
    json_t *args)
{
  void *ignored;
  unused_parameter(args);

  w_log(W_LOG_ERR, "shutdown-server was requested, exiting!\n");

  /* close out some resources to persuade valgrind to run clean */
  close(listener_fd);
  listener_fd = -1;
  w_root_free_watched_roots();

  pthread_mutex_lock(&w_client_lock);
  w_ht_del(clients, client->fd);
  pthread_mutex_unlock(&w_client_lock);
  pthread_join(reaper_thread, &ignored);
  cfg_shutdown();

  close(STDIN_FILENO);
  close(STDOUT_FILENO);
  close(STDERR_FILENO);
  exit(0);
}
Example 4
static struct watchman_client *make_new_client(w_stm_t stm) {
  struct watchman_client *client;
  pthread_attr_t attr;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  client = calloc(1, derived_client_size);
  if (!client) {
    pthread_attr_destroy(&attr);
    return NULL;
  }
  client->stm = stm;
  w_log(W_LOG_DBG, "accepted client:stm=%p\n", client->stm);

  if (!w_json_buffer_init(&client->reader)) {
    // FIXME: error handling
  }
  if (!w_json_buffer_init(&client->writer)) {
    // FIXME: error handling
  }
  client->ping = w_event_make();
  if (!client->ping) {
    // FIXME: error handling
  }

  derived_client_ctor(client);

  pthread_mutex_lock(&w_client_lock);
  w_ht_set(clients, w_ht_ptr_val(client), w_ht_ptr_val(client));
  pthread_mutex_unlock(&w_client_lock);

  // Start a thread for the client.
  // We used to use libevent for this, but we have
  // a low volume of concurrent clients and the json
  // parse/encode APIs are not easily used in a non-blocking
  // server architecture.
  if (pthread_create(&client->thread_handle, &attr, client_thread, client)) {
    // It didn't work out, sorry!
    pthread_mutex_lock(&w_client_lock);
    w_ht_del(clients, w_ht_ptr_val(client));
    pthread_mutex_unlock(&w_client_lock);
    client_delete(client);
    client = NULL; // don't hand back a pointer to freed memory
  }

  pthread_attr_destroy(&attr);

  return client;
}
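Note the rollback ordering on pthread_create failure above: the client is removed from the shared clients map before client_delete frees it, so no other thread can look up a half-destroyed object. The same pattern in isolation (worker, unpublish, and destroy are hypothetical names):

  pthread_attr_t attr;
  pthread_t tid;

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  if (pthread_create(&tid, &attr, worker, arg)) {
    unpublish(arg);  // remove from shared structures first...
    destroy(arg);    // ...then free, since no thread will ever run with it
  }
  pthread_attr_destroy(&attr);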
Example 5
BOOL w_wait_for_any_child(DWORD timeoutms, DWORD *pid) {
  HANDLE handles[MAXIMUM_WAIT_OBJECTS];
  DWORD pids[MAXIMUM_WAIT_OBJECTS];
  int i = 0;
  w_ht_iter_t iter;
  DWORD res;

  *pid = 0;

  pthread_mutex_lock(&child_proc_lock);
  if (child_procs && w_ht_first(child_procs, &iter)) do {
    HANDLE proc = w_ht_val_ptr(iter.value);
    pids[i] = (DWORD)iter.key;
    handles[i++] = proc;
  } while (w_ht_next(child_procs, &iter));
  pthread_mutex_unlock(&child_proc_lock);

  if (i == 0) {
    return false;
  }

  w_log(W_LOG_DBG, "w_wait_for_any_child: waiting for %d handles\n", i);
  res = WaitForMultipleObjectsEx(i, handles, false, timeoutms, true);
  if (res == WAIT_FAILED) {
    errno = map_win32_err(GetLastError());
    return false;
  }

  if (res < WAIT_OBJECT_0 + i) {
    i = res - WAIT_OBJECT_0;
  } else if (res >= WAIT_ABANDONED_0 && res < WAIT_ABANDONED_0 + i) {
    i = res - WAIT_ABANDONED_0;
  } else {
    return false;
  }

  pthread_mutex_lock(&child_proc_lock);
  w_ht_del(child_procs, pids[i]);
  pthread_mutex_unlock(&child_proc_lock);

  CloseHandle(handles[i]);
  *pid = pids[i];
  return true;
}
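The arithmetic on res above is how Win32 encodes which handle fired: WaitForMultipleObjectsEx returns WAIT_OBJECT_0 + index when handles[index] is signaled and WAIT_ABANDONED_0 + index for an abandoned mutex. The same decoding condensed into a helper, as a sketch:

// Map a WaitForMultipleObjectsEx result to an index into handles[],
// or -1 for timeout, queued APC, or failure.
static int decode_wait_result(DWORD res, DWORD count) {
  if (res < WAIT_OBJECT_0 + count) {
    return (int)(res - WAIT_OBJECT_0);
  }
  if (res >= WAIT_ABANDONED_0 && res < WAIT_ABANDONED_0 + count) {
    return (int)(res - WAIT_ABANDONED_0);
  }
  return -1;  // WAIT_TIMEOUT, WAIT_IO_COMPLETION or WAIT_FAILED
}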
Example 6
/* trigger-del /root triggername
 * Delete a trigger from a root
 */
static void cmd_trigger_delete(struct watchman_client *client, json_t *args)
{
    w_root_t *root;
    json_t *resp;
    json_t *jname;
    w_string_t *tname;
    bool res;

    root = resolve_root_or_err(client, args, 1, false);
    if (!root) {
        return;
    }

    if (json_array_size(args) != 3) {
        send_error_response(client, "wrong number of arguments");
        w_root_delref(root);
        return;
    }
    jname = json_array_get(args, 2);
    if (!json_is_string(jname)) {
        send_error_response(client, "expected 2nd parameter to be trigger name");
        w_root_delref(root);
        return;
    }
    tname = json_to_w_string_incref(jname);

    w_root_lock(root, "trigger-del");
    res = w_ht_del(root->commands, w_ht_ptr_val(tname));
    w_root_unlock(root);

    if (res) {
        w_state_save();
    }

    w_string_delref(tname);

    resp = make_response();
    set_prop(resp, "deleted", json_boolean(res));
    json_incref(jname);
    set_prop(resp, "trigger", jname);
    send_and_dispose_response(client, resp);
    w_root_delref(root);
}
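On the wire this handler consumes a three-element command array and answers with the flag it computed; the shape of the exchange is roughly the following (a sketch, not a verbatim capture; make_response may add fields such as the server version):

  ["trigger-del", "/path/to/root", "mybuild"]
  {"trigger": "mybuild", "deleted": true}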
Example 7
/* trigger-del /root triggername
 * Delete a trigger from a root
 */
void cmd_trigger_delete(struct watchman_client *client, json_t *args)
{
  w_root_t *root;
  json_t *resp;
  const char *name;
  w_string_t *tname;
  bool res;

  root = resolve_root_or_err(client, args, 1, false);
  if (!root) {
    return;
  }

  if (json_array_size(args) != 3) {
    send_error_response(client, "wrong number of arguments");
    w_root_delref(root);
    return;
  }
  name = json_string_value(json_array_get(args, 2));
  if (!name) {
    send_error_response(client, "expected 2nd parameter to be trigger name");
    w_root_delref(root);
    return;
  }
  tname = w_string_new(name);

  w_root_lock(root);
  res = w_ht_del(root->commands, (w_ht_val_t)tname);
  w_root_unlock(root);

  w_state_save();

  w_string_delref(tname);

  resp = make_response();
  set_prop(resp, "deleted", json_boolean(res));
  set_prop(resp, "trigger", json_string_nocheck(name));
  send_and_dispose_response(client, resp);
  w_root_delref(root);
}
Example 8
/* unsubscribe /root subname
 * Cancels a subscription */
static void cmd_unsubscribe(struct watchman_client *clientbase, json_t *args)
{
  w_root_t *root;
  const char *name;
  w_string_t *sname;
  bool deleted;
  json_t *resp;
  const json_t *jstr;
  struct watchman_user_client *client =
      (struct watchman_user_client *)clientbase;

  root = resolve_root_or_err(&client->client, args, 1, false);
  if (!root) {
    return;
  }

  jstr = json_array_get(args, 2);
  name = json_string_value(jstr);
  if (!name) {
    send_error_response(&client->client,
        "expected 2nd parameter to be subscription name");
    w_root_delref(root);
    return;
  }

  sname = json_to_w_string_incref(jstr);

  pthread_mutex_lock(&w_client_lock);
  deleted = w_ht_del(client->subscriptions, w_ht_ptr_val(sname));
  pthread_mutex_unlock(&w_client_lock);

  w_string_delref(sname);

  resp = make_response();
  set_bytestring_prop(resp, "unsubscribe", name);
  set_prop(resp, "deleted", json_boolean(deleted));

  send_and_dispose_response(&client->client, resp);
  w_root_delref(root);
}
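The matching exchange for this handler, per the comment above (again a sketch of the shape only):

  ["unsubscribe", "/path/to/root", "mysub"]
  {"unsubscribe": "mysub", "deleted": true}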
Example 9
bool w_start_listener(const char *path)
{
  struct sockaddr_un un;
  pthread_t thr;
  pthread_attr_t attr;
  pthread_mutexattr_t mattr;
  struct sigaction sa;
  sigset_t sigset;
#ifdef HAVE_LIBGIMLI_H
  volatile struct gimli_heartbeat *hb = NULL;
#endif
  struct timeval tv;
  void *ignored;
  int n_clients = 0;

  listener_thread = pthread_self();

  pthread_mutexattr_init(&mattr);
  pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&w_client_lock, &mattr);
  pthread_mutexattr_destroy(&mattr);

#ifdef HAVE_LIBGIMLI_H
  hb = gimli_heartbeat_attach();
#endif

#if defined(HAVE_KQUEUE) || defined(HAVE_FSEVENTS)
  {
    struct rlimit limit;
    int mib[2] = { CTL_KERN,
#ifdef KERN_MAXFILESPERPROC
      KERN_MAXFILESPERPROC
#else
      KERN_MAXFILES
#endif
    };
    int maxperproc;
    size_t len;

    len = sizeof(maxperproc);
    sysctl(mib, 2, &maxperproc, &len, NULL, 0);

    getrlimit(RLIMIT_NOFILE, &limit);
    w_log(W_LOG_ERR, "file limit is %" PRIu64
        " kern.maxfilesperproc=%i\n",
        limit.rlim_cur, maxperproc);

    if (limit.rlim_cur != RLIM_INFINITY &&
        maxperproc > 0 &&
        limit.rlim_cur < (rlim_t)maxperproc) {
      limit.rlim_cur = maxperproc;

      if (setrlimit(RLIMIT_NOFILE, &limit)) {
        w_log(W_LOG_ERR,
          "failed to raise limit to %" PRIu64 " (%s).\n",
          limit.rlim_cur,
          strerror(errno));
      } else {
        w_log(W_LOG_ERR,
            "raised file limit to %" PRIu64 "\n",
            limit.rlim_cur);
      }
    }

    getrlimit(RLIMIT_NOFILE, &limit);
#ifndef HAVE_FSEVENTS
    if (limit.rlim_cur < 10240) {
      w_log(W_LOG_ERR,
          "Your file descriptor limit is very low (%" PRIu64 "), "
          "please consult the watchman docs on raising the limits\n",
          limit.rlim_cur);
    }
#endif
  }
#endif

  proc_pid = (int)getpid();
  if (gettimeofday(&tv, NULL) == -1) {
    w_log(W_LOG_ERR, "gettimeofday failed: %s\n", strerror(errno));
    return false;
  }
  proc_start_time = (uint64_t)tv.tv_sec;

  if (strlen(path) >= sizeof(un.sun_path) - 1) {
    w_log(W_LOG_ERR, "%s: path is too long\n",
        path);
    return false;
  }

  signal(SIGPIPE, SIG_IGN);

  /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without restarting
   * syscalls */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = wakeme;
  sa.sa_flags = 0;
  sigaction(SIGUSR1, &sa, NULL);
  sigaction(SIGCHLD, &sa, NULL);

  // Block SIGCHLD everywhere
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGCHLD);
  sigprocmask(SIG_BLOCK, &sigset, NULL);

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  listener_fd = socket(PF_LOCAL, SOCK_STREAM, 0);
  if (listener_fd == -1) {
    w_log(W_LOG_ERR, "socket: %s\n",
        strerror(errno));
    return false;
  }

  un.sun_family = PF_LOCAL;
  strcpy(un.sun_path, path);
  unlink(path);

  if (bind(listener_fd, (struct sockaddr*)&un, sizeof(un)) != 0) {
    w_log(W_LOG_ERR, "bind(%s): %s\n",
      path, strerror(errno));
    close(listener_fd);
    return false;
  }

  if (listen(listener_fd, 200) != 0) {
    w_log(W_LOG_ERR, "listen(%s): %s\n",
        path, strerror(errno));
    close(listener_fd);
    return false;
  }

  w_set_cloexec(listener_fd);

  if (pthread_create(&reaper_thread, NULL, child_reaper, NULL)) {
    w_log(W_LOG_FATAL, "pthread_create(reaper): %s\n",
        strerror(errno));
    return false;
  }

  if (!clients) {
    clients = w_ht_new(2, &client_hash_funcs);
  }

  w_state_load();

#ifdef HAVE_LIBGIMLI_H
  if (hb) {
    gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
  }
#endif
  w_set_nonblock(listener_fd);

  // Now run the dispatch
  while (!stopping) {
    int client_fd;
    struct watchman_client *client;
    struct pollfd pfd;
    int bufsize;

#ifdef HAVE_LIBGIMLI_H
    if (hb) {
      gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
    }
#endif

    pfd.events = POLLIN;
    pfd.fd = listener_fd;
    if (poll(&pfd, 1, 10000) < 1 || (pfd.revents & POLLIN) == 0) {
      continue;
    }

#ifdef HAVE_ACCEPT4
    client_fd = accept4(listener_fd, NULL, 0, SOCK_CLOEXEC);
#else
    client_fd = accept(listener_fd, NULL, 0);
#endif
    if (client_fd == -1) {
      continue;
    }
    w_set_cloexec(client_fd);
    bufsize = WATCHMAN_IO_BUF_SIZE;
    setsockopt(client_fd, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize));

    client = calloc(1, sizeof(*client));
    client->fd = client_fd;
    w_log(W_LOG_DBG, "accepted client %p fd=%d\n", client, client_fd);

    if (!w_json_buffer_init(&client->reader)) {
      // FIXME: error handling
    }
    if (!w_json_buffer_init(&client->writer)) {
      // FIXME: error handling
    }
    if (pipe(client->ping)) {
      // FIXME: error handling
    }
    client->subscriptions = w_ht_new(2, &subscription_hash_funcs);
    w_set_cloexec(client->ping[0]);
    w_set_nonblock(client->ping[0]);
    w_set_cloexec(client->ping[1]);
    w_set_nonblock(client->ping[1]);

    pthread_mutex_lock(&w_client_lock);
    w_ht_set(clients, client->fd, w_ht_ptr_val(client));
    pthread_mutex_unlock(&w_client_lock);

    // Start a thread for the client.
    // We used to use libevent for this, but we have
    // a low volume of concurrent clients and the json
    // parse/encode APIs are not easily used in a non-blocking
    // server architecture.
    if (pthread_create(&thr, &attr, client_thread, client)) {
      // It didn't work out, sorry!
      pthread_mutex_lock(&w_client_lock);
      w_ht_del(clients, client->fd);
      pthread_mutex_unlock(&w_client_lock);
    }
  }

  pthread_attr_destroy(&attr);

  /* close out some resources to persuade valgrind to run clean */
  close(listener_fd);
  listener_fd = -1;

  // Wait for clients, waking any sleeping clients up in the process
  do {
    w_ht_iter_t iter;

    pthread_mutex_lock(&w_client_lock);
    n_clients = w_ht_size(clients);

    if (w_ht_first(clients, &iter)) do {
      struct watchman_client *client = w_ht_val_ptr(iter.value);
      ignore_result(write(client->ping[1], "a", 1));
    } while (w_ht_next(clients, &iter));

    pthread_mutex_unlock(&w_client_lock);

    w_log(W_LOG_ERR, "waiting for %d clients to terminate\n", n_clients);
    usleep(2000);
  } while (n_clients > 0);

  w_root_free_watched_roots();

  pthread_join(reaper_thread, &ignored);
  cfg_shutdown();

  return true;
}
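Beneath the rlimit and signal plumbing, the listener setup above is the classic unix-domain recipe. Distilled into a sketch with abbreviated error handling:

#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

// Minimal PF_LOCAL listener: socket -> bind -> listen.
static int listen_unix(const char *path) {
  struct sockaddr_un un;
  int fd;

  if (strlen(path) >= sizeof(un.sun_path) - 1) {
    return -1;  // path would be truncated
  }
  fd = socket(PF_LOCAL, SOCK_STREAM, 0);
  if (fd == -1) {
    return -1;
  }
  memset(&un, 0, sizeof(un));
  un.sun_family = PF_LOCAL;
  strcpy(un.sun_path, path);
  unlink(path);  // clear a stale socket left by a previous run

  if (bind(fd, (struct sockaddr*)&un, sizeof(un)) != 0 ||
      listen(fd, 200) != 0) {
    close(fd);
    return -1;
  }
  return fd;  // caller polls and accept()s on this descriptor
}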
Example 10
// The client thread reads and decodes json packets,
// then dispatches the commands that it finds
static void *client_thread(void *ptr)
{
  struct watchman_client *client = ptr;
  struct pollfd pfd[2];
  json_t *request;
  json_error_t jerr;
  char buf[16];

  w_set_nonblock(client->fd);

  while (!stopping) {
    // Wait for input from either the client socket or
    // via the ping pipe, which signals that some other
    // thread wants to unilaterally send data to the client

    pfd[0].fd = client->fd;
    pfd[0].events = POLLIN|POLLHUP|POLLERR;
    pfd[0].revents = 0;

    pfd[1].fd = client->ping[0];
    pfd[1].events = POLLIN|POLLHUP|POLLERR;
    pfd[1].revents = 0;

    ignore_result(poll(pfd, 2, 200));

    if (stopping) {
      break;
    }

    if (pfd[0].revents & (POLLHUP|POLLERR)) {
      w_log(W_LOG_DBG, "got HUP|ERR on client %p fd=%d, disconnecting\n",
          client, client->fd);
      break;
    }

    if (pfd[0].revents) {
      // Solaris: we may not detect POLLHUP until we try to read, so
      // let's peek ahead and characterize it correctly.  This is only
      // needed if we have no data buffered
      if (client->reader.wpos == client->reader.rpos) {
        char peek;
        if (recv(client->fd, &peek, sizeof(peek), MSG_PEEK) == 0) {
          w_log(W_LOG_DBG, "got HUP|ERR on client fd=%d, disconnecting\n",
            client->fd);
          goto disconnected;
        }
      }

      request = w_json_buffer_next(&client->reader, client->fd, &jerr);

      if (!request && errno == EAGAIN) {
        // That's fine
      } else if (!request) {
        // Not so cool
        send_error_response(client, "invalid json at position %d: %s",
            jerr.position, jerr.text);
        w_log(W_LOG_ERR, "invalid data from client: %s\n", jerr.text);

        goto disconnected;
      } else if (request) {
        client->pdu_type = client->reader.pdu_type;
        dispatch_command(client, request, CMD_DAEMON);
        json_decref(request);
      }
    }

    if (pfd[1].revents) {
      ignore_result(read(client->ping[0], buf, sizeof(buf)));
    }

    /* now send our response(s) */
    while (client->head) {
      struct watchman_client_response *resp;

      /* de-queue the first response */
      pthread_mutex_lock(&w_client_lock);
      resp = client->head;
      if (resp) {
        client->head = resp->next;
        if (client->tail == resp) {
          client->tail = NULL;
        }
      }
      pthread_mutex_unlock(&w_client_lock);

      if (resp) {
        w_clear_nonblock(client->fd);

        /* Return the data in the same format that was used to ask for it */
        w_ser_write_pdu(client->pdu_type, &client->writer,
            client->fd, resp->json);

        json_decref(resp->json);
        free(resp);

        w_set_nonblock(client->fd);
      }
    }
  }

disconnected:
  pthread_mutex_lock(&w_client_lock);
  w_ht_del(clients, client->fd);
  pthread_mutex_unlock(&w_client_lock);

  return NULL;
}
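The ping pipe in this loop is the classic self-pipe trick: a thread that queues a response writes one byte to client->ping[1], forcing the poll to wake so the queue can be drained. The mechanism in isolation (a sketch with hypothetical descriptor names):

#include <poll.h>
#include <unistd.h>

// Producer side: wake the polling thread after queueing work.
static void wake_poller(int ping_wr) {
  char byte = 'a';
  (void)write(ping_wr, &byte, 1);
}

// Poller side: wait, then drain the pipe so stale wakeups don't accumulate.
static void await_wakeup(int ping_rd) {
  struct pollfd pfd = { .fd = ping_rd, .events = POLLIN, .revents = 0 };
  char buf[16];

  if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
    (void)read(ping_rd, buf, sizeof(buf));
  }
}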
Example 11
static bool kqueue_root_consume_notify(watchman_global_watcher_t watcher,
    w_root_t *root, struct watchman_pending_collection *coll)
{
  struct kqueue_root_state *state = root->watch;
  int n;
  int i;
  struct timespec ts = { 0, 0 };
  struct timeval now;
  unused_parameter(watcher);

  errno = 0;
  n = kevent(state->kq_fd, NULL, 0,
      state->keventbuf, sizeof(state->keventbuf) / sizeof(state->keventbuf[0]),
      &ts);
  w_log(W_LOG_DBG, "consume_kqueue: %s n=%d err=%s\n",
      root->root_path->buf, n, strerror(errno));
  if (root->cancelled) {
    return false;
  }

  gettimeofday(&now, NULL);
  for (i = 0; n > 0 && i < n; i++) {
    uint32_t fflags = state->keventbuf[i].fflags;
    bool is_dir = IS_DIR_BIT_SET(state->keventbuf[i].udata);
    w_string_t *path;
    char flags_label[128];
    int fd = state->keventbuf[i].ident;

    w_expand_flags(kflags, fflags, flags_label, sizeof(flags_label));
    pthread_mutex_lock(&state->lock);
    path = w_ht_val_ptr(w_ht_get(state->fd_to_name, fd));
    if (!path) {
      // Was likely a buffered notification for something that we decided
      // to stop watching
      w_log(W_LOG_DBG,
          " KQ notif for fd=%d; flags=0x%x %s no ref for it in fd_to_name\n",
          fd, fflags, flags_label);
      pthread_mutex_unlock(&state->lock);
      continue;
    }
    w_string_addref(path);

    w_log(W_LOG_DBG, " KQ fd=%d path %s [0x%x %s]\n",
        fd, path->buf, fflags, flags_label);
    if ((fflags & (NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE))) {
      struct kevent k;

      if (w_string_equal(path, root->root_path)) {
        w_log(W_LOG_ERR,
            "root dir %s has been (re)moved [code 0x%x], canceling watch\n",
            root->root_path->buf, fflags);
        w_root_cancel(root);
        pthread_mutex_unlock(&state->lock);
        return false;
      }

      // Remove our watch bits
      memset(&k, 0, sizeof(k));
      EV_SET(&k, fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL);
      kevent(state->kq_fd, &k, 1, NULL, 0, 0);
      w_ht_del(state->name_to_fd, w_ht_ptr_val(path));
      w_ht_del(state->fd_to_name, fd);
    }

    pthread_mutex_unlock(&state->lock);
    w_pending_coll_add(coll, path, !is_dir, now, !is_dir);
    w_string_delref(path);
  }

  return n > 0;
}
Example 12
static DIR *kqueue_root_start_watch_dir(watchman_global_watcher_t watcher,
    w_root_t *root, struct watchman_dir *dir, struct timeval now,
    const char *path) {
  struct kqueue_root_state *state = root->watch;
  DIR *osdir;
  struct stat st, osdirst;
  struct kevent k;
  int newwd;
  unused_parameter(watcher);

  osdir = opendir_nofollow(path);
  if (!osdir) {
    handle_open_errno(root, dir, now, "opendir", errno, NULL);
    return NULL;
  }

  newwd = open(path, O_NOFOLLOW|O_EVTONLY|O_CLOEXEC);

  if (newwd == -1) {
    // directory got deleted between opendir and open
    handle_open_errno(root, dir, now, "open", errno, NULL);
    closedir(osdir);
    return NULL;
  }
  if (fstat(newwd, &st) == -1 || fstat(dirfd(osdir), &osdirst) == -1) {
    // whaaa?
    w_log(W_LOG_ERR, "fstat on opened dir %s failed: %s\n", path,
        strerror(errno));
    w_root_schedule_recrawl(root, "fstat failed");
    close(newwd);
    closedir(osdir);
    return NULL;
  }

  if (st.st_dev != osdirst.st_dev || st.st_ino != osdirst.st_ino) {
    // directory got replaced between opendir and open -- at this point its
    // parent's being watched, so we let filesystem events take care of it
    handle_open_errno(root, dir, now, "open", ENOTDIR, NULL);
    close(newwd);
    closedir(osdir);
    return NULL;
  }

  memset(&k, 0, sizeof(k));
  EV_SET(&k, newwd, EVFILT_VNODE, EV_ADD|EV_CLEAR,
      NOTE_WRITE|NOTE_DELETE|NOTE_EXTEND|NOTE_RENAME,
      0, SET_DIR_BIT(dir->path));

  // Our mapping needs to be visible before we add it to the queue,
  // otherwise we can get a wakeup and not know what it is
  pthread_mutex_lock(&state->lock);
  w_ht_replace(state->name_to_fd, w_ht_ptr_val(dir->path), newwd);
  w_ht_replace(state->fd_to_name, newwd, w_ht_ptr_val(dir->path));
  pthread_mutex_unlock(&state->lock);

  if (kevent(state->kq_fd, &k, 1, NULL, 0, 0)) {
    w_log(W_LOG_DBG, "kevent EV_ADD dir %s failed: %s",
        path, strerror(errno));
    close(newwd);

    pthread_mutex_lock(&state->lock);
    w_ht_del(state->name_to_fd, w_ht_ptr_val(dir->path));
    w_ht_del(state->fd_to_name, newwd);
    pthread_mutex_unlock(&state->lock);
  } else {
    w_log(W_LOG_DBG, "kevent dir %s -> %d\n", dir->path->buf, newwd);
  }

  return osdir;
}
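SET_DIR_BIT and IS_DIR_BIT_SET are not shown in this excerpt; from their use in the kevent udata slot they read like low-bit pointer tagging, viable because the w_string_t pointers stored there are more than byte-aligned. A plausible definition, offered as an assumption rather than the actual watchman macros:

#include <stdint.h>

// Assumed sketch: tag bit 0 of an aligned pointer to mean "directory".
// Only safe when the pointee's alignment guarantees bit 0 is otherwise zero.
#define SET_DIR_BIT(p)     ((void *)(((uintptr_t)(p)) | 0x1))
#define IS_DIR_BIT_SET(p)  ((((uintptr_t)(p)) & 0x1) != 0)
#define CLEAR_DIR_BIT(p)   ((void *)(((uintptr_t)(p)) & ~(uintptr_t)0x1))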
Example 13
// The client thread reads and decodes json packets,
// then dispatches the commands that it finds
static void *client_thread(void *ptr)
{
  struct watchman_client *client = ptr;
  struct watchman_event_poll pfd[2];
  struct watchman_client_response *queued_responses_to_send;
  json_t *request;
  json_error_t jerr;
  bool send_ok = true;

  w_stm_set_nonblock(client->stm, true);
  w_set_thread_name("client=%p:stm=%p", client, client->stm);

  client->client_is_owner = w_stm_peer_is_owner(client->stm);

  w_stm_get_events(client->stm, &pfd[0].evt);
  pfd[1].evt = client->ping;

  while (!stopping) {
    // Wait for input from either the client socket or
    // via the ping pipe, which signals that some other
    // thread wants to unilaterally send data to the client

    ignore_result(w_poll_events(pfd, 2, 2000));

    if (stopping) {
      break;
    }

    if (pfd[0].ready) {
      request = w_json_buffer_next(&client->reader, client->stm, &jerr);

      if (!request && errno == EAGAIN) {
        // That's fine
      } else if (!request) {
        // Not so cool
        if (client->reader.wpos == client->reader.rpos) {
          // If they disconnected in between PDUs, no need to log
          // any error
          goto disconnected;
        }
        send_error_response(client, "invalid json at position %d: %s",
            jerr.position, jerr.text);
        w_log(W_LOG_ERR, "invalid data from client: %s\n", jerr.text);

        goto disconnected;
      } else if (request) {
        client->pdu_type = client->reader.pdu_type;
        dispatch_command(client, request, CMD_DAEMON);
        json_decref(request);
      }
    }

    if (pfd[1].ready) {
      w_event_test_and_clear(client->ping);
    }

    /* de-queue the pending responses under the lock */
    pthread_mutex_lock(&w_client_lock);
    queued_responses_to_send = client->head;
    client->head = NULL;
    client->tail = NULL;
    pthread_mutex_unlock(&w_client_lock);

    /* now send our response(s) */
    while (queued_responses_to_send) {
      struct watchman_client_response *response_to_send =
        queued_responses_to_send;

      if (send_ok) {
        w_stm_set_nonblock(client->stm, false);
        /* Return the data in the same format that was used to ask for it.
         * Don't bother sending any more messages if the client disconnects,
         * but still free their memory.
         */
        send_ok = w_ser_write_pdu(client->pdu_type, &client->writer,
                                  client->stm, response_to_send->json);
        w_stm_set_nonblock(client->stm, true);
      }

      queued_responses_to_send = response_to_send->next;

      json_decref(response_to_send->json);
      free(response_to_send);
    }
  }

disconnected:
  w_set_thread_name("NOT_CONN:client=%p:stm=%p", client, client->stm);
  // Remove the client from the map before we tear it down, as this makes
  // it easier to flush out pending writes on windows without worrying
  // about w_log_to_clients contending for the write buffers
  pthread_mutex_lock(&w_client_lock);
  w_ht_del(clients, w_ht_ptr_val(client));
  pthread_mutex_unlock(&w_client_lock);

  client_delete(client);

  return NULL;
}
Example 14
static void process_inotify_event(
    w_root_t *root,
    struct watchman_pending_collection *coll,
    struct inotify_event *ine,
    struct timeval now)
{
  struct inot_root_state *state = root->watch;
  char flags_label[128];

  w_expand_flags(inflags, ine->mask, flags_label, sizeof(flags_label));
  w_log(W_LOG_DBG, "notify: wd=%d mask=0x%x %s %s\n", ine->wd, ine->mask,
      flags_label, ine->len > 0 ? ine->name : "");

  if (ine->wd == -1 && (ine->mask & IN_Q_OVERFLOW)) {
    /* we missed something, will need to re-crawl */
    w_root_schedule_recrawl(root, "IN_Q_OVERFLOW");
  } else if (ine->wd != -1) {
    w_string_t *dir_name = NULL;
    w_string_t *name = NULL;
    char buf[WATCHMAN_NAME_MAX];
    int pending_flags = W_PENDING_VIA_NOTIFY;

    pthread_mutex_lock(&state->lock);
    dir_name = w_ht_val_ptr(w_ht_get(state->wd_to_name, ine->wd));
    if (dir_name) {
      w_string_addref(dir_name);
    }
    pthread_mutex_unlock(&state->lock);

    if (dir_name) {
      if (ine->len > 0) {
        snprintf(buf, sizeof(buf), "%.*s/%s",
            dir_name->len, dir_name->buf,
            ine->name);
        name = w_string_new(buf);
      } else {
        name = dir_name;
        w_string_addref(name);
      }
    }

    if (ine->len > 0 && (ine->mask & (IN_MOVED_FROM|IN_ISDIR))
        == (IN_MOVED_FROM|IN_ISDIR)) {
      struct pending_move mv;

      // record this as a pending move, so that we can automatically
      // watch the target when we get the other side of it.
      mv.created = now.tv_sec;
      mv.name = name;

      pthread_mutex_lock(&state->lock);
      if (!w_ht_replace(state->move_map, ine->cookie, w_ht_ptr_val(&mv))) {
        w_log(W_LOG_FATAL,
            "failed to store %" PRIx32 " -> %s in move map\n",
            ine->cookie, name->buf);
      }
      pthread_mutex_unlock(&state->lock);

      w_log(W_LOG_DBG,
          "recording move_from %" PRIx32 " %s\n", ine->cookie,
          name->buf);
    }

    if (ine->len > 0 && (ine->mask & (IN_MOVED_TO|IN_ISDIR))
        == (IN_MOVED_TO|IN_ISDIR)) {
      struct pending_move *old;

      pthread_mutex_lock(&state->lock);
      old = w_ht_val_ptr(w_ht_get(state->move_map, ine->cookie));
      if (old) {
        int wd = inotify_add_watch(state->infd, name->buf,
                    WATCHMAN_INOTIFY_MASK);
        if (wd == -1) {
          if (errno == ENOSPC || errno == ENOMEM) {
            // Limits exceeded, no recovery from our perspective
            set_poison_state(root, name, now, "inotify-add-watch", errno,
                inot_strerror(errno));
          } else {
            w_log(W_LOG_DBG, "add_watch: %s %s\n",
                name->buf, inot_strerror(errno));
          }
        } else {
          w_log(W_LOG_DBG, "moved %s -> %s\n", old->name->buf, name->buf);
          w_ht_replace(state->wd_to_name, wd, w_ht_ptr_val(name));
        }
      } else {
        w_log(W_LOG_DBG, "move: cookie=%" PRIx32 " not found in move map %s\n",
            ine->cookie, name->buf);
      }
      pthread_mutex_unlock(&state->lock);
    }

    if (dir_name) {
      if ((ine->mask & (IN_UNMOUNT|IN_IGNORED|IN_DELETE_SELF|IN_MOVE_SELF))) {
        w_string_t *pname;

        if (w_string_equal(root->root_path, name)) {
          w_log(W_LOG_ERR,
              "root dir %s has been (re)moved, canceling watch\n",
              root->root_path->buf);
          w_string_delref(name);
          w_string_delref(dir_name);
          w_root_cancel(root);
          return;
        }

        // We need to examine the parent and crawl down
        pname = w_string_dirname(name);
        w_log(W_LOG_DBG, "mask=%x, focus on parent: %.*s\n",
            ine->mask, pname->len, pname->buf);
        w_string_delref(name);
        name = pname;
        pending_flags |= W_PENDING_RECURSIVE;
      }

      if (ine->mask & (IN_CREATE|IN_DELETE)) {
        pending_flags |= W_PENDING_RECURSIVE;
      }

      w_log(W_LOG_DBG, "add_pending for inotify mask=%x %.*s\n",
          ine->mask, name->len, name->buf);
      w_pending_coll_add(coll, name, now, pending_flags);

      w_string_delref(name);

      // The kernel removed the wd -> name mapping, so let's update
      // our state here also
      if ((ine->mask & IN_IGNORED) != 0) {
        w_log(W_LOG_DBG, "mask=%x: remove watch %d %.*s\n", ine->mask,
            ine->wd, dir_name->len, dir_name->buf);
        pthread_mutex_lock(&state->lock);
        w_ht_del(state->wd_to_name, ine->wd);
        pthread_mutex_unlock(&state->lock);
      }

      w_string_delref(dir_name);

    } else if ((ine->mask & (IN_MOVE_SELF|IN_IGNORED)) == 0) {
      // If we can't resolve the dir, and this isn't notification
      // that it has gone away, then we want to recrawl to fix
      // up our state.
      w_log(W_LOG_ERR, "wanted dir %d for mask %x but not found %.*s\n",
          ine->wd, ine->mask, ine->len, ine->name);
      w_root_schedule_recrawl(root, "dir missing from internal state");
    }
  }
}
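For context, the events handed to this function arrive from the inotify descriptor as a packed stream of variable-length records. A minimal reader sketch (fd as returned by inotify_init, with watches added via inotify_add_watch):

#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

// Read one batch of inotify records and walk them; each record is
// sizeof(struct inotify_event) plus a variable-length, NUL-padded name.
static void drain_inotify(int fd) {
  char buf[8192] __attribute__((aligned(__alignof__(struct inotify_event))));
  ssize_t n = read(fd, buf, sizeof(buf));
  char *p = buf;

  while (n > 0 && p + sizeof(struct inotify_event) <= buf + n) {
    struct inotify_event *ine = (struct inotify_event *)p;
    printf("wd=%d mask=0x%x %s\n", ine->wd, ine->mask,
        ine->len > 0 ? ine->name : "");
    p += sizeof(*ine) + ine->len;
  }
}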
Example 15
bool w_start_listener(const char *path)
{
  struct sockaddr_un un;
  pthread_t thr;
  pthread_attr_t attr;
  pthread_mutexattr_t mattr;
  struct sigaction sa;
  sigset_t sigset;
#ifdef HAVE_LIBGIMLI_H
  volatile struct gimli_heartbeat *hb = NULL;
#endif

  pthread_mutexattr_init(&mattr);
  pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
  pthread_mutex_init(&w_client_lock, &mattr);
  pthread_mutexattr_destroy(&mattr);

#ifdef HAVE_LIBGIMLI_H
  hb = gimli_heartbeat_attach();
#endif

#ifdef HAVE_KQUEUE
  {
    struct rlimit limit;
    int mib[2] = { CTL_KERN,
#ifdef KERN_MAXFILESPERPROC
      KERN_MAXFILESPERPROC
#else
      KERN_MAXFILES
#endif
    };
    int maxperproc;
    size_t len;

    len = sizeof(maxperproc);
    sysctl(mib, 2, &maxperproc, &len, NULL, 0);

    getrlimit(RLIMIT_NOFILE, &limit);
    w_log(W_LOG_ERR, "file limit is %" PRIu64
        " kern.maxfilesperproc=%i\n",
        limit.rlim_cur, maxperproc);

    if (limit.rlim_cur != RLIM_INFINITY &&
        maxperproc > 0 &&
        limit.rlim_cur < (rlim_t)maxperproc) {
      limit.rlim_cur = maxperproc;

      if (setrlimit(RLIMIT_NOFILE, &limit)) {
        w_log(W_LOG_ERR,
          "failed to raise limit to %" PRIu64 " (%s).\n",
          limit.rlim_cur,
          strerror(errno));
      } else {
        w_log(W_LOG_ERR,
            "raised file limit to %" PRIu64 "\n",
            limit.rlim_cur);
      }
    }

    getrlimit(RLIMIT_NOFILE, &limit);
    if (limit.rlim_cur < 10240) {
      w_log(W_LOG_ERR,
          "Your file descriptor limit is very low (%" PRIu64 "), "
          "please consult the watchman docs on raising the limits\n",
          limit.rlim_cur);
    }
  }
#endif

  if (strlen(path) >= sizeof(un.sun_path) - 1) {
    w_log(W_LOG_ERR, "%s: path is too long\n",
        path);
    return false;
  }

  signal(SIGPIPE, SIG_IGN);

  /* allow SIGUSR1 and SIGCHLD to wake up a blocked thread, without restarting
   * syscalls */
  memset(&sa, 0, sizeof(sa));
  sa.sa_handler = wakeme;
  sa.sa_flags = 0;
  sigaction(SIGUSR1, &sa, NULL);
  sigaction(SIGCHLD, &sa, NULL);

  // Block SIGCHLD everywhere
  sigemptyset(&sigset);
  sigaddset(&sigset, SIGCHLD);
  sigprocmask(SIG_BLOCK, &sigset, NULL);

  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  listener_fd = socket(PF_LOCAL, SOCK_STREAM, 0);
  if (listener_fd == -1) {
    w_log(W_LOG_ERR, "socket: %s\n",
        strerror(errno));
    return false;
  }

  un.sun_family = PF_LOCAL;
  strcpy(un.sun_path, path);

  if (bind(listener_fd, (struct sockaddr*)&un, sizeof(un)) != 0) {
    w_log(W_LOG_ERR, "bind(%s): %s\n",
      path, strerror(errno));
    close(listener_fd);
    return false;
  }

  if (listen(listener_fd, 200) != 0) {
    w_log(W_LOG_ERR, "listen(%s): %s\n",
        path, strerror(errno));
    close(listener_fd);
    return false;
  }

  w_set_cloexec(listener_fd);

  if (pthread_create(&reaper_thread, NULL, child_reaper, NULL)) {
    w_log(W_LOG_FATAL, "pthread_create(reaper): %s\n",
        strerror(errno));
    return false;
  }

  if (!clients) {
    clients = w_ht_new(2, &client_hash_funcs);
  }

  // Wire up the command handlers
  register_commands(commands);

  w_state_load();

#ifdef HAVE_LIBGIMLI_H
  if (hb) {
    gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
  }
#endif
  w_set_nonblock(listener_fd);

  // Now run the dispatch
  while (true) {
    int client_fd;
    struct watchman_client *client;
    struct pollfd pfd;
    int bufsize;

#ifdef HAVE_LIBGIMLI_H
    if (hb) {
      gimli_heartbeat_set(hb, GIMLI_HB_RUNNING);
    }
#endif

    pfd.events = POLLIN;
    pfd.fd = listener_fd;
    poll(&pfd, 1, 10000);

    client_fd = accept(listener_fd, NULL, 0);
    if (client_fd == -1) {
      continue;
    }
    w_set_cloexec(client_fd);
    bufsize = WATCHMAN_IO_BUF_SIZE;
    setsockopt(client_fd, SOL_SOCKET, SO_SNDBUF, &bufsize, sizeof(bufsize));

    client = calloc(1, sizeof(*client));
    client->fd = client_fd;
    if (!w_json_buffer_init(&client->reader)) {
      // FIXME: error handling
    }
    if (!w_json_buffer_init(&client->writer)) {
      // FIXME: error handling
    }
    if (pipe(client->ping)) {
      // FIXME: error handling
    }
    client->subscriptions = w_ht_new(2, &subscription_hash_funcs);
    w_set_cloexec(client->ping[0]);
    w_set_nonblock(client->ping[0]);
    w_set_cloexec(client->ping[1]);
    w_set_nonblock(client->ping[1]);

    pthread_mutex_lock(&w_client_lock);
    w_ht_set(clients, client->fd, (w_ht_val_t)client);
    pthread_mutex_unlock(&w_client_lock);

    // Start a thread for the client.
    // We used to use libevent for this, but we have
    // a low volume of concurrent clients and the json
    // parse/encode APIs are not easily used in a non-blocking
    // server architecture.
    if (pthread_create(&thr, &attr, client_thread, client)) {
      // It didn't work out, sorry!
      pthread_mutex_lock(&w_client_lock);
      w_ht_del(clients, client->fd);
      pthread_mutex_unlock(&w_client_lock);
    }
  }

  pthread_attr_destroy(&attr);
  return true;
}
Example 16
// The client thread reads and decodes json packets,
// then dispatches the commands that it finds
static void *client_thread(void *ptr)
{
  struct watchman_client *client = ptr;
  struct pollfd pfd[2];
  json_t *request;
  json_error_t jerr;
  char buf[16];

  w_set_nonblock(client->fd);

  while (true) {
    // Wait for input from either the client socket or
    // via the ping pipe, which signals that some other
    // thread wants to unilaterally send data to the client

    pfd[0].fd = client->fd;
    pfd[0].events = POLLIN|POLLHUP|POLLERR;
    pfd[0].revents = 0;

    pfd[1].fd = client->ping[0];
    pfd[1].events = POLLIN|POLLHUP|POLLERR;
    pfd[1].revents = 0;

    ignore_result(poll(pfd, 2, 200));

    if (pfd[0].revents & (POLLHUP|POLLERR)) {
disconnected:
      pthread_mutex_lock(&w_client_lock);
      w_ht_del(clients, client->fd);
      pthread_mutex_unlock(&w_client_lock);
      break;
    }

    if (pfd[0].revents) {
      request = w_json_buffer_next(&client->reader, client->fd, &jerr);

      if (!request && errno == EAGAIN) {
        // That's fine
      } else if (!request) {
        // Not so cool
        send_error_response(client, "invalid json at position %d: %s",
            jerr.position, jerr.text);
        w_log(W_LOG_ERR, "invalid data from client: %s\n", jerr.text);

        goto disconnected;
      } else if (request) {
        client->pdu_type = client->reader.pdu_type;
        dispatch_command(client, request);
        json_decref(request);
      }
    }

    if (pfd[1].revents) {
      ignore_result(read(client->ping[0], buf, sizeof(buf)));
    }

    /* now send our response(s) */
    while (client->head) {
      struct watchman_client_response *resp;

      /* de-queue the first response */
      pthread_mutex_lock(&w_client_lock);
      resp = client->head;
      if (resp) {
        client->head = resp->next;
        if (client->tail == resp) {
          client->tail = NULL;
        }
      }
      pthread_mutex_unlock(&w_client_lock);

      if (resp) {
        w_clear_nonblock(client->fd);

        /* Return the data in the same format that was used to ask for it */
        w_ser_write_pdu(client->pdu_type, &client->writer,
            client->fd, resp->json);

        json_decref(resp->json);
        free(resp);

        w_set_nonblock(client->fd);
      }
    }
  }

  return NULL;
}
Example 17
// The client thread reads and decodes json packets,
// then dispatches the commands that it finds
static void *client_thread(void *ptr)
{
  struct watchman_client *client = ptr;
  struct watchman_event_poll pfd[2];
  json_t *request;
  json_error_t jerr;

  w_stm_set_nonblock(client->stm, true);
  w_set_thread_name("client:stm=%p", client->stm);

  w_stm_get_events(client->stm, &pfd[0].evt);
  pfd[1].evt = client->ping;

  while (!stopping) {
    // Wait for input from either the client socket or
    // via the ping pipe, which signals that some other
    // thread wants to unilaterally send data to the client

    ignore_result(w_poll_events(pfd, 2, 2000));

    if (stopping) {
      break;
    }

    if (pfd[0].ready) {
      request = w_json_buffer_next(&client->reader, client->stm, &jerr);

      if (!request && errno == EAGAIN) {
        // That's fine
      } else if (!request) {
        // Not so cool
        if (client->reader.wpos == client->reader.rpos) {
          // If they disconnected in between PDUs, no need to log
          // any error
          goto disconnected;
        }
        send_error_response(client, "invalid json at position %d: %s",
            jerr.position, jerr.text);
        w_log(W_LOG_ERR, "invalid data from client: %s\n", jerr.text);

        goto disconnected;
      } else if (request) {
        client->pdu_type = client->reader.pdu_type;
        dispatch_command(client, request, CMD_DAEMON);
        json_decref(request);
      }
    }

    if (pfd[1].ready) {
      w_event_test_and_clear(client->ping);
    }

    /* now send our response(s) */
    while (client->head) {
      struct watchman_client_response *resp;

      /* de-queue the first response */
      pthread_mutex_lock(&w_client_lock);
      resp = client->head;
      if (resp) {
        client->head = resp->next;
        if (client->tail == resp) {
          client->tail = NULL;
        }
      }
      pthread_mutex_unlock(&w_client_lock);

      if (resp) {
        bool ok;

        w_stm_set_nonblock(client->stm, false);

        /* Return the data in the same format that was used to ask for it */
        ok = w_ser_write_pdu(client->pdu_type, &client->writer,
            client->stm, resp->json);

        json_decref(resp->json);
        free(resp);

        w_stm_set_nonblock(client->stm, true);

        if (!ok) {
          break;
        }
      }
    }
  }

disconnected:
  pthread_mutex_lock(&w_client_lock);
  w_ht_del(clients, w_ht_ptr_val(client));
  pthread_mutex_unlock(&w_client_lock);

  return NULL;
}