Example #1
/* Add a pending entry.  Will consolidate an existing entry with the
 * same name.  Always returns true; an allocation failure surfaces as a
 * std::bad_alloc from std::make_shared rather than a false return.
 * The caller must own the collection lock. */
bool PendingCollectionBase::add(
    const w_string& path,
    struct timeval now,
    int flags) {
  char flags_label[128];

  auto existing = tree_.search(path);
  if (existing) {
    /* Entry already exists: consolidate */
    consolidateItem(existing->get(), flags);
    /* all done */
    return true;
  }

  if (isObsoletedByContainingDir(path)) {
    return true;
  }

  // Try to allocate the new node before we prune any children.
  auto p = std::make_shared<watchman_pending_fs>(path, now, flags);

  maybePruneObsoletedChildren(path, flags);

  w_expand_flags(kflags, flags, flags_label, sizeof(flags_label));
  w_log(
      W_LOG_DBG,
      "add_pending: %.*s %s\n",
      int(path.size()),
      path.data(),
      flags_label);

  tree_.insert(path, p);
  linkHead(std::move(p));

  return true;
}
Example #2
static bool kqueue_root_consume_notify(watchman_global_watcher_t watcher,
    w_root_t *root, struct watchman_pending_collection *coll)
{
  struct kqueue_root_state *state = root->watch;
  int n;
  int i;
  struct timespec ts = { 0, 0 };
  struct timeval now;
  unused_parameter(watcher);

  errno = 0;
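  // Drain whatever events the kernel has queued without blocking: the
  // zeroed timespec makes kevent() return immediately.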
  n = kevent(state->kq_fd, NULL, 0,
      state->keventbuf, sizeof(state->keventbuf) / sizeof(state->keventbuf[0]),
      &ts);
  w_log(W_LOG_DBG, "consume_kqueue: %s n=%d err=%s\n",
      root->root_path->buf, n, strerror(errno));
  if (root->cancelled) {
    return false;
  }

  gettimeofday(&now, NULL);
  for (i = 0; n > 0 && i < n; i++) {
    uint32_t fflags = state->keventbuf[i].fflags;
    bool is_dir = IS_DIR_BIT_SET(state->keventbuf[i].udata);
    w_string_t *path;
    char flags_label[128];
    int fd = state->keventbuf[i].ident;

    w_expand_flags(kflags, fflags, flags_label, sizeof(flags_label));
    pthread_mutex_lock(&state->lock);
    path = w_ht_val_ptr(w_ht_get(state->fd_to_name, fd));
    if (!path) {
      // Was likely a buffered notification for something that we decided
      // to stop watching
      w_log(W_LOG_DBG,
          " KQ notif for fd=%d; flags=0x%x %s no ref for it in fd_to_name\n",
          fd, fflags, flags_label);
      pthread_mutex_unlock(&state->lock);
      continue;
    }
    w_string_addref(path);

    w_log(W_LOG_DBG, " KQ fd=%d path %s [0x%x %s]\n",
        fd, path->buf, fflags, flags_label);
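    // A delete, rename or revoke means this fd no longer refers to the
    // path we registered, so drop our kqueue registration for it.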
    if ((fflags & (NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE))) {
      struct kevent k;

      if (w_string_equal(path, root->root_path)) {
        w_log(W_LOG_ERR,
            "root dir %s has been (re)moved [code 0x%x], canceling watch\n",
            root->root_path->buf, fflags);
        w_root_cancel(root);
        pthread_mutex_unlock(&state->lock);
        return false;
      }

      // Remove our watch bits
      memset(&k, 0, sizeof(k));
      EV_SET(&k, fd, EVFILT_VNODE, EV_DELETE, 0, 0, NULL);
      kevent(state->kq_fd, &k, 1, NULL, 0, 0);
      w_ht_del(state->name_to_fd, w_ht_ptr_val(path));
      w_ht_del(state->fd_to_name, fd);
    }

    pthread_mutex_unlock(&state->lock);
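    // Queue the changed path in the pending collection for later processing.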
    w_pending_coll_add(coll, path, !is_dir, now, !is_dir);
    w_string_delref(path);
  }

  return n > 0;
}
Example #3
bool FSEventsWatcher::consumeNotify(
    const std::shared_ptr<w_root_t>& root,
    PendingCollection::LockedPtr& coll) {
  struct timeval now;
  bool recurse;
  char flags_label[128];
  std::deque<watchman_fsevent> items;

  {
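    // Swap the accumulated events out under the write lock so the FSEvents
    // callback thread can keep appending while we process this batch.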
    auto wlock = items_.wlock();
    std::swap(items, *wlock);
  }

  gettimeofday(&now, nullptr);

  for (auto& item : items) {
    w_expand_flags(kflags, item.flags, flags_label, sizeof(flags_label));
    w_log(
        W_LOG_DBG,
        "fsevents: got %s 0x%" PRIx32 " %s\n",
        item.path.c_str(),
        item.flags,
        flags_label);

    if (item.flags & kFSEventStreamEventFlagUserDropped) {
      root->scheduleRecrawl("kFSEventStreamEventFlagUserDropped");
      break;
    }

    if (item.flags & kFSEventStreamEventFlagKernelDropped) {
      root->scheduleRecrawl("kFSEventStreamEventFlagKernelDropped");
      break;
    }

    if (item.flags & kFSEventStreamEventFlagUnmount) {
      w_log(
          W_LOG_ERR,
          "kFSEventStreamEventFlagUnmount %s, cancel watch\n",
          item.path.c_str());
      root->cancel();
      break;
    }

    if (item.flags & kFSEventStreamEventFlagRootChanged) {
      w_log(
          W_LOG_ERR,
          "kFSEventStreamEventFlagRootChanged %s, cancel watch\n",
          item.path.c_str());
      root->cancel();
      break;
    }

    // A coalesced subtree or a rename means we can't trust the individual
    // events under this path, so rescan it recursively.
    recurse = (item.flags &
               (kFSEventStreamEventFlagMustScanSubDirs |
                kFSEventStreamEventFlagItemRenamed)) != 0;

    coll->add(
        item.path,
        now,
        W_PENDING_VIA_NOTIFY | (recurse ? W_PENDING_RECURSIVE : 0));
  }

  return !items.empty();
}
Example #4
static void process_inotify_event(
    w_root_t *root,
    struct watchman_pending_collection *coll,
    struct inotify_event *ine,
    struct timeval now)
{
  struct inot_root_state *state = root->watch;
  char flags_label[128];

  w_expand_flags(inflags, ine->mask, flags_label, sizeof(flags_label));
  w_log(W_LOG_DBG, "notify: wd=%d mask=0x%x %s %s\n", ine->wd, ine->mask,
      flags_label, ine->len > 0 ? ine->name : "");

  if (ine->wd == -1 && (ine->mask & IN_Q_OVERFLOW)) {
    /* we missed something, will need to re-crawl */
    w_root_schedule_recrawl(root, "IN_Q_OVERFLOW");
  } else if (ine->wd != -1) {
    w_string_t *dir_name = NULL;
    w_string_t *name = NULL;
    char buf[WATCHMAN_NAME_MAX];
    int pending_flags = W_PENDING_VIA_NOTIFY;

    pthread_mutex_lock(&state->lock);
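    // Resolve the watch descriptor back to the directory it was registered for.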
    dir_name = w_ht_val_ptr(w_ht_get(state->wd_to_name, ine->wd));
    if (dir_name) {
      w_string_addref(dir_name);
    }
    pthread_mutex_unlock(&state->lock);

    if (dir_name) {
      if (ine->len > 0) {
        snprintf(buf, sizeof(buf), "%.*s/%s",
            dir_name->len, dir_name->buf,
            ine->name);
        name = w_string_new(buf);
      } else {
        name = dir_name;
        w_string_addref(name);
      }
    }

    if (ine->len > 0 && (ine->mask & (IN_MOVED_FROM|IN_ISDIR))
        == (IN_MOVED_FROM|IN_ISDIR)) {
      struct pending_move mv;

      // record this as a pending move, so that we can automatically
      // watch the target when we get the other side of it.
      mv.created = now.tv_sec;
      mv.name = name;

      pthread_mutex_lock(&state->lock);
      if (!w_ht_replace(state->move_map, ine->cookie, w_ht_ptr_val(&mv))) {
        w_log(W_LOG_FATAL,
            "failed to store %" PRIx32 " -> %s in move map\n",
            ine->cookie, name->buf);
      }
      pthread_mutex_unlock(&state->lock);

      w_log(W_LOG_DBG,
          "recording move_from %" PRIx32 " %s\n", ine->cookie,
          name->buf);
    }

    // The destination half of a directory rename (IN_MOVED_TO); pair it
    // with the recorded IN_MOVED_FROM via the cookie.
    if (ine->len > 0 && (ine->mask & (IN_MOVED_TO|IN_ISDIR))
        == (IN_MOVED_TO|IN_ISDIR)) {
      struct pending_move *old;

      pthread_mutex_lock(&state->lock);
      old = w_ht_val_ptr(w_ht_get(state->move_map, ine->cookie));
      if (old) {
        int wd = inotify_add_watch(state->infd, name->buf,
                    WATCHMAN_INOTIFY_MASK);
        if (wd == -1) {
          if (errno == ENOSPC || errno == ENOMEM) {
            // Limits exceeded, no recovery from our perspective
            set_poison_state(root, name, now, "inotify-add-watch", errno,
                inot_strerror(errno));
          } else {
            w_log(W_LOG_DBG, "add_watch: %s %s\n",
                name->buf, inot_strerror(errno));
          }
        } else {
          w_log(W_LOG_DBG, "moved %s -> %s\n", old->name->buf, name->buf);
          w_ht_replace(state->wd_to_name, wd, w_ht_ptr_val(name));
        }
      } else {
        w_log(W_LOG_DBG, "move: cookie=%" PRIx32 " not found in move map %s\n",
            ine->cookie, name->buf);
      }
      pthread_mutex_unlock(&state->lock);
    }

    if (dir_name) {
      if ((ine->mask & (IN_UNMOUNT|IN_IGNORED|IN_DELETE_SELF|IN_MOVE_SELF))) {
        w_string_t *pname;

        if (w_string_equal(root->root_path, name)) {
          w_log(W_LOG_ERR,
              "root dir %s has been (re)moved, canceling watch\n",
              root->root_path->buf);
          w_string_delref(name);
          w_string_delref(dir_name);
          w_root_cancel(root);
          return;
        }

        // We need to examine the parent and crawl down
        pname = w_string_dirname(name);
        w_log(W_LOG_DBG, "mask=%x, focus on parent: %.*s\n",
            ine->mask, pname->len, pname->buf);
        w_string_delref(name);
        name = pname;
        pending_flags |= W_PENDING_RECURSIVE;
      }

      if (ine->mask & (IN_CREATE|IN_DELETE)) {
        pending_flags |= W_PENDING_RECURSIVE;
      }

      w_log(W_LOG_DBG, "add_pending for inotify mask=%x %.*s\n",
          ine->mask, name->len, name->buf);
      w_pending_coll_add(coll, name, now, pending_flags);

      w_string_delref(name);

      // The kernel removed the wd -> name mapping, so let's update
      // our state here also
      if ((ine->mask & IN_IGNORED) != 0) {
        w_log(W_LOG_DBG, "mask=%x: remove watch %d %.*s\n", ine->mask,
            ine->wd, dir_name->len, dir_name->buf);
        pthread_mutex_lock(&state->lock);
        w_ht_del(state->wd_to_name, ine->wd);
        pthread_mutex_unlock(&state->lock);
      }

      w_string_delref(dir_name);

    } else if ((ine->mask & (IN_MOVE_SELF|IN_IGNORED)) == 0) {
      // If we can't resolve the dir, and this isn't notification
      // that it has gone away, then we want to recrawl to fix
      // up our state.
      w_log(W_LOG_ERR, "wanted dir %d for mask %x but not found %.*s\n",
          ine->wd, ine->mask, ine->len, ine->name);
      w_root_schedule_recrawl(root, "dir missing from internal state");
    }
  }
}
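
All four examples follow the same logging pattern: a static table of flag bits and labels is passed to w_expand_flags together with the raw event mask and a stack buffer, and the expanded string is used only in debug-level log lines. The sketch below illustrates that pattern in isolation so it can be compiled on its own; expand_flags and demo_flags are illustrative stand-ins, not watchman's actual w_expand_flags or its kflags/inflags tables.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-in for the flag table / expander pair used above; it only
 * mirrors the call shape (table, flag bits, output buffer, buffer size)
 * and is not watchman's implementation. */
struct flag_map {
  uint32_t value;
  const char *label;
};

static void expand_flags(const struct flag_map *fmap, uint32_t flags,
                         char *buf, size_t len) {
  buf[0] = '\0';
  for (; fmap->label; fmap++) {
    if (flags & fmap->value) {
      if (buf[0] != '\0') {
        strncat(buf, " ", len - strlen(buf) - 1);
      }
      strncat(buf, fmap->label, len - strlen(buf) - 1);
    }
  }
}

int main(void) {
  /* Hypothetical table; the real kflags/inflags tables map kqueue and
   * inotify bits to their names. */
  static const struct flag_map demo_flags[] = {
      {0x01, "CREATE"},
      {0x02, "DELETE"},
      {0x04, "RENAME"},
      {0, NULL},
  };
  char flags_label[128];

  expand_flags(demo_flags, 0x01 | 0x04, flags_label, sizeof(flags_label));
  printf("flags: %s\n", flags_label); /* prints: CREATE RENAME */
  return 0;
}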