Example #1
static std::shared_ptr<watchman_client> make_new_client(
    std::unique_ptr<watchman_stream>&& stm) {
  auto client = std::make_shared<watchman_user_client>(std::move(stm));

  clients.wlock()->insert(client);

  // Start a thread for the client.
  // We used to use libevent for this, but we have
  // a low volume of concurrent clients and the json
  // parse/encode APIs are not easily used in a non-blocking
  // server architecture.
  try {
    std::thread thr([client]() { client_thread(client); });
    thr.detach();
  } catch (const std::exception&) {
    clients.wlock()->erase(client);
    throw;
  }

  return client;
}
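
The pattern worth noting here: the detached thread captures the shared_ptr by value, so the client object stays alive for as long as the thread runs, even though nothing can join it. A minimal self-contained sketch of that lifetime idiom (names are illustrative, not watchman's):

#include <chrono>
#include <cstdio>
#include <memory>
#include <thread>

struct Client {
  ~Client() { std::puts("client destroyed"); }
};

int main() {
  auto client = std::make_shared<Client>();
  // The lambda's copy of `client` keeps the object alive in the thread.
  std::thread([client] { std::puts("serving"); }).detach();
  client.reset(); // our reference is gone; the thread's copy remains
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
}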
Example #2
bool FSEventsWatcher::start(const std::shared_ptr<w_root_t>& root) {
  // Spin up the fsevents processing thread; it owns a ref on the root

  auto self = std::dynamic_pointer_cast<FSEventsWatcher>(shared_from_this());
  try {
    // Acquire the mutex so thread initialization waits until we release it
    auto wlock = items_.wlock();

    std::thread thread([self, root]() {
      try {
        self->FSEventsThread(root);
      } catch (const std::exception& e) {
        watchman::log(watchman::ERR, "uncaught exception: ", e.what());
        root->cancel();
      }

      // Ensure that we signal the condition variable before we
      // finish this thread.  That ensures that we don't get stuck
      // waiting in FSEventsWatcher::start if something unexpected happens.
      self->fse_cond.notify_one();
    });
    // We have to detach because this thread may wind up
    // being the last thread to reference the watcher state and
    // cannot join itself.
    thread.detach();

    // Allow thread init to proceed; wait for its signal
    fse_cond.wait(wlock.getUniqueLock());

    if (root->failure_reason) {
      w_log(
          W_LOG_ERR,
          "failed to start fsevents thread: %s\n",
          root->failure_reason.c_str());
      return false;
    }

    return true;
  } catch (const std::exception& e) {
    watchman::log(
        watchman::ERR, "failed to start fsevents thread: ", e.what(), "\n");
    return false;
  }
}
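
The handshake here is the hold-the-lock-before-spawning idiom: start() takes the items_ write lock first, so the thread's own wlock blocks until start() releases it inside fse_cond.wait. A minimal sketch of the same handshake with plain standard-library names (a `started` predicate flag is added here, which the watchman code handles differently):

#include <condition_variable>
#include <mutex>
#include <thread>

int main() {
  std::mutex m;
  std::condition_variable cv;
  bool started = false;
  bool ok = false;

  std::unique_lock<std::mutex> lock(m); // worker blocks until we wait
  std::thread([&] {
    std::lock_guard<std::mutex> g(m);
    ok = true; // pretend initialization succeeded
    started = true;
    // Notify while still holding the lock so the parent cannot race
    // past the wait and destroy `cv` before we touch it.
    cv.notify_one();
  }).detach();

  // wait() releases `m`, letting the worker proceed, and re-checks the
  // predicate on wakeup, which guards against spurious wakeups.
  cv.wait(lock, [&] { return started; });
  return ok ? 0 : 1;
}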
Example #3
bool FSEventsWatcher::waitNotify(int timeoutms) {
  auto wlock = items_.wlock();
  fse_cond.wait_for(
      wlock.getUniqueLock(), std::chrono::milliseconds(timeoutms));
  return !wlock->empty();
}
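
Note that the bare wait_for overload used above returns on timeout, notification, or a spurious wakeup, and the caller compensates by simply re-checking emptiness afterwards. A hedged sketch of the predicate overload, which folds that re-check in and keeps waiting for the remaining time (names hypothetical):

#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>
#include <string>

bool waitNotify(
    std::condition_variable& cond,
    std::unique_lock<std::mutex>& lock,
    std::deque<std::string>& items,
    int timeoutms) {
  // Returns true as soon as items arrive, false on timeout; spurious
  // wakeups re-enter the wait instead of returning early.
  return cond.wait_for(lock, std::chrono::milliseconds(timeoutms), [&] {
    return !items.empty();
  });
}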
Example #4
bool FSEventsWatcher::consumeNotify(
    const std::shared_ptr<w_root_t>& root,
    PendingCollection::LockedPtr& coll) {
  struct timeval now;
  bool recurse;
  char flags_label[128];
  std::deque<watchman_fsevent> items;

  {
    auto wlock = items_.wlock();
    std::swap(items, *wlock);
  }

  gettimeofday(&now, nullptr);

  for (auto& item : items) {
    w_expand_flags(kflags, item.flags, flags_label, sizeof(flags_label));
    w_log(
        W_LOG_DBG,
        "fsevents: got %s 0x%" PRIx32 " %s\n",
        item.path.c_str(),
        item.flags,
        flags_label);

    if (item.flags & kFSEventStreamEventFlagUserDropped) {
      root->scheduleRecrawl("kFSEventStreamEventFlagUserDropped");
      break;
    }

    if (item.flags & kFSEventStreamEventFlagKernelDropped) {
      root->scheduleRecrawl("kFSEventStreamEventFlagKernelDropped");
      break;
    }

    if (item.flags & kFSEventStreamEventFlagUnmount) {
      w_log(
          W_LOG_ERR,
          "kFSEventStreamEventFlagUnmount %s, cancel watch\n",
          item.path.c_str());
      root->cancel();
      break;
    }

    if (item.flags & kFSEventStreamEventFlagRootChanged) {
      w_log(
          W_LOG_ERR,
          "kFSEventStreamEventFlagRootChanged %s, cancel watch\n",
          item.path.c_str());
      root->cancel();
      break;
    }

    recurse = (item.flags &
               (kFSEventStreamEventFlagMustScanSubDirs |
                kFSEventStreamEventFlagItemRenamed)) != 0;

    coll->add(
        item.path,
        now,
        W_PENDING_VIA_NOTIFY | (recurse ? W_PENDING_RECURSIVE : 0));
  }

  return !items.empty();
}
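
The interesting move at the top of consumeNotify is the swap-under-lock drain: the shared deque is exchanged with an empty local in O(1), so producers are blocked only for the swap while the potentially slow per-item processing runs unlocked. A minimal sketch of that idiom (names illustrative):

#include <deque>
#include <mutex>
#include <string>
#include <utility>

std::mutex g_mutex;
std::deque<std::string> g_items;

void drain() {
  std::deque<std::string> local;
  {
    std::lock_guard<std::mutex> lock(g_mutex);
    std::swap(local, g_items); // O(1); leaves g_items empty for producers
  }
  for (auto& item : local) {
    // process `item` with the lock released
    (void)item;
  }
}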
Example #5
void FSEventsWatcher::FSEventsThread(const std::shared_ptr<w_root_t>& root) {
  CFFileDescriptorRef fdref;
  CFFileDescriptorContext fdctx;

  w_set_thread_name("fsevents %s", root->root_path.c_str());

  {
    // Block until fsevents_root_start is waiting for our initialization
    auto wlock = items_.wlock();

    attempt_resync_on_drop = root->config.getBool("fsevents_try_resync", true);

    memset(&fdctx, 0, sizeof(fdctx));
    fdctx.info = root.get();

    fdref = CFFileDescriptorCreate(
        nullptr, fse_pipe.read.fd(), true, fse_pipe_callback, &fdctx);
    CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack);
    {
      CFRunLoopSourceRef fdsrc;

      fdsrc = CFFileDescriptorCreateRunLoopSource(nullptr, fdref, 0);
      if (!fdsrc) {
        root->failure_reason = w_string_new_typed(
            "CFFileDescriptorCreateRunLoopSource failed", W_STRING_UNICODE);
        goto done;
      }
      CFRunLoopAddSource(CFRunLoopGetCurrent(), fdsrc, kCFRunLoopDefaultMode);
      CFRelease(fdsrc);
    }

    stream = fse_stream_make(
        root, kFSEventStreamEventIdSinceNow, root->failure_reason);
    if (!stream) {
      goto done;
    }

    if (!FSEventStreamStart(stream->stream)) {
      root->failure_reason = w_string::printf(
          "FSEventStreamStart failed, look at your log file %s for "
          "lines mentioning FSEvents and see %s#fsevents for more information\n",
          log_name,
          cfg_get_trouble_url());
      goto done;
    }

    // Signal to fsevents_root_start that we're done initializing
    fse_cond.notify_one();
  }

  // Process the events stream until we get signalled to quit
  CFRunLoopRun();

done:
  if (stream) {
    delete stream;
  }
  if (fdref) {
    CFRelease(fdref);
  }

  w_log(W_LOG_DBG, "fse_thread done\n");
}
Example #6
// The client thread reads and decodes json packets,
// then dispatches the commands that it finds
static void client_thread(std::shared_ptr<watchman_client> client) noexcept {
  struct watchman_event_poll pfd[2];
  json_error_t jerr;
  bool client_alive = true;
  // Keep a persistent vector around so that we can avoid allocating
  // and releasing heap memory when we collect items from the publisher
  std::vector<std::shared_ptr<const watchman::Publisher::Item>> pending;

  client->stm->setNonBlock(true);
  w_set_thread_name(
      "client=%p:stm=%p:pid=%d",
      client.get(),
      client->stm.get(),
      client->stm->getPeerProcessID());

  client->client_is_owner = client->stm->peerIsOwner();

  pfd[0].evt = client->stm->getEvents();
  pfd[1].evt = client->ping.get();

  while (!stopping && client_alive) {
    // Wait for input from either the client socket or
    // via the ping pipe, which signals that some other
    // thread wants to unilaterally send data to the client

    ignore_result(w_poll_events(pfd, 2, 2000));
    if (stopping) {
      break;
    }

    if (pfd[0].ready) {
      auto request = client->reader.decodeNext(client->stm.get(), &jerr);

      if (!request && errno == EAGAIN) {
        // That's fine
      } else if (!request) {
        // Not so cool
        if (client->reader.wpos == client->reader.rpos) {
          // If they disconnected in between PDUs, no need to log
          // any error
          goto disconnected;
        }
        send_error_response(
            client.get(),
            "invalid json at position %d: %s",
            jerr.position,
            jerr.text);
        w_log(W_LOG_ERR, "invalid data from client: %s\n", jerr.text);

        goto disconnected;
      } else {
        client->pdu_type = client->reader.pdu_type;
        client->capabilities = client->reader.capabilities;
        dispatch_command(client.get(), request, CMD_DAEMON);
      }
    }

    if (pfd[1].ready) {
      while (client->ping->testAndClear()) {
        // Enqueue refs to pending log payloads
        pending.clear();
        getPending(pending, client->debugSub, client->errorSub);
        for (auto& item : pending) {
          client->enqueueResponse(json_ref(item->payload), false);
        }

        // Maybe we have subscriptions to dispatch?
        auto userClient =
            std::dynamic_pointer_cast<watchman_user_client>(client);

        if (userClient) {
          std::vector<w_string> subsToDelete;
          for (auto& subiter : userClient->unilateralSub) {
            auto sub = subiter.first;
            auto subStream = subiter.second;

            watchman::log(
                watchman::DBG, "consider fan out sub ", sub->name, "\n");

            pending.clear();
            subStream->getPending(pending);
            bool seenSettle = false;
            for (auto& item : pending) {
              auto dumped = json_dumps(item->payload, 0);
              watchman::log(
                  watchman::DBG,
                  "Unilateral payload for sub ",
                  sub->name,
                  " ",
                  dumped ? dumped : "<<MISSING!!>>",
                  "\n");
              free(dumped);

              if (item->payload.get_default("canceled")) {
                auto resp = make_response();

                watchman::log(
                    watchman::ERR,
                    "Cancel subscription ",
                    sub->name,
                    " due to root cancellation\n");

                resp.set({{"root", item->payload.get_default("root")},
                          {"unilateral", json_true()},
                          {"canceled", json_true()},
                          {"subscription", w_string_to_json(sub->name)}});
                client->enqueueResponse(std::move(resp), false);
                // Remember to cancel this subscription.
                // We can't do it in this loop because that would
                // invalidate the iterators and cause a headache.
                subsToDelete.push_back(sub->name);
                continue;
              }

              if (item->payload.get_default("state-enter") ||
                  item->payload.get_default("state-leave")) {
                auto resp = make_response();
                json_object_update(item->payload, resp);
                // We have the opportunity to populate additional response
                // fields here (since we don't want to block the command).
                // We don't populate the fat clock for SCM aware queries
                // because determination of mergeBase could add latency.
                resp.set({{"unilateral", json_true()},
                          {"subscription", w_string_to_json(sub->name)}});
                client->enqueueResponse(std::move(resp), false);

                watchman::log(
                    watchman::DBG,
                    "Fan out subscription state change for ",
                    sub->name,
                    "\n");
                continue;
              }

              if (!sub->debug_paused && item->payload.get_default("settled")) {
                seenSettle = true;
                continue;
              }
            }

            if (seenSettle) {
              sub->processSubscription();
            }
          }

          for (auto& name : subsToDelete) {
            userClient->unsubByName(name);
          }
        }
      }
    }

    /* now send our response(s) */
    while (!client->responses.empty() && client_alive) {
      auto& response_to_send = client->responses.front();

      client->stm->setNonBlock(false);
      /* Return the data in the same format that was used to ask for it.
       * Update client liveness based on send success.
       */
      client_alive = client->writer.pduEncodeToStream(
          client->pdu_type,
          client->capabilities,
          response_to_send,
          client->stm.get());
      client->stm->setNonBlock(true);
      client->responses.pop_front();
    }
  }

disconnected:
  w_set_thread_name(
      "NOT_CONN:client=%p:stm=%p:pid=%d",
      client.get(),
      client->stm.get(),
      client->stm->getPeerProcessID());
  // Remove the client from the map before we tear it down, as this makes
  // it easier to flush out pending writes on windows without worrying
  // about w_log_to_clients contending for the write buffers
  clients.wlock()->erase(client);
}
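
The poll loop above multiplexes two wakeup sources: the client socket and a ping handle that other threads poke when they enqueue unilateral data. On POSIX the classic form of that second source is the self-pipe trick; a minimal sketch (illustrative, not watchman's w_poll_events):

#include <poll.h>
#include <unistd.h>
#include <cstdio>
#include <thread>

int main() {
  int pipefd[2];
  if (pipe(pipefd) != 0) {
    return 1;
  }

  std::thread pinger([&] {
    char b = 'x';
    (void)write(pipefd[1], &b, 1); // wake the poller from another thread
  });

  struct pollfd pfd;
  pfd.fd = pipefd[0];
  pfd.events = POLLIN;
  if (poll(&pfd, 1, 2000) > 0 && (pfd.revents & POLLIN)) {
    char b;
    (void)read(pipefd[0], &b, 1); // drain, like ping->testAndClear()
    puts("pinged");
  }
  pinger.join();
  close(pipefd[0]);
  close(pipefd[1]);
}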
Example #7
File: kqueue.cpp  Project: danez/watchman
bool KQueueWatcher::startWatchFile(struct watchman_file* file) {
  struct kevent k;

  auto full_name = w_dir_path_cat_str(file->parent, file->getName());
  {
    auto rlock = maps_.rlock();
    if (rlock->name_to_fd.find(full_name) != rlock->name_to_fd.end()) {
      // Already watching it
      return true;
    }
  }

  w_log(W_LOG_DBG, "watch_file(%s)\n", full_name.c_str());

  FileDescriptor fdHolder(open(full_name.c_str(), O_EVTONLY | O_CLOEXEC));

  auto rawFd = fdHolder.fd();

  if (rawFd == -1) {
    watchman::log(
        watchman::ERR,
        "failed to open ",
        full_name,
        ", O_EVTONLY: ",
        strerror(errno),
        "\n");
    return false;
  }

  memset(&k, 0, sizeof(k));
  EV_SET(
      &k,
      rawFd,
      EVFILT_VNODE,
      EV_ADD | EV_CLEAR,
      NOTE_WRITE | NOTE_DELETE | NOTE_EXTEND | NOTE_RENAME | NOTE_ATTRIB,
      0,
      (w_string_t*)full_name);

  {
    auto wlock = maps_.wlock();
    wlock->name_to_fd[full_name] = std::move(fdHolder);
    wlock->fd_to_name[rawFd] = full_name;
  }

  if (kevent(kq_fd.fd(), &k, 1, nullptr, 0, nullptr)) {
    watchman::log(
        watchman::DBG,
        "kevent EV_ADD file ",
        full_name,
        " failed: ",
        strerror(errno),
        "\n");
    auto wlock = maps_.wlock();
    wlock->name_to_fd.erase(full_name);
    wlock->fd_to_name.erase(rawFd);
  } else {
    watchman::log(
        watchman::DBG, "kevent file ", full_name, " -> ", rawFd, "\n");
  }

  return true;
}
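
For context, the raw kqueue calls underlying this registration look roughly as follows on BSD/macOS: open the file (O_EVTONLY is Darwin-specific; other BSDs would use O_RDONLY), register an EVFILT_VNODE filter with EV_SET, and receive events with kevent. A minimal single-file sketch (path illustrative):

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

int main() {
  int kq = kqueue();
  int fd = open("/tmp/watched-file", O_EVTONLY); // O_RDONLY on non-Darwin
  if (kq < 0 || fd < 0) {
    return 1;
  }

  struct kevent change;
  EV_SET(
      &change,
      fd,
      EVFILT_VNODE,
      EV_ADD | EV_CLEAR,
      NOTE_WRITE | NOTE_DELETE | NOTE_EXTEND | NOTE_RENAME | NOTE_ATTRIB,
      0,
      nullptr);

  // A single kevent call can both apply the change list and wait for
  // one event; a null timeout blocks indefinitely.
  struct kevent event;
  int n = kevent(kq, &change, 1, &event, 1, nullptr);
  if (n > 0) {
    printf("vnode event fflags=0x%x\n", (unsigned)event.fflags);
  }
  close(fd);
  close(kq);
  return 0;
}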
Example #8
File: kqueue.cpp  Project: danez/watchman
bool KQueueWatcher::consumeNotify(
    const std::shared_ptr<w_root_t>& root,
    PendingCollection::LockedPtr& coll) {
  int n;
  int i;
  struct timespec ts = { 0, 0 };
  struct timeval now;

  errno = 0;
  n = kevent(
      kq_fd.fd(),
      nullptr,
      0,
      keventbuf,
      sizeof(keventbuf) / sizeof(keventbuf[0]),
      &ts);
  w_log(
      W_LOG_DBG,
      "consume_kqueue: %s n=%d err=%s\n",
      root->root_path.c_str(),
      n,
      strerror(errno));
  if (root->inner.cancelled) {
    return false;
  }

  gettimeofday(&now, nullptr);
  for (i = 0; n > 0 && i < n; i++) {
    uint32_t fflags = keventbuf[i].fflags;
    bool is_dir = IS_DIR_BIT_SET(keventbuf[i].udata);
    char flags_label[128];
    int fd = keventbuf[i].ident;

    w_expand_flags(kflags, fflags, flags_label, sizeof(flags_label));
    auto wlock = maps_.wlock();
    auto it = wlock->fd_to_name.find(fd);
    w_string path = it == wlock->fd_to_name.end() ? nullptr : it->second;
    if (!path) {
      // Was likely a buffered notification for something that we decided
      // to stop watching
      w_log(W_LOG_DBG,
          " KQ notif for fd=%d; flags=0x%x %s no ref for it in fd_to_name\n",
          fd, fflags, flags_label);
      continue;
    }

    w_log(
        W_LOG_DBG,
        " KQ fd=%d path %s [0x%x %s]\n",
        fd,
        path.data(),
        fflags,
        flags_label);
    if ((fflags & (NOTE_DELETE|NOTE_RENAME|NOTE_REVOKE))) {
      struct kevent k;

      if (w_string_equal(path, root->root_path)) {
        w_log(
            W_LOG_ERR,
            "root dir %s has been (re)moved [code 0x%x], canceling watch\n",
            root->root_path.c_str(),
            fflags);
        root->cancel();
        return false;
      }

      // Remove our watch bits
      memset(&k, 0, sizeof(k));
      EV_SET(&k, fd, EVFILT_VNODE, EV_DELETE, 0, 0, nullptr);
      kevent(kq_fd.fd(), &k, 1, nullptr, 0, 0);
      wlock->name_to_fd.erase(path);
      wlock->fd_to_name.erase(fd);
    }

    coll->add(
        path, now, is_dir ? 0 : (W_PENDING_RECURSIVE | W_PENDING_VIA_NOTIFY));
  }

  return n > 0;
}
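
The flags_label string in these log lines comes from w_expand_flags, which simply maps set bits to names. A hedged sketch of an equivalent helper (the table is illustrative and covers only the bits this watcher registers for):

#include <sys/types.h>
#include <sys/event.h>
#include <string>

std::string expand_flags(unsigned fflags) {
  static const struct {
    unsigned bit;
    const char* name;
  } table[] = {
      {NOTE_WRITE, "NOTE_WRITE"},   {NOTE_DELETE, "NOTE_DELETE"},
      {NOTE_EXTEND, "NOTE_EXTEND"}, {NOTE_RENAME, "NOTE_RENAME"},
      {NOTE_ATTRIB, "NOTE_ATTRIB"}, {NOTE_REVOKE, "NOTE_REVOKE"},
  };
  std::string out;
  for (const auto& e : table) {
    if (fflags & e.bit) {
      if (!out.empty()) {
        out += ' ';
      }
      out += e.name;
    }
  }
  return out; // e.g. "NOTE_WRITE NOTE_EXTEND"
}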
Example #9
File: kqueue.cpp  Project: danez/watchman
std::unique_ptr<watchman_dir_handle> KQueueWatcher::startWatchDir(
    const std::shared_ptr<w_root_t>& root,
    struct watchman_dir* dir,
    struct timeval,
    const char* path) {
  struct stat st, osdirst;
  struct kevent k;

  auto osdir = w_dir_open(path);

  FileDescriptor fdHolder(open(path, O_NOFOLLOW | O_EVTONLY | O_CLOEXEC));
  auto rawFd = fdHolder.fd();
  if (rawFd == -1) {
    // directory got deleted between opendir and open
    throw std::system_error(
        errno, std::generic_category(), std::string("open O_EVTONLY: ") + path);
  }
  if (fstat(rawFd, &st) == -1 || fstat(osdir->getFd(), &osdirst) == -1) {
    // whaaa?
    root->scheduleRecrawl("fstat failed");
    throw std::system_error(
        errno,
        std::generic_category(),
        std::string("fstat failed for dir ") + path);
  }

  if (st.st_dev != osdirst.st_dev || st.st_ino != osdirst.st_ino) {
    // directory got replaced between opendir and open -- at this point its
    // parent's being watched, so we let filesystem events take care of it
    throw std::system_error(
        ENOTDIR,
        std::generic_category(),
        std::string("directory replaced between opendir and open: ") + path);
  }

  memset(&k, 0, sizeof(k));
  auto dir_name = dir->getFullPath();
  EV_SET(
      &k,
      rawFd,
      EVFILT_VNODE,
      EV_ADD | EV_CLEAR,
      NOTE_WRITE | NOTE_DELETE | NOTE_EXTEND | NOTE_RENAME,
      0,
      SET_DIR_BIT((w_string_t*)dir_name));

  // Our mapping needs to be visible before we add it to the queue,
  // otherwise we can get a wakeup and not know what it is
  {
    auto wlock = maps_.wlock();
    wlock->name_to_fd[dir_name] = std::move(fdHolder);
    wlock->fd_to_name[rawFd] = dir_name;
  }

  if (kevent(kq_fd.fd(), &k, 1, nullptr, 0, nullptr)) {
    w_log(
        W_LOG_DBG, "kevent EV_ADD dir %s failed: %s\n", path, strerror(errno));

    auto wlock = maps_.wlock();
    wlock->name_to_fd.erase(dir_name);
    wlock->fd_to_name.erase(rawFd);
  } else {
    watchman::log(watchman::DBG, "kevent dir ", dir_name, " -> ", rawFd, "\n");
  }

  return osdir;
}
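
The fstat pair above is a race check: the directory is opened twice (opendir, then open), and matching st_dev/st_ino proves both handles still name the same inode, i.e. the directory was not deleted and replaced in between. A minimal POSIX sketch of that check (names illustrative):

#include <dirent.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

bool same_inode(const char* path) {
  DIR* d = opendir(path);
  if (!d) {
    return false;
  }
  int fd = open(path, O_RDONLY | O_NOFOLLOW);
  if (fd < 0) {
    closedir(d);
    return false;
  }
  struct stat a, b;
  bool same = fstat(fd, &a) == 0 && fstat(dirfd(d), &b) == 0 &&
      a.st_dev == b.st_dev && a.st_ino == b.st_ino;
  close(fd);
  closedir(d);
  return same; // false => path was replaced between the two opens
}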