Example 1
static inline void dispatch_kevent(struct ph_nbio_emitter *emitter,
    ph_thread_t *thread, struct kevent *event)
{
  ph_iomask_t mask;
  ph_job_t *job;

  if (event->filter != EVFILT_TIMER && (event->flags & EV_ERROR) != 0) {
    // We're pretty strict about errors at this stage to try to
    // ensure that we're doing the right thing. There may be cases
    // that we should ignore rather than panic on.
    ph_panic("kqueue error on fd:%d `Pe%d",
        (int)event->ident, (int)event->data);
  }

  switch (event->filter) {
    case EVFILT_TIMER:
      gettimeofday(&thread->now, NULL);
      thread->refresh_time = false;
      ph_nbio_emitter_timer_tick(emitter);
      break;

    case EVFILT_READ:
      mask = PH_IOMASK_READ;

      // You'd think we'd want to flag an error here, but EV_EOF can
      // be set when the read side has been shut down while we still
      // have buffered data that we want to read. On this platform we
      // detect EOF as part of attempting the read instead.
      /*
      if (event->flags & EV_EOF) {
        mask |= PH_IOMASK_ERR;
      }
      */

      thread->refresh_time = true;
      job = event->udata;
      // clear the recorded kernel-side mask; the job must be re-armed
      // before this event can fire again
      job->kmask = 0;
      ph_nbio_emitter_dispatch_immediate(emitter, job, mask);
      break;

    case EVFILT_WRITE:
      thread->refresh_time = true;
      job = event->udata;
      job->kmask = 0;
      ph_nbio_emitter_dispatch_immediate(emitter, job, PH_IOMASK_WRITE);
      break;
  }
}
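
For context on where event->udata comes from, here is a minimal, self-contained sketch (not this library's actual registration code) of the kqueue round trip. It uses EV_ONESHOT, which matches the re-arm model in the dispatcher above. The fake_job struct and the use of stdin are placeholders for illustration; on BSD/macOS udata is a void pointer, so the job pointer round-trips unchanged.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <stdio.h>
#include <unistd.h>

struct fake_job { int fd; };  /* stand-in for ph_job_t */

int main(void) {
  struct fake_job job = { .fd = STDIN_FILENO };
  struct kevent change, event;
  int kq = kqueue();
  if (kq == -1) { perror("kqueue"); return 1; }

  /* Register read interest; udata carries our job pointer and is
   * returned verbatim with the event. EV_ONESHOT means the filter
   * fires once and must be re-armed, as in the dispatcher above. */
  EV_SET(&change, job.fd, EVFILT_READ, EV_ADD | EV_ONESHOT, 0, 0, &job);
  if (kevent(kq, &change, 1, NULL, 0, NULL) == -1) {
    perror("kevent add");
    return 1;
  }

  /* Wait for one event and recover the job from udata. */
  if (kevent(kq, NULL, 0, &event, 1, NULL) == 1) {
    struct fake_job *j = event.udata;
    printf("fd %d readable, %ld bytes pending\n", j->fd, (long)event.data);
  }
  close(kq);
  return 0;
}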
Example 2
static void tick_epoll(ph_job_t *job, ph_iomask_t why, void *data)
{
    uint64_t expirations = 0;
    struct ph_nbio_emitter *emitter = data;

    ph_unused_parameter(why);

    /* consume the number of ticks; ideally this is 1; anything bigger
     * means that we've fallen behind */
    if (read(emitter->timer_fd, &expirations, sizeof(expirations)) > 0) {
        if (expirations) {
            ph_nbio_emitter_timer_tick(emitter);
        }
    }

    /* re-arm read interest on the timer fd so we see the next tick */
    ph_job_set_nbio(job, PH_IOMASK_READ, 0);
}
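
The expirations counter here follows the standard Linux timerfd contract: a read() on the timer fd yields a uint64_t holding the number of intervals that have elapsed since the last read, so anything greater than 1 means ticks were missed. A minimal standalone sketch of that setup (Linux only; the 100ms interval is an arbitrary choice for illustration):

#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  /* Periodic timer, analogous to the emitter's tick source. */
  int tfd = timerfd_create(CLOCK_MONOTONIC, 0);
  if (tfd == -1) { perror("timerfd_create"); return 1; }

  struct itimerspec its = {
    .it_interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
    .it_value    = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
  };
  if (timerfd_settime(tfd, 0, &its, NULL) == -1) {
    perror("timerfd_settime");
    return 1;
  }

  for (int i = 0; i < 3; i++) {
    uint64_t expirations = 0;
    /* A blocking read returns the number of intervals that have
     * elapsed; a value greater than 1 means we fell behind. */
    if (read(tfd, &expirations, sizeof(expirations)) == (ssize_t)sizeof(expirations)) {
      printf("tick x%llu\n", (unsigned long long)expirations);
    }
  }
  close(tfd);
  return 0;
}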
Example 3
void ph_nbio_emitter_run(struct ph_nbio_emitter *emitter, ph_thread_t *thread)
{
  port_event_t *event;
  uint_t n, i, max_chunk, max_sleep;
  ph_job_t *job;
  ph_iomask_t mask;
  struct timespec ts;

  max_chunk = ph_config_query_int("$.nbio.max_per_wakeup", 1024);
  max_sleep = ph_config_query_int("$.nbio.max_sleep", 5000);
  ts.tv_sec = max_sleep / 1000;
  ts.tv_nsec = (max_sleep - (ts.tv_sec * 1000)) * 1000000;
  event = malloc(max_chunk * sizeof(port_event_t));
  if (!event) {
    ph_panic("unable to allocate %u port events", max_chunk);
  }

  while (ck_pr_load_int(&_ph_run_loop)) {
    n = 1; /* wait for at least one event */
    memset(event, 0, max_chunk * sizeof(*event));

    if (port_getn(emitter->io_fd, event, max_chunk, &n, &ts)) {
      if (errno != EINTR && errno != ETIME) {
        ph_panic("port_getn: `Pe%d", errno);
      }
      n = 0;
    }

    if (!n) {
      ph_thread_epoch_poll();
      continue;
    }

    for (i = 0; i < n; i++) {
      ph_thread_epoch_begin();

      switch (event[i].portev_source) {
        case PORT_SOURCE_TIMER:
          gettimeofday(&thread->now, NULL);
          thread->refresh_time = false;
          ph_nbio_emitter_timer_tick(emitter);
          break;

        case PORT_SOURCE_USER:
          break;

        case PORT_SOURCE_FD:
          thread->refresh_time = true;
          job = event[i].portev_user;

          switch (event[i].portev_events & (POLLIN|POLLOUT|POLLERR|POLLHUP)) {
            case POLLIN:
              mask = PH_IOMASK_READ;
              break;
            case POLLOUT:
              mask = PH_IOMASK_WRITE;
              break;
            case POLLIN|POLLOUT:
              mask = PH_IOMASK_READ|PH_IOMASK_WRITE;
              break;
            default:
              mask = PH_IOMASK_ERR;
          }
          job->kmask = 0;
          ph_nbio_emitter_dispatch_immediate(emitter, job, mask);
          break;
      }

      if (ph_job_have_deferred_items(thread)) {
        ph_job_pool_apply_deferred_items(thread);
      }
      ph_thread_epoch_end();
      ph_thread_epoch_poll();
    }
  }

  free(event);
}
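
One detail this backend depends on: a PORT_SOURCE_FD association is one-shot, so once port_getn() retrieves an event the descriptor must be re-associated before it can fire again, which is why the dispatch above clears job->kmask and leaves re-arming to the job machinery. A minimal sketch of the associate/retrieve/re-associate cycle (illumos/Solaris only; stdin and the tag string are placeholders for a real job pointer):

#include <sys/types.h>
#include <port.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  int port = port_create();
  if (port == -1) { perror("port_create"); return 1; }

  static char tag[] = "my-job";  /* stands in for a ph_job_t pointer */

  if (port_associate(port, PORT_SOURCE_FD, STDIN_FILENO, POLLIN, tag) == -1) {
    perror("port_associate");
    return 1;
  }

  port_event_t ev;
  uint_t n = 1;  /* block until at least one event arrives */
  if (port_getn(port, &ev, 1, &n, NULL) == 0 && n == 1) {
    printf("events=0x%x user=%s\n", ev.portev_events, (char *)ev.portev_user);
    /* The association was consumed; re-associate to keep watching. */
    port_associate(port, PORT_SOURCE_FD, STDIN_FILENO, POLLIN, tag);
  }
  close(port);
  return 0;
}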