Example #1
static void verify_readable_and_reset(grpc_exec_ctx *exec_ctx, test_fd *tfds,
                                      const int num_fds) {
  for (int i = 0; i < num_fds; i++) {
    /* Verify that the on_readable callback was called */
    GPR_ASSERT(tfds[i].is_on_readable_called);

    /* Reset the tfd[i] structure */
    GPR_ASSERT(GRPC_ERROR_NONE ==
               grpc_wakeup_fd_consume_wakeup(&tfds[i].wakeup_fd));
    reset_test_fd(exec_ctx, &tfds[i]);
  }
}
Example #2
static int multipoll_with_epoll_pollset_maybe_work(
    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
    int allow_synchronous_callback) {
  struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
  int ep_rv;
  pollset_hdr *h = pollset->data.ptr;
  int timeout_ms;

  /* If you want to ignore epoll's ability to sanely handle parallel pollers,
   * for a more apples-to-apples performance comparison with poll, add a
   * if (pollset->counter != 0) { return 0; }
   * here.
   */

  timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pollset->counter += 1;
  gpr_mu_unlock(&pollset->mu);

  do {
    ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, timeout_ms);
    if (ep_rv < 0) {
      if (errno != EINTR) {
        gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
      }
    } else {
      int i;
      for (i = 0; i < ep_rv; ++i) {
        if (ep_ev[i].data.ptr == 0) {
          grpc_wakeup_fd_consume_wakeup(&h->wakeup_fd);
        } else {
          grpc_fd *fd = ep_ev[i].data.ptr;
          /* TODO(klempner): We might want to consider making err and pri
           * separate events */
          int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
          int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
          int write = ep_ev[i].events & EPOLLOUT;
          if (read || cancel) {
            grpc_fd_become_readable(fd, allow_synchronous_callback);
          }
          if (write || cancel) {
            grpc_fd_become_writable(fd, allow_synchronous_callback);
          }
        }
      }
    }
    timeout_ms = 0;
  } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);

  gpr_mu_lock(&pollset->mu);
  pollset->counter -= 1;
  return 1;
}
Example #3
static void test_threading_wakeup(grpc_exec_ctx *exec_ctx, void *arg,
                                  grpc_error *error) {
  threading_shared *shared = arg;
  ++shared->wakeups;
  ++thread_wakeups;
  if (error == GRPC_ERROR_NONE) {
    GPR_ASSERT(GRPC_LOG_IF_ERROR(
        "consume_wakeup", grpc_wakeup_fd_consume_wakeup(shared->wakeup_fd)));
    grpc_fd_notify_on_read(exec_ctx, shared->wakeup_desc, &shared->on_wakeup);
    GPR_ASSERT(GRPC_LOG_IF_ERROR("wakeup_next",
                                 grpc_wakeup_fd_wakeup(shared->wakeup_fd)));
  }
}
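Example #3 hands a wakeup from one thread to the next: consume the pending wakeup, re-arm notify-on-read, then signal the shared fd again. Stripped of the threading, the underlying produce/consume cycle looks roughly like the sketch below; it is not taken from the sources above and assumes the grpc_error*-returning signatures these examples use (and a void-returning grpc_wakeup_fd_destroy, as in Example #4).

static void wakeup_fd_round_trip(void) {
  grpc_wakeup_fd wfd;
  GPR_ASSERT(grpc_wakeup_fd_init(&wfd) == GRPC_ERROR_NONE);
  /* Producer side: make the read fd readable so a blocked poll()/epoll_wait()
     returns. */
  GPR_ASSERT(grpc_wakeup_fd_wakeup(&wfd) == GRPC_ERROR_NONE);
  /* Poller side: once the fd is reported readable, drain it so the next poll
     does not spin on a still-readable fd. */
  GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&wfd) == GRPC_ERROR_NONE);
  grpc_wakeup_fd_destroy(&wfd);
}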
Example #4
static void on_readable(grpc_exec_ctx *exec_ctx, void *arg, grpc_error *error) {
  GPR_TIMER_BEGIN("workqueue.on_readable", 0);

  grpc_workqueue *workqueue = arg;

  if (error != GRPC_ERROR_NONE) {
    /* HACK: let wakeup_fd code know that we stole the fd */
    workqueue->wakeup_fd.read_fd = 0;
    grpc_wakeup_fd_destroy(&workqueue->wakeup_fd);
    grpc_fd_orphan(exec_ctx, workqueue->wakeup_read_fd, NULL, NULL, "destroy");
    GPR_ASSERT(gpr_atm_no_barrier_load(&workqueue->state) == 0);
    gpr_free(workqueue);
  } else {
    error = grpc_wakeup_fd_consume_wakeup(&workqueue->wakeup_fd);
    gpr_mpscq_node *n = gpr_mpscq_pop(&workqueue->queue);
    if (error == GRPC_ERROR_NONE) {
      grpc_fd_notify_on_read(exec_ctx, workqueue->wakeup_read_fd,
                             &workqueue->read_closure);
    } else {
      /* recurse to get error handling */
      on_readable(exec_ctx, arg, error);
    }
    if (n == NULL) {
      /* try again - queue is in an inconsistent state */
      wakeup(exec_ctx, workqueue);
    } else {
      switch (gpr_atm_full_fetch_add(&workqueue->state, -2)) {
        case 3:  // had one count, one unorphaned --> done, unorphaned
          break;
        case 2:  // had one count, one orphaned --> done, orphaned
          workqueue_destroy(exec_ctx, workqueue);
          break;
        case 1:
        case 0:
          // these values are illegal - representing an already done or
          // deleted workqueue
          GPR_UNREACHABLE_CODE(break);
        default:
          // schedule a wakeup since there's more to do
          wakeup(exec_ctx, workqueue);
      }
      grpc_closure *cl = (grpc_closure *)n;
      grpc_error *clerr = cl->error;
      cl->cb(exec_ctx, cl->cb_arg, clerr);
      GRPC_ERROR_UNREF(clerr);
    }
  }

  GPR_TIMER_END("workqueue.on_readable", 0);
}
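The switch on gpr_atm_full_fetch_add(&workqueue->state, -2) in Example #4 reads more easily once the encoding is named: judging from the case comments, the low bit of state records whether the workqueue is still unorphaned and the remaining bits count items that have been queued but not yet run, so fetch_add(-2) retires one item. The helpers below are purely illustrative, are not in the original file, and only spell out that reading.

/* Hypothetical helpers, not from the file above: name the encoding the
   switch in Example #4 appears to rely on. */
static bool workqueue_state_is_unorphaned(gpr_atm state) {
  return (state & 1) != 0; /* low bit: workqueue still owned */
}
static gpr_atm workqueue_state_pending_items(gpr_atm state) {
  return state >> 1; /* remaining bits: items queued but not yet processed */
}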
Example #5
static grpc_error *pollset_process_events(grpc_exec_ctx *exec_ctx,
                                          grpc_pollset *pollset, bool drain) {
  static const char *err_desc = "pollset_process_events";
  grpc_error *error = GRPC_ERROR_NONE;
  for (int i = 0; (drain || i < MAX_EPOLL_EVENTS_HANDLED_EACH_POLL_CALL) &&
                  pollset->event_cursor != pollset->event_count;
       i++) {
    int n = pollset->event_cursor++;
    struct epoll_event *ev = &pollset->events[n];
    void *data_ptr = ev->data.ptr;
    if (1 & (intptr_t)data_ptr) {
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG, "PS:%p got pollset_wakeup %p", pollset, data_ptr);
      }
      append_error(&error, grpc_wakeup_fd_consume_wakeup(
                               (void *)((~(intptr_t)1) & (intptr_t)data_ptr)),
                   err_desc);
    } else {
      grpc_fd *fd = (grpc_fd *)data_ptr;
      bool cancel = (ev->events & (EPOLLERR | EPOLLHUP)) != 0;
      bool read_ev = (ev->events & (EPOLLIN | EPOLLPRI)) != 0;
      bool write_ev = (ev->events & EPOLLOUT) != 0;
      if (GRPC_TRACER_ON(grpc_polling_trace)) {
        gpr_log(GPR_DEBUG,
                "PS:%p got fd %p: cancel=%d read=%d "
                "write=%d",
                pollset, fd, cancel, read_ev, write_ev);
      }
      if (read_ev || cancel) {
        fd_become_readable(exec_ctx, fd, pollset);
      }
      if (write_ev || cancel) {
        fd_become_writable(exec_ctx, fd);
      }
    }
  }

  return error;
}
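Example #5 distinguishes wakeup fds from ordinary fds by tagging the low bit of epoll_event.data.ptr: an odd pointer means "this is a wakeup fd", and masking the bit off recovers the grpc_wakeup_fd pointer before it is passed to grpc_wakeup_fd_consume_wakeup. A small sketch of both directions follows; the helper names are hypothetical and not from the file above, but the untag expression mirrors the one in pollset_process_events.

/* Hypothetical helpers, not from the file above: tag a wakeup fd pointer by
   setting its low bit before handing it to epoll, and recover it afterwards
   the same way pollset_process_events does. */
static void *tag_wakeup_fd(grpc_wakeup_fd *wfd) {
  return (void *)(1 | (intptr_t)wfd);
}
static grpc_wakeup_fd *untag_wakeup_fd(void *data_ptr) {
  return (grpc_wakeup_fd *)((~(intptr_t)1) & (intptr_t)data_ptr);
}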
Example #6
static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
        grpc_pollset *pollset,
        grpc_pollset_worker *worker,
        gpr_timespec deadline,
        gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

    struct pollfd pfd[3];
    grpc_fd *fd;
    grpc_fd_watcher fd_watcher;
    int timeout;
    int r;
    nfds_t nfds;

    fd = pollset->data.ptr;
    if (fd && grpc_fd_is_orphaned(fd)) {
        GRPC_FD_UNREF(fd, "basicpoll");
        fd = pollset->data.ptr = NULL;
    }
    timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
    pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
    pfd[0].events = POLLIN;
    pfd[0].revents = 0;
    pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
    pfd[1].events = POLLIN;
    pfd[1].revents = 0;
    nfds = 2;
    if (fd) {
        pfd[2].fd = fd->fd;
        pfd[2].revents = 0;
        GRPC_FD_REF(fd, "basicpoll_begin");
        gpr_mu_unlock(&pollset->mu);
        pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
                        POLLOUT, &fd_watcher);
        if (pfd[2].events != 0) {
            nfds++;
        }
    } else {
        gpr_mu_unlock(&pollset->mu);
    }

    /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
       even going into the blocking annotation if possible */
    /* poll fd count (argument 2) is shortened by one if we have no events
       to poll on - such that it only includes the kicker */
    GPR_TIMER_BEGIN("poll", 0);
    GRPC_SCHEDULING_START_BLOCKING_REGION;
    r = grpc_poll_function(pfd, nfds, timeout);
    GRPC_SCHEDULING_END_BLOCKING_REGION;
    GPR_TIMER_END("poll", 0);

    if (r < 0) {
        if (errno != EINTR) {
            gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
        }
        if (fd) {
            grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
        }
    } else if (r == 0) {
        if (fd) {
            grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
        }
    } else {
        if (pfd[0].revents & POLLIN_CHECK) {
            grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
        }
        if (pfd[1].revents & POLLIN_CHECK) {
            grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
        }
        if (nfds > 2) {
            grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
                             pfd[2].revents & POLLOUT_CHECK);
        } else if (fd) {
            grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
        }
    }

    if (fd) {
        GRPC_FD_UNREF(fd, "basicpoll_begin");
    }
}
Example #7
static void basic_pollset_maybe_work(grpc_pollset *pollset,
                                     grpc_pollset_worker *worker,
                                     gpr_timespec deadline, gpr_timespec now,
                                     int allow_synchronous_callback) {
  struct pollfd pfd[2];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  nfds = 1;
  if (fd) {
    pfd[1].fd = fd->fd;
    pfd[1].revents = 0;
    gpr_mu_unlock(&pollset->mu);
    pfd[1].events =
        (short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
    if (pfd[1].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);

  if (fd) {
    grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN,
                     pfd[1].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfd[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    if (nfds > 1) {
      if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(fd, allow_synchronous_callback);
      }
      if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(fd, allow_synchronous_callback);
      }
    }
  }

  gpr_mu_lock(&pollset->mu);
}
Example #8
static void multipoll_with_epoll_pollset_maybe_work_and_unlock(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
    gpr_timespec deadline, gpr_timespec now) {
  struct epoll_event ep_ev[GRPC_EPOLL_MAX_EVENTS];
  int ep_rv;
  int poll_rv;
  pollset_hdr *h = pollset->data.ptr;
  int timeout_ms;
  struct pollfd pfds[2];

  /* If you want to ignore epoll's ability to sanely handle parallel pollers,
   * for a more apples-to-apples performance comparison with poll, add a
   * if (pollset->counter != 0) { return 0; }
   * here.
   */

  gpr_mu_unlock(&pollset->mu);

  timeout_ms = grpc_poll_deadline_to_millis_timeout(deadline, now);

  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  pfds[1].fd = h->epoll_fd;
  pfds[1].events = POLLIN;
  pfds[1].revents = 0;

  GRPC_SCHEDULING_START_BLOCKING_REGION;
  poll_rv = grpc_poll_function(pfds, 2, timeout_ms);
  GRPC_SCHEDULING_END_BLOCKING_REGION;

  if (poll_rv < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (poll_rv == 0) {
    /* do nothing */
  } else {
    if (pfds[0].revents) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    if (pfds[1].revents) {
      do {
        /* The following epoll_wait never blocks; it has a timeout of 0 */
        ep_rv = epoll_wait(h->epoll_fd, ep_ev, GRPC_EPOLL_MAX_EVENTS, 0);
        if (ep_rv < 0) {
          if (errno != EINTR) {
            gpr_log(GPR_ERROR, "epoll_wait() failed: %s", strerror(errno));
          }
        } else {
          int i;
          for (i = 0; i < ep_rv; ++i) {
            grpc_fd *fd = ep_ev[i].data.ptr;
            /* TODO(klempner): We might want to consider making err and pri
             * separate events */
            int cancel = ep_ev[i].events & (EPOLLERR | EPOLLHUP);
            int read = ep_ev[i].events & (EPOLLIN | EPOLLPRI);
            int write = ep_ev[i].events & EPOLLOUT;
            if (read || cancel) {
              grpc_fd_become_readable(exec_ctx, fd);
            }
            if (write || cancel) {
              grpc_fd_become_writable(exec_ctx, fd);
            }
          }
        }
      } while (ep_rv == GRPC_EPOLL_MAX_EVENTS);
    }
  }
}
Example #9
void test_poll_cv_trigger(void) {
  grpc_wakeup_fd cvfd1, cvfd2, cvfd3;
  struct pollfd pfds[6];
  poll_args pargs;
  gpr_thd_id t_id;
  gpr_thd_options opt;

  GPR_ASSERT(grpc_wakeup_fd_init(&cvfd1) == GRPC_ERROR_NONE);
  GPR_ASSERT(grpc_wakeup_fd_init(&cvfd2) == GRPC_ERROR_NONE);
  GPR_ASSERT(grpc_wakeup_fd_init(&cvfd3) == GRPC_ERROR_NONE);
  GPR_ASSERT(cvfd1.read_fd < 0);
  GPR_ASSERT(cvfd2.read_fd < 0);
  GPR_ASSERT(cvfd3.read_fd < 0);
  GPR_ASSERT(cvfd1.read_fd != cvfd2.read_fd);
  GPR_ASSERT(cvfd2.read_fd != cvfd3.read_fd);
  GPR_ASSERT(cvfd1.read_fd != cvfd3.read_fd);

  pfds[0].fd = cvfd1.read_fd;
  pfds[1].fd = cvfd2.read_fd;
  pfds[2].fd = 20;
  pfds[3].fd = 30;
  pfds[4].fd = cvfd3.read_fd;
  pfds[5].fd = 50;

  pfds[0].events = 0;
  pfds[1].events = POLLIN;
  pfds[2].events = POLLIN | POLLHUP;
  pfds[3].events = POLLIN | POLLHUP;
  pfds[4].events = POLLIN;
  pfds[5].events = POLLIN;

  pargs.fds = pfds;
  pargs.nfds = 6;
  pargs.timeout = 1000;
  pargs.result = -2;

  opt = gpr_thd_options_default();
  gpr_thd_options_set_joinable(&opt);
  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);

  // Wakeup wakeup_fd not listening for events
  GPR_ASSERT(grpc_wakeup_fd_wakeup(&cvfd1) == GRPC_ERROR_NONE);
  gpr_thd_join(t_id);
  GPR_ASSERT(pargs.result == 0);
  GPR_ASSERT(pfds[0].revents == 0);
  GPR_ASSERT(pfds[1].revents == 0);
  GPR_ASSERT(pfds[2].revents == 0);
  GPR_ASSERT(pfds[3].revents == 0);
  GPR_ASSERT(pfds[4].revents == 0);
  GPR_ASSERT(pfds[5].revents == 0);

  // Pollin on socket fd
  pargs.timeout = -1;
  pargs.result = -2;
  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
  trigger_socket_event();
  gpr_thd_join(t_id);
  GPR_ASSERT(pargs.result == 1);
  GPR_ASSERT(pfds[0].revents == 0);
  GPR_ASSERT(pfds[1].revents == 0);
  GPR_ASSERT(pfds[2].revents == POLLIN);
  GPR_ASSERT(pfds[3].revents == 0);
  GPR_ASSERT(pfds[4].revents == 0);
  GPR_ASSERT(pfds[5].revents == 0);

  // Pollin on wakeup fd
  reset_socket_event();
  pargs.result = -2;
  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
  GPR_ASSERT(grpc_wakeup_fd_wakeup(&cvfd2) == GRPC_ERROR_NONE);
  gpr_thd_join(t_id);

  GPR_ASSERT(pargs.result == 1);
  GPR_ASSERT(pfds[0].revents == 0);
  GPR_ASSERT(pfds[1].revents == POLLIN);
  GPR_ASSERT(pfds[2].revents == 0);
  GPR_ASSERT(pfds[3].revents == 0);
  GPR_ASSERT(pfds[4].revents == 0);
  GPR_ASSERT(pfds[5].revents == 0);

  // Pollin on wakeupfd before poll()
  pargs.result = -2;
  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
  gpr_thd_join(t_id);

  GPR_ASSERT(pargs.result == 1);
  GPR_ASSERT(pfds[0].revents == 0);
  GPR_ASSERT(pfds[1].revents == POLLIN);
  GPR_ASSERT(pfds[2].revents == 0);
  GPR_ASSERT(pfds[3].revents == 0);
  GPR_ASSERT(pfds[4].revents == 0);
  GPR_ASSERT(pfds[5].revents == 0);

  // No Events
  pargs.result = -2;
  pargs.timeout = 1000;
  reset_socket_event();
  GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&cvfd1) == GRPC_ERROR_NONE);
  GPR_ASSERT(grpc_wakeup_fd_consume_wakeup(&cvfd2) == GRPC_ERROR_NONE);
  gpr_thd_new(&t_id, &background_poll, &pargs, &opt);
  gpr_thd_join(t_id);

  GPR_ASSERT(pargs.result == 0);
  GPR_ASSERT(pfds[0].revents == 0);
  GPR_ASSERT(pfds[1].revents == 0);
  GPR_ASSERT(pfds[2].revents == 0);
  GPR_ASSERT(pfds[3].revents == 0);
  GPR_ASSERT(pfds[4].revents == 0);
  GPR_ASSERT(pfds[5].revents == 0);
}
Example #10
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
    gpr_timespec deadline, gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  int timeout;
  int r;
  size_t i, j, fd_count;
  nfds_t pfd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2));
  fd_count = 0;
  pfd_count = 2;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfds[1].events = POLLIN;
  pfds[1].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 2; i < pfd_count; i++) {
    pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,
                                               POLLIN, POLLOUT, &watchers[i]);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfds, pfd_count, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;

  if (r < 0) {
    gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else if (r == 0) {
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else {
    if (pfds[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfds[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    for (i = 2; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
        continue;
      }
      grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
                       pfds[i].revents & POLLOUT_CHECK);
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);
}
Example #11
static void multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
    gpr_timespec now, int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, j, pfd_count, fd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 1));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 1));
  fd_count = 0;
  pfd_count = 1;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < pfd_count; i++) {
    pfds[i].events = grpc_fd_begin_poll(watchers[i].fd, pollset, POLLIN,
                                        POLLOUT, &watchers[i]);
  }

  r = grpc_poll_function(pfds, pfd_count, timeout);

  for (i = 1; i < pfd_count; i++) {
    grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,
                     pfds[i].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfds[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    for (i = 1; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        continue;
      }
      if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(watchers[i].fd, allow_synchronous_callback);
      }
      if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(watchers[i].fd, allow_synchronous_callback);
      }
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);

  gpr_mu_lock(&pollset->mu);
}