Example #1
static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args, int success) {
  grpc_unary_promote_args *up_args = args;
  const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
  grpc_pollset *pollset = up_args->pollset;
  grpc_fd *fd = up_args->fd;

  /*
   * This is quite tricky. There are a number of cases to keep in mind here:
   * 1. fd may have been orphaned
   * 2. The pollset may no longer be a unary poller (and we can't let case #1
   * leak to other pollset types!)
   * 3. pollset's fd (which may have changed) may have been orphaned
   * 4. The pollset may be shutting down.
   */

  gpr_mu_lock(&pollset->mu);
  /* First we need to ensure that nobody is polling concurrently */
  GPR_ASSERT(!grpc_pollset_has_workers(pollset));

  gpr_free(up_args);
  /* At this point the pollset may no longer be a unary poller. In that case
   * we should just call the right add function and be done. */
  /* TODO(klempner): If we're not careful this could cause infinite recursion.
   * That's not a problem for now because empty_pollset has a trivial poller
   * and we don't have any mechanism to unbecome multipoller. */
  pollset->in_flight_cbs--;
  if (pollset->shutting_down) {
    /* We don't care about this pollset anymore. */
    if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
      finish_shutdown(exec_ctx, pollset);
    }
  } else if (grpc_fd_is_orphaned(fd)) {
    /* Don't try to add it to anything, we'll drop our ref on it below */
  } else if (pollset->vtable != original_vtable) {
    pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
  } else if (fd != pollset->data.ptr) {
    grpc_fd *fds[2];
    fds[0] = pollset->data.ptr;
    fds[1] = fd;

    if (fds[0] && !grpc_fd_is_orphaned(fds[0])) {
      grpc_platform_become_multipoller(exec_ctx, pollset, fds,
                                       GPR_ARRAY_SIZE(fds));
      GRPC_FD_UNREF(fds[0], "basicpoll");
    } else {
      /* old fd is orphaned and we haven't cleaned it up until now, so remain a
       * unary poller */
      /* Note that it is possible that fds[1] is also orphaned at this point.
       * That's okay, we'll correct it at the next add or poll. */
      if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll");
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    }
  }

  gpr_mu_unlock(&pollset->mu);

  /* Matching ref in basic_pollset_add_fd */
  GRPC_FD_UNREF(fd, "basicpoll_add");
}
static void multipoll_with_poll_pollset_finish_shutdown(grpc_pollset *pollset) {
  size_t i;
  pollset_hdr *h = pollset->data.ptr;
  for (i = 0; i < h->fd_count; i++) {
    GRPC_FD_UNREF(h->fds[i], "multipoller");
  }
  for (i = 0; i < h->del_count; i++) {
    GRPC_FD_UNREF(h->dels[i], "multipoller_del");
  }
  h->fd_count = 0;
  h->del_count = 0;
}
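Note the debug-tagged ref-counting discipline running through the example above: every GRPC_FD_REF names a reason ("basicpoll", "basicpoll_add") that the matching GRPC_FD_UNREF must repeat, which makes an unbalanced pair easy to trace. Below is a minimal standalone sketch of that idea; demo_fd and its functions are illustrative stand-ins, not gRPC APIs.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for grpc_fd's counted object. */
typedef struct {
  int refs;
} demo_fd;

/* Tagged ref/unref: the tag is only used for diagnostics, mirroring how
   GRPC_FD_REF/GRPC_FD_UNREF thread a reason string through. */
static void demo_fd_ref(demo_fd *fd, const char *reason) {
  fd->refs++;
  printf("ref   %p: %s (now %d)\n", (void *)fd, reason, fd->refs);
}

static void demo_fd_unref(demo_fd *fd, const char *reason) {
  assert(fd->refs > 0);
  fd->refs--;
  printf("unref %p: %s (now %d)\n", (void *)fd, reason, fd->refs);
  if (fd->refs == 0) {
    printf("destroy %p\n", (void *)fd);
  }
}

int main(void) {
  demo_fd fd = {1};                    /* owner's initial ref */
  demo_fd_ref(&fd, "basicpoll_add");   /* taken before queuing the promotion */
  demo_fd_unref(&fd, "basicpoll_add"); /* matching unref in the callback */
  demo_fd_unref(&fd, "owner");
  return 0;
}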
Example #3
static void basic_pollset_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 grpc_fd *fd, int and_unlock_pollset) {
  grpc_unary_promote_args *up_args;
  GPR_ASSERT(fd);
  if (fd == pollset->data.ptr) goto exit;

  if (!grpc_pollset_has_workers(pollset)) {
    /* Fast path -- no in flight cbs */
    /* TODO(klempner): Comment this out and fix any test failures or establish
     * they are due to timing issues */
    grpc_fd *fds[2];
    fds[0] = pollset->data.ptr;
    fds[1] = fd;

    if (fds[0] == NULL) {
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    } else if (!grpc_fd_is_orphaned(fds[0])) {
      grpc_platform_become_multipoller(exec_ctx, pollset, fds,
                                       GPR_ARRAY_SIZE(fds));
      GRPC_FD_UNREF(fds[0], "basicpoll");
    } else {
      /* old fd is orphaned and we haven't cleaned it up until now, so remain a
       * unary poller */
      GRPC_FD_UNREF(fds[0], "basicpoll");
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    }
    goto exit;
  }

  /* Now we need to promote. This needs to happen when we're not polling. Since
   * this may be called from poll, the wait needs to happen asynchronously. */
  GRPC_FD_REF(fd, "basicpoll_add");
  pollset->in_flight_cbs++;
  up_args = gpr_malloc(sizeof(*up_args));
  up_args->fd = fd;
  up_args->original_vtable = pollset->vtable;
  up_args->pollset = pollset;
  up_args->promotion_closure.cb = basic_do_promote;
  up_args->promotion_closure.cb_arg = up_args;

  grpc_closure_list_add(&pollset->idle_jobs, &up_args->promotion_closure, 1);
  grpc_pollset_kick(pollset, GRPC_POLLSET_KICK_BROADCAST);

exit:
  if (and_unlock_pollset) {
    gpr_mu_unlock(&pollset->mu);
  }
}
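basic_pollset_add_fd cannot restructure the pollset while a worker may be blocked in poll(), so the promotion is packaged as a closure, queued on idle_jobs, and the pollset is kicked so the closure runs once polling stops. Here is a minimal sketch of that deferral pattern; demo_closure and friends are illustrative names, not the real grpc_closure types.

#include <stdio.h>

/* Minimal closure shape, mirroring promotion_closure's cb/cb_arg pair. */
typedef struct demo_closure {
  void (*cb)(void *arg);
  void *cb_arg;
  struct demo_closure *next;
} demo_closure;

typedef struct {
  demo_closure *head;
} demo_closure_list;

static void demo_list_add(demo_closure_list *list, demo_closure *c) {
  c->next = list->head;
  list->head = c;
}

/* Drained at a safe point -- the analogue of running idle_jobs once no
   worker is blocked in poll(). */
static void demo_list_run(demo_closure_list *list) {
  while (list->head != NULL) {
    demo_closure *c = list->head;
    list->head = c->next;
    c->cb(c->cb_arg);
  }
}

static void promote_cb(void *arg) { printf("promoting: %s\n", (const char *)arg); }

int main(void) {
  demo_closure_list idle_jobs = {NULL};
  demo_closure c = {promote_cb, (void *)"fd 42", NULL};
  demo_list_add(&idle_jobs, &c);
  demo_list_run(&idle_jobs); /* would follow a kick in the real code */
  return 0;
}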
Example #4
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}
static void perform_delayed_add(void *arg, int iomgr_status) {
  delayed_add *da = arg;
  int do_shutdown_cb = 0;

  if (!grpc_fd_is_orphaned(da->fd)) {
    finally_add_fd(da->pollset, da->fd);
  }

  gpr_mu_lock(&da->pollset->mu);
  da->pollset->in_flight_cbs--;
  if (da->pollset->shutting_down) {
    /* We don't care about this pollset anymore. */
    if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
      da->pollset->called_shutdown = 1;
      do_shutdown_cb = 1;
    }
  }
  gpr_mu_unlock(&da->pollset->mu);

  GRPC_FD_UNREF(da->fd, "delayed_add");

  if (do_shutdown_cb) {
    da->pollset->shutdown_done_cb(da->pollset->shutdown_done_arg);
  }

  gpr_free(da);
}
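perform_delayed_add illustrates a recurring locking pattern: the decision to fire the shutdown callback is recorded under the pollset mutex, but the callback itself runs only after the mutex is released, since it may re-enter pollset code. A minimal sketch of that pattern using plain pthreads, with illustrative names:

#include <pthread.h>
#include <stdio.h>

typedef struct {
  pthread_mutex_t mu;
  int in_flight_cbs;
  int shutting_down;
  int called_shutdown;
  void (*shutdown_done_cb)(void *arg);
  void *shutdown_done_arg;
} demo_pollset;

static void demo_cb_finished(demo_pollset *p) {
  int do_shutdown_cb = 0;
  pthread_mutex_lock(&p->mu);
  p->in_flight_cbs--;
  if (p->shutting_down && p->in_flight_cbs == 0 && !p->called_shutdown) {
    p->called_shutdown = 1;
    do_shutdown_cb = 1; /* record the decision under the lock... */
  }
  pthread_mutex_unlock(&p->mu);
  if (do_shutdown_cb) {
    /* ...but run the callback unlocked, so it may re-enter the pollset. */
    p->shutdown_done_cb(p->shutdown_done_arg);
  }
}

static void done(void *arg) { printf("shutdown complete: %s\n", (const char *)arg); }

int main(void) {
  demo_pollset p = {PTHREAD_MUTEX_INITIALIZER, 1, 1, 0, done, (void *)"demo"};
  demo_cb_finished(&p); /* last in-flight callback triggers shutdown */
  return 0;
}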
Example #6
void grpc_fd_end_poll(grpc_exec_ctx *exec_ctx, grpc_fd_watcher *watcher,
                      int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  if (fd == NULL) {
    return;
  }

  gpr_mu_lock(&fd->mu);

  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    if (!got_read) {
      kick = 1;
    }
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    if (!got_write) {
      kick = 1;
    }
    fd->write_watcher = NULL;
  }
  if (!was_polling && watcher->worker != NULL) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (got_read) {
    if (set_ready_locked(exec_ctx, fd, &fd->read_closure)) {
      kick = 1;
    }
  }
  if (got_write) {
    if (set_ready_locked(exec_ctx, fd, &fd->write_closure)) {
      kick = 1;
    }
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
    fd->closed = 1;
    if (!fd->released) {
      close(fd->fd);
    }
    grpc_exec_ctx_enqueue(exec_ctx, fd->on_done_closure, 1);
  }
  gpr_mu_unlock(&fd->mu);

  GRPC_FD_UNREF(fd, "poll");
}
Example #7
void grpc_pollset_set_destroy(grpc_pollset_set *pollset_set) {
  size_t i;
  gpr_mu_destroy(&pollset_set->mu);
  for (i = 0; i < pollset_set->fd_count; i++) {
    GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
  }
  gpr_free(pollset_set->pollsets);
  gpr_free(pollset_set->pollset_sets);
  gpr_free(pollset_set->fds);
  gpr_free(pollset_set);
}
Example #8
static void basic_pollset_del_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                                 grpc_fd *fd, int and_unlock_pollset) {
  GPR_ASSERT(fd);
  if (fd == pollset->data.ptr) {
    GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
    pollset->data.ptr = NULL;
  }

  if (and_unlock_pollset) {
    gpr_mu_unlock(&pollset->mu);
  }
}
Example #9
uint32_t grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                            grpc_pollset_worker *worker, uint32_t read_mask,
                            uint32_t write_mask, grpc_fd_watcher *watcher) {
  uint32_t mask = 0;
  grpc_closure *cur;
  int requested;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->mu);

  /* if we are shutdown, then don't add to the watcher set */
  if (fd->shutdown) {
    watcher->fd = NULL;
    watcher->pollset = NULL;
    watcher->worker = NULL;
    gpr_mu_unlock(&fd->mu);
    GRPC_FD_UNREF(fd, "poll");
    return 0;
  }

  /* if there is nobody polling for read, but we need to, then start doing so */
  cur = fd->read_closure;
  requested = cur != CLOSURE_READY;
  if (read_mask && fd->read_watcher == NULL && requested) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  cur = fd->write_closure;
  requested = cur != CLOSURE_READY;
  if (write_mask && fd->write_watcher == NULL && requested) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0 && worker != NULL) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->worker = worker;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->mu);

  return mask;
}
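The inactive-watcher bookkeeping in grpc_fd_begin_poll and grpc_fd_end_poll is a circular doubly-linked list rooted at a sentinel node (inactive_watcher_root), which is why insertion and removal need no NULL checks. The same pointer gymnastics in a standalone sketch; demo_watcher is an illustrative name:

#include <stdio.h>

typedef struct demo_watcher {
  struct demo_watcher *next;
  struct demo_watcher *prev;
  const char *name;
} demo_watcher;

static void demo_insert(demo_watcher *root, demo_watcher *w) {
  /* the same three stores as grpc_fd_begin_poll's insert */
  w->next = root;
  w->prev = root->prev;
  w->next->prev = w->prev->next = w;
}

static void demo_remove(demo_watcher *w) {
  /* the same two stores as grpc_fd_end_poll's removal */
  w->next->prev = w->prev;
  w->prev->next = w->next;
}

int main(void) {
  demo_watcher root = {&root, &root, "root"}; /* empty list points at itself */
  demo_watcher a = {0}, b = {0};
  a.name = "a";
  b.name = "b";
  demo_insert(&root, &a);
  demo_insert(&root, &b);
  demo_remove(&a);
  for (demo_watcher *w = root.next; w != &root; w = w->next) {
    printf("inactive: %s\n", w->name); /* prints only "b" */
  }
  return 0;
}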
Example #10
gpr_uint32 grpc_fd_begin_poll(grpc_fd *fd, grpc_pollset *pollset,
                              gpr_uint32 read_mask, gpr_uint32 write_mask,
                              grpc_fd_watcher *watcher) {
  gpr_uint32 mask = 0;
  /* keep track of pollers that have requested our events, in case they change
   */
  GRPC_FD_REF(fd, "poll");

  gpr_mu_lock(&fd->watcher_mu);
  /* if we are shutdown, then don't add to the watcher set */
  if (gpr_atm_no_barrier_load(&fd->shutdown)) {
    watcher->fd = NULL;
    watcher->pollset = NULL;
    gpr_mu_unlock(&fd->watcher_mu);
    GRPC_FD_UNREF(fd, "poll");
    return 0;
  }
  /* if there is nobody polling for read, but we need to, then start doing so */
  if (read_mask && !fd->read_watcher &&
      (gpr_uintptr)gpr_atm_acq_load(&fd->readst) > READY) {
    fd->read_watcher = watcher;
    mask |= read_mask;
  }
  /* if there is nobody polling for write, but we need to, then start doing so
   */
  if (write_mask && !fd->write_watcher &&
      (gpr_uintptr)gpr_atm_acq_load(&fd->writest) > READY) {
    fd->write_watcher = watcher;
    mask |= write_mask;
  }
  /* if not polling, remember this watcher in case we need someone to later */
  if (mask == 0) {
    watcher->next = &fd->inactive_watcher_root;
    watcher->prev = watcher->next->prev;
    watcher->next->prev = watcher->prev->next = watcher;
  }
  watcher->pollset = pollset;
  watcher->fd = fd;
  gpr_mu_unlock(&fd->watcher_mu);

  return mask;
}
Example #11
void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  if (fd == NULL) {
    return;
  }

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  if (grpc_fd_is_orphaned(fd) && !has_watchers(fd) && !fd->closed) {
    fd->closed = 1;
    close(fd->fd);
    if (fd->on_done_closure != NULL) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}
Example #12
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                  grpc_pollset_set *pollset_set,
                                  grpc_pollset *pollset) {
  size_t i, j;
  gpr_mu_lock(&pollset_set->mu);
  if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
    pollset_set->pollset_capacity =
        GPR_MAX(8, 2 * pollset_set->pollset_capacity);
    pollset_set->pollsets =
        gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
                                               sizeof(*pollset_set->pollsets));
  }
  pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
  for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
    if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
      GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
    } else {
      grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
      pollset_set->fds[j++] = pollset_set->fds[i];
    }
  }
  pollset_set->fd_count = j;
  gpr_mu_unlock(&pollset_set->mu);
}
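grpc_pollset_set_add_pollset grows its pollsets array geometrically: capacity starts at 8 and doubles thereafter, giving amortized O(1) appends. A minimal standalone sketch of that growth rule, with realloc standing in for gpr_realloc, illustrative names throughout, and error handling omitted:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX(a, b) ((a) > (b) ? (a) : (b))

typedef struct {
  void **items;
  size_t count;
  size_t capacity;
} demo_vec;

static void demo_vec_push(demo_vec *v, void *item) {
  if (v->count == v->capacity) {
    /* same rule as above: GPR_MAX(8, 2 * capacity) */
    v->capacity = DEMO_MAX(8, 2 * v->capacity);
    v->items = realloc(v->items, v->capacity * sizeof(*v->items));
  }
  v->items[v->count++] = item;
}

int main(void) {
  demo_vec v = {NULL, 0, 0};
  int x = 42;
  for (int i = 0; i < 20; i++) demo_vec_push(&v, &x);
  printf("count=%zu capacity=%zu\n", v.count, v.capacity); /* 20, 32 */
  free(v.items);
  return 0;
}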
static int multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
    int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, np, nf, nd;
  pollset_hdr *h;
  grpc_kick_fd_info *kfd;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  if (h->pfd_capacity < h->fd_count + 1) {
    h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
    gpr_free(h->pfds);
    gpr_free(h->watchers);
    h->pfds = gpr_malloc(sizeof(struct pollfd) * h->pfd_capacity);
    h->watchers = gpr_malloc(sizeof(grpc_fd_watcher) * h->pfd_capacity);
  }
  nf = 0;
  np = 1;
  kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
  if (kfd == NULL) {
    /* Already kicked */
    return 1;
  }
  h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
  h->pfds[0].events = POLLIN;
  h->pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (nd = 0; nd < h->del_count; nd++) {
      if (h->fds[i] == h->dels[nd]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[nf++] = h->fds[i];
      h->watchers[np].fd = h->fds[i];
      h->pfds[np].fd = h->fds[i]->fd;
      h->pfds[np].revents = 0;
      np++;
    }
  }
  h->pfd_count = np;
  h->fd_count = nf;
  for (nd = 0; nd < h->del_count; nd++) {
    GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
  }
  h->del_count = 0;
  if (h->pfd_count == 0) {
    end_polling(pollset);
    return 0;
  }
  pollset->counter++;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < np; i++) {
    h->pfds[i].events = grpc_fd_begin_poll(h->watchers[i].fd, pollset, POLLIN,
                                           POLLOUT, &h->watchers[i]);
  }

  r = poll(h->pfds, h->pfd_count, timeout);

  end_polling(pollset);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (h->pfds[0].revents & POLLIN) {
      grpc_pollset_kick_consume(&pollset->kick_state, kfd);
    }
    for (i = 1; i < np; i++) {
      if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(h->watchers[i].fd, allow_synchronous_callback);
      }
      if (h->pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(h->watchers[i].fd, allow_synchronous_callback);
      }
    }
  }
  grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);

  gpr_mu_lock(&pollset->mu);
  pollset->counter--;

  return 1;
}
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset, grpc_pollset_worker *worker,
    gpr_timespec deadline, gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  int timeout;
  int r;
  size_t i, j, fd_count;
  nfds_t pfd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2));
  fd_count = 0;
  pfd_count = 2;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfds[1].events = POLLIN;
  pfds[1].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 2; i < pfd_count; i++) {
    pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,
                                               POLLIN, POLLOUT, &watchers[i]);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfds, pfd_count, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;

  if (r < 0) {
    gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else if (r == 0) {
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else {
    if (pfds[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfds[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    for (i = 2; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
        continue;
      }
      grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
                       pfds[i].revents & POLLOUT_CHECK);
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);
}
static void multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
    gpr_timespec now, int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, j, pfd_count, fd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 1));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 1));
  fd_count = 0;
  pfd_count = 1;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < pfd_count; i++) {
    pfds[i].events = grpc_fd_begin_poll(watchers[i].fd, pollset, POLLIN,
                                        POLLOUT, &watchers[i]);
  }

  r = grpc_poll_function(pfds, pfd_count, timeout);

  for (i = 1; i < pfd_count; i++) {
    grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,
                     pfds[i].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfds[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    for (i = 1; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        continue;
      }
      if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(watchers[i].fd, allow_synchronous_callback);
      }
      if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(watchers[i].fd, allow_synchronous_callback);
      }
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);

  gpr_mu_lock(&pollset->mu);
}
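All of these pollers reserve the first slot(s) of the pollfd array for a wakeup fd, so another thread can interrupt a blocked poll() by writing a byte, which the awakened poller then consumes. The sketch below reduces that kick mechanism to a plain POSIX pipe and poll(); it illustrates the idea behind GRPC_WAKEUP_FD_GET_READ_FD and grpc_wakeup_fd_consume_wakeup but is not the actual grpc_wakeup_fd implementation.

#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void) {
  int wakeup[2];
  if (pipe(wakeup) != 0) return 1;

  /* kick before polling, as another thread would */
  char byte = 0;
  (void)write(wakeup[1], &byte, 1);

  struct pollfd pfd;
  pfd.fd = wakeup[0]; /* slot 0: the kicker's read end */
  pfd.events = POLLIN;
  pfd.revents = 0;

  int r = poll(&pfd, 1, 1000 /* ms */);
  if (r > 0 && (pfd.revents & POLLIN)) {
    /* consume the wakeup so the next poll() can block again */
    (void)read(wakeup[0], &byte, 1);
    printf("kicked\n");
  }
  close(wakeup[0]);
  close(wakeup[1]);
  return 0;
}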
Example #16
static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
                                                grpc_pollset *pollset,
                                                grpc_pollset_worker *worker,
                                                gpr_timespec deadline,
                                                gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  struct pollfd pfd[3];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfd[1].events = POLLIN;
  pfd[1].revents = 0;
  nfds = 2;
  if (fd) {
    pfd[2].fd = fd->fd;
    pfd[2].revents = 0;
    GRPC_FD_REF(fd, "basicpoll_begin");
    gpr_mu_unlock(&pollset->mu);
    pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
                                              POLLOUT, &fd_watcher);
    if (pfd[2].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  GPR_TIMER_BEGIN("poll", 0);
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;
  GPR_TIMER_END("poll", 0);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else if (r == 0) {
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else {
    if (pfd[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfd[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    if (nfds > 2) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
                       pfd[2].revents & POLLOUT_CHECK);
    } else if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  }

  if (fd) {
    GRPC_FD_UNREF(fd, "basicpoll_begin");
  }
}
Example #17
static void basic_pollset_destroy(grpc_pollset *pollset) {
  if (pollset->data.ptr != NULL) {
    GRPC_FD_UNREF(pollset->data.ptr, "basicpoll");
    pollset->data.ptr = NULL;
  }
}
Example #18
static void basic_pollset_maybe_work(grpc_pollset *pollset,
                                     grpc_pollset_worker *worker,
                                     gpr_timespec deadline, gpr_timespec now,
                                     int allow_synchronous_callback) {
  struct pollfd pfd[2];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  nfds = 1;
  if (fd) {
    pfd[1].fd = fd->fd;
    pfd[1].revents = 0;
    gpr_mu_unlock(&pollset->mu);
    pfd[1].events =
        (short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
    if (pfd[1].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);

  if (fd) {
    grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN,
                     pfd[1].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfd[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    if (nfds > 1) {
      if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(fd, allow_synchronous_callback);
      }
      if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(fd, allow_synchronous_callback);
      }
    }
  }

  gpr_mu_lock(&pollset->mu);
}
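Finally, note that every revents check above folds POLLHUP and POLLERR into both the readable and the writable masks: a hung-up or errored fd is reported to the normal read and write paths, which then observe the failure on the actual I/O call. A tiny sketch of that decoding; demo_decode is an illustrative name:

#include <poll.h>
#include <stdio.h>

/* HUP and ERR count as both readable and writable, mirroring the
   POLLIN_CHECK / POLLOUT_CHECK masks used by the pollers above. */
static void demo_decode(short revents, int *readable, int *writable) {
  *readable = (revents & (POLLIN | POLLHUP | POLLERR)) != 0;
  *writable = (revents & (POLLOUT | POLLHUP | POLLERR)) != 0;
}

int main(void) {
  int r, w;
  demo_decode(POLLIN, &r, &w);
  printf("POLLIN : readable=%d writable=%d\n", r, w); /* 1 0 */
  demo_decode(POLLHUP, &r, &w);
  printf("POLLHUP: readable=%d writable=%d\n", r, w); /* 1 1 */
  return 0;
}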