void grpc_fd_end_poll(grpc_fd_watcher *watcher, int got_read, int got_write) {
  int was_polling = 0;
  int kick = 0;
  grpc_fd *fd = watcher->fd;

  if (fd == NULL) {
    return;
  }

  gpr_mu_lock(&fd->watcher_mu);
  if (watcher == fd->read_watcher) {
    /* remove read watcher, kick if we still need a read */
    was_polling = 1;
    kick = kick || !got_read;
    fd->read_watcher = NULL;
  }
  if (watcher == fd->write_watcher) {
    /* remove write watcher, kick if we still need a write */
    was_polling = 1;
    kick = kick || !got_write;
    fd->write_watcher = NULL;
  }
  if (!was_polling) {
    /* remove from inactive list */
    watcher->next->prev = watcher->prev;
    watcher->prev->next = watcher->next;
  }
  if (kick) {
    maybe_wake_one_watcher_locked(fd);
  }
  if (grpc_fd_is_orphaned(fd) && !has_watchers(fd)) {
    close(fd->fd);
    if (fd->on_done_closure != NULL) {
      grpc_iomgr_add_callback(fd->on_done_closure);
    }
  }
  gpr_mu_unlock(&fd->watcher_mu);

  GRPC_FD_UNREF(fd, "poll");
}
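/* For reference, a minimal sketch of the has_watchers() helper called above.
   It is not part of this listing; the shape is inferred from how
   grpc_fd_end_poll maintains the read/write watcher slots and splices the
   doubly-linked inactive list (an `inactive_watcher_root` sentinel member on
   grpc_fd is assumed here): */
static int has_watchers(grpc_fd *fd) {
  /* a watcher exists if either active slot is taken, or the inactive
     sentinel ring contains more than just itself */
  return fd->read_watcher != NULL || fd->write_watcher != NULL ||
         fd->inactive_watcher_root.next != &fd->inactive_watcher_root;
}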
static void perform_delayed_add(grpc_exec_ctx *exec_ctx, void *arg,
                                int iomgr_status) {
  delayed_add *da = arg;

  if (!grpc_fd_is_orphaned(da->fd)) {
    finally_add_fd(exec_ctx, da->pollset, da->fd);
  }

  gpr_mu_lock(&da->pollset->mu);
  da->pollset->in_flight_cbs--;
  if (da->pollset->shutting_down) {
    /* We don't care about this pollset anymore. */
    if (da->pollset->in_flight_cbs == 0 && !da->pollset->called_shutdown) {
      da->pollset->called_shutdown = 1;
      grpc_exec_ctx_enqueue(exec_ctx, da->pollset->shutdown_done, 1);
    }
  }
  gpr_mu_unlock(&da->pollset->mu);

  GRPC_FD_UNREF(da->fd, "delayed_add");

  gpr_free(da);
}
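/* A sketch of the delayed_add argument block consumed above. The pollset and
   fd fields are inferred directly from usage in perform_delayed_add; the
   closure member and its name are assumptions: */
typedef struct {
  grpc_pollset *pollset;          /* pollset the fd should be added to */
  grpc_fd *fd;                    /* fd holding a "delayed_add" ref */
  grpc_closure promotion_closure; /* hypothetical: storage for the callback */
} delayed_add;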
void grpc_pollset_set_add_pollset(grpc_exec_ctx *exec_ctx,
                                  grpc_pollset_set *pollset_set,
                                  grpc_pollset *pollset) {
  size_t i, j;
  gpr_mu_lock(&pollset_set->mu);
  if (pollset_set->pollset_count == pollset_set->pollset_capacity) {
    pollset_set->pollset_capacity =
        GPR_MAX(8, 2 * pollset_set->pollset_capacity);
    pollset_set->pollsets =
        gpr_realloc(pollset_set->pollsets, pollset_set->pollset_capacity *
                                               sizeof(*pollset_set->pollsets));
  }
  pollset_set->pollsets[pollset_set->pollset_count++] = pollset;
  for (i = 0, j = 0; i < pollset_set->fd_count; i++) {
    if (grpc_fd_is_orphaned(pollset_set->fds[i])) {
      GRPC_FD_UNREF(pollset_set->fds[i], "pollset_set");
    } else {
      grpc_pollset_add_fd(exec_ctx, pollset, pollset_set->fds[i]);
      pollset_set->fds[j++] = pollset_set->fds[i];
    }
  }
  pollset_set->fd_count = j;
  gpr_mu_unlock(&pollset_set->mu);
}
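/* A hedged sketch of the fd-side counterpart, assuming the same
   capacity-doubling, ref-counting, and locking conventions as
   grpc_pollset_set_add_pollset above (an fd_capacity member is assumed): */
void grpc_pollset_set_add_fd(grpc_exec_ctx *exec_ctx,
                             grpc_pollset_set *pollset_set, grpc_fd *fd) {
  size_t i;
  gpr_mu_lock(&pollset_set->mu);
  if (pollset_set->fd_count == pollset_set->fd_capacity) {
    pollset_set->fd_capacity = GPR_MAX(8, 2 * pollset_set->fd_capacity);
    pollset_set->fds =
        gpr_realloc(pollset_set->fds,
                    pollset_set->fd_capacity * sizeof(*pollset_set->fds));
  }
  GRPC_FD_REF(fd, "pollset_set");
  pollset_set->fds[pollset_set->fd_count++] = fd;
  /* make the new fd visible to every pollset already in the set */
  for (i = 0; i < pollset_set->pollset_count; i++) {
    grpc_pollset_add_fd(exec_ctx, pollset_set->pollsets[i], fd);
  }
  gpr_mu_unlock(&pollset_set->mu);
}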
static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
                                                grpc_pollset *pollset,
                                                grpc_pollset_worker *worker,
                                                gpr_timespec deadline,
                                                gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  struct pollfd pfd[3];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfd[1].events = POLLIN;
  pfd[1].revents = 0;
  nfds = 2;
  if (fd) {
    pfd[2].fd = fd->fd;
    pfd[2].revents = 0;
    GRPC_FD_REF(fd, "basicpoll_begin");
    gpr_mu_unlock(&pollset->mu);
    pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
                                              POLLOUT, &fd_watcher);
    if (pfd[2].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  GPR_TIMER_BEGIN("poll", 0);
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;
  GPR_TIMER_END("poll", 0);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else if (r == 0) {
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else {
    if (pfd[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfd[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    if (nfds > 2) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
                       pfd[2].revents & POLLOUT_CHECK);
    } else if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  }

  if (fd) {
    GRPC_FD_UNREF(fd, "basicpoll_begin");
  }
}
static void basic_do_promote(grpc_exec_ctx *exec_ctx, void *args,
                             bool success) {
  grpc_unary_promote_args *up_args = args;
  const grpc_pollset_vtable *original_vtable = up_args->original_vtable;
  grpc_pollset *pollset = up_args->pollset;
  grpc_fd *fd = up_args->fd;

  /*
   * This is quite tricky. There are a number of cases to keep in mind here:
   * 1. fd may have been orphaned
   * 2. The pollset may no longer be a unary poller (and we can't let case #1
   *    leak to other pollset types!)
   * 3. pollset's fd (which may have changed) may have been orphaned
   * 4. The pollset may be shutting down.
   */

  gpr_mu_lock(&pollset->mu);
  /* First we need to ensure that nobody is polling concurrently */
  GPR_ASSERT(!grpc_pollset_has_workers(pollset));

  gpr_free(up_args);
  /* At this point the pollset may no longer be a unary poller. In that case
   * we should just call the right add function and be done. */
  /* TODO(klempner): If we're not careful this could cause infinite recursion.
   * That's not a problem for now because empty_pollset has a trivial poller
   * and we don't have any mechanism to unbecome multipoller. */
  pollset->in_flight_cbs--;
  if (pollset->shutting_down) {
    /* We don't care about this pollset anymore. */
    if (pollset->in_flight_cbs == 0 && !pollset->called_shutdown) {
      pollset->called_shutdown = 1;
      finish_shutdown(exec_ctx, pollset);
    }
  } else if (grpc_fd_is_orphaned(fd)) {
    /* Don't try to add it to anything, we'll drop our ref on it below */
  } else if (pollset->vtable != original_vtable) {
    pollset->vtable->add_fd(exec_ctx, pollset, fd, 0);
  } else if (fd != pollset->data.ptr) {
    grpc_fd *fds[2];
    fds[0] = pollset->data.ptr;
    fds[1] = fd;

    if (fds[0] && !grpc_fd_is_orphaned(fds[0])) {
      grpc_platform_become_multipoller(exec_ctx, pollset, fds,
                                       GPR_ARRAY_SIZE(fds));
      GRPC_FD_UNREF(fds[0], "basicpoll");
    } else {
      /* old fd is orphaned and we haven't cleaned it up until now, so remain
       * a unary poller */
      /* Note that it is possible that fds[1] is also orphaned at this point.
       * That's okay, we'll correct it at the next add or poll. */
      if (fds[0]) GRPC_FD_UNREF(fds[0], "basicpoll");
      pollset->data.ptr = fd;
      GRPC_FD_REF(fd, "basicpoll");
    }
  }

  gpr_mu_unlock(&pollset->mu);

  /* Matching ref in basic_pollset_add_fd */
  GRPC_FD_UNREF(fd, "basicpoll_add");
}
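/* A sketch of the promotion argument block consumed by basic_do_promote,
   with fields inferred from usage above; the closure member and its name are
   assumptions: */
typedef struct grpc_unary_promote_args {
  const grpc_pollset_vtable *original_vtable; /* vtable at scheduling time */
  grpc_pollset *pollset;
  grpc_fd *fd;                    /* holds the "basicpoll_add" ref */
  grpc_closure promotion_closure; /* hypothetical: storage for the callback */
} grpc_unary_promote_args;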
static void basic_pollset_maybe_work(grpc_pollset *pollset,
                                     grpc_pollset_worker *worker,
                                     gpr_timespec deadline, gpr_timespec now,
                                     int allow_synchronous_callback) {
  struct pollfd pfd[2];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  nfds = 1;
  if (fd) {
    pfd[1].fd = fd->fd;
    pfd[1].revents = 0;
    gpr_mu_unlock(&pollset->mu);
    pfd[1].events =
        (short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
    if (pfd[1].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);

  if (fd) {
    grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN,
                     pfd[1].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfd[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    if (nfds > 1) {
      if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(fd, allow_synchronous_callback);
      }
      if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(fd, allow_synchronous_callback);
      }
    }
  }

  gpr_mu_lock(&pollset->mu);
}
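/* A minimal sketch of how a kick interacts with the poll() calls above,
   assuming the grpc_wakeup_fd API used in this file. Kicking a worker just
   makes its wakeup fd readable, so poll() returns promptly and the worker
   drains it with grpc_wakeup_fd_consume_wakeup (the pfd[0] handling above);
   the function name here is hypothetical: */
static void sketch_kick_worker(grpc_pollset_worker *worker) {
  /* write a byte to (or signal) the wakeup fd; the poller observes POLLIN */
  grpc_wakeup_fd_wakeup(&worker->wakeup_fd);
}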
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
    grpc_pollset_worker *worker, gpr_timespec deadline, gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  int timeout;
  int r;
  size_t i, j, fd_count;
  nfds_t pfd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline
     case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2));
  fd_count = 0;
  pfd_count = 2;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfds[1].events = POLLIN;
  pfds[1].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 2; i < pfd_count; i++) {
    pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,
                                               POLLIN, POLLOUT, &watchers[i]);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfds, pfd_count, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;

  if (r < 0) {
    gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else if (r == 0) {
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else {
    if (pfds[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfds[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    for (i = 2; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        /* begin_poll declined to watch this fd (e.g. it was shut down after
           the snapshot above), so report no events */
        grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
        continue;
      }
      grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
                       pfds[i].revents & POLLOUT_CHECK);
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);
}
static void multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
    gpr_timespec now, int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, j, pfd_count, fd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline
     case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 1));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 1));
  fd_count = 0;
  pfd_count = 1;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < pfd_count; i++) {
    pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, POLLIN,
                                               POLLOUT, &watchers[i]);
  }

  r = grpc_poll_function(pfds, pfd_count, timeout);

  for (i = 1; i < pfd_count; i++) {
    grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,
                     pfds[i].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfds[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    for (i = 1; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        continue;
      }
      if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(watchers[i].fd, allow_synchronous_callback);
      }
      if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(watchers[i].fd, allow_synchronous_callback);
      }
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);

  gpr_mu_lock(&pollset->mu);
}
static int multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
    int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, np, nf, nd;
  pollset_hdr *h;
  grpc_kick_fd_info *kfd;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  if (h->pfd_capacity < h->fd_count + 1) {
    h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
    gpr_free(h->pfds);
    gpr_free(h->watchers);
    h->pfds = gpr_malloc(sizeof(struct pollfd) * h->pfd_capacity);
    h->watchers = gpr_malloc(sizeof(grpc_fd_watcher) * h->pfd_capacity);
  }
  nf = 0;
  np = 1;
  kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
  if (kfd == NULL) {
    /* Already kicked */
    return 1;
  }
  h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
  h->pfds[0].events = POLLIN;
  h->pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (nd = 0; !remove && nd < h->del_count; nd++) {
      if (h->fds[i] == h->dels[nd]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[nf++] = h->fds[i];
      h->watchers[np].fd = h->fds[i];
      h->pfds[np].fd = h->fds[i]->fd;
      h->pfds[np].revents = 0;
      np++;
    }
  }
  h->pfd_count = np;
  h->fd_count = nf;
  for (nd = 0; nd < h->del_count; nd++) {
    GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
  }
  h->del_count = 0;
  if (h->pfd_count == 0) {
    end_polling(pollset);
    return 0;
  }
  pollset->counter++;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < np; i++) {
    h->pfds[i].events = (short)grpc_fd_begin_poll(
        h->watchers[i].fd, pollset, POLLIN, POLLOUT, &h->watchers[i]);
  }

  r = poll(h->pfds, h->pfd_count, timeout);

  end_polling(pollset);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (h->pfds[0].revents & POLLIN) {
      grpc_pollset_kick_consume(&pollset->kick_state, kfd);
    }
    for (i = 1; i < np; i++) {
      if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(h->watchers[i].fd, allow_synchronous_callback);
      }
      if (h->pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(h->watchers[i].fd, allow_synchronous_callback);
      }
    }
  }
  grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);

  gpr_mu_lock(&pollset->mu);
  pollset->counter--;

  return 1;
}