static void finally_add_fd(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_fd *fd) {
  pollset_hdr *h = pollset->data.ptr;
  struct epoll_event ev;
  int err;
  grpc_fd_watcher watcher;

  /* We pretend to be polling whilst adding an fd to keep the fd from being
     closed during the add. This may result in a spurious wakeup being
     assigned to this pollset whilst adding, but that should be benign. */
  GPR_ASSERT(grpc_fd_begin_poll(fd, pollset, 0, 0, &watcher) == 0);
  if (watcher.fd != NULL) {
    ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET);
    ev.data.ptr = fd;
    err = epoll_ctl(h->epoll_fd, EPOLL_CTL_ADD, fd->fd, &ev);
    if (err < 0) {
      /* FDs may be added to a pollset multiple times, so EEXIST is normal. */
      if (errno != EEXIST) {
        gpr_log(GPR_ERROR, "epoll_ctl add for %d failed: %s", fd->fd,
                strerror(errno));
      }
    }
  }
  grpc_fd_end_poll(exec_ctx, &watcher, 0, 0);
}
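/* Standalone sketch of the tolerant registration pattern finally_add_fd uses
   above: an fd is registered edge-triggered for both directions, and EEXIST
   is treated as success because an fd may be added to the same pollset more
   than once. The helper name epoll_add_tolerant is hypothetical; only
   epoll_ctl and the standard headers below are real APIs. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/epoll.h>

static int epoll_add_tolerant(int epoll_fd, int fd, void *tag) {
  struct epoll_event ev;
  ev.events = (uint32_t)(EPOLLIN | EPOLLOUT | EPOLLET); /* edge-triggered */
  ev.data.ptr = tag; /* demultiplexing tag handed back by epoll_wait */
  if (epoll_ctl(epoll_fd, EPOLL_CTL_ADD, fd, &ev) < 0 && errno != EEXIST) {
    fprintf(stderr, "epoll_ctl add for %d failed: %s\n", fd, strerror(errno));
    return -1;
  }
  return 0; /* added, or already present (EEXIST) */
}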
static void basic_pollset_maybe_work_and_unlock(grpc_exec_ctx *exec_ctx,
                                                grpc_pollset *pollset,
                                                grpc_pollset_worker *worker,
                                                gpr_timespec deadline,
                                                gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  struct pollfd pfd[3];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  pfd[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfd[1].events = POLLIN;
  pfd[1].revents = 0;
  nfds = 2;
  if (fd) {
    pfd[2].fd = fd->fd;
    pfd[2].revents = 0;
    GRPC_FD_REF(fd, "basicpoll_begin");
    gpr_mu_unlock(&pollset->mu);
    pfd[2].events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN,
                                              POLLOUT, &fd_watcher);
    if (pfd[2].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  GPR_TIMER_BEGIN("poll", 0);
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;
  GPR_TIMER_END("poll", 0);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else if (r == 0) {
    if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  } else {
    if (pfd[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfd[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    if (nfds > 2) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, pfd[2].revents & POLLIN_CHECK,
                       pfd[2].revents & POLLOUT_CHECK);
    } else if (fd) {
      grpc_fd_end_poll(exec_ctx, &fd_watcher, 0, 0);
    }
  }

  if (fd) {
    GRPC_FD_UNREF(fd, "basicpoll_begin");
  }
}
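/* Usage sketch of the begin_poll/end_poll watcher protocol assumed by the
   pollers in this file, distilled from the calls above; watcher_sketch is a
   hypothetical function, not part of the library. grpc_fd_begin_poll pins
   the fd against concurrent orphaning and returns the event mask to poll
   for (0 if the fd is already shutting down); grpc_fd_end_poll unpins it
   and reports what was observed. */
static void watcher_sketch(grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
                           grpc_pollset_worker *worker, grpc_fd *fd) {
  grpc_fd_watcher watcher;
  struct pollfd pfd;
  pfd.fd = fd->fd;
  pfd.revents = 0;
  /* A zero event mask means the fd is orphaning/shutting down: skip poll. */
  pfd.events = (short)grpc_fd_begin_poll(fd, pollset, worker, POLLIN, POLLOUT,
                                         &watcher);
  if (pfd.events != 0) {
    grpc_poll_function(&pfd, 1, -1 /* block indefinitely */);
  }
  /* end_poll is always paired with begin_poll, even when nothing was
     polled (both observation arguments 0 in that case). */
  grpc_fd_end_poll(exec_ctx, &watcher, pfd.revents & POLLIN,
                   pfd.revents & POLLOUT);
}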
static void basic_pollset_maybe_work(grpc_pollset *pollset,
                                     grpc_pollset_worker *worker,
                                     gpr_timespec deadline, gpr_timespec now,
                                     int allow_synchronous_callback) {
  struct pollfd pfd[2];
  grpc_fd *fd;
  grpc_fd_watcher fd_watcher;
  int timeout;
  int r;
  nfds_t nfds;

  fd = pollset->data.ptr;
  if (fd && grpc_fd_is_orphaned(fd)) {
    GRPC_FD_UNREF(fd, "basicpoll");
    fd = pollset->data.ptr = NULL;
  }
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  pfd[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfd[0].events = POLLIN;
  pfd[0].revents = 0;
  nfds = 1;
  if (fd) {
    pfd[1].fd = fd->fd;
    pfd[1].revents = 0;
    gpr_mu_unlock(&pollset->mu);
    pfd[1].events =
        (short)grpc_fd_begin_poll(fd, pollset, POLLIN, POLLOUT, &fd_watcher);
    if (pfd[1].events != 0) {
      nfds++;
    }
  } else {
    gpr_mu_unlock(&pollset->mu);
  }

  /* poll fd count (argument 2) is shortened by one if we have no events
     to poll on - such that it only includes the kicker */
  r = grpc_poll_function(pfd, nfds, timeout);
  GRPC_TIMER_MARK(GRPC_PTAG_POLL_FINISHED, r);

  if (fd) {
    grpc_fd_end_poll(&fd_watcher, pfd[1].revents & POLLIN,
                     pfd[1].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfd[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    if (nfds > 1) {
      if (pfd[1].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(fd, allow_synchronous_callback);
      }
      if (pfd[1].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(fd, allow_synchronous_callback);
      }
    }
  }

  gpr_mu_lock(&pollset->mu);
}
static void multipoll_with_poll_pollset_maybe_work_and_unlock(
    grpc_exec_ctx *exec_ctx, grpc_pollset *pollset,
    grpc_pollset_worker *worker, gpr_timespec deadline, gpr_timespec now) {
#define POLLOUT_CHECK (POLLOUT | POLLHUP | POLLERR)
#define POLLIN_CHECK (POLLIN | POLLHUP | POLLERR)

  int timeout;
  int r;
  size_t i, j, fd_count;
  nfds_t pfd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline
     case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 2));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 2));

  fd_count = 0;
  pfd_count = 2;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&grpc_global_wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  pfds[1].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd->fd);
  pfds[1].events = POLLIN;
  pfds[1].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 2; i < pfd_count; i++) {
    pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, worker,
                                               POLLIN, POLLOUT, &watchers[i]);
  }

  /* TODO(vpai): Consider first doing a 0 timeout poll here to avoid
     even going into the blocking annotation if possible */
  GRPC_SCHEDULING_START_BLOCKING_REGION;
  r = grpc_poll_function(pfds, pfd_count, timeout);
  GRPC_SCHEDULING_END_BLOCKING_REGION;

  if (r < 0) {
    /* EINTR is expected when a signal interrupts poll(); don't log it. */
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else if (r == 0) {
    for (i = 2; i < pfd_count; i++) {
      grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
    }
  } else {
    if (pfds[0].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&grpc_global_wakeup_fd);
    }
    if (pfds[1].revents & POLLIN_CHECK) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd->fd);
    }
    for (i = 2; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        grpc_fd_end_poll(exec_ctx, &watchers[i], 0, 0);
        continue;
      }
      grpc_fd_end_poll(exec_ctx, &watchers[i], pfds[i].revents & POLLIN_CHECK,
                       pfds[i].revents & POLLOUT_CHECK);
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);
}
static void multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, grpc_pollset_worker *worker, gpr_timespec deadline,
    gpr_timespec now, int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, j, pfd_count, fd_count;
  pollset_hdr *h;
  /* TODO(ctiller): inline some elements to avoid an allocation */
  grpc_fd_watcher *watchers;
  struct pollfd *pfds;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  /* TODO(ctiller): perform just one malloc here if we exceed the inline
     case */
  pfds = gpr_malloc(sizeof(*pfds) * (h->fd_count + 1));
  watchers = gpr_malloc(sizeof(*watchers) * (h->fd_count + 1));

  fd_count = 0;
  pfd_count = 1;
  pfds[0].fd = GRPC_WAKEUP_FD_GET_READ_FD(&worker->wakeup_fd);
  pfds[0].events = POLLIN;
  pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (j = 0; !remove && j < h->del_count; j++) {
      if (h->fds[i] == h->dels[j]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[fd_count++] = h->fds[i];
      watchers[pfd_count].fd = h->fds[i];
      pfds[pfd_count].fd = h->fds[i]->fd;
      pfds[pfd_count].revents = 0;
      pfd_count++;
    }
  }
  for (j = 0; j < h->del_count; j++) {
    GRPC_FD_UNREF(h->dels[j], "multipoller_del");
  }
  h->del_count = 0;
  h->fd_count = fd_count;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < pfd_count; i++) {
    pfds[i].events = (short)grpc_fd_begin_poll(watchers[i].fd, pollset, POLLIN,
                                               POLLOUT, &watchers[i]);
  }

  r = grpc_poll_function(pfds, pfd_count, timeout);

  for (i = 1; i < pfd_count; i++) {
    grpc_fd_end_poll(&watchers[i], pfds[i].revents & POLLIN,
                     pfds[i].revents & POLLOUT);
  }

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (pfds[0].revents & POLLIN) {
      grpc_wakeup_fd_consume_wakeup(&worker->wakeup_fd);
    }
    for (i = 1; i < pfd_count; i++) {
      if (watchers[i].fd == NULL) {
        continue;
      }
      if (pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(watchers[i].fd, allow_synchronous_callback);
      }
      if (pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(watchers[i].fd, allow_synchronous_callback);
      }
    }
  }

  gpr_free(pfds);
  gpr_free(watchers);
  gpr_mu_lock(&pollset->mu);
}
static int multipoll_with_poll_pollset_maybe_work(
    grpc_pollset *pollset, gpr_timespec deadline, gpr_timespec now,
    int allow_synchronous_callback) {
  int timeout;
  int r;
  size_t i, np, nf, nd;
  pollset_hdr *h;
  grpc_kick_fd_info *kfd;

  h = pollset->data.ptr;
  timeout = grpc_poll_deadline_to_millis_timeout(deadline, now);
  if (h->pfd_capacity < h->fd_count + 1) {
    h->pfd_capacity = GPR_MAX(h->pfd_capacity * 3 / 2, h->fd_count + 1);
    gpr_free(h->pfds);
    gpr_free(h->watchers);
    h->pfds = gpr_malloc(sizeof(struct pollfd) * h->pfd_capacity);
    h->watchers = gpr_malloc(sizeof(grpc_fd_watcher) * h->pfd_capacity);
  }
  nf = 0;
  np = 1;
  kfd = grpc_pollset_kick_pre_poll(&pollset->kick_state);
  if (kfd == NULL) {
    /* Already kicked */
    return 1;
  }
  h->pfds[0].fd = GRPC_POLLSET_KICK_GET_FD(kfd);
  h->pfds[0].events = POLLIN;
  h->pfds[0].revents = 0;
  for (i = 0; i < h->fd_count; i++) {
    int remove = grpc_fd_is_orphaned(h->fds[i]);
    for (nd = 0; !remove && nd < h->del_count; nd++) {
      if (h->fds[i] == h->dels[nd]) remove = 1;
    }
    if (remove) {
      GRPC_FD_UNREF(h->fds[i], "multipoller");
    } else {
      h->fds[nf++] = h->fds[i];
      h->watchers[np].fd = h->fds[i];
      h->pfds[np].fd = h->fds[i]->fd;
      h->pfds[np].revents = 0;
      np++;
    }
  }
  h->pfd_count = np;
  h->fd_count = nf;
  for (nd = 0; nd < h->del_count; nd++) {
    GRPC_FD_UNREF(h->dels[nd], "multipoller_del");
  }
  h->del_count = 0;
  if (h->pfd_count == 0) {
    end_polling(pollset);
    return 0;
  }
  pollset->counter++;
  gpr_mu_unlock(&pollset->mu);

  for (i = 1; i < np; i++) {
    h->pfds[i].events = (short)grpc_fd_begin_poll(
        h->watchers[i].fd, pollset, POLLIN, POLLOUT, &h->watchers[i]);
  }

  r = poll(h->pfds, h->pfd_count, timeout);

  end_polling(pollset);

  if (r < 0) {
    if (errno != EINTR) {
      gpr_log(GPR_ERROR, "poll() failed: %s", strerror(errno));
    }
  } else if (r == 0) {
    /* do nothing */
  } else {
    if (h->pfds[0].revents & POLLIN) {
      grpc_pollset_kick_consume(&pollset->kick_state, kfd);
    }
    for (i = 1; i < np; i++) {
      if (h->pfds[i].revents & (POLLIN | POLLHUP | POLLERR)) {
        grpc_fd_become_readable(h->watchers[i].fd, allow_synchronous_callback);
      }
      if (h->pfds[i].revents & (POLLOUT | POLLHUP | POLLERR)) {
        grpc_fd_become_writable(h->watchers[i].fd, allow_synchronous_callback);
      }
    }
  }
  grpc_pollset_kick_post_poll(&pollset->kick_state, kfd);

  gpr_mu_lock(&pollset->mu);
  pollset->counter--;
  return 1;
}
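/* Sketch of the deadline-to-timeout conversion every poller above relies on.
   This is a labeled simplification, not the body of
   grpc_poll_deadline_to_millis_timeout (the real helper may also round up
   and clamp the result); it uses only public gpr time APIs. poll() treats
   -1 as "block indefinitely" and 0 as "return immediately". */
static int deadline_to_millis_sketch(gpr_timespec deadline, gpr_timespec now) {
  if (gpr_time_cmp(deadline, gpr_inf_future(deadline.clock_type)) == 0) {
    return -1; /* no deadline: let poll() block indefinitely */
  }
  if (gpr_time_cmp(deadline, now) <= 0) {
    return 0; /* deadline already passed: poll without blocking */
  }
  return (int)gpr_time_to_millis(gpr_time_sub(deadline, now));
}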