/*
 * Event loop for the poll(2) based waiter.
 *
 * Blocks in poll() until the next binheap deadline (or forever when the
 * heap is empty), then walks the pollfd array: overdue sessions get
 * WAITER_TIMEOUT, readable ones WAITER_ACTION.  Slot 0 is the internal
 * wakeup pipe, handled last via vwp_dopipe().
 *
 * priv is a struct vwp (cast-checked against VWP_MAGIC); never returns.
 */
static void *
vwp_main(void *priv)
{
	int v;
	struct vwp *vwp;
	struct waiter *w;
	struct waited *wp;
	double now, then;
	int i;

	THR_SetName("cache-poll");
	CAST_OBJ_NOTNULL(vwp, priv, VWP_MAGIC);
	w = vwp->waiter;
	while (1) {
		/*
		 * Derive the poll() timeout (in ms) from the earliest
		 * deadline in the heap; -1 blocks indefinitely when the
		 * heap is empty.
		 */
		then = Wait_HeapDue(w, &wp);
		if (wp == NULL)
			i = -1;
		else
			i = (int)floor(1e3 * (then - VTIM_real()));
		assert(vwp->hpoll > 0);
		AN(vwp->pollfd);
		v = poll(vwp->pollfd, vwp->hpoll, i);
		assert(v >= 0);
		now = VTIM_real();
		/* Slot 0 is the wakeup pipe; account for its event but
		 * service it after the scan below. */
		if (vwp->pollfd[0].revents)
			v--;
		/* Note: no i++ in the loop header — the deletion branches
		 * rely on vwp_del() presumably moving another entry into
		 * slot i (TODO confirm against vwp_del). */
		for (i = 1; i < vwp->hpoll;) {
			VSL(SLT_Debug, vwp->pollfd[i].fd,
			    "POLL loop i=%d revents=0x%x", i,
			    vwp->pollfd[i].revents);
			assert(vwp->pollfd[i].fd != vwp->pipes[0]);
			wp = vwp->idx[i];
			CHECK_OBJ_NOTNULL(wp, WAITED_MAGIC);
			/* Early exit: all reported events consumed and
			 * nothing in the heap is due yet. */
			if (v == 0 && Wait_HeapDue(w, NULL) > now)
				break;
			if (vwp->pollfd[i].revents)
				v--;
			then = Wait_When(wp);
			if (then <= now) {
				/* Deadline passed: time the session out. */
				Wait_HeapDelete(w, wp);
				Wait_Call(w, wp, WAITER_TIMEOUT, now);
				vwp_del(vwp, i);
			} else if (vwp->pollfd[i].revents & POLLIN) {
				/* Data available: hand the fd back. */
				assert(wp->fd > 0);
				assert(wp->fd == vwp->pollfd[i].fd);
				Wait_HeapDelete(w, wp);
				Wait_Call(w, wp, WAITER_ACTION, now);
				vwp_del(vwp, i);
			} else {
				/* Not due, not readable (POLLERR/POLLHUP
				 * fall through here too): keep waiting. */
				i++;
			}
		}
		/* Service the wakeup pipe (fd adds / stop requests). */
		if (vwp->pollfd[0].revents)
			vwp_dopipe(vwp);
	}
	NEEDLESS_RETURN(NULL);
}
static void * vws_thread(void *priv) { struct waited *wp; struct waiter *w; struct vws *vws; double now, then; struct timespec ts; const double max_t = 100.0; port_event_t ev[MAX_EVENTS]; u_int nevents; int ei, ret; CAST_OBJ_NOTNULL(vws, priv, VWS_MAGIC); w = vws->waiter; CHECK_OBJ_NOTNULL(w, WAITER_MAGIC); THR_SetName("cache-ports"); now = VTIM_real(); while (!vws->die) { while (1) { then = Wait_HeapDue(w, &wp); if (wp == NULL) { vws->next = now + max_t; break; } else if (then > now) { vws->next = then; break; } CHECK_OBJ_NOTNULL(wp, WAITED_MAGIC); vws_del(vws, wp->fd); Wait_Call(w, wp, WAITER_TIMEOUT, now); } then = vws->next - now; ts.tv_sec = (time_t)floor(then); ts.tv_nsec = (long)(1e9 * (then - ts.tv_sec)); /* * min number of events we accept. could consider to scale up * for efficiency, but as we always get all waiting events up to * the maximum, we'd only optimize the idle case sacrificing * some latency */ nevents = 1; /* * see disucssion in * - https://issues.apache.org/bugzilla/show_bug.cgi?id=47645 * - http://mail.opensolaris.org/pipermail/\ * networking-discuss/2009-August/011979.html * * comment from apr/poll/unix/port.c : * * This confusing API can return an event at the same time * that it reports EINTR or ETIME. * */ ret = port_getn(vws->dport, ev, MAX_EVENTS, &nevents, &ts); now = VTIM_real(); if (ret < 0 && errno == EBADF) { /* close on dport is our stop signal */ AN(vws->die); break; } if (ret < 0) assert((errno == EINTR) || (errno == ETIME)); for (ei = 0; ei < nevents; ei++) vws_port_ev(vws, w, &ev[ei], now); } return NULL; }
/*
 * Event loop for the epoll(2) based waiter (Linux).
 *
 * Each iteration expires overdue binheap entries (WAITER_TIMEOUT), then
 * blocks in epoll_wait() until the next deadline and dispatches returned
 * events: EPOLLIN becomes WAITER_ACTION, everything else WAITER_REMCLOSE.
 * Entries whose data.ptr is the vwe itself are wakeup-pipe notifications.
 *
 * priv is a struct vwe (cast-checked against VWE_MAGIC); returns NULL
 * after vwe->die is set and no waited sessions remain.
 */
static void *
vwe_thread(void *priv)
{
	struct epoll_event ev[NEEV], *ep;
	struct waited *wp;
	struct waiter *w;
	double now, then;
	int i, n, active;
	struct vwe *vwe;
	char c;

	CAST_OBJ_NOTNULL(vwe, priv, VWE_MAGIC);
	w = vwe->waiter;
	CHECK_OBJ_NOTNULL(w, WAITER_MAGIC);
	THR_SetName("cache-epoll");
	THR_Init();
	now = VTIM_real();
	while (1) {
		/*
		 * Timeout expiry loop.  NB: vwe->mtx is deliberately still
		 * held when we leave via break — the timeout computation
		 * below happens under the lock, which is released just
		 * before epoll_wait().
		 */
		while (1) {
			Lck_Lock(&vwe->mtx);
			/*
			 * XXX: We could avoid many syscalls here if we were
			 * XXX: allowed to just close the fd's on timeout.
			 */
			then = Wait_HeapDue(w, &wp);
			if (wp == NULL) {
				/* Empty heap: wake at most 100s out. */
				vwe->next = now + 100;
				break;
			} else if (then > now) {
				vwe->next = then;
				break;
			}
			CHECK_OBJ_NOTNULL(wp, WAITED_MAGIC);
			AZ(epoll_ctl(vwe->epfd, EPOLL_CTL_DEL, wp->fd, NULL));
			vwe->nwaited--;
			AN(Wait_HeapDelete(w, wp));
			/* Drop the lock before calling out. */
			Lck_Unlock(&vwe->mtx);
			Wait_Call(w, wp, WAITER_TIMEOUT, now);
		}
		then = vwe->next - now;
		i = (int)ceil(1e3 * then);
		assert(i > 0);
		Lck_Unlock(&vwe->mtx);
		do {
			/* Due to a linux kernel bug, epoll_wait can
			   return EINTR when the process is subjected to
			   ptrace or waking from OS suspend. */
			n = epoll_wait(vwe->epfd, ev, NEEV, i);
		} while (n < 0 && errno == EINTR);
		assert(n >= 0);
		assert(n <= NEEV);
		now = VTIM_real();
		for (ep = ev, i = 0; i < n; i++, ep++) {
			if (ep->data.ptr == vwe) {
				/* Wakeup pipe: drain one byte.
				 * NOTE(review): side effect inside assert()
				 * — safe only because the project's assert
				 * is always enabled; confirm. */
				assert(read(vwe->pipe[0], &c, 1) == 1);
				continue;
			}
			CAST_OBJ_NOTNULL(wp, ep->data.ptr, WAITED_MAGIC);
			/* Losing the heap race means the timeout loop
			 * above already handled this session. */
			Lck_Lock(&vwe->mtx);
			active = Wait_HeapDelete(w, wp);
			Lck_Unlock(&vwe->mtx);
			if (!active) {
				VSL(SLT_Debug, wp->fd, "epoll: spurious event");
				continue;
			}
			AZ(epoll_ctl(vwe->epfd, EPOLL_CTL_DEL, wp->fd, NULL));
			vwe->nwaited--;
			/* Only EPOLLIN is an action; ERR/HUP/anything
			 * else all mean the remote closed. */
			if (ep->events & EPOLLIN)
				Wait_Call(w, wp, WAITER_ACTION, now);
			else if (ep->events & EPOLLERR)
				Wait_Call(w, wp, WAITER_REMCLOSE, now);
			else if (ep->events & EPOLLHUP)
				Wait_Call(w, wp, WAITER_REMCLOSE, now);
			else
				Wait_Call(w, wp, WAITER_REMCLOSE, now);
		}
		if (vwe->nwaited == 0 && vwe->die)
			break;
	}
	closefd(&vwe->pipe[0]);
	closefd(&vwe->pipe[1]);
	closefd(&vwe->epfd);
	return (NULL);
}