/*
 * Arguments: evq_udata, ev_ludata, [reuse_fd (boolean)]
 * Returns: [evq_udata]
 */
static int
levq_del (lua_State *L)
{
    struct event_queue *evq = checkudata(L, 1, EVQ_TYPENAME);
    struct event *ev = levq_toevent(L, 2);
    const int reuse_fd = lua_toboolean(L, 3);
    int rc = 0;

    lua_assert(ev);

#undef ARG_LAST
#define ARG_LAST 1

#ifdef EVQ_POST_INIT
    /* drop a pending post-init reference to this event */
    if (evq->ev_post == ev)
        evq->ev_post = NULL;
#endif

    if (!event_deleted(ev))
        rc = evq_del(ev, reuse_fd);

    /* release the event slot now unless the event is still active or
     * already marked for postponed deletion (then the loop frees it) */
    if (!(ev->flags & (EVENT_ACTIVE | EVENT_DELETE)))
        levq_del_event(evq, ev);
    ev->flags |= EVENT_DELETE;

    if (rc)
        return sys_seterror(L, 0);

    lua_settop(L, 1);  /* return evq_udata */
    return 1;
}
/*
 * Walks the list of completed overlapped AIO requests, marks the owning
 * events ready and prepends them to the ev_ready list, which is returned.
 * Cancelled requests (STATUS_CANCELLED or detached from their event) are
 * only recycled.
 */
static struct event *
win32iocr_process (struct event_queue *evq, struct win32overlapped *ov,
                   struct event *ev_ready, const msec_t now)
{
    struct win32overlapped *ov_next;

    for (; ov; ov = ov_next) {
	struct event *ev;
	const DWORD err = ov->il.err;
	const int cancelled = (err == STATUS_CANCELLED) || !ov->e.ev;

	/* save the link before the overlapped block is recycled */
	ov_next = ov->o.ov_next;
	win32iocr_overlapped_del(evq, ov);
	if (cancelled)
	    continue;

	ev = ov->e.ev;
	ev->w.ov = NULL;
	ev->flags &= ~EVENT_AIO_PENDING;  /* have to set AIO request */
	/* any error is reported as EOF; otherwise map the event's
	 * direction to the matching result flag */
	ev->flags |= err ? EVENT_EOF_RES
	 : ((ev->flags & EVENT_READ) ? EVENT_READ_RES : EVENT_WRITE_RES);

	if (!(ev->flags & EVENT_ACTIVE)) {
	    ev->flags |= EVENT_ACTIVE;
	    if (ev->flags & EVENT_ONESHOT)
		evq_del(ev, 1);
	    else if (ev->tq && !(ev->flags & EVENT_TIMEOUT_MANUAL))
		timeout_reset(ev, now);

	    /* push onto the ready list */
	    ev->next_ready = ev_ready;
	    ev_ready = ev;
	}
    }
    return ev_ready;
}
/*
 * Arms zero-byte IOCP read and/or write requests (per rw_flags) on the
 * event's socket, unless a request of that kind is already pending.
 * Returns 0 on success, -1 on failure.
 */
EVQ_API int
win32iocp_set (struct event *ev, const unsigned int rw_flags)
{
    static WSABUF buf = {0, 0};  /* zero-length buffer: completion only signals readiness */
    struct event_queue *evq = ev->wth->evq;
    const sd_t sd = (sd_t) ev->fd;

    if ((rw_flags & EVENT_READ) && !ev->w.iocp.rov) {
	struct win32overlapped *ov = win32iocp_new_overlapped(evq);
	DWORD flags = 0;

	if (!ov) return -1;
	if (!WSARecv(sd, &buf, 1, NULL, &flags, (OVERLAPPED *) ov, NULL)) {
	    /* completed synchronously */
	    if (ev->flags & EVENT_AIO_SKIP) {
		/* report readiness directly, skip the completion port */
		win32iocp_del_overlapped(evq, ov);
		ev->flags |= EVENT_READ_RES | EVENT_ACTIVE;
		goto ready;
	    }
	} else if (WSAGetLastError() != WSA_IO_PENDING) {
	    win32iocp_del_overlapped(evq, ov);
	    return -1;
	}
	ov->ev = ev;
	ev->w.iocp.rov = ov;
	ev->flags |= EVENT_RPENDING;  /* IOCP read request is installed */
    }

    if ((rw_flags & EVENT_WRITE) && !ev->w.iocp.wov) {
	struct win32overlapped *ov = win32iocp_new_overlapped(evq);

	if (!ov) return -1;
	if (!WSASend(sd, &buf, 1, NULL, 0, (OVERLAPPED *) ov, NULL)) {
	    /* completed synchronously */
	    if (ev->flags & EVENT_AIO_SKIP) {
		win32iocp_del_overlapped(evq, ov);
		ev->flags |= EVENT_WRITE_RES | EVENT_ACTIVE;
		goto ready;
	    }
	} else if (WSAGetLastError() != WSA_IO_PENDING) {
	    win32iocp_del_overlapped(evq, ov);
	    return -1;
	}
	ov->ev = ev;
	ev->w.iocp.wov = ov;
	ev->flags |= EVENT_WPENDING;  /* IOCP write request is installed */
    }
    return 0;
 ready:
    /* synchronous completion: queue the event onto the ready list now */
    if (ev->flags & EVENT_ONESHOT)
	evq_del(ev, 1);
    else if (ev->tq && !(ev->flags & EVENT_TIMEOUT_MANUAL))
	timeout_reset(ev, evq->now);

    ev->next_ready = evq->ev_ready;
    evq->ev_ready = ev;
    return 0;
}
/*
 * Arguments: evq_udata
 *
 * Destructor: deletes every still-registered event found in the
 * coroutine's udata table, then finalizes the queue itself.
 */
static int
levq_done (lua_State *L)
{
    struct event_queue *evq = checkudata(L, 1, EVQ_TYPENAME);
    struct event *buffers[EVQ_ENV_BUF_MAX + 1];  /* cache */
    lua_State *NL = evq->L;

    memset(buffers, 0, sizeof(buffers));

#undef ARG_LAST
#define ARG_LAST 1

    /* delete object events */
    lua_pushnil(NL);
    while (lua_next(NL, EVQ_CORO_UDATA)) {
	/* key is the event id; derive which power-of-two buffer holds it */
	const int ev_id = lua_tointeger(NL, -2);
	const int buf_idx = getmaxbit(
	 (ev_id | ((1 << EVQ_ENV_BUF_IDX) - 1)) + 1);
	const int nmax = (1 << buf_idx);
	struct event *ev = buffers[buf_idx];

	if (!ev) {
	    /* first hit on this buffer: fetch its base pointer and cache it */
	    lua_rawgeti(NL, EVQ_CORO_ENV, buf_idx - EVQ_ENV_BUF_IDX + 1);
	    ev = lua_touserdata(NL, -1);
	    lua_pop(NL, 1);  /* pop events buffer */
	    buffers[buf_idx] = ev;
	}
	/* index within the buffer = ev_id minus the buffer's first id */
	ev += ev_id - ((nmax - 1) & ~((1 << EVQ_ENV_BUF_IDX) - 1));

	if (!event_deleted(ev))
	    evq_del(ev, 0);
	lua_pop(NL, 1);  /* pop value */
    }

    evq_done(evq);
    return 0;
}
/*
 * kqueue backend: waits up to timeout (msec; TIMEOUT_INFINITE blocks) for
 * events, flushes pending changes, and builds evq->ev_ready.
 * Returns 0 on success or interrupt, EVQ_TIMEOUT when nothing fired,
 * EVQ_FAILED on kevent() error.
 */
EVQ_API int
evq_wait (struct event_queue *evq, msec_t timeout)
{
    struct event *ev_ready;
    struct kevent *kev = evq->kev_list;
    struct timespec ts, *tsp;
    int nready;

    if (timeout != 0L) {
	/* clamp to the nearest timer deadline */
	timeout = timeout_get(evq->tq, timeout, evq->now);
	if (timeout == 0L) {
	    ev_ready = timeout_process(evq->tq, NULL, evq->now);
	    goto end;
	}
    }

    if (timeout == TIMEOUT_INFINITE)
	tsp = NULL;
    else {
	ts.tv_sec = timeout / 1000;
	ts.tv_nsec = (timeout % 1000) * 1000000;
	tsp = &ts;
    }

    sys_vm_leave();
    /* kev_list doubles as changelist and eventlist */
    nready = kevent(evq->kqueue_fd, kev, evq->nchanges, kev, NEVENT, tsp);
    sys_vm_enter();

    evq->nchanges = 0;
    evq->now = sys_milliseconds();

    if (nready == -1)
	return (errno == EINTR) ? 0 : EVQ_FAILED;

    if (tsp) {
	if (!nready) {
	    /* nothing fired: expire timers, if any */
	    ev_ready = !evq->tq ? NULL
	     : timeout_process(evq->tq, NULL, evq->now);
	    if (ev_ready) goto end;
	    return EVQ_TIMEOUT;
	}
	timeout = evq->now;
    }

    ev_ready = NULL;
    for (; nready--; ++kev) {
	struct event *ev;
	const int flags = kev->flags;
	const int filter = kev->filter;

	if (flags & EV_ERROR)
	    continue;

	if (filter == EVFILT_SIGNAL) {
	    ev_ready = signal_process_actives(evq, kev->ident,
	     ev_ready, timeout);
	    continue;
	}

	ev = (struct event *) kev->udata;
	if (!ev) {
	    /* NULL udata marks the queue's interrupt pipe */
	    ev_ready = signal_process_interrupt(evq, ev_ready, timeout);
	    continue;
	}

	ev->flags |= (filter == EVFILT_READ ? EVENT_READ_RES : EVENT_WRITE_RES)
	 | ((flags & EV_EOF) ? EVENT_EOF_RES : 0);
	if (ev->flags & EVENT_ACTIVE)
	    continue;
	ev->flags |= EVENT_ACTIVE;
	if (ev->flags & EVENT_ONESHOT)
	    evq_del(ev, 1);
	else if (ev->tq && !(ev->flags & EVENT_TIMEOUT_MANUAL))
	    timeout_reset(ev, timeout);

	ev->next_ready = ev_ready;
	ev_ready = ev;
    }
    if (!ev_ready)
	return 0;
 end:
    evq->ev_ready = ev_ready;
    return 0;
}
/*
 * Drains the IOCP completion port (non-blocking), marks completed events
 * ready and prepends them to ev_ready, which is returned.  Uses
 * GetQueuedCompletionStatusEx (batched) when available, otherwise
 * dequeues one completion at a time.
 */
static struct event *
win32iocp_process (struct event_queue *evq, struct event *ev_ready, msec_t now)
{
    const HANDLE iocph = evq->iocp.h;
    OVERLAPPED_ENTRY entries[NENTRY];
    ULONG nentries = 0;

    for (; ; ) {
	struct win32overlapped *ov;
	struct event *ev;
	BOOL status;
	int cancelled = 0;

	if (pGetQueuedCompletionStatusEx) {
	    /* refill the local batch when it is exhausted */
	    if (!nentries && !pGetQueuedCompletionStatusEx(iocph,
	     entries, NENTRY, &nentries, 0L, FALSE))
		break;
	    {
		const OVERLAPPED_ENTRY *ove = &entries[--nentries];
		const DWORD err = (DWORD) ove->lpOverlapped->Internal;

		ov = (struct win32overlapped *) ove->lpOverlapped;
		status = !err;
		cancelled = (err == STATUS_CANCELLED);
	    }
	} else {
	    ULONG_PTR key;
	    DWORD nr;

	    status = GetQueuedCompletionStatus(iocph, &nr, &key,
	     (OVERLAPPED **) &ov, 0L);
	    if (!status) {
		const DWORD err = GetLastError();

		if (err == WAIT_TIMEOUT)
		    break;  /* port drained */
		cancelled = (err == ERROR_OPERATION_ABORTED);
	    }
	}
	if (!ov) {
	    /* completion without an overlapped block */
	    if (pGetQueuedCompletionStatusEx)
		continue;
	    break;  /* error */
	}

	ev = ov->ev;
	cancelled = ev ? cancelled : 1;  /* orphaned request: drop it */
	win32iocp_del_overlapped(evq, ov);
	if (cancelled)
	    continue;

	if (!status)
	    ev->flags |= EVENT_EOF_RES;
	else if (ov == ev->w.iocp.rov) {
	    ev->w.iocp.rov = NULL;
	    ev->flags |= EVENT_READ_RES;
	    ev->flags &= ~EVENT_RPENDING;  /* have to set IOCP read request */
	} else {
	    ev->w.iocp.wov = NULL;
	    ev->flags |= EVENT_WRITE_RES;
	    ev->flags &= ~EVENT_WPENDING;  /* have to set IOCP write request */
	}

	if (ev->flags & EVENT_ACTIVE)
	    continue;
	ev->flags |= EVENT_ACTIVE;
	if (ev->flags & EVENT_ONESHOT)
	    evq_del(ev, 1);
	else if (ev->tq && !(ev->flags & EVENT_TIMEOUT_MANUAL)) {
	    if (now == 0L) {
		/* lazily refresh the clock only when a timer needs it */
		now = evq->now = sys_milliseconds();
	    }
	    timeout_reset(ev, now);
	}

	ev->next_ready = ev_ready;
	ev_ready = ev;
    }
    return ev_ready;
}
/*
 * poll backend: waits up to timeout (msec; TIMEOUT_INFINITE blocks) and
 * builds evq->ev_ready from fds with pending revents.  fdset[0] is the
 * queue's interrupt descriptor.  Returns 0 on success or interrupt,
 * EVQ_TIMEOUT when nothing fired, EVQ_FAILED on poll() error.
 */
EVQ_API int
evq_wait (struct event_queue *evq, msec_t timeout)
{
    struct event *ev_ready;
    struct event **events = evq->events;
    struct pollfd *fdset = evq->fdset;
    const int npolls = evq->npolls;
    int i, nready;

    if (timeout != 0L) {
	/* clamp to the nearest timer deadline */
	timeout = timeout_get(evq->tq, timeout, evq->now);
	if (timeout == 0L) {
	    ev_ready = timeout_process(evq->tq, NULL, evq->now);
	    goto end;
	}
    }

    sys_vm_leave();
    nready = poll(fdset, npolls, (int) timeout);
    sys_vm_enter();

    evq->now = get_milliseconds();

    if (nready == -1)
	return (errno == EINTR) ? 0 : EVQ_FAILED;

    if (timeout != TIMEOUT_INFINITE) {
	if (!nready) {
	    /* nothing fired: expire timers, if any */
	    ev_ready = !evq->tq ? NULL
	     : timeout_process(evq->tq, NULL, evq->now);
	    if (ev_ready) goto end;
	    return EVQ_TIMEOUT;
	}
	timeout = evq->now;
    }

    ev_ready = NULL;
    if (fdset[0].revents & POLLIN) {
	fdset[0].revents = 0;
	ev_ready = signal_process_interrupt(evq, ev_ready, timeout);
	--nready;
    }

    for (i = 1; i < npolls; i++) {
	const int revents = fdset[i].revents;
	struct event *ev;
	unsigned int res;

	if (!revents) continue;
	fdset[i].revents = 0;

	ev = events[i];
	res = EVENT_ACTIVE;
	if ((revents & POLLFD_READ) && (ev->flags & EVENT_READ))
	    res |= EVENT_READ_RES;
	if ((revents & POLLFD_WRITE) && (ev->flags & EVENT_WRITE))
	    res |= EVENT_WRITE_RES;
	if (revents & POLLHUP)
	    res |= EVENT_EOF_RES;
	ev->flags |= res;

	if (ev->flags & EVENT_ONESHOT)
	    evq_del(ev, 1);
	else if (ev->tq && !(ev->flags & EVENT_TIMEOUT_MANUAL))
	    timeout_reset(ev, timeout);

	ev->next_ready = ev_ready;
	ev_ready = ev;
	if (!--nready) break;  /* all reported fds handled */
    }
    if (!ev_ready)
	return 0;
 end:
    evq->ev_ready = ev_ready;
    return 0;
}
/*
 * Arguments: evq_udata, [timeout (milliseconds), once (boolean),
 *	fetch (boolean)]
 * Returns: [evq_udata | timeout (false)]
 *	|
 * Returns: [ev_ludata, obj_udata, event (string: "r", "w", "t", "e"),
 *	eof_status (number)]
 *
 * Main event loop: waits for events and dispatches them to callbacks,
 * scheduler coroutines, or back to the caller (fetch mode).
 * NOTE(review): this definition continues beyond the visible chunk
 * (the outer while-loop's tail and final return are not shown here).
 */
static int
levq_loop (lua_State *L)
{
    struct event_queue *evq = checkudata(L, 1, EVQ_TYPENAME);
    const msec_t timeout = (lua_type(L, 2) != LUA_TNUMBER)
     ? TIMEOUT_INFINITE : (msec_t) lua_tointeger(L, 2);
    const int once = lua_toboolean(L, 3);
    const int fetch = lua_toboolean(L, 4);

#undef ARG_LAST
#define ARG_LAST 1

    lua_settop(L, ARG_LAST);
    {
	/* copy the callback and udata tables from the queue's coroutine
	 * to this stack (slots ARG_LAST+1 and ARG_LAST+2) */
	lua_State *NL = evq->L;
	lua_pushvalue(NL, EVQ_CORO_CALLBACK);
	lua_pushvalue(NL, EVQ_CORO_UDATA);
	lua_xmove(NL, L, 2);
    }

#ifdef EVQ_POST_INIT
    if (evq->ev_post) {
	evq_post_init(evq->ev_post);
	evq->ev_post = NULL;
    }
#endif

    while (!evq_is_empty(evq)) {
	struct event *ev;

	if (evq->stop) {
	    evq->stop = 0;
	    break;
	}

	/* process synchronous operations */
	if (evq->sync_op) {
	    struct evq_sync_op *op = evq->sync_op;

	    evq->sync_op = NULL;
	    levq_sync_process(L, evq, op);
	}

	if (!evq->ev_ready) {
	    const int res = evq_wait(evq, timeout);

	    if (res == EVQ_TIMEOUT) {
		lua_pushboolean(L, 0);
		return 1;
	    }
	    if (res == EVQ_FAILED)
		return sys_seterror(L, 0);
	}

	ev = evq->ev_ready;
	if (!ev) continue;
	do {
	    const unsigned int ev_flags = ev->flags;

	    /* clear EVENT_ACTIVE and EVENT_*_RES flags */
	    ev->flags &= EVENT_MASK;
	    evq->ev_ready = ev->next_ready;

	    if (ev_flags & EVENT_DELETE) {
		/* postponed deletion of active event */
		levq_del_event(evq, ev);
	    } else {
		if ((ev_flags & EVENT_CALLBACK) || fetch) {
		    const int ev_id = ev->ev_id;

		    /* callback function */
		    lua_rawgeti(L, ARG_LAST+1, ev_id);
		    /* arguments */
		    if (!(ev_flags & EVENT_CALLBACK_SCHED)) {
			lua_pushvalue(L, 1);  /* evq_udata */
			lua_pushlightuserdata(L, ev);  /* ev_ludata */
			lua_rawgeti(L, ARG_LAST+2, ev_id);  /* obj_udata */
		    }
		    /* event kind: "e"=eof, "t"=timeout, "w"=write, "r"=read */
		    if (ev_flags & EVENT_EOF_MASK_RES) {
			lua_pushliteral(L, "e");
			lua_pushinteger(L,
			 (int) ev_flags >> EVENT_EOF_SHIFT_RES);
		    } else {
			lua_pushstring(L, (ev_flags & EVENT_TIMEOUT_RES) ? "t"
			 : (ev_flags & EVENT_WRITE_RES) ? "w" : "r");
			lua_pushnil(L);
		    }
		}

		if ((ev_flags & EVENT_ONESHOT) && !event_deleted(ev))
		    evq_del(ev, 1);

		if (event_deleted(ev))
		    levq_del_event(evq, ev);  /* deletion of oneshot event */
#ifdef EVQ_POST_INIT
		else evq->ev_post = ev;
#endif

		if (ev_flags & EVENT_CALLBACK_SCHED) {
		    /* callback function: coroutine */
		    lua_State *co = lua_tothread(L, ARG_LAST+3);

		    lua_xmove(L, co, 2);
		    lua_pop(L, 1);  /* pop coroutine */
		    sys_sched_event_ready(co, ev);
		} else if (ev_flags & EVENT_CALLBACK_CORO) {
		    /* callback runs in its own coroutine */
		    lua_State *co = lua_tothread(L, ARG_LAST+3);

		    lua_xmove(L, co, 5);
		    lua_pop(L, 1);  /* pop coroutine */

		    switch (lua_resume(co, L, 5)) {
		    case 0:
			/* coroutine finished: drop its event */
			lua_settop(co, 0);
			if (!event_deleted(ev)) {
			    evq_del(ev, 0);
			    levq_del_event(evq, ev);
#ifdef EVQ_POST_INIT
			    evq->ev_post = NULL;
#endif
			}
			break;
		    case LUA_YIELD:
			lua_settop(co, 0);
			break;
		    default:
			lua_xmove(co, L, 1);  /* error message */
			lua_error(L);
		    }
		} else if (ev_flags & EVENT_CALLBACK)
		    lua_call(L, 5, 0);
		else if (fetch)
		    return 4;

#ifdef EVQ_POST_INIT
		if (evq->ev_post) {
		    evq_post_init(evq->ev_post);
		    evq->ev_post = NULL;
		}
#endif
	    }
	    ev = evq->ev_ready;
	} while (ev);
/*
 * select backend: waits up to timeout (msec; TIMEOUT_INFINITE blocks) and
 * builds evq->ev_ready from descriptors in the read/write sets.  td, when
 * non-NULL, is the thread whose VM lock is released around select().
 * Returns 0 on success or interrupt, SYS_ERR_TIMEOUT when nothing fired,
 * -1 on select() error.
 */
EVQ_API int
evq_wait (struct event_queue *evq, struct sys_thread *td, msec_t timeout)
{
    struct event *ev_ready;
    fd_set work_readset = evq->readset;  /* select() mutates its sets; work on copies */
    fd_set work_writeset = evq->writeset;
    struct timeval tv, *tvp;
    struct event **events = evq->events;
    const int npolls = evq->npolls;
    int i, nready, max_fd;

    if (timeout != 0L) {
	/* clamp to the nearest timer deadline */
	timeout = timeout_get(evq->tq, timeout, evq->now);
	if (timeout == 0L) {
	    ev_ready = timeout_process(evq->tq, NULL, evq->now);
	    goto end;
	}
    }

    if (timeout == TIMEOUT_INFINITE)
	tvp = NULL;
    else {
	tv.tv_sec = timeout / 1000;
	tv.tv_usec = (timeout % 1000) * 1000;
	tvp = &tv;
    }

    max_fd = evq->max_fd;
    if (max_fd == -1) {
	/* recompute the cached highest descriptor */
	for (i = 1; i < npolls; ++i) {
	    struct event *ev = events[i];

	    if (max_fd < ev->fd)
		max_fd = ev->fd;
	}
	evq->max_fd = max_fd;
    }

    if (td) sys_vm2_leave(td);
    nready = select(max_fd + 1, &work_readset, &work_writeset, NULL, tvp);
    if (td) sys_vm2_enter(td);

    evq->now = sys_milliseconds();

    if (nready == -1)
	return (errno == EINTR) ? 0 : -1;

    ev_ready = evq->ev_ready;
    if (tvp) {
	if (!nready) {
	    /* nothing fired: expire timers, if any */
	    if (evq->tq) {
		struct event *ev = timeout_process(evq->tq, ev_ready, evq->now);

		if (ev != ev_ready) {
		    ev_ready = ev;
		    goto end;
		}
	    }
	    return SYS_ERR_TIMEOUT;
	}
	timeout = evq->now;
    }

    /* sig_fd[0] is the queue's interrupt pipe */
    if (FD_ISSET(evq->sig_fd[0], &work_readset)) {
	ev_ready = signal_process_interrupt(evq, ev_ready, timeout);
	--nready;
    }

    for (i = 1; i < npolls && nready; i++) {
	struct event *ev = events[i];
	unsigned int res = 0;

	if ((ev->flags & EVENT_READ) && FD_ISSET(ev->fd, &work_readset))
	    res |= EVENT_READ_RES;
	else if ((ev->flags & EVENT_WRITE) && FD_ISSET(ev->fd, &work_writeset))
	    res |= EVENT_WRITE_RES;
	if (!res) continue;

	ev->flags |= res;
	if (!(ev->flags & EVENT_ACTIVE)) {
	    ev->flags |= EVENT_ACTIVE;
	    if (ev->flags & EVENT_ONESHOT)
		evq_del(ev, 1);
	    else if (ev->tq && !(ev->flags & EVENT_TIMEOUT_MANUAL))
		timeout_reset(ev, timeout);

	    ev->next_ready = ev_ready;
	    ev_ready = ev;
	}
	--nready;
    }
    if (!ev_ready)
	return 0;
 end:
    evq->ev_ready = ev_ready;
    return 0;
}