/* Iterate through all the event lists (such as connect_events, read_events, * timer_events, etc) and take action for those that have completed (due to * timeout, i/o, etc) */ void iterate_through_event_lists(mspool *nsp, int evcount) {
  int n;
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  msiod *nsi;

  /* Pass 1: deliver I/O readiness to each IOD referenced by a fired kevent.
   * kev->udata carries the msiod pointer that was stashed at EV_SET time.
   * The IOD_PROCESSED flag records that this IOD was handled this round;
   * the same IOD can appear under several kevents (e.g. one read and one
   * write filter), so the flag is what keeps pass 2 from recycling it twice. */
  for (n = 0; n < evcount; n++) {
    struct kevent *kev = &kinfo->events[n];

    nsi = (msiod *)kev->udata;
    /* process all the pending events for this IOD */
    process_iod_events(nsp, nsi, get_evmask(nsi, kev));
    IOD_PROPSET(nsi, IOD_PROCESSED);
  }

  /* Pass 2: move IODs that were deleted during event processing from the
   * active list to the free list.  Recycling is deferred to a second pass so
   * that an IOD deleted while handling kevent[i] is not touched as a dangling
   * pointer when a later kevent[j] for the same IOD is processed.  Clearing
   * IOD_PROCESSED on the first matching kevent guarantees the list move
   * happens exactly once per IOD. */
  for (n = 0; n < evcount; n++) {
    struct kevent *kev = &kinfo->events[n];

    nsi = (msiod *)kev->udata;
    if (nsi->state == NSIOD_STATE_DELETED) {
      if (IOD_PROPGET(nsi, IOD_PROCESSED)) {
        IOD_PROPCLR(nsi, IOD_PROCESSED);
        gh_list_remove(&nsp->active_iods, &nsi->nodeq);
        gh_list_prepend(&nsp->free_iods, &nsi->nodeq);
      }
    }
  }

  /* iterate through timers and expired events */
  process_expired_events(nsp);
}
/* Update the events that the IO engine should watch for a given IOD. * * ev_inc is a set of events for which the event counters should be increased. * These events will therefore be watched by the IO engine for this IOD. * * ev_dec is a set of events for which the event counters should be decreased. * If this counter reaches zero, the event won't be watched anymore by the * IO engine for this IOD. */ static void update_events(struct niod * iod, struct npool *ms, int ev_inc, int ev_dec) { int setmask, clrmask, ev_temp; /* Filter out events that belong to both sets. */ ev_temp = ev_inc ^ ev_dec; ev_inc = ev_inc & ev_temp; ev_dec = ev_dec & ev_temp; setmask = ev_inc; clrmask = EV_NONE; if ((ev_dec & EV_READ) && #if HAVE_PCAP !iod->readpcapsd_count && #endif !iod->readsd_count) clrmask |= EV_READ; if ((ev_dec & EV_WRITE) && !iod->writesd_count) clrmask |= EV_WRITE; if (ev_dec & EV_EXCEPT) clrmask |= EV_EXCEPT; if (!IOD_PROPGET(iod, IOD_REGISTERED)) { assert(clrmask == EV_NONE); nsock_engine_iod_register(ms, iod, setmask); } else { nsock_engine_iod_modify(ms, iod, setmask, clrmask); } }
/* Detach an IOD from the select(2) engine: clear its descriptor from every
 * master and result fd_set and drop the IOD_REGISTERED flag.
 * Always returns 1. */
int select_iod_unregister(mspool *nsp, msiod *iod) {
  struct select_engine_info *sinfo = (struct select_engine_info *)nsp->engine_data;

  iod->watched_events = EV_NONE;

  /* some IODs can be unregistered here if they're associated to an event that was
   * immediately completed */
  if (IOD_PROPGET(iod, IOD_REGISTERED)) {
#if HAVE_PCAP
    if (iod->pcap) {
      /* pcap IODs are only ever watched for readability. */
      int pcap_sd = ((mspcap *)iod->pcap)->pcap_desc;

      if (pcap_sd >= 0) {
        CHECKED_FD_CLR(pcap_sd, &sinfo->fds_master_r);
        CHECKED_FD_CLR(pcap_sd, &sinfo->fds_results_r);
      }
    } else
#endif
    {
      /* Scrub the descriptor from all six sets in one pass. */
      fd_set *all_sets[] = {
        &sinfo->fds_master_r, &sinfo->fds_master_w, &sinfo->fds_master_x,
        &sinfo->fds_results_r, &sinfo->fds_results_w, &sinfo->fds_results_x
      };
      size_t k;

      for (k = 0; k < sizeof(all_sets) / sizeof(all_sets[0]); k++)
        CHECKED_FD_CLR(iod->sd, all_sets[k]);
    }

    if (sinfo->max_sd == iod->sd)
      sinfo->max_sd--;

    IOD_PROPCLR(iod, IOD_REGISTERED);
  }
  return 1;
}
/* Add (ev_set) and/or remove (ev_clr) watched events for a registered IOD on
 * the kqueue engine.  The two masks must be disjoint.  Returns 1; aborts via
 * fatal() if kevent(2) rejects the change list. */
int kqueue_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr) {
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  struct kevent changes[2];
  int nchanges = 0;
  int updated;

  assert((ev_set & ev_clr) == 0);
  assert(IOD_PROPGET(iod, IOD_REGISTERED));

  updated = (iod->watched_events | ev_set) & ~ev_clr;
  if (updated == iod->watched_events)
    return 1; /* nothing to do */

  /* One kevent change entry per filter whose state is touched.  EV_SETFLAG
   * picks the enable/disable flag depending on whether the event is in
   * ev_set. */
  if ((ev_set ^ ev_clr) & EV_READ) {
    EV_SET(&changes[nchanges], nsi_getsd(iod), EVFILT_READ, EV_SETFLAG(ev_set, EV_READ), 0, 0, (void *)iod);
    nchanges++;
  }
  if ((ev_set ^ ev_clr) & EV_WRITE) {
    EV_SET(&changes[nchanges], nsi_getsd(iod), EVFILT_WRITE, EV_SETFLAG(ev_set, EV_WRITE), 0, 0, (void *)iod);
    nchanges++;
  }

  if (nchanges > 0 && kevent(kinfo->kqfd, changes, nchanges, NULL, 0, NULL) < 0)
    fatal("Unable to update events for IOD #%lu: %s", iod->id, strerror(errno));

  iod->watched_events = updated;
  return 1;
}
int poll_iod_modify(struct npool *nsp, struct niod *iod, int ev_set, int ev_clr) { int sd; int new_events; struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data; assert((ev_set & ev_clr) == 0); assert(IOD_PROPGET(iod, IOD_REGISTERED)); new_events = iod->watched_events; new_events |= ev_set; new_events &= ~ev_clr; if (new_events == iod->watched_events) return 1; /* nothing to do */ iod->watched_events = new_events; sd = nsi_getsd(iod); pinfo->events[sd].fd = sd; pinfo->events[sd].events = 0; /* regenerate the current set of events for this IOD */ if (iod->watched_events & EV_READ) pinfo->events[sd].events |= POLL_R_FLAGS; if (iod->watched_events & EV_WRITE) pinfo->events[sd].events |= POLL_W_FLAGS; #ifndef WIN32 if (iod->watched_events & EV_EXCEPT) pinfo->events[sd].events |= POLL_X_FLAGS; #endif return 1; }
/* Translate the poll(2) revents reported for an IOD's descriptor into the
 * engine-neutral EV_* mask.  Returns EV_NONE when the IOD is deleted, has no
 * pending events, or is not registered with the engine. */
static inline int get_evmask(struct npool *nsp, struct niod *nsi) {
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
  int sd, evmask = EV_NONE;
  POLLFD *pev;

  if (nsi->state != NSIOD_STATE_DELETED
      && nsi->events_pending
      && IOD_PROPGET(nsi, IOD_REGISTERED)) {
    /* Pick the descriptor the engine is actually polling: the pcap
     * descriptor for pcap IODs, the regular socket otherwise. */
#if HAVE_PCAP
    if (nsi->pcap)
      sd = ((mspcap *)nsi->pcap)->pcap_desc;
    else
#endif
      sd = nsi->sd;

    /* The pollfd table is indexed by descriptor number. */
    assert(sd < pinfo->capacity);
    pev = &pinfo->events[sd];

    if (pev->revents & POLL_R_FLAGS)
      evmask |= EV_READ;
    if (pev->revents & POLL_W_FLAGS)
      evmask |= EV_WRITE;
    /* Error conditions (POLL_X_FLAGS) are fanned out to every event type so
     * each pending handler gets a chance to observe the failure.
     * NOTE(review): the `pev->events &&` guard only reports exceptions when
     * some event is still being watched for this fd — presumably to ignore
     * stale revents on a slot whose watch mask was already cleared; confirm
     * against the other engines' get_evmask implementations. */
    if (pev->events && (pev->revents & POLL_X_FLAGS))
      evmask |= (EV_READ | EV_WRITE | EV_EXCEPT);
  }
  return evmask;
}
int poll_iod_register(struct npool *nsp, struct niod *iod, int ev) { struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data; int sd; assert(!IOD_PROPGET(iod, IOD_REGISTERED)); iod->watched_events = ev; sd = nsi_getsd(iod); while (pinfo->capacity < sd + 1) evlist_grow(pinfo); pinfo->events[sd].fd = sd; pinfo->events[sd].events = 0; pinfo->events[sd].revents = 0; pinfo->max_fd = MAX(pinfo->max_fd, sd); if (ev & EV_READ) pinfo->events[sd].events |= POLL_R_FLAGS; if (ev & EV_WRITE) pinfo->events[sd].events |= POLL_W_FLAGS; #ifndef WIN32 if (ev & EV_EXCEPT) pinfo->events[sd].events |= POLL_X_FLAGS; #endif IOD_PROPSET(iod, IOD_REGISTERED); return 1; }
/* Register a new IOD with the epoll engine, watching the events in ev.
 * The IOD must not already be registered.  Returns 1; aborts via fatal() if
 * epoll_ctl(2) rejects the descriptor. */
int epoll_iod_register(mspool *nsp, msiod *iod, int ev) {
  struct epoll_engine_info *einfo = (struct epoll_engine_info *)nsp->engine_data;
  struct epoll_event eev;
  int fd;

  assert(!IOD_PROPGET(iod, IOD_REGISTERED));

  iod->watched_events = ev;

  /* Edge-triggered mode; data.ptr maps fired events back to this IOD. */
  memset(&eev, 0x00, sizeof(struct epoll_event));
  eev.events = EPOLLET;
  eev.data.ptr = (void *)iod;

  if (ev & EV_READ)
    eev.events |= EPOLL_R_FLAGS;
  if (ev & EV_WRITE)
    eev.events |= EPOLL_W_FLAGS;
  if (ev & EV_EXCEPT)
    eev.events |= EPOLL_X_FLAGS;

  fd = nsi_getsd(iod);
  if (epoll_ctl(einfo->epfd, EPOLL_CTL_ADD, fd, &eev) < 0)
    fatal("Unable to register IOD #%lu: %s", iod->id, strerror(errno));

  IOD_PROPSET(iod, IOD_REGISTERED);
  return 1;
}
/* Register a new IOD with the select(2) engine, watching the events in ev.
 * The IOD must not already be registered.  Returns 1. */
int select_iod_register(mspool *nsp, msiod *iod, int ev) {
  assert(!IOD_PROPGET(iod, IOD_REGISTERED));

  iod->watched_events = ev;
  /* Delegate the fd_set bookkeeping to the modify routine, starting from an
   * empty watch set. */
  select_iod_modify(nsp, iod, ev, EV_NONE);
  IOD_PROPSET(iod, IOD_REGISTERED);
  return 1;
}
/* Iterate through all the event lists (such as connect_events, read_events, * timer_events, etc) and take action for those that have completed (due to * timeout, i/o, etc) */ void iterate_through_event_lists(mspool *nsp, int evcount) {
  int n;
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  gh_list_elem *current, *next, *last, *timer_last;
  msevent *nse;
  msiod *nsi;

  /* Clear it -- We will find the next event as we go through the list */
  nsp->next_ev.tv_sec = 0;

  /* Snapshot the tails of both lists: elements appended while we iterate
   * (e.g. by event handlers) are deliberately left for the next round. */
  last = GH_LIST_LAST_ELEM(&nsp->active_iods);
  timer_last = GH_LIST_LAST_ELEM(&nsp->timer_events);

  /* Pass 1: deliver I/O readiness to every IOD referenced by a fired kevent
   * (kev->udata carries the msiod pointer) and mark it IOD_PROCESSED so the
   * list sweep below does not process it a second time. */
  for (n = 0; n < evcount; n++) {
    struct kevent *kev = &kinfo->events[n];

    nsi = (msiod *)kev->udata;
    /* process all the pending events for this IOD */
    process_iod_events(nsp, nsi, get_evmask(nsi, kev));
    IOD_PROPSET(nsi, IOD_PROCESSED);
  }

  current = GH_LIST_FIRST_ELEM(&nsp->active_iods);
  /* cull timeouts amongst the non active IODs */
  /* The GH_LIST_ELEM_PREV(current) != last test stops the walk just past the
   * element that was the tail when the loop started. */
  while (current != NULL && GH_LIST_ELEM_PREV(current) != last) {
    msiod *nsi = (msiod *)GH_LIST_ELEM_DATA(current);

    if (IOD_PROPGET(nsi, IOD_PROCESSED))
      /* Already handled in pass 1 above; just clear the mark. */
      IOD_PROPCLR(nsi, IOD_PROCESSED);
    else if (nsi->state != NSIOD_STATE_DELETED && nsi->events_pending)
      /* No I/O fired for this IOD: give it a chance to expire timeouts. */
      process_iod_events(nsp, nsi, EV_NONE);

    /* Fetch the successor before a possible removal invalidates current. */
    next = GH_LIST_ELEM_NEXT(current);
    if (nsi->state == NSIOD_STATE_DELETED) {
      /* Recycle the deleted IOD onto the free list.
       * NOTE(review): this passes nsi itself to gh_list_prepend while the
       * kqueue-side variant of this function prepends &nsi->nodeq —
       * presumably a different gh_list API generation; verify which
       * signature this file's gh_list_prepend expects. */
      gh_list_remove_elem(&nsp->active_iods, current);
      gh_list_prepend(&nsp->free_iods, nsi);
    }
    current = next;
  }

  /* iterate through timers */
  /* Same snapshot-bounded walk as above, over the timer event list. */
  for (current = GH_LIST_FIRST_ELEM(&nsp->timer_events);
       current != NULL && GH_LIST_ELEM_PREV(current) != timer_last;
       current = next) {
    nse = (msevent *)GH_LIST_ELEM_DATA(current);

    process_event(nsp, &nsp->timer_events, nse, EV_NONE);

    /* Fetch the successor before possibly unlinking current. */
    next = GH_LIST_ELEM_NEXT(current);
    if (nse->event_done)
      gh_list_remove_elem(&nsp->timer_events, current);
  }
}
/* Detach an IOD from the kqueue engine: drop both filters, clear the
 * registered flag and shrink maxfd when possible.  Safe to call on an IOD
 * that is already unregistered.  Returns 1. */
int kqueue_iod_unregister(mspool *nsp, msiod *iod) {
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;

  /* some IODs can be unregistered here if they're associated to an event that was
   * immediately completed */
  if (IOD_PROPGET(iod, IOD_REGISTERED)) {
    /* Remove both read and write interest from the kqueue. */
    kqueue_iod_modify(nsp, iod, EV_NONE, EV_READ|EV_WRITE);
    IOD_PROPCLR(iod, IOD_REGISTERED);

    if (kinfo->maxfd == nsi_getsd(iod))
      kinfo->maxfd--;
  }
  iod->watched_events = EV_NONE;
  return 1;
}
/* Register a new IOD with the kqueue engine, watching the events in ev.
 * The IOD must not already be registered.  Returns 1. */
int kqueue_iod_register(mspool *nsp, msiod *iod, int ev) {
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;
  int fd;

  assert(!IOD_PROPGET(iod, IOD_REGISTERED));

  /* Flag the IOD as registered first — kqueue_iod_modify asserts it — then
   * install the requested events starting from an empty watch set. */
  IOD_PROPSET(iod, IOD_REGISTERED);
  iod->watched_events = EV_NONE;
  kqueue_iod_modify(nsp, iod, ev, EV_NONE);

  fd = nsi_getsd(iod);
  if (fd > kinfo->maxfd)
    kinfo->maxfd = fd;

  return 1;
}
/* Detach an IOD from the epoll engine.  Safe to call on an IOD that is
 * already unregistered.  Returns 1. */
int epoll_iod_unregister(mspool *nsp, msiod *iod) {
  iod->watched_events = EV_NONE;

  /* some IODs can be unregistered here if they're associated to an event that was
   * immediately completed */
  if (IOD_PROPGET(iod, IOD_REGISTERED)) {
    struct epoll_engine_info *einfo = (struct epoll_engine_info *)nsp->engine_data;

    /* Return value intentionally ignored. */
    epoll_ctl(einfo->epfd, EPOLL_CTL_DEL, nsi_getsd(iod), NULL);
    IOD_PROPCLR(iod, IOD_REGISTERED);
  }
  return 1;
}
/* Detach an IOD from the poll(2) engine: neutralize its pollfd slot and
 * lower max_fd if it owned it.  Safe to call on an IOD that is already
 * unregistered.  Returns 1. */
int poll_iod_unregister(struct npool *nsp, struct niod *iod) {
  iod->watched_events = EV_NONE;

  /* some IODs can be unregistered here if they're associated to an event that was
   * immediately completed */
  if (IOD_PROPGET(iod, IOD_REGISTERED)) {
    struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;
    int fd = nsi_getsd(iod);

    /* An fd of -1 makes poll() skip this table slot. */
    pinfo->events[fd].fd = -1;
    pinfo->events[fd].events = 0;
    pinfo->events[fd].revents = 0;

    if (fd == pinfo->max_fd)
      lower_max_fd(pinfo);

    IOD_PROPCLR(iod, IOD_REGISTERED);
  }
  return 1;
}
/* Add (ev_set) and/or remove (ev_clr) watched events for a registered IOD on
 * the epoll engine.  The two masks must be disjoint.  Returns 1; aborts via
 * fatal() if epoll_ctl(2) rejects the modification. */
int epoll_iod_modify(mspool *nsp, msiod *iod, int ev_set, int ev_clr) {
  struct epoll_engine_info *einfo = (struct epoll_engine_info *)nsp->engine_data;
  struct epoll_event eev;
  int updated;

  assert((ev_set & ev_clr) == 0);
  assert(IOD_PROPGET(iod, IOD_REGISTERED));

  updated = (iod->watched_events | ev_set) & ~ev_clr;
  if (updated == iod->watched_events)
    return 1; /* nothing to do */

  iod->watched_events = updated;

  /* Rebuild the full epoll event mask for this IOD (edge-triggered;
   * data.ptr maps fired events back to the IOD). */
  memset(&eev, 0x00, sizeof(struct epoll_event));
  eev.events = EPOLLET;
  eev.data.ptr = (void *)iod;

  if (updated & EV_READ)
    eev.events |= EPOLL_R_FLAGS;
  if (updated & EV_WRITE)
    eev.events |= EPOLL_W_FLAGS;
  if (updated & EV_EXCEPT)
    eev.events |= EPOLL_X_FLAGS;

  if (epoll_ctl(einfo->epfd, EPOLL_CTL_MOD, nsi_getsd(iod), &eev) < 0)
    fatal("Unable to update events for IOD #%lu: %s", iod->id, strerror(errno));

  return 1;
}