void process_expired_events(struct npool *nsp) { for (;;) { gh_hnode_t *hnode; struct nevent *nse; hnode = gh_heap_min(&nsp->expirables); if (!hnode) break; nse = container_of(hnode, struct nevent, expire); if (!event_timedout(nse)) break; gh_heap_pop(&nsp->expirables); process_event(nsp, NULL, nse, EV_NONE); assert(nse->event_done); update_first_events(nse); nevent_unref(nsp, nse); } }
/* Core event delivery: decide whether the readiness bits in `ev` and/or an
 * elapsed timeout complete event `nse`, invoke the matching
 * handle_*_result() routine, and, once the event is marked done, dispatch it
 * to its owner and delete it.
 *
 * nsp    - pool the event belongs to.
 * evlist - the event list currently being traversed (may be NULL when called
 *          from the timeout-expiration path); only consulted by the
 *          PCAP_BSD_SELECT_HACK twin-queue unlinking below.
 * nse    - event to process.
 * ev     - bitmask of EV_READ/EV_WRITE/EV_EXCEPT that fired, or EV_NONE. */
void process_event(struct npool *nsp, gh_list_t *evlist, struct nevent *nse, int ev) {
  int match_r = 0, match_w = 0;
#if HAVE_OPENSSL
  int desire_r = 0, desire_w = 0;
#endif

  nsock_log_debug_all("Processing event %lu (timeout in %ldms, done=%d)",
                      nse->id,
                      (long)TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod),
                      nse->event_done);

  if (!nse->event_done) {
    switch (nse->type) {
      case NSE_TYPE_CONNECT:
      case NSE_TYPE_CONNECT_SSL:
        /* Any readiness on a connecting socket means the connect attempt has
         * resolved one way or the other; the handler inspects the socket to
         * find out which. */
        if (ev != EV_NONE)
          handle_connect_result(nsp, nse, NSE_STATUS_SUCCESS);
        if (event_timedout(nse))
          handle_connect_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_READ:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        /* An SSL read can be blocked waiting for the socket to become either
         * readable or writable (e.g. during renegotiation); honor whichever
         * direction the SSL layer last asked for. */
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;

        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
        if (!nse->iod->ssl && match_r)
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_read_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_WRITE:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        /* Same as the read case: an SSL write may be waiting on either
         * direction of socket readiness. */
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;

        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
        if (!nse->iod->ssl && match_w)
          handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_write_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_TIMER:
        /* Timers "succeed" by expiring. */
        if (event_timedout(nse))
          handle_timer_result(nsp, nse, NSE_STATUS_SUCCESS);
        break;

#if HAVE_PCAP
      case NSE_TYPE_PCAP_READ:{
        nsock_log_debug_all("PCAP iterating %lu", nse->id);

        if (ev & EV_READ) {
          /* buffer empty? check it! */
          if (fs_length(&(nse->iobuf)) == 0)
            do_actual_pcap_read(nse);
        }

        /* if already received something */
        if (fs_length(&(nse->iobuf)) > 0)
          handle_pcap_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_pcap_read_result(nsp, nse, NSE_STATUS_TIMEOUT);

#if PCAP_BSD_SELECT_HACK
        /* If event occurred, and we're in BSD_HACK mode, then this event was added
         * to two queues. read_event and pcap_read_event
         * Of course we should destroy it only once.
         * I assume we're now in read_event, so just unlink this event from
         * pcap_read_event */
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && nse->event_done && evlist == &nsp->read_events) {
          /* event is done, list is read_events and we're in BSD_HACK mode.
           * So unlink event from pcap_read_events */
          update_first_events(nse);
          gh_list_remove(&nsp->pcap_read_events, &nse->nodeq_pcap);
          nsock_log_debug_all("PCAP NSE #%lu: Removing event from PCAP_READ_EVENTS", nse->id);
        }

        /* Mirror case: we were reached via pcap_read_events, so unlink the
         * twin node from read_events instead. */
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && nse->event_done && evlist == &nsp->pcap_read_events) {
          update_first_events(nse);
          gh_list_remove(&nsp->read_events, &nse->nodeq_io);
          nsock_log_debug_all("PCAP NSE #%lu: Removing event from READ_EVENTS", nse->id);
        }
#endif
        break;
      }
#endif

      default:
        fatal("Event has unknown type (%d)", nse->type);
    }
  }

  if (nse->event_done) {
    /* Security sanity check: don't return a functional SSL iod without
     * setting an SSL data structure. */
    if (nse->type == NSE_TYPE_CONNECT_SSL && nse->status == NSE_STATUS_SUCCESS)
      assert(nse->iod->ssl != NULL);

    nsock_log_debug_all("NSE #%lu: Sending event", nse->id);

    /* WooHoo! The event is ready to be sent */
    event_dispatch_and_delete(nsp, nse, 1);
  }
}
/* Adds an event to the appropriate nsp event list, handles housekeeping such as
 * adjusting the descriptor select/poll lists, registering the timeout value,
 * etc.
 *
 * If the event is already completed on entry (nse->event_done set), it is
 * dispatched to its owner immediately instead of being queued. */
void nsock_pool_add_event(struct npool *nsp, struct nevent *nse) {
  nsock_log_debug("NSE #%lu: Adding event (timeout in %ldms)",
                  nse->id,
                  (long)TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

  nsp->events_pending++;

  if (!nse->event_done && nse->timeout.tv_sec) {
    /* This event is expirable, add it to the queue */
    gh_heap_push(&nsp->expirables, &nse->expire);
  }

  /* Now we do the event type specific actions */
  switch (nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod);
        socket_count_write_inc(nse->iod);
        /* Watch all directions: connect completion can be signaled by
         * readability, writability or an exceptional condition. */
        update_events(nse->iod, nsp, EV_READ|EV_WRITE|EV_EXCEPT, EV_NONE);
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_READ:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod);
        update_events(nse->iod, nsp, EV_READ, EV_NONE);
#if HAVE_OPENSSL
        /* Record which direction the SSL layer is waiting on; process_event()
         * matches readiness against this. */
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_READ;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_WRITE:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_write_inc(nse->iod);
        update_events(nse->iod, nsp, EV_WRITE, EV_NONE);
#if HAVE_OPENSSL
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_WRITE;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_TIMER:
      /* nothing to do -- timers are driven purely by the expirables heap */
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ: {
      mspcap *mp = (mspcap *)nse->iod->pcap;

      assert(mp);
      if (mp->pcap_desc >= 0) { /* pcap descriptor present */
        if (!nse->event_done) {
          socket_count_readpcap_inc(nse->iod);
          update_events(nse->iod, nsp, EV_READ, EV_NONE);
        }
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to READ_EVENTS", nse->id);

#if PCAP_BSD_SELECT_HACK
        /* when using BSD hack we must do pcap_next() after select().
         * Let's insert this pcap to both queues, selectable and nonselectable.
         * This will result in doing pcap_next_ex() just before select() */
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
#endif
      } else {
        /* pcap isn't selectable. Add it to pcap-specific queue. */
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
      }
      iod_add_event(nse->iod, nse);
      break;
    }
#endif

    default:
      fatal("Unknown nsock event type (%d)", nse->type);
  }

  /* It can happen that the event already completed. In which case we can
   * already deliver it, even though we're probably not inside nsock_loop(). */
  if (nse->event_done) {
    event_dispatch_and_delete(nsp, nse, 1);
    update_first_events(nse);
    nevent_unref(nsp, nse);
  }
}
/* Process every pending event attached to IOD `nsi` against the readiness
 * bitmask `ev`, walking the per-type event lists in the fixed order
 * connect => read => write => pcap-read.  Completed events are unlinked from
 * their list, recycled onto nsp->free_events, and removed from the
 * expirables heap. */
void process_iod_events(struct npool *nsp, struct niod *nsi, int ev) {
  int i = 0;
  /* store addresses of the pointers to the first elements of each kind instead
   * of storing the values, as a connect can add a read for instance */
  gh_lnode_t **start_elems[] = {
    &nsi->first_connect,
    &nsi->first_read,
    &nsi->first_write,
#if HAVE_PCAP
    &nsi->first_pcap_read,
#endif
    NULL
  };
  gh_list_t *evlists[] = {
    &nsp->connect_events,
    &nsp->read_events,
    &nsp->write_events,
#if HAVE_PCAP
    &nsp->pcap_read_events,
#endif
    NULL
  };

  assert(nsp == nsi->nsp);
  nsock_log_debug_all("Processing events on IOD %lu (ev=%d)", nsi->id, ev);

  /* We keep the events separate because we want to handle them in the
   * order: connect => read => write => timer for several reasons:
   *
   *  1) Makes sure we have gone through all the net i/o events before
   *     a timer expires (would be a shame to timeout after the data was
   *     available but before we delivered the events
   *
   *  2) The connect() results often lead to a read or write that can be
   *     processed in the same cycle.  In the same way, read() often
   *     leads to write().
   */
  for (i = 0; evlists[i] != NULL; i++) {
    gh_lnode_t *current, *next, *last;

    /* for each list, get the last event and don't look past it as an event
     * could add another event in the same list and so on... */
    last = gh_list_last_elem(evlists[i]);

    for (current = *start_elems[i];
         current != NULL && gh_lnode_prev(current) != last;
         current = next) {
      struct nevent *nse;

#if HAVE_PCAP
      /* pcap-read events are linked through a different list node member, so
       * they need the alternate node-to-event conversion. */
      if (evlists[i] == &nsi->nsp->pcap_read_events)
        nse = lnode_nevent2(current);
      else
#endif
        nse = lnode_nevent(current);

      /* events are grouped by IOD. Break if we're done with the events for the
       * current IOD */
      if (nse->iod != nsi)
        break;

      process_event(nsp, evlists[i], nse, ev);
      /* fetch the successor only after process_event(), which may have
       * unlinked `current` (e.g. the PCAP_BSD_SELECT_HACK path) */
      next = gh_lnode_next(current);

      if (nse->event_done) {
        /* event is done, remove it from the event list and update IOD pointers
         * to the first events of each kind */
        update_first_events(nse);
        gh_list_remove(evlists[i], current);
        gh_list_append(&nsp->free_events, &nse->nodeq_io);

        if (nse->timeout.tv_sec)
          gh_heap_remove(&nsp->expirables, &nse->expire);
      }
    }
  }
}
/* Deliver one event (older mspool-based variant): check whether the readiness
 * bits in `ev` and/or an elapsed timeout complete event `nse`, invoke the
 * matching handle_*_result() routine, then either dispatch-and-delete the
 * finished event or fold its timeout into nsp->next_ev so the main loop knows
 * when to wake up next.
 *
 * evlist is the event list currently being traversed; it is only consulted by
 * the PCAP_BSD_SELECT_HACK twin-queue unlinking below. */
void process_event(mspool *nsp, gh_list *evlist, msevent *nse, int ev) {
  int match_r = 0, match_w = 0;
#if HAVE_OPENSSL
  int desire_r = 0, desire_w = 0;
#endif

  if (nsp->tracelevel > 7)
    nsock_trace(nsp, "Processing event %lu", nse->id);

  if (!nse->event_done) {
    switch(nse->type) {
      case NSE_TYPE_CONNECT:
      case NSE_TYPE_CONNECT_SSL:
        /* Any readiness on a connecting socket means the connect attempt has
         * resolved; the handler inspects the socket to find out how. */
        if (ev != EV_NONE)
          handle_connect_result(nsp, nse, NSE_STATUS_SUCCESS);
        /* timeout.tv_sec == 0 means "no timeout"; otherwise a deadline at or
         * before the cached current time (nsock_tod) has expired */
        if (!nse->event_done && nse->timeout.tv_sec && !TIMEVAL_AFTER(nse->timeout, nsock_tod))
          handle_connect_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_READ:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        /* An SSL read can be blocked on the socket becoming either readable
         * or writable (e.g. renegotiation); honor whichever direction the SSL
         * layer last asked for. */
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;

        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
        if (!nse->iod->ssl && match_r)
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (!nse->event_done && nse->timeout.tv_sec && !TIMEVAL_AFTER(nse->timeout, nsock_tod))
          handle_read_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_WRITE:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        /* Same as the read case: an SSL write may be waiting on either
         * direction of socket readiness. */
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;

        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
        if (!nse->iod->ssl && match_w)
          handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (!nse->event_done && nse->timeout.tv_sec && !TIMEVAL_AFTER(nse->timeout, nsock_tod))
          handle_write_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_TIMER:
        /* Timers "succeed" by expiring. */
        if (nse->timeout.tv_sec && !TIMEVAL_AFTER(nse->timeout, nsock_tod))
          handle_timer_result(nsp, nse, NSE_STATUS_SUCCESS);
        break;

#if HAVE_PCAP
      case NSE_TYPE_PCAP_READ:{
        if (nsp->tracelevel > 5)
          nsock_trace(nsp, "PCAP iterating %lu", nse->id);

        if (ev & EV_READ) {
          /* buffer empty? check it! */
          if (FILESPACE_LENGTH(&(nse->iobuf)) == 0)
            do_actual_pcap_read(nse);
        }

        /* if something was already received, deliver it */
        if (FILESPACE_LENGTH(&(nse->iobuf)) > 0)
          handle_pcap_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (!nse->event_done && nse->timeout.tv_sec && !TIMEVAL_AFTER(nse->timeout, nsock_tod))
          handle_pcap_read_result(nsp, nse, NSE_STATUS_TIMEOUT);

#if PCAP_BSD_SELECT_HACK
        /* If event occurred, and we're in BSD_HACK mode, then this event was added
         * to two queues. read_event and pcap_read_event
         * Of course we should destroy it only once.
         * I assume we're now in read_event, so just unlink this event from
         * pcap_read_event */
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && nse->event_done && evlist == &nsp->read_events) {
          /* event is done, list is read_events and we're in BSD_HACK mode.
           * So unlink event from pcap_read_events */
          update_first_events(nse);
          gh_list_remove(&nsp->pcap_read_events, nse);
          if (nsp->tracelevel > 8)
            nsock_trace(nsp, "PCAP NSE #%lu: Removing event from PCAP_READ_EVENTS", nse->id);
        }

        /* Mirror case: reached via pcap_read_events, unlink from read_events */
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && nse->event_done && evlist == &nsp->pcap_read_events) {
          update_first_events(nse);
          gh_list_remove(&nsp->read_events, nse);
          if (nsp->tracelevel > 8)
            nsock_trace(nsp, "PCAP NSE #%lu: Removing event from READ_EVENTS", nse->id);
        }
#endif
        break;
      }
#endif

      default:
        fatal("Event has unknown type (%d)", nse->type);
        break; /* unreached */
    }
  }

  if (nse->event_done) {
    /* Security sanity check: don't return a functional SSL iod without setting an SSL data structure. */
    if (nse->type == NSE_TYPE_CONNECT_SSL && nse->status == NSE_STATUS_SUCCESS)
      assert(nse->iod->ssl != NULL);

    if (nsp->tracelevel > 8)
      nsock_trace(nsp, "NSE #%lu: Sending event", nse->id);

    /* WooHoo! The event is ready to be sent */
    msevent_dispatch_and_delete(nsp, nse, 1);
  } else {
    /* Is this event the next-to-timeout?  If so, record its deadline in
     * nsp->next_ev (tv_sec == 0 there means "no deadline recorded yet"). */
    if (nse->timeout.tv_sec != 0) {
      if (nsp->next_ev.tv_sec == 0)
        nsp->next_ev = nse->timeout;
      else if (TIMEVAL_AFTER(nsp->next_ev, nse->timeout))
        nsp->next_ev = nse->timeout;
    }
  }
}
/* Process every pending event attached to IOD `nsi` against the readiness
 * bitmask `ev` (older mspool-based variant), walking the per-type event lists
 * in the fixed order connect => read => write => pcap-read.  Completed events
 * are unlinked from their list. */
void process_iod_events(mspool *nsp, msiod *nsi, int ev) {
  int i = 0;
  /* store addresses of the pointers to the first elements of each kind instead
   * of storing the values, as a connect can add a read for instance */
  gh_list_elem **start_elems[] = {
    &nsi->first_connect,
    &nsi->first_read,
    &nsi->first_write,
#if HAVE_PCAP
    &nsi->first_pcap_read,
#endif
    NULL
  };
  gh_list *evlists[] = {
    &nsi->nsp->connect_events,
    &nsi->nsp->read_events,
    &nsi->nsp->write_events,
#if HAVE_PCAP
    &nsi->nsp->pcap_read_events,
#endif
    NULL
  };

  /* We keep the events separate because we want to handle them in the
   * order: connect => read => write => timer for several reasons:
   *
   *  1) Makes sure we have gone through all the net i/o events before
   *     a timer expires (would be a shame to timeout after the data was
   *     available but before we delivered the events
   *
   *  2) The connect() results often lead to a read or write that can be
   *     processed in the same cycle.  In the same way, read() often
   *     leads to write().
   */
  for (i = 0; start_elems[i] != NULL; i++) {
    gh_list_elem *current, *next, *last;

    /* for each list, get the last event and don't look past it as an event could
     * add another event in the same list and so on... */
    last = GH_LIST_LAST_ELEM(evlists[i]);

    for (current = *start_elems[i];
         current != NULL && GH_LIST_ELEM_PREV(current) != last;
         current = next) {
      msevent *nse = (msevent *)GH_LIST_ELEM_DATA(current);

      /* events are grouped by IOD. Break if we're done with the events for the
       * current IOD */
      if (nse->iod != nsi)
        break;

      process_event(nsp, evlists[i], nse, ev);
      /* fetch the successor only after process_event(), which may have
       * unlinked `current` (e.g. the PCAP_BSD_SELECT_HACK path) */
      next = GH_LIST_ELEM_NEXT(current);

      if (nse->event_done) {
        /* event is done, remove it from the event list and update IOD pointers
         * to the first events of each kind */
        update_first_events(nse);
        gh_list_remove_elem(evlists[i], current);
      }
    }
  }
}
/* An internal function for cancelling an event when you already have a pointer
 * to the msevent (use nsock_event_cancel if you just have an ID). The
 * event_list passed in should correspond to the type of the event. For example,
 * with NSE_TYPE_READ, you would pass in &nsp->read_events;. elem is the list
 * element in event_list which holds the event. Pass a nonzero for notify if
 * you want the program owning the event to be notified that it has been
 * cancelled.
 *
 * Returns 0 if the event was already finished (nothing to cancel), 1 once it
 * has been cancelled, unlinked and dispatched. */
int msevent_cancel(mspool *nsp, msevent *nse, gh_list_t *event_list, gh_lnode_t *elem, int notify) {
  if (nse->event_done) {
    /* This event has already been marked for death somewhere else -- it will be
     * gone soon (and if we try to kill it now all hell will break loose due to
     * reentrancy. */
    return 0;
  }

  /* nse->id is printed with %lu everywhere else in this file; the previous
   * "%li" specifier was a signed/unsigned format mismatch.
   * NOTE(review): the nsock_log_* calls here pass nsp as the first argument,
   * unlike process_event() above -- confirm which logging signature this
   * revision's header declares. */
  nsock_log_info(nsp, "msevent_cancel on event #%lu (type %s)",
                 nse->id, nse_type2str(nse->type));

  /* Now that we found the event... we go through the motions of cleanly
   * cancelling it: the type-specific handler marks it done with status
   * NSE_STATUS_CANCELLED and releases any type-specific resources. */
  switch (nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      handle_connect_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

    case NSE_TYPE_READ:
      handle_read_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

    case NSE_TYPE_WRITE:
      handle_write_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

    case NSE_TYPE_TIMER:
      handle_timer_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ:
      handle_pcap_read_result(nsp, nse, NSE_STATUS_CANCELLED);
      break;
#endif

    default:
      fatal("Invalid nsock event type (%d)", nse->type);
  }

  assert(nse->event_done);

  /* tv_sec != 0 means the event was registered on the expirables heap. */
  if (nse->timeout.tv_sec)
    gh_heap_remove(&nsp->expirables, &nse->expire);

  if (event_list) {
    update_first_events(nse);
    gh_list_remove(event_list, elem);
  }

  /* Recycle the event structure rather than freeing it. */
  gh_list_append(&nsp->free_events, &nse->nodeq_io);

  nsock_log_debug_all(nsp, "NSE #%lu: Removing event from list", nse->id);

#if HAVE_PCAP
#if PCAP_BSD_SELECT_HACK
  if (nse->type == NSE_TYPE_PCAP_READ) {
    nsock_log_debug_all(nsp, "PCAP NSE #%lu: CANCEL TEST pcap=%p read=%p curr=%p sd=%i",
                        nse->id, &nsp->pcap_read_events, &nsp->read_events,
                        event_list, ((mspcap *)nse->iod->pcap)->pcap_desc);

    /* If event occurred, and we're in BSD_HACK mode, then this event was added to
     * two queues. read_event and pcap_read_event Of course we should
     * destroy it only once. I assume we're now in read_event, so just unlink
     * this event from pcap_read_event */
    if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && event_list == &nsp->read_events) {
      /* event is done, list is read_events and we're in BSD_HACK mode. So unlink
       * event from pcap_read_events */
      gh_list_remove(&nsp->pcap_read_events, &nse->nodeq_pcap);
      nsock_log_debug_all(nsp, "PCAP NSE #%lu: Removing event from PCAP_READ_EVENTS", nse->id);
    }

    if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && event_list == &nsp->pcap_read_events) {
      /* event is done, list is read_events and we're in BSD_HACK mode.
       * So unlink event from read_events */
      gh_list_remove(&nsp->read_events, &nse->nodeq_io);
      nsock_log_debug_all(nsp, "PCAP NSE #%lu: Removing event from READ_EVENTS", nse->id);
    }
  }
#endif
#endif

  msevent_dispatch_and_delete(nsp, nse, notify);
  return 1;
}