static int handle_state_initial(struct npool *nsp, struct nevent *nse, void *udata) { struct proxy_chain_context *px_ctx = nse->iod->px_ctx; struct sockaddr_storage *ss; size_t sslen; unsigned short port; struct proxy_node *next; int timeout; px_ctx->px_state = PROXY_STATE_HTTP_TCP_CONNECTED; next = proxy_ctx_node_next(px_ctx); if (next) { ss = &next->ss; sslen = next->sslen; port = next->port; } else { ss = &px_ctx->target_ss; sslen = px_ctx->target_sslen; port = px_ctx->target_port; } timeout = TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod); nsock_printf(nsp, (nsock_iod)nse->iod, nsock_proxy_ev_dispatch, timeout, udata, "CONNECT %s:%d HTTP/1.1\r\n\r\n", inet_ntop_ez(ss, sslen), (int)port); nsock_readlines(nsp, (nsock_iod)nse->iod, nsock_proxy_ev_dispatch, timeout, udata, 1); return 0; }
static int _base_save_state(struct base *base) { struct timeval tv0, tv1; gettimeofday(&tv0, NULL); struct swriter *swriter = swriter_new(base->index_dir, STATE_FILENAME); if (swriter == NULL) { log_error(base->db, "Unable to write snapshot. %s", ""); return -1; } struct _save_ctx ctx = {swriter, logs_newest(base->logs)}; int r = logs_iterate(base->logs, _save_callback, &ctx); if (r != 0) { swriter_free(swriter, 0); log_error(base->db, "Unable to write snapshot. %s", ""); return -1; } r = swriter_free(swriter, 1); if (r == 0) { gettimeofday(&tv1, NULL); log_info(base->db, "Snapshot saved in %lu ms.", TIMEVAL_MSEC_SUBTRACT(tv1, tv0)); } else { log_error(base->db, "Unable to save snapshot.%s", ""); } return r; }
/* Here is the all important looping function that tells the event engine to
 * start up and begin processing events. It will continue until all events have
 * been delivered (including new ones started from event handlers), or the
 * msec_timeout is reached, or a major error has occurred. Use -1 if you don't
 * want to set a maximum time for it to run. A timeout of 0 will return after 1
 * non-blocking loop. The nsock loop can be restarted again after it returns.
 * For example you could do a series of 15 second runs, allowing you to do other
 * stuff between them.
 *
 * Returns one of NSOCK_LOOP_QUIT / NSOCK_LOOP_NOEVENTS / NSOCK_LOOP_TIMEOUT /
 * NSOCK_LOOP_ERROR; on EINVAL-style misuse (msec_timeout < -1) sets ms->errnum. */
enum nsock_loopstatus nsock_loop(nsock_pool nsp, int msec_timeout) {
  mspool *ms = (mspool *)nsp;
  struct timeval loop_timeout;   /* absolute deadline for the whole loop */
  int msecs_left;                /* budget handed to the engine each pass */
  unsigned long loopnum = 0;     /* pass counter; lets timeout 0 run once */
  enum nsock_loopstatus quitstatus = NSOCK_LOOP_ERROR;

  gettimeofday(&nsock_tod, NULL);

  if (msec_timeout < -1) {
    ms->errnum = EINVAL;
    return NSOCK_LOOP_ERROR;
  }
  /* Compute the absolute deadline up front; harmless when msec_timeout is -1
   * because the deadline is never consulted in that case (see below). */
  TIMEVAL_MSEC_ADD(loop_timeout, nsock_tod, msec_timeout);
  msecs_left = msec_timeout;

  if (ms->tracelevel > 2) {
    if (msec_timeout >= 0)
      nsock_trace(ms, "nsock_loop() started (timeout=%dms). %d events pending",
                  msec_timeout, ms->events_pending);
    else
      nsock_trace(ms, "nsock_loop() started (no timeout). %d events pending",
                  ms->events_pending);
  }

  while (1) {
    if (ms->quit) {
      /* We've been asked to quit the loop through nsock_loop_quit. */
      ms->quit = 0;
      quitstatus = NSOCK_LOOP_QUIT;
      break;
    }

    if (ms->events_pending == 0) {
      /* if no events at all are pending, then none can be created until
       * we quit nsock_loop() -- so we do that now. */
      quitstatus = NSOCK_LOOP_NOEVENTS;
      break;
    }

    if (msec_timeout >= 0) {
      msecs_left = MAX(0, TIMEVAL_MSEC_SUBTRACT(loop_timeout, nsock_tod));
      /* loopnum > 0 guarantees at least one engine pass even when
       * msec_timeout == 0 (the documented "one non-blocking loop"). */
      if (msecs_left == 0 && loopnum > 0) {
        quitstatus = NSOCK_LOOP_TIMEOUT;
        break;
      }
    }

    if (ms->engine->loop(ms, msecs_left) == -1) {
      quitstatus = NSOCK_LOOP_ERROR;
      break;
    }

    gettimeofday(&nsock_tod, NULL); /* we do this at end because there is one
                                     * at beginning of function */
    loopnum++;
  }

  return quitstatus;
}
/* Emit one formatted trace line to the pool's trace file, prefixed with the
 * elapsed time (in seconds, 4 decimal places) since the trace base time.
 * The pool must have a trace file configured (asserted). */
void nsock_trace(mspool *ms, char *fmt, ...) {
  va_list args;
  int elapsed_ms;
  FILE *out;

  assert(ms->tracefile != NULL);
  out = ms->tracefile;

  /* Milliseconds since tracing started; printed as fractional seconds. */
  elapsed_ms = TIMEVAL_MSEC_SUBTRACT(nsock_tod, ms->tracebasetime);

  va_start(args, fmt);
  fflush(out);
  fprintf(out, "NSOCK (%.4fs) ", elapsed_ms / 1000.0);
  vfprintf(out, fmt, args);
  fprintf(out, "\n");
  va_end(args);
}
/* Tear down a ydb handle: free the base (timed), optionally log how long
 * that took when `msg` is non-zero, then release the db and the handle. */
static void _ydb_close(struct ydb *ydb, int msg) {
  struct timeval before, after;

  gettimeofday(&before, NULL);
  base_free(ydb->base);
  gettimeofday(&after, NULL);

  if (msg) {
    log_info(ydb->db, "Closing YDB database (took %lu ms)",
             TIMEVAL_MSEC_SUBTRACT(after, before));
  }

  db_free(ydb->db);
  free(ydb);
}
/* Iterate over all items of `log` in sorted (hashdir) order, invoking
 * `callback(userdata, key, key_sz, value, value_sz)` for each.  Reads are
 * batched via _iterate_prefetch using `prefetch_size` bytes of lookahead.
 *
 * Fix: the per-item read buffer from malloc() was used unchecked; on OOM
 * we now abort the iteration and return -1 instead of dereferencing NULL.
 *
 * Returns 0 on success, the first non-zero callback/reader result on early
 * stop, or -1 on allocation failure. */
int log_iterate_sorted(struct log *log, uint64_t prefetch_size,
                       log_iterate_callback callback, void *userdata) {
  struct timeval tv0, tv1;
  gettimeofday(&tv0, NULL);
  struct hashdir *shd = hashdir_dup_sorted(log->hashdir);
  gettimeofday(&tv1, NULL);
  log_info(log->db, "Sorting index in log %llx took %5li ms.",
           (unsigned long long)log->log_number,
           TIMEVAL_MSEC_SUBTRACT(tv1, tv0));

  int hpos_max = hashdir_size(shd);
  /* Hashdir slots are 1-based; slot 0 is not a data slot. */
  int last_hpos = 1;
  int r = 0;
  int i;
  for (i = 1; i < hpos_max; i++) {
    if (i == last_hpos) {
      /* Ask the reader to prefetch the next window of items. */
      last_hpos = _iterate_prefetch(log, shd, last_hpos, prefetch_size);
    }
    struct hashdir_item hi = hashdir_get(shd, i);
    struct keyvalue kv;
    char *buf = malloc(hi.size);
    if (buf == NULL) {
      /* Out of memory: stop iterating and report failure to the caller. */
      r = -1;
      break;
    }
    r = reader_read(log->reader, hi.offset, buf, hi.size, &kv);
    if (r) {
      free(buf);
      break;
    }
    r = callback(userdata, kv.key, kv.key_sz, kv.value, kv.value_sz);
    free(buf);
    if (r) {
      break;
    }
  }
  hashdir_free(shd);
  return r;
}
struct ydb *ydb_open(const char *directory, struct ydb_options *options) { struct db *db = db_new(directory); if (db == NULL) { return NULL; } struct base *base = base_new(db, db_log_dir(db), db_index_dir(db), options); if (base == NULL) { db_free(db); return NULL; } log_info(db, "Opening YDB database \"%s\" by pid=%i.", directory, getpid()); linux_check_overcommit(db); struct ydb *ydb = malloc(sizeof(struct ydb)); ydb->db = db; ydb->base = base; struct timeval tv0, tv1; gettimeofday(&tv0, NULL); int r = base_load(ydb->base); if (r != 0) { _ydb_close(ydb, 0); return NULL; } base_print_stats(ydb->base); gettimeofday(&tv1, NULL); log_info(db, "YDB loaded %llu items in %.3f seconds.", (unsigned long long)base->used_size.count, (float)TIMEVAL_MSEC_SUBTRACT(tv1, tv0) / 1000.); return ydb; }
/* Deliver I/O readiness (bitmask `ev` of EV_READ/EV_WRITE/...) and timeout
 * notifications to a single event `nse`, then dispatch it to its handler if
 * it completed.  `evlist` identifies which queue we are currently walking —
 * only consulted by the BSD pcap select hack below. */
void process_event(struct npool *nsp, gh_list_t *evlist, struct nevent *nse, int ev) {
  int match_r = 0, match_w = 0;
#if HAVE_OPENSSL
  int desire_r = 0, desire_w = 0;
#endif

  nsock_log_debug_all("Processing event %lu (timeout in %ldms, done=%d)",
                      nse->id,
                      (long)TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod),
                      nse->event_done);

  if (!nse->event_done) {
    switch (nse->type) {
      case NSE_TYPE_CONNECT:
      case NSE_TYPE_CONNECT_SSL:
        /* Any readiness at all means the connect attempt resolved (the
         * result handler inspects the socket for success/failure). */
        if (ev != EV_NONE)
          handle_connect_result(nsp, nse, NSE_STATUS_SUCCESS);
        if (event_timedout(nse))
          handle_connect_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_READ:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        /* An SSL read may be blocked waiting on either direction
         * (renegotiation); honor whatever direction OpenSSL asked for. */
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;
        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
        if (!nse->iod->ssl && match_r)
          handle_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_read_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_WRITE:
        match_r = ev & EV_READ;
        match_w = ev & EV_WRITE;
#if HAVE_OPENSSL
        /* Same direction-swap logic as reads, for SSL writes. */
        desire_r = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ;
        desire_w = nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE;
        if (nse->iod->ssl && ((desire_r && match_r) || (desire_w && match_w)))
          handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);
        else
#endif
        if (!nse->iod->ssl && match_w)
          handle_write_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_write_result(nsp, nse, NSE_STATUS_TIMEOUT);
        break;

      case NSE_TYPE_TIMER:
        /* Timers "succeed" by expiring. */
        if (event_timedout(nse))
          handle_timer_result(nsp, nse, NSE_STATUS_SUCCESS);
        break;

#if HAVE_PCAP
      case NSE_TYPE_PCAP_READ:{
        nsock_log_debug_all("PCAP iterating %lu", nse->id);

        if (ev & EV_READ) {
          /* buffer empty? check it! */
          if (fs_length(&(nse->iobuf)) == 0)
            do_actual_pcap_read(nse);
        }

        /* if already received something */
        if (fs_length(&(nse->iobuf)) > 0)
          handle_pcap_read_result(nsp, nse, NSE_STATUS_SUCCESS);

        if (event_timedout(nse))
          handle_pcap_read_result(nsp, nse, NSE_STATUS_TIMEOUT);

#if PCAP_BSD_SELECT_HACK
        /* If event occurred, and we're in BSD_HACK mode, then this event was added
         * to two queues. read_event and pcap_read_event
         * Of course we should destroy it only once.
         * I assume we're now in read_event, so just unlink this event from
         * pcap_read_event */
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && nse->event_done && evlist == &nsp->read_events) {
          /* event is done, list is read_events and we're in BSD_HACK mode.
           * So unlink event from pcap_read_events */
          update_first_events(nse);
          gh_list_remove(&nsp->pcap_read_events, &nse->nodeq_pcap);
          nsock_log_debug_all("PCAP NSE #%lu: Removing event from PCAP_READ_EVENTS", nse->id);
        }
        /* Symmetric case: we are walking pcap_read_events, so unlink the
         * duplicate entry from the regular read_events queue instead. */
        if (((mspcap *)nse->iod->pcap)->pcap_desc >= 0 && nse->event_done && evlist == &nsp->pcap_read_events) {
          update_first_events(nse);
          gh_list_remove(&nsp->read_events, &nse->nodeq_io);
          nsock_log_debug_all("PCAP NSE #%lu: Removing event from READ_EVENTS", nse->id);
        }
#endif
        break;
      }
#endif
      default:
        fatal("Event has unknown type (%d)", nse->type);
    }
  }

  if (nse->event_done) {
    /* Security sanity check: don't return a functional SSL iod without
     * setting an SSL data structure. */
    if (nse->type == NSE_TYPE_CONNECT_SSL && nse->status == NSE_STATUS_SUCCESS)
      assert(nse->iod->ssl != NULL);

    nsock_log_debug_all("NSE #%lu: Sending event", nse->id);

    /* WooHoo! The event is ready to be sent */
    event_dispatch_and_delete(nsp, nse, 1);
  }
}
/* Adds an event to the appropriate nsp event list, handles housekeeping such as
 * adjusting the descriptor select/poll lists, registering the timeout value,
 * etc. */
void nsock_pool_add_event(struct npool *nsp, struct nevent *nse) {
  nsock_log_debug("NSE #%lu: Adding event (timeout in %ldms)",
                  nse->id,
                  (long)TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

  nsp->events_pending++;

  if (!nse->event_done && nse->timeout.tv_sec) {
    /* This event is expirable, add it to the queue */
    gh_heap_push(&nsp->expirables, &nse->expire);
  }

  /* Now we do the event type specific actions */
  switch (nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        /* Connect completion is signalled via both readability and
         * writability, so register interest in both directions. */
        socket_count_read_inc(nse->iod);
        socket_count_write_inc(nse->iod);
        update_events(nse->iod, nsp, EV_READ|EV_WRITE|EV_EXCEPT, EV_NONE);
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_READ:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod);
        update_events(nse->iod, nsp, EV_READ, EV_NONE);
#if HAVE_OPENSSL
        /* Record the direction OpenSSL currently needs so process_event
         * can match readiness to the SSL state machine. */
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_READ;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_WRITE:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_write_inc(nse->iod);
        update_events(nse->iod, nsp, EV_WRITE, EV_NONE);
#if HAVE_OPENSSL
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_WRITE;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_TIMER:
      /* nothing to do -- timers fire purely off the expirables heap. */
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ: {
      mspcap *mp = (mspcap *)nse->iod->pcap;

      assert(mp);
      if (mp->pcap_desc >= 0) { /* pcap descriptor present */
        if (!nse->event_done) {
          socket_count_readpcap_inc(nse->iod);
          update_events(nse->iod, nsp, EV_READ, EV_NONE);
        }
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to READ_EVENTS", nse->id);

#if PCAP_BSD_SELECT_HACK
        /* when using BSD hack we must do pcap_next() after select().
         * Let's insert this pcap to both queues, selectable and nonselectable.
         * This will result in doing pcap_next_ex() just before select() */
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
#endif
      } else {
        /* pcap isn't selectable. Add it to pcap-specific queue. */
        nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
      }
      iod_add_event(nse->iod, nse);
      break;
    }
#endif

    default:
      fatal("Unknown nsock event type (%d)", nse->type);
  }

  /* It can happen that the event already completed. In which case we can
   * already deliver it, even though we're probably not inside nsock_loop(). */
  if (nse->event_done) {
    event_dispatch_and_delete(nsp, nse, 1);
    update_first_events(nse);
    nevent_unref(nsp, nse);
  }
}
/* One pass of the kqueue-based engine: wait (up to msec_timeout ms, -1 for
 * no limit) for kernel events or the next nsock event expiry, then process
 * the resulting event lists.  Returns 1 on a normal pass, 0 if no events
 * were pending, -1 on a kevent() error (nsp->errnum set). */
int kqueue_loop(mspool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  struct timespec ts, *ts_p;
  int sock_err = 0;
  struct kqueue_engine_info *kinfo = (struct kqueue_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  /* Grow the kevent result array ahead of demand (doubling) so kevent()
   * can always report on every active iod. */
  if (gh_list_count(&nsp->active_iods) > kinfo->evlen) {
    kinfo->evlen = gh_list_count(&nsp->active_iods) * 2;
    kinfo->events = (struct kevent *)safe_realloc(kinfo->events, kinfo->evlen * sizeof(struct kevent));
  }

  do {
    msevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timespec pointer we will give to kevent(); NULL means
     * block indefinitely. */
    memset(&ts, 0, sizeof(struct timespec));
    if (combined_msecs >= 0) {
      ts.tv_sec = combined_msecs / 1000;
      ts.tv_nsec = (combined_msecs % 1000) * 1000000L;
      ts_p = &ts;
    } else {
      ts_p = NULL;
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* do non-blocking read on pcap devices that doesn't support select()
     * If there is anything read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      results_left = kevent(kinfo->kqfd, NULL, 0, kinfo->events, kinfo->evlen, ts_p);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to kevent delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp, results_left);

  return 1;
}
/* Top-level ncat connect-mode driver: builds an nsock pool, establishes the
 * outbound connection (direct TCP/UDP/SCTP/SSL/Unix-socket, or via an HTTP
 * CONNECT or SOCKS4 proxy), runs the event loop to completion, and reports
 * transfer statistics.  Returns the process exit status (0 ok, 1 error). */
int ncat_connect(void) {
  nsock_pool mypool;
  int rc;

  /* Unless explicitely asked not to do so, ncat uses the
   * fallback nsock engine to maximize compatibility between
   * operating systems and the different use cases. */
  if (!o.nsock_engine)
    nsock_set_default_engine("select");

  /* Create an nsock pool */
  if ((mypool = nsp_new(NULL)) == NULL)
    bye("Failed to create nsock_pool.");

  /* Map ncat's -d verbosity onto nsock log levels. */
  if (o.debug >= 6)
    nsock_set_loglevel(mypool, NSOCK_LOG_DBG_ALL);
  else if (o.debug >= 3)
    nsock_set_loglevel(mypool, NSOCK_LOG_DBG);
  else if (o.debug >= 1)
    nsock_set_loglevel(mypool, NSOCK_LOG_INFO);
  else
    nsock_set_loglevel(mypool, NSOCK_LOG_ERROR);

  /* Allow connections to broadcast addresses. */
  nsp_setbroadcast(mypool, 1);

#ifdef HAVE_OPENSSL
  set_ssl_ctx_options((SSL_CTX *) nsp_ssl_init(mypool));
#endif

  if (httpconnect.storage.ss_family == AF_UNSPEC
      && socksconnect.storage.ss_family == AF_UNSPEC) {
    /* A non-proxy connection. Create an iod for a new socket. */
    cs.sock_nsi = nsi_new(mypool, NULL);
    if (cs.sock_nsi == NULL)
      bye("Failed to create nsock_iod.");

    if (nsi_set_hostname(cs.sock_nsi, o.target) == -1)
      bye("Failed to set hostname on iod.");

#if HAVE_SYS_UN_H
    /* For DGRAM UNIX socket we have to use source socket */
    if (o.af == AF_UNIX && o.udp) {
      if (srcaddr.storage.ss_family != AF_UNIX) {
        char *tmp_name = NULL;

        /* If no source socket was specified, we have to create temporary one. */
        if ((tmp_name = tempnam(NULL, "ncat.")) == NULL)
          bye("Failed to create name for temporary DGRAM source Unix domain socket (tempnam).");

        srcaddr.un.sun_family = AF_UNIX;
        /* NOTE(review): strncpy here may leave sun_path unterminated if
         * tmp_name is exactly sizeof(sun_path) long -- confirm tempnam()
         * output is always shorter, or NUL-terminate explicitly. */
        strncpy(srcaddr.un.sun_path, tmp_name, sizeof(srcaddr.un.sun_path));
        free (tmp_name);
      }
      nsi_set_localaddr(cs.sock_nsi, &srcaddr.storage,
                        SUN_LEN((struct sockaddr_un *)&srcaddr.storage));

      if (o.verbose)
        loguser("[%s] used as source DGRAM Unix domain socket.\n", srcaddr.un.sun_path);
    } else
#endif
    if (srcaddr.storage.ss_family != AF_UNSPEC)
      nsi_set_localaddr(cs.sock_nsi, &srcaddr.storage, sizeof(srcaddr.storage));

    if (o.numsrcrtes) {
      unsigned char *ipopts = NULL;
      size_t ipoptslen = 0;

      if (o.af != AF_INET)
        bye("Sorry, -g can only currently be used with IPv4.");
      ipopts = buildsrcrte(targetss.in.sin_addr, o.srcrtes, o.numsrcrtes, o.srcrteptr, &ipoptslen);
      nsi_set_ipoptions(cs.sock_nsi, ipopts, ipoptslen);
      free(ipopts); /* Nsock has its own copy */
    }

    /* Dispatch on transport: Unix socket / UDP / SCTP / SSL / plain TCP.
     * Datagram connects take no timeout parameter; stream connects do. */
#if HAVE_SYS_UN_H
    if (o.af == AF_UNIX) {
      if (o.udp) {
        nsock_connect_unixsock_datagram(mypool, cs.sock_nsi, connect_handler,
                                        NULL,
                                        &targetss.sockaddr,
                                        SUN_LEN((struct sockaddr_un *)&targetss.sockaddr));
      } else {
        nsock_connect_unixsock_stream(mypool, cs.sock_nsi, connect_handler,
                                      o.conntimeout, NULL,
                                      &targetss.sockaddr,
                                      SUN_LEN((struct sockaddr_un *)&targetss.sockaddr));
      }
    } else
#endif
    if (o.udp) {
      nsock_connect_udp(mypool, cs.sock_nsi, connect_handler,
                        NULL, &targetss.sockaddr, targetsslen,
                        inet_port(&targetss));
    }
#ifdef HAVE_OPENSSL
    else if (o.sctp && o.ssl) {
      nsock_connect_ssl(mypool, cs.sock_nsi, connect_handler,
                        o.conntimeout, NULL,
                        &targetss.sockaddr, targetsslen,
                        IPPROTO_SCTP, inet_port(&targetss),
                        NULL);
    }
#endif
    else if (o.sctp) {
      nsock_connect_sctp(mypool, cs.sock_nsi, connect_handler,
                        o.conntimeout, NULL,
                        &targetss.sockaddr, targetsslen,
                        inet_port(&targetss));
    }
#ifdef HAVE_OPENSSL
    else if (o.ssl) {
      nsock_connect_ssl(mypool, cs.sock_nsi, connect_handler,
                        o.conntimeout, NULL,
                        &targetss.sockaddr, targetsslen,
                        IPPROTO_TCP, inet_port(&targetss),
                        NULL);
    }
#endif
    else {
      nsock_connect_tcp(mypool, cs.sock_nsi, connect_handler,
                        o.conntimeout, NULL,
                        &targetss.sockaddr, targetsslen,
                        inet_port(&targetss));
    }
  } else {
    /* A proxy connection. */
    static int connect_socket;
    int len;
    char *line;
    size_t n;

    if (httpconnect.storage.ss_family != AF_UNSPEC) {
      connect_socket = do_proxy_http();
      if (connect_socket == -1)
        return 1;
    } else if (socksconnect.storage.ss_family != AF_UNSPEC) {
      struct socket_buffer stateful_buf;
      struct socks4_data socks4msg;
      char socksbuf[8];

      connect_socket = do_connect(SOCK_STREAM);
      if (connect_socket == -1) {
        loguser("Proxy connection failed: %s.\n", socket_strerror(socket_errno()));
        return 1;
      }

      socket_buffer_init(&stateful_buf, connect_socket);

      if (o.verbose) {
        loguser("Connected to proxy %s:%hu\n", inet_socktop(&targetss),
                inet_port(&targetss));
      }

      /* Fill the socks4_data struct */
      zmem(&socks4msg, sizeof(socks4msg));
      socks4msg.version = SOCKS4_VERSION;
      socks4msg.type = SOCKS_CONNECT;
      socks4msg.port = socksconnect.in.sin_port;
      socks4msg.address = socksconnect.in.sin_addr.s_addr;

      if (o.proxy_auth)
        Strncpy(socks4msg.username, (char *) o.proxy_auth, sizeof(socks4msg.username));

      /* 8 fixed header bytes + username + its NUL terminator. */
      len = 8 + strlen(socks4msg.username) + 1;
      if (send(connect_socket, (char *) &socks4msg, len, 0) < 0) {
        loguser("Error sending proxy request: %s.\n", socket_strerror(socket_errno()));
        return 1;
      }
      /* The size of the socks4 response is 8 bytes. So read exactly
         8 bytes from the buffer */
      if (socket_buffer_readcount(&stateful_buf, socksbuf, 8) < 0) {
        /* NOTE(review): "reponse" typo in this user-facing message; fix
         * would be a behavior change, flagging only. */
        loguser("Error: short reponse from proxy.\n");
        return 1;
      }
      /* socksbuf[1] is the SOCKS4 reply code; 90 = request granted. */
      if (socksbuf[1] != 90) {
        loguser("Proxy connection failed.\n");
        return 1;
      }

      /* Clear out whatever is left in the socket buffer which may be
         already sent by proxy server along with http response headers. */
      line = socket_buffer_remainder(&stateful_buf, &n);
      /* Write the leftover data to stdout. */
      Write(STDOUT_FILENO, line, n);
    }

    /* Once the proxy negotiation is done, Nsock takes control of the socket. */
    cs.sock_nsi = nsi_new2(mypool, connect_socket, NULL);

    /* Create IOD for nsp->stdin */
    if ((cs.stdin_nsi = nsi_new2(mypool, 0, NULL)) == NULL)
      bye("Failed to create stdin nsiod.");

    post_connect(mypool, cs.sock_nsi);
  } /* connect */

  /* -1: run until all events are delivered or an error occurs. */
  rc = nsock_loop(mypool, -1);

  if (o.verbose) {
    struct timeval end_time;
    double time;

    gettimeofday(&end_time, NULL);
    time = TIMEVAL_MSEC_SUBTRACT(end_time, start_time) / 1000.0;
    loguser("%lu bytes sent, %lu bytes received in %.2f seconds.\n",
            nsi_get_write_count(cs.sock_nsi),
            nsi_get_read_count(cs.sock_nsi), time);
  }

#if HAVE_SYS_UN_H
  /* Clean up the temporary/source DGRAM Unix socket file, if we made one. */
  if (o.af == AF_UNIX && o.udp) {
    if (o.verbose)
      loguser("Deleting source DGRAM Unix domain socket. [%s]\n", srcaddr.un.sun_path);
    unlink(srcaddr.un.sun_path);
  }
#endif

  nsp_delete(mypool);

  return rc == NSOCK_LOOP_ERROR ? 1 : 0;
}
/* One pass of the select()-based engine: wait (up to msec_timeout ms, -1 for
 * no limit) for descriptor readiness or the next nsock event expiry, then
 * process the event lists.  Returns 1 on a normal pass, 0 if no events were
 * pending, -1 on a select error (nsp->errnum set). */
int select_loop(mspool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct timeval select_tv;
  struct timeval *select_tv_p;
  struct select_engine_info *sinfo = (struct select_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    nsock_log_debug_all(nsp, "wait for events");

    /* next_ev.tv_sec == 0 is the sentinel for "no timed event pending". */
    if (nsp->next_ev.tv_sec == 0)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nsp->next_ev, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (GH_LIST_COUNT(&nsp->pcap_read_events))
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

    /* Set up the timeval pointer we will give to select() */
    memset(&select_tv, 0, sizeof(select_tv));
    if (combined_msecs > 0) {
      select_tv.tv_sec = combined_msecs / 1000;
      select_tv.tv_usec = (combined_msecs % 1000) * 1000;
      select_tv_p = &select_tv;
    } else if (combined_msecs == 0) {
      /* we want the tv_sec and tv_usec to be zero but they already are from bzero */
      select_tv_p = &select_tv;
    } else {
      assert(combined_msecs == -1);
      select_tv_p = NULL;
    }

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* do non-blocking read on pcap devices that doesn't support select()
     * If there is anything read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      /* Set up the descriptors for select.  The master sets are copied
       * because select() overwrites its fd_set arguments. */
      sinfo->fds_results_r = sinfo->fds_master_r;
      sinfo->fds_results_w = sinfo->fds_master_w;
      sinfo->fds_results_x = sinfo->fds_master_x;

      results_left = fselect(sinfo->max_sd + 1, &sinfo->fds_results_r,
                             &sinfo->fds_results_w, &sinfo->fds_results_x, select_tv_p);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to select delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}
/* One pass of the poll()-based engine: wait (up to msec_timeout ms, -1 for
 * no limit) for descriptor readiness or the next nsock event expiry, then
 * process the event lists.  Returns 1 on a normal pass, 0 if no events were
 * pending, -1 on a poll error (nsp->errnum set). */
int poll_loop(struct npool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct poll_engine_info *pinfo = (struct poll_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  do {
    struct nevent *nse;

    nsock_log_debug_all(nsp, "wait for events");

    nse = next_expirable_event(nsp);
    if (!nse)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (gh_list_count(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* do non-blocking read on pcap devices that doesn't support select()
     * If there is anything read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      /* combined_msecs of -1 makes Poll() block indefinitely. */
      results_left = Poll(pinfo->events, pinfo->max_fd + 1, combined_msecs);
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to poll delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_log_error(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp);

  return 1;
}
/* One pass of the epoll-based engine: wait (up to msec_timeout ms, -1 for
 * no limit) for kernel events or the next nsock event expiry, then process
 * the event lists.  Returns 1 on a normal pass, 0 if no events were pending,
 * -1 on an epoll_wait error (nsp->errnum set). */
int epoll_loop(mspool *nsp, int msec_timeout) {
  int results_left = 0;
  int event_msecs; /* msecs before an event goes off */
  int combined_msecs;
  int sock_err = 0;
  struct epoll_engine_info *einfo = (struct epoll_engine_info *)nsp->engine_data;

  assert(msec_timeout >= -1);

  if (nsp->events_pending == 0)
    return 0; /* No need to wait on 0 events ... */

  /* Grow the epoll_event result array ahead of demand (doubling) so
   * epoll_wait() can always report on every active iod. */
  if (GH_LIST_COUNT(&nsp->active_iods) > einfo->evlen) {
    einfo->evlen = GH_LIST_COUNT(&nsp->active_iods) * 2;
    einfo->events = (struct epoll_event *)safe_realloc(einfo->events, einfo->evlen * sizeof(struct epoll_event));
  }

  do {
    if (nsp->tracelevel > 6)
      nsock_trace(nsp, "wait_for_events");

    /* next_ev.tv_sec == 0 is the sentinel for "no timed event pending". */
    if (nsp->next_ev.tv_sec == 0)
      event_msecs = -1; /* None of the events specified a timeout */
    else
      event_msecs = MAX(0, TIMEVAL_MSEC_SUBTRACT(nsp->next_ev, nsock_tod));

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* Force a low timeout when capturing packets on systems where
     * the pcap descriptor is not select()able. */
    if (GH_LIST_COUNT(&nsp->pcap_read_events) > 0)
      if (event_msecs > PCAP_POLL_INTERVAL)
        event_msecs = PCAP_POLL_INTERVAL;
#endif
#endif

    /* We cast to unsigned because we want -1 to be very high (since it means no
     * timeout) */
    combined_msecs = MIN((unsigned)event_msecs, (unsigned)msec_timeout);

#if HAVE_PCAP
#ifndef PCAP_CAN_DO_SELECT
    /* do non-blocking read on pcap devices that doesn't support select()
     * If there is anything read, just leave this loop. */
    if (pcap_read_on_nonselect(nsp)) {
      /* okay, something was read. */
    } else
#endif
#endif
    {
      /* epoll_wait requires maxevents > 0; with an empty interest set we
       * just report zero results. */
      if (einfo->evlen)
        results_left = epoll_wait(einfo->epfd, einfo->events, einfo->evlen, combined_msecs);
      else
        results_left = 0;
      if (results_left == -1)
        sock_err = socket_errno();
    }

    gettimeofday(&nsock_tod, NULL); /* Due to epoll delay */
  } while (results_left == -1 && sock_err == EINTR); /* repeat only if signal occurred */

  if (results_left == -1 && sock_err != EINTR) {
    nsock_trace(nsp, "nsock_loop error %d: %s", sock_err, socket_strerror(sock_err));
    nsp->errnum = sock_err;
    return -1;
  }

  iterate_through_event_lists(nsp, results_left);

  return 1;
}