/* Reads available data from the socket (or SSL connection) backing nse and
 * appends it to the event's iobuf.  Returns -1 on error (after marking the
 * event done with NSE_STATUS_ERROR and setting nse->errnum); otherwise returns
 * the number of newly appended bytes.  A plain return of 0 with buflen == 0
 * means EOF was observed (event marked done with EOF/SUCCESS as appropriate). */
static int do_actual_read(mspool *ms, msevent *nse) {
  char buf[8192];
  int buflen = 0;
  msiod *iod = nse->iod;
  int err = 0;
  int max_chunk = NSOCK_READ_CHUNK_SIZE;
  int startlen = FILESPACE_LENGTH(&nse->iobuf);

  /* NSOCK_READBYTES events stop early once the requested amount is buffered. */
  if (nse->readinfo.read_type == NSOCK_READBYTES)
    max_chunk = nse->readinfo.num;

  if (!iod->ssl) {
    do {
      struct sockaddr_storage peer;
      socklen_t peerlen;

      peerlen = sizeof(peer);
      buflen = recvfrom(iod->sd, buf, sizeof(buf), 0,
                        (struct sockaddr *)&peer, &peerlen);

      /* Using recv() was failing, at least on UNIX, for non-network sockets
       * (i.e. stdin) in this case, a read() is done - as on ENOTSOCK we may
       * have a non-network socket */
      if (buflen == -1) {
        if (socket_errno() == ENOTSOCK) {
          peer.ss_family = AF_UNSPEC;
          peerlen = 0;
          buflen = read(iod->sd, buf, sizeof(buf));
        }
      }
      if (buflen == -1) {
        err = socket_errno();
        break;
      }

      /* Record the datagram/stream peer address when the kernel supplied one. */
      if (peerlen > 0) {
        assert(peerlen <= sizeof(iod->peer));
        memcpy(&iod->peer, &peer, peerlen);
        iod->peerlen = peerlen;
      }

      if (buflen > 0) {
        if (fscat(&nse->iobuf, buf, buflen) == -1) {
          nse->event_done = 1;
          nse->status = NSE_STATUS_ERROR;
          nse->errnum = ENOMEM;
          return -1;
        }

        /* Sometimes a service just spews and spews data.  So we return after a
         * somewhat large amount to avoid monopolizing resources and avoid DOS
         * attacks. */
        if (FILESPACE_LENGTH(&nse->iobuf) > max_chunk)
          return FILESPACE_LENGTH(&nse->iobuf) - startlen;

        /* No good reason to read again if we were successful in the read but
         * didn't fill up the buffer.  Especially for UDP, where we want to
         * return only one datagram at a time.  The consistency of the above
         * assignment of iod->peer depends on not consolidating more than one
         * UDP read buffer.  (cast avoids a signed/unsigned comparison) */
        if (buflen < (int)sizeof(buf))
          return FILESPACE_LENGTH(&nse->iobuf) - startlen;
      }
    } while (buflen > 0 || (buflen == -1 && err == EINTR));

    if (buflen == -1) {
      /* EINTR/EAGAIN are transient -- leave the event pending. */
      if (err != EINTR && err != EAGAIN) {
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = err;
        return -1;
      }
    }
  } else {
#if HAVE_OPENSSL
    /* OpenSSL read -- drain all currently decryptable data. */
    while ((buflen = SSL_read(iod->ssl, buf, sizeof(buf))) > 0) {
      if (fscat(&nse->iobuf, buf, buflen) == -1) {
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = ENOMEM;
        return -1;
      }

      /* Sometimes a service just spews and spews data.  So we return
       * after a somewhat large amount to avoid monopolizing resources
       * and avoid DOS attacks. */
      if (FILESPACE_LENGTH(&nse->iobuf) > max_chunk)
        return FILESPACE_LENGTH(&nse->iobuf) - startlen;
    }

    /* SSL_read() reports failure with any value <= 0; buflen == 0 (EOF-like)
     * is handled below, so treat every negative return as an error here
     * (the old check for exactly -1 could miss other negative returns). */
    if (buflen < 0) {
      err = SSL_get_error(iod->ssl, buflen);
      if (err == SSL_ERROR_WANT_READ) {
        int evclr;

        evclr = socket_count_dec_ssl_desire(nse);
        socket_count_read_inc(iod);
        update_events(iod, ms, EV_READ, evclr);
        nse->sslinfo.ssl_desire = err;
      } else if (err == SSL_ERROR_WANT_WRITE) {
        int evclr;

        evclr = socket_count_dec_ssl_desire(nse);
        socket_count_write_inc(iod);
        update_events(iod, ms, EV_WRITE, evclr);
        nse->sslinfo.ssl_desire = err;
      } else {
        /* Unexpected error.  Note: ERR_reason_error_string() needs a packed
         * code from ERR_get_error(), NOT the SSL_get_error() result that the
         * old code passed it; use ERR_error_string() which also never
         * returns NULL. */
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = EIO;
        if (ms->tracelevel > 2)
          nsock_trace(ms, "SSL_read() failed for reason %s on NSI %li",
                      ERR_error_string(ERR_get_error(), NULL), iod->id);
        return -1;
      }
    }
#endif /* HAVE_OPENSSL */
  }

  if (buflen == 0) {
    /* EOF: success if we already buffered something, otherwise report EOF. */
    nse->event_done = 1;
    nse->eof = 1;
    if (FILESPACE_LENGTH(&nse->iobuf) > 0) {
      nse->status = NSE_STATUS_SUCCESS;
      return FILESPACE_LENGTH(&nse->iobuf) - startlen;
    } else {
      nse->status = NSE_STATUS_EOF;
      return 0;
    }
  }

  return FILESPACE_LENGTH(&nse->iobuf) - startlen;
}
/* Attempts to make progress on a pending write event after select()/poll()
 * indicated writability (or after a timeout/cancellation).  Performs one
 * send()/sendto()/SSL_write() of the unwritten remainder; marks the event done
 * on completion, hard error, timeout, or cancellation, and leaves it pending on
 * a short write or a transient error (EINTR/EAGAIN, SSL WANT_READ/WANT_WRITE).
 * When the event completes, the write interest registered in nsp_add_event is
 * dropped from the engine. */
void handle_write_result(mspool *ms, msevent *nse, enum nse_status status) {
  int bytesleft;
  char *str;
  int res;
  int err;
  msiod *iod = nse->iod;

  if (status == NSE_STATUS_TIMEOUT || status == NSE_STATUS_CANCELLED) {
    nse->event_done = 1;
    nse->status = status;
  } else if (status == NSE_STATUS_SUCCESS) {
    /* Resume from where the previous (partial) write left off. */
    str = FILESPACE_STR(&nse->iobuf) + nse->writeinfo.written_so_far;
    bytesleft = FILESPACE_LENGTH(&nse->iobuf) - nse->writeinfo.written_so_far;
    if (nse->writeinfo.written_so_far > 0)
      assert(bytesleft > 0);
#if HAVE_OPENSSL
    if (iod->ssl)
      res = SSL_write(iod->ssl, str, bytesleft);
    else
#endif
      /* AF_UNSPEC dest means a connected socket (send); otherwise an explicit
       * destination was given (sendto, e.g. unconnected UDP). */
      if (nse->writeinfo.dest.ss_family == AF_UNSPEC)
        res = send(nse->iod->sd, str, bytesleft, 0);
      else
        res = sendto(nse->iod->sd, str, bytesleft, 0,
                     (struct sockaddr *)&nse->writeinfo.dest,
                     (int)nse->writeinfo.destlen);
    if (res == bytesleft) {
      /* Everything went out -- the event is complete. */
      nse->event_done = 1;
      nse->status = NSE_STATUS_SUCCESS;
    } else if (res >= 0) {
      /* Short write: remember progress and wait for the next writable event. */
      nse->writeinfo.written_so_far += res;
    } else {
      assert(res == -1);
      if (iod->ssl) {
#if HAVE_OPENSSL
        /* SSL may need the opposite direction ready before it can make
         * progress (renegotiation); swap the registered interest accordingly
         * and remember the desire so the dispatcher re-enters correctly. */
        err = SSL_get_error(iod->ssl, res);
        if (err == SSL_ERROR_WANT_READ) {
          int evclr;

          evclr = socket_count_dec_ssl_desire(nse);
          socket_count_read_inc(iod);
          update_events(iod, ms, EV_READ, evclr);
          nse->sslinfo.ssl_desire = err;
        } else if (err == SSL_ERROR_WANT_WRITE) {
          int evclr;

          evclr = socket_count_dec_ssl_desire(nse);
          socket_count_write_inc(iod);
          update_events(iod, ms, EV_WRITE, evclr);
          nse->sslinfo.ssl_desire = err;
        } else {
          /* Unexpected error */
          nse->event_done = 1;
          nse->status = NSE_STATUS_ERROR;
          nse->errnum = EIO;
        }
#endif
      } else {
        /* Transient errors keep the event pending; anything else ends it. */
        err = socket_errno();
        if (err != EINTR && err != EAGAIN
#ifndef WIN32
            && err != EBUSY
#endif
            ) {
          nse->event_done = 1;
          nse->status = NSE_STATUS_ERROR;
          nse->errnum = err;
        }
      }
    }

    if (res >= 0)
      nse->iod->write_count += res;
  }

  if (nse->event_done && nse->iod->sd != -1) {
    /* Drop the engine interest that was registered for this write event. */
    int ev = EV_NONE;

#if HAVE_OPENSSL
    if (nse->iod->ssl != NULL)
      ev |= socket_count_dec_ssl_desire(nse);
    else
#endif
      ev |= socket_count_write_dec(nse->iod);
    update_events(nse->iod, ms, EV_NONE, ev);
  }
  return;
}
/* Adds an event to the appropriate nsp event list, handles housekeeping such as
 * adjusting the descriptor select/poll lists, registering the timeout value,
 * etc. */
void nsp_add_event(mspool *nsp, msevent *nse) {
  if (nsp->tracelevel > 5)
    nsock_trace(nsp, "NSE #%lu: Adding event", nse->id);

  /* First lets do the event-type independent stuff, starting with timeouts */
  if (nse->event_done) {
    /* Already-complete events should be delivered on the very next loop pass. */
    nsp->next_ev = nsock_tod;
  } else {
    if (nse->timeout.tv_sec != 0) {
      /* Pull next_ev earlier if this event expires before the current one
       * (tv_sec == 0 on next_ev means "no deadline registered yet"). */
      if (nsp->next_ev.tv_sec == 0)
        nsp->next_ev = nse->timeout;
      else if (TIMEVAL_AFTER(nsp->next_ev, nse->timeout))
        nsp->next_ev = nse->timeout;
    }
  }

  nsp->events_pending++;

  /* Now we do the event type specific actions */
  switch(nse->type) {
    case NSE_TYPE_CONNECT:
    case NSE_TYPE_CONNECT_SSL:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        /* connect() completion can surface as readable, writable, or
         * exceptional depending on platform, so watch all three. */
        socket_count_read_inc(nse->iod);
        socket_count_write_inc(nse->iod);
        update_events(nse->iod, nsp, EV_READ|EV_WRITE|EV_EXCEPT, EV_NONE);
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_READ:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_read_inc(nse->iod);
        update_events(nse->iod, nsp, EV_READ, EV_NONE);
#if HAVE_OPENSSL
        /* Remember which direction SSL progress requires initially. */
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_READ;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_WRITE:
      if (!nse->event_done) {
        assert(nse->iod->sd >= 0);
        socket_count_write_inc(nse->iod);
        update_events(nse->iod, nsp, EV_WRITE, EV_NONE);
#if HAVE_OPENSSL
        if (nse->iod->ssl)
          nse->sslinfo.ssl_desire = SSL_ERROR_WANT_WRITE;
#endif
      }
      iod_add_event(nse->iod, nse);
      break;

    case NSE_TYPE_TIMER:
      /* Timers never touch a descriptor; they live on their own list. */
      gh_list_append(&nsp->timer_events, nse);
      break;

#if HAVE_PCAP
    case NSE_TYPE_PCAP_READ: {
      mspcap *mp = (mspcap *)nse->iod->pcap;

      assert(mp);
      if (mp->pcap_desc >= 0) { /* pcap descriptor present */
        if (!nse->event_done) {
          socket_count_readpcap_inc(nse->iod);
          update_events(nse->iod, nsp, EV_READ, EV_NONE);
        }
        if (nsp->tracelevel > 8)
          nsock_trace(nsp, "PCAP NSE #%lu: Adding event to READ_EVENTS", nse->id);
#if PCAP_BSD_SELECT_HACK
        /* when using BSD hack we must do pcap_next() after select().
         * Let's insert this pcap to bot queues, to selectable and nonselectable.
         * This will result in doing pcap_next_ex() just before select() */
        if (nsp->tracelevel > 8)
          nsock_trace(nsp, "PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
#endif
      } else {
        /* pcap isn't selectable. Add it to pcap-specific queue. */
        if (nsp->tracelevel > 8)
          nsock_trace(nsp, "PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id);
      }
      iod_add_event(nse->iod, nse);
      break;
    }
#endif

    default:
      assert(0);
      break; /* unreached */
  }
}
/* handle_connect_results assumes that select or poll have already shown the
 * descriptor to be active.
 *
 * Verifies TCP connect() completion via SO_ERROR, then for CONNECT_SSL events
 * drives the SSL_connect() handshake (possibly across multiple invocations via
 * the WANT_READ/WANT_WRITE ssl_desire mechanism).  If the handshake fails and
 * SSLv2 compatibility was still enabled, it tears down the socket and retries
 * once with SSL_OP_NO_SSLv2 set.
 *
 * Fix: the SSLv2-retry path previously called SSL_clear() twice in a row,
 * discarding the first call's result and re-clearing the already-reset SSL
 * object; it now performs a single, checked call. */
void handle_connect_result(mspool *ms, msevent *nse, enum nse_status status) {
  int optval;
  socklen_t optlen = sizeof(int);
  msiod *iod = nse->iod;
#if HAVE_OPENSSL
  int sslerr;
  int rc = 0;
  /* True when we are re-entering mid-handshake (a prior SSL_connect asked for
   * more I/O), as opposed to handling the initial TCP connect completion. */
  int sslconnect_inprogress = nse->type == NSE_TYPE_CONNECT_SSL && nse->iod &&
    (nse->sslinfo.ssl_desire == SSL_ERROR_WANT_READ ||
     nse->sslinfo.ssl_desire == SSL_ERROR_WANT_WRITE);
#else
  int sslconnect_inprogress = 0;
#endif

  if (status == NSE_STATUS_TIMEOUT || status == NSE_STATUS_CANCELLED) {
    nse->status = status;
    nse->event_done = 1;
  } else if (sslconnect_inprogress) {
    /* Do nothing */
  } else if (status == NSE_STATUS_SUCCESS) {
    /* First we want to determine whether the socket really is connected */
    if (getsockopt(iod->sd, SOL_SOCKET, SO_ERROR, (char *)&optval, &optlen) != 0)
      optval = socket_errno(); /* Stupid Solaris */

    switch(optval) {
      case 0:
        nse->status = NSE_STATUS_SUCCESS;
        break;
      /* EACCES can be caused by ICMPv6 dest-unreach-admin, or when a port is
         blocked by Windows Firewall (WSAEACCES). */
      case EACCES:
      case ECONNREFUSED:
      case EHOSTUNREACH:
      case ENETDOWN:
      case ENETUNREACH:
      case ENETRESET:
      case ECONNABORTED:
      case ETIMEDOUT:
      case EHOSTDOWN:
      case ECONNRESET:
#ifdef WIN32
      case WSAEADDRINUSE:
      case WSAEADDRNOTAVAIL:
#endif
#ifndef WIN32
      case EPIPE: /* Has been seen after connect on Linux. */
      case ENOPROTOOPT: /* Also seen on Linux, perhaps in response to protocol unreachable. */
#endif
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = optval;
        break;
      default:
        fprintf(stderr, "Strange connect error from %s (%d): %s",
                inet_ntop_ez(&iod->peer, iod->peerlen), optval,
                socket_strerror(optval));
        assert(0); /* I'd like for someone to report it */
        break;
    }

    /* Now special code for the SSL case where the TCP connection was successful. */
    if (nse->type == NSE_TYPE_CONNECT_SSL &&
        nse->status == NSE_STATUS_SUCCESS) {
#if HAVE_OPENSSL
      assert(ms->sslctx != NULL);
      /* Reuse iod->ssl if present. If set, this is the second try at
         connection without the SSL_OP_NO_SSLv2 option set. */
      if (iod->ssl == NULL) {
        iod->ssl = SSL_new(ms->sslctx);
        if (!iod->ssl)
          fatal("SSL_new failed: %s", ERR_error_string(ERR_get_error(), NULL));
      }

      if (iod->hostname != NULL) {
#if HAVE_SSL_SET_TLSEXT_HOST_NAME
        /* Send SNI so virtual-hosted servers present the right certificate. */
        if (SSL_set_tlsext_host_name(iod->ssl, iod->hostname) != 1)
          fatal("SSL_set_tlsext_host_name failed: %s",
                ERR_error_string(ERR_get_error(), NULL));
#endif
      }

      /* Associate our new SSL with the connected socket.  It will inherit the
       * non-blocking nature of the sd */
      if (SSL_set_fd(iod->ssl, iod->sd) != 1) {
        fatal("SSL_set_fd failed: %s", ERR_error_string(ERR_get_error(), NULL));
      }

      /* Event not done -- need to do SSL connect below */
      nse->sslinfo.ssl_desire = SSL_ERROR_WANT_CONNECT;
#endif
    } else {
      /* This is not an SSL connect (in which case we are always done), or the
       * TCP connect() underlying the SSL failed (in which case we are also done */
      nse->event_done = 1;
    }
  } else {
    assert(0); /* Currently we only know about TIMEOUT and SUCCESS callbacks */
  }

  /* At this point the TCP connection is done, whether successful or not.
   * Therefore decrease the read/write listen counts that were incremented in
   * nsp_add_event. In the SSL case, we may increase one of the counts depending
   * on whether SSL_connect returns an error of SSL_ERROR_WANT_READ or
   * SSL_ERROR_WANT_WRITE. In that case we will re-enter this function, but we
   * don't want to execute this block again. */
  if (iod->sd != -1 && !sslconnect_inprogress) {
    int ev = EV_NONE;

    ev |= socket_count_read_dec(iod);
    ev |= socket_count_write_dec(iod);
    ev |= EV_EXCEPT;
    update_events(iod, ms, EV_NONE, ev);
  }

#if HAVE_OPENSSL
  if (nse->type == NSE_TYPE_CONNECT_SSL && !nse->event_done) {
    /* Lets now start/continue/finish the connect! */
    if (iod->ssl_session) {
      rc = SSL_set_session(iod->ssl, iod->ssl_session);
      if (rc == 0)
        fprintf(stderr, "Uh-oh: SSL_set_session() failed - please tell Fyodor\n");
      iod->ssl_session = NULL; /* No need for this any more */
    }

    /* If this is a reinvocation of handle_connect_result, clear out the listen
     * bits that caused it, based on the previous SSL desire. */
    if (sslconnect_inprogress) {
      int ev;

      ev = socket_count_dec_ssl_desire(nse);
      update_events(iod, ms, EV_NONE, ev);
    }

    rc = SSL_connect(iod->ssl);
    if (rc == 1) {
      /* Woop!  Connect is done! */
      nse->event_done = 1;
      /* Check that certificate verification was okay, if requested. */
      if (nsi_ssl_post_connect_verify(iod)) {
        nse->status = NSE_STATUS_SUCCESS;
      } else {
        if (ms->tracelevel > 0)
          nsock_trace(ms, "certificate verification error for EID %li: %s",
                      nse->id, ERR_error_string(ERR_get_error(), NULL));
        nse->status = NSE_STATUS_ERROR;
      }
    } else {
      long options = SSL_get_options(iod->ssl);

      sslerr = SSL_get_error(iod->ssl, rc);
      if (rc == -1 && sslerr == SSL_ERROR_WANT_READ) {
        /* Handshake needs incoming data; re-arm read interest and come back. */
        nse->sslinfo.ssl_desire = sslerr;
        socket_count_read_inc(iod);
        update_events(iod, ms, EV_READ, EV_NONE);
      } else if (rc == -1 && sslerr == SSL_ERROR_WANT_WRITE) {
        nse->sslinfo.ssl_desire = sslerr;
        socket_count_write_inc(iod);
        update_events(iod, ms, EV_WRITE, EV_NONE);
      } else if (!(options & SSL_OP_NO_SSLv2)) {
        int saved_ev;

        /* SSLv3-only and TLSv1-only servers can't be connected to when the
         * SSL_OP_NO_SSLv2 option is not set, which is the case when the pool
         * was initialized with nsp_ssl_init_max_speed. Try reconnecting with
         * SSL_OP_NO_SSLv2. Never downgrade a NO_SSLv2 connection to one that
         * might use SSLv2. */
        if (ms->tracelevel > 0)
          nsock_trace(ms, "EID %li reconnecting with SSL_OP_NO_SSLv2", nse->id);

        saved_ev = iod->watched_events;
        ms->engine->iod_unregister(ms, iod);
        close(iod->sd);
        nsock_connect_internal(ms, nse, SOCK_STREAM, iod->lastproto, &iod->peer,
                               iod->peerlen, nsi_peerport(iod));
        ms->engine->iod_register(ms, iod, saved_ev);

        /* Reset the SSL object for reuse on the new connection.  (This was
         * previously called twice back-to-back, with the first result
         * ignored; a single checked call is correct.) */
        if (!SSL_clear(iod->ssl))
          fatal("SSL_clear failed: %s", ERR_error_string(ERR_get_error(), NULL));

        SSL_set_options(iod->ssl, options | SSL_OP_NO_SSLv2);
        socket_count_read_inc(nse->iod);
        socket_count_write_inc(nse->iod);
        update_events(iod, ms, EV_READ|EV_WRITE, EV_NONE);
        nse->sslinfo.ssl_desire = SSL_ERROR_WANT_CONNECT;
      } else {
        if (ms->tracelevel > 0)
          nsock_trace(ms, "EID %li %s", nse->id,
                      ERR_error_string(ERR_get_error(), NULL));
        nse->event_done = 1;
        nse->status = NSE_STATUS_ERROR;
        nse->errnum = EIO;
      }
    }
  }
#endif
  return;
}
/* Adds an event to the appropriate nsp event list, handles housekeeping such as * adjusting the descriptor select/poll lists, registering the timeout value, * etc. */ void nsock_pool_add_event(struct npool *nsp, struct nevent *nse) { nsock_log_debug("NSE #%lu: Adding event (timeout in %ldms)", nse->id, (long)TIMEVAL_MSEC_SUBTRACT(nse->timeout, nsock_tod)); nsp->events_pending++; if (!nse->event_done && nse->timeout.tv_sec) { /* This event is expirable, add it to the queue */ gh_heap_push(&nsp->expirables, &nse->expire); } /* Now we do the event type specific actions */ switch (nse->type) { case NSE_TYPE_CONNECT: case NSE_TYPE_CONNECT_SSL: if (!nse->event_done) { assert(nse->iod->sd >= 0); socket_count_read_inc(nse->iod); socket_count_write_inc(nse->iod); update_events(nse->iod, nsp, EV_READ|EV_WRITE|EV_EXCEPT, EV_NONE); } iod_add_event(nse->iod, nse); break; case NSE_TYPE_READ: if (!nse->event_done) { assert(nse->iod->sd >= 0); socket_count_read_inc(nse->iod); update_events(nse->iod, nsp, EV_READ, EV_NONE); #if HAVE_OPENSSL if (nse->iod->ssl) nse->sslinfo.ssl_desire = SSL_ERROR_WANT_READ; #endif } iod_add_event(nse->iod, nse); break; case NSE_TYPE_WRITE: if (!nse->event_done) { assert(nse->iod->sd >= 0); socket_count_write_inc(nse->iod); update_events(nse->iod, nsp, EV_WRITE, EV_NONE); #if HAVE_OPENSSL if (nse->iod->ssl) nse->sslinfo.ssl_desire = SSL_ERROR_WANT_WRITE; #endif } iod_add_event(nse->iod, nse); break; case NSE_TYPE_TIMER: /* nothing to do */ break; #if HAVE_PCAP case NSE_TYPE_PCAP_READ: { mspcap *mp = (mspcap *)nse->iod->pcap; assert(mp); if (mp->pcap_desc >= 0) { /* pcap descriptor present */ if (!nse->event_done) { socket_count_readpcap_inc(nse->iod); update_events(nse->iod, nsp, EV_READ, EV_NONE); } nsock_log_debug_all("PCAP NSE #%lu: Adding event to READ_EVENTS", nse->id); #if PCAP_BSD_SELECT_HACK /* when using BSD hack we must do pcap_next() after select(). * Let's insert this pcap to bot queues, to selectable and nonselectable. 
* This will result in doing pcap_next_ex() just before select() */ nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id); #endif } else { /* pcap isn't selectable. Add it to pcap-specific queue. */ nsock_log_debug_all("PCAP NSE #%lu: Adding event to PCAP_READ_EVENTS", nse->id); } iod_add_event(nse->iod, nse); break; } #endif default: fatal("Unknown nsock event type (%d)", nse->type); } /* It can happen that the event already completed. In which case we can * already deliver it, even though we're probably not inside nsock_loop(). */ if (nse->event_done) { event_dispatch_and_delete(nsp, nse, 1); update_first_events(nse); nevent_unref(nsp, nse); } }