LWS_VISIBLE int lws_ssl_capable_read(struct lws *wsi, unsigned char *buf, int len) { struct lws_context *context = wsi->context; struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi]; int n; if (!wsi->ssl) return lws_ssl_capable_read_no_ssl(wsi, buf, len); n = SSL_read(wsi->ssl, buf, len); /* manpage: returning 0 means connection shut down */ if (!n) return LWS_SSL_CAPABLE_ERROR; if (n < 0) { n = SSL_get_error(wsi->ssl, n); if (n == SSL_ERROR_WANT_READ || n == SSL_ERROR_WANT_WRITE) return LWS_SSL_CAPABLE_MORE_SERVICE; return LWS_SSL_CAPABLE_ERROR; } /* * if it was our buffer that limited what we read, * check if SSL has additional data pending inside SSL buffers. * * Because these won't signal at the network layer with POLLIN * and if we don't realize, this data will sit there forever */ if (n != len) goto bail; if (!wsi->ssl) goto bail; if (!SSL_pending(wsi->ssl)) goto bail; if (wsi->pending_read_list_next) return n; if (wsi->pending_read_list_prev) return n; if (pt->pending_read_list == wsi) return n; /* add us to the linked list of guys with pending ssl */ if (pt->pending_read_list) pt->pending_read_list->pending_read_list_prev = wsi; wsi->pending_read_list_next = pt->pending_read_list; wsi->pending_read_list_prev = NULL; pt->pending_read_list = wsi; return n; bail: lws_ssl_remove_wsi_from_buffered_list(wsi); return n; }
LWS_VISIBLE int lws_ssl_capable_read(struct libwebsocket_context *context, struct libwebsocket *wsi, unsigned char *buf, int len) { int n; if (!wsi->ssl) return lws_ssl_capable_read_no_ssl(context, wsi, buf, len); n = SSL_read(wsi->ssl, buf, len); if (n >= 0) { /* * if it was our buffer that limited what we read, * check if SSL has additional data pending inside SSL buffers. * * Because these won't signal at the network layer with POLLIN * and if we don't realize, this data will sit there forever */ if (n == len && wsi->ssl && SSL_pending(wsi->ssl)) { if (!wsi->pending_read_list_next && !wsi->pending_read_list_prev) { if (context->pending_read_list != wsi) { /* add us to the linked list of guys with pending ssl */ if (context->pending_read_list) context->pending_read_list->pending_read_list_prev = wsi; wsi->pending_read_list_next = context->pending_read_list; wsi->pending_read_list_prev = NULL; context->pending_read_list = wsi; } } } else lws_ssl_remove_wsi_from_buffered_list(context, wsi); return n; } n = SSL_get_error(wsi->ssl, n); if (n == SSL_ERROR_WANT_READ || n == SSL_ERROR_WANT_WRITE) return LWS_SSL_CAPABLE_MORE_SERVICE; return LWS_SSL_CAPABLE_ERROR; }
/*
 * Politely close a connection (WebSocket close handshake where applicable),
 * or tear it down immediately, then free the wsi and all attached buffers.
 *
 * The polite path first flushes any truncated/extension-pending tx, sends a
 * close frame and waits for the ack; the just_kill_connection path skips all
 * of that and destroys the socket directly.  May return early (without
 * freeing) while waiting for flushes or the close ack.
 */
void
libwebsocket_close_and_free_session(struct libwebsocket_context *context,
				    struct libwebsocket *wsi,
				    enum lws_close_status reason)
{
	int n, m, ret;
	int old_state;
	unsigned char buf[LWS_SEND_BUFFER_PRE_PADDING + 2 +
			  LWS_SEND_BUFFER_POST_PADDING];
	struct lws_tokens eff_buf;

	if (!wsi)
		return;

	old_state = wsi->state;

	/* dead transport or whole-context teardown: no handshake possible */
	if (wsi->socket_is_permanently_unusable ||
	    reason == LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY)
		goto just_kill_connection;

	switch (old_state) {
	case WSI_STATE_DEAD_SOCKET:
		return;

	/* we tried the polite way... */
	case WSI_STATE_AWAITING_CLOSE_ACK:
		goto just_kill_connection;

	case WSI_STATE_FLUSHING_STORED_SEND_BEFORE_CLOSE:
		if (wsi->truncated_send_len) {
			/* still draining the stored partial send: keep waiting */
			libwebsocket_callback_on_writable(context, wsi);
			return;
		}
		lwsl_info("wsi %p completed WSI_STATE_FLUSHING_STORED_SEND_BEFORE_CLOSE\n", wsi);
		goto just_kill_connection;
	default:
		if (wsi->truncated_send_len) {
			/* pending partial send: flush it first, close later */
			lwsl_info("wsi %p entering WSI_STATE_FLUSHING_STORED_SEND_BEFORE_CLOSE\n", wsi);
			wsi->state = WSI_STATE_FLUSHING_STORED_SEND_BEFORE_CLOSE;
			libwebsocket_set_timeout(wsi,
				PENDING_FLUSH_STORED_SEND_BEFORE_CLOSE, 5);
			return;
		}
		break;
	}

	wsi->u.ws.close_reason = reason;

	if (wsi->mode == LWS_CONNMODE_WS_CLIENT_WAITING_CONNECT ||
	    wsi->mode == LWS_CONNMODE_WS_CLIENT_ISSUE_HANDSHAKE) {
		/* client never completed connecting: report as conn error */
		context->protocols[0].callback(context, wsi,
			LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
			wsi->user_space, NULL, 0);
		goto just_kill_connection;
	}

	if (wsi->mode == LWS_CONNMODE_HTTP_SERVING)
		context->protocols[0].callback(context, wsi,
			LWS_CALLBACK_CLOSED_HTTP, wsi->user_space, NULL, 0);

	if (wsi->mode == LWS_CONNMODE_HTTP_SERVING_ACCEPTED) {
		if (wsi->u.http.fd != LWS_INVALID_FILE) {
			// TODO: If we're just closing with LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY this file descriptor might leak?
			lwsl_debug("closing http file\n");
			compatible_file_close(wsi->u.http.fd);
			wsi->u.http.fd = LWS_INVALID_FILE;
			context->protocols[0].callback(context, wsi,
				LWS_CALLBACK_CLOSED_HTTP,
				wsi->user_space, NULL, 0);
		}
	}

	/*
	 * are his extensions okay with him closing? Eg he might be a mux
	 * parent and just his ch1 aspect is closing?
	 */
	if (lws_ext_callback_for_each_active(wsi,
		LWS_EXT_CALLBACK_CHECK_OK_TO_REALLY_CLOSE, NULL, 0) > 0) {
		lwsl_ext("extension vetoed close\n");
		return;
	}

	/*
	 * flush any tx pending from extensions, since we may send close packet
	 * if there are problems with send, just nuke the connection
	 */
	do {
		ret = 0;
		eff_buf.token = NULL;
		eff_buf.token_len = 0;

		/* show every extension the new incoming data */
		m = lws_ext_callback_for_each_active(wsi,
			LWS_EXT_CALLBACK_FLUSH_PENDING_TX, &eff_buf, 0);
		if (m < 0) {
			lwsl_ext("Extension reports fatal error\n");
			goto just_kill_connection;
		}
		if (m)
			/*
			 * at least one extension told us he has more
			 * to spill, so we will go around again after
			 */
			ret = 1;

		/* assuming they left us something to send, send it */
		if (eff_buf.token_len)
			if (lws_issue_raw(wsi, (unsigned char *)eff_buf.token,
					  eff_buf.token_len) !=
			    eff_buf.token_len) {
				lwsl_debug("close: ext spill failed\n");
				goto just_kill_connection;
			}
	} while (ret);

	/*
	 * signal we are closing, libwebsocket_write will
	 * add any necessary version-specific stuff. If the write fails,
	 * no worries we are closing anyway. If we didn't initiate this
	 * close, then our state has been changed to
	 * WSI_STATE_RETURNED_CLOSE_ALREADY and we will skip this.
	 *
	 * Likewise if it's a second call to close this connection after we
	 * sent the close indication to the peer already, we are in state
	 * WSI_STATE_AWAITING_CLOSE_ACK and will skip doing this a second time.
	 */
	if (old_state == WSI_STATE_ESTABLISHED &&
	    reason != LWS_CLOSE_STATUS_NOSTATUS &&
	    reason != LWS_CLOSE_STATUS_NOSTATUS_CONTEXT_DESTROY) {

		lwsl_debug("sending close indication...\n");

		/* make valgrind happy */
		memset(buf, 0, sizeof(buf));
		n = libwebsocket_write(wsi,
				&buf[LWS_SEND_BUFFER_PRE_PADDING + 2],
				0, LWS_WRITE_CLOSE);
		if (n >= 0) {
			/*
			 * we have sent a nice protocol level indication we
			 * now wish to close, we should not send anything more
			 */
			wsi->state = WSI_STATE_AWAITING_CLOSE_ACK;

			/*
			 * ...and we should wait for a reply for a bit
			 * out of politeness
			 */
			libwebsocket_set_timeout(wsi,
						 PENDING_TIMEOUT_CLOSE_ACK, 1);

			lwsl_debug("sent close indication, awaiting ack\n");

			return;
		}

		lwsl_info("close: sending close packet failed, hanging up\n");

		/* else, the send failed and we should just hang up */
	}

just_kill_connection:

	lwsl_debug("close: just_kill_connection\n");

	/*
	 * we won't be servicing or receiving anything further from this guy
	 * delete socket from the internal poll list if still present
	 */

	lws_ssl_remove_wsi_from_buffered_list(context, wsi);

	// checking return redundant since we anyway close
	remove_wsi_socket_from_fds(context, wsi);

	wsi->state = WSI_STATE_DEAD_SOCKET;

	lws_free2(wsi->rxflow_buffer);
	lws_free_header_table(wsi);

	if ((old_state == WSI_STATE_ESTABLISHED ||
	     wsi->mode == LWS_CONNMODE_WS_SERVING ||
	     wsi->mode == LWS_CONNMODE_WS_CLIENT)) {

		lws_free2(wsi->u.ws.rx_user_buffer);

		if (wsi->truncated_send_malloc) {
			/* not going to be completed... nuke it */
			lws_free2(wsi->truncated_send_malloc);
			wsi->truncated_send_len = 0;
		}
		if (wsi->u.ws.ping_payload_buf) {
			lws_free2(wsi->u.ws.ping_payload_buf);
			wsi->u.ws.ping_payload_alloc = 0;
			wsi->u.ws.ping_payload_len = 0;
			wsi->u.ws.ping_pending_flag = 0;
		}
	}

	/* tell the user it's all over for this guy */

	if (wsi->protocol && wsi->protocol->callback &&
	    ((old_state == WSI_STATE_ESTABLISHED) ||
	     (old_state == WSI_STATE_RETURNED_CLOSE_ALREADY) ||
	     (old_state == WSI_STATE_AWAITING_CLOSE_ACK) ||
	     (old_state == WSI_STATE_FLUSHING_STORED_SEND_BEFORE_CLOSE))) {
		lwsl_debug("calling back CLOSED\n");
		wsi->protocol->callback(context, wsi, LWS_CALLBACK_CLOSED,
					wsi->user_space, NULL, 0);
	} else if (wsi->mode == LWS_CONNMODE_HTTP_SERVING_ACCEPTED) {
		lwsl_debug("calling back CLOSED_HTTP\n");
		context->protocols[0].callback(context, wsi,
			LWS_CALLBACK_CLOSED_HTTP, wsi->user_space, NULL, 0 );
	} else if (wsi->mode == LWS_CONNMODE_WS_CLIENT_WAITING_SERVER_REPLY ||
		   wsi->mode == LWS_CONNMODE_WS_CLIENT_WAITING_CONNECT) {
		lwsl_debug("Connection closed before server reply\n");
		context->protocols[0].callback(context, wsi,
			LWS_CALLBACK_CLIENT_CONNECTION_ERROR,
			wsi->user_space, NULL, 0 );
	} else
		lwsl_debug("not calling back closed mode=%d state=%d\n",
			   wsi->mode, old_state);

	/* deallocate any active extension contexts */

	if (lws_ext_callback_for_each_active(wsi,
			LWS_EXT_CALLBACK_DESTROY, NULL, 0) < 0)
		lwsl_warn("extension destruction failed\n");
#ifndef LWS_NO_EXTENSIONS
	for (n = 0; n < wsi->count_active_extensions; n++)
		lws_free(wsi->active_extensions_user[n]);
#endif
	/*
	 * inform all extensions in case they tracked this guy out of band
	 * even though not active on him specifically
	 */
	if (lws_ext_callback_for_each_extension_type(context, wsi,
		LWS_EXT_CALLBACK_DESTROY_ANY_WSI_CLOSING, NULL, 0) < 0)
		lwsl_warn("ext destroy wsi failed\n");

/*	lwsl_info("closing fd=%d\n", wsi->sock); */

	/* shut down the transport unless SSL teardown is still in flight */
	if (!lws_ssl_close(wsi) && wsi->sock >= 0) {
		n = shutdown(wsi->sock, SHUT_RDWR);
		if (n)
			lwsl_debug("closing: shutdown ret %d\n", LWS_ERRNO);

		n = compatible_close(wsi->sock);
		if (n)
			lwsl_debug("closing: close ret %d\n", LWS_ERRNO);
	}

	/* outermost destroy notification for wsi (user_space still intact) */
	context->protocols[0].callback(context, wsi, LWS_CALLBACK_WSI_DESTROY,
				       wsi->user_space, NULL, 0);

	lws_free_wsi(wsi);
}
/* * guys that need POLLIN service again without waiting for network action * can force POLLIN here if not flowcontrolled, so they will get service. * * Return nonzero if anybody got their POLLIN faked */ int lws_service_flag_pending(struct lws_context *context, int tsi) { struct lws_context_per_thread *pt = &context->pt[tsi]; #ifdef LWS_OPENSSL_SUPPORT struct lws *wsi_next; #endif struct lws *wsi; int forced = 0; int n; /* POLLIN faking */ /* * 1) For all guys with already-available ext data to drain, if they are * not flowcontrolled, fake their POLLIN status */ wsi = pt->rx_draining_ext_list; while (wsi) { pt->fds[wsi->position_in_fds_table].revents |= pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN; if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) forced = 1; wsi = wsi->u.ws.rx_draining_ext_list; } #ifdef LWS_OPENSSL_SUPPORT /* * 2) For all guys with buffered SSL read data already saved up, if they * are not flowcontrolled, fake their POLLIN status so they'll get * service to use up the buffered incoming data, even though their * network socket may have nothing */ wsi = pt->pending_read_list; while (wsi) { wsi_next = wsi->pending_read_list_next; pt->fds[wsi->position_in_fds_table].revents |= pt->fds[wsi->position_in_fds_table].events & LWS_POLLIN; if (pt->fds[wsi->position_in_fds_table].revents & LWS_POLLIN) { forced = 1; /* * he's going to get serviced now, take him off the * list of guys with buffered SSL. If he still has some * at the end of the service, he'll get put back on the * list then. 
*/ lws_ssl_remove_wsi_from_buffered_list(wsi); } wsi = wsi_next; } #endif /* * 3) For any wsi who have an ah with pending RX who did not * complete their current headers, and are not flowcontrolled, * fake their POLLIN status so they will be able to drain the * rx buffered in the ah */ for (n = 0; n < context->max_http_header_pool; n++) if (pt->ah_pool[n].rxpos != pt->ah_pool[n].rxlen && !pt->ah_pool[n].wsi->hdr_parsing_completed) { pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents |= pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].events & LWS_POLLIN; if (pt->fds[pt->ah_pool[n].wsi->position_in_fds_table].revents & LWS_POLLIN) forced = 1; } return forced; }
/*
 * One pass of the platform (poll(2)-based) event loop: wait for events,
 * fake POLLIN for wsi with buffered SSL rx, then service every fd with
 * pending revents.  Returns 0 on a normal pass, 1 if the context is gone,
 * -1 on a fatal poll error.
 */
LWS_VISIBLE int
lws_plat_service(struct lws_context *context, int timeout_ms)
{
	int n;
	int m;
	char buf;
#ifdef LWS_OPENSSL_SUPPORT
	struct lws *wsi, *wsi_next;
#endif

	/* stay dead once we are dead */

	if (!context)
		return 1;

	lws_libev_run(context);

	/* let the protocol know which thread is inside the service loop */
	context->service_tid =
		context->protocols[0].callback(NULL,
				LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);

#ifdef LWS_OPENSSL_SUPPORT
	/* if we know we have non-network pending data, do not wait in poll */
	if (lws_ssl_anybody_has_buffered_read(context))
		timeout_ms = 0;
#endif
	n = poll(context->fds, context->fds_count, timeout_ms);
	context->service_tid = 0;

	/* NOTE: the opening brace of this if spans the #if/#else arms */
#ifdef LWS_OPENSSL_SUPPORT
	if (!lws_ssl_anybody_has_buffered_read(context) && n == 0) {
#else
	if (n == 0) /* poll timeout */ {
#endif
		/* timeout with nothing buffered: let timeouts be checked */
		lws_service_fd(context, NULL);
		return 0;
	}

	if (n < 0) {
		if (LWS_ERRNO != LWS_EINTR)
			return -1;
		return 0;
	}

#ifdef LWS_OPENSSL_SUPPORT
	/*
	 * For all guys with buffered SSL read data already saved up, if they
	 * are not flowcontrolled, fake their POLLIN status so they'll get
	 * service to use up the buffered incoming data, even though their
	 * network socket may have nothing
	 */

	wsi = context->pending_read_list;
	while (wsi) {
		/* removal below invalidates wsi's links: grab next first */
		wsi_next = wsi->pending_read_list_next;
		context->fds[wsi->position_in_fds_table].revents |=
			context->fds[wsi->position_in_fds_table].events &
								POLLIN;
		if (context->fds[wsi->position_in_fds_table].revents & POLLIN)
			/*
			 * he's going to get serviced now, take him off the
			 * list of guys with buffered SSL. If he still has some
			 * at the end of the service, he'll get put back on the
			 * list then.
			 */
			lws_ssl_remove_wsi_from_buffered_list(wsi);

		wsi = wsi_next;
	}
#endif

	/* any socket with events to service? */

	for (n = 0; n < context->fds_count; n++) {
		if (!context->fds[n].revents)
			continue;

		/* drain the wakeup pipe used to interrupt poll() */
		if (context->fds[n].fd == context->dummy_pipe_fds[0]) {
			if (read(context->fds[n].fd, &buf, 1) != 1)
				lwsl_err("Cannot read from dummy pipe.");
			continue;
		}

		m = lws_service_fd(context, &context->fds[n]);
		if (m < 0)
			return -1;
		/* if something closed, retry this slot */
		if (m)
			n--;
	}

	return 0;
}

/*
 * Apply the library's standard socket options to fd: optional TCP
 * keepalive (with tunable idle/interval/probes where the platform allows),
 * TCP_NODELAY, and non-blocking mode.  Returns 0 on success, 1 on any
 * setsockopt/fcntl failure.
 */
LWS_VISIBLE int
lws_plat_set_socket_options(struct lws_context *context, int fd)
{
	int optval = 1;
	socklen_t optlen = sizeof(optval);

#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
    defined(__OpenBSD__)
	struct protoent *tcp_proto;
#endif

	if (context->ka_time) {
		/* enable keepalive on this socket */
		optval = 1;
		if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE,
			       (const void *)&optval, optlen) < 0)
			return 1;

#if defined(__APPLE__) || defined(__FreeBSD__) || defined(__NetBSD__) || \
    defined(__CYGWIN__) || defined(__OpenBSD__)

		/*
		 * didn't find a way to set these per-socket, need to
		 * tune kernel systemwide values
		 */
#else
		/* set the keepalive conditions we want on it too */
		optval = context->ka_time;
		if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE,
			       (const void *)&optval, optlen) < 0)
			return 1;

		optval = context->ka_interval;
		if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL,
			       (const void *)&optval, optlen) < 0)
			return 1;

		optval = context->ka_probes;
		if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT,
			       (const void *)&optval, optlen) < 0)
			return 1;
#endif
	}

	/* Disable Nagle */
	optval = 1;
#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
    !defined(__OpenBSD__)
	if (setsockopt(fd, SOL_TCP, TCP_NODELAY,
		       (const void *)&optval, optlen) < 0)
		return 1;
#else
	tcp_proto = getprotobyname("TCP");
	if (setsockopt(fd, tcp_proto->p_proto, TCP_NODELAY,
		       &optval, optlen) < 0)
		return 1;
#endif

	/* We are nonblocking... */
	if (fcntl(fd, F_SETFL, O_NONBLOCK) < 0)
		return 1;

	return 0;
}
/*
 * Read up to len bytes from the wsi through SSL (or the plain path when the
 * connection is unencrypted), bumping read statistics.  Returns the byte
 * count read, or one of the LWS_SSL_CAPABLE_* codes.  A full read that
 * leaves plaintext buffered inside SSL puts the wsi on the per-thread
 * pending-read list, since that data never raises POLLIN.
 */
LWS_VISIBLE int
lws_ssl_capable_read(struct lws *wsi, unsigned char *buf, int len)
{
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	int n = 0, m;

	if (!wsi->ssl)
		return lws_ssl_capable_read_no_ssl(wsi, buf, len);

	lws_stats_atomic_bump(context, pt, LWSSTATS_C_API_READ, 1);

	/* clear errno so a post-read ENOTCONN check is meaningful */
	errno = 0;
	n = SSL_read(wsi->ssl, buf, len);
#if defined(LWS_WITH_ESP32)
	if (!n && errno == ENOTCONN) {
		lwsl_debug("%p: SSL_read ENOTCONN\n", wsi);
		return LWS_SSL_CAPABLE_ERROR;
	}
#endif
#if defined(LWS_WITH_STATS)
	/* record time-to-first-rx once per connection */
	if (!wsi->seen_rx) {
		lws_stats_atomic_bump(wsi->context, pt,
				      LWSSTATS_MS_SSL_RX_DELAY,
			time_in_microseconds() - wsi->accept_start_us);
		lws_stats_atomic_bump(wsi->context, pt,
				      LWSSTATS_C_SSL_CONNS_HAD_RX, 1);
		wsi->seen_rx = 1;
	}
#endif

	lwsl_debug("%p: SSL_read says %d\n", wsi, n);
	/* manpage: returning 0 means connection shut down */
	if (!n || (n == -1 && errno == ENOTCONN)) {
		wsi->socket_is_permanently_unusable = 1;
		return LWS_SSL_CAPABLE_ERROR;
	}

	if (n < 0) {
		m = lws_ssl_get_error(wsi, n);
		lwsl_debug("%p: ssl err %d errno %d\n", wsi, m, errno);

		if (m == SSL_ERROR_ZERO_RETURN ||
		    m == SSL_ERROR_SYSCALL)
			return LWS_SSL_CAPABLE_ERROR;

		/* retryable: come back when readable */
		if (m == SSL_ERROR_WANT_READ || SSL_want_read(wsi->ssl)) {
			lwsl_debug("%s: WANT_READ\n", __func__);
			lwsl_debug("%p: LWS_SSL_CAPABLE_MORE_SERVICE\n", wsi);
			return LWS_SSL_CAPABLE_MORE_SERVICE;
		}
		/* retryable: come back when writable (renegotiation) */
		if (m == SSL_ERROR_WANT_WRITE || SSL_want_write(wsi->ssl)) {
			lwsl_debug("%s: WANT_WRITE\n", __func__);
			lwsl_debug("%p: LWS_SSL_CAPABLE_MORE_SERVICE\n", wsi);
			return LWS_SSL_CAPABLE_MORE_SERVICE;
		}
		wsi->socket_is_permanently_unusable = 1;

		return LWS_SSL_CAPABLE_ERROR;
	}

	lws_stats_atomic_bump(context, pt, LWSSTATS_B_READ, n);

	if (wsi->vhost)
		wsi->vhost->conn_stats.rx += n;

	/* rx activity counts as life for ws ping/pong purposes */
	lws_restart_ws_ping_pong_timer(wsi);

	/*
	 * if it was our buffer that limited what we read,
	 * check if SSL has additional data pending inside SSL buffers.
	 *
	 * Because these won't signal at the network layer with POLLIN
	 * and if we don't realize, this data will sit there forever
	 */
	if (n != len)
		goto bail;
	if (!wsi->ssl)
		goto bail;
	if (!SSL_pending(wsi->ssl))
		goto bail;

	/* already on the pending-read list? nothing more to do */
	if (wsi->pending_read_list_next)
		return n;
	if (wsi->pending_read_list_prev)
		return n;
	if (pt->pending_read_list == wsi)
		return n;

	/* add us to the linked list of guys with pending ssl */
	if (pt->pending_read_list)
		pt->pending_read_list->pending_read_list_prev = wsi;

	wsi->pending_read_list_next = pt->pending_read_list;
	wsi->pending_read_list_prev = NULL;
	pt->pending_read_list = wsi;

	return n;
bail:
	/* no buffered SSL data left: make sure we are off the list */
	lws_ssl_remove_wsi_from_buffered_list(wsi);

	return n;
}
/*
 * Read up to len bytes from the wsi through SSL (or the plain path when the
 * connection is unencrypted), bumping read statistics.  Returns the byte
 * count read, or one of the LWS_SSL_CAPABLE_* codes.  A full read that
 * leaves plaintext buffered inside SSL puts the wsi on the per-thread
 * pending-read list, since that data never raises POLLIN.
 */
LWS_VISIBLE int
lws_ssl_capable_read(struct lws *wsi, unsigned char *buf, int len)
{
	struct lws_context *context = wsi->context;
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	int n = 0, m;

	if (!wsi->tls.ssl)
		return lws_ssl_capable_read_no_ssl(wsi, buf, len);

	lws_stats_atomic_bump(context, pt, LWSSTATS_C_API_READ, 1);

	/* clear errno so a post-read ENOTCONN check is meaningful */
	errno = 0;
	n = SSL_read(wsi->tls.ssl, buf, len);
#if defined(LWS_WITH_ESP32)
	if (!n && errno == LWS_ENOTCONN) {
		lwsl_debug("%p: SSL_read ENOTCONN\n", wsi);
		return LWS_SSL_CAPABLE_ERROR;
	}
#endif
#if defined(LWS_WITH_STATS)
	/* record time-to-first-rx once per connection */
	if (!wsi->seen_rx && wsi->accept_start_us) {
		lws_stats_atomic_bump(wsi->context, pt,
				      LWSSTATS_MS_SSL_RX_DELAY,
			lws_time_in_microseconds() - wsi->accept_start_us);
		lws_stats_atomic_bump(wsi->context, pt,
				      LWSSTATS_C_SSL_CONNS_HAD_RX, 1);
		wsi->seen_rx = 1;
	}
#endif

	lwsl_debug("%p: SSL_read says %d\n", wsi, n);
	/* manpage: returning 0 means connection shut down
	 *
	 * 2018-09-10: https://github.com/openssl/openssl/issues/1903
	 *
	 * So, in summary, if you get a 0 or -1 return from SSL_read() /
	 * SSL_write(), you should call SSL_get_error():
	 *
	 *  - If you get back SSL_ERROR_RETURN_ZERO then you know the connection
	 *    has been cleanly shutdown by the peer. To fully close the
	 *    connection you may choose to call SSL_shutdown() to send a
	 *    close_notify back.
	 *
	 *  - If you get back SSL_ERROR_SSL then some kind of internal or
	 *    protocol error has occurred. More details will be on the SSL error
	 *    queue. You can also call SSL_get_shutdown(). If this indicates a
	 *    state of SSL_RECEIVED_SHUTDOWN then you know a fatal alert has
	 *    been received from the peer (if it had been a close_notify then
	 *    SSL_get_error() would have returned SSL_ERROR_RETURN_ZERO).
	 *    SSL_ERROR_SSL is considered fatal - you should not call
	 *    SSL_shutdown() in this case.
	 *
	 *  - If you get back SSL_ERROR_SYSCALL then some kind of fatal (i.e.
	 *    non-retryable) error has occurred in a system call.
	 */
	if (n <= 0) {
		m = lws_ssl_get_error(wsi, n);
		lwsl_debug("%p: ssl err %d errno %d\n", wsi, m, errno);

		if (m == SSL_ERROR_ZERO_RETURN) /* cleanly shut down */
			return LWS_SSL_CAPABLE_ERROR;

		/* hm not retryable.. could be 0 size pkt or error  */

		if (m == SSL_ERROR_SSL || m == SSL_ERROR_SYSCALL ||
		    errno == LWS_ENOTCONN) {

			/* unclean, eg closed conn */

			wsi->socket_is_permanently_unusable = 1;

			return LWS_SSL_CAPABLE_ERROR;
		}

		/* retryable? */

		if (SSL_want_read(wsi->tls.ssl)) {
			lwsl_debug("%s: WANT_READ\n", __func__);
			lwsl_debug("%p: LWS_SSL_CAPABLE_MORE_SERVICE\n", wsi);
			return LWS_SSL_CAPABLE_MORE_SERVICE;
		}
		if (SSL_want_write(wsi->tls.ssl)) {
			lwsl_debug("%s: WANT_WRITE\n", __func__);
			lwsl_debug("%p: LWS_SSL_CAPABLE_MORE_SERVICE\n", wsi);
			return LWS_SSL_CAPABLE_MORE_SERVICE;
		}

		/* keep on trucking it seems */
		/*
		 * NOTE(review): on this fallthrough n is still <= 0, so the
		 * stats below are bumped with a non-positive count and the
		 * raw n is eventually returned — presumably deliberate for
		 * 0-size packets; confirm against upstream history.
		 */
	}

	lws_stats_atomic_bump(context, pt, LWSSTATS_B_READ, n);

	if (wsi->vhost)
		wsi->vhost->conn_stats.rx += n;

	// lwsl_hexdump_err(buf, n);

	/*
	 * if it was our buffer that limited what we read,
	 * check if SSL has additional data pending inside SSL buffers.
	 *
	 * Because these won't signal at the network layer with POLLIN
	 * and if we don't realize, this data will sit there forever
	 */
	if (n != len)
		goto bail;
	if (!wsi->tls.ssl)
		goto bail;
	if (!SSL_pending(wsi->tls.ssl))
		goto bail;

	/* already on the pending-read list? nothing more to do */
	if (wsi->tls.pending_read_list_next)
		return n;
	if (wsi->tls.pending_read_list_prev)
		return n;
	if (pt->tls.pending_read_list == wsi)
		return n;

	/* add us to the linked list of guys with pending ssl */
	if (pt->tls.pending_read_list)
		pt->tls.pending_read_list->tls.pending_read_list_prev = wsi;

	wsi->tls.pending_read_list_next = pt->tls.pending_read_list;
	wsi->tls.pending_read_list_prev = NULL;
	pt->tls.pending_read_list = wsi;

	return n;
bail:
	/* no buffered SSL data left: make sure we are off the list */
	lws_ssl_remove_wsi_from_buffered_list(wsi);

	return n;
}