static void
lws_uv_idle(uv_idle_t *handle
#if UV_VERSION_MAJOR == 0
	    , int status
#endif
)
{
	struct lws_context_per_thread *pt =
		lws_container_of(handle, struct lws_context_per_thread,
				 uv_idle);

	lwsl_debug("%s\n", __func__);

	/*
	 * If nobody is left needing forced service, the idle watcher has
	 * done its job: stop it so the loop can sleep again.
	 */
	if (lws_service_adjust_timeout(pt->context, 1, pt->tid)) {
		uv_idle_stop(handle);
		lwsl_debug("%s: done stop\n", __func__);

		return;
	}

	/* somebody has pending stuff: -1 timeout means forced service only */
	lws_plat_service_tsi(pt->context, -1, pt->tid);

	/* still somebody waiting?  We'll be back on the next idle pass */
	if (!lws_service_adjust_timeout(pt->context, 1, pt->tid))
		lwsl_debug("%s: done again\n", __func__);
}
enum lws_ssl_capable_status __lws_tls_shutdown(struct lws *wsi) { int n = SSL_shutdown(wsi->tls.ssl); lwsl_debug("SSL_shutdown=%d for fd %d\n", n, wsi->desc.sockfd); switch (n) { case 1: /* successful completion */ n = shutdown(wsi->desc.sockfd, SHUT_WR); return LWS_SSL_CAPABLE_DONE; case 0: /* needs a retry */ __lws_change_pollfd(wsi, 0, LWS_POLLIN); return LWS_SSL_CAPABLE_MORE_SERVICE; default: /* fatal error, or WANT */ n = SSL_get_error(wsi->tls.ssl, n); if (n != SSL_ERROR_SYSCALL && n != SSL_ERROR_SSL) { if (SSL_want_read(wsi->tls.ssl)) { lwsl_debug("(wants read)\n"); __lws_change_pollfd(wsi, 0, LWS_POLLIN); return LWS_SSL_CAPABLE_MORE_SERVICE_READ; } if (SSL_want_write(wsi->tls.ssl)) { lwsl_debug("(wants write)\n"); __lws_change_pollfd(wsi, 0, LWS_POLLOUT); return LWS_SSL_CAPABLE_MORE_SERVICE_WRITE; } } return LWS_SSL_CAPABLE_ERROR; } }
int lws_http_serve(struct lws *wsi, char *uri, const char *origin) { const char *mimetype; struct stat st; char path[256], sym[256]; int n, spin = 0; lwsl_notice("%s: %s %s\n", __func__, uri, origin); snprintf(path, sizeof(path) - 1, "%s/%s", origin, uri); do { spin++; if (stat(path, &st)) { lwsl_err("unable to stat %s\n", path); goto bail; } lwsl_debug(" %s mode %d\n", path, S_IFMT & st.st_mode); #if !defined(WIN32) if ((S_IFMT & st.st_mode) == S_IFLNK) { if (readlink(path, sym, sizeof(sym))) { lwsl_err("Failed to read link %s\n", path); goto bail; } lwsl_debug("symlink %s -> %s\n", path, sym); snprintf(path, sizeof(path) - 1, "%s", sym); } #endif if ((S_IFMT & st.st_mode) == S_IFDIR) { lwsl_debug("default filename append to dir\n"); snprintf(path, sizeof(path) - 1, "%s/%s/index.html", origin, uri); } } while ((S_IFMT & st.st_mode) != S_IFREG && spin < 5); if (spin == 5) { lwsl_err("symlink loop %s \n", path); } mimetype = get_mimetype(path); if (!mimetype) { lwsl_err("unknown mimetype for %s", path); goto bail; } n = lws_serve_http_file(wsi, path, mimetype, NULL, 0); if (n < 0 || ((n > 0) && lws_http_transaction_completed(wsi))) return -1; /* error or can't reuse connection: close the socket */ return 0; bail: lws_return_http_status(wsi, HTTP_STATUS_NOT_FOUND, NULL); return -1; }
/*
 * Feed len bytes at *buf into the client-side protocol state machine.
 *
 * Only acts for wsi modes in the client handshake / established ws
 * client family; any other mode returns 0 without consuming anything.
 * *buf is advanced past each byte as it is consumed.
 *
 * Returns 0 normally, -1 if the state machine failed and the
 * connection should be closed.
 */
int lws_handshake_client(struct lws *wsi, unsigned char **buf, size_t len)
{
	int m;

	switch (wsi->mode) {
	case LWSCM_WSCL_WAITING_PROXY_REPLY:
	case LWSCM_WSCL_ISSUE_HANDSHAKE:
	case LWSCM_WSCL_WAITING_SERVER_REPLY:
	case LWSCM_WSCL_WAITING_EXTENSION_CONNECT:
	case LWSCM_WS_CLIENT:
		while (len) {
			/*
			 * we were accepting input but now we stopped doing so
			 */
			if (!(wsi->rxflow_change_to & LWS_RXFLOW_ALLOW)) {
				lwsl_debug("%s: caching %ld\n", __func__,
					   (long)len);
				/* stash the remainder until rx is allowed */
				lws_rxflow_cache(wsi, *buf, 0, len);

				return 0;
			}
			if (wsi->u.ws.rx_draining_ext) {
				/*
				 * draining buffered extension rx takes
				 * priority over consuming new input
				 */
#if !defined(LWS_NO_CLIENT)
				if (wsi->mode == LWSCM_WS_CLIENT)
					m = lws_client_rx_sm(wsi, 0);
				else
#endif
					m = lws_rx_sm(wsi, 0);
				if (m < 0)
					return -1;
				continue;
			}
			/* account for what we're using in rxflow buffer */
			if (wsi->rxflow_buffer)
				wsi->rxflow_pos++;

			if (lws_client_rx_sm(wsi, *(*buf)++)) {
				lwsl_debug("client_rx_sm exited\n");

				return -1;
			}
			len--;
		}
		lwsl_debug("%s: finished with %ld\n", __func__, (long)len);

		return 0;
	default:
		break;
	}

	return 0;
}
int insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi) { struct lws_pollargs pa = { wsi->sock, LWS_POLLIN, 0 }; struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi]; int ret = 0; #ifndef LWS_NO_SERVER struct lws_pollargs pa1; #endif lwsl_debug("%s: %p: tsi=%d, sock=%d, pos-in-fds=%d\n", __func__, wsi, wsi->tsi, wsi->sock, pt->fds_count); if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) { lwsl_err("Too many fds (%d)\n", context->max_fds); return 1; } #if !defined(_WIN32) && !defined(MBED_OPERATORS) if (wsi->sock >= context->max_fds) { lwsl_err("Socket fd %d is too high (%d)\n", wsi->sock, context->max_fds); return 1; } #endif assert(wsi); assert(lws_socket_is_valid(wsi->sock)); if (context->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL, wsi->user_space, (void *) &pa, 1)) return -1; lws_pt_lock(pt); pt->count_conns++; insert_wsi(context, wsi); wsi->position_in_fds_table = pt->fds_count; pt->fds[pt->fds_count].fd = wsi->sock; pt->fds[pt->fds_count].events = LWS_POLLIN; pa.events = pt->fds[pt->fds_count].events; lws_plat_insert_socket_into_fds(context, wsi); /* external POLL support via protocol 0 */ if (context->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD, wsi->user_space, (void *) &pa, 0)) ret = -1; #ifndef LWS_NO_SERVER /* if no more room, defeat accepts on this thread */ if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1) _lws_change_pollfd(pt->wsi_listening, LWS_POLLIN, 0, &pa1); #endif lws_pt_unlock(pt); if (context->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL, wsi->user_space, (void *)&pa, 1)) ret = -1; return ret; }
LWS_VISIBLE int lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, int len) { int n = 0; if (lws_wsi_is_udp(wsi)) { #if !defined(LWS_WITH_ESP32) if (wsi->trunc_len) n = sendto(wsi->desc.sockfd, buf, len, 0, &wsi->udp->sa_pending, wsi->udp->salen_pending); else n = sendto(wsi->desc.sockfd, buf, len, 0, &wsi->udp->sa, wsi->udp->salen); #endif } else n = send(wsi->desc.sockfd, (char *)buf, len, MSG_NOSIGNAL); // lwsl_info("%s: sent len %d result %d", __func__, len, n); if (n >= 0) return n; if (LWS_ERRNO == LWS_EAGAIN || LWS_ERRNO == LWS_EWOULDBLOCK || LWS_ERRNO == LWS_EINTR) { if (LWS_ERRNO == LWS_EWOULDBLOCK) { lws_set_blocking_send(wsi); } return LWS_SSL_CAPABLE_MORE_SERVICE; } lwsl_debug("ERROR writing len %d to skt fd %d err %d / errno %d\n", len, wsi->desc.sockfd, n, LWS_ERRNO); return LWS_SSL_CAPABLE_ERROR; }
int lws_rxflow_cache(struct lws *wsi, unsigned char *buf, int n, int len) { struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi]; uint8_t *buffered; size_t blen; int ret = 0, m; /* his RX is flowcontrolled, don't send remaining now */ blen = lws_buflist_next_segment_len(&wsi->buflist, &buffered); if (blen) { if (buf >= buffered && buf + len <= buffered + blen) { /* rxflow while we were spilling prev rxflow */ lwsl_info("%s: staying in rxflow buf\n", __func__); return 1; } ret = 1; } /* a new rxflow, buffer it and warn caller */ m = lws_buflist_append_segment(&wsi->buflist, buf + n, len - n); if (m < 0) return -1; if (m) { lwsl_debug("%s: added %p to rxflow list\n", __func__, wsi); lws_dll_lws_add_front(&wsi->dll_buflist, &pt->dll_head_buflist); } return ret; }
static int rops_handle_POLLIN_raw_file(struct lws_context_per_thread *pt, struct lws *wsi, struct lws_pollfd *pollfd) { int n; if (pollfd->revents & LWS_POLLOUT) { n = lws_callback_as_writeable(wsi); if (lws_change_pollfd(wsi, LWS_POLLOUT, 0)) { lwsl_info("failed at set pollfd\n"); return LWS_HPI_RET_WSI_ALREADY_DIED; } if (n) return LWS_HPI_RET_PLEASE_CLOSE_ME; } if (pollfd->revents & LWS_POLLIN) { if (user_callback_handle_rxflow(wsi->protocol->callback, wsi, LWS_CALLBACK_RAW_RX_FILE, wsi->user_space, NULL, 0)) { lwsl_debug("raw rx callback closed it\n"); return LWS_HPI_RET_PLEASE_CLOSE_ME; } } if (pollfd->revents & LWS_POLLHUP) return LWS_HPI_RET_PLEASE_CLOSE_ME; return LWS_HPI_RET_HANDLED; }
/*
 * Client-side end-of-transaction handling.
 *
 * Returns 1 if the connection cannot be kept alive and should be
 * closed by the caller; otherwise resets the wsi ready to start a new
 * transaction on the same connection and returns 0.
 */
int LWS_WARN_UNUSED_RESULT
lws_http_transaction_completed_client(struct lws *wsi)
{
	lwsl_debug("%s: wsi %p\n", __func__, wsi);

	/* if we can't go back to accept new headers, drop the connection */
	if (wsi->u.http.connection_type != HTTP_CONNECTION_KEEP_ALIVE) {
		lwsl_info("%s: %p: close connection\n", __func__, wsi);

		return 1;
	}

	/* otherwise set ourselves up ready to go again */
	wsi->state = LWSS_CLIENT_HTTP_ESTABLISHED;
	wsi->mode = LWSCM_HTTP_CLIENT_ACCEPTED;
	wsi->u.http.content_length = 0;
	wsi->hdr_parsing_completed = 0;

	/* He asked for it to stay alive indefinitely */
	lws_set_timeout(wsi, NO_PENDING_TIMEOUT, 0);

	/*
	 * As client, nothing new is going to come until we ask for it
	 * we can drop the ah, if any
	 */
	if (wsi->u.hdr.ah) {
		/* losing unread rx here is fine: we initiate the next rx */
		wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
		lws_header_table_detach(wsi, 0);
	}

	/* If we're (re)starting on headers, need other implied init */
	wsi->u.hdr.ues = URIES_IDLE;

	lwsl_info("%s: %p: keep-alive await new transaction\n", __func__, wsi);

	return 0;
}
LWS_VISIBLE int lws_ssl_capable_write(struct lws *wsi, unsigned char *buf, int len) { int n, m; if (!wsi->tls.ssl) return lws_ssl_capable_write_no_ssl(wsi, buf, len); n = SSL_write(wsi->tls.ssl, buf, len); if (n > 0) return n; m = SSL_get_error(wsi->tls.ssl, n); if (m != SSL_ERROR_SYSCALL) { if (m == SSL_ERROR_WANT_READ || SSL_want_read(wsi->tls.ssl)) { lwsl_notice("%s: want read\n", __func__); return LWS_SSL_CAPABLE_MORE_SERVICE; } if (m == SSL_ERROR_WANT_WRITE || SSL_want_write(wsi->tls.ssl)) { lws_set_blocking_send(wsi); lwsl_notice("%s: want write\n", __func__); return LWS_SSL_CAPABLE_MORE_SERVICE; } } lwsl_debug("%s failed: %d\n",__func__, m); wsi->socket_is_permanently_unusable = 1; return LWS_SSL_CAPABLE_ERROR; }
/*
 * Fill buf with info of the requested type about the peer's x509 cert
 * on wsi's network connection.  Returns 0 on success, -1 when there is
 * no peer cert or the info is unavailable.
 */
int
lws_tls_peer_cert_info(struct lws *wsi, enum lws_tls_cert_info type,
		       union lws_tls_cert_info_results *buf, size_t len)
{
	X509 *peer;
	int ret = 0;

	wsi = lws_get_network_wsi(wsi);

	peer = SSL_get_peer_certificate(wsi->tls.ssl);
	if (!peer) {
		lwsl_debug("no peer cert\n");

		return -1;
	}

	if (type == LWS_TLS_CERT_INFO_VERIFIED)
		/* answered from the SSL session, not from the x509 itself */
		buf->verified =
			SSL_get_verify_result(wsi->tls.ssl) == X509_V_OK;
	else
		ret = lws_tls_openssl_cert_info(peer, type, buf, len);

	X509_free(peer);

	return ret;
}
/*
 * mbed3 version of the no-tls read path: pull up to len bytes from the
 * underlying mbed socket into buf.
 *
 * Returns the byte count read, LWS_SSL_CAPABLE_MORE_SERVICE when the
 * socket would block, or LWS_SSL_CAPABLE_ERROR on a real error.
 */
extern "C" LWS_VISIBLE int
lws_ssl_capable_read_no_ssl(struct lws_context *context, struct lws *wsi,
			    unsigned char *buf, int len)
{
	socket_error_t err;
	size_t _len = len;

	lwsl_debug("%s\r\n", __func__);

	(void)context;

	err = ((lws_conn *)wsi->sock)->ts->recv((char *)buf, &_len);
	if (err == SOCKET_ERROR_NONE) {
		/*
		 * %d expects int: cast the size_t byte count explicitly,
		 * passing size_t through "..." for %d is undefined
		 */
		lwsl_info("%s: got %d bytes\n", __func__, (int)_len);

		return (int)_len;
	}
#if LWS_POSIX
	if (LWS_ERRNO == LWS_EAGAIN ||
	    LWS_ERRNO == LWS_EWOULDBLOCK ||
	    LWS_ERRNO == LWS_EINTR)
#else
	if (err == SOCKET_ERROR_WOULD_BLOCK)
#endif
		return LWS_SSL_CAPABLE_MORE_SERVICE;

	lwsl_warn("error on reading from skt: %d\n", err);

	return LWS_SSL_CAPABLE_ERROR;
}
extern "C" void mbed3_tcp_stream_accept(void *sock, struct lws *wsi) { lws_conn *conn = (lws_conn *)sock; lwsl_debug("%s\r\n", __func__); conn->set_wsi(wsi); }
/*
 * Progress the tls handshake phase of a client connection.
 *
 * Returns 1 when the handshake completed and the peer cert was
 * accepted, 0 when more service is needed (come back later), or -1 on
 * fatal error with a description written to errbuf (len bytes max).
 */
int
lws_ssl_client_connect2(struct lws *wsi, char *errbuf, int len)
{
	int n = 0;

	if (lwsi_state(wsi) == LRS_WAITING_SSL) {
		lws_latency_pre(wsi->context, wsi);

		n = lws_tls_client_connect(wsi);
		lwsl_debug("%s: SSL_connect says %d\n", __func__, n);
		lws_latency(wsi->context, wsi,
			    "SSL_connect LRS_WAITING_SSL", n, n > 0);

		switch (n) {
		case LWS_SSL_CAPABLE_ERROR:
			lws_snprintf(errbuf, len, "client connect failed");

			return -1;

		case LWS_SSL_CAPABLE_DONE:
			break; /* connected */

		/* the fallthrus below are deliberate: WRITE additionally
		 * asks for writeability, READ/WRITE re-arm LRS_WAITING_SSL,
		 * and all three of the MORE_SERVICE cases return 0 */
		case LWS_SSL_CAPABLE_MORE_SERVICE_WRITE:
			lws_callback_on_writable(wsi);
			/* fallthru */
		case LWS_SSL_CAPABLE_MORE_SERVICE_READ:
			lwsi_set_state(wsi, LRS_WAITING_SSL);
			/* fallthru */
		case LWS_SSL_CAPABLE_MORE_SERVICE:
			return 0;
		}
	}

	/* handshake done: confirm we are happy with the peer's cert */
	if (lws_tls_client_confirm_peer_cert(wsi, errbuf, len))
		return -1;

	return 1;
}
/*
 * Release everything owned by wsi and then the wsi itself.  NULL is
 * tolerated and a no-op.
 */
void
lws_free_wsi(struct lws *wsi)
{
	if (!wsi)
		return;

	/*
	 * The per-session user area may have been allocated by lws or
	 * supplied by the user: only free it when lws allocated it.
	 */
	if (wsi->user_space && !wsi->user_space_externally_allocated &&
	    wsi->protocol && wsi->protocol->per_session_data_size)
		lws_free(wsi->user_space);

	lws_free_set_NULL(wsi->rxflow_buffer);
	lws_free_set_NULL(wsi->trunc_alloc);

	if (wsi->u.hdr.ah)
		/* we're closing, losing some rx is OK */
		wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;

	/* we may not have an ah, but may be on the waiting list... */
	lws_header_table_detach(wsi, 0);

	wsi->context->count_wsi_allocated--;
	lwsl_debug("%s: %p, remaining wsi %d\n", __func__, wsi,
		   wsi->context->count_wsi_allocated);

	lws_free(wsi);
}
void lws_libuv_closehandle(struct lws *wsi) { uv_handle_t* handle; if (!wsi->w_read.uv.pwatcher) return; if (wsi->told_event_loop_closed) { // assert(0); return; } lwsl_debug("%s: %p\n", __func__, wsi); wsi->told_event_loop_closed = 1; /* * The normal close path attaches the related wsi as the * handle->data. */ handle = (uv_handle_t *)wsi->w_read.uv.pwatcher; /* ensure we can only do this once */ wsi->w_read.uv.pwatcher = NULL; uv_close(handle, lws_libuv_closewsi); }
/*
 * mbed3 (older api names) no-tls read path: pull up to len bytes from
 * the underlying mbed socket into buf.
 *
 * Returns the byte count read, LWS_SSL_CAPABLE_MORE_SERVICE when the
 * socket would block, or LWS_SSL_CAPABLE_ERROR on a real error.
 */
extern "C" LWS_VISIBLE int
lws_ssl_capable_read_no_ssl(struct libwebsocket_context *context,
			    struct libwebsocket *wsi, unsigned char *buf,
			    int len)
{
	socket_error_t err;
	size_t _len = len;

	lwsl_debug("%s\r\n", __func__);

	(void)context;

	/* s/s_HACK/ts/g when mbed3 listen payload bug fixed */
	err = ((lws_conn *)wsi->sock)->s_HACK->recv((char *)buf, &_len);
	if (err == SOCKET_ERROR_NONE) {
		/*
		 * NOTE(review): _len is size_t but formatted with %d --
		 * confirm the width matches int on this target
		 */
		lwsl_info("%s: got %d bytes\n", __func__, _len);
		return _len;
	}
#if LWS_POSIX
	if (LWS_ERRNO == LWS_EAGAIN ||
	    LWS_ERRNO == LWS_EWOULDBLOCK ||
	    LWS_ERRNO == LWS_EINTR)
#else
	if (err == SOCKET_ERROR_WOULD_BLOCK)
#endif
		return LWS_SSL_CAPABLE_MORE_SERVICE;

	// !!! while listen payload mbed3 bug, don't error out if nothing */
	// if (((lws_conn *)wsi->sock)->s_HACK != ((Socket *)((lws_conn *)wsi->sock)->ts))
	//	return 0;

	lwsl_warn("error on reading from skt: %d\n", err);
	return LWS_SSL_CAPABLE_ERROR;
}
/*
 * Tear down the libuv event loop for service thread tsi.
 *
 * Stops the signal watchers and, when lws created the loop itself (ie,
 * it is not a foreign loop the user handed us), drains and closes the
 * loop and frees it.  Foreign loops are left alive for their owner.
 */
void
lws_libuv_destroyloop(struct lws_context *context, int tsi)
{
	struct lws_context_per_thread *pt = &context->pt[tsi];
	int m;

	if (!(context->options & LWS_SERVER_OPTION_LIBUV))
		return;

	if (!pt->io_loop_uv)
		return;

	if (context->use_ev_sigint)
		uv_signal_stop(&pt->w_sigint.uv_watcher);

	for (m = 0; m < ARRAY_SIZE(sigs); m++)
		uv_signal_stop(&pt->signals[m]);

	if (!pt->ev_loop_foreign) {
		uv_stop(pt->io_loop_uv);
		/* ask every remaining handle on the loop to close itself */
		uv_walk(pt->io_loop_uv, lws_uv_walk_cb, NULL);
		/* spin the loop until all the close callbacks have run */
		while (uv_run(pt->io_loop_uv, UV_RUN_NOWAIT))
			;
		m = uv_loop_close(pt->io_loop_uv);
		if (m == UV_EBUSY)
			lwsl_debug("%s: uv_loop_close: UV_EBUSY\n", __func__);
		lws_free(pt->io_loop_uv);
	}
}
/*
 * Arm (or, when reason is 0 / NO_PENDING_TIMEOUT, clear) the pending
 * timeout on wsi: the connection will be dropped if the timeout
 * expires secs seconds from now without being rearmed or cleared.
 *
 * When arming, the wsi is linked at the head of the per-thread
 * timeout doubly-linked list if it is not already on it.
 */
LWS_VISIBLE void
lws_set_timeout(struct lws *wsi, enum pending_timeout reason, int secs)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	time_t now;

	lws_pt_lock(pt);

	time(&now);

	/* arming, and not already on the timeout list: link at the head */
	if (reason && !wsi->timeout_list_prev) {
		/* our next guy is current first guy */
		wsi->timeout_list = pt->timeout_list;
		/* if there is a next guy, set his prev ptr to our next ptr */
		if (wsi->timeout_list)
			wsi->timeout_list->timeout_list_prev =
					&wsi->timeout_list;
		/* our prev ptr is first ptr */
		wsi->timeout_list_prev = &pt->timeout_list;
		/* set the first guy to be us */
		*wsi->timeout_list_prev = wsi;
	}

	lwsl_debug("%s: %p: %d secs\n", __func__, wsi, secs);
	wsi->pending_timeout_limit = now + secs;
	wsi->pending_timeout = reason;

	lws_pt_unlock(pt);

	/* clearing: also take ourselves off the timeout list */
	if (!reason)
		lws_remove_from_timeout_list(wsi);
}
extern "C" LWS_VISIBLE int lws_ssl_capable_write_no_ssl(struct lws *wsi, unsigned char *buf, int len) { socket_error_t err; lws_conn *conn = (lws_conn *)wsi->sock; lwsl_debug("%s: wsi %p: write %d (from %p)\n", __func__, (void *)wsi, len, (void *)buf); lwsl_debug("%s: wsi %p: clear writeable\n", __func__, (void *)wsi); conn->writeable = 0; err = conn->ts->send((char *)buf, len); if (err == SOCKET_ERROR_NONE) return len; #if LWS_POSIX if (LWS_ERRNO == LWS_EAGAIN || LWS_ERRNO == LWS_EWOULDBLOCK || LWS_ERRNO == LWS_EINTR) { if (LWS_ERRNO == LWS_EWOULDBLOCK) lws_set_blocking_send(wsi); #else if (err == SOCKET_ERROR_WOULD_BLOCK) return LWS_SSL_CAPABLE_MORE_SERVICE; #endif lwsl_warn("%s: wsi %p: ERROR %d writing len %d to skt\n", __func__, (void *)wsi, err, len); return LWS_SSL_CAPABLE_ERROR; } /* * Set the listening socket to listen. */ void lws_conn_listener::start(const uint16_t port) { socket_error_t err = srv.open(SOCKET_AF_INET4); if (srv.error_check(err)) return; err = srv.bind("0.0.0.0", port); if (srv.error_check(err)) return; err = srv.start_listening(TCPListener::IncomingHandler_t(this, &lws_conn_listener::onIncoming)); srv.error_check(err); }
/*
 * uv close callback used when only a raw fd (not a wsi) was attached
 * to the handle: close the fd and free the handle allocation.
 */
static void
lws_libuv_closewsi_m(uv_handle_t *handle)
{
	lws_sockfd_type fd = (lws_sockfd_type)(lws_intptr_t)handle->data;

	lwsl_debug("%s: sockfd %d\n", __func__, fd);

	compatible_close(fd);
	lws_free(handle);
}
/*
 * Server-side end-of-transaction handling for http/1.1 keep-alive.
 *
 * Returns 1 if the connection cannot be reused and should be closed,
 * or 0 after resetting the wsi ready for the next transaction on the
 * same connection.
 */
LWS_VISIBLE int LWS_WARN_UNUSED_RESULT
lws_http_transaction_completed(struct lws *wsi)
{
	int n = NO_PENDING_TIMEOUT;

	lws_access_log(wsi);

	lwsl_debug("%s: wsi %p\n", __func__, wsi);
	/* if we can't go back to accept new headers, drop the connection */
	if (wsi->u.http.connection_type != HTTP_CONNECTION_KEEP_ALIVE) {
		lwsl_info("%s: %p: close connection\n", __func__, wsi);

		return 1;
	}

	/* otherwise set ourselves up ready to go again */
	wsi->state = LWSS_HTTP;
	wsi->mode = LWSCM_HTTP_SERVING;
	/* reset of non [0] protocols (and freeing of user_space) is deferred */
	wsi->u.http.content_length = 0;
	wsi->hdr_parsing_completed = 0;
#ifdef LWS_WITH_ACCESS_LOG
	wsi->access_log.sent = 0;
#endif

	/* only arm an idle timeout if the vhost configured one */
	if (wsi->vhost->keepalive_timeout)
		n = PENDING_TIMEOUT_HTTP_KEEPALIVE_IDLE;
	lws_set_timeout(wsi, n, wsi->vhost->keepalive_timeout);

	/*
	 * We already know we are on http1.1 / keepalive and the next thing
	 * coming will be another header set.
	 *
	 * If there is no pending rx and we still have the ah, drop it and
	 * reacquire a new ah when the new headers start to arrive. (Otherwise
	 * we needlessly hog an ah indefinitely.)
	 *
	 * However if there is pending rx and we know from the keepalive state
	 * that is already at least the start of another header set, simply
	 * reset the existing header table and keep it.
	 */
	if (wsi->u.hdr.ah) {
		lwsl_info("%s: wsi->more_rx_waiting=%d\n", __func__,
			  wsi->more_rx_waiting);

		if (!wsi->more_rx_waiting) {
			wsi->u.hdr.ah->rxpos = wsi->u.hdr.ah->rxlen;
			lws_header_table_detach(wsi, 1);
		} else
			lws_header_table_reset(wsi, 1);
	}

	/* If we're (re)starting on headers, need other implied init */
	wsi->u.hdr.ues = URIES_IDLE;

	lwsl_info("%s: %p: keep-alive await new transaction\n", __func__, wsi);

	return 0;
}
/*
 * Progress / complete the server-side tls handshake on wsi.
 *
 * On success, logs the client cert CN (if any) and makes sure the wsi
 * is on the per-thread pending-tls list when buffered tls rx remains,
 * so it still gets serviced without fresh socket rx.  Otherwise
 * translates the SSL error into the matching LWS_SSL_CAPABLE_* code,
 * adjusting the pollfd for WANT_READ / WANT_WRITE retries.
 */
enum lws_ssl_capable_status
lws_tls_server_accept(struct lws *wsi)
{
	union lws_tls_cert_info_results ir;
	int m, n = SSL_accept(wsi->tls.ssl);
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];

	if (n == 1) {
		n = lws_tls_peer_cert_info(wsi, LWS_TLS_CERT_INFO_COMMON_NAME,
					   &ir, sizeof(ir.ns.name));
		if (!n)
			lwsl_notice("%s: client cert CN '%s'\n", __func__,
				    ir.ns.name);
		else
			lwsl_info("%s: no client cert CN\n", __func__);

		lws_openssl_describe_cipher(wsi);

		/*
		 * tls rx may already be buffered inside openssl: join the
		 * pending-tls list so we get serviced for it anyway
		 */
		if (SSL_pending(wsi->tls.ssl) &&
		    lws_dll_is_detached(&wsi->tls.dll_pending_tls,
					&pt->tls.dll_pending_tls_head))
			lws_dll_add_head(&wsi->tls.dll_pending_tls,
					 &pt->tls.dll_pending_tls_head);

		return LWS_SSL_CAPABLE_DONE;
	}

	lws_tls_err_describe();

	m = lws_ssl_get_error(wsi, n);

	if (m == SSL_ERROR_SYSCALL || m == SSL_ERROR_SSL)
		return LWS_SSL_CAPABLE_ERROR;

	if (m == SSL_ERROR_WANT_READ ||
	    (m != SSL_ERROR_ZERO_RETURN && SSL_want_read(wsi->tls.ssl))) {
		if (lws_change_pollfd(wsi, 0, LWS_POLLIN)) {
			lwsl_info("%s: WANT_READ change_pollfd failed\n",
				  __func__);

			return LWS_SSL_CAPABLE_ERROR;
		}

		lwsl_info("SSL_ERROR_WANT_READ: m %d\n", m);

		return LWS_SSL_CAPABLE_MORE_SERVICE_READ;
	}
	if (m == SSL_ERROR_WANT_WRITE || SSL_want_write(wsi->tls.ssl)) {
		lwsl_debug("%s: WANT_WRITE\n", __func__);

		if (lws_change_pollfd(wsi, 0, LWS_POLLOUT)) {
			lwsl_info("%s: WANT_WRITE change_pollfd failed\n",
				  __func__);

			return LWS_SSL_CAPABLE_ERROR;
		}

		return LWS_SSL_CAPABLE_MORE_SERVICE_WRITE;
	}

	return LWS_SSL_CAPABLE_ERROR;
}
/*
 * Periodic libuv timer callback: run a service pass with no pollfd,
 * which performs only the timeout checking.
 */
static void
lws_uv_timeout_cb(uv_timer_t *timer)
{
	struct lws_context_per_thread *pt =
			container_of(timer, struct lws_context_per_thread,
				     uv_timeout_watcher);

	lwsl_debug("%s\n", __func__);

	lws_service_fd_tsi(pt->context, NULL, pt->tid);
}
extern "C" void mbed3_tcp_stream_bind(void *sock, int port, struct lws *wsi) { lws_conn_listener *srv = (lws_conn_listener *)sock; lwsl_debug("%s\r\n", __func__); /* associate us with the listening wsi */ ((lws_conn *)srv)->set_wsi(wsi); mbed::util::FunctionPointer1<void, uint16_t> fp(srv, &lws_conn_listener::start); minar::Scheduler::postCallback(fp.bind(port)); }
/*
 * Adopt an already-accepted socket fd into the context as a new wsi.
 *
 * On failure the fd is closed and NULL returned; on success the new
 * wsi is returned, inserted into the fds table (or started on tls
 * negotiation when ssl is enabled on the context).
 */
LWS_VISIBLE struct lws *
lws_adopt_socket(struct lws_context *context, lws_sockfd_type accept_fd)
{
	struct lws *new_wsi = lws_create_new_server_wsi(context);

	if (!new_wsi) {
		compatible_close(accept_fd);
		return NULL;
	}

	lwsl_debug("%s: new wsi %p\n", __func__, new_wsi);

	new_wsi->sock = accept_fd;

	/* the transport is accepted... give him time to negotiate */
	lws_set_timeout(new_wsi, PENDING_TIMEOUT_ESTABLISH_WITH_SERVER,
			context->timeout_secs);

#if LWS_POSIX == 0
	mbed3_tcp_stream_accept(accept_fd, new_wsi);
#endif

	/*
	 * A new connection was accepted. Give the user a chance to
	 * set properties of the newly created wsi. There's no protocol
	 * selected yet so we issue this to protocols[0]
	 */
	if ((context->protocols[0].callback)(new_wsi,
	     LWS_CALLBACK_SERVER_NEW_CLIENT_INSTANTIATED, NULL, NULL, 0)) {
		compatible_close(new_wsi->sock);
		lws_free(new_wsi);
		return NULL;
	}

	lws_libev_accept(new_wsi, new_wsi->sock);
	lws_libuv_accept(new_wsi, new_wsi->sock);

	if (!LWS_SSL_ENABLED(context)) {
		/* no tls on this context: straight into the fds table */
		if (insert_wsi_socket_into_fds(context, new_wsi))
			goto fail;
	} else {
		/* tls: begin the ssl init / handshake phase first */
		new_wsi->mode = LWSCM_SSL_INIT;
		if (lws_server_socket_service_ssl(new_wsi, accept_fd))
			goto fail;
	}

	return new_wsi;

fail:
	lwsl_err("%s: fail\n", __func__);
	lws_close_free_wsi(new_wsi, LWS_CLOSE_STATUS_NOSTATUS);

	return NULL;
}
/*
 * Logically close wsi from the libuv event loop's point of view.
 *
 * Returns 1 to tell the caller not to complete the wsi close himself
 * (the uv close callback will finish asynchronously), or 0 when the
 * wsi has no valid socket so the normal close path can proceed.
 */
static int
elops_wsi_logical_close_uv(struct lws *wsi)
{
	if (!lws_socket_is_valid(wsi->desc.sockfd))
		return 0;

	/* listeners and event pipes have an active poll watcher to stop */
	if (wsi->listener || wsi->event_pipe) {
		lwsl_debug("%s: %p: %d %d stop listener / pipe poll\n",
			   __func__, wsi, wsi->listener, wsi->event_pipe);
		if (wsi->w_read.uv.pwatcher)
			uv_poll_stop(wsi->w_read.uv.pwatcher);
	}
	lwsl_debug("%s: lws_libuv_closehandle: wsi %p\n", __func__, wsi);

	/*
	 * libuv has to do his own close handle processing asynchronously
	 */
	lws_libuv_closehandle(wsi);

	return 1; /* do not complete the wsi close, uv close cb will do it */
}
/*
 * Copy header `index` from the parent (onward) connection into the
 * outgoing header block of wsi, using temp (temp_len bytes) as
 * scratch.  A missing header is not an error.
 *
 * Returns 0 on success or when absent, -1 on copy / append failure.
 */
static int
proxy_header(struct lws *wsi, struct lws *par, unsigned char *temp,
	     int temp_len, int index, unsigned char **p, unsigned char *end)
{
	int hlen = lws_hdr_total_length(par, index);

	if (hlen < 1) {
		lwsl_debug("%s: no index %d:\n", __func__, index);

		return 0;
	}

	if (lws_hdr_copy(par, (char *)temp, temp_len, index) < 0)
		return -1;

	lwsl_debug("%s: index %d: %s\n", __func__, index, (char *)temp);

	if (lws_add_http_header_by_token(wsi, index, temp, hlen, p, end))
		return -1;

	return 0;
}
/*
 * Wrapper for SSL_get_error() on wsi's ssl object with debug logging.
 * Returns 99 when the wsi has no ssl at all.
 */
int
lws_ssl_get_error(struct lws *wsi, int n)
{
	int err;

	if (!wsi->ssl)
		return 99;

	err = SSL_get_error(wsi->ssl, n);
	lwsl_debug("%s: %p %d -> %d\n", __func__, wsi->ssl, n, err);

	return err;
}
/*
 * Adjust the libuv poll watcher on wsi according to flags: a
 * LWS_EV_START / LWS_EV_STOP bit combined with LWS_EV_READ /
 * LWS_EV_WRITE bits.  The accumulated interest is tracked in
 * w->actual_events, and the watcher is stopped entirely once no
 * events remain.
 */
static void
elops_io_uv(struct lws *wsi, int flags)
{
	struct lws_context_per_thread *pt = &wsi->context->pt[(int)wsi->tsi];
	struct lws_io_watcher *w = &wsi->w_read;
	int current_events = w->actual_events & (UV_READABLE | UV_WRITABLE);

	lwsl_debug("%s: %p: %d\n", __func__, wsi, flags);

	/* w->context is set after the loop is initialized */

	if (!pt->uv.io_loop || !w->context) {
		lwsl_info("%s: no io loop yet\n", __func__);

		return;
	}

	/* flags must combine a start/stop bit with a read/write bit */
	if (!((flags & (LWS_EV_START | LWS_EV_STOP)) &&
	      (flags & (LWS_EV_READ | LWS_EV_WRITE)))) {
		lwsl_err("%s: assert: flags %d", __func__, flags);
		assert(0);
	}

	if (!w->uv.pwatcher || wsi->told_event_loop_closed) {
		lwsl_err("%s: no watcher\n", __func__);

		return;
	}

	if (flags & LWS_EV_START) {
		if (flags & LWS_EV_WRITE)
			current_events |= UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events |= UV_READABLE;

		uv_poll_start(w->uv.pwatcher, current_events, lws_io_cb);
	} else {
		if (flags & LWS_EV_WRITE)
			current_events &= ~UV_WRITABLE;

		if (flags & LWS_EV_READ)
			current_events &= ~UV_READABLE;

		/* nothing left to watch for?  stop polling entirely */
		if (!(current_events & (UV_READABLE | UV_WRITABLE)))
			uv_poll_stop(w->uv.pwatcher);
		else
			uv_poll_start(w->uv.pwatcher, current_events,
				      lws_io_cb);
	}

	w->actual_events = current_events;
}