/*
 * Read available data from a TCP connection (plain or SSL) into a freshly
 * MG_MALLOC'ed buffer and deliver it via mg_if_recv_tcp_cb(), which takes
 * ownership of the buffer (own == 1) and is responsible for freeing it.
 */
static void mg_handle_tcp_read(struct mg_connection *conn) {
  int n = 0;
  char *buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE);
  if (buf == NULL) {
    DBG(("OOM"));
    return;
  }
#if MG_ENABLE_SSL
  if (conn->flags & MG_F_SSL) {
    if (conn->flags & MG_F_SSL_HANDSHAKE_DONE) {
      /* SSL library may have more bytes ready to read than we ask to read.
       * Therefore, read in a loop until we read everything. Without the loop,
       * we skip to the next select() cycle which can just timeout. */
      while ((n = mg_ssl_if_read(conn, buf, MG_TCP_RECV_BUFFER_SIZE)) > 0) {
        DBG(("%p %d bytes <- %d (SSL)", conn, n, conn->sock));
        /* Ownership of buf transfers to the callee; NULL it so the
         * MG_FREE() after the loop is a no-op if we exit here. */
        mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */);
        buf = NULL;
        if (conn->flags & MG_F_CLOSE_IMMEDIATELY) break;
        /* buf has been freed, we need a new one. */
        buf = (char *) MG_MALLOC(MG_TCP_RECV_BUFFER_SIZE);
        if (buf == NULL) break;
      }
      /* buf is either an unused allocation or NULL here; free(NULL) is OK. */
      MG_FREE(buf);
      /* MG_SSL_WANT_READ just means "no more data now"; anything else
       * negative is a hard SSL error. */
      if (n < 0 && n != MG_SSL_WANT_READ) conn->flags |= MG_F_CLOSE_IMMEDIATELY;
    } else {
      /* Handshake not finished yet: keep driving it instead of reading. */
      MG_FREE(buf);
      mg_ssl_begin(conn);
      return;
    }
  } else
#endif
  {
    n = (int) MG_RECV_FUNC(conn->sock, buf,
                           recv_avail_size(conn, MG_TCP_RECV_BUFFER_SIZE), 0);
    DBG(("%p %d bytes (PLAIN) <- %d", conn, n, conn->sock));
    if (n > 0) {
      /* Callee takes ownership of buf. */
      mg_if_recv_tcp_cb(conn, buf, n, 1 /* own */);
    } else {
      MG_FREE(buf);
    }
    if (n == 0) {
      /* Orderly shutdown of the socket, try flushing output. */
      conn->flags |= MG_F_SEND_AND_CLOSE;
    } else if (n < 0 && mg_is_error()) {
      /* mg_is_error() filters out EAGAIN/EWOULDBLOCK-style soft errors. */
      conn->flags |= MG_F_CLOSE_IMMEDIATELY;
    }
  }
}
static void mg_lwip_ssl_recv(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; while (1) { char *buf = (char *) malloc(MG_LWIP_SSL_READ_SIZE); if (buf == NULL) return; int ret = SSL_read(nc->ssl, buf, MG_LWIP_SSL_READ_SIZE); int err = SSL_get_error(nc->ssl, ret); DBG(("%p SSL_read %u = %d, %d", nc, MG_LWIP_SSL_READ_SIZE, ret, err)); if (ret <= 0) { free(buf); if (err == SSL_ERROR_WANT_WRITE) { nc->flags |= MG_F_WANT_WRITE; return; } else if (err == SSL_ERROR_WANT_READ) { /* Nothing, we are callback-driven. */ cs->err = 0; return; } else { LOG(LL_ERROR, ("SSL read error: %d", err)); system_os_post(MG_TASK_PRIORITY, MG_SIG_CLOSE_CONN, (uint32_t) nc); } } else { mg_if_recv_tcp_cb(nc, buf, ret); /* callee takes over data */ } } }
void mg_lwip_ssl_recv(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; /* Don't deliver data before connect callback */ if (nc->flags & MG_F_CONNECTING) return; while (nc->recv_mbuf.len < MG_LWIP_SSL_RECV_MBUF_LIMIT) { char *buf = (char *) MG_MALLOC(MG_LWIP_SSL_IO_SIZE); if (buf == NULL) return; int ret = mg_ssl_if_read(nc, buf, MG_LWIP_SSL_IO_SIZE); DBG(("%p %p SSL_read %u = %d", nc, cs->rx_chain, MG_LWIP_SSL_IO_SIZE, ret)); if (ret <= 0) { MG_FREE(buf); if (ret == MG_SSL_WANT_WRITE) { nc->flags |= MG_F_WANT_WRITE; return; } else if (ret == MG_SSL_WANT_READ) { /* * Nothing to do in particular, we are callback-driven. * What we definitely do not need anymore is SSL reading (nothing left). */ nc->flags &= ~MG_F_WANT_READ; cs->err = 0; return; } else { mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); return; } } else { mg_if_recv_tcp_cb(nc, buf, ret, 1 /* own */); } } }
void mg_lwip_ssl_recv(struct mg_connection *nc) { struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock; /* Don't deliver data before connect callback */ if (nc->flags & MG_F_CONNECTING) return; while (nc->recv_mbuf.len < MG_LWIP_SSL_RECV_MBUF_LIMIT) { char *buf = (char *) malloc(MG_LWIP_SSL_IO_SIZE); if (buf == NULL) return; int ret = SSL_read(nc->ssl, buf, MG_LWIP_SSL_IO_SIZE); int err = SSL_get_error(nc->ssl, ret); DBG(("%p SSL_read %u = %d, %d", nc, MG_LWIP_SSL_IO_SIZE, ret, err)); if (ret <= 0) { free(buf); if (err == SSL_ERROR_WANT_WRITE) { nc->flags |= MG_F_WANT_WRITE; return; } else if (err == SSL_ERROR_WANT_READ) { /* Nothing, we are callback-driven. */ cs->err = 0; return; } else { LOG(LL_ERROR, ("SSL read error: %d", err)); mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc); } } else { mg_if_recv_tcp_cb(nc, buf, ret); /* callee takes over data */ } } if (nc->recv_mbuf.len >= MG_LWIP_SSL_RECV_MBUF_LIMIT) { nc->flags |= MG_F_WANT_READ; } else { nc->flags &= ~MG_F_WANT_READ; } }
/*
 * lwIP tcp recv callback: copy incoming data out of the pbuf (chain) into a
 * heap buffer and deliver it via mg_if_recv_tcp_cb(), which takes ownership.
 * A NULL pbuf means the remote end closed the connection.
 *
 * Fix: use p->tot_len (total length of the whole pbuf chain) instead of
 * p->len (length of the first segment only). lwIP may deliver a chain of
 * segments; pbuf_copy_partial() walks the chain, but with p->len only the
 * head segment was copied and the rest was silently discarded by
 * pbuf_free(p).
 */
static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb,
                                 struct pbuf *p, err_t err) {
  struct mg_connection *nc = (struct mg_connection *) arg;
  char *data;
  size_t len = (p != NULL ? p->tot_len : 0);
  DBG(("%p %u %d", nc, len, err));
  if (nc == NULL) {
    /* Tombstoned connection - nobody is listening, drop the pcb. */
    tcp_abort(tpcb);
    return ERR_ARG;
  }
  if (p == NULL) {
    /* Remote side closed the connection. */
    system_os_post(MG_TASK_PRIORITY, MG_SIG_CLOSE_CONN, (uint32_t) nc);
    return ERR_OK;
  }
  data = (char *) malloc(len);
  if (data == NULL) {
    DBG(("OOM"));
    /* Returning != ERR_OK leaves the pbuf with lwIP for redelivery. */
    return ERR_MEM;
  }
  pbuf_copy_partial(p, data, len, 0);
  pbuf_free(p);
  mg_if_recv_tcp_cb(nc, data, len); /* callee takes over data */
  if (nc->send_mbuf.len > 0) {
    mg_lwip_mgr_schedule_poll(nc->mgr);
  }
  return ERR_OK;
}
/*
 * lwIP tcp recv callback: append the incoming pbuf (chain) to the
 * connection's rx_chain, then drain the chain segment by segment into heap
 * buffers delivered via mg_if_recv_tcp_cb() (callee takes ownership).
 * A NULL pbuf means the remote end closed the connection; a NULL nc means
 * the connection was tombstoned and the pcb must be aborted.
 */
static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb,
                                 struct pbuf *p, err_t err) {
  struct mg_connection *nc = (struct mg_connection *) arg;
  DBG(("%p %p %u %d", nc, tpcb, (p != NULL ? p->tot_len : 0), err));
  if (p == NULL) {
    /* Remote side closed the connection. */
    if (nc != NULL) {
      system_os_post(MG_TASK_PRIORITY, MG_SIG_CLOSE_CONN, (uint32_t) nc);
    } else {
      /* Tombstoned connection, do nothing. */
    }
    return ERR_OK;
  } else if (nc == NULL) {
    tcp_abort(tpcb);
    return ERR_ARG;
  }
  struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
  /*
   * If we get a chain of more than one segment at once, we need to bump
   * refcount on the subsequent bufs to make them independent.
   */
  if (p->next != NULL) {
    struct pbuf *q = p->next;
    for (; q != NULL; q = q->next) pbuf_ref(q);
  }
  if (cs->rx_chain == NULL) {
    /* No pending data - this pbuf becomes the chain head;
     * rx_offset tracks how much of the head segment was already consumed. */
    cs->rx_chain = p;
    cs->rx_offset = 0;
  } else {
    pbuf_chain(cs->rx_chain, p);
  }
#ifdef SSL_KRYPTON
  if (nc->ssl != NULL) {
    /* SSL connections consume rx_chain through the SSL layer instead. */
    if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
      mg_lwip_ssl_recv(nc);
    } else {
      mg_lwip_ssl_do_hs(nc);
    }
    return ERR_OK;
  }
#endif
  /* Plain TCP: deliver one segment at a time, head of chain first. */
  while (cs->rx_chain != NULL) {
    struct pbuf *seg = cs->rx_chain;
    size_t len = (seg->len - cs->rx_offset);
    char *data = (char *) malloc(len);
    if (data == NULL) {
      DBG(("OOM"));
      /* NOTE(review): rx_chain keeps what's left; presumably redelivery /
       * a later poll resumes draining - confirm against callers. */
      return ERR_MEM;
    }
    pbuf_copy_partial(seg, data, len, cs->rx_offset);
    mg_if_recv_tcp_cb(nc, data, len); /* callee takes over data */
    cs->rx_offset += len;
    if (cs->rx_offset == cs->rx_chain->len) {
      /* Head segment fully consumed: unlink and free just that segment. */
      cs->rx_chain = pbuf_dechain(cs->rx_chain);
      pbuf_free(seg);
      cs->rx_offset = 0;
    }
  }
  if (nc->send_mbuf.len > 0) {
    /* Receiving may have produced output (e.g. a response) - flush it. */
    mg_lwip_mgr_schedule_poll(nc->mgr);
  }
  return ERR_OK;
}
/*
 * lwIP tcp recv callback: append the incoming pbuf (chain) to the
 * connection's rx_chain, then drain the chain segment by segment into heap
 * buffers delivered via mg_if_recv_tcp_cb() (callee takes ownership).
 * A NULL pbuf means the remote end closed the connection; a NULL nc means
 * the connection was tombstoned and the pcb must be aborted.
 * Includes a workaround for the small pbuf pool on the ESP SDK.
 */
static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb,
                                 struct pbuf *p, err_t err) {
  struct mg_connection *nc = (struct mg_connection *) arg;
  DBG(("%p %p %u %d", nc, tpcb, (p != NULL ? p->tot_len : 0), err));
  if (p == NULL) {
    /* Remote side closed the connection. */
    if (nc != NULL) {
      mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
    } else {
      /* Tombstoned connection, do nothing. */
    }
    return ERR_OK;
  } else if (nc == NULL) {
    tcp_abort(tpcb);
    return ERR_ARG;
  }
  struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
  /*
   * If we get a chain of more than one segment at once, we need to bump
   * refcount on the subsequent bufs to make them independent.
   */
  if (p->next != NULL) {
    struct pbuf *q = p->next;
    for (; q != NULL; q = q->next) pbuf_ref(q);
  }
  if (cs->rx_chain == NULL) {
    /* No pending data - this pbuf becomes the chain head;
     * rx_offset tracks how much of the head segment was already consumed. */
    cs->rx_chain = p;
    cs->rx_offset = 0;
  } else {
    if (pbuf_clen(cs->rx_chain) >= 4) {
      /* ESP SDK has a limited pool of 5 pbufs. We must not hog them all or RX
       * will be completely blocked. We already have at least 4 in the chain,
       * this one is, so we have to make a copy and release this one. */
      struct pbuf *np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
      if (np != NULL) {
        pbuf_copy(np, p);
        pbuf_free(p);
        p = np;
      }
    }
    pbuf_chain(cs->rx_chain, p);
  }
#ifdef SSL_KRYPTON
  if (nc->ssl != NULL) {
    /* SSL connections consume rx_chain through the SSL layer instead. */
    if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
      mg_lwip_ssl_recv(nc);
    } else {
      mg_lwip_ssl_do_hs(nc);
    }
    return ERR_OK;
  }
#endif
  /* Plain TCP: deliver one segment at a time, head of chain first. */
  while (cs->rx_chain != NULL) {
    struct pbuf *seg = cs->rx_chain;
    size_t len = (seg->len - cs->rx_offset);
    char *data = (char *) malloc(len);
    if (data == NULL) {
      DBG(("OOM"));
      /* NOTE(review): rx_chain keeps what's left; presumably redelivery /
       * a later poll resumes draining - confirm against callers. */
      return ERR_MEM;
    }
    pbuf_copy_partial(seg, data, len, cs->rx_offset);
    mg_if_recv_tcp_cb(nc, data, len); /* callee takes over data */
    cs->rx_offset += len;
    if (cs->rx_offset == cs->rx_chain->len) {
      /* Head segment fully consumed: unlink and free just that segment. */
      cs->rx_chain = pbuf_dechain(cs->rx_chain);
      pbuf_free(seg);
      cs->rx_offset = 0;
    }
  }
  if (nc->send_mbuf.len > 0) {
    /* Receiving may have produced output (e.g. a response) - flush it. */
    mg_lwip_mgr_schedule_poll(nc->mgr);
  }
  return ERR_OK;
}