static void error_cb(void *arg, err_t err)
{
	// Invoked by lwIP when a fatal error happens on the connection.
	//
	// from lwIP documentation:
	//
	// The error callback function does not get the pcb passed to it as a
	// parameter since the pcb may already have been deallocated.
	//
	phase_expected(PHASE_EVENTS);

	//debug("**** error_cb(arg 0x%pp, err %d)\n", arg, err);
	outlet_t *outlet = (outlet_t *)arg;
	if (outlet == 0)
		return;		// outlet already gone

	// the PCB is unusable (possibly freed) by the time this fires
	outlet->tcp = 0;

	// Do not throw an exception if there is an outstanding request - return an
	// error instead.
	//
	term_t reason = lwip_err_to_term(err);
	if (!outlet->cr_in_progress)
	{
		outlet_signal_exit_N(outlet, outlet->oid, reason);
		return;
	}

	cr_cancel_deferred(outlet);
	inet_async_error(outlet->oid, outlet->cr_reply_to, ASYNC_REF, reason);
}
static void send_timeout_cb(void *arg) { phase_expected(PHASE_EVENTS); outlet_t *ol = (outlet_t *)arg; assert(ol != 0); assert(ol->send_timeout_set); assert(ol->send_in_progress); //debug("send_timeout_cb: %pt\n", T(ol->oid)); ol->send_timeout_set = 0; ol->send_in_progress = 0; inet_async_error(ol->oid, ol->send_reply_to, ASYNC_REF, A_TIMEOUT); }
static void ol_tcp_detach(outlet_t *ol)
{
	//
	// Sever all outstanding relationships with processes and subsystems
	//
	// - a lwIP PCB
	// - a process waiting for an empty send queue
	// - a process waits until data is sent
	// - a process waiting of a connection or data (optionally, with timeout)
	//
	if (ol->tcp != 0)
	{
		tcp_arg(ol->tcp, 0);	// quench last lwIP callback calls
		tcp_close(ol->tcp);
		ol->tcp = 0;
	}

	//
	// Take care as the outlet may be closed by
	// inet_reply_error/inet_async_error() if the memory is tight.
	//

	//
	// Outstanding empty queue subscriptions ignored - prim_inet:close() exits
	// after a timeout.
	//

	// Capture reply targets first; the deferred operations are cancelled
	// here, the (possibly outlet-destroying) replies are sent below.
	term_t send_reply_to = noval;
	term_t cr_reply_to = noval;

	if (ol->send_in_progress)
	{
		send_cancel_deferred(ol);
		send_reply_to = ol->send_reply_to;
	}

	if (ol->cr_in_progress)
	{
		cr_cancel_deferred(ol);
		cr_reply_to = ol->cr_reply_to;
	}

	// ol must not be touched after the first reply below - save the oid
	term_t saved_oid = ol->oid;

	if (send_reply_to != noval)
		inet_reply_error(saved_oid, send_reply_to, A_CLOSED);
	if (cr_reply_to != noval)
		inet_async_error(saved_oid, cr_reply_to, ASYNC_REF, A_CLOSED);
}
static void tcp_on_accept_timout(acc_pend_t *pend)
{
	// An accept request timed out: no connection arrived from the network to
	// satisfy `pend`. Unlink the request from the outlet's accepting ring,
	// recycle it, and reply 'timeout' to the waiting process.
	// NOTE(review): name has a typo (timout) - kept, callers reference it.

	// no accept requests from network to meet `pend` of a process
	outlet_t *ol = pend->outlet;
	term_t reply_to = pend->reply_to;

	assert(pend->timeout_set);
	assert(ol->accepting != 0);

	// unlink pend from the doubly-linked ring
	pend->next->prev = pend->prev;
	pend->prev->next = pend->next;

	// pend->next still points at the old neighbour; if pend pointed to
	// itself it was the sole element and the ring becomes empty
	if (ol->accepting == pend)
		ol->accepting = (pend->next != pend)
			?pend->next
			:0;

	reuse_pending(&ol->free_pends, pend);

	// NOTE(review): other sites warn the outlet may be destroyed inside
	// inet_async_error() when memory is tight; pend is already recycled here
	inet_async_error(ol->oid, reply_to, ASYNC_REF, A_TIMEOUT);
}
static void ol_tcp_acc_destroy_private(outlet_t *ol)
{
	// Tears down a listening outlet: fails all waiting acceptors with
	// 'closed', closes all accepted-but-unclaimed connections, and frees
	// the pending-request nodes.
	ol_tcp_close(ol);

	// Take ownership of bits of the outlet that require a cleanup. This is
	// needed as the outlet may be destroyed within inet_async_error() call.
	//
	acc_pend_t *accepting = ol->accepting;
	ol->accepting = 0;
	acc_pend_t *accepted = ol->accepted;
	ol->accepted = 0;
	memnode_t *pend_nodes = ol->pend_nodes;
	ol->pend_nodes = 0;
	term_t saved_oid = ol->oid;

	// break the ring
	if (accepting != 0)
		accepting->prev->next = 0;
	acc_pend_t *pend = accepting;
	while (pend != 0)
	{
		if (pend->timeout_set)
			tcp_accept_untimeout(pend);
		inet_async_error(saved_oid, pend->reply_to, ASYNC_REF, A_CLOSED);
		pend = pend->next;
	}

	// break the ring
	if (accepted != 0)
		accepted->prev->next = 0;
	// FIX: walk the saved list head; ol->accepted was reset to 0 above, so
	// the previous 'pend = ol->accepted' made this loop a no-op and every
	// already-accepted pending connection was left open (leaked).
	pend = accepted;
	while (pend != 0)
	{
		tcp_close_pending(pend);
		pend = pend->next;
	}

	nfree_chain(pend_nodes);
}
static term_t ol_tcp_acc_control(outlet_t *ol, uint32_t op, uint8_t *data, int dlen, term_t reply_to, heap_t *hp) { char rbuf[256]; char *reply = rbuf; assert(ol != 0); assert(ol->tcp != 0); debug("%s(op=%d)\n", __FUNCTION__, op); switch (op) { case INET_REQ_ACCEPT: { if (dlen != 4) goto error; uint32_t millis = GET_UINT_32(data); if (millis == 0) { // immediate reply requested if (ol->accepting != 0 && ol->accepted != 0) { struct proc_t *cont_proc = scheduler_lookup(reply_to); assert(cont_proc != 0); bake_one_accepted(ol, cont_proc); } else inet_async_error(ol->oid, reply_to, ASYNC_REF, A_TIMEOUT); break; } acc_pend_t *pend = get_free_pending(ol); if (pend == 0) { REPLY_INET_ERROR("emfile"); break; } pend->outlet = ol; pend->reply_to = reply_to; pend->timeout_set = 0; if (millis != INET_INFINITY) { tcp_accept_timeout(pend, millis); pend->timeout_set = 1; } append_to_ring(&ol->accepting, pend); bake_all_accepted(ol); *reply++ = INET_REP_OK; uint16_t ref = ASYNC_REF; PUT_UINT_16(reply, ref); reply += 2; } break; case INET_REQ_NAME: { saddr_t sockaddr; if (ol_tcp_getsockname(ol, &sockaddr)) goto error; int family; switch (sockaddr.saddr.sa_family) { case AF_INET: family = INET_AF_INET; break; case AF_INET6: family = INET_AF_INET6; break; default: goto error; } *reply++ = INET_REP_OK; *reply++ = family; PUT_UINT_16(reply, sockaddr_port(&sockaddr.saddr)); reply += 2; size_t alen = saddr_to_ipaddr(&sockaddr, (ipX_addr_t *)reply); reply += alen; } break; case INET_REQ_SETOPTS: if (ol_tcp_acc_set_opts(ol, data, dlen) < 0) goto error; *reply++ = INET_REP_OK; break; case INET_REQ_GETOPTS: { int sz = ol_tcp_acc_get_opts(ol, data, dlen, rbuf+1, sizeof(rbuf) -1); if (sz < 0) goto error; *reply++ = INET_REP_OK; reply += sz; } break; default: error: REPLY_INET_ERROR("einval"); } int rlen = reply -rbuf; assert(rlen >= 1 && rlen <= sizeof(rbuf)); term_t result = heap_str_N(hp, rbuf, rlen); if (result == noval) return A_NO_MEMORY; return result; }
static term_t ol_tcp_control(outlet_t *ol, uint32_t op, uint8_t *data, int dlen, term_t reply_to, heap_t *hp) { char rbuf[256]; char *reply = rbuf; int sz; assert(ol != 0); assert(ol->tcp != 0 || op == INET_REQ_OPEN || op == INET_REQ_SUBSCRIBE); switch (op) { case INET_REQ_OPEN: { if (dlen != 2 || data[1] != INET_TYPE_STREAM) goto error; uint8_t family = data[0]; if (family != INET_AF_INET && family != INET_AF_INET6) goto error; assert(ol->tcp == 0); #if LWIP_IPV6 ol->tcp = (family == INET_AF_INET6) ?tcp_new_ip6() :tcp_new(); #else if (family != INET_AF_INET) goto error; ol->tcp = tcp_new(); #endif assert(ol->tcp != 0); // see comment in ol_tcp_animate() tcp_setprio(ol->tcp, TCP_PRIO_MAX +1); tcp_arg(ol->tcp, ol); // callback arg tcp_recv(ol->tcp, recv_cb); tcp_sent(ol->tcp, sent_cb); tcp_err(ol->tcp, error_cb); *reply++ = INET_REP_OK; } break; case INET_REQ_CONNECT: { int is_ipv6 = PCB_ISIPV6(ol->tcp); if ((is_ipv6 && dlen != 4 +2 +16) || (!is_ipv6 && dlen != 4 +2 +4)) goto error; uint32_t timeout = GET_UINT_32(data); uint16_t remote_port = GET_UINT_16(data +4); err_t err; if (!is_ipv6) { ip_addr_t where_to; where_to.addr = ntohl(GET_UINT_32(data +4 +2)); err = tcp_connect(ol->tcp, &where_to, remote_port, connected_cb); } else { #if LWIP_IPV6 ip6_addr_t where_to; where_to.addr[0] = ntohl(GET_UINT_32(data +4 +2)); where_to.addr[1] = ntohl(GET_UINT_32(data +4 +2 +4)); where_to.addr[2] = ntohl(GET_UINT_32(data +4 +2 +8)); where_to.addr[3] = ntohl(GET_UINT_32(data +4 +2 +12)); err = tcp_connect_ip6(ol->tcp, &where_to, remote_port, connected_cb); #else goto error; #endif } // Does it make connections faster? tcp_output(ol->tcp); if (err == ERR_OK) { cr_defer_reply(ol, reply_to, timeout); *reply++ = INET_REP_OK; uint16_t ref = ASYNC_REF; // Why this is needed? A constant will do. 
PUT_UINT_16(reply, ref); reply += 2; } else { // //TODO: ERR_RTE possible too (IPv6) // assert(err == ERR_MEM); REPLY_INET_ERROR("enomem"); } } break; case INET_REQ_PEER: if (ol->tcp->state == CLOSED) REPLY_INET_ERROR("enotconn"); else { *reply++ = INET_REP_OK; *reply++ = INET_AF_INET; uint16_t peer_port = ol->tcp->remote_port; PUT_UINT_16(reply, peer_port); reply += 2; if (PCB_ISIPV6(ol->tcp)) { ip_addr_set_hton((ip_addr_t *)reply, (ip_addr_t *)&ol->tcp->remote_ip); reply += 4; } else { #if LWIP_IPV6 ip6_addr_set_hton((ip6_addr_t *)reply, (ip6_addr_t *)&ol->tcp->remote_ip); reply += 16; #else goto error; #endif } } break; case INET_REQ_NAME: if (ol->tcp->state == CLOSED) REPLY_INET_ERROR("enotconn"); else { *reply++ = INET_REP_OK; int is_ipv6 = PCB_ISIPV6(ol->tcp); *reply++ = (is_ipv6) ?INET_AF_INET6 :INET_AF_INET; uint16_t name_port = ol->tcp->local_port; PUT_UINT_16(reply, name_port); reply += 2; if (PCB_ISIPV6(ol->tcp)) { ip_addr_set_hton((ip_addr_t *)reply, (ip_addr_t *)&ol->tcp->local_ip); reply += 4; } else { #if LWIP_IPV6 ip6_addr_set_hton((ip6_addr_t *)reply, (ip6_addr_t *)&ol->tcp->local_ip); reply += 16; #else goto error; #endif } } break; case INET_REQ_BIND: { int is_ipv6 = PCB_ISIPV6(ol->tcp); if ((is_ipv6 && dlen != 2 +16) || (!is_ipv6 && dlen != 2 +4)) goto error; uint16_t port = GET_UINT_16(data); if (!is_ipv6) { ip_addr_t addr; addr.addr = ntohl(GET_UINT_32(data +2)); tcp_bind(ol->tcp, &addr, port); // always succeeds } else { #if LWIP_IPV6 ip6_addr_t addr; addr.addr[0] = ntohl(GET_UINT_32(data +2)); addr.addr[1] = ntohl(GET_UINT_32(data +2 +4)); addr.addr[2] = ntohl(GET_UINT_32(data +2 +8)); addr.addr[3] = ntohl(GET_UINT_32(data +2 +12)); tcp_bind_ip6(ol->tcp, &addr, port); // always succeeds #else goto error; #endif } uint16_t local_port = ol->tcp->local_port; *reply++ = INET_REP_OK; PUT_UINT_16(reply, local_port); reply += 2; } break; case INET_REQ_LISTEN: { assert(ol->recv_buf_node == 0); // or use destroy_private() int backlog = 
GET_UINT_16(data); ol_tcp_acc_promote(ol, ol->tcp, backlog); *reply++ = INET_REP_OK; } break; case INET_REQ_SETOPTS: if (ol_tcp_set_opts(ol, data, dlen) < 0) goto error; *reply++ = INET_REP_OK; break; case INET_REQ_GETOPTS: sz = ol_tcp_get_opts(ol, data, dlen, rbuf+1, sizeof(rbuf) -1); if (sz < 0) goto error; *reply++ = INET_REP_OK; reply += sz; break; case INET_REQ_GETSTAT: // // lwIP can provide some of the statistics but not all // REPLY_INET_ERROR("enotsup"); break; case INET_REQ_SUBSCRIBE: if (dlen != 1 && data[0] != INET_SUBS_EMPTY_OUT_Q) goto error; if (ol->empty_queue_in_progress) goto error; //TODO: allow multiple subscriptions int qlen = tcp_sndqueuelen(ol->tcp); if (qlen > 0) { ol->empty_queue_in_progress = 1; ol->empty_queue_reply_to = reply_to; } *reply++ = INET_REP_OK; *reply++ = INET_SUBS_EMPTY_OUT_Q; PUT_UINT_32(reply, qlen); reply += 4; break; case TCP_REQ_RECV: if (dlen != 4 +4) goto error; uint32_t msecs = GET_UINT_32(data); uint32_t recv_num = GET_UINT_32(data +4); if (ol->active != INET_PASSIVE) goto error; if (ol->packet == TCP_PB_RAW && recv_num > ol->recv_bufsize) goto error; if (ol->peer_close_detected) inet_async_error(ol->oid, reply_to, ASYNC_REF, A_CLOSED); else { cr_defer_reply(ol, reply_to, msecs); if (ol->packet == TCP_PB_RAW) ol->recv_expected_size = recv_num; // Enough data may have already been buffered proc_t *cont_proc = scheduler_lookup(reply_to); assert(cont_proc != 0); if (recv_bake_packets(ol, cont_proc) < 0) goto error; } *reply++ = INET_REP_OK; uint16_t my_ref = ASYNC_REF; PUT_UINT_16(reply, my_ref); reply += 2; break; case TCP_REQ_SHUTDOWN: if (dlen != 1) goto error; uint8_t what = data[0]; // 0 - read // 1 - write // 2 - read_write int shut_rx = (what == 0) || (what == 2); int shut_tx = (what == 1) || (what == 2); if (ol->tcp->state == LISTEN) REPLY_INET_ERROR("enotconn"); else { tcp_shutdown(ol->tcp, shut_rx, shut_tx); // TODO: return code ignored *reply++ = INET_REP_OK; } break; default: error: 
REPLY_INET_ERROR("einval"); } int rlen = reply -rbuf; assert(rlen >= 1 && rlen <= sizeof(rbuf)); term_t result = heap_str_N(hp, rbuf, rlen); if (result == noval) return A_NO_MEMORY; return result; }