static int ol_tcp_send(outlet_t *ol, int len, term_t reply_to)
{
	assert(ol->send_in_progress == 0);
	assert(ol->send_buf_left == 0);

	//debug("ol_tcp_send: len %d\n", len);

	// Packet modes that prepend a length header to the payload:
	// TCP_PB_1
	// TCP_PB_2
	// TCP_PB_4
	int buf_len = len;
	switch (ol->packet)
	{
	case TCP_PB_1:
		if (len < 0 || len > 255)
			return -BAD_ARG;
		ol->send_buffer[0] = len;
		buf_len = len +1;
		break;
	case TCP_PB_2:
		if (len < 0 || len > 65535)
			return -BAD_ARG;
		PUT_UINT_16(ol->send_buffer, len);
		buf_len = len +2;
		break;
	case TCP_PB_4:
		if (len < 0)
			return -BAD_ARG;
		PUT_UINT_32(ol->send_buffer, len);
		buf_len = len +4;
		break;
	}

	assert(buf_len <= ol->max_send_bufsize);
	ol->send_buf_left = buf_len;

	// lwIP accepts at most TCP_SND_BUF bytes at a time; the remainder stays
	// in send_buffer and is tracked by send_buf_off/send_buf_ack
	uint16_t write_len = (buf_len > TCP_SND_BUF) ?TCP_SND_BUF :buf_len;
	ol->send_buf_ack = 0;
	ol->send_buf_off = write_len;

	//debug("ol_tcp_send: tcp_write(%d)\n", write_len);
	int rc = tcp_write(ol->tcp, ol->send_buffer, write_len, TCP_WRITE_FLAG_COPY);
	if (rc != ERR_OK)
	{
		//debug("ol_tcp_send: tcp_write() returns error %d\n", rc);
		inet_reply_error(ol->oid, reply_to, lwip_err_to_term(rc));
		return 0;
	}

	// Otherwise, the data are buffered until the next tcp_tmr timeout
	tcp_output(ol->tcp);	// may set a timeout

	send_defer_reply(ol, reply_to);
	return 0;
}
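/*
 * A hypothetical sketch (not the sent_cb() registered in ol_tcp_control(),
 * which is defined elsewhere in this file and may differ) of how the chunked
 * send started in ol_tcp_send() could be continued from lwIP's sent callback:
 * acknowledged bytes advance send_buf_ack, the next TCP_SND_BUF-sized slice
 * of send_buffer is handed to tcp_write(), and the reply deferred by
 * send_defer_reply() is released once everything is acknowledged. Only the
 * outlet fields used above are assumed to exist.
 */
#if 0
static err_t sent_cb_sketch(void *arg, struct tcp_pcb *tcb, u16_t len)
{
	outlet_t *ol = (outlet_t *)arg;
	ol->send_buf_ack += len;

	if (ol->send_buf_off < ol->send_buf_left)
	{
		// more buffered data to feed to lwIP
		uint32_t left = ol->send_buf_left - ol->send_buf_off;
		uint16_t chunk = (left > TCP_SND_BUF) ?TCP_SND_BUF :left;
		if (tcp_write(tcb, ol->send_buffer +ol->send_buf_off,
							chunk, TCP_WRITE_FLAG_COPY) == ERR_OK)
			ol->send_buf_off += chunk;
		tcp_output(tcb);
	}
	else if (ol->send_buf_ack >= ol->send_buf_left)
	{
		// all data acknowledged - the deferred inet reply would be sent here
		ol->send_buf_left = 0;
	}

	return ERR_OK;
}
#endif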
static term_t ol_tcp_acc_control(outlet_t *ol,
		uint32_t op, uint8_t *data, int dlen, term_t reply_to, heap_t *hp)
{
	char rbuf[256];
	char *reply = rbuf;

	assert(ol != 0);
	assert(ol->tcp != 0);

	debug("%s(op=%d)\n", __FUNCTION__, op);
	switch (op)
	{
	case INET_REQ_ACCEPT:
	{
		if (dlen != 4)
			goto error;
		uint32_t millis = GET_UINT_32(data);

		if (millis == 0)
		{
			// immediate reply requested
			if (ol->accepting != 0 && ol->accepted != 0)
			{
				struct proc_t *cont_proc = scheduler_lookup(reply_to);
				assert(cont_proc != 0);
				bake_one_accepted(ol, cont_proc);
			}
			else
				inet_async_error(ol->oid, reply_to, ASYNC_REF, A_TIMEOUT);

			// the outcome was delivered asynchronously above; the control
			// call itself still replies ok with the async reference
			*reply++ = INET_REP_OK;
			uint16_t ref = ASYNC_REF;
			PUT_UINT_16(reply, ref);
			reply += 2;
			break;
		}

		acc_pend_t *pend = get_free_pending(ol);
		if (pend == 0)
		{
			REPLY_INET_ERROR("emfile");
			break;
		}

		pend->outlet = ol;
		pend->reply_to = reply_to;
		pend->timeout_set = 0;
		if (millis != INET_INFINITY)
		{
			tcp_accept_timeout(pend, millis);
			pend->timeout_set = 1;
		}

		append_to_ring(&ol->accepting, pend);
		bake_all_accepted(ol);

		*reply++ = INET_REP_OK;
		uint16_t ref = ASYNC_REF;
		PUT_UINT_16(reply, ref);
		reply += 2;
	}
	break;

	case INET_REQ_NAME:
	{
		saddr_t sockaddr;
		if (ol_tcp_getsockname(ol, &sockaddr))
			goto error;

		int family;
		switch (sockaddr.saddr.sa_family)
		{
		case AF_INET:  family = INET_AF_INET; break;
		case AF_INET6: family = INET_AF_INET6; break;
		default: goto error;
		}

		*reply++ = INET_REP_OK;
		*reply++ = family;
		PUT_UINT_16(reply, sockaddr_port(&sockaddr.saddr));
		reply += 2;
		size_t alen = saddr_to_ipaddr(&sockaddr, (ipX_addr_t *)reply);
		reply += alen;
	}
	break;

	case INET_REQ_SETOPTS:
		if (ol_tcp_acc_set_opts(ol, data, dlen) < 0)
			goto error;

		*reply++ = INET_REP_OK;
		break;

	case INET_REQ_GETOPTS:
	{
		int sz = ol_tcp_acc_get_opts(ol, data, dlen, rbuf+1, sizeof(rbuf) -1);
		if (sz < 0)
			goto error;

		*reply++ = INET_REP_OK;
		reply += sz;
	}
	break;

	default:
error:
		REPLY_INET_ERROR("einval");
	}

	int rlen = reply -rbuf;
	assert(rlen >= 1 && rlen <= sizeof(rbuf));
	term_t result = heap_str_N(hp, rbuf, rlen);
	if (result == noval)
		return A_NO_MEMORY;
	return result;
}
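/*
 * Replies from both control handlers follow the classic inet_drv control
 * format: a status byte (INET_REP_OK on success) followed by op-specific
 * payload, or by the POSIX error name on failure. A minimal sketch of what
 * the REPLY_INET_ERROR() macro is assumed to expand to is shown below; the
 * real macro is defined elsewhere and may differ.
 */
#if 0
#define REPLY_INET_ERROR(err)	do { \
	*reply++ = INET_REP_ERROR; \
	strcpy(reply, (err)); \
	reply += strlen(err); \
} while (0)
#endif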
static term_t ol_tcp_control(outlet_t *ol,
		uint32_t op, uint8_t *data, int dlen, term_t reply_to, heap_t *hp)
{
	char rbuf[256];
	char *reply = rbuf;
	int sz;

	assert(ol != 0);
	assert(ol->tcp != 0 || op == INET_REQ_OPEN || op == INET_REQ_SUBSCRIBE);

	switch (op)
	{
	case INET_REQ_OPEN:
	{
		if (dlen != 2 || data[1] != INET_TYPE_STREAM)
			goto error;
		uint8_t family = data[0];
		if (family != INET_AF_INET && family != INET_AF_INET6)
			goto error;
		assert(ol->tcp == 0);

#if LWIP_IPV6
		ol->tcp = (family == INET_AF_INET6) ?tcp_new_ip6() :tcp_new();
#else
		if (family != INET_AF_INET)
			goto error;
		ol->tcp = tcp_new();
#endif
		assert(ol->tcp != 0);

		// see comment in ol_tcp_animate()
		tcp_setprio(ol->tcp, TCP_PRIO_MAX +1);

		tcp_arg(ol->tcp, ol);	// callback arg
		tcp_recv(ol->tcp, recv_cb);
		tcp_sent(ol->tcp, sent_cb);
		tcp_err(ol->tcp, error_cb);

		*reply++ = INET_REP_OK;
	}
	break;

	case INET_REQ_CONNECT:
	{
		int is_ipv6 = PCB_ISIPV6(ol->tcp);
		if ((is_ipv6 && dlen != 4 +2 +16) ||
				(!is_ipv6 && dlen != 4 +2 +4))
			goto error;
		uint32_t timeout = GET_UINT_32(data);
		uint16_t remote_port = GET_UINT_16(data +4);

		err_t err;
		if (!is_ipv6)
		{
			ip_addr_t where_to;
			where_to.addr = ntohl(GET_UINT_32(data +4 +2));
			err = tcp_connect(ol->tcp, &where_to, remote_port, connected_cb);
		}
		else
		{
#if LWIP_IPV6
			ip6_addr_t where_to;
			where_to.addr[0] = ntohl(GET_UINT_32(data +4 +2));
			where_to.addr[1] = ntohl(GET_UINT_32(data +4 +2 +4));
			where_to.addr[2] = ntohl(GET_UINT_32(data +4 +2 +8));
			where_to.addr[3] = ntohl(GET_UINT_32(data +4 +2 +12));
			err = tcp_connect_ip6(ol->tcp, &where_to, remote_port, connected_cb);
#else
			goto error;
#endif
		}

		// Does it make connections faster?
		tcp_output(ol->tcp);

		if (err == ERR_OK)
		{
			cr_defer_reply(ol, reply_to, timeout);

			*reply++ = INET_REP_OK;
			uint16_t ref = ASYNC_REF;	// why is this needed? a constant will do
			PUT_UINT_16(reply, ref);
			reply += 2;
		}
		else
		{
			//TODO: ERR_RTE possible too (IPv6)
			//assert(err == ERR_MEM);
			REPLY_INET_ERROR("enomem");
		}
	}
	break;

	case INET_REQ_PEER:
	if (ol->tcp->state == CLOSED)
		REPLY_INET_ERROR("enotconn");
	else
	{
		*reply++ = INET_REP_OK;
		int is_ipv6 = PCB_ISIPV6(ol->tcp);
		*reply++ = (is_ipv6) ?INET_AF_INET6 :INET_AF_INET;
		uint16_t peer_port = ol->tcp->remote_port;
		PUT_UINT_16(reply, peer_port);
		reply += 2;
		if (!is_ipv6)
		{
			ip_addr_set_hton((ip_addr_t *)reply, (ip_addr_t *)&ol->tcp->remote_ip);
			reply += 4;
		}
		else
		{
#if LWIP_IPV6
			ip6_addr_set_hton((ip6_addr_t *)reply, (ip6_addr_t *)&ol->tcp->remote_ip);
			reply += 16;
#else
			goto error;
#endif
		}
	}
	break;

	case INET_REQ_NAME:
	if (ol->tcp->state == CLOSED)
		REPLY_INET_ERROR("enotconn");
	else
	{
		*reply++ = INET_REP_OK;
		int is_ipv6 = PCB_ISIPV6(ol->tcp);
		*reply++ = (is_ipv6) ?INET_AF_INET6 :INET_AF_INET;
		uint16_t name_port = ol->tcp->local_port;
		PUT_UINT_16(reply, name_port);
		reply += 2;
		if (!is_ipv6)
		{
			ip_addr_set_hton((ip_addr_t *)reply, (ip_addr_t *)&ol->tcp->local_ip);
			reply += 4;
		}
		else
		{
#if LWIP_IPV6
			ip6_addr_set_hton((ip6_addr_t *)reply, (ip6_addr_t *)&ol->tcp->local_ip);
			reply += 16;
#else
			goto error;
#endif
		}
	}
	break;

	case INET_REQ_BIND:
	{
		int is_ipv6 = PCB_ISIPV6(ol->tcp);
		if ((is_ipv6 && dlen != 2 +16) ||
				(!is_ipv6 && dlen != 2 +4))
			goto error;
		uint16_t port = GET_UINT_16(data);
		if (!is_ipv6)
		{
			ip_addr_t addr;
			addr.addr = ntohl(GET_UINT_32(data +2));
			tcp_bind(ol->tcp, &addr, port); // always succeeds
		}
		else
		{
#if LWIP_IPV6
			ip6_addr_t addr;
			addr.addr[0] = ntohl(GET_UINT_32(data +2));
			addr.addr[1] = ntohl(GET_UINT_32(data +2 +4));
			addr.addr[2] = ntohl(GET_UINT_32(data +2 +8));
			addr.addr[3] = ntohl(GET_UINT_32(data +2 +12));
			tcp_bind_ip6(ol->tcp, &addr, port); // always succeeds
#else
			goto error;
#endif
		}

		uint16_t local_port = ol->tcp->local_port;
		*reply++ = INET_REP_OK;
		PUT_UINT_16(reply, local_port);
		reply += 2;
	}
	break;

	case INET_REQ_LISTEN:
	{
		assert(ol->recv_buf_node == 0);	// or use destroy_private()
		int backlog = GET_UINT_16(data);
		ol_tcp_acc_promote(ol, ol->tcp, backlog);
		*reply++ = INET_REP_OK;
	}
	break;

	case INET_REQ_SETOPTS:
		if (ol_tcp_set_opts(ol, data, dlen) < 0)
			goto error;

		*reply++ = INET_REP_OK;
		break;

	case INET_REQ_GETOPTS:
		sz = ol_tcp_get_opts(ol, data, dlen, rbuf+1, sizeof(rbuf) -1);
		if (sz < 0)
			goto error;

		*reply++ = INET_REP_OK;
		reply += sz;
		break;

	case INET_REQ_GETSTAT:
		// lwIP can provide some of the statistics but not all
		REPLY_INET_ERROR("enotsup");
		break;

	case INET_REQ_SUBSCRIBE:
		if (dlen != 1 || data[0] != INET_SUBS_EMPTY_OUT_Q)
			goto error;
		if (ol->empty_queue_in_progress)
			goto error;		//TODO: allow multiple subscriptions

		int qlen = tcp_sndqueuelen(ol->tcp);
		if (qlen > 0)
		{
			ol->empty_queue_in_progress = 1;
			ol->empty_queue_reply_to = reply_to;
		}

		*reply++ = INET_REP_OK;
		*reply++ = INET_SUBS_EMPTY_OUT_Q;
		PUT_UINT_32(reply, qlen);
		reply += 4;
		break;

	case TCP_REQ_RECV:
		if (dlen != 4 +4)
			goto error;

		uint32_t msecs = GET_UINT_32(data);
		uint32_t recv_num = GET_UINT_32(data +4);

		if (ol->active != INET_PASSIVE)
			goto error;
		if (ol->packet == TCP_PB_RAW && recv_num > ol->recv_bufsize)
			goto error;

		if (ol->peer_close_detected)
			inet_async_error(ol->oid, reply_to, ASYNC_REF, A_CLOSED);
		else
		{
			cr_defer_reply(ol, reply_to, msecs);

			if (ol->packet == TCP_PB_RAW)
				ol->recv_expected_size = recv_num;

			// Enough data may have already been buffered
			proc_t *cont_proc = scheduler_lookup(reply_to);
			assert(cont_proc != 0);
			if (recv_bake_packets(ol, cont_proc) < 0)
				goto error;
		}

		*reply++ = INET_REP_OK;
		uint16_t my_ref = ASYNC_REF;
		PUT_UINT_16(reply, my_ref);
		reply += 2;
		break;

	case TCP_REQ_SHUTDOWN:
		if (dlen != 1)
			goto error;

		uint8_t what = data[0];
		// 0 - read
		// 1 - write
		// 2 - read_write
		int shut_rx = (what == 0) || (what == 2);
		int shut_tx = (what == 1) || (what == 2);

		if (ol->tcp->state == LISTEN)
			REPLY_INET_ERROR("enotconn");
		else
		{
			tcp_shutdown(ol->tcp, shut_rx, shut_tx);	// TODO: return code ignored
			*reply++ = INET_REP_OK;
		}
		break;

	default:
error:
		REPLY_INET_ERROR("einval");
	}

	int rlen = reply -rbuf;
	assert(rlen >= 1 && rlen <= sizeof(rbuf));
	term_t result = heap_str_N(hp, rbuf, rlen);
	if (result == noval)
		return A_NO_MEMORY;
	return result;
}
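/*
 * Request payload layouts accepted by ol_tcp_control() above, as implied by
 * its dlen checks (multi-byte fields are read with GET_UINT_16/GET_UINT_32):
 *
 *   INET_REQ_OPEN        family:1, type:1 (INET_TYPE_STREAM)
 *   INET_REQ_CONNECT     timeout:4, port:2, addr:4|16
 *   INET_REQ_BIND        port:2, addr:4|16
 *   INET_REQ_LISTEN      backlog:2
 *   INET_REQ_SETOPTS/GETOPTS  option-specific, variable length
 *   INET_REQ_SUBSCRIBE   spec:1 (INET_SUBS_EMPTY_OUT_Q)
 *   TCP_REQ_RECV         timeout:4, length:4
 *   TCP_REQ_SHUTDOWN     how:1 (0 = read, 1 = write, 2 = read_write)
 *
 * INET_REQ_PEER and INET_REQ_NAME take no payload; INET_REQ_GETSTAT is
 * answered with enotsup.
 */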
int build_getifaddrs_reply(char *buf, int sz)
{
	char *p = buf;
	more(1);
	*p++ = INET_REP_OK;

	// loopback interface
	more(3);
	*p++ = 'l'; *p++ = 'o'; *p++ = 0;

	more(1 +4);
	*p++ = INET_IFOPT_FLAGS;
	uint32_t loflags = INET_IFF_UP | INET_IFF_LOOPBACK | INET_IFF_RUNNING;
	PUT_UINT_32(p, loflags);
	p += 4;

	more(1 +1 +4);
	*p++ = INET_IFOPT_ADDR;
	*p++ = INET_AF_INET;
	*p++ = 127; *p++ = 0; *p++ = 0; *p++ = 1;

	more(1 +1 +4);
	*p++ = INET_IFOPT_NETMASK;
	*p++ = INET_AF_INET;
	*p++ = 255; *p++ = 0; *p++ = 0; *p++ = 0;

	more(1);
	*p++ = 0;	// end of 'lo' options

	// network front ends
	netfe_t *fe = net_front_ends;
	while (fe != 0)
	{
		// interface name: ethNN
		more(3);
		*p++ = 'e'; *p++ = 't'; *p++ = 'h';
		int n = fe->index;
		assert(n >= 0);
		char digits[8];
		int nd = 0;
		do {
			digits[nd++] = (n % 10) +'0';	// least significant digit first
			n /= 10;
		} while (n > 0);
		while (nd > 0)
		{
			more(1);
			*p++ = digits[--nd];	// emit digits most significant first
		}
		more(1);
		*p++ = 0;

		more(1 +4);
		*p++ = INET_IFOPT_FLAGS;
		uint32_t ethflags = INET_IFF_UP | INET_IFF_BROADCAST | INET_IFF_RUNNING | INET_IFF_MULTICAST;
		PUT_UINT_32(p, ethflags);
		p += 4;

		more(1 +2 +6);	// assumes fe->mac_len == 6
		*p++ = INET_IFOPT_HWADDR;
		PUT_UINT_16(p, fe->mac_len);
		p += 2;
		memcpy(p, fe->mac, fe->mac_len);
		p += fe->mac_len;

		if (fe->attached_lwip_netif)
		{
			// can happen for eth0 only
			struct netif *nf = fe->attached_lwip_netif;
			if (!ip_addr_isany(&nf->ip_addr))
			{
				more(1 +1 +4);
				*p++ = INET_IFOPT_ADDR;
				*p++ = INET_AF_INET;
				*p++ = ip4_addr1(&nf->ip_addr);
				*p++ = ip4_addr2(&nf->ip_addr);
				*p++ = ip4_addr3(&nf->ip_addr);
				*p++ = ip4_addr4(&nf->ip_addr);

				more(1 +1 +4);
				*p++ = INET_IFOPT_NETMASK;
				*p++ = INET_AF_INET;
				*p++ = ip4_addr1(&nf->netmask);
				*p++ = ip4_addr2(&nf->netmask);
				*p++ = ip4_addr3(&nf->netmask);
				*p++ = ip4_addr4(&nf->netmask);
			}
		}

		more(1);
		*p++ = 0;	// end of ethNN options

		fe = fe->next;
	}

	return p -buf;
}
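/*
 * The more(n) macro used throughout build_getifaddrs_reply() is defined
 * elsewhere; a minimal sketch of the assumed behaviour - checking that n more
 * bytes fit into the caller-supplied buffer before they are written - is
 * shown below for illustration only and may differ from the real definition.
 */
#if 0
#define more(n)		assert(p + (n) <= buf + sz)
#endif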