static void udp_op_read(struct socket * sock, message * m, int blk)
{
	debug_udp_print("socket num %ld", get_sock_num(sock));

	if (sock->recv_head) {
		/* data is available, receive it immediately */
		struct udp_recv_data * data;
		int ret;

		data = (struct udp_recv_data *) sock->recv_head->data;

		ret = udp_do_receive(sock, m, (struct udp_pcb *) sock->pcb,
					data->pbuf, &data->ip, data->port);

		if (ret > 0) {
			sock_dequeue_data(sock);
			sock->recv_data_size -= data->pbuf->tot_len;
			udp_recv_free(data);
		}
		sock_reply(sock, ret);
	} else if (!blk)
		sock_reply(sock, EAGAIN);
	else {
		/* store the message so we know how to reply later */
		sock->mess = *m;
		/* the operation is being processed */
		sock->flags |= SOCK_FLG_OP_PENDING;
		debug_udp_print("no data to read, suspending\n");
	}
}
static void tcp_op_accept(struct socket * sock, message * m)
{
	debug_tcp_print("socket num %ld", get_sock_num(sock));

	if (!(sock->flags & SOCK_FLG_OP_LISTENING)) {
		debug_tcp_print("socket %ld is not listening\n",
				get_sock_num(sock));
		sock_reply(sock, EINVAL);
		return;
	}

	/* there is a connection ready to be accepted */
	if (sock->recv_head) {
		int ret;
		struct tcp_pcb * pcb;

		pcb = (struct tcp_pcb *) sock->recv_head->data;
		assert(pcb);

		ret = tcp_do_accept(sock, m, pcb);
		sock_reply(sock, ret);
		if (ret == OK)
			sock_dequeue_data(sock);
		return;
	}

	debug_tcp_print("no ready connection, suspending\n");

	sock_reply(sock, SUSPEND);
	sock->flags |= SOCK_FLG_OP_PENDING;
}
static void tcp_get_opt(struct socket * sock, message * m)
{
	int err;
	nwio_tcpopt_t tcpopt;
	struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	assert(pcb);

	if ((unsigned) m->COUNT < sizeof(tcpopt)) {
		sock_reply(sock, EINVAL);
		return;
	}

	/* FIXME : not used by the userspace library */
	tcpopt.nwto_flags = 0;

	err = copy_to_user(m->m_source, &tcpopt, sizeof(tcpopt),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	sock_reply(sock, OK);
}
static void tcp_op_listen(struct socket * sock, message * m)
{
	int backlog, err;
	struct tcp_pcb * new_pcb;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	err = copy_from_user(m->m_source, &backlog, sizeof(backlog),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	new_pcb = tcp_listen_with_backlog((struct tcp_pcb *) sock->pcb,
							(u8_t) backlog);
	debug_tcp_print("listening pcb %p", new_pcb);

	if (!new_pcb) {
		debug_tcp_print("Cannot listen on socket %ld",
				get_sock_num(sock));
		sock_reply(sock, EGENERIC);
		return;
	}

	/* advertise that this socket is willing to accept connections */
	tcp_accept(new_pcb, tcp_accept_callback);
	sock->flags |= SOCK_FLG_OP_LISTENING;

	sock->pcb = new_pcb;
	sock_reply(sock, OK);
}
static void tcp_op_read(struct socket * sock, message * m)
{
	debug_tcp_print("socket num %ld", get_sock_num(sock));

	if (!sock->pcb ||
			((struct tcp_pcb *) sock->pcb)->state != ESTABLISHED) {
		debug_tcp_print("Connection not established\n");
		sock_reply(sock, ENOTCONN);
		return;
	}

	if (sock->recv_head) {
		/* data is available, receive it immediately */
		int ret = read_from_tcp(sock, m);
		debug_tcp_print("read op finished");
		sock_reply(sock, ret);
	} else {
		if (sock->flags & SOCK_FLG_CLOSED) {
			printf("socket %ld already closed!!! call from %d\n",
					get_sock_num(sock), m->USER_ENDPT);
			do_tcp_debug = 1;
			sock_reply(sock, 0);
			return;
		}
		/* the operation is being processed */
		debug_tcp_print("no data to read, suspending");
		sock_reply(sock, SUSPEND);
		sock->flags |= SOCK_FLG_OP_PENDING | SOCK_FLG_OP_READING;
	}
}
int raw_socket_input(struct pbuf * pbuf, struct nic * nic)
{
	struct socket * sock;
	struct pbuf * pbuf_new;

	if ((sock = nic->raw_socket) == NULL)
		return 0;
	debug_print("socket num : %ld", get_sock_num(sock));

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		int ret;

		/* we are resuming a suspended operation */
		ret = raw_receive(&sock->mess, pbuf);

		if (ret > 0) {
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			return 0;
		} else {
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
		}
	}

	/* Do not enqueue more data than allowed */
	if (sock->recv_data_size > RAW_BUF_SIZE)
		return 0;

	/*
	 * Nobody is waiting for the data or an error occurred above; we
	 * enqueue the packet. We store a copy of this packet.
	 */
	pbuf_new = pbuf_alloc(PBUF_RAW, pbuf->tot_len, PBUF_RAM);
	if (pbuf_new == NULL) {
		debug_print("LWIP : cannot allocate new pbuf\n");
		return 0;
	}

	if (pbuf_copy(pbuf_new, pbuf) != ERR_OK) {
		debug_print("LWIP : cannot copy pbuf\n");
		pbuf_free(pbuf_new);
		return 0;
	}

	/*
	 * If we didn't manage to enqueue the packet we report it as not
	 * consumed
	 */
	if (sock_enqueue_data(sock, pbuf_new, pbuf_new->tot_len) != OK)
		pbuf_free(pbuf_new);

	return 0;
}
static void udp_set_opt(struct socket * sock, message * m)
{
	int err;
	nwio_udpopt_t udpopt;
	struct udp_pcb * pcb = (struct udp_pcb *) sock->pcb;
	ip_addr_t loc_ip = ip_addr_any;

	assert(pcb);

	err = copy_from_user(m->m_source, &udpopt, sizeof(udpopt),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	debug_udp_print("udpopt.nwuo_flags = 0x%lx", udpopt.nwuo_flags);
	debug_udp_print("udpopt.nwuo_remaddr = 0x%x",
			(unsigned int) udpopt.nwuo_remaddr);
	debug_udp_print("udpopt.nwuo_remport = 0x%x",
			ntohs(udpopt.nwuo_remport));
	debug_udp_print("udpopt.nwuo_locaddr = 0x%x",
			(unsigned int) udpopt.nwuo_locaddr);
	debug_udp_print("udpopt.nwuo_locport = 0x%x",
			ntohs(udpopt.nwuo_locport));

	sock->usr_flags = udpopt.nwuo_flags;

	/*
	 * We will only get data from userspace and the remote address and
	 * port are set, which means that from now on we must know where to
	 * send the data. Thus we interpret this as a connect() call.
	 */
	if (sock->usr_flags & NWUO_RWDATONLY &&
			sock->usr_flags & NWUO_RP_SET &&
			sock->usr_flags & NWUO_RA_SET)
		udp_connect(pcb, (ip_addr_t *) &udpopt.nwuo_remaddr,
					ntohs(udpopt.nwuo_remport));
	/* Setting the local address means binding */
	if (sock->usr_flags & NWUO_LP_SET)
		udp_bind(pcb, &loc_ip, ntohs(udpopt.nwuo_locport));
	/* We can only bind to a random local port */
	if (sock->usr_flags & NWUO_LP_SEL)
		udp_bind(pcb, &loc_ip, 0);

	/* register a receive hook */
	udp_recv((struct udp_pcb *) sock->pcb, udp_recv_callback, sock);

	sock_reply(sock, OK);
}
static void raw_ip_set_opt(struct socket * sock, message * m)
{
	int err;
	nwio_ipopt_t ipopt;
	struct raw_pcb * pcb;

	err = copy_from_user(m->m_source, &ipopt, sizeof(ipopt),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	debug_print("ipopt.nwio_flags = 0x%lx", ipopt.nwio_flags);
	debug_print("ipopt.nwio_proto = 0x%x", ipopt.nwio_proto);
	debug_print("ipopt.nwio_rem = 0x%x", (unsigned int) ipopt.nwio_rem);

	if (sock->pcb == NULL) {
		if (!(pcb = raw_new(ipopt.nwio_proto))) {
			raw_ip_close(sock);
			sock_reply(sock, ENOMEM);
			return;
		}
		sock->pcb = pcb;
	} else
		pcb = (struct raw_pcb *) sock->pcb;

	if (pcb->protocol != ipopt.nwio_proto) {
		debug_print("conflicting ip socket protocols\n");
		sock_reply(sock, EBADIOCTL);
		return;
	}

	sock->usr_flags = ipopt.nwio_flags;

#if 0
	if (raw_bind(pcb, (ip_addr_t *) &ipopt.nwio_rem) == ERR_USE) {
		raw_ip_close(sock);
		sock_reply(sock, EADDRINUSE);
		return;
	}
#endif

	/* register a receive hook */
	raw_recv((struct raw_pcb *) sock->pcb, raw_ip_op_receive, sock);

	sock_reply(sock, OK);
}
static void udp_op_write(struct socket * sock, message * m, __unused int blk)
{
	int ret;
	struct pbuf * pbuf;

	debug_udp_print("socket num %ld data size %d",
			get_sock_num(sock), m->COUNT);

	pbuf = pbuf_alloc(PBUF_TRANSPORT, m->COUNT, PBUF_POOL);
	if (!pbuf) {
		ret = ENOMEM;
		goto write_err;
	}

	if ((ret = copy_from_user(m->m_source, pbuf->payload, m->COUNT,
				(cp_grant_id_t) m->IO_GRANT, 0)) != OK) {
		pbuf_free(pbuf);
		goto write_err;
	}

	if (sock->usr_flags & NWUO_RWDATONLY)
		ret = udp_op_send(sock, pbuf, m);
	else
		ret = udp_op_sendto(sock, pbuf, m);

	if (pbuf_free(pbuf) == 0)
		panic("We cannot buffer udp packets yet!");

write_err:
	sock_reply(sock, ret);
}
static err_t tcp_accept_callback(void *arg, struct tcp_pcb *newpcb, err_t err)
{
	struct socket * sock = (struct socket *) arg;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	assert(err == ERR_OK && newpcb);
	assert(sock->flags & SOCK_FLG_OP_LISTENING);

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		int ret;

		ret = tcp_do_accept(sock, &sock->mess, newpcb);
		sock_reply(sock, ret);
		sock->flags &= ~SOCK_FLG_OP_PENDING;
		if (ret == OK)
			return ERR_OK;
		/* in case of an error fall through */
	}

	/* If we cannot accept right away we enqueue the connection for later */
	debug_tcp_print("Enqueue connection sock %ld pcb %p\n",
			get_sock_num(sock), newpcb);
	if (sock_enqueue_data(sock, newpcb, 1) != OK) {
		tcp_abort(newpcb);
		return ERR_ABRT;
	}

	if (sock_select_read_set(sock))
		sock_select_notify(sock);

	return ERR_OK;
}
static err_t tcp_connected_callback(void *arg, struct tcp_pcb *tpcb,
						__unused err_t err)
{
	struct socket * sock = (struct socket *) arg;

	debug_tcp_print("socket num %ld err %d", get_sock_num(sock), err);

	if (sock->pcb == NULL) {
		if (sock_select_set(sock))
			sock_select_notify(sock);
		return ERR_OK;
	}

	assert((struct tcp_pcb *) sock->pcb == tpcb);

	tcp_sent(tpcb, tcp_sent_callback);
	tcp_recv(tpcb, tcp_recv_callback);
	sock_reply(sock, OK);
	sock->flags &= ~(SOCK_FLG_OP_PENDING | SOCK_FLG_OP_CONNECTING);

	/* revive does the sock_select_notify() for us */

	return ERR_OK;
}
static void tcp_error_callback(void *arg, err_t err)
{
	int perr;
	struct socket * sock = (struct socket *) arg;

	debug_tcp_print("socket num %ld err %d", get_sock_num(sock), err);

	switch (err) {
	case ERR_RST:
		perr = ECONNREFUSED;
		break;
	case ERR_CLSD:
		perr = EPIPE;
		break;
	case ERR_CONN:
		perr = ENOTCONN;
		break;
	default:
		perr = EIO;
	}

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		sock_reply(sock, perr);
		sock->flags &= ~SOCK_FLG_OP_PENDING;
	} else if (sock_select_set(sock))
		sock_select_notify(sock);
	/*
	 * When the error callback is called the pcb either does not exist
	 * anymore or is going to be deallocated soon after. We must not use
	 * it anymore.
	 */
	sock->pcb = NULL;
}
static void tcp_op_get_cookie(struct socket * sock, message * m)
{
	tcp_cookie_t cookie;
	unsigned sock_num;

	assert(sizeof(cookie) >= sizeof(sock));

	sock_num = get_sock_num(sock);
	memcpy(&cookie, &sock_num, sizeof(sock_num));

	if (copy_to_user(m->m_source, &cookie, sizeof(sock),
			(cp_grant_id_t) m->IO_GRANT, 0) == OK)
		sock_reply(sock, OK);
	else
		sock_reply(sock, EFAULT);
}
static void tcp_op_ioctl(struct socket * sock, message * m)
{
	if (!sock->pcb) {
		sock_reply(sock, ENOTCONN);
		return;
	}

	debug_tcp_print("socket num %ld req %c %d %d",
			get_sock_num(sock),
			(m->REQUEST >> 8) & 0xff,
			m->REQUEST & 0xff,
			(m->REQUEST >> 16) & _IOCPARM_MASK);

	switch (m->REQUEST) {
	case NWIOGTCPCONF:
		tcp_get_conf(sock, m);
		break;
	case NWIOSTCPCONF:
		tcp_set_conf(sock, m);
		break;
	case NWIOTCPCONN:
		tcp_op_connect(sock);
		break;
	case NWIOTCPLISTENQ:
		tcp_op_listen(sock, m);
		break;
	case NWIOGTCPCOOKIE:
		tcp_op_get_cookie(sock, m);
		break;
	case NWIOTCPACCEPTTO:
		tcp_op_accept(sock, m);
		break;
	case NWIOTCPSHUTDOWN:
		tcp_op_shutdown_tx(sock);
		break;
	case NWIOGTCPOPT:
		tcp_get_opt(sock, m);
		break;
	case NWIOSTCPOPT:
		tcp_set_opt(sock, m);
		break;
	default:
		sock_reply(sock, EBADIOCTL);
		return;
	}
}
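/*
 * Illustrative only: a minimal sketch of how a userspace client might drive
 * the ioctl dispatcher above to open an outgoing connection. The device path
 * and the header names below are assumptions and not taken from this file;
 * only the NWIO* request codes handled by tcp_op_ioctl() and the
 * nwio_tcpconf_t fields used by tcp_set_conf() are real.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <arpa/inet.h>
#include <net/gen/tcp.h>	/* assumed location of nwio_tcpconf_t */
#include <net/gen/tcp_io.h>	/* assumed location of the NWIO*TCP* codes */

static int tcp_client_connect_sketch(void)
{
	nwio_tcpconf_t tconf;
	int fd;

	/* hypothetical device node served by this driver */
	if ((fd = open("/dev/tcp", O_RDWR)) < 0)
		return -1;

	memset(&tconf, 0, sizeof(tconf));
	/* these flags are the ones tcp_set_conf() checks */
	tconf.nwtc_flags = NWTC_SET_RA | NWTC_SET_RP;
	tconf.nwtc_remaddr = inet_addr("192.0.2.1");	/* example address */
	tconf.nwtc_remport = htons(80);

	/* dispatched to tcp_set_conf() */
	if (ioctl(fd, NWIOSTCPCONF, &tconf) < 0)
		return -1;
	/* dispatched to tcp_op_connect(); any argument is ignored there */
	if (ioctl(fd, NWIOTCPCONN, NULL) < 0)
		return -1;

	return fd;
}
#endif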
static void tcp_op_shutdown_tx(struct socket * sock)
{
	err_t err;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	err = tcp_shutdown((struct tcp_pcb *) sock->pcb, 0, 1);

	switch (err) {
	case ERR_OK:
		sock_reply(sock, OK);
		break;
	case ERR_CONN:
		sock_reply(sock, ENOTCONN);
		break;
	default:
		sock_reply(sock, EGENERIC);
	}
}
static void tcp_set_opt(struct socket * sock, message * m)
{
	int err;
	nwio_tcpopt_t tcpopt;
	struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	assert(pcb);

	err = copy_from_user(m->m_source, &tcpopt, sizeof(tcpopt),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	/* FIXME : The userspace library does not use this */

	sock_reply(sock, OK);
}
static void tcp_set_conf(struct socket * sock, message * m)
{
	int err;
	nwio_tcpconf_t tconf;
	struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	assert(pcb);

	err = copy_from_user(m->m_source, &tconf, sizeof(tconf),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	debug_tcp_print("tconf.nwtc_flags = 0x%lx", tconf.nwtc_flags);
	debug_tcp_print("tconf.nwtc_remaddr = 0x%x",
			(unsigned int) tconf.nwtc_remaddr);
	debug_tcp_print("tconf.nwtc_remport = 0x%x", ntohs(tconf.nwtc_remport));
	debug_tcp_print("tconf.nwtc_locaddr = 0x%x",
			(unsigned int) tconf.nwtc_locaddr);
	debug_tcp_print("tconf.nwtc_locport = 0x%x", ntohs(tconf.nwtc_locport));

	sock->usr_flags = tconf.nwtc_flags;

	if (sock->usr_flags & NWTC_SET_RA)
		pcb->remote_ip.addr = tconf.nwtc_remaddr;
	if (sock->usr_flags & NWTC_SET_RP)
		pcb->remote_port = ntohs(tconf.nwtc_remport);

	if (sock->usr_flags & NWTC_LP_SET) {
		/* FIXME the user library can only bind to ANY anyway */
		if (tcp_bind(pcb, IP_ADDR_ANY,
				ntohs(tconf.nwtc_locport)) == ERR_USE) {
			sock_reply(sock, EADDRINUSE);
			return;
		}
	}

	sock_reply(sock, OK);
}
static void raw_ip_op_write(struct socket * sock, message * m, __unused int blk)
{
	int ret;
	struct pbuf * pbuf;
	struct ip_hdr * ip_hdr;

	debug_print("socket num %ld data size %d",
			get_sock_num(sock), m->COUNT);

	if (sock->pcb == NULL) {
		ret = EIO;
		goto write_err;
	}

	if ((size_t) m->COUNT > sock->buf_size) {
		ret = ENOMEM;
		goto write_err;
	}

	pbuf = pbuf_alloc(PBUF_LINK, m->COUNT, PBUF_RAM);
	if (!pbuf) {
		ret = ENOMEM;
		goto write_err;
	}

	if ((ret = copy_from_user(m->m_source, pbuf->payload, m->COUNT,
				(cp_grant_id_t) m->IO_GRANT, 0)) != OK) {
		pbuf_free(pbuf);
		goto write_err;
	}

	/*
	 * Remember the destination from the user-supplied IP header, then
	 * strip that header before handing the payload to raw_sendto().
	 */
	ip_hdr = (struct ip_hdr *) pbuf->payload;
	if (pbuf_header(pbuf, -IP_HLEN)) {
		pbuf_free(pbuf);
		ret = EIO;
		goto write_err;
	}

	if ((ret = raw_sendto((struct raw_pcb *) sock->pcb, pbuf,
				(ip_addr_t *) &ip_hdr->dest)) != OK) {
		debug_print("raw_sendto failed %d", ret);
		ret = EIO;
	} else
		ret = m->COUNT;

	pbuf_free(pbuf);

write_err:
	sock_reply(sock, ret);
}
static void udp_get_opt(struct socket * sock, message * m)
{
	int err;
	nwio_udpopt_t udpopt;
	struct udp_pcb * pcb = (struct udp_pcb *) sock->pcb;

	assert(pcb);

	udpopt.nwuo_locaddr = pcb->local_ip.addr;
	udpopt.nwuo_locport = htons(pcb->local_port);
	udpopt.nwuo_remaddr = pcb->remote_ip.addr;
	udpopt.nwuo_remport = htons(pcb->remote_port);
	udpopt.nwuo_flags = sock->usr_flags;

	debug_udp_print("udpopt.nwuo_flags = 0x%lx", udpopt.nwuo_flags);
	debug_udp_print("udpopt.nwuo_remaddr = 0x%x",
			(unsigned int) udpopt.nwuo_remaddr);
	debug_udp_print("udpopt.nwuo_remport = 0x%x",
			ntohs(udpopt.nwuo_remport));
	debug_udp_print("udpopt.nwuo_locaddr = 0x%x",
			(unsigned int) udpopt.nwuo_locaddr);
	debug_udp_print("udpopt.nwuo_locport = 0x%x",
			ntohs(udpopt.nwuo_locport));

	if ((unsigned int) m->COUNT < sizeof(udpopt)) {
		sock_reply(sock, EINVAL);
		return;
	}

	err = copy_to_user(m->m_source, &udpopt, sizeof(udpopt),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	sock_reply(sock, OK);
}
static void tcp_get_conf(struct socket * sock, message * m)
{
	int err;
	nwio_tcpconf_t tconf;
	struct tcp_pcb * pcb = (struct tcp_pcb *) sock->pcb;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	assert(pcb);

	tconf.nwtc_locaddr = pcb->local_ip.addr;
	tconf.nwtc_locport = htons(pcb->local_port);
	tconf.nwtc_remaddr = pcb->remote_ip.addr;
	tconf.nwtc_remport = htons(pcb->remote_port);
	tconf.nwtc_flags = sock->usr_flags;

	debug_tcp_print("tconf.nwtc_flags = 0x%lx", tconf.nwtc_flags);
	debug_tcp_print("tconf.nwtc_remaddr = 0x%x",
			(unsigned int) tconf.nwtc_remaddr);
	debug_tcp_print("tconf.nwtc_remport = 0x%x", ntohs(tconf.nwtc_remport));
	debug_tcp_print("tconf.nwtc_locaddr = 0x%x",
			(unsigned int) tconf.nwtc_locaddr);
	debug_tcp_print("tconf.nwtc_locport = 0x%x", ntohs(tconf.nwtc_locport));

	if ((unsigned) m->COUNT < sizeof(tconf)) {
		sock_reply(sock, EINVAL);
		return;
	}

	err = copy_to_user(m->m_source, &tconf, sizeof(tconf),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	sock_reply(sock, OK);
}
static void raw_ip_get_opt(struct socket * sock, message * m)
{
	int err;
	nwio_ipopt_t ipopt;
	struct raw_pcb * pcb = (struct raw_pcb *) sock->pcb;

	assert(pcb);

	ipopt.nwio_rem = pcb->remote_ip.addr;
	ipopt.nwio_flags = sock->usr_flags;

	if ((unsigned int) m->COUNT < sizeof(ipopt)) {
		sock_reply(sock, EINVAL);
		return;
	}

	err = copy_to_user(m->m_source, &ipopt, sizeof(ipopt),
				(cp_grant_id_t) m->IO_GRANT, 0);
	if (err != OK) {
		sock_reply(sock, err);
		return;
	}

	sock_reply(sock, OK);
}
static void tcp_op_close(struct socket * sock, __unused message * m)
{
	debug_tcp_print("socket num %ld", get_sock_num(sock));

	if (sock->flags & SOCK_FLG_OP_LISTENING)
		sock_dequeue_data_all(sock, tcp_backlog_free);
	else
		sock_dequeue_data_all(sock, tcp_recv_free);
	debug_tcp_print("dequeued RX data");

	if (sock->pcb) {
		int err;

		/* we are not able to handle any callback anymore */
		tcp_arg((struct tcp_pcb *) sock->pcb, NULL);
		tcp_err((struct tcp_pcb *) sock->pcb, NULL);
		tcp_sent((struct tcp_pcb *) sock->pcb, NULL);
		tcp_recv((struct tcp_pcb *) sock->pcb, NULL);

		err = tcp_close((struct tcp_pcb *) sock->pcb);
		assert(err == ERR_OK);
		sock->pcb = NULL;
	}
	debug_tcp_print("freed pcb");

	if (sock->buf) {
		free_wbuf_chain((struct wbuf_chain *) sock->buf);
		sock->buf = NULL;
	}
	debug_tcp_print("freed TX data");

	sock_reply(sock, OK);
	debug_tcp_print("socket unused");

	/* mark it as unused */
	sock->ops = NULL;
}
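/*
 * For orientation only: tcp_op_close() above detaches every lwIP callback
 * before closing the pcb. A minimal sketch of the matching setup side is
 * shown below; it is an illustration under assumptions, not this file's
 * actual socket-creation code, and tcp_open_sketch() is a hypothetical name.
 * It uses only lwIP raw-API calls already used elsewhere in this file
 * (tcp_new/tcp_arg/tcp_err; tcp_sent/tcp_recv are attached later by
 * tcp_connected_callback() or tcp_do_accept()).
 */
#if 0
static int tcp_open_sketch(struct socket * sock)
{
	struct tcp_pcb * pcb;

	if ((pcb = tcp_new()) == NULL)
		return ENOMEM;

	/* pass the socket to every callback and catch fatal pcb errors */
	tcp_arg(pcb, sock);
	tcp_err(pcb, tcp_error_callback);

	sock->pcb = pcb;
	return OK;
}
#endif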
static void tcp_op_connect(struct socket * sock)
{
	ip_addr_t remaddr;
	struct tcp_pcb * pcb;
	err_t err;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	/*
	 * Connecting is going to send some packets. Unless an immediate
	 * error occurs this operation is going to block.
	 */
	sock_reply(sock, SUSPEND);
	sock->flags |= SOCK_FLG_OP_PENDING | SOCK_FLG_OP_CONNECTING;

	/* try to connect now */
	pcb = (struct tcp_pcb *) sock->pcb;
	remaddr = pcb->remote_ip;
	err = tcp_connect(pcb, &remaddr, pcb->remote_port,
				tcp_connected_callback);
	if (err == ERR_VAL)
		panic("Wrong tcp_connect arguments");
	if (err != ERR_OK)
		panic("Other tcp_connect error %d\n", err);
}
static u8_t raw_ip_op_receive(void *arg,
			__unused struct raw_pcb *pcb,
			struct pbuf *pbuf,
			ip_addr_t *addr)
{
	struct socket * sock = (struct socket *) arg;
	struct raw_ip_recv_data * data;
	int ret;

	debug_print("socket num : %ld addr : %x\n",
			get_sock_num(sock), (unsigned int) addr->addr);

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		/* we are resuming a suspended operation */
		ret = raw_ip_do_receive(&sock->mess, pbuf);

		if (ret > 0) {
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			if (sock->usr_flags & NWIO_EXCL) {
				pbuf_free(pbuf);
				return 1;
			} else
				return 0;
		} else {
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
		}
	}

	/* Do not enqueue more data than allowed */
	if (sock->recv_data_size > RAW_IP_BUF_SIZE)
		return 0;

	/*
	 * Nobody is waiting for the data or an error occurred above; we
	 * enqueue the packet.
	 */
	if (!(data = raw_ip_recv_alloc()))
		return 0;

	data->ip = *addr;
	if (sock->usr_flags & NWIO_EXCL) {
		data->pbuf = pbuf;
		ret = 1;
	} else {
		/* we store a copy of this packet */
		data->pbuf = pbuf_alloc(PBUF_RAW, pbuf->tot_len, PBUF_RAM);
		if (data->pbuf == NULL) {
			debug_print("LWIP : cannot allocate new pbuf\n");
			raw_ip_recv_free(data);
			return 0;
		}

		if (pbuf_copy(data->pbuf, pbuf) != ERR_OK) {
			debug_print("LWIP : cannot copy pbuf\n");
			raw_ip_recv_free(data);
			return 0;
		}
		ret = 0;
	}

	/*
	 * If we didn't manage to enqueue the packet we report it as not
	 * consumed
	 */
	if (sock_enqueue_data(sock, data, data->pbuf->tot_len) != OK) {
		raw_ip_recv_free(data);
		ret = 0;
	}

	return ret;
}
static void udp_recv_callback(void *arg,
			struct udp_pcb *pcb,
			struct pbuf *pbuf,
			ip_addr_t *addr,
			u16_t port)
{
	struct socket * sock = (struct socket *) arg;
	struct udp_recv_data * data;

	debug_udp_print("socket num : %ld addr : %x port : %d\n",
			get_sock_num(sock), (unsigned int) addr->addr, port);

	if (sock->flags & SOCK_FLG_OP_PENDING) {
		/* we are resuming a suspended operation */
		int ret;

		ret = udp_do_receive(sock, &sock->mess, pcb, pbuf, addr, port);

		if (ret > 0) {
			pbuf_free(pbuf);
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
			return;
		} else {
			sock_reply(sock, ret);
			sock->flags &= ~SOCK_FLG_OP_PENDING;
		}
	}

	/* Do not enqueue more data than allowed */
	if (sock->recv_data_size > UDP_BUF_SIZE) {
		pbuf_free(pbuf);
		return;
	}

	/*
	 * Nobody is waiting for the data or an error occurred above; we
	 * enqueue the packet.
	 */
	if (!(data = udp_recv_alloc())) {
		pbuf_free(pbuf);
		return;
	}

	data->ip = *addr;
	data->port = port;
	data->pbuf = pbuf;

	if (sock_enqueue_data(sock, data, data->pbuf->tot_len) != OK) {
		udp_recv_free(data);
		return;
	}

	/*
	 * We don't need to notify when somebody is already waiting; reviving
	 * the read operation does the trick for us. But we must announce new
	 * data available here.
	 */
	if (sock_select_read_set(sock))
		sock_select_notify(sock);
}
static void tcp_op_write(struct socket * sock, message * m)
{
	int ret;
	struct wbuf * wbuf;
	unsigned snd_buf_len, usr_buf_len;
	u8_t flgs = 0;

	if (!sock->pcb) {
		sock_reply(sock, ENOTCONN);
		return;
	}

	usr_buf_len = m->COUNT;
	debug_tcp_print("socket num %ld data size %d",
			get_sock_num(sock), usr_buf_len);

	/*
	 * Let at most one buffer grow beyond TCP_BUF_SIZE. This is to
	 * minimize small writes from userspace if only a few bytes were
	 * sent before.
	 */
	if (sock->buf_size >= TCP_BUF_SIZE) {
		/* FIXME do not block for now */
		debug_tcp_print("WARNING : tcp buffers too large, "
				"cannot allocate more");
		sock_reply(sock, ENOMEM);
		return;
	}
	/*
	 * Never let the allocated buffers grow to more than 2 x TCP_BUF_SIZE
	 * and never copy more than the space available.
	 */
	usr_buf_len = (usr_buf_len > TCP_BUF_SIZE ? TCP_BUF_SIZE : usr_buf_len);
	wbuf = wbuf_add(sock, usr_buf_len);
	if (!wbuf) {
		debug_tcp_print("cannot allocate new buffer of %d bytes",
				usr_buf_len);
		sock_reply(sock, ENOMEM);
		return;
	}
	debug_tcp_print("new wbuf for %d bytes", wbuf->len);

	if ((ret = copy_from_user(m->m_source, wbuf->data, usr_buf_len,
				(cp_grant_id_t) m->IO_GRANT, 0)) != OK) {
		sock_reply(sock, ret);
		return;
	}
	wbuf->written = 0;
	wbuf->rem_len = usr_buf_len;

	/*
	 * If a writing operation is already in progress, we just enqueue the
	 * data and quit.
	 */
	if (sock->flags & SOCK_FLG_OP_WRITING) {
		struct wbuf_chain * wc = (struct wbuf_chain *) sock->buf;
		/*
		 * We are adding a buffer with unsent data. If we don't have
		 * any other unsent data, set the pointer to this buffer.
		 */
		if (wc->unsent == NULL) {
			wc->unsent = wbuf;
			debug_tcp_print("unsent %p remains %d\n",
					wbuf, wbuf->rem_len);
		}
		debug_tcp_print("returns %d\n", usr_buf_len);
		sock_reply(sock, usr_buf_len);
		/*
		 * We cannot accept new (write) operations. We set the flag
		 * only after sending the reply; otherwise the reply would
		 * have to be delivered by a revive and we could deadlock.
		 */
		if (sock->buf_size >= TCP_BUF_SIZE)
			sock->flags |= SOCK_FLG_OP_PENDING;

		return;
	}

	/*
	 * Start sending data if the operation is not in progress yet. The
	 * current buffer is the only one we have, we cannot send more.
	 */
	snd_buf_len = tcp_sndbuf((struct tcp_pcb *) sock->pcb);
	debug_tcp_print("tcp can accept %d bytes", snd_buf_len);

	wbuf->unacked = (snd_buf_len < wbuf->rem_len ?
				snd_buf_len : wbuf->rem_len);
	wbuf->rem_len -= wbuf->unacked;

	if (wbuf->rem_len) {
		flgs = TCP_WRITE_FLAG_MORE;
		/*
		 * Remember that this buffer has some data which we didn't
		 * pass to tcp yet.
		 */
		((struct wbuf_chain *) sock->buf)->unsent = wbuf;
		debug_tcp_print("unsent %p remains %d\n", wbuf, wbuf->rem_len);
	}

	ret = tcp_write((struct tcp_pcb *) sock->pcb, wbuf->data,
				wbuf->unacked, flgs);
	tcp_output((struct tcp_pcb *) sock->pcb);
	debug_tcp_print("%d bytes to tcp", wbuf->unacked);

	if (ret == ERR_OK) {
		/*
		 * The operation is being processed; no need to remember the
		 * message in this case, we are going to reply immediately.
		 */
		debug_tcp_print("returns %d\n", usr_buf_len);
		sock_reply(sock, usr_buf_len);
		sock->flags |= SOCK_FLG_OP_WRITING;
		if (sock->buf_size >= TCP_BUF_SIZE)
			sock->flags |= SOCK_FLG_OP_PENDING;
	} else
		sock_reply(sock, EIO);
}
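/*
 * A minimal standalone sketch of the send-path arithmetic used in
 * tcp_op_write() above, with hypothetical names and values; it is not part
 * of the driver. A user write is first clamped to TCP_BUF_SIZE, then only
 * min(tcp_sndbuf(pcb), len) bytes are handed to tcp_write(), and
 * TCP_WRITE_FLAG_MORE is set when a remainder stays queued (tracked via the
 * "unsent" pointer above).
 */
#if 0
static void tcp_write_split_sketch(struct tcp_pcb * pcb,
					const void * data, unsigned len)
{
	unsigned chunk, rem;

	len = (len > TCP_BUF_SIZE ? TCP_BUF_SIZE : len);/* clamp the request */
	chunk = tcp_sndbuf(pcb);	/* space lwIP will take right now */
	if (chunk > len)
		chunk = len;
	rem = len - chunk;		/* left behind as "unsent" data */

	tcp_write(pcb, data, chunk, rem ? TCP_WRITE_FLAG_MORE : 0);
	tcp_output(pcb);
}
#endif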
static err_t tcp_recv_callback(void *arg, struct tcp_pcb *tpcb,
				struct pbuf *pbuf, err_t err)
{
	int ret, enqueued = 0;
	struct socket * sock = (struct socket *) arg;

	debug_tcp_print("socket num %ld", get_sock_num(sock));

	if (sock->pcb == NULL) {
		if (sock_select_set(sock))
			sock_select_notify(sock);
		return ERR_OK;
	}

	assert((struct tcp_pcb *) sock->pcb == tpcb);

	if (err != ERR_OK)
		return ERR_OK;
	if (!pbuf) {
		debug_tcp_print("tcp stream closed on the remote side");
		// sock->flags |= SOCK_FLG_CLOSED;

		/* wake up the reader and report EOF */
		if (sock->flags & SOCK_FLG_OP_PENDING &&
				sock->flags & SOCK_FLG_OP_READING) {
			sock_reply(sock, 0);
			sock->flags &= ~(SOCK_FLG_OP_PENDING |
						SOCK_FLG_OP_READING);
		}
#if 0
		/* if there are any undelivered data, drop them */
		sock_dequeue_data_all(sock, tcp_recv_free);
		tcp_abandon(tpcb, 0);
		sock->pcb = NULL;
#endif

		return ERR_OK;
	}

	/*
	 * FIXME we always enqueue the data first. If the head is empty and a
	 * read operation is pending we could try to deliver immediately
	 * without enqueueing.
	 */
	if (enqueue_rcv_data(sock, pbuf) == ERR_OK)
		enqueued = 1;

	/*
	 * Deliver data if there is a pending read operation, otherwise notify
	 * select if the socket is being monitored.
	 */
	if (sock->flags & SOCK_FLG_OP_PENDING) {
		if (sock->flags & SOCK_FLG_OP_READING) {
			ret = read_from_tcp(sock, &sock->mess);
			debug_tcp_print("read op finished");
			sock_reply(sock, ret);
			sock->flags &= ~(SOCK_FLG_OP_PENDING |
						SOCK_FLG_OP_READING);
		}
	} else if (!(sock->flags & SOCK_FLG_OP_WRITING) &&
			sock_select_rw_set(sock))
		sock_select_notify(sock);

	/* perhaps we have delivered some data to the user, try to enqueue again */
	if (!enqueued)
		return enqueue_rcv_data(sock, pbuf);
	else
		return ERR_OK;
}