/* Tear down the poll/select state for one socket descriptor.
 *
 * The TCP event callback that was attached when the poll was set up is
 * recovered from fds->priv and released with interrupts locked out.
 *
 * Returns OK on success; -EINVAL (CONFIG_DEBUG only) on a bad argument.
 */

static inline int net_pollteardown(FAR struct socket *psock, struct pollfd *fds)
{
  FAR struct uip_conn *conn = psock->s_conn;
  FAR struct uip_callback_s *pollcb;
  uip_lock_t save;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds->priv)
    {
      return -EINVAL;
    }
#endif

  /* Recover the callback that was stashed in the poll structure */

  pollcb = (FAR struct uip_callback_s *)fds->priv;
  if (pollcb != NULL)
    {
      /* Release the callback with interrupts disabled */

      save = uip_lock();
      uip_tcpcallbackfree(conn, pollcb);
      uip_unlock(save);

      /* Mark the poll/select data slot as unused */

      fds->priv = NULL;
    }

  return OK;
}
/* Break the TCP connection associated with a socket that is being closed.
 *
 * If the socket is still connected and the peer has not already torn the
 * connection down, a close callback is installed, the driver is poked to
 * transmit, and this function blocks until the disconnect event arrives.
 */

static inline void netclose_disconnect(FAR struct socket *psock)
{
  struct tcp_close_s state;
  struct uip_conn *conn;
  uip_lock_t save;

  /* Interrupts are disabled here to avoid race conditions */

  save = uip_lock();

  /* The active disconnect is needed only while the socket is in the
   * connected state AND the host has not beaten us to it and disconnected
   * first (short-circuit keeps us from examining an invalid connection).
   */

  conn = (struct uip_conn *)psock->s_conn;
  if (_SS_ISCONNECTED(psock->s_flags) && conn->tcpstateflags == UIP_ESTABLISHED)
    {
      /* Set up to receive TCP data event callbacks */

      state.cl_cb = uip_tcpcallbackalloc(conn);
      if (state.cl_cb != NULL)
        {
          state.cl_psock = psock;
          sem_init(&state.cl_sem, 0, 0);

          state.cl_cb->flags = UIP_NEWDATA | UIP_POLL | UIP_CLOSE | UIP_ABORT;
          state.cl_cb->priv  = (void *)&state;
          state.cl_cb->event = netclose_interrupt;

          /* Notify the device driver of the availability of TX data */

          netdev_txnotify(&conn->ripaddr);

          /* Wait for the disconnect event */

          (void)uip_lockedwait(&state.cl_sem);

          /* We are now disconnected; release the wait resources */

          sem_destroy(&state.cl_sem);
          uip_tcpcallbackfree(conn, state.cl_cb);
        }
    }

  uip_unlock(save);
}
/* Release the callback resources used while waiting for a connection to
 * complete.
 *
 * pstate - The connect() wait state; its tc_cb callback is freed.
 * status - The connection result.  On failure (status < 0), the
 *          connection-event hooks are also detached.
 */

static inline void tcp_teardown_callbacks(struct tcp_connect_s *pstate, int status)
{
  struct uip_conn *conn = pstate->tc_conn;

  /* Make sure that no further interrupts are processed */

  uip_tcpcallbackfree(conn, pstate->tc_cb);

  /* FIX: null the freed pointer so a stale reference cannot be used or
   * double-freed later (matches the handling in the companion revision of
   * this function).
   */

  pstate->tc_cb = NULL;

  /* If we successfully connected, we will continue to monitor the
   * connection state via callbacks.
   */

  if (status < 0)
    {
      /* Failed to connect:  detach the connection event hooks too */

      conn->connection_private = NULL;
      conn->connection_event   = NULL;
    }
}
/* Tear down the poll/select state for one socket descriptor.
 *
 * Recovers the poll-info container from fds->priv, releases the TCP event
 * callback it holds (with interrupts disabled), detaches the container
 * from the poll structure, and frees the container.
 *
 * Returns OK on success; -EINVAL (CONFIG_DEBUG only) on a bad argument.
 */

static inline int net_pollteardown(FAR struct socket *psock, FAR struct pollfd *fds)
{
  FAR struct uip_conn *conn = psock->s_conn;
  FAR struct net_poll_s *pollinfo;
  uip_lock_t save;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds->priv)
    {
      return -EINVAL;
    }
#endif

  /* Recover the poll state container from the poll structure */

  pollinfo = (FAR struct net_poll_s *)fds->priv;
  DEBUGASSERT(pollinfo && pollinfo->fds && pollinfo->cb);
  if (pollinfo != NULL)
    {
      /* Release the callback with interrupts disabled */

      save = uip_lock();
      uip_tcpcallbackfree(conn, pollinfo->cb);
      uip_unlock(save);

      /* Detach the container from the poll structure... */

      pollinfo->fds->priv = NULL;

      /* ...then free the container itself */

      kfree(pollinfo);
    }

  return OK;
}
static inline void tcp_teardown_callbacks(struct tcp_connect_s *pstate, int status) { FAR struct uip_conn *conn = pstate->tc_conn; /* Make sure that no further interrupts are processed */ uip_tcpcallbackfree(conn, pstate->tc_cb); pstate->tc_cb = NULL; /* If we successfully connected, we will continue to monitor the connection * state via callbacks. */ if (status < 0) { /* Failed to connect. Stop the connection event monitor */ net_stopmonitor(conn); } }
void uip_tcpfree(struct uip_conn *conn) { #if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0 struct uip_readahead_s *readahead; #endif uip_lock_t flags; /* Because g_free_tcp_connections is accessed from user level and interrupt * level, code, it is necessary to keep interrupts disabled during this * operation. */ DEBUGASSERT(conn->crefs == 0); flags = uip_lock(); /* Check if there is an allocated close callback structure */ if (conn->closecb != NULL) { uip_tcpcallbackfree(conn, conn->closecb); } /* UIP_ALLOCATED means that that the connection is not in the active list * yet. */ if (conn->tcpstateflags != UIP_ALLOCATED) { /* Remove the connection from the active list */ dq_rem(&conn->node, &g_active_tcp_connections); } /* Release any read-ahead buffers attached to the connection */ #if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0 while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL) { uip_tcpreadaheadrelease(readahead); } #endif /* Remove any backlog attached to this connection */ #ifdef CONFIG_NET_TCPBACKLOG if (conn->backlog) { uip_backlogdestroy(conn); } /* If this connection is, itself, backlogged, then remove it from the * parent connection's backlog list. */ if (conn->blparent) { uip_backlogdelete(conn->blparent, conn); } #endif /* Mark the connection available and put it into the free list */ conn->tcpstateflags = UIP_CLOSED; dq_addlast(&conn->node, &g_free_tcp_connections); uip_unlock(flags); }
void uip_tcpfree(struct uip_conn *conn) { FAR struct uip_callback_s *cb; FAR struct uip_callback_s *next; #ifdef CONFIG_NET_TCP_READAHEAD FAR struct uip_readahead_s *readahead; #endif #ifdef CONFIG_NET_TCP_WRITE_BUFFERS FAR struct uip_wrbuffer_s *wrbuffer; #endif uip_lock_t flags; /* Because g_free_tcp_connections is accessed from user level and interrupt * level, code, it is necessary to keep interrupts disabled during this * operation. */ DEBUGASSERT(conn->crefs == 0); flags = uip_lock(); /* Free remaining callbacks, actually there should be only the close callback * left. */ for (cb = conn->list; cb; cb = next) { next = cb->flink; uip_tcpcallbackfree(conn, cb); } /* UIP_ALLOCATED means that that the connection is not in the active list * yet. */ if (conn->tcpstateflags != UIP_ALLOCATED) { /* Remove the connection from the active list */ dq_rem(&conn->node, &g_active_tcp_connections); } #ifdef CONFIG_NET_TCP_READAHEAD /* Release any read-ahead buffers attached to the connection */ while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL) { uip_tcpreadahead_release(readahead); } #endif #ifdef CONFIG_NET_TCP_WRITE_BUFFERS /* Release any write buffers attached to the connection */ while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q)) != NULL) { uip_tcpwrbuffer_release(wrbuffer); } while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->unacked_q)) != NULL) { uip_tcpwrbuffer_release(wrbuffer); } #endif #ifdef CONFIG_NET_TCPBACKLOG /* Remove any backlog attached to this connection */ if (conn->backlog) { uip_backlogdestroy(conn); } /* If this connection is, itself, backlogged, then remove it from the * parent connection's backlog list. */ if (conn->blparent) { uip_backlogdelete(conn->blparent, conn); } #endif /* Mark the connection available and put it into the free list */ conn->tcpstateflags = UIP_CLOSED; dq_addlast(&conn->node, &g_free_tcp_connections); uip_unlock(flags); }
ssize_t send(int sockfd, const void *buf, size_t len, int flags) { FAR struct socket *psock = sockfd_socket(sockfd); struct send_s state; uip_lock_t save; int err; int ret = OK; /* Verify that the sockfd corresponds to valid, allocated socket */ if (!psock || psock->s_crefs <= 0) { err = EBADF; goto errout; } /* If this is an un-connected socket, then return ENOTCONN */ if (psock->s_type != SOCK_STREAM || !_SS_ISCONNECTED(psock->s_flags)) { err = ENOTCONN; goto errout; } /* Set the socket state to sending */ psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND); /* Perform the TCP send operation */ /* Initialize the state structure. This is done with interrupts * disabled because we don't want anything to happen until we * are ready. */ save = uip_lock(); memset(&state, 0, sizeof(struct send_s)); (void)sem_init(&state. snd_sem, 0, 0); /* Doesn't really fail */ state.snd_sock = psock; /* Socket descriptor to use */ state.snd_buflen = len; /* Number of bytes to send */ state.snd_buffer = buf; /* Buffer to send from */ if (len > 0) { struct uip_conn *conn = (struct uip_conn*)psock->s_conn; /* Allocate resources to receive a callback */ state.snd_cb = uip_tcpcallbackalloc(conn); if (state.snd_cb) { /* Get the initial sequence number that will be used */ state.snd_isn = uip_tcpgetsequence(conn->sndseq); /* There is no outstanding, unacknowledged data after this * initial sequence number. 
*/ conn->unacked = 0; /* Update the initial time for calculating timeouts */ #if defined(CONFIG_NET_SOCKOPTS) && !defined(CONFIG_DISABLE_CLOCK) state.snd_time = clock_systimer(); #endif /* Set up the callback in the connection */ state.snd_cb->flags = UIP_ACKDATA|UIP_REXMIT|UIP_POLL|UIP_CLOSE|UIP_ABORT|UIP_TIMEDOUT; state.snd_cb->priv = (void*)&state; state.snd_cb->event = send_interrupt; /* Notify the device driver of the availaibilty of TX data */ netdev_txnotify(&conn->ripaddr); /* Wait for the send to complete or an error to occur: NOTES: (1) * uip_lockedwait will also terminate if a signal is received, (2) interrupts * may be disabled! They will be re-enabled while the task sleeps and * automatically re-enabled when the task restarts. */ ret = uip_lockedwait(&state. snd_sem); /* Make sure that no further interrupts are processed */ uip_tcpcallbackfree(conn, state.snd_cb); } } sem_destroy(&state. snd_sem); uip_unlock(save); /* Set the socket state to idle */ psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE); /* Check for a errors. Errors are signaled by negative errno values * for the send length */ if (state.snd_sent < 0) { err = state.snd_sent; goto errout; } /* If uip_lockedwait failed, then we were probably reawakened by a signal. In * this case, uip_lockedwait will have set errno appropriately. */ if (ret < 0) { err = -ret; goto errout; } /* Return the number of bytes actually sent */ return state.snd_sent; errout: *get_errno_ptr() = err; return ERROR; }
/* Implement the TCP/IP-specific half of recvfrom(): drain any buffered
 * read-ahead data first, then (when allowed) block until new data,
 * close/abort, or timeout is reported by the TCP event callback.
 *
 * NOTE(review): the stray #endif just after the signature closes a
 * conditional that begins above this excerpt -- presumably selecting an
 * alternate (IPv6) signature.  Confirm against the full file.
 */

static ssize_t tcp_recvfrom(FAR struct socket *psock, FAR void *buf, size_t len,
                            FAR struct sockaddr_in *infrom )
#endif
{
  struct recvfrom_s state;
  uip_lock_t save;
  int ret;

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we are ready.
   */

  save = uip_lock();
  recvfrom_init(psock, buf, len, infrom, &state);

  /* Handle any TCP data already buffered in a read-ahead buffer.  NOTE
   * that there may be read-ahead data to be retrieved even after the
   * socket has been disconnected.
   */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  recvfrom_readahead(&state);

  /* The default return value is the number of bytes that we just copied
   * into the user buffer.  We will return this if the socket has become
   * disconnected or if the user request was completely satisfied with
   * data from the read-ahead buffers.
   */

  ret = state.rf_recvlen;
#else
  /* Otherwise, the default return value of zero is used (only for the
   * case where len == state.rf_buflen is zero).
   */

  ret = 0;
#endif

  /* Verify that the SOCK_STREAM has been and still is connected */

  if (!_SS_ISCONNECTED(psock->s_flags))
    {
      /* Was any data transferred from the readahead buffer after we were
       * disconnected?  If so, then return the number of bytes received.
       * We will wait to return end-of-connection indications the next
       * time that recvfrom() is called.
       *
       * If no data was received (i.e., ret == 0 -- it will not be
       * negative) and the connection was gracefully closed by the remote
       * peer, then return success.  If rf_recvlen is zero, the caller of
       * recvfrom() will get an end-of-file indication.
       */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
      if (ret <= 0 && !_SS_ISCLOSED(psock->s_flags))
#else
      if (!_SS_ISCLOSED(psock->s_flags))
#endif
        {
          /* Nothing was previously received from the readahead buffers.
           * The SOCK_STREAM must be (re-)connected in order to receive
           * any additional data.
           */

          ret = -ENOTCONN;
        }
    }

  /* In general, this uIP-based implementation will not support
   * non-blocking socket operations... except in a few cases:  Here for
   * TCP receive with read-ahead enabled.  If this socket is configured as
   * non-blocking then return EAGAIN if no data was obtained from the
   * read-ahead buffers.
   */

  else
#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  if (_SS_ISNONBLOCK(psock->s_flags))
    {
      /* Return the number of bytes read from the read-ahead buffer if
       * something was received (already in 'ret'); EAGAIN if not.
       */

      if (ret <= 0)
        {
          /* Nothing was received */

          ret = -EAGAIN;
        }
    }

  /* It is okay to block if we need to.  If there is space to receive
   * anything more, then we will wait to receive the data.  Otherwise
   * return the number of bytes read from the read-ahead buffer (already
   * in 'ret').
   */

  else
#endif

  /* We get here when we decide that we need to set up the wait for
   * incoming TCP/IP data.  Just a few more conditions to check:
   *
   * 1) Make sure that there is buffer space to receive additional data
   *    (state.rf_buflen > 0).  This could be zero, for example, if
   *    read-ahead buffering was enabled and we filled the user buffer
   *    with data from the read-ahead buffers.  And
   * 2) if read-ahead buffering is enabled
   *    (CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0) and delay logic is
   *    disabled (CONFIG_NET_TCP_RECVDELAY == 0), then we do not want to
   *    wait if we already obtained some data from the read-ahead buffer.
   *    In that case, return now with what we have (don't wait for more
   *    because there may be no timeout).
   */

#if CONFIG_NET_TCP_RECVDELAY == 0 && CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  if (state.rf_recvlen == 0 && state.rf_buflen > 0)
#else
  if (state.rf_buflen > 0)
#endif
    {
      struct uip_conn *conn = (struct uip_conn *)psock->s_conn;

      /* Set up the callback in the connection */

      state.rf_cb = uip_tcpcallbackalloc(conn);
      if (state.rf_cb)
        {
          state.rf_cb->flags = UIP_NEWDATA|UIP_POLL|UIP_CLOSE|UIP_ABORT|UIP_TIMEDOUT;
          state.rf_cb->priv = (void*)&state;
          state.rf_cb->event = recvfrom_tcpinterrupt;

          /* Wait for either the receive to complete or for an
           * error/timeout to occur.  NOTES:  (1) uip_lockedwait will also
           * terminate if a signal is received, (2) interrupts may be
           * disabled!  They will be re-enabled while the task sleeps and
           * automatically re-enabled when the task restarts.
           */

          ret = uip_lockedwait(&state.rf_sem);

          /* Make sure that no further interrupts are processed */

          uip_tcpcallbackfree(conn, state.rf_cb);
          ret = recvfrom_result(ret, &state);
        }
      else
        {
          ret = -EBUSY;
        }
    }

  uip_unlock(save);
  recvfrom_uninit(&state);
  return (ssize_t)ret;
}