/**
 * @brief Remove and return the first node of a queue.
 *
 * Interrupts are disabled around the queue inspection and removal so the
 * operation is atomic with respect to interrupt-context producers.
 *
 * @param queue The target queue.
 * @return A pointer to the dequeued node, or NULL if the queue was empty.
 */

static struct buf_node *get_node_from(sq_queue_t *queue)
{
  irqstate_t flags;
  struct buf_node *node;

  flags = irqsave();
  node  = sq_empty(queue) ? NULL : (struct buf_node *)sq_remfirst(queue);
  irqrestore(flags);

  return node;
}
/****************************************************************************
 * Name: usbmsc_unbind
 *
 * Description:
 *   Invoked when the USB mass storage class driver is unbound from the
 *   USB device.  Undoes everything established by the bind operation:
 *   resets the endpoint configuration, frees the pre-allocated control
 *   request and bulk read requests, and releases the bulk IN/OUT
 *   endpoints.
 *
 * Input Parameters:
 *   driver - The class driver instance being unbound
 *   dev    - The USB device the driver was bound to
 *
 ****************************************************************************/

static void usbmsc_unbind(FAR struct usbdevclass_driver_s *driver, FAR struct usbdev_s *dev)
{
  FAR struct usbmsc_dev_s *priv;
  FAR struct usbmsc_req_s *reqcontainer;
  irqstate_t flags;
  int i;

  usbtrace(TRACE_CLASSUNBIND, 0);

#ifdef CONFIG_DEBUG
  /* Paranoid argument checking (debug builds only) */

  if (!driver || !dev || !dev->ep0)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_UNBINDINVALIDARGS), 0);
      return;
    }
#endif

  /* Extract reference to private data */

  priv = ((FAR struct usbmsc_driver_s *)driver)->dev;

#ifdef CONFIG_DEBUG
  if (!priv)
    {
      usbtrace(TRACE_CLSERROR(USBMSC_TRACEERR_EP0NOTBOUND1), 0);
      return;
    }
#endif

  /* The worker thread should have already been stopped by the
   * driver un-initialize logic.
   */

  DEBUGASSERT(priv->thstate == USBMSC_STATE_TERMINATED || priv->thstate == USBMSC_STATE_NOTSTARTED);

  /* Make sure that we are not already unbound */

  if (priv != NULL)
    {
      /* Make sure that the endpoints have been unconfigured.  If
       * we were terminated gracefully, then the configuration should
       * already have been reset.  If not, then calling usbmsc_resetconfig
       * should cause the endpoints to immediately terminate all
       * transfers and return the requests to us (with result == -ESHUTDOWN)
       */

      usbmsc_resetconfig(priv);

      /* Give the transfers terminated above time to complete before the
       * requests and endpoints are freed below.
       */

      up_mdelay(50);

      /* Free the pre-allocated control request */

      if (priv->ctrlreq != NULL)
        {
          usbmsc_freereq(dev->ep0, priv->ctrlreq);
          priv->ctrlreq = NULL;
        }

      /* Free pre-allocated read requests (which should all have
       * been returned to the free list at this time -- we don't check)
       */

      for (i = 0; i < CONFIG_USBMSC_NRDREQS; i++)
        {
          reqcontainer = &priv->rdreqs[i];
          if (reqcontainer->req)
            {
              usbmsc_freereq(priv->epbulkout, reqcontainer->req);
              reqcontainer->req = NULL;
            }
        }

      /* Free the bulk OUT endpoint */

      if (priv->epbulkout)
        {
          DEV_FREEEP(dev, priv->epbulkout);
          priv->epbulkout = NULL;
        }

      /* Free write requests that are not in use (which should be all
       * of them).  Interrupts are disabled while the list is drained
       * because the list may be modified from interrupt handlers.
       */

      flags = irqsave();
      while (!sq_empty(&priv->wrreqlist))
        {
          reqcontainer = (struct usbmsc_req_s *)sq_remfirst(&priv->wrreqlist);
          if (reqcontainer->req != NULL)
            {
              usbmsc_freereq(priv->epbulkin, reqcontainer->req);
            }
        }

      /* Free the bulk IN endpoint */

      if (priv->epbulkin)
        {
          DEV_FREEEP(dev, priv->epbulkin);
          priv->epbulkin = NULL;
        }

      irqrestore(flags);
    }
}
/****************************************************************************
 * Name: net_pollsetup
 *
 * Description:
 *   Setup to monitor events on one TCP/IP socket for poll().  Allocates a
 *   poll info container and a TCP callback, links them together, and
 *   reports any requested events that are already in effect.
 *
 * Input Parameters:
 *   psock - An instance of the internal socket structure
 *   fds   - The structure describing the events to be monitored
 *
 * Returned Value:
 *   OK on success; -EINVAL on bad arguments (debug builds only); -ENOMEM
 *   if the info container cannot be allocated; -EBUSY if no TCP callback
 *   structure is available.
 *
 ****************************************************************************/

static inline int net_pollsetup(FAR struct socket *psock, FAR struct pollfd *fds)
{
  FAR struct uip_conn *conn = psock->s_conn;
  FAR struct net_poll_s *info;
  FAR struct uip_callback_s *cb;
  uip_lock_t flags;
  int ret;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds)
    {
      return -EINVAL;
    }
#endif

  /* Allocate a container to hold the poll information */

  info = (FAR struct net_poll_s *)kmalloc(sizeof(struct net_poll_s));
  if (!info)
    {
      return -ENOMEM;
    }

  /* Some of the following must be atomic */

  flags = uip_lock();

  /* Allocate a TCP/IP callback structure */

  cb = uip_tcpcallbackalloc(conn);
  if (!cb)
    {
      ret = -EBUSY;
      goto errout_with_lock;
    }

  /* Initialize the poll info container */

  info->psock = psock;
  info->fds   = fds;
  info->cb    = cb;

  /* Initialize the callback structure.  Save the reference to the info
   * structure as callback private data so that it will be available during
   * callback processing.
   */

  cb->flags   = (UIP_NEWDATA|UIP_BACKLOG|UIP_POLL|UIP_CLOSE|UIP_ABORT|UIP_TIMEDOUT);
  cb->priv    = (FAR void *)info;
  cb->event   = poll_interrupt;

  /* Save the reference in the poll info structure as fds private as well
   * for use during poll teardown.
   */

  fds->priv   = (FAR void *)info;

#ifdef CONFIG_NET_TCPBACKLOG
  /* Check for read data or backlogged connection availability now */

  if (!sq_empty(&conn->readahead) || uip_backlogavailable(conn))
#else
  /* Check for read data availability now */

  if (!sq_empty(&conn->readahead))
#endif
    {
      /* Readable data is an input event:  report POLLIN, not POLLOUT.
       * (The original code set POLLOUT here, which would wake threads
       * waiting to write when data became available to read.)
       */

      fds->revents |= (POLLIN & fds->events);
    }

  /* Check for a loss of connection events */

  if (!_SS_ISCONNECTED(psock->s_flags))
    {
      fds->revents |= (POLLERR | POLLHUP);
    }

  /* Check if any requested events are already in effect */

  if (fds->revents != 0)
    {
      /* Yes.. then signal the poll logic */

      sem_post(fds->sem);
    }

  uip_unlock(flags);
  return OK;

errout_with_lock:
  kfree(info);
  uip_unlock(flags);
  return ret;
}
/****************************************************************************
 * Name: psock_send_interrupt
 *
 * Description:
 *   TCP device callback for the write-buffered send logic.  Four cases are
 *   handled on each invocation:
 *
 *   1. TCP_ACKDATA:  Data was ACKed; free fully-ACKed write buffers on the
 *      unacked_q and trim partially-ACKed ones (including the partially
 *      sent buffer at the head of the write_q).
 *   2. TCP_CLOSE/TCP_ABORT/TCP_TIMEDOUT:  The connection was lost; report
 *      it and free all write buffers.
 *   3. TCP_REXMIT:  Retransmission was requested; rewind the send counts
 *      and move un-ACKed segments back onto the write_q (expiring buffers
 *      that exceed TCP_MAXRTX retries).
 *   4. Otherwise (poll/rexmit with the outgoing packet free):  Send the
 *      next chunk of data from the buffer at the head of the write_q.
 *
 * Input Parameters:
 *   dev    - The device driver structure to use in the send operation
 *   pvconn - The TCP connection structure (cast from void)
 *   pvpriv - The socket structure (cast from void)
 *   flags  - Set of events describing why the callback was invoked
 *
 * Returned Value:
 *   The (possibly modified) flags value.
 *
 ****************************************************************************/

static uint16_t psock_send_interrupt(FAR struct net_driver_s *dev, FAR void *pvconn, FAR void *pvpriv, uint16_t flags)
{
  FAR struct tcp_conn_s *conn = (FAR struct tcp_conn_s *)pvconn;
  FAR struct socket *psock = (FAR struct socket *)pvpriv;

  nllvdbg("flags: %04x\n", flags);

  /* If this packet contains an acknowledgement, then update the count of
   * acknowledged bytes.
   */

  if ((flags & TCP_ACKDATA) != 0)
    {
      FAR struct tcp_wrbuffer_s *wrb;
      FAR sq_entry_t *entry;
      FAR sq_entry_t *next;
      uint32_t ackno;

      ackno = tcp_getsequence(TCPBUF->ackno);
      nllvdbg("ACK: ackno=%u flags=%04x\n", ackno, flags);

      /* Look at every write buffer in the unacked_q.  The unacked_q
       * holds write buffers that have been entirely sent, but which
       * have not yet been ACKed.
       */

      for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
        {
          uint32_t lastseq;

          /* Check if some or all of this write buffer has been ACKed.
           * Capture 'next' first because 'entry' may be removed below.
           */

          next = sq_next(entry);
          wrb = (FAR struct tcp_wrbuffer_s*)entry;

          /* If the ACKed sequence number is greater than the start
           * sequence number of the write buffer, then some or all of
           * the write buffer has been ACKed.
           */

          if (ackno > WRB_SEQNO(wrb))
            {
              /* Get the sequence number at the end of the data */

              lastseq = WRB_SEQNO(wrb) + WRB_PKTLEN(wrb);
              nllvdbg("ACK: wrb=%p seqno=%u lastseq=%u pktlen=%u ackno=%u\n", wrb, WRB_SEQNO(wrb), lastseq, WRB_PKTLEN(wrb), ackno);

              /* Has the entire buffer been ACKed? */

              if (ackno >= lastseq)
                {
                  nllvdbg("ACK: wrb=%p Freeing write buffer\n", wrb);

                  /* Yes... Remove the write buffer from ACK waiting queue */

                  sq_rem(entry, &conn->unacked_q);

                  /* And return the write buffer to the pool of free buffers */

                  tcp_wrbuffer_release(wrb);
                }
              else
                {
                  unsigned int trimlen;

                  /* No, then just trim the ACKed bytes from the beginning
                   * of the write buffer.  This will free up some I/O buffers
                   * that can be reused while we are still sending the last
                   * buffers in the chain.
                   */

                  trimlen = ackno - WRB_SEQNO(wrb);
                  if (trimlen > WRB_SENT(wrb))
                    {
                      /* More data has been ACKed than we have sent? */

                      trimlen = WRB_SENT(wrb);
                    }

                  nllvdbg("ACK: wrb=%p trim %u bytes\n", wrb, trimlen);
                  WRB_TRIM(wrb, trimlen);
                  WRB_SEQNO(wrb) = ackno;
                  WRB_SENT(wrb) -= trimlen;

                  /* Set the new sequence number for what remains */

                  nllvdbg("ACK: wrb=%p seqno=%u pktlen=%u\n", wrb, WRB_SEQNO(wrb), WRB_PKTLEN(wrb));
                }
            }
        }

      /* A special case is the head of the write_q which may be partially
       * sent and so can still have un-ACKed bytes that could get ACKed
       * before the entire write buffer has even been sent.
       */

      wrb = (FAR struct tcp_wrbuffer_s*)sq_peek(&conn->write_q);
      if (wrb && WRB_SENT(wrb) > 0 && ackno > WRB_SEQNO(wrb))
        {
          uint32_t nacked;

          /* Number of bytes that were ACKed */

          nacked = ackno - WRB_SEQNO(wrb);
          if (nacked > WRB_SENT(wrb))
            {
              /* More data has been ACKed than we have sent? ASSERT? */

              nacked = WRB_SENT(wrb);
            }

          nllvdbg("ACK: wrb=%p seqno=%u nacked=%u sent=%u ackno=%u\n", wrb, WRB_SEQNO(wrb), nacked, WRB_SENT(wrb), ackno);

          /* Trim the ACKed bytes from the beginning of the write buffer. */

          WRB_TRIM(wrb, nacked);
          WRB_SEQNO(wrb) = ackno;
          WRB_SENT(wrb) -= nacked;

          nllvdbg("ACK: wrb=%p seqno=%u pktlen=%u sent=%u\n", wrb, WRB_SEQNO(wrb), WRB_PKTLEN(wrb), WRB_SENT(wrb));
        }
    }

  /* Check for a loss of connection */

  else if ((flags & (TCP_CLOSE | TCP_ABORT | TCP_TIMEDOUT)) != 0)
    {
      nllvdbg("Lost connection: %04x\n", flags);

      /* Report not connected */

      net_lostconnection(psock, flags);

      /* Free write buffers and terminate polling */

      psock_lost_connection(psock, conn);
      return flags;
    }

  /* Check if we are being asked to retransmit data */

  else if ((flags & TCP_REXMIT) != 0)
    {
      FAR struct tcp_wrbuffer_s *wrb;
      FAR sq_entry_t *entry;

      nllvdbg("REXMIT: %04x\n", flags);

      /* Is there a partially sent write buffer at the head of the
       * write_q?  Has anything been sent from that write buffer?
       */

      wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
      nllvdbg("REXMIT: wrb=%p sent=%u\n", wrb, wrb ? WRB_SENT(wrb) : 0);

      if (wrb != NULL && WRB_SENT(wrb) > 0)
        {
          FAR struct tcp_wrbuffer_s *tmp;
          uint16_t sent;

          /* Yes.. Reset the number of bytes sent from the write buffer,
           * rewinding the connection's unacked/sent counters accordingly
           * (saturating at zero).
           */

          sent = WRB_SENT(wrb);
          if (conn->unacked > sent)
            {
              conn->unacked -= sent;
            }
          else
            {
              conn->unacked = 0;
            }

          if (conn->sent > sent)
            {
              conn->sent -= sent;
            }
          else
            {
              conn->sent = 0;
            }

          WRB_SENT(wrb) = 0;
          nllvdbg("REXMIT: wrb=%p sent=%u, conn unacked=%d sent=%d\n", wrb, WRB_SENT(wrb), conn->unacked, conn->sent);

          /* Increment the retransmit count on this write buffer. */

          if (++WRB_NRTX(wrb) >= TCP_MAXRTX)
            {
              nlldbg("Expiring wrb=%p nrtx=%u\n", wrb, WRB_NRTX(wrb));

              /* The maximum retry count has been exhausted.  Remove the
               * write buffer at the head of the queue.
               */

              tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
              DEBUGASSERT(tmp == wrb);
              UNUSED(tmp);

              /* And return the write buffer to the free list */

              tcp_wrbuffer_release(wrb);

              /* NOTE expired is different from un-ACKed, it is designed to
               * represent the number of segments that have been sent,
               * retransmitted, and un-ACKed, if expired is not zero, the
               * connection will be closed.
               *
               * field expired can only be updated at TCP_ESTABLISHED state
               */

              conn->expired++;
            }
        }

      /* Move all segments that have been sent but not ACKed to the write
       * queue again.  Note: the un-ACKed segments are put at the head of the
       * write_q so they can be resent as soon as possible.
       */

      while ((entry = sq_remlast(&conn->unacked_q)) != NULL)
        {
          wrb = (FAR struct tcp_wrbuffer_s*)entry;
          uint16_t sent;

          /* Reset the number of bytes sent from the write buffer,
           * rewinding the connection counters as above.
           */

          sent = WRB_SENT(wrb);
          if (conn->unacked > sent)
            {
              conn->unacked -= sent;
            }
          else
            {
              conn->unacked = 0;
            }

          if (conn->sent > sent)
            {
              conn->sent -= sent;
            }
          else
            {
              conn->sent = 0;
            }

          WRB_SENT(wrb) = 0;
          nllvdbg("REXMIT: wrb=%p sent=%u, conn unacked=%d sent=%d\n", wrb, WRB_SENT(wrb), conn->unacked, conn->sent);

          /* Free any write buffers that have exceeded the retry count */

          if (++WRB_NRTX(wrb) >= TCP_MAXRTX)
            {
              nlldbg("Expiring wrb=%p nrtx=%u\n", wrb, WRB_NRTX(wrb));

              /* Return the write buffer to the free list */

              tcp_wrbuffer_release(wrb);

              /* NOTE expired is different from un-ACKed, it is designed to
               * represent the number of segments that have been sent,
               * retransmitted, and un-ACKed, if expired is not zero, the
               * connection will be closed.
               *
               * field expired can only be updated at TCP_ESTABLISHED state
               */

              conn->expired++;
              continue;
            }
          else
            {
              /* Insert the write buffer into the write_q (in sequence
               * number order).  The retransmission will occur below
               * when the write buffer with the lowest sequence number
               * is pulled from the write_q again.
               */

              nllvdbg("REXMIT: Moving wrb=%p nrtx=%u\n", wrb, WRB_NRTX(wrb));
              psock_insert_segment(wrb, &conn->write_q);
            }
        }
    }

  /* Check if the outgoing packet is available (it may have been claimed
   * by a sendto interrupt serving a different thread).
   */

  if (dev->d_sndlen > 0)
    {
      /* Another thread has beat us sending data, wait for the next poll */

      return flags;
    }

  /* We get here if (1) not all of the data has been ACKed, (2) we have been
   * asked to retransmit data, (3) the connection is still healthy, and (4)
   * the outgoing packet is available for our use.  In this case, we are
   * now free to send more data to receiver -- UNLESS the buffer contains
   * unprocessed incoming data.  In that event, we will have to wait for the
   * next polling cycle.
   */

  if ((conn->tcpstateflags & TCP_ESTABLISHED) && (flags & (TCP_POLL | TCP_REXMIT)) && !(sq_empty(&conn->write_q)))
    {
      /* Check if the destination IP address is in the ARP table.  If not,
       * then the send won't actually make it out... it will be replaced with
       * an ARP request.
       *
       * NOTE 1: This could be an expensive check if there are a lot of
       * entries in the ARP table.
       *
       * NOTE 2: If we are actually harvesting IP addresses on incoming IP
       * packets, then this check should not be necessary; the MAC mapping
       * should already be in the ARP table in many cases.
       *
       * NOTE 3: If CONFIG_NET_ARP_SEND then we can be assured that the IP
       * address mapping is already in the ARP table.
       */

#if defined(CONFIG_NET_ETHERNET) && !defined(CONFIG_NET_ARP_IPIN) && \
    !defined(CONFIG_NET_ARP_SEND)
      if (arp_find(conn->ripaddr) != NULL)
#endif
        {
          FAR struct tcp_wrbuffer_s *wrb;
          size_t sndlen;

          /* Peek at the head of the write queue (but don't remove anything
           * from the write queue yet).  We know from the above test that
           * the write_q is not empty.
           */

          wrb = (FAR struct tcp_wrbuffer_s *)sq_peek(&conn->write_q);
          DEBUGASSERT(wrb);

          /* Get the amount of data that we can send in the next packet.
           * We will send either the remaining data in the buffer I/O
           * buffer chain, or as much as will fit given the MSS and current
           * window size.
           */

          sndlen = WRB_PKTLEN(wrb) - WRB_SENT(wrb);
          if (sndlen > tcp_mss(conn))
            {
              sndlen = tcp_mss(conn);
            }

          if (sndlen > conn->winsize)
            {
              sndlen = conn->winsize;
            }

          nllvdbg("SEND: wrb=%p pktlen=%u sent=%u sndlen=%u\n", wrb, WRB_PKTLEN(wrb), WRB_SENT(wrb), sndlen);

          /* Set the sequence number for this segment.  If we are
           * retransmitting, then the sequence number will already
           * be set for this write buffer.
           */

          if (WRB_SEQNO(wrb) == (unsigned)-1)
            {
              WRB_SEQNO(wrb) = conn->isn + conn->sent;
            }

          /* The TCP stack updates sndseq on receipt of ACK *before*
           * this function is called.  In that case sndseq will point
           * to the next unacknowledged byte (which might have already
           * been sent).  We will overwrite the value of sndseq here
           * before the packet is sent.
           */

          tcp_setsequence(conn->sndseq, WRB_SEQNO(wrb) + WRB_SENT(wrb));

          /* Then set-up to send that amount of data with the offset
           * corresponding to the amount of data already sent.  (this
           * won't actually happen until the polling cycle completes).
           */

          devif_iob_send(dev, WRB_IOB(wrb), sndlen, WRB_SENT(wrb));

          /* Remember how much data we send out now so that we know
           * when everything has been acknowledged.  Just increment
           * the amount of data sent.  This will be needed in sequence
           * number calculations.
           */

          conn->unacked += sndlen;
          conn->sent += sndlen;

          nllvdbg("SEND: wrb=%p nrtx=%u unacked=%u sent=%u\n", wrb, WRB_NRTX(wrb), conn->unacked, conn->sent);

          /* Increment the count of bytes sent from this write buffer */

          WRB_SENT(wrb) += sndlen;

          nllvdbg("SEND: wrb=%p sent=%u pktlen=%u\n", wrb, WRB_SENT(wrb), WRB_PKTLEN(wrb));

          /* Remove the write buffer from the write queue if the
           * last of the data has been sent from the buffer.
           */

          DEBUGASSERT(WRB_SENT(wrb) <= WRB_PKTLEN(wrb));
          if (WRB_SENT(wrb) >= WRB_PKTLEN(wrb))
            {
              FAR struct tcp_wrbuffer_s *tmp;

              nllvdbg("SEND: wrb=%p Move to unacked_q\n", wrb);

              tmp = (FAR struct tcp_wrbuffer_s *)sq_remfirst(&conn->write_q);
              DEBUGASSERT(tmp == wrb);
              UNUSED(tmp);

              /* Put the I/O buffer chain in the un-acked queue; the
               * segment is waiting for ACK again
               */

              psock_insert_segment(wrb, &conn->unacked_q);
            }

          /* Only one data can be sent by low level driver at once,
           * tell the caller stop polling the other connection.
           */

          flags &= ~TCP_POLL;
        }
    }

  /* Continue waiting */

  return flags;
}
/****************************************************************************
 * Name: net_pollsetup
 *
 * Description:
 *   Setup to monitor events on one TCP/IP socket for poll().  Allocates a
 *   TCP callback that will run poll_interrupt() on connection events, and
 *   reports POLLIN immediately if read data is already available.
 *
 * Input Parameters:
 *   psock - An instance of the internal socket structure
 *   fds   - The structure describing the events to be monitored
 *
 * Returned Value:
 *   OK on success; -EINVAL on bad arguments (debug builds only); -EBUSY
 *   if no TCP callback structure could be allocated.
 *
 ****************************************************************************/

static inline int net_pollsetup(FAR struct socket *psock, struct pollfd *fds)
{
  FAR struct uip_conn *conn = psock->s_conn;
  FAR struct uip_callback_s *cb;
  uip_lock_t flags;
  int ret;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds)
    {
      return -EINVAL;
    }
#endif

  /* Some of the following must be atomic */

  flags = uip_lock();

  /* Allocate a TCP/IP callback structure */

  cb = uip_tcpcallbackalloc(conn);
  if (!cb)
    {
      ret = -EBUSY;
      goto errout_with_irq;
    }

  /* Initialize the callback structure */

  cb->flags = UIP_NEWDATA|UIP_BACKLOG|UIP_POLL|UIP_CLOSE|UIP_ABORT|UIP_TIMEDOUT;
  cb->priv  = (FAR void *)fds;
  cb->event = poll_interrupt;

  /* Save the callback reference in the poll structure for use at teardown
   * as well
   */

  fds->priv = (FAR void *)cb;

#ifdef CONFIG_NET_TCPBACKLOG
  /* Check for read data or backlogged connection availability now */

  if (!sq_empty(&conn->readahead) || uip_backlogavailable(conn))
#else
  /* Check for read data availability now */

  if (!sq_empty(&conn->readahead))
#endif
    {
      fds->revents = fds->events & POLLIN;
      if (fds->revents != 0)
        {
          /* If data is available now, then signal the poll logic */

          sem_post(fds->sem);
        }
    }

  uip_unlock(flags);
  return OK;

errout_with_irq:
  uip_unlock(flags);
  return ret;
}
/****************************************************************************
 * Name: uip_backlogavailable
 *
 * Description:
 *   Return true if there is a pending connection on this connection's
 *   backlog queue.  Safe to call with a NULL connection or no backlog
 *   support allocated; both report false.
 *
 ****************************************************************************/

bool uip_backlogavailable(FAR struct uip_conn *conn)
{
  if (!conn || !conn->backlog)
    {
      return false;
    }

  return !sq_empty(&conn->backlog->bl_pending);
}
/****************************************************************************
 * Name: send_interrupt
 *
 * Description:
 *   uIP device callback for the write-buffered send logic.  On each event:
 *   releases ACKed segments from the unacked_q, handles loss of connection
 *   (disabling further callbacks), moves un-ACKed segments back to the
 *   write_q on retransmit requests (expiring those past UIP_MAXRTX), and
 *   sends the next segment from the head of the write_q when the outgoing
 *   packet buffer is available.
 *
 * Input Parameters:
 *   dev    - The device driver structure to use in the send operation
 *   pvconn - The uIP connection structure (cast from void)
 *   pvpriv - The socket structure (cast from void)
 *   flags  - Set of events describing why the callback was invoked
 *
 * Returned Value:
 *   The (possibly modified) flags value.
 *
 ****************************************************************************/

static uint16_t send_interrupt(FAR struct uip_driver_s *dev, FAR void *pvconn, FAR void *pvpriv, uint16_t flags)
{
  FAR struct uip_conn *conn = (FAR struct uip_conn*)pvconn;
  FAR struct socket *psock = (FAR struct socket *)pvpriv;

  nllvdbg("flags: %04x\n", flags);

  /* If this packet contains an acknowledgement, then update the count of
   * acknowledged bytes.
   */

  if ((flags & UIP_ACKDATA) != 0)
    {
      FAR sq_entry_t *entry, *next;
      FAR struct uip_wrbuffer_s *segment;
      uint32_t ackno;

      ackno = uip_tcpgetsequence(TCPBUF->ackno);

      /* Release every segment on the unacked_q whose sequence number lies
       * below the ACKed sequence number.  'next' is captured first because
       * 'entry' may be removed from the queue.
       */

      for (entry = sq_peek(&conn->unacked_q); entry; entry = next)
        {
          next = sq_next(entry);
          segment = (FAR struct uip_wrbuffer_s*)entry;
          if (segment->wb_seqno < ackno)
            {
              nllvdbg("ACK: acked=%d buflen=%d ackno=%d\n", segment->wb_seqno, segment->wb_nbytes, ackno);

              /* Segment was ACKed.  Remove from ACK waiting queue */

              sq_rem(entry, &conn->unacked_q);

              /* Return the write buffer to the pool of free buffers */

              uip_tcpwrbuffer_release(segment);
            }
        }
    }

  /* Check for a loss of connection */

  else if ((flags & (UIP_CLOSE | UIP_ABORT | UIP_TIMEDOUT)) != 0)
    {
      /* Report not connected */

      nllvdbg("Lost connection\n");
      net_lostconnection(psock, flags);
      goto end_wait;
    }

  /* Check if we are being asked to retransmit data */

  else if ((flags & UIP_REXMIT) != 0)
    {
      sq_entry_t *entry;

      /* Put all segments that have been sent but not ACKed to the write
       * queue again.  Note: the un-ACKed segment is put at the head of
       * the write_q so it can be sent as soon as possible.
       */

      while ((entry = sq_remlast(&conn->unacked_q)))
        {
          struct uip_wrbuffer_s *segment = (struct uip_wrbuffer_s*)entry;

          if (segment->wb_nrtx >= UIP_MAXRTX)
            {
              /* Too many retries.  Return the write buffer */

              uip_tcpwrbuffer_release(segment);

              /* NOTE expired is different from un-ACKed, it is designed to
               * represent the number of segments that have been sent,
               * retransmitted, and un-ACKed, if expired is not zero, the
               * connection will be closed.
               *
               * field expired can only be updated at UIP_ESTABLISHED state
               */

              conn->expired++;
              continue;
            }

          send_insert_seqment(segment, &conn->write_q);
        }
    }

  /* Check if the outgoing packet is available (it may have been claimed
   * by a sendto interrupt serving a different thread).
   */

  if (dev->d_sndlen > 0)
    {
      /* Another thread has beat us sending data, wait for the next poll */

      return flags;
    }

  /* We get here if (1) not all of the data has been ACKed, (2) we have been
   * asked to retransmit data, (3) the connection is still healthy, and (4)
   * the outgoing packet is available for our use.  In this case, we are
   * now free to send more data to receiver -- UNLESS the buffer contains
   * unprocessed incoming data.  In that event, we will have to wait for the
   * next polling cycle.
   */

  if ((conn->tcpstateflags & UIP_ESTABLISHED) && (flags & (UIP_POLL | UIP_REXMIT)) && !(sq_empty(&conn->write_q)))
    {
      /* Check if the destination IP address is in the ARP table.  If not,
       * then the send won't actually make it out... it will be replaced
       * with an ARP request.
       *
       * NOTE 1: This could be an expensive check if there are a lot of
       * entries in the ARP table.
       *
       * NOTE 2: If we are actually harvesting IP addresses on incoming IP
       * packets, then this check should not be necessary; the MAC mapping
       * should already be in the ARP table.
       */

#if defined(CONFIG_NET_ETHERNET) && !defined(CONFIG_NET_ARP_IPIN)
      if (uip_arp_find(conn->ripaddr) != NULL)
#endif
        {
          FAR struct uip_wrbuffer_s *segment;
          FAR void *sndbuf;
          size_t sndlen;

          /* Get the amount of data that we can send in the next packet */

          segment = (FAR struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q);
          if (segment)
            {
              sndbuf = segment->wb_buffer;
              sndlen = segment->wb_nbytes;

              DEBUGASSERT(sndlen <= uip_mss(conn));

              /* REVISIT: There should be a check here to assure that we do
               * not exceed the window (conn->winsize).
               */

              /* Set the sequence number for this segment.  NOTE: uIP
               * updates sndseq on receipt of ACK *before* this function
               * is called.  In that case sndseq will point to the next
               * unacknowledged byte (which might have already been
               * sent).  We will overwrite the value of sndseq here
               * before the packet is sent.
               */

              if (segment->wb_nrtx == 0 && segment->wb_seqno == (unsigned)-1)
                {
                  segment->wb_seqno = conn->isn + conn->sent;
                }

              uip_tcpsetsequence(conn->sndseq, segment->wb_seqno);

              /* Then set-up to send that amount of data.  (this won't
               * actually happen until the polling cycle completes).
               */

              uip_send(dev, sndbuf, sndlen);

              /* Remember how much data we send out now so that we know
               * when everything has been acknowledged.  Just increment
               * the amount of data sent.  This will be needed in
               * sequence number calculations and we know that this is
               * not a re-transmission.  Re-transmissions do not go
               * through this path.
               */

              if (segment->wb_nrtx == 0)
                {
                  conn->unacked += sndlen;
                  conn->sent += sndlen;
                }

              /* Increment the retransmission counter before expiration.
               * NOTE we will not calculate the retransmission timer
               * (RTT) to save cpu cycles, each segment will be
               * retransmitted UIP_MAXRTX times in half-second intervals
               * before expiration.
               */

              segment->wb_nrtx++;

              /* The segment is waiting for ACK again */

              send_insert_seqment(segment, &conn->unacked_q);

              /* Only one data can be sent by low level driver at once,
               * tell the caller stop polling the other connection.
               */

              flags &= ~UIP_POLL;
            }
        }
    }

  /* Continue waiting */

  return flags;

end_wait:

  /* Do not allow any further callbacks */

  psock->s_sndcb->flags = 0;
  psock->s_sndcb->event = NULL;
  return flags;
}
/****************************************************************************
 * Name: netclose_interrupt
 *
 * Description:
 *   Device callback that monitors the progress of a TCP close operation.
 *   On a disconnection event it either wakes a thread performing a
 *   LINGERing close (CONFIG_NET_SOLINGER) or frees the connection
 *   directly.  While write-buffered data remains un-ACKed it drops any
 *   received data and keeps waiting; otherwise it drops received data and
 *   forces TCP_CLOSE into the response.
 *
 * Input Parameters:
 *   dev    - The device driver structure (d_len is cleared to drop
 *            received data)
 *   pvconn - The TCP connection structure (cast from void)
 *   pvpriv - The close state structure when LINGERing, else NULL
 *            (cast from void)
 *   flags  - Set of events describing why the callback was invoked
 *
 * Returned Value:
 *   The (possibly modified) flags value; zero terminates further
 *   callbacks.
 *
 ****************************************************************************/

static uint16_t netclose_interrupt(FAR struct net_driver_s *dev, FAR void *pvconn, FAR void *pvpriv, uint16_t flags)
{
#ifdef CONFIG_NET_SOLINGER
  FAR struct tcp_close_s *pstate = (FAR struct tcp_close_s *)pvpriv;
#endif
  FAR struct tcp_conn_s *conn = (FAR struct tcp_conn_s *)pvconn;

  DEBUGASSERT(conn != NULL);

  nllvdbg("conn: %p flags: %04x\n", conn, flags);

  /* TCP_DISCONN_EVENTS:
   *   TCP_CLOSE:    The remote host has closed the connection
   *   TCP_ABORT:    The remote host has aborted the connection
   *   TCP_TIMEDOUT: The remote did not respond, the connection timed out
   *   NETDEV_DOWN:  The network device went down
   */

  if ((flags & TCP_DISCONN_EVENTS) != 0)
    {
      /* The disconnection is complete */

#ifdef CONFIG_NET_SOLINGER
      /* pstate non-NULL means that we are performing a LINGERing close. */

      if (pstate)
        {
          /* Wake up the waiting thread with a successful result */

          pstate->cl_result = OK;
          goto end_wait;
        }

      /* Otherwise, nothing is waiting on the close event and we can perform
       * the completion actions here.
       */

      else
#endif
        {
          /* Free connection resources */

          tcp_free(conn);

          /* Stop further callbacks */

          flags = 0;
        }
    }

#ifdef CONFIG_NET_SOLINGER
  /* Check for a timeout. */

  else if (pstate && close_timeout(pstate))
    {
      /* Yes.. Wake up the waiting thread and report the timeout */

      nlldbg("CLOSE timeout\n");
      pstate->cl_result = -ETIMEDOUT;
      goto end_wait;
    }
#endif /* CONFIG_NET_SOLINGER */

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  /* Check if all outstanding bytes have been ACKed */

  else if (conn->unacked != 0 || !sq_empty(&conn->write_q))
    {
      /* No... we are still waiting for ACKs.  Drop any received data, but
       * do not yet report TCP_CLOSE in the response.
       */

      dev->d_len = 0;
      flags = (flags & ~TCP_NEWDATA);
    }
#endif /* CONFIG_NET_TCP_WRITE_BUFFERS */

  else
    {
      /* Drop data received in this state and make sure that TCP_CLOSE
       * is set in the response
       */

      dev->d_len = 0;
      flags = (flags & ~TCP_NEWDATA) | TCP_CLOSE;
    }

  return flags;

#ifdef CONFIG_NET_SOLINGER
end_wait:

  /* Detach the callback so no further events are delivered, then wake the
   * thread waiting on the close semaphore.
   */

  pstate->cl_cb->flags = 0;
  pstate->cl_cb->priv = NULL;
  pstate->cl_cb->event = NULL;
  sem_post(&pstate->cl_sem);

  nllvdbg("Resuming\n");
  return 0;
#endif
}