/*
 * Release everything a socket buffer holds: free the queued mbuf
 * chain and zero out the reserved-space accounting fields.
 */
void
sbrelease(struct sockbuf *sb)
{
	/* Drop all buffered data first. */
	sbflush(sb);

	/* Then clear the space reservation. */
	sb->sb_hiwat = 0;
	sb->sb_mbmax = 0;
}
/*
 * Initiate (or continue) disconnect.
 * If embryonic state, just send reset (once).
 * If in ``let data drain'' option and linger null, just drop.
 * Otherwise (hard), mark socket disconnecting and drop
 * current input data; switch states based on user close, and
 * send segment to peer (with FIN).
 */
static void
tcp_disconnect(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;

	/* Caller must hold both the global tcbinfo and the per-inpcb write locks. */
	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * Neither tcp_close() nor tcp_drop() should return NULL, as the
	 * socket is still open.
	 */
	if (tp->t_state < TCPS_ESTABLISHED) {
		/* Connection never completed: tear it down outright. */
		tp = tcp_close(tp);
		KASSERT(tp != NULL,
		    ("tcp_disconnect: tcp_close() returned NULL"));
	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
		/* SO_LINGER with a zero timeout: hard drop, no data drain. */
		tp = tcp_drop(tp, 0);
		KASSERT(tp != NULL,
		    ("tcp_disconnect: tcp_drop() returned NULL"));
	} else {
		/* Graceful close: mark disconnecting and discard unread input. */
		soisdisconnecting(so);
		sbflush(&so->so_rcv);
		tcp_usrclosed(tp);
		/* Only emit a segment if the inp hasn't been dropped meanwhile. */
		if (!(inp->inp_flags & INP_DROPPED))
			tcp_output_disconnect(tp);
	}
}
/*
 * Free mbufs held by a socket, and reserved mbuf space. We do not assert
 * that the socket is held locked here: see sorflush().
 */
void
sbrelease(struct sockbuf *sb, struct socket *so)
{
	/* The buffer must belong to the socket the caller passed in. */
	KASSERT(sb->sb_so == so);

	sbflush(sb);
	/*
	 * NOTE(review): the chgsbsize() call that would return the reserved
	 * space to the owning uid (and adjust sb_hiwat) is disabled below,
	 * so unlike the flush path sb_hiwat is left untouched here —
	 * confirm this is intentional.
	 */
	// (void)chgsbsize(so->so_uidinfo, &sb->sb_hiwat, 0, RLIM_INFINITY);
	sb->sb_mbmax = 0;
}
/*
 * Queue a CPL_ABORT_REQ work request so the adapter sends a RST for
 * this offloaded connection.  May be called after the inp has been
 * dropped; in that case the caller-supplied snd_nxt is used because
 * the tcpcb can no longer be trusted.  At most one abort is issued per
 * toep (guarded by TPF_ABORT_SHUTDOWN).
 */
void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;			/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	/* The flow must have been set up (flowc WR) before it can be aborted. */
	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	/* rsvd0 carries a send sequence number — presumably the one the RST
	 * should use; confirm against the cpl_abort_req firmware interface. */
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	/* rsvd1 is nonzero iff no tx payload was ever sent on this tid. */
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}
/* FUNCTION: soshutdown()
 *
 * Shut down one or both halves of a socket connection.
 *
 * PARAM1: struct socket *   socket structure
 * PARAM2: int               shutdown action:
 *                             0 = shutdown read half of connection
 *                             1 = shutdown write half of connection
 *                             2 = shutdown both halves of connection
 *
 * RETURNS: int  0 if successful, else error code from the protocol layer
 */
int
soshutdown(struct socket *so, int how)
{
	/* Map 0/1/2 onto a bitmask: bit 0 = read half, bit 1 = write half. */
	int halves = how + 1;

	if (halves & 1)		/* caller wanted READ or BOTH */
		sorflush(so);

	if (halves & 2)		/* caller wanted WRITE or BOTH */
	{
		/* discard anything still queued for transmission */
		sbflush(&so->so_snd);

		/* hand the shutdown request down to the protocol */
		so->so_req = PRU_SHUTDOWN;
		return ((*so->so_proto->pr_usrreq)(so,
		    (struct mbuf *)0, (struct mbuf *)0));
	}
	return 0;
}
struct tcpcb * tcp_disconnect(struct tcpcb * tp) { struct socket * so = tp->t_inpcb->inp_socket; if (tp->t_state < TCPS_ESTABLISHED) tp = tcp_close(tp); else if ((so->so_options & SO_LINGER) && so->so_linger == 0) tp = tcp_drop(tp, 0); else { soisdisconnecting(so); sbflush(&so->so_rcv); tp = tcp_usrclosed(tp); if (tp) (void) tcp_output(tp); } return (tp); }
/*
 * Initiate (or continue) disconnect.
 * If embryonic state, just send reset (once).
 * If in ``let data drain'' option and linger null, just drop.
 * Otherwise (hard), mark socket disconnecting and drop
 * current input data; switch states based on user close, and
 * send segment to peer (with FIN).
 */
static struct tcpcb *
tcp_disconnect(struct tcpcb *tp)
{
	struct socket *so = tp->t_inpcb->inp_socket;

	if (tp->t_state < TCPS_ESTABLISHED) {
		/* Never fully established: close immediately. */
		tp = tcp_close(tp);
	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
		/* SO_LINGER with zero timeout: hard drop. */
		tp = tcp_drop(tp, 0);
	} else {
		/*
		 * Hold the receive-side serializing token while we flush
		 * so_rcv and run the close state machine.
		 */
		lwkt_gettoken(&so->so_rcv.ssb_token);
		soisdisconnecting(so);
		sbflush(&so->so_rcv.sb);
		tp = tcp_usrclosed(tp);
		if (tp)
			tcp_output(tp);	/* emit FIN if the tcpcb survived */
		lwkt_reltoken(&so->so_rcv.ssb_token);
	}
	return (tp);
}
/*
 * rawip_usrreq() - user-request dispatcher for raw IP sockets.
 *
 * Handles the PRU_* request stored in so->so_req: attach/detach of the
 * raw IP endpoint, bind/connect filtering, datagram send, and local/peer
 * address queries.  m carries outgoing data (PRU_SEND); nam carries an
 * address mbuf or, for PRU_ATTACH, the IP protocol number.
 *
 * Returns 0 on success, else an errno-style error code.
 *
 * NOTE(review): mbuf ownership on error paths is inconsistent — m is
 * freed on the lookup-failure and ENOBUFS paths but not on the EINVAL /
 * ENOTCONN / EISCONN / EMSGSIZE paths.  Confirm the caller's convention
 * before assuming these are leaks.
 */
int
rawip_usrreq(struct socket * so, struct mbuf * m, struct mbuf * nam)
{
	int e;                    /* error from IP stack */
	PACKET pkt;               /* packet for sending */
	struct sockaddr_in * sin;
	struct ipraw_ep * ep;
	ip_addr fhost;            /* host to send to/recv from (network byte order) */
	ip_addr lhost;            /* local IP address to bind to (network byte order) */
	u_char prot;
	struct ip * pip;
	int req;
	NET ifp;                  /* ptr to network interface structure */

	req = so->so_req;         /* get request from socket struct */

	switch (req) {
	case PRU_ATTACH:
		/* fake small windows so sockets asks us to move data */
		so->so_rcv.sb_hiwat = so->so_snd.sb_hiwat =
			ip_raw_maxalloc(so->so_options & SO_HDRINCL);

		/* make a raw IP endpoint; nam encodes the protocol number */
		prot = (u_char)(MBUF2LONG(nam));
		/* unlock the net resource; IP will immediately re-lock it */
		UNLOCK_NET_RESOURCE(NET_RESID);
		ep = ip_raw_open(prot, 0L, 0L, rawip_soinput, so);
		LOCK_NET_RESOURCE(NET_RESID);
		if (!ep)
			return(EINVAL);
		return 0;

	case PRU_DETACH:
		/* delete the raw IP endpoint */
		ep = rawip_lookup(so);
		if (!ep)
			return(EINVAL);
		/* unlock the net resource; IP will immediately re-lock it */
		UNLOCK_NET_RESOURCE(NET_RESID);
		ip_raw_close(ep);
		LOCK_NET_RESOURCE(NET_RESID);
		return 0;

	case PRU_CONNECT:
		/* "connect" the raw IP endpoint to a peer IP address:
		 * this sets a filter for received IP datagrams and sets
		 * a default address for sending
		 */
		/* fall through to shared bind logic */
	case PRU_BIND:
		/* do bind parameters lookups and tests */
		if (nam == NULL)
			return(EINVAL);
		sin = mtod(nam, struct sockaddr_in *);
		if (sin == NULL)
			return(EINVAL);
		if (nam->m_len != sizeof (*sin))
			return(EINVAL);
		ep = rawip_lookup(so);
		if (!ep)
			return(EINVAL);

		if (req == PRU_BIND) {
			/* bind the socket to a local interface IP address.
			 *
			 * if the caller-supplied address is INADDR_ANY,
			 * don't bind to a specific address; else,
			 * make sure the caller-supplied address is
			 * an interface IP address and if so, bind to that
			 */
			if (sin->sin_addr.s_addr == INADDR_ANY) {
				lhost = 0L;
			} else {
				lhost = sin->sin_addr.s_addr;
				/* verify that lhost is a local interface address */
				for (ifp = (NET)(netlist.q_head); ifp; ifp = ifp->n_next)
					if (ifp->n_ipaddr == lhost)
						break;
				if (ifp == NULL)
					return(EADDRNOTAVAIL);
			}
			/* bind the endpoint */
			ep->ipr_laddr = lhost;
		} else /* PRU_CONNECT */ {
			/* connect the socket to a remote IP address.
			 *
			 * if the caller-supplied address is INADDR_ANY,
			 * use the wildcard address; else, use the caller-
			 * supplied address
			 */
			if (sin->sin_addr.s_addr == INADDR_ANY)
				fhost = 0L;
			else
				fhost = sin->sin_addr.s_addr;

			/* connect the IP endpoint */
			ep->ipr_faddr = fhost;

			/* mark the socket as connected or disconnected, as appropriate */
			if (fhost != 0L) {
				so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING);
				so->so_state |= SS_ISCONNECTED;
			} else {
				so->so_state &= ~SS_ISCONNECTED;
			}
			/* since socket was in listen state, packets may be queued */
			sbflush(&so->so_rcv);   /* dump these now */
		}
		return 0;

	case PRU_SEND:
		/* do parameter lookups and tests */
		if (!m)   /* no data passed? */
			return(EINVAL);
		ep = rawip_lookup(so);
		if (!ep) {
			m_free(m);
			/* may be bogus socket, but more likely the connection may
			 * have closed due to ICMP dest unreachable from other side.
			 */
			return(ECONNREFUSED);
		}
		if (nam == NULL)   /* no sendto() info passed, must be send() */
		{
			if (!(so->so_state & SS_ISCONNECTED))
				return (ENOTCONN);
			fhost = ep->ipr_faddr;
		} else {
			if (so->so_state & SS_ISCONNECTED)
				return (EISCONN);
			if (nam->m_len != sizeof (*sin)) {
				dtrap();
				return (EINVAL);
			}
			sin = mtod(nam, struct sockaddr_in *);
			fhost = sin->sin_addr.s_addr;
		}

		/* since our pkt->nb_buff size is tied to max packet size, we
		 * assume our raw IP datagrams are always in one mbuf and that the
		 * mbuf -- but check anyway
		 */
		if (m->m_len > (unsigned)ip_raw_maxalloc(so->so_options & SO_HDRINCL)) {
			dtrap();   /* should never happen */
			/* NOTE(review): m is not freed on this path, unlike the
			 * ENOBUFS path below — verify the ownership convention. */
			return EMSGSIZE;   /* try to recover */
		}

		/* get a packet buffer for send */
		pkt = ip_raw_alloc(m->m_len, so->so_options & SO_HDRINCL);
		if (!pkt) {
			m_free(m);
			return ENOBUFS;   /* report buffer shortages */
		}
		MEMCPY(pkt->nb_prot, m->m_data, m->m_len);
		pkt->nb_plen = m->m_len;

		/* finished with mbuf, free it now */
		m_free(m);

		pkt->fhost = fhost;

		/* if we're being asked to send to 255.255.255.255 (a local-net
		 * broadcast), figure out which interface to send the broadcast
		 * on, based on the IP address that the socket is bound to: if
		 * it has been bound to an interface address, we should send the
		 * broadcast on that interface; else, we look for the first
		 * interface that can support broadcasts and is up; if we still
		 * don't have an interface we look for the first interface that
		 * is up; if (after all that) we don't have an interface then we
		 * fail with error EADDRNOTAVAIL; and finally, if we're built
		 * for a single-homed configuration where there's only one
		 * interface, we might as well use it, so we do.
		 */
		if (fhost == 0xffffffff) {
#ifdef MULTI_HOMED
			if (ep->ipr_laddr != 0L) {
				/* bound socket: broadcast on the bound interface */
				for (ifp = (NET)(netlist.q_head); ifp; ifp = ifp->n_next)
					if (ifp->n_ipaddr == ep->ipr_laddr)
						break;
			} else {
				/* unbound: first up interface that supports broadcast */
				for (ifp = (NET)(netlist.q_head); ifp; ifp = ifp->n_next)
					if ((ifp->n_flags & NF_BCAST) && (ifp->n_mib) &&
					    (ifp->n_mib->ifAdminStatus == NI_UP))
						break;
			}
			if (ifp == NULL) {
				/* fall back to the first interface that is up at all */
				for (ifp = (NET)(netlist.q_head); ifp; ifp = ifp->n_next)
					if ((ifp->n_mib) && (ifp->n_mib->ifAdminStatus == NI_UP))
						break;
				if (ifp == NULL)
					/* NOTE(review): pkt allocated above is not released
					 * on this failure path — possible buffer leak. */
					return(EADDRNOTAVAIL);
			}
			pkt->net = ifp;
#else /* single-homed */
			pkt->net = (NET)(netlist.q_head);
#endif /* MULTI_HOMED */
		}

#ifdef IP_MULTICAST
		/* If the socket has an IP moptions structure for multicast options,
		 * place a pointer to this structure in the PACKET structure.
		 */
		if (so->inp_moptions)
			pkt->imo = so->inp_moptions;
#endif /* IP_MULTICAST */

		if (so->so_options & SO_HDRINCL) {
			/* caller built the IP header; send the raw buffer as-is */
			UNLOCK_NET_RESOURCE(NET_RESID);
			e = ip_raw_write(pkt);
			LOCK_NET_RESOURCE(NET_RESID);
		} else {
			/* stack builds the header: fill in src/dest addresses */
			pip = (struct ip *)(pkt->nb_prot - IPHSIZ);
			if (ep->ipr_laddr)
				pip->ip_src = ep->ipr_laddr;
			else {
				if (fhost == 0xffffffff)
					pip->ip_src = pkt->net->n_ipaddr;
				else
					pip->ip_src = ip_mymach(fhost);
			}
			pip->ip_dest = fhost;
			UNLOCK_NET_RESOURCE(NET_RESID);
			e = ip_write(ep->ipr_prot, pkt);
			LOCK_NET_RESOURCE(NET_RESID);
		}
		if (e < 0)
			return(e);
		return 0;

	case PRU_SOCKADDR:
		/* fall through to share PRU_PEERADDR prefix */
	case PRU_PEERADDR:
		if (nam == NULL)
			return(EINVAL);
		sin = mtod(nam, struct sockaddr_in *);
		if (sin == NULL)
			return(EINVAL);
		ep = rawip_lookup(so);
		if (!ep)
			return(EINVAL);
		/* raw IP has no port numbers */
		sin->sin_port = 0;
		nam->m_len = sizeof(*sin);
		if (req == PRU_SOCKADDR) {
			sin->sin_addr.s_addr = ep->ipr_laddr;
		} else /* PRU_PEERADDR */ {
			sin->sin_addr.s_addr = ep->ipr_faddr;
		}
		return 0;

	case PRU_DISCONNECT:
	case PRU_RCVD:
		/* NOTE(review): dtrap() then success — these appear unexpected
		 * but tolerated; confirm against the socket layer's callers. */
		dtrap();
		return 0;

	case PRU_LISTEN:   /* don't support these for raw IP */
	case PRU_ACCEPT:
	default:
		return EOPNOTSUPP;
	}
}
/*
 * NAME: tp_detach()
 *
 * CALLED FROM:
 *	tp.trans, on behalf of a user close request
 *	and when the reference timer goes off
 *	(if the disconnect was initiated by the protocol entity
 *	rather than by the user)
 *
 * FUNCTION and ARGUMENTS:
 *	remove the tpcb structure from the list of active or
 *	partially active connections, recycle all the mbufs
 *	associated with the pcb, ref structure, sockbufs, etc.
 *	Only free the ref structure if you know that a ref timer
 *	wasn't set for this tpcb.
 *
 * RETURNS: Nada
 *
 * SIDE EFFECTS:
 *
 * NOTES:
 *	tp_soisdisconnected() was already called by the time this runs
 */
void
tp_detach(struct tp_pcb *tpcb)
{
	struct socket *so = tpcb->tp_sock;
#ifdef ARGO_DEBUG
	if (argo_debug[D_CONN]) {
		printf("tp_detach(tpcb %p, so %p)\n", tpcb, so);
	}
#endif
#ifdef TPPT
	if (tp_traceflags[D_CONN]) {
		tptraceTPCB(TPPTmisc, "tp_detach tpcb so lsufx",
			    tpcb, so, *(u_short *) (tpcb->tp_lsuffix), 0);
	}
#endif
#ifdef ARGO_DEBUG
	if (argo_debug[D_CONN]) {
		printf("so_snd at %p so_rcv at %p\n", &so->so_snd, &so->so_rcv);
		dump_mbuf(so->so_snd.sb_mb, "so_snd at detach ");
		printf("about to call LL detach, nlproto %p, nl_detach %p\n",
		       tpcb->tp_nlproto, tpcb->tp_nlproto->nlp_pcbdetach);
	}
#endif
	/* Expedited-data send queue should be empty by now; flush it anyway. */
	if (tpcb->tp_Xsnd.sb_mb) {
		printf("Unsent Xdata on detach; would panic");
		sbflush(&tpcb->tp_Xsnd);
	}
	/* Release any pending user connect/disconnect data. */
	if (tpcb->tp_ucddata)
		m_freem(tpcb->tp_ucddata);
#ifdef ARGO_DEBUG
	if (argo_debug[D_CONN]) {
		printf("reassembly info cnt %d rsyq %p\n",
		       tpcb->tp_rsycnt, tpcb->tp_rsyq);
	}
#endif
	/* Drop the reassembly queue, if one exists. */
	if (tpcb->tp_rsyq)
		tp_rsyflush(tpcb);
	/* Unlink from the active-connection list. */
	if (tpcb->tp_next) {
		iso_remque(tpcb);
		tpcb->tp_next = tpcb->tp_prev = 0;
	}
	tpcb->tp_notdetached = 0;
#ifdef ARGO_DEBUG
	if (argo_debug[D_CONN]) {
		printf("calling (...nlproto->...)(%p, so %p)\n",
		       tpcb->tp_npcb, so);
		printf("so %p so_head %p, qlen %d q0len %d qlimit %d\n",
		       so, so->so_head, so->so_q0len, so->so_qlen, so->so_qlimit);
	}
#endif
	/* Hand the network-layer pcb back to its protocol. */
	(*tpcb->tp_nlproto->nlp_pcbdetach)(tpcb->tp_npcb);
	/* does an so->so_pcb = 0; sofree(so) */
#ifdef ARGO_DEBUG
	if (argo_debug[D_CONN]) {
		printf("after xxx_pcbdetach\n");
	}
#endif
	/* A listener must also be unlinked from the global listeners list. */
	if (tpcb->tp_state == TP_LISTENING) {
		struct tp_pcb **tt;
		for (tt = &tp_listeners; *tt; tt = &((*tt)->tp_nextlisten))
			if (*tt == tpcb)
				break;
		if (*tt)
			*tt = tpcb->tp_nextlisten;
		else
			printf("tp_detach from listen: should panic\n");
	}
	if (tpcb->tp_refstate == REF_OPENING) {
		/*
		 * no connection existed here so no reference timer will be
		 * called
		 */
#ifdef ARGO_DEBUG
		if (argo_debug[D_CONN]) {
			printf("SETTING ref %d to REF_FREE\n", tpcb->tp_lref);
		}
#endif
		tp_freeref(tpcb->tp_lref);
	}
#ifdef TP_PERF_MEAS
	/*
	 * Get rid of the cluster mbuf allocated for performance
	 * measurements, if there is one. Note that tpcb->tp_perf_on says
	 * nothing about whether or not a cluster mbuf was allocated, so you
	 * have to check for a pointer to one (that is, we need the
	 * TP_PERF_MEASs around the following section of code, not the
	 * IFPERFs)
	 */
	if (tpcb->tp_p_meas) {
		/* NOTE(review): m and n are declared but never used below,
		 * and the cluster mbuf this comment describes is never
		 * actually released here — looks incomplete; verify. */
		struct mbuf *m = tpcb->tp_p_mbuf;
		struct mbuf *n;
#ifdef ARGO_DEBUG
		if (argo_debug[D_PERF_MEAS]) {
			/* NOTE(review): "0x%x" with a pointer argument is a
			 * format mismatch on 64-bit platforms; %p would be
			 * correct (comment only — code left untouched). */
			printf("freeing tp_p_meas 0x%x ", tpcb->tp_p_meas);
		}
#endif
		free(tpcb->tp_p_meas, M_PCB);
		tpcb->tp_p_meas = 0;
	}
#endif /* TP_PERF_MEAS */
#ifdef ARGO_DEBUG
	if (argo_debug[D_CONN]) {
		printf("end of detach, NOT single, tpcb %p\n", tpcb);
	}
#endif
	/* free((void *)tpcb, M_PCB); WHere to put this ? */
}