/* This is _not_ the normal way to "unoffload" a socket. */
void
undo_offload_socket(struct socket *so)
{
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct toepcb *toep = tp->t_toe;
	struct tom_data *td = toep->td;
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags &= ~SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	tp->tod = NULL;
	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->inp = NULL;
	toep->flags &= ~TPF_ATTACHED;
	if (in_pcbrele_wlocked(inp))
		panic("%s: inp freed.", __func__);

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}
/*
 * Two update paths: one in which the 4-tuple on an inpcb has been updated
 * and therefore connection groups may need to change (or a wildcard entry
 * may need to be installed), and another in which the 4-tuple has been
 * set as a result of a packet received, in which case we may be able to use
 * the hash on the mbuf to avoid doing a software hash calculation for RSS.
 *
 * In each case: first, let the wildcard code have a go at placing it as a
 * wildcard socket.  If it was a wildcard, or if the connection has been
 * dropped, then no pcbgroup is required (so potentially clear it);
 * otherwise, calculate and update the pcbgroup for the inpcb.
 */
void
in_pcbgroup_update(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;
	struct inpcbgroup *newpcbgroup;

	INP_WLOCK_ASSERT(inp);

	pcbinfo = inp->inp_pcbinfo;
	if (!in_pcbgroup_enabled(pcbinfo))
		return;

	in_pcbwild_update_internal(inp);
	if (!(inp->inp_flags2 & INP_PCBGROUPWILD) &&
	    !(inp->inp_flags & INP_DROPPED)) {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6)
			newpcbgroup = in6_pcbgroup_byinpcb(inp);
		else
#endif
			newpcbgroup = in_pcbgroup_byinpcb(inp);
	} else
		newpcbgroup = NULL;
	in_pcbgroup_update_internal(pcbinfo, newpcbgroup, inp);
}
void
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, struct sockbuf *sb,
    __be32 rcv_nxt)
{
	struct mbuf *m;
	int len;

	SOCKBUF_LOCK_ASSERT(sb);
	INP_WLOCK_ASSERT(toep->inp);
	len = be32toh(rcv_nxt) - tp->rcv_nxt;

	/* Signal handle_ddp() to break out of its sleep loop. */
	toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
	if (len == 0)
		return;

	tp->rcv_nxt += len;
	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
	toep->rx_credits -= len;	/* adjust for F_RX_FC_DDP */
#endif

	m = get_ddp_mbuf(len);
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
}
void
t4_rcvd(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);

	SOCKBUF_LOCK(sb);
	KASSERT(toep->sb_cc >= sb->sb_cc,
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sb->sb_cc, toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sb->sb_cc;
	toep->sb_cc = sb->sb_cc;
	credits = toep->rx_credits;
	SOCKBUF_UNLOCK(sb);

	if (credits > 0 &&
	    (credits + 16384 >= tp->rcv_wnd || credits >= 15 * 1024)) {
		credits = send_rx_credits(sc, toep, credits);
		SOCKBUF_LOCK(sb);
		toep->rx_credits -= credits;
		SOCKBUF_UNLOCK(sb);
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}
/*
 * Provide an opportunity for a TOE driver to offload.
 */
int
tcp_offload_connect(struct socket *so, struct sockaddr *nam)
{
	struct ifnet *ifp;
	struct toedev *tod;
	struct rtentry *rt;
	int error = EOPNOTSUPP;

	INP_WLOCK_ASSERT(sotoinpcb(so));
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: called with sa_family %d", __func__, nam->sa_family));

	if (registered_toedevs == 0)
		return (error);

	rt = rtalloc1(nam, 0, 0);
	if (rt)
		RT_UNLOCK(rt);
	else
		return (EHOSTUNREACH);

	ifp = rt->rt_ifp;

	if (nam->sa_family == AF_INET && !(ifp->if_capenable & IFCAP_TOE4))
		goto done;
	if (nam->sa_family == AF_INET6 && !(ifp->if_capenable & IFCAP_TOE6))
		goto done;

	tod = TOEDEV(ifp);
	if (tod != NULL)
		error = tod->tod_connect(tod, so, rt, nam);
done:
	RTFREE(rt);
	return (error);
}
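/*
 * For context, the driver side of the dispatch above: a TOE driver sets
 * IFCAP_TOE on its ifnet and supplies a tod_connect method in its struct
 * toedev.  The sketch below is illustrative only; mydrv_softc,
 * mydrv_can_offload(), and mydrv_active_open() are hypothetical names, not
 * from the source.
 */
static int
mydrv_tod_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct mydrv_softc *sc = tod->tod_softc;

	INP_WLOCK_ASSERT(sotoinpcb(so));

	if (!mydrv_can_offload(sc, rt, nam))
		return (EOPNOTSUPP);	/* fall back to the software stack */

	/* Allocate a toepcb and send the hardware an active-open request. */
	return (mydrv_active_open(sc, so, rt, nam));
}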
/*
 * Set up the socket for TCP offload.
 */
void
offload_socket(struct socket *so, struct toepcb *toep)
{
	struct tom_data *td = toep->td;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb;

	INP_WLOCK_ASSERT(inp);

	/* Update socket */
	sb = &so->so_snd;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);
	sb->sb_flags |= SB_NOCOALESCE;
	SOCKBUF_UNLOCK(sb);

	/* Update TCP PCB */
	tp->tod = &td->tod;
	tp->t_toe = toep;
	tp->t_flags |= TF_TOE;

	/* Install an extra hold on inp */
	toep->inp = inp;
	toep->flags |= TPF_ATTACHED;
	in_pcbref(inp);

	/* Add the TOE PCB to the active list */
	mtx_lock(&td->toep_list_lock);
	TAILQ_INSERT_HEAD(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);
}
/*
 * Called after the last CPL for the toepcb has been received.
 *
 * The inp must be wlocked on entry and is unlocked (or maybe destroyed) by the
 * time this function exits.
 */
static int
toepcb_release(struct toepcb *toep)
{
	struct inpcb *inp = toep->tp_inp;
	struct toedev *tod = toep->tp_tod;
	struct tom_data *td = t3_tomdata(tod);
	int rc;

	INP_WLOCK_ASSERT(inp);
	KASSERT(!(toep->tp_flags & TP_CPL_DONE),
	    ("%s: double release?", __func__));

	CTR2(KTR_CXGB, "%s: tid %d", __func__, toep->tp_tid);

	toep->tp_flags |= TP_CPL_DONE;
	toep->tp_inp = NULL;

	mtx_lock(&td->toep_list_lock);
	TAILQ_REMOVE(&td->toep_list, toep, link);
	mtx_unlock(&td->toep_list_lock);

	if (!(toep->tp_flags & TP_ATTACHED))
		t3_release_offload_resources(toep);

	rc = in_pcbrele_wlocked(inp);
	if (!rc)
		INP_WUNLOCK(inp);
	return (rc);
}
void
t4_rcvd_locked(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct toepcb *toep = tp->t_toe;
	int credits;

	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK_ASSERT(sb);

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
	toep->sb_cc = sbused(sb);

	if (toep->rx_credits > 0 &&
	    (tp->rcv_wnd <= 32 * 1024 || toep->rx_credits >= 64 * 1024 ||
	    (toep->rx_credits >= 16 * 1024 && tp->rcv_wnd <= 128 * 1024) ||
	    toep->sb_cc + tp->rcv_wnd < sb->sb_lowat)) {
		credits = send_rx_credits(sc, toep, toep->rx_credits);
		toep->rx_credits -= credits;
		tp->rcv_wnd += credits;
		tp->rcv_adv += credits;
	}
}
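/*
 * The predicate above batches receive-window updates rather than returning
 * credits on every read.  The standalone sketch below restates the same
 * heuristic with the thresholds copied from t4_rcvd_locked(); the function
 * name should_return_credits() and the sample values in main() are
 * hypothetical, for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool
should_return_credits(int rx_credits, unsigned long rcv_wnd,
    unsigned int sb_cc, unsigned int sb_lowat)
{
	/*
	 * Return credits when the advertised window is small, when many
	 * credits have accumulated, or when the socket buffer's low-water
	 * mark could not otherwise be satisfied.
	 */
	return (rx_credits > 0 &&
	    (rcv_wnd <= 32 * 1024 || rx_credits >= 64 * 1024 ||
	    (rx_credits >= 16 * 1024 && rcv_wnd <= 128 * 1024) ||
	    sb_cc + rcv_wnd < sb_lowat));
}

int
main(void)
{
	/* 20KB of credits against a 256KB window: no update yet (prints 0). */
	printf("%d\n", should_return_credits(20 * 1024, 256 * 1024, 0, 1));
	/* Same credits, window shrunk to 96KB: update is sent (prints 1). */
	printf("%d\n", should_return_credits(20 * 1024, 96 * 1024, 0, 1));
	return (0);
}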
/*
 * Initiate (or continue) disconnect.
 * If embryonic state, just send reset (once).
 * If in ``let data drain'' option and linger null, just drop.
 * Otherwise (hard), mark socket disconnecting and drop
 * current input data; switch states based on user close, and
 * send segment to peer (with FIN).
 */
static void
tcp_disconnect(struct tcpcb *tp)
{
	struct inpcb *inp = tp->t_inpcb;
	struct socket *so = inp->inp_socket;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	/*
	 * Neither tcp_close() nor tcp_drop() should return NULL, as the
	 * socket is still open.
	 */
	if (tp->t_state < TCPS_ESTABLISHED) {
		tp = tcp_close(tp);
		KASSERT(tp != NULL,
		    ("tcp_disconnect: tcp_close() returned NULL"));
	} else if ((so->so_options & SO_LINGER) && so->so_linger == 0) {
		tp = tcp_drop(tp, 0);
		KASSERT(tp != NULL,
		    ("tcp_disconnect: tcp_drop() returned NULL"));
	} else {
		soisdisconnecting(so);
		sbflush(&so->so_rcv);
		tcp_usrclosed(tp);
		if (!(inp->inp_flags & INP_DROPPED))
			tcp_output_disconnect(tp);
	}
}
/* XXX: handle_ddp_data code duplication */
void
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct sockbuf *sb = &inp->inp_socket->so_rcv;
	struct mbuf *m;

	INP_WLOCK_ASSERT(inp);
	SOCKBUF_LOCK_ASSERT(sb);

	m = get_ddp_mbuf(n);
	tp->rcv_nxt += n;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
	tp->rcv_wnd -= n;
#endif

	KASSERT(toep->sb_cc >= sbused(sb),
	    ("%s: sb %p has more data (%d) than last time (%d).",
	    __func__, sb, sbused(sb), toep->sb_cc));
	toep->rx_credits += toep->sb_cc - sbused(sb);
#ifdef USE_DDP_RX_FLOW_CONTROL
	toep->rx_credits -= n;	/* adjust for F_RX_FC_DDP */
#endif
	sbappendstream_locked(sb, m, 0);
	toep->sb_cc = sbused(sb);
}
/*
 * One sided detach.  The tcpcb is going away and we need to unhook the toepcb
 * hanging off it.  If the TOE driver is also done with the toepcb we'll release
 * all offload resources.
 */
static void
toepcb_detach(struct inpcb *inp)
{
	struct toepcb *toep;
	struct tcpcb *tp;

	KASSERT(inp, ("%s: inp is NULL", __func__));
	INP_WLOCK_ASSERT(inp);

	tp = intotcpcb(inp);
	toep = tp->t_toe;

	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));
	KASSERT(toep->tp_flags & TP_ATTACHED, ("%s: not attached", __func__));

	CTR6(KTR_CXGB, "%s: %s %u, toep %p, inp %p, tp %p", __func__,
	    tp->t_state == TCPS_SYN_SENT ? "atid" : "tid", toep->tp_tid,
	    toep, inp, tp);

	tp->t_toe = NULL;
	tp->t_flags &= ~TF_TOE;
	toep->tp_flags &= ~TP_ATTACHED;

	if (toep->tp_flags & TP_CPL_DONE)
		t3_release_offload_resources(toep);
}
void
tcp_offload_listen_start(struct tcpcb *tp)
{

	INP_WLOCK_ASSERT(tp->t_inpcb);

	EVENTHANDLER_INVOKE(tcp_offload_listen_start, tp);
}
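/*
 * Illustrative sketch (not from the source): a TOE driver subscribes to the
 * notification above via EVENTHANDLER_REGISTER(9).  The handler name
 * mydrv_listen_start and the registration function are hypothetical.
 */
#include <sys/eventhandler.h>

static eventhandler_tag mydrv_listen_start_tag;

static void
mydrv_listen_start(void *arg __unused, struct tcpcb *tp)
{
	/* Install a hardware listener for tp's local address and port. */
}

static void
mydrv_attach_events(void)
{
	mydrv_listen_start_tag = EVENTHANDLER_REGISTER(
	    tcp_offload_listen_start, mydrv_listen_start, NULL,
	    EVENTHANDLER_PRI_ANY);
}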
static void
rip_delhash(struct inpcb *inp)
{

	INP_INFO_WLOCK_ASSERT(inp->inp_pcbinfo);
	INP_WLOCK_ASSERT(inp);

	LIST_REMOVE(inp, inp_hash);
}
void
tcp_offload_detach(struct tcpcb *tp)
{
	struct toedev *tod = tp->tod;

	KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tod->tod_pcb_detach(tod, tp);
}
void
tcp_offload_input(struct tcpcb *tp, struct mbuf *m)
{
	struct toedev *tod = tp->tod;

	KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tod->tod_input(tod, tp, m);
}
void
tcp_offload_ctloutput(struct tcpcb *tp, int sopt_dir, int sopt_name)
{
	struct toedev *tod = tp->tod;

	KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tod->tod_ctloutput(tod, tp, sopt_dir, sopt_name);
}
void
tcp_offload_tcp_info(struct tcpcb *tp, struct tcp_info *ti)
{
	struct toedev *tod = tp->tod;

	KASSERT(tod != NULL, ("%s: tp->tod is NULL, tp %p", __func__, tp));
	INP_WLOCK_ASSERT(tp->t_inpcb);

	tod->tod_tcp_info(tod, tp, ti);
}
void
send_reset(struct adapter *sc, struct toepcb *toep, uint32_t snd_nxt)
{
	struct wrqe *wr;
	struct cpl_abort_req *req;
	int tid = toep->tid;
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);	/* don't use if INP_DROPPED */

	INP_WLOCK_ASSERT(inp);

	CTR6(KTR_CXGBE, "%s: tid %d (%s), toep_flags 0x%x, inp_flags 0x%x%s",
	    __func__, toep->tid,
	    inp->inp_flags & INP_DROPPED ? "inp dropped" :
	    tcpstates[tp->t_state],
	    toep->flags, inp->inp_flags,
	    toep->flags & TPF_ABORT_SHUTDOWN ?
	    " (abort already in progress)" : "");

	if (toep->flags & TPF_ABORT_SHUTDOWN)
		return;	/* abort already in progress */

	toep->flags |= TPF_ABORT_SHUTDOWN;

	KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
	    ("%s: flowc_wr not sent for tid %d.", __func__, tid));

	wr = alloc_wrqe(sizeof(*req), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	req = wrtod(wr);

	INIT_TP_WR_MIT_CPL(req, CPL_ABORT_REQ, tid);
	if (inp->inp_flags & INP_DROPPED)
		req->rsvd0 = htobe32(snd_nxt);
	else
		req->rsvd0 = htobe32(tp->snd_nxt);
	req->rsvd1 = !(toep->flags & TPF_TX_DATA_SENT);
	req->cmd = CPL_ABORT_SEND_RST;

	/*
	 * XXX: What's the correct way to tell that the inp hasn't been detached
	 * from its socket?  Should I even be flushing the snd buffer here?
	 */
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		struct socket *so = inp->inp_socket;

		if (so != NULL)	/* because I'm not sure.  See comment above */
			sbflush(&so->so_snd);
	}

	t4_l2t_send(sc, wr, toep->l2te);
}
/*
 * tcp_detach is called when the socket layer loses its final reference
 * to the socket, be it a file descriptor reference, a reference from TCP,
 * etc.  At this point, there is only one case in which we will keep around
 * inpcb state: time wait.
 *
 * This function can probably be re-absorbed back into tcp_usr_detach() now
 * that there is a single detach path.
 */
static void
tcp_detach(struct socket *so, struct inpcb *inp)
{
	struct tcpcb *tp;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	KASSERT(so->so_pcb == inp, ("tcp_detach: so_pcb != inp"));
	KASSERT(inp->inp_socket == so, ("tcp_detach: inp_socket != so"));

	tp = intotcpcb(inp);

	if (inp->inp_flags & INP_TIMEWAIT) {
		/*
		 * There are two cases to handle: one in which the time wait
		 * state is being discarded (INP_DROPPED), and one in which
		 * this connection will remain in timewait.  In the former,
		 * it is time to discard all state (except tcptw, which has
		 * already been discarded by the timewait close code, which
		 * should be further up the call stack somewhere).  In the
		 * latter case, we detach from the socket, but leave the pcb
		 * present until timewait ends.
		 *
		 * XXXRW: Would it be cleaner to free the tcptw here?
		 */
		if (inp->inp_flags & INP_DROPPED) {
			KASSERT(tp == NULL, ("tcp_detach: INP_TIMEWAIT && "
			    "INP_DROPPED && tp != NULL"));
			in_pcbdetach(inp);
			in_pcbfree(inp);
		} else {
			in_pcbdetach(inp);
			INP_WUNLOCK(inp);
		}
	} else {
		/*
		 * If the connection is not in timewait, we consider two
		 * conditions: one in which no further processing is
		 * necessary (dropped || embryonic), and one in which TCP is
		 * not yet done, but no longer requires the socket, so the
		 * pcb will persist for the time being.
		 *
		 * XXXRW: Does the second case still occur?
		 */
		if (inp->inp_flags & INP_DROPPED ||
		    tp->t_state < TCPS_SYN_SENT) {
			tcp_discardcb(tp);
			in_pcbdetach(inp);
			in_pcbfree(inp);
		} else
			in_pcbdetach(inp);
	}
}
/*
 * Common subroutine to open a TCP connection to remote host specified
 * by struct sockaddr_in in mbuf *nam.  Call in_pcbbind to assign a local
 * port number if needed.  Call in_pcbconnect_setup to do the routing and
 * to choose a local host address (interface).  If there is an existing
 * incarnation of the same connection in TIME-WAIT state and if the remote
 * host was sending CC options and if the connection duration was < MSL, then
 * truncate the previous TIME-WAIT state and proceed.
 * Initialize connection parameters and enter SYN-SENT state.
 */
static int
tcp_connect(struct tcpcb *tp, struct sockaddr *nam, struct thread *td)
{
	struct inpcb *inp = tp->t_inpcb, *oinp;
	struct socket *so = inp->inp_socket;
	struct in_addr laddr;
	u_short lport;
	int error;

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_lport == 0) {
		error = in_pcbbind(inp, (struct sockaddr *)0, td->td_ucred);
		if (error)
			return error;
	}

	/*
	 * Cannot simply call in_pcbconnect, because there might be an
	 * earlier incarnation of this same connection still in
	 * TIME_WAIT state, creating an ADDRINUSE error.
	 */
	laddr = inp->inp_laddr;
	lport = inp->inp_lport;
	error = in_pcbconnect_setup(inp, nam, &laddr.s_addr, &lport,
	    &inp->inp_faddr.s_addr, &inp->inp_fport, &oinp, td->td_ucred);
	if (error && oinp == NULL)
		return error;
	if (oinp)
		return EADDRINUSE;
	inp->inp_laddr = laddr;
	in_pcbrehash(inp);

	/*
	 * Compute window scaling to request:
	 * Scale to fit into sweet spot.  See tcp_syncache.c.
	 * XXX: This should move to tcp_output().
	 */
	while (tp->request_r_scale < TCP_MAX_WINSHIFT &&
	    (TCP_MAXWIN << tp->request_r_scale) < sb_max)
		tp->request_r_scale++;

	soisconnecting(so);
	TCPSTAT_INC(tcps_connattempt);
	tp->t_state = TCPS_SYN_SENT;
	tcp_timer_activate(tp, TT_KEEP, tcp_keepinit);
	tp->iss = tcp_new_isn(tp);
	tp->t_bw_rtseq = tp->iss;
	tcp_sendseqinit(tp);

	return 0;
}
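/*
 * Worked example of the window-scale loop above.  TCP_MAXWIN (65535) and
 * TCP_MAX_WINSHIFT (14) are the protocol constants; the 2MB sb_max used here
 * is only the common default (kern.ipc.maxsockbuf is tunable) and is an
 * assumption for illustration.
 */
#include <stdio.h>

#define TCP_MAXWIN		65535
#define TCP_MAX_WINSHIFT	14

int
main(void)
{
	unsigned long sb_max = 2 * 1024 * 1024;
	int request_r_scale = 0;

	while (request_r_scale < TCP_MAX_WINSHIFT &&
	    (TCP_MAXWIN << request_r_scale) < sb_max)
		request_r_scale++;

	/* Prints 6: 65535 << 6 is the first scaled window >= 2MB. */
	printf("request_r_scale = %d\n", request_r_scale);
	return (0);
}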
/*
 * Update the pcbgroup of an inpcb, which might include removing an old
 * pcbgroup reference and/or adding a new one.  Wildcard processing is not
 * performed here, although ideally we'll never install a pcbgroup for a
 * wildcard inpcb (asserted below).
 */
static void
in_pcbgroup_update_internal(struct inpcbinfo *pcbinfo,
    struct inpcbgroup *newpcbgroup, struct inpcb *inp)
{
	struct inpcbgroup *oldpcbgroup;
	struct inpcbhead *pcbhash;
	uint32_t hashkey_faddr;

	INP_WLOCK_ASSERT(inp);

	oldpcbgroup = inp->inp_pcbgroup;
	if (oldpcbgroup != NULL && oldpcbgroup != newpcbgroup) {
		INP_GROUP_LOCK(oldpcbgroup);
		LIST_REMOVE(inp, inp_pcbgrouphash);
		inp->inp_pcbgroup = NULL;
		INP_GROUP_UNLOCK(oldpcbgroup);
	}
	if (newpcbgroup != NULL && oldpcbgroup != newpcbgroup) {
#ifdef INET6
		if (inp->inp_vflag & INP_IPV6)
			hashkey_faddr = INP6_PCBHASHKEY(&inp->in6p_faddr);
		else
#endif
			hashkey_faddr = inp->inp_faddr.s_addr;
		INP_GROUP_LOCK(newpcbgroup);
		/*
		 * If the inp is an RSS bucket wildcard entry, ensure
		 * that the PCB hash is calculated correctly.
		 *
		 * The wildcard hash calculation differs from the
		 * non-wildcard definition.  The source address is
		 * INADDR_ANY and the far port is 0.
		 */
		if (inp->inp_flags2 & INP_RSS_BUCKET_SET) {
			pcbhash = &newpcbgroup->ipg_hashbase[
			    INP_PCBHASH(INADDR_ANY, inp->inp_lport, 0,
			    newpcbgroup->ipg_hashmask)];
		} else {
			pcbhash = &newpcbgroup->ipg_hashbase[
			    INP_PCBHASH(hashkey_faddr, inp->inp_lport,
			    inp->inp_fport, newpcbgroup->ipg_hashmask)];
		}
		LIST_INSERT_HEAD(pcbhash, inp, inp_pcbgrouphash);
		inp->inp_pcbgroup = newpcbgroup;
		INP_GROUP_UNLOCK(newpcbgroup);
	}
	KASSERT(!(newpcbgroup != NULL && in_pcbwild_needed(inp)),
	    ("%s: pcbgroup and wildcard!", __func__));
}
/*
 * User issued close, and wish to trail through shutdown states:
 * if never received SYN, just forget it.  If got a SYN from peer,
 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
 * If already got a FIN from peer, then almost done; go to LAST_ACK
 * state.  In all other cases, have already sent FIN to peer (e.g.
 * after PRU_SHUTDOWN), and just have to play tedious game waiting
 * for peer to send FIN or not respond to keep-alives, etc.
 * We can let the user exit from the close as soon as the FIN is acked.
 */
static void
tcp_usrclosed(struct tcpcb *tp)
{

	INP_INFO_WLOCK_ASSERT(&V_tcbinfo);
	INP_WLOCK_ASSERT(tp->t_inpcb);

	switch (tp->t_state) {
	case TCPS_LISTEN:
		tcp_offload_listen_close(tp);
		/* FALLTHROUGH */
	case TCPS_CLOSED:
		tp->t_state = TCPS_CLOSED;
		tp = tcp_close(tp);
		/*
		 * tcp_close() should never return NULL here as the socket is
		 * still open.
		 */
		KASSERT(tp != NULL,
		    ("tcp_usrclosed: tcp_close() returned NULL"));
		break;

	case TCPS_SYN_SENT:
	case TCPS_SYN_RECEIVED:
		tp->t_flags |= TF_NEEDFIN;
		break;

	case TCPS_ESTABLISHED:
		tp->t_state = TCPS_FIN_WAIT_1;
		break;

	case TCPS_CLOSE_WAIT:
		tp->t_state = TCPS_LAST_ACK;
		break;
	}
	if (tp->t_state >= TCPS_FIN_WAIT_2) {
		soisdisconnected(tp->t_inpcb->inp_socket);
		/* Prevent the connection hanging in FIN_WAIT_2 forever. */
		if (tp->t_state == TCPS_FIN_WAIT_2) {
			int timeout;

			timeout = (tcp_fast_finwait2_recycle) ?
			    tcp_finwait2_timeout : tcp_maxidle;
			tcp_timer_activate(tp, TT_2MSL, timeout);
		}
	}
}
void
tcp_reass_flush(struct tcpcb *tp)
{
	struct mbuf *m;

	INP_WLOCK_ASSERT(tp->t_inpcb);

	while ((m = tp->t_segq) != NULL) {
		tp->t_segq = m->m_nextpkt;
		tp->t_segqlen -= m->m_pkthdr.len;
		m_freem(m);
	}

	KASSERT((tp->t_segqlen == 0),
	    ("TCP reass queue %p length is %d instead of 0 after flush.",
	    tp, tp->t_segqlen));
}
int
t4_tod_output(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	t4_push_frames(sc, toep, 0);

	return (0);
}
static void
in_pcbwild_remove(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo;
	u_int pgn;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags2 & INP_PCBGROUPWILD),
	    ("%s: not wild", __func__));

	pcbinfo = inp->inp_pcbinfo;
	for (pgn = 0; pgn < pcbinfo->ipi_npcbgroups; pgn++)
		INP_GROUP_LOCK(&pcbinfo->ipi_pcbgroups[pgn]);
	LIST_REMOVE(inp, inp_pcbgroup_wild);
	for (pgn = 0; pgn < pcbinfo->ipi_npcbgroups; pgn++)
		INP_GROUP_UNLOCK(&pcbinfo->ipi_pcbgroups[pgn]);
	inp->inp_flags2 &= ~INP_PCBGROUPWILD;
}
int
t4_send_fin(struct toedev *tod, struct tcpcb *tp)
{
	struct adapter *sc = tod->tod_softc;
#ifdef INVARIANTS
	struct inpcb *inp = tp->t_inpcb;
#endif
	struct toepcb *toep = tp->t_toe;

	INP_WLOCK_ASSERT(inp);
	KASSERT((inp->inp_flags & INP_DROPPED) == 0,
	    ("%s: inp %p dropped.", __func__, inp));
	KASSERT(toep != NULL, ("%s: toep is NULL", __func__));

	toep->flags |= TPF_SEND_FIN;
	if (tp->t_state >= TCPS_ESTABLISHED)
		t4_push_frames(sc, toep, 0);

	return (0);
}
static void
ipsec_invalidate_cache(struct inpcb *inp, u_int dir)
{
	struct secpolicy *sp;

	INP_WLOCK_ASSERT(inp);
	if (dir == IPSEC_DIR_OUTBOUND) {
		if (inp->inp_sp->flags & INP_INBOUND_POLICY)
			return;
		sp = inp->inp_sp->sp_in;
		inp->inp_sp->sp_in = NULL;
	} else {
		if (inp->inp_sp->flags & INP_OUTBOUND_POLICY)
			return;
		sp = inp->inp_sp->sp_out;
		inp->inp_sp->sp_out = NULL;
	}
	if (sp != NULL)
		key_freesp(&sp);	/* release extra reference */
}
static void
rip_inshash(struct inpcb *inp)
{
	struct inpcbinfo *pcbinfo = inp->inp_pcbinfo;
	struct inpcbhead *pcbhash;
	int hash;

	INP_INFO_WLOCK_ASSERT(pcbinfo);
	INP_WLOCK_ASSERT(inp);

	if (inp->inp_ip_p != 0 &&
	    inp->inp_laddr.s_addr != INADDR_ANY &&
	    inp->inp_faddr.s_addr != INADDR_ANY) {
		hash = INP_PCBHASH_RAW(inp->inp_ip_p, inp->inp_laddr.s_addr,
		    inp->inp_faddr.s_addr, pcbinfo->ipi_hashmask);
	} else
		hash = 0;
	pcbhash = &pcbinfo->ipi_hashbase[hash];
	LIST_INSERT_HEAD(pcbhash, inp, inp_hash);
}
/*
 * Handle UDP_ENCAP socket option.  Always return with released INP_WLOCK.
 */
int
udp_ipsec_pcbctl(struct inpcb *inp, struct sockopt *sopt)
{
	struct udpcb *up;
	int error, optval;

	INP_WLOCK_ASSERT(inp);
	if (sopt->sopt_name != UDP_ENCAP) {
		INP_WUNLOCK(inp);
		return (ENOPROTOOPT);
	}

	up = intoudpcb(inp);
	if (sopt->sopt_dir == SOPT_GET) {
		if (up->u_flags & UF_ESPINUDP)
			optval = UDP_ENCAP_ESPINUDP;
		else
			optval = 0;
		INP_WUNLOCK(inp);
		return (sooptcopyout(sopt, &optval, sizeof(optval)));
	}

	INP_WUNLOCK(inp);
	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error != 0)
		return (error);

	INP_WLOCK(inp);
	switch (optval) {
	case 0:
		up->u_flags &= ~UF_ESPINUDP;
		break;
	case UDP_ENCAP_ESPINUDP:
		up->u_flags |= UF_ESPINUDP;
		break;
	default:
		error = EINVAL;
	}
	INP_WUNLOCK(inp);
	return (error);
}
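/*
 * Userland view of the option handled above: a minimal sketch that enables
 * ESP-in-UDP encapsulation on a UDP socket via setsockopt(2).  Assumes a
 * FreeBSD host where UDP_ENCAP and UDP_ENCAP_ESPINUDP come from
 * <netinet/udp.h>; all other setup is omitted.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/udp.h>
#include <err.h>

int
main(void)
{
	int s, optval = UDP_ENCAP_ESPINUDP;

	s = socket(AF_INET, SOCK_DGRAM, 0);
	if (s == -1)
		err(1, "socket");
	if (setsockopt(s, IPPROTO_UDP, UDP_ENCAP, &optval,
	    sizeof(optval)) == -1)
		err(1, "setsockopt(UDP_ENCAP)");
	return (0);
}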
static int
tcp_ipsec_pcbctl(struct inpcb *inp, struct sockopt *sopt)
{
	struct tcpcb *tp;
	int error, optval;

	INP_WLOCK_ASSERT(inp);
	if (sopt->sopt_name != TCP_MD5SIG) {
		INP_WUNLOCK(inp);
		return (ENOPROTOOPT);
	}

	tp = intotcpcb(inp);
	if (sopt->sopt_dir == SOPT_GET) {
		optval = (tp->t_flags & TF_SIGNATURE) ? 1 : 0;
		INP_WUNLOCK(inp);
		/* On success return with released INP_WLOCK */
		return (sooptcopyout(sopt, &optval, sizeof(optval)));
	}

	INP_WUNLOCK(inp);
	error = sooptcopyin(sopt, &optval, sizeof(optval), sizeof(optval));
	if (error != 0)
		return (error);

	/* INP_WLOCK_RECHECK */
	INP_WLOCK(inp);
	if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
		INP_WUNLOCK(inp);
		return (ECONNRESET);
	}
	if (optval > 0)
		tp->t_flags |= TF_SIGNATURE;
	else
		tp->t_flags &= ~TF_SIGNATURE;

	/* On success return with acquired INP_WLOCK */
	return (error);
}
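/*
 * Userland counterpart of tcp_ipsec_pcbctl(): a minimal sketch that flips
 * TF_SIGNATURE via the TCP_MD5SIG socket option.  The MD5 keys themselves
 * live in the SADB (managed with setkey(8)) and are outside this snippet,
 * which only enables the option on the socket.
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <err.h>

int
main(void)
{
	int s, optval = 1;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (s == -1)
		err(1, "socket");
	if (setsockopt(s, IPPROTO_TCP, TCP_MD5SIG, &optval,
	    sizeof(optval)) == -1)
		err(1, "setsockopt(TCP_MD5SIG)");
	return (0);
}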