/* ARGSUSED */
void
tcp_time_wait_collector(void *arg)
{
	tcp_t *tcp;
	int64_t now;
	mblk_t *mp;
	conn_t *connp;
	kmutex_t *lock;
	boolean_t removed;
	extern void (*cl_inet_disconnect)(netstackid_t, uint8_t, sa_family_t,
	    uint8_t *, in_port_t, uint8_t *, in_port_t, void *);

	squeue_t *sqp = (squeue_t *)arg;
	tcp_squeue_priv_t *tcp_time_wait =
	    *((tcp_squeue_priv_t **)squeue_getprivate(sqp, SQPRIVATE_TCP));

	mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	tcp_time_wait->tcp_time_wait_tid = 0;
#ifdef DEBUG
	tcp_time_wait->tcp_time_wait_running = B_TRUE;
#endif

	if (tcp_time_wait->tcp_free_list != NULL &&
	    tcp_time_wait->tcp_free_list->tcp_in_free_list == B_TRUE) {
		TCP_G_STAT(tcp_freelist_cleanup);
		while ((tcp = tcp_time_wait->tcp_free_list) != NULL) {
			tcp_time_wait->tcp_free_list = tcp->tcp_time_wait_next;
			tcp->tcp_time_wait_next = NULL;
			tcp_time_wait->tcp_free_list_cnt--;
			ASSERT(tcp->tcp_tcps == NULL);
			CONN_DEC_REF(tcp->tcp_connp);
		}
		ASSERT(tcp_time_wait->tcp_free_list_cnt == 0);
	}

	/*
	 * In order to reap TIME_WAIT connections reliably, we should use a
	 * source of time that is not adjustable by the user -- hence
	 * the call to ddi_get_lbolt64().
	 */
	now = ddi_get_lbolt64();
	while ((tcp = tcp_time_wait->tcp_time_wait_head) != NULL) {
		/*
		 * lbolt64 should not wrap around in practice, so we can
		 * do a direct comparison.
		 */
		if (now < tcp->tcp_time_wait_expire)
			break;

		removed = tcp_time_wait_remove(tcp, tcp_time_wait);
		ASSERT(removed);

		connp = tcp->tcp_connp;
		ASSERT(connp->conn_fanout != NULL);
		lock = &connp->conn_fanout->connf_lock;
		/*
		 * This is essentially a TIME_WAIT reclaim fast path
		 * optimization for performance where the collector checks
		 * under the fanout lock (so that no one else can get access
		 * to the conn_t) that the refcnt is 2, i.e. one for TCP and
		 * one for the classifier hash list. If the ref count is
		 * indeed 2, we can just remove the conn under the fanout
		 * lock and avoid cleaning up the conn under the squeue,
		 * provided that clustering callbacks are not enabled. If
		 * clustering is enabled, we need to make the clustering
		 * callback before setting the CONDEMNED flag and after
		 * dropping all locks, so we forego this optimization and
		 * fall back to the slow path. Also see the comments in
		 * tcp_closei_local regarding the refcnt logic.
		 *
		 * Since we are holding the tcp_time_wait_lock, it's better
		 * not to block on the fanout lock because other connections
		 * can't add themselves to the time_wait list while we hold
		 * it. So we do a tryenter instead of mutex_enter.
		 */
		if (mutex_tryenter(lock)) {
			mutex_enter(&connp->conn_lock);
			if ((connp->conn_ref == 2) &&
			    (cl_inet_disconnect == NULL)) {
				ipcl_hash_remove_locked(connp,
				    connp->conn_fanout);
				/*
				 * Set the CONDEMNED flag now itself so that
				 * the refcnt cannot increase due to any
				 * walker.
				 */
				connp->conn_state_flags |= CONN_CONDEMNED;
				mutex_exit(lock);
				mutex_exit(&connp->conn_lock);
				if (tcp_time_wait->tcp_free_list_cnt <
				    tcp_free_list_max_cnt) {
					/* Add to head of tcp_free_list */
					mutex_exit(
					    &tcp_time_wait->tcp_time_wait_lock);
					tcp_cleanup(tcp);
					ASSERT(connp->conn_latch == NULL);
					ASSERT(connp->conn_policy == NULL);
					ASSERT(tcp->tcp_tcps == NULL);
					ASSERT(connp->conn_netstack == NULL);

					mutex_enter(
					    &tcp_time_wait->tcp_time_wait_lock);
					tcp->tcp_time_wait_next =
					    tcp_time_wait->tcp_free_list;
					tcp_time_wait->tcp_free_list = tcp;
					tcp_time_wait->tcp_free_list_cnt++;
					continue;
				} else {
					/* Do not add to tcp_free_list */
					mutex_exit(
					    &tcp_time_wait->tcp_time_wait_lock);
					tcp_bind_hash_remove(tcp);
					ixa_cleanup(tcp->tcp_connp->conn_ixa);
					tcp_ipsec_cleanup(tcp);
					CONN_DEC_REF(tcp->tcp_connp);
				}
			} else {
				CONN_INC_REF_LOCKED(connp);
				mutex_exit(lock);
				mutex_exit(
				    &tcp_time_wait->tcp_time_wait_lock);
				mutex_exit(&connp->conn_lock);
				/*
				 * We can reuse the closemp here since the
				 * conn has detached (otherwise we wouldn't
				 * even be in the time_wait list).
				 * tcp_closemp_used can safely be changed
				 * without taking a lock as no other thread
				 * can concurrently access it at this point
				 * in the connection lifecycle.
				 */
				if (tcp->tcp_closemp.b_prev == NULL)
					tcp->tcp_closemp_used = B_TRUE;
				else
					cmn_err(CE_PANIC,
					    "tcp_timewait_collector: "
					    "concurrent use of tcp_closemp: "
					    "connp %p tcp %p\n", (void *)connp,
					    (void *)tcp);

				TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
				mp = &tcp->tcp_closemp;
				SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
				    tcp_timewait_close, connp, NULL,
				    SQ_FILL, SQTAG_TCP_TIMEWAIT);
			}
		} else {
			mutex_enter(&connp->conn_lock);
			CONN_INC_REF_LOCKED(connp);
			mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
			mutex_exit(&connp->conn_lock);
			/*
			 * We can reuse the closemp here since the conn has
			 * detached (otherwise we wouldn't even be in the
			 * time_wait list). tcp_closemp_used can safely be
			 * changed without taking a lock as no other thread
			 * can concurrently access it at this point in the
			 * connection lifecycle.
			 */
			if (tcp->tcp_closemp.b_prev == NULL)
				tcp->tcp_closemp_used = B_TRUE;
			else
				cmn_err(CE_PANIC, "tcp_timewait_collector: "
				    "concurrent use of tcp_closemp: "
				    "connp %p tcp %p\n", (void *)connp,
				    (void *)tcp);

			TCP_DEBUG_GETPCSTACK(tcp->tcmp_stk, 15);
			mp = &tcp->tcp_closemp;
			SQUEUE_ENTER_ONE(connp->conn_sqp, mp,
			    tcp_timewait_close, connp, NULL,
			    SQ_FILL, SQTAG_TCP_TIMEWAIT);
		}
		mutex_enter(&tcp_time_wait->tcp_time_wait_lock);
	}

	if (tcp_time_wait->tcp_free_list != NULL)
		tcp_time_wait->tcp_free_list->tcp_in_free_list = B_TRUE;

	/*
	 * If the time_wait list is not empty and there is no timer running,
	 * restart it.
	 */
	if ((tcp = tcp_time_wait->tcp_time_wait_head) != NULL &&
	    tcp_time_wait->tcp_time_wait_tid == 0) {
		hrtime_t firetime;

		firetime = TICK_TO_NSEC(tcp->tcp_time_wait_expire - now);
		/* This ensures that we won't wake up too often. */
		firetime = MAX(TCP_TIME_WAIT_DELAY, firetime);
		tcp_time_wait->tcp_time_wait_tid =
		    timeout_generic(CALLOUT_NORMAL, tcp_time_wait_collector,
		    sqp, firetime, CALLOUT_TCP_RESOLUTION,
		    CALLOUT_FLAG_ROUNDUP);
	}
#ifdef DEBUG
	tcp_time_wait->tcp_time_wait_running = B_FALSE;
#endif
	mutex_exit(&tcp_time_wait->tcp_time_wait_lock);
}
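/*
 * The sketch below is an editor-added illustration, not illumos code. It
 * shows the expiry-list pattern tcp_time_wait_collector() implements,
 * assuming a single lock already protects the list: entries are kept in
 * expiry order, everything at or past "now" is reaped, and the one-shot
 * timer is re-armed for the head entry's remaining time, clamped to a
 * floor so the callout never fires too often. All names here (tw_entry_t,
 * tw_collect, TW_MIN_DELAY, the callbacks) are hypothetical.
 */
typedef struct tw_entry {
	struct tw_entry	*next;
	long long	expire;		/* absolute time, in ticks */
} tw_entry_t;

static void
tw_collect(tw_entry_t **headp, long long now,
    void (*tw_reap)(tw_entry_t *), void (*tw_arm_timer)(long long))
{
	tw_entry_t *e;

	/* The list is sorted by expiry: stop at the first live entry. */
	while ((e = *headp) != NULL && now >= e->expire) {
		*headp = e->next;
		e->next = NULL;
		tw_reap(e);
	}

	/* Re-arm only if work remains; clamp so we don't wake too often. */
	if ((e = *headp) != NULL) {
		long long delay = e->expire - now;
		const long long TW_MIN_DELAY = 10;	/* hypothetical floor */

		tw_arm_timer(delay < TW_MIN_DELAY ? TW_MIN_DELAY : delay);
	}
}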
/*
 * Callback function for the cases where kssl_input() had to submit an
 * asynchronous job and needs to come back when done to carry on the
 * input processing. This routine follows the conventions of timeout and
 * interrupt handlers (no blocking, ...).
 */
static void
tcp_kssl_input_callback(void *arg, mblk_t *mp, kssl_cmd_t kssl_cmd)
{
	tcp_t	*tcp = (tcp_t *)arg;
	conn_t	*connp;
	mblk_t	*sqmp;

	ASSERT(tcp != NULL);

	connp = tcp->tcp_connp;

	ASSERT(connp != NULL);

	switch (kssl_cmd) {
	case KSSL_CMD_SEND:
		/* I'm coming from an outside perimeter */
		if (mp != NULL) {
			/*
			 * See comment in tcp_kssl_input() call to tcp_output()
			 */
			mutex_enter(&tcp->tcp_non_sq_lock);
			tcp->tcp_squeue_bytes += msgdsize(mp);
			mutex_exit(&tcp->tcp_non_sq_lock);
		}
		CONN_INC_REF(connp);
		SQUEUE_ENTER_ONE(connp->conn_sqp, mp, tcp_output, connp,
		    NULL, tcp_squeue_flag, SQTAG_TCP_OUTPUT);

	/* FALLTHROUGH */
	case KSSL_CMD_NONE:
		break;

	case KSSL_CMD_DELIVER_PROXY:
	case KSSL_CMD_DELIVER_SSL:
		/*
		 * Keep accumulating if not yet accepted.
		 */
		if (tcp->tcp_listener != NULL) {
			tcp_rcv_enqueue(tcp, mp, msgdsize(mp), NULL);
		} else {
			putnext(connp->conn_rq, mp);
		}
		break;

	case KSSL_CMD_NOT_SUPPORTED:
		/* Stop the SSL processing */
		kssl_release_ctx(tcp->tcp_kssl_ctx);
		tcp->tcp_kssl_ctx = NULL;
	}
	/*
	 * Process any input that may have accumulated while we were waiting
	 * for the callback. We need to re-enter the squeue for this connp,
	 * and a new mp is necessary.
	 */
	if ((sqmp = allocb(1, BPRI_MED)) != NULL) {
		CONN_INC_REF(connp);
		SQUEUE_ENTER_ONE(connp->conn_sqp, sqmp, tcp_kssl_input_asynch,
		    connp, NULL, SQ_FILL, SQTAG_TCP_KSSL_INPUT);
	} else {
		DTRACE_PROBE(kssl_err__allocb_failed);
	}
	CONN_DEC_REF(connp);
}
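/*
 * Editor-added sketch (hypothetical names, not illumos code) of the
 * reference discipline used in the callback above: a hold is taken on
 * the connection before work is handed to another execution context, and
 * the receiving side drops that hold when done, so the object cannot be
 * freed while the hand-off is in flight. C11 atomics stand in for the
 * kernel's CONN_INC_REF/CONN_DEC_REF macros.
 */
#include <stdatomic.h>

typedef struct refobj {
	atomic_int	ref;		/* starts at 1 for the creator */
} refobj_t;

static void
refobj_hold(refobj_t *o)
{
	atomic_fetch_add(&o->ref, 1);
}

static void
refobj_rele(refobj_t *o, void (*destroy)(refobj_t *))
{
	/* The thread that drops the last reference frees the object. */
	if (atomic_fetch_sub(&o->ref, 1) == 1)
		destroy(o);
}

/*
 * Hand-off pattern, mirroring CONN_INC_REF before SQUEUE_ENTER_ONE:
 *
 *	refobj_hold(o);
 *	enqueue(q, worker, o);		// worker ends with refobj_rele(o)
 */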
/*
 * tcp_input_data() calls this routine for all packets destined to a
 * connection on the SSL port, when the SSL kernel proxy is configured
 * to intercept and process those packets.
 * A packet may carry multiple SSL records, so the function
 * calls kssl_input() in a loop, until all records are handled.
 * As long as this connection is in handshake, that is until the first
 * time kssl_input() returns a record to be delivered upstream,
 * we maintain tcp_kssl_inhandshake, and keep an extra reference on
 * the tcp/connp across the call to kssl_input(). The reason is that
 * function may return KSSL_CMD_QUEUED after scheduling an asynchronous
 * request and cause tcp_kssl_callback() to be called on a different CPU,
 * which could decrement the conn/tcp reference before we get to
 * increment it.
 */
void
tcp_kssl_input(tcp_t *tcp, mblk_t *mp, cred_t *cr)
{
	struct conn_s	*connp = tcp->tcp_connp;
	tcp_t		*listener;
	mblk_t		*ind_mp;
	kssl_cmd_t	kssl_cmd;
	mblk_t		*outmp;
	struct T_conn_ind	*tci;
	boolean_t	more = B_FALSE;
	boolean_t	conn_held = B_FALSE;
	boolean_t	is_v4;
	void		*addr;

	if (is_system_labeled() && mp != NULL) {
		ASSERT(cr != NULL || msg_getcred(mp, NULL) != NULL);
		/*
		 * Provide for protocols above TCP such as RPC. NOPID leaves
		 * db_cpid unchanged.
		 * The cred could have already been set.
		 */
		if (cr != NULL)
			mblk_setcred(mp, cr, NOPID);
	}

	/* First time here, allocate the SSL context */
	if (tcp->tcp_kssl_ctx == NULL) {
		ASSERT(tcp->tcp_kssl_pending);

		is_v4 = (connp->conn_ipversion == IPV4_VERSION);
		if (is_v4) {
			addr = &connp->conn_faddr_v4;
		} else {
			addr = &connp->conn_faddr_v6;
		}

		if (kssl_init_context(tcp->tcp_kssl_ent,
		    addr, is_v4, tcp->tcp_mss,
		    &(tcp->tcp_kssl_ctx)) != KSSL_STS_OK) {
			tcp->tcp_kssl_pending = B_FALSE;
			kssl_release_ent(tcp->tcp_kssl_ent, NULL,
			    KSSL_NO_PROXY);
			tcp->tcp_kssl_ent = NULL;
			goto no_can_do;
		}
		tcp->tcp_kssl_inhandshake = B_TRUE;

		/* we won't be needing this one after now */
		kssl_release_ent(tcp->tcp_kssl_ent, NULL, KSSL_NO_PROXY);
		tcp->tcp_kssl_ent = NULL;
	}

	if (tcp->tcp_kssl_inhandshake) {
		CONN_INC_REF(connp);
		conn_held = B_TRUE;
	}

	do {
		kssl_cmd = kssl_input(tcp->tcp_kssl_ctx, mp, &outmp,
		    &more, tcp_kssl_input_callback, (void *)tcp);

		switch (kssl_cmd) {
		case KSSL_CMD_SEND:
			DTRACE_PROBE(kssl_cmd_send);
			/*
			 * We need to increment tcp_squeue_bytes to account
			 * for the extra bytes internally injected to the
			 * outgoing flow. tcp_output() will decrement it
			 * as they are sent out.
			 */
			mutex_enter(&tcp->tcp_non_sq_lock);
			tcp->tcp_squeue_bytes += msgdsize(outmp);
			mutex_exit(&tcp->tcp_non_sq_lock);
			tcp_output(connp, outmp, NULL, NULL);

		/* FALLTHROUGH */
		case KSSL_CMD_NONE:
			DTRACE_PROBE(kssl_cmd_none);
			if (tcp->tcp_kssl_pending) {
				mblk_t *ctxmp;

				/*
				 * SSL handshake successfully started -
				 * pass up the T_CONN_IND
				 */
				mp = NULL;

				listener = tcp->tcp_listener;
				tcp->tcp_kssl_pending = B_FALSE;

				ind_mp = tcp->tcp_conn.tcp_eager_conn_ind;
				ASSERT(ind_mp != NULL);

				ctxmp = allocb(sizeof (kssl_ctx_t), BPRI_MED);

				/*
				 * Give this session a chance to fall back to
				 * userland SSL
				 */
				if (ctxmp == NULL)
					goto no_can_do;

				/*
				 * Attach the kssl_ctx to the conn_ind and
				 * transform it to a T_SSL_PROXY_CONN_IND.
				 * Hold it so that it stays valid till it
				 * reaches the stream head.
				 */
				kssl_hold_ctx(tcp->tcp_kssl_ctx);
				*((kssl_ctx_t *)ctxmp->b_rptr) =
				    tcp->tcp_kssl_ctx;
				ctxmp->b_wptr = ctxmp->b_rptr +
				    sizeof (kssl_ctx_t);

				ind_mp->b_cont = ctxmp;

				tci = (struct T_conn_ind *)ind_mp->b_rptr;
				tci->PRIM_type = T_SSL_PROXY_CONN_IND;

				/*
				 * The code below is copied from
				 * tcp_input_data() delivering the T_CONN_IND
				 * on a TCPS_SYN_RCVD, and all conn ref cnt
				 * comments apply.
				 */
				tcp->tcp_conn.tcp_eager_conn_ind = NULL;
				tcp->tcp_tconnind_started = B_TRUE;

				CONN_INC_REF(connp);
				CONN_INC_REF(listener->tcp_connp);
				if (listener->tcp_connp->conn_sqp ==
				    connp->conn_sqp) {
					tcp_send_conn_ind(listener->tcp_connp,
					    ind_mp,
					    listener->tcp_connp->conn_sqp);
					CONN_DEC_REF(listener->tcp_connp);
				} else {
					SQUEUE_ENTER_ONE(
					    listener->tcp_connp->conn_sqp,
					    ind_mp, tcp_send_conn_ind,
					    listener->tcp_connp, NULL,
					    SQ_FILL, SQTAG_TCP_CONN_IND);
				}
			}
			break;

		case KSSL_CMD_QUEUED:
			DTRACE_PROBE(kssl_cmd_queued);
			/*
			 * We hold the conn_t here because an asynchronous
			 * request has been queued and
			 * tcp_kssl_input_callback() will be called later.
			 * It will release the conn_t.
			 */
			CONN_INC_REF(connp);
			break;

		case KSSL_CMD_DELIVER_PROXY:
		case KSSL_CMD_DELIVER_SSL:
			DTRACE_PROBE(kssl_cmd_proxy__ssl);
			/*
			 * Keep accumulating if not yet accepted.
			 */
			if (tcp->tcp_listener != NULL) {
				DTRACE_PROBE1(kssl_mblk__input_rcv_enqueue,
				    mblk_t *, outmp);
				tcp_rcv_enqueue(tcp, outmp, msgdsize(outmp),
				    NULL);
			} else {
				DTRACE_PROBE1(kssl_mblk__input_putnext,
				    mblk_t *, outmp);
				putnext(connp->conn_rq, outmp);
			}

			/*
			 * We're at a phase where records are sent upstream,
			 * past the handshake
			 */
			tcp->tcp_kssl_inhandshake = B_FALSE;
			break;

		case KSSL_CMD_NOT_SUPPORTED:
			DTRACE_PROBE(kssl_cmd_not_supported);
			/*
			 * Stop the SSL processing by the proxy, and
			 * switch to the userland SSL
			 */
			if (tcp->tcp_kssl_pending) {

				tcp->tcp_kssl_pending = B_FALSE;

no_can_do:
				DTRACE_PROBE1(kssl_no_can_do, tcp_t *, tcp);
				listener = tcp->tcp_listener;
				ind_mp = tcp->tcp_conn.tcp_eager_conn_ind;
				ASSERT(ind_mp != NULL);

				if (tcp->tcp_kssl_ctx != NULL) {
					kssl_release_ctx(tcp->tcp_kssl_ctx);
					tcp->tcp_kssl_ctx = NULL;
				}

				/*
				 * Make this a T_SSL_PROXY_CONN_IND, for the
				 * stream head to deliver it to the SSL
				 * fall-back listener
				 */
				tci = (struct T_conn_ind *)ind_mp->b_rptr;
				tci->PRIM_type = T_SSL_PROXY_CONN_IND;

				/*
				 * The code below is copied from
				 * tcp_input_data() delivering the T_CONN_IND
				 * on a TCPS_SYN_RCVD, and all conn ref cnt
				 * comments apply.
				 */
				tcp->tcp_conn.tcp_eager_conn_ind = NULL;
				tcp->tcp_tconnind_started = B_TRUE;

				CONN_INC_REF(connp);
				CONN_INC_REF(listener->tcp_connp);
				if (listener->tcp_connp->conn_sqp ==
				    connp->conn_sqp) {
					tcp_send_conn_ind(listener->tcp_connp,
					    ind_mp,
					    listener->tcp_connp->conn_sqp);
					CONN_DEC_REF(listener->tcp_connp);
				} else {
					SQUEUE_ENTER_ONE(
					    listener->tcp_connp->conn_sqp,
					    ind_mp, tcp_send_conn_ind,
					    listener->tcp_connp, NULL,
					    SQ_FILL, SQTAG_TCP_CONN_IND);
				}
			}
			if (mp != NULL)
				tcp_rcv_enqueue(tcp, mp, msgdsize(mp), NULL);
			break;
		}
		mp = NULL;
	} while (more);

	if (conn_held) {
		CONN_DEC_REF(connp);
	}
}
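/*
 * Editor-added sketch of the multi-record loop driving kssl_input()
 * above: one inbound buffer may carry several SSL records, so the decoder
 * runs repeatedly until it reports that no complete record remains. The
 * decoder interface here (record_decode, rec_status_t) is hypothetical.
 */
#include <stddef.h>

typedef enum { REC_DONE, REC_MORE, REC_ERROR } rec_status_t;

static int
process_records(const unsigned char *buf, size_t len,
    rec_status_t (*record_decode)(const unsigned char **, size_t *))
{
	rec_status_t st;

	/*
	 * Each call consumes one record and advances the cursor; keep
	 * going while the decoder reports more complete records pending,
	 * just as tcp_kssl_input() loops while "more" is set.
	 */
	do {
		st = record_decode(&buf, &len);
	} while (st == REC_MORE);

	return (st == REC_ERROR ? -1 : 0);
}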
/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED. It fuses a direct path between itself
 * and the active connect tcp such that the regular tcp processing
 * may be bypassed under allowable circumstances. Because the fusion
 * requires both endpoints to be in the same squeue, it does not work
 * for simultaneous active connects because there is no easy way to
 * switch from one squeue to another once the connection is created.
 * This is different from the eager tcp case where we assign it the
 * same squeue as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcph_t *tcph)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit q_hiwat of the listener tcp, but we can't
	 * really use tcp_listener since we get here after sending up
	 * T_CONN_IND and tcp_wput_accept() may be called independently,
	 * at which point tcp_listener is cleared; this is why we use
	 * tcp_saved_listener. The listener itself is guaranteed to be
	 * around until tcp_accept_finish() is called on this eager --
	 * this won't happen until we're done since we're inside the
	 * eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);

	/*
	 * Lookup peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system. Zone check
	 * is applied accordingly for loopback address, but not for
	 * local address since we want fusion to happen across Zones.
	 */
	if (tcp->tcp_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcph);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcph);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same
	 * squeue as our conn and is not a raw socket. The squeue
	 * assignment of this eager tcp was done earlier at the time of
	 * SYN processing in ip_fanout_tcp{_v6}. Note that sharing the
	 * same squeue by itself doesn't guarantee a safe condition to
	 * fuse, hence we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);

	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback && peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 * In particular we bail out for non-simple TCP/IP or if IPsec/
	 * IPQoS policy/kernel SSL exists.
	 */
	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    !TCP_LOOPBACK_IP(tcp) && !TCP_LOOPBACK_IP(peer_tcp) &&
	    tcp->tcp_kssl_ent == NULL &&
	    !IPP_ENABLED(IPP_LOCAL_OUT|IPP_LOCAL_IN)) {
		mblk_t *mp;
		struct stroptions *stropt;
		queue_t *peer_rq = peer_tcp->tcp_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp) && peer_rq != NULL);
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(tcp->tcp_kssl_ctx == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 */
		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		tcp->tcp_fused_sigurg_mp = mp;

		if ((mp = allocb(1, BPRI_HI)) == NULL)
			goto failed;

		peer_tcp->tcp_fused_sigurg_mp = mp;

		/* Allocate M_SETOPTS mblk */
		if ((mp = allocb(sizeof (*stropt), BPRI_HI)) == NULL)
			goto failed;

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints. Having
		 * them set to non-zero values means asking for trouble
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly setup.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;

		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * At this point we are a detached eager tcp and therefore
		 * don't have a queue assigned to us until accept happens.
		 * In the mean time the peer endpoint may immediately send
		 * us data as soon as fusion is finished, and we need to be
		 * able to flow control it in case it sends down a huge
		 * amount of data while we're still detached. To prevent
		 * that we inherit the listener's q_hiwat value; this is
		 * temporary since we'll repeat the process in
		 * tcp_accept_finish().
		 */
		(void) tcp_fuse_set_rcv_hiwat(tcp,
		    tcp->tcp_saved_listener->tcp_rq->q_hiwat);

		/*
		 * Set the stream head's write offset value to zero since we
		 * won't be needing any room for TCP/IP headers; tell it to
		 * not break up the writes (this would reduce the amount of
		 * work done by kmem); and configure our receive buffer.
		 * Note that we can only do this for the active connect tcp
		 * since our eager is still detached; it will be dealt with
		 * later in tcp_accept_finish().
		 */
		DB_TYPE(mp) = M_SETOPTS;
		mp->b_wptr += sizeof (*stropt);

		stropt = (struct stroptions *)mp->b_rptr;
		stropt->so_flags = SO_MAXBLK | SO_WROFF | SO_HIWAT;
		stropt->so_maxblk = tcp_maxpsz_set(peer_tcp, B_FALSE);
		stropt->so_wroff = 0;

		/*
		 * Record the stream head's high water mark for the
		 * peer endpoint; this is used for flow-control
		 * purposes in tcp_fuse_output().
		 */
		stropt->so_hiwat = tcp_fuse_set_rcv_hiwat(peer_tcp,
		    peer_rq->q_hiwat);

		/* Send the options up */
		putnext(peer_rq, mp);
	} else {
		TCP_STAT(tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}
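/*
 * Editor-added sketch (hypothetical types, not illumos code) of the
 * reversed address-port quadruplet match underlying the peer lookup in
 * tcp_fuse(): the eager's local address/port must equal the candidate's
 * foreign address/port, and vice versa. The real lookup is performed by
 * ipcl_conn_tcp_lookup_reversed_ipv4/_ipv6 against the classifier hash.
 */
#include <stdint.h>

typedef struct four_tuple {
	uint32_t	laddr;		/* local IPv4 address */
	uint32_t	faddr;		/* foreign IPv4 address */
	uint16_t	lport;		/* local port */
	uint16_t	fport;		/* foreign port */
} four_tuple_t;

static int
is_reversed_peer(const four_tuple_t *a, const four_tuple_t *b)
{
	/* a's local side must be b's foreign side, and vice versa. */
	return (a->laddr == b->faddr && a->lport == b->fport &&
	    a->faddr == b->laddr && a->fport == b->lport);
}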