Example #1
/*
 * Return SNMP stuff in buffer in mpdata.
 */
mblk_t *
tcp_snmp_get(queue_t *q, mblk_t *mpctl, boolean_t legacy_req)
{
	mblk_t			*mpdata;
	mblk_t			*mp_conn_ctl = NULL;
	mblk_t			*mp_conn_tail;
	mblk_t			*mp_attr_ctl = NULL;
	mblk_t			*mp_attr_tail;
	mblk_t			*mp6_conn_ctl = NULL;
	mblk_t			*mp6_conn_tail;
	mblk_t			*mp6_attr_ctl = NULL;
	mblk_t			*mp6_attr_tail;
	struct opthdr		*optp;
	mib2_tcpConnEntry_t	tce;
	mib2_tcp6ConnEntry_t	tce6;
	mib2_transportMLPEntry_t mlp;
	connf_t			*connfp;
	int			i;
	boolean_t 		ispriv;
	zoneid_t 		zoneid;
	int			v4_conn_idx;
	int			v6_conn_idx;
	conn_t			*connp = Q_TO_CONN(q);
	tcp_stack_t		*tcps;
	ip_stack_t		*ipst;
	mblk_t			*mp2ctl;
	mib2_tcp_t		tcp_mib;
	size_t			tcp_mib_size, tce_size, tce6_size;

	/*
	 * make a copy of the original message
	 */
	mp2ctl = copymsg(mpctl);

	if (mpctl == NULL ||
	    (mpdata = mpctl->b_cont) == NULL ||
	    (mp_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp_attr_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_conn_ctl = copymsg(mpctl)) == NULL ||
	    (mp6_attr_ctl = copymsg(mpctl)) == NULL) {
		freemsg(mp_conn_ctl);
		freemsg(mp_attr_ctl);
		freemsg(mp6_conn_ctl);
		freemsg(mp6_attr_ctl);
		freemsg(mpctl);
		freemsg(mp2ctl);
		return (NULL);
	}

	ipst = connp->conn_netstack->netstack_ip;
	tcps = connp->conn_netstack->netstack_tcp;

	if (legacy_req) {
		tcp_mib_size = LEGACY_MIB_SIZE(&tcp_mib, mib2_tcp_t);
		tce_size = LEGACY_MIB_SIZE(&tce, mib2_tcpConnEntry_t);
		tce6_size = LEGACY_MIB_SIZE(&tce6, mib2_tcp6ConnEntry_t);
	} else {
		tcp_mib_size = sizeof (mib2_tcp_t);
		tce_size = sizeof (mib2_tcpConnEntry_t);
		tce6_size = sizeof (mib2_tcp6ConnEntry_t);
	}

	bzero(&tcp_mib, sizeof (tcp_mib));

	/* build table of connections -- need count in fixed part */
	SET_MIB(tcp_mib.tcpRtoAlgorithm, 4);   /* vanj */
	SET_MIB(tcp_mib.tcpRtoMin, tcps->tcps_rexmit_interval_min);
	SET_MIB(tcp_mib.tcpRtoMax, tcps->tcps_rexmit_interval_max);
	SET_MIB(tcp_mib.tcpMaxConn, -1);
	SET_MIB(tcp_mib.tcpCurrEstab, 0);

	ispriv =
	    secpolicy_ip_config((Q_TO_CONN(q))->conn_cred, B_TRUE) == 0;
	zoneid = Q_TO_CONN(q)->conn_zoneid;

	v4_conn_idx = v6_conn_idx = 0;
	mp_conn_tail = mp_attr_tail = mp6_conn_tail = mp6_attr_tail = NULL;

	for (i = 0; i < CONN_G_HASH_SIZE; i++) {
		ipst = tcps->tcps_netstack->netstack_ip;

		connfp = &ipst->ips_ipcl_globalhash_fanout[i];

		connp = NULL;

		while ((connp =
		    ipcl_get_next_conn(connfp, connp, IPCL_TCPCONN)) != NULL) {
			tcp_t *tcp;
			boolean_t needattr;

			if (connp->conn_zoneid != zoneid)
				continue;	/* not in this zone */

			tcp = connp->conn_tcp;
			TCPS_UPDATE_MIB(tcps, tcpHCInSegs, tcp->tcp_ibsegs);
			tcp->tcp_ibsegs = 0;
			TCPS_UPDATE_MIB(tcps, tcpHCOutSegs, tcp->tcp_obsegs);
			tcp->tcp_obsegs = 0;

			tce6.tcp6ConnState = tce.tcpConnState =
			    tcp_snmp_state(tcp);
			if (tce.tcpConnState == MIB2_TCP_established ||
			    tce.tcpConnState == MIB2_TCP_closeWait)
				BUMP_MIB(&tcp_mib, tcpCurrEstab);

			needattr = B_FALSE;
			bzero(&mlp, sizeof (mlp));
			if (connp->conn_mlp_type != mlptSingle) {
				if (connp->conn_mlp_type == mlptShared ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_SHARED;
				if (connp->conn_mlp_type == mlptPrivate ||
				    connp->conn_mlp_type == mlptBoth)
					mlp.tme_flags |= MIB2_TMEF_PRIVATE;
				needattr = B_TRUE;
			}
			if (connp->conn_anon_mlp) {
				mlp.tme_flags |= MIB2_TMEF_ANONMLP;
				needattr = B_TRUE;
			}
			switch (connp->conn_mac_mode) {
			case CONN_MAC_DEFAULT:
				break;
			case CONN_MAC_AWARE:
				mlp.tme_flags |= MIB2_TMEF_MACEXEMPT;
				needattr = B_TRUE;
				break;
			case CONN_MAC_IMPLICIT:
				mlp.tme_flags |= MIB2_TMEF_MACIMPLICIT;
				needattr = B_TRUE;
				break;
			}
			if (connp->conn_ixa->ixa_tsl != NULL) {
				ts_label_t *tsl;

				tsl = connp->conn_ixa->ixa_tsl;
				mlp.tme_flags |= MIB2_TMEF_IS_LABELED;
				mlp.tme_doi = label2doi(tsl);
				mlp.tme_label = *label2bslabel(tsl);
				needattr = B_TRUE;
			}

			/* Create a message to report on IPv6 entries */
			if (connp->conn_ipversion == IPV6_VERSION) {
			tce6.tcp6ConnLocalAddress = connp->conn_laddr_v6;
			tce6.tcp6ConnRemAddress = connp->conn_faddr_v6;
			tce6.tcp6ConnLocalPort = ntohs(connp->conn_lport);
			tce6.tcp6ConnRemPort = ntohs(connp->conn_fport);
			if (connp->conn_ixa->ixa_flags & IXAF_SCOPEID_SET) {
				tce6.tcp6ConnIfIndex =
				    connp->conn_ixa->ixa_scopeid;
			} else {
				tce6.tcp6ConnIfIndex = connp->conn_bound_if;
			}
			/* Don't want just anybody seeing these... */
			if (ispriv) {
				tce6.tcp6ConnEntryInfo.ce_snxt =
				    tcp->tcp_snxt;
				tce6.tcp6ConnEntryInfo.ce_suna =
				    tcp->tcp_suna;
				tce6.tcp6ConnEntryInfo.ce_rnxt =
				    tcp->tcp_rnxt;
				tce6.tcp6ConnEntryInfo.ce_rack =
				    tcp->tcp_rack;
			} else {
				/*
				 * Netstat, unfortunately, uses this to
				 * get send/receive queue sizes.  How to fix?
				 * Why not compute the difference only?
				 */
				tce6.tcp6ConnEntryInfo.ce_snxt =
				    tcp->tcp_snxt - tcp->tcp_suna;
				tce6.tcp6ConnEntryInfo.ce_suna = 0;
				tce6.tcp6ConnEntryInfo.ce_rnxt =
				    tcp->tcp_rnxt - tcp->tcp_rack;
				tce6.tcp6ConnEntryInfo.ce_rack = 0;
			}

			tce6.tcp6ConnEntryInfo.ce_swnd = tcp->tcp_swnd;
			tce6.tcp6ConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
			tce6.tcp6ConnEntryInfo.ce_rto =  tcp->tcp_rto;
			tce6.tcp6ConnEntryInfo.ce_mss =  tcp->tcp_mss;
			tce6.tcp6ConnEntryInfo.ce_state = tcp->tcp_state;

			tce6.tcp6ConnCreationProcess =
			    (connp->conn_cpid < 0) ? MIB2_UNKNOWN_PROCESS :
			    connp->conn_cpid;
			tce6.tcp6ConnCreationTime = connp->conn_open_time;

			(void) snmp_append_data2(mp6_conn_ctl->b_cont,
			    &mp6_conn_tail, (char *)&tce6, tce6_size);

			mlp.tme_connidx = v6_conn_idx++;
			if (needattr)
				(void) snmp_append_data2(mp6_attr_ctl->b_cont,
				    &mp6_attr_tail, (char *)&mlp, sizeof (mlp));
			}
			/*
			 * Create an IPv4 table entry for IPv4 entries and also
			 * for IPv6 entries which are bound to in6addr_any
			 * but don't have IPV6_V6ONLY set.
			 * (i.e. anything an IPv4 peer could connect to)
			 */
			if (connp->conn_ipversion == IPV4_VERSION ||
			    (tcp->tcp_state <= TCPS_LISTEN &&
			    !connp->conn_ipv6_v6only &&
			    IN6_IS_ADDR_UNSPECIFIED(&connp->conn_laddr_v6))) {
				if (connp->conn_ipversion == IPV6_VERSION) {
					tce.tcpConnRemAddress = INADDR_ANY;
					tce.tcpConnLocalAddress = INADDR_ANY;
				} else {
					tce.tcpConnRemAddress =
					    connp->conn_faddr_v4;
					tce.tcpConnLocalAddress =
					    connp->conn_laddr_v4;
				}
				tce.tcpConnLocalPort = ntohs(connp->conn_lport);
				tce.tcpConnRemPort = ntohs(connp->conn_fport);
				/* Don't want just anybody seeing these... */
				if (ispriv) {
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt;
					tce.tcpConnEntryInfo.ce_suna =
					    tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt;
					tce.tcpConnEntryInfo.ce_rack =
					    tcp->tcp_rack;
				} else {
					/*
					 * Netstat, unfortunately, uses this to
					 * get send/receive queue sizes.  How
					 * to fix?
					 * Why not compute the difference only?
					 */
					tce.tcpConnEntryInfo.ce_snxt =
					    tcp->tcp_snxt - tcp->tcp_suna;
					tce.tcpConnEntryInfo.ce_suna = 0;
					tce.tcpConnEntryInfo.ce_rnxt =
					    tcp->tcp_rnxt - tcp->tcp_rack;
					tce.tcpConnEntryInfo.ce_rack = 0;
				}

				tce.tcpConnEntryInfo.ce_swnd = tcp->tcp_swnd;
				tce.tcpConnEntryInfo.ce_rwnd = tcp->tcp_rwnd;
				tce.tcpConnEntryInfo.ce_rto =  tcp->tcp_rto;
				tce.tcpConnEntryInfo.ce_mss =  tcp->tcp_mss;
				tce.tcpConnEntryInfo.ce_state =
				    tcp->tcp_state;

				tce.tcpConnCreationProcess =
				    (connp->conn_cpid < 0) ?
				    MIB2_UNKNOWN_PROCESS :
				    connp->conn_cpid;
				tce.tcpConnCreationTime = connp->conn_open_time;

				(void) snmp_append_data2(mp_conn_ctl->b_cont,
				    &mp_conn_tail, (char *)&tce, tce_size);

				mlp.tme_connidx = v4_conn_idx++;
				if (needattr)
					(void) snmp_append_data2(
					    mp_attr_ctl->b_cont,
					    &mp_attr_tail, (char *)&mlp,
					    sizeof (mlp));
			}
		}
	}

	tcp_sum_mib(tcps, &tcp_mib);

	/* Fixed length structure for IPv4 and IPv6 counters */
	SET_MIB(tcp_mib.tcpConnTableSize, tce_size);
	SET_MIB(tcp_mib.tcp6ConnTableSize, tce6_size);

	/*
	 * Synchronize 32- and 64-bit counters.  Note that tcpInSegs and
	 * tcpOutSegs are not updated anywhere in TCP; only the new 64-bit
	 * counters are used, so the old counters' values in tcp_sc_mib
	 * are always 0.
	 */
	SYNC32_MIB(&tcp_mib, tcpInSegs, tcpHCInSegs);
	SYNC32_MIB(&tcp_mib, tcpOutSegs, tcpHCOutSegs);

	optp = (struct opthdr *)&mpctl->b_rptr[sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = 0;
	(void) snmp_append_data(mpdata, (char *)&tcp_mib, tcp_mib_size);
	optp->len = msgdsize(mpdata);
	qreply(q, mpctl);

	/* table of connections... */
	optp = (struct opthdr *)&mp_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = MIB2_TCP_CONN;
	optp->len = msgdsize(mp_conn_ctl->b_cont);
	qreply(q, mp_conn_ctl);

	/* table of MLP attributes... */
	optp = (struct opthdr *)&mp_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp_attr_ctl);
	else
		qreply(q, mp_attr_ctl);

	/* table of IPv6 connections... */
	optp = (struct opthdr *)&mp6_conn_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = MIB2_TCP6_CONN;
	optp->len = msgdsize(mp6_conn_ctl->b_cont);
	qreply(q, mp6_conn_ctl);

	/* table of IPv6 MLP attributes... */
	optp = (struct opthdr *)&mp6_attr_ctl->b_rptr[
	    sizeof (struct T_optmgmt_ack)];
	optp->level = MIB2_TCP6;
	optp->name = EXPER_XPORT_MLP;
	optp->len = msgdsize(mp6_attr_ctl->b_cont);
	if (optp->len == 0)
		freemsg(mp6_attr_ctl);
	else
		qreply(q, mp6_attr_ctl);
	return (mp2ctl);
}
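
Only privileged callers get the raw TCP sequence numbers above; everyone else is given just the send and receive queue depths. A minimal standalone sketch of that masking, using a hypothetical struct and helper (not the real MIB types):

#include <stdint.h>

/* Hypothetical reduced view of the connection-info fields filled in above. */
struct conn_info {
	uint32_t ce_snxt;	/* next seq to send, or send-queue depth */
	uint32_t ce_suna;	/* oldest unacked seq, or 0 */
	uint32_t ce_rnxt;	/* next seq expected, or recv-queue depth */
	uint32_t ce_rack;	/* highest seq acked by us, or 0 */
};

/*
 * Sketch of the masking done by tcp_snmp_get(): privileged callers see the
 * raw sequence numbers, unprivileged callers only see the differences
 * (unacknowledged send data and undelivered receive data).
 */
static void
fill_conn_info(struct conn_info *ci, int ispriv,
    uint32_t snxt, uint32_t suna, uint32_t rnxt, uint32_t rack)
{
	if (ispriv) {
		ci->ce_snxt = snxt;
		ci->ce_suna = suna;
		ci->ce_rnxt = rnxt;
		ci->ce_rack = rack;
	} else {
		ci->ce_snxt = snxt - suna;	/* bytes in flight */
		ci->ce_suna = 0;
		ci->ce_rnxt = rnxt - rack;	/* received but unacked bytes */
		ci->ce_rack = 0;
	}
}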
Example #2
/*
 * Returns 0 if there is at least one other active faddr, -1 if there
 * are none. If there are none left, faddr_dead() will start killing the
 * association.
 * If the downed faddr was the current faddr, a new current faddr
 * will be chosen.
 */
int
sctp_faddr_dead(sctp_t *sctp, sctp_faddr_t *fp, int newstate)
{
	sctp_faddr_t *ofp;
	sctp_stack_t *sctps = sctp->sctp_sctps;

	if (fp->state == SCTP_FADDRS_ALIVE) {
		sctp_intf_event(sctp, fp->faddr, SCTP_ADDR_UNREACHABLE, 0);
	}
	fp->state = newstate;

	dprint(1, ("sctp_faddr_dead: %x:%x:%x:%x down (state=%d)\n",
	    SCTP_PRINTADDR(fp->faddr), newstate));

	if (fp == sctp->sctp_current) {
		/* Current faddr down; need to switch it */
		sctp->sctp_current = NULL;
	}

	/* Find next alive faddr */
	ofp = fp;
	for (fp = fp->next; fp != NULL; fp = fp->next) {
		if (fp->state == SCTP_FADDRS_ALIVE) {
			break;
		}
	}

	if (fp == NULL) {
		/* Continue from beginning of list */
		for (fp = sctp->sctp_faddrs; fp != ofp; fp = fp->next) {
			if (fp->state == SCTP_FADDRS_ALIVE) {
				break;
			}
		}
	}

	/*
	 * If we found another live fp and the dead faddr was the current
	 * one, make the new fp the current faddr.
	 */
	if (fp != ofp) {
		if (sctp->sctp_current == NULL) {
			dprint(1, ("sctp_faddr_dead: failover->%x:%x:%x:%x\n",
			    SCTP_PRINTADDR(fp->faddr)));
			/*
			 * Note that we don't need to reset the source addr
			 * of the new fp.
			 */
			sctp_set_faddr_current(sctp, fp);
		}
		return (0);
	}


	/* All faddrs are down; kill the association */
	dprint(1, ("sctp_faddr_dead: all faddrs down, killing assoc\n"));
	BUMP_MIB(&sctps->sctps_mib, sctpAborted);
	sctp_assoc_event(sctp, sctp->sctp_state < SCTPS_ESTABLISHED ?
	    SCTP_CANT_STR_ASSOC : SCTP_COMM_LOST, 0, NULL);
	sctp_clean_death(sctp, sctp->sctp_client_errno ?
	    sctp->sctp_client_errno : ETIMEDOUT);

	return (-1);
}
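
The next live peer address is found with a wrap-around scan: forward from the dead address to the end of the list, then from the head back to the dead address. A simplified sketch of that scan, with hypothetical types standing in for sctp_faddr_t:

/*
 * Simplified sketch of the wrap-around scan in sctp_faddr_dead().
 * Returns NULL when no peer address is alive; the caller then kills
 * the association.
 */
struct faddr {
	struct faddr	*next;
	int		alive;
};

static struct faddr *
next_alive_faddr(struct faddr *head, struct faddr *dead)
{
	struct faddr *fp;

	/* Search from the dead address to the end of the list... */
	for (fp = dead->next; fp != NULL; fp = fp->next) {
		if (fp->alive)
			return (fp);
	}
	/* ...then wrap around from the head back to the dead address. */
	for (fp = head; fp != dead; fp = fp->next) {
		if (fp->alive)
			return (fp);
	}
	return (NULL);	/* every address is down */
}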
Example #3
File: sctp_snmp.c  Project: andreiw/polaris
static int
sctp_kstat_update(kstat_t *kp, int rw)
{
	sctp_named_kstat_t	*sctpkp;
	sctp_t			*sctp, *sctp_prev;
	zoneid_t		zoneid;

	if (kp == NULL || kp->ks_data == NULL)
		return (EIO);

	if (rw == KSTAT_WRITE)
		return (EACCES);

	zoneid = getzoneid();

	/*
	 * Get the number of current associations and gather their
	 * individual set of statistics.
	 */
	SET_MIB(sctp_mib.sctpCurrEstab, 0);
	sctp = gsctp;
	sctp_prev = NULL;
	mutex_enter(&sctp_g_lock);
	while (sctp != NULL) {
		mutex_enter(&sctp->sctp_reflock);
		if (sctp->sctp_condemned) {
			mutex_exit(&sctp->sctp_reflock);
			sctp = list_next(&sctp_g_list, sctp);
			continue;
		}
		sctp->sctp_refcnt++;
		mutex_exit(&sctp->sctp_reflock);
		mutex_exit(&sctp_g_lock);
		if (sctp_prev != NULL)
			SCTP_REFRELE(sctp_prev);
		if (sctp->sctp_connp->conn_zoneid != zoneid)
			goto next_sctp;
		if (sctp->sctp_state == SCTPS_ESTABLISHED ||
		    sctp->sctp_state == SCTPS_SHUTDOWN_PENDING ||
		    sctp->sctp_state == SCTPS_SHUTDOWN_RECEIVED) {
			BUMP_MIB(&sctp_mib, sctpCurrEstab);
		}

		if (sctp->sctp_opkts) {
			UPDATE_MIB(&sctp_mib, sctpOutSCTPPkts,
			    sctp->sctp_opkts);
			sctp->sctp_opkts = 0;
		}

		if (sctp->sctp_obchunks) {
			UPDATE_MIB(&sctp_mib, sctpOutCtrlChunks,
			    sctp->sctp_obchunks);
			sctp->sctp_obchunks = 0;
		}

		if (sctp->sctp_odchunks) {
			UPDATE_MIB(&sctp_mib, sctpOutOrderChunks,
			    sctp->sctp_odchunks);
			sctp->sctp_odchunks = 0;
		}

		if (sctp->sctp_oudchunks) {
			UPDATE_MIB(&sctp_mib, sctpOutUnorderChunks,
			    sctp->sctp_oudchunks);
			sctp->sctp_oudchunks = 0;
		}

		if (sctp->sctp_rxtchunks) {
			UPDATE_MIB(&sctp_mib, sctpRetransChunks,
			    sctp->sctp_rxtchunks);
			sctp->sctp_rxtchunks = 0;
		}

		if (sctp->sctp_ipkts) {
			UPDATE_MIB(&sctp_mib, sctpInSCTPPkts, sctp->sctp_ipkts);
			sctp->sctp_ipkts = 0;
		}

		if (sctp->sctp_ibchunks) {
			UPDATE_MIB(&sctp_mib, sctpInCtrlChunks,
			    sctp->sctp_ibchunks);
			sctp->sctp_ibchunks = 0;
		}

		if (sctp->sctp_idchunks) {
			UPDATE_MIB(&sctp_mib, sctpInOrderChunks,
			    sctp->sctp_idchunks);
			sctp->sctp_idchunks = 0;
		}

		if (sctp->sctp_iudchunks) {
			UPDATE_MIB(&sctp_mib, sctpInUnorderChunks,
			    sctp->sctp_iudchunks);
			sctp->sctp_iudchunks = 0;
		}

		if (sctp->sctp_fragdmsgs) {
			UPDATE_MIB(&sctp_mib, sctpFragUsrMsgs,
			    sctp->sctp_fragdmsgs);
			sctp->sctp_fragdmsgs = 0;
		}

		if (sctp->sctp_reassmsgs) {
			UPDATE_MIB(&sctp_mib, sctpReasmUsrMsgs,
			    sctp->sctp_reassmsgs);
			sctp->sctp_reassmsgs = 0;
		}

next_sctp:
		sctp_prev = sctp;
		mutex_enter(&sctp_g_lock);
		sctp = list_next(&sctp_g_list, sctp);
	}
	mutex_exit(&sctp_g_lock);
	if (sctp_prev != NULL)
		SCTP_REFRELE(sctp_prev);

	/* Copy data from the SCTP MIB */
	sctpkp = (sctp_named_kstat_t *)kp->ks_data;

	/* These are from global ndd params. */
	sctpkp->sctpRtoMin.value.ui32 = sctp_rto_ming;
	sctpkp->sctpRtoMax.value.ui32 = sctp_rto_maxg;
	sctpkp->sctpRtoInitial.value.ui32 = sctp_rto_initialg;
	sctpkp->sctpValCookieLife.value.ui32 = sctp_cookie_life;
	sctpkp->sctpMaxInitRetr.value.ui32 = sctp_max_init_retr;

	sctpkp->sctpCurrEstab.value.i32 = sctp_mib.sctpCurrEstab;
	sctpkp->sctpActiveEstab.value.i32 = sctp_mib.sctpActiveEstab;
	sctpkp->sctpPassiveEstab.value.i32 = sctp_mib.sctpPassiveEstab;
	sctpkp->sctpAborted.value.i32 = sctp_mib.sctpAborted;
	sctpkp->sctpShutdowns.value.i32 = sctp_mib.sctpShutdowns;
	sctpkp->sctpOutOfBlue.value.i32 = sctp_mib.sctpOutOfBlue;
	sctpkp->sctpChecksumError.value.i32 = sctp_mib.sctpChecksumError;
	sctpkp->sctpOutCtrlChunks.value.i64 = sctp_mib.sctpOutCtrlChunks;
	sctpkp->sctpOutOrderChunks.value.i64 = sctp_mib.sctpOutOrderChunks;
	sctpkp->sctpOutUnorderChunks.value.i64 = sctp_mib.sctpOutUnorderChunks;
	sctpkp->sctpRetransChunks.value.i64 = sctp_mib.sctpRetransChunks;
	sctpkp->sctpOutAck.value.i32 = sctp_mib.sctpOutAck;
	sctpkp->sctpOutAckDelayed.value.i32 = sctp_mib.sctpOutAckDelayed;
	sctpkp->sctpOutWinUpdate.value.i32 = sctp_mib.sctpOutWinUpdate;
	sctpkp->sctpOutFastRetrans.value.i32 = sctp_mib.sctpOutFastRetrans;
	sctpkp->sctpOutWinProbe.value.i32 = sctp_mib.sctpOutWinProbe;
	sctpkp->sctpInCtrlChunks.value.i64 = sctp_mib.sctpInCtrlChunks;
	sctpkp->sctpInOrderChunks.value.i64 = sctp_mib.sctpInOrderChunks;
	sctpkp->sctpInUnorderChunks.value.i64 = sctp_mib.sctpInUnorderChunks;
	sctpkp->sctpInAck.value.i32 = sctp_mib.sctpInAck;
	sctpkp->sctpInDupAck.value.i32 = sctp_mib.sctpInDupAck;
	sctpkp->sctpInAckUnsent.value.i32 = sctp_mib.sctpInAckUnsent;
	sctpkp->sctpFragUsrMsgs.value.i64 = sctp_mib.sctpFragUsrMsgs;
	sctpkp->sctpReasmUsrMsgs.value.i64 = sctp_mib.sctpReasmUsrMsgs;
	sctpkp->sctpOutSCTPPkts.value.i64 = sctp_mib.sctpOutSCTPPkts;
	sctpkp->sctpInSCTPPkts.value.i64 = sctp_mib.sctpInSCTPPkts;
	sctpkp->sctpInInvalidCookie.value.i32 = sctp_mib.sctpInInvalidCookie;
	sctpkp->sctpTimRetrans.value.i32 = sctp_mib.sctpTimRetrans;
	sctpkp->sctpTimRetransDrop.value.i32 = sctp_mib.sctpTimRetransDrop;
	sctpkp->sctpTimHeartBeatProbe.value.i32 =
	    sctp_mib.sctpTimHeartBeatProbe;
	sctpkp->sctpTimHeartBeatDrop.value.i32 = sctp_mib.sctpTimHeartBeatDrop;
	sctpkp->sctpListenDrop.value.i32 = sctp_mib.sctpListenDrop;
	sctpkp->sctpInClosed.value.i32 = sctp_mib.sctpInClosed;

	return (0);
}
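
Each nonzero per-association counter is folded into the global MIB and then cleared, so the next kstat update only adds the delta accumulated since this pass. A minimal sketch of that harvest-and-reset step, with hypothetical stat structs in place of the real MIB and sctp_t fields:

#include <stdint.h>

/* Hypothetical per-association and global counter sets. */
struct assoc_stats {
	uint64_t opkts;
	uint64_t ipkts;
};

struct global_stats {
	uint64_t out_pkts;
	uint64_t in_pkts;
};

/*
 * Sketch of the harvest-and-reset pattern in sctp_kstat_update(): fold
 * each per-association counter into the global total, then zero it so
 * only the new delta is added on the next pass.
 */
static void
harvest_assoc_stats(struct global_stats *g, struct assoc_stats *a)
{
	if (a->opkts != 0) {
		g->out_pkts += a->opkts;
		a->opkts = 0;
	}
	if (a->ipkts != 0) {
		g->in_pkts += a->ipkts;
		a->ipkts = 0;
	}
}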
Example #4
static void
sctp_rc_timer(sctp_t *sctp, sctp_faddr_t *fp)
{
#define	SCTP_CLR_SENT_FLAG(mp)	((mp)->b_flag &= ~SCTP_CHUNK_FLAG_SENT)
	sctp_faddr_t	*nfp;
	sctp_faddr_t	*ofp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;

	ASSERT(fp != NULL);

	fp->rc_timer_running = 0;

	if (sctp->sctp_state != SCTPS_ESTABLISHED ||
	    sctp->sctp_cxmit_list == NULL) {
		return;
	}
	/*
	 * Not a retransmission, this was deferred due to some error
	 * condition
	 */
	if (!SCTP_CHUNK_ISSENT(sctp->sctp_cxmit_list)) {
		sctp_wput_asconf(sctp, fp);
		return;
	}
	/*
	 * The sent flag indicates if the msg has been sent on this fp.
	 */
	SCTP_CLR_SENT_FLAG(sctp->sctp_cxmit_list);
	/* Retransmission */
	if (sctp->sctp_strikes >= sctp->sctp_pa_max_rxt) {
		/* time to give up */
		BUMP_MIB(&sctps->sctps_mib, sctpAborted);
		sctp_assoc_event(sctp, SCTP_COMM_LOST, 0, NULL);
		sctp_clean_death(sctp, ETIMEDOUT);
		return;
	}
	if (fp->strikes >= fp->max_retr) {
		if (sctp_faddr_dead(sctp, fp, SCTP_FADDRS_DOWN) == -1)
			return;
	}

	fp->strikes++;
	sctp->sctp_strikes++;
	SCTP_CALC_RXT(sctp, fp);

	nfp = sctp_rotate_faddr(sctp, fp);
	sctp->sctp_cchunk_pend = 0;
	ofp = SCTP_CHUNK_DEST(sctp->sctp_cxmit_list);
	SCTP_SET_CHUNK_DEST(sctp->sctp_cxmit_list, NULL);
	ASSERT(ofp != NULL && ofp == fp);
	ASSERT(ofp->suna >= MBLKL(sctp->sctp_cxmit_list));
	/*
	 * Enter slow start for this destination.
	 * XXX anything in the data path that needs to be considered?
	 */
	ofp->ssthresh = ofp->cwnd / 2;
	if (ofp->ssthresh < 2 * ofp->sfa_pmss)
		ofp->ssthresh = 2 * ofp->sfa_pmss;
	ofp->cwnd = ofp->sfa_pmss;
	ofp->pba = 0;
	ofp->suna -= MBLKL(sctp->sctp_cxmit_list);
	/*
	 * The rexmit flag is used to determine if a serial number needs to
	 * be assigned or not, so once set we leave it there.
	 */
	if (!SCTP_CHUNK_WANT_REXMIT(sctp->sctp_cxmit_list))
		SCTP_CHUNK_REXMIT(sctp->sctp_cxmit_list);
	sctp_wput_asconf(sctp, nfp);
#undef	SCTP_CLR_SENT_FLAG
}
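
After the retransmission timeout the destination re-enters slow start: ssthresh becomes half the congestion window but never less than two path MSS, the window collapses to one MSS, and partial-bytes-acked is cleared. A standalone sketch of that arithmetic, assuming a hypothetical per-destination struct:

#include <stdint.h>

/* Hypothetical per-destination congestion state. */
struct dest_cc {
	uint32_t cwnd;		/* congestion window, bytes */
	uint32_t ssthresh;	/* slow-start threshold, bytes */
	uint32_t pmss;		/* path MSS, bytes */
	uint32_t pba;		/* partial bytes acked */
};

/*
 * Sketch of the slow-start entry done by sctp_rc_timer() after a timeout:
 * ssthresh = max(cwnd / 2, 2 * MSS), cwnd drops to one MSS, and the
 * partial-bytes-acked counter is cleared.
 */
static void
enter_slow_start(struct dest_cc *cc)
{
	cc->ssthresh = cc->cwnd / 2;
	if (cc->ssthresh < 2 * cc->pmss)
		cc->ssthresh = 2 * cc->pmss;
	cc->cwnd = cc->pmss;
	cc->pba = 0;
}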
Example #5
File: sctp_hash.c  Project: andreiw/polaris
/* ARGSUSED */
void
ip_fanout_sctp(mblk_t *mp, ill_t *recv_ill, ipha_t *ipha,
    uint32_t ports, uint_t flags, boolean_t mctl_present, boolean_t ip_policy,
    uint_t ipif_seqid, zoneid_t zoneid)
{
	sctp_t *sctp;
	boolean_t isv4;
	conn_t *connp;
	mblk_t *first_mp;
	ip6_t *ip6h;
	in6_addr_t map_src, map_dst;
	in6_addr_t *src, *dst;

	first_mp = mp;
	if (mctl_present) {
		mp = first_mp->b_cont;
		ASSERT(mp != NULL);
	}

	/* Assume IP provides aligned packets - otherwise toss */
	if (!OK_32PTR(mp->b_rptr)) {
		BUMP_MIB(&ip_mib, ipInDiscards);
		freemsg(first_mp);
		return;
	}

	if (IPH_HDR_VERSION(ipha) == IPV6_VERSION) {
		ip6h = (ip6_t *)ipha;
		src = &ip6h->ip6_src;
		dst = &ip6h->ip6_dst;
		isv4 = B_FALSE;
	} else {
		ip6h = NULL;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &map_src);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &map_dst);
		src = &map_src;
		dst = &map_dst;
		isv4 = B_TRUE;
	}
	if ((connp = sctp_fanout(src, dst, ports, ipif_seqid, zoneid, mp)) ==
	    NULL) {
		ip_fanout_sctp_raw(first_mp, recv_ill, ipha, isv4,
		    ports, mctl_present, flags, ip_policy,
		    ipif_seqid, zoneid);
		return;
	}
	sctp = CONN2SCTP(connp);

	/* Found a client; up it goes */
	BUMP_MIB(&ip_mib, ipInDelivers);

	/*
	 * We check some fields in conn_t without holding a lock.
	 * This should be fine.
	 */
	if (CONN_INBOUND_POLICY_PRESENT(connp) || mctl_present) {
		first_mp = ipsec_check_inbound_policy(first_mp, connp,
		    ipha, NULL, mctl_present);
		if (first_mp == NULL) {
			SCTP_REFRELE(sctp);
			return;
		}
	}

	/* Initiate IPPF processing for fastpath */
	if (IPP_ENABLED(IPP_LOCAL_IN)) {
		ip_process(IPP_LOCAL_IN, &mp,
		    recv_ill->ill_phyint->phyint_ifindex);
		if (mp == NULL) {
			SCTP_REFRELE(sctp);
			if (mctl_present)
				freeb(first_mp);
			return;
		} else if (mctl_present) {
			/*
			 * ip_process might return a new mp.
			 */
			ASSERT(first_mp != mp);
			first_mp->b_cont = mp;
		} else {
			first_mp = mp;
		}
	}

	if (connp->conn_recvif || connp->conn_recvslla ||
	    connp->conn_ipv6_recvpktinfo) {
		int in_flags = 0;

		if (connp->conn_recvif || connp->conn_ipv6_recvpktinfo) {
			in_flags = IPF_RECVIF;
		}
		if (connp->conn_recvslla) {
			in_flags |= IPF_RECVSLLA;
		}
		if (isv4) {
			mp = ip_add_info(mp, recv_ill, in_flags);
		} else {
			mp = ip_add_info_v6(mp, recv_ill, &ip6h->ip6_dst);
		}
		if (mp == NULL) {
			SCTP_REFRELE(sctp);
			if (mctl_present)
				freeb(first_mp);
			return;
		} else if (mctl_present) {
			/*
			 * ip_add_info might return a new mp.
			 */
			ASSERT(first_mp != mp);
			first_mp->b_cont = mp;
		} else {
			first_mp = mp;
		}
	}

	mutex_enter(&sctp->sctp_lock);
	if (sctp->sctp_running) {
		if (mctl_present)
			mp->b_prev = first_mp;
		if (!sctp_add_recvq(sctp, mp, B_FALSE)) {
			BUMP_MIB(&ip_mib, ipInDiscards);
			freemsg(first_mp);
		}
		mutex_exit(&sctp->sctp_lock);
	} else {
		sctp->sctp_running = B_TRUE;
		mutex_exit(&sctp->sctp_lock);

		mutex_enter(&sctp->sctp_recvq_lock);
		if (sctp->sctp_recvq != NULL) {
			if (mctl_present)
				mp->b_prev = first_mp;
			if (!sctp_add_recvq(sctp, mp, B_TRUE)) {
				BUMP_MIB(&ip_mib, ipInDiscards);
				freemsg(first_mp);
			}
			mutex_exit(&sctp->sctp_recvq_lock);
			WAKE_SCTP(sctp);
		} else {
			mutex_exit(&sctp->sctp_recvq_lock);
			sctp_input_data(sctp, mp, (mctl_present ? first_mp :
			    NULL));
			WAKE_SCTP(sctp);
			sctp_process_sendq(sctp);
		}
	}
	SCTP_REFRELE(sctp);
}
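
IPv4 source and destination addresses are rewritten as v4-mapped IPv6 addresses so a single in6_addr_t-keyed fanout lookup serves both families. A simplified stand-in for what IN6_IPADDR_TO_V4MAPPED does, writing into a plain 16-byte array rather than an in6_addr_t:

#include <stdint.h>
#include <string.h>

/*
 * Sketch of v4-mapped address construction (::ffff:a.b.c.d): 80 zero bits,
 * 16 one bits, then the IPv4 address in network byte order, so one 128-bit
 * key covers both address families.
 */
static void
v4_to_v4mapped(uint32_t v4_nbo, uint8_t v6[16])
{
	memset(v6, 0, 10);		/* 80 zero bits */
	v6[10] = 0xff;
	v6[11] = 0xff;
	memcpy(&v6[12], &v4_nbo, 4);	/* low 32 bits carry the IPv4 addr */
}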
Example #6
/*
 * Fusion output routine for urgent data.  This routine is called by
 * tcp_fuse_output() for handling non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer.  For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode.  This is similar to the
	 * urgent data handling in the regular tcp.  We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app.  There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp.  If the new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost.  This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcp_fusion_urg);
	BUMP_MIB(&tcp_mib, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove existing T_EXDATA_IND, keep the data which follows
		 * it and relink our list.  Note that we don't modify the
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}
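
The loop above enforces the single out-of-band mark: any older T_EXDATA_IND wrapper still queued for the peer is unlinked and its data kept in place, turning old urgent data into ordinary data. A simplified sketch of that list surgery on hypothetical message nodes (not real mblk_t handling):

/*
 * Simplified sketch of the "only one urgent mark" rule: walk a receive
 * list and replace every old marker node with the data message it wraps.
 * Markers are assumed to always carry a payload, as asserted above.
 */
struct msg {
	struct msg	*next;		/* next message on the list */
	struct msg	*payload;	/* wrapped data (markers only) */
	int		is_marker;
};

static struct msg *
strip_old_marks(struct msg *head, void (*free_msg)(struct msg *))
{
	struct msg *prev = NULL;
	struct msg *m = head;

	while (m != NULL) {
		if (m->is_marker) {
			struct msg *data = m->payload;

			data->next = m->next;	/* keep the wrapped data */
			if (prev == NULL)
				head = data;
			else
				prev->next = data;
			m->payload = NULL;
			m->next = NULL;
			free_msg(m);		/* drop the old marker */
			m = data;
		}
		prev = m;
		m = m->next;
	}
	return (head);
}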
Example #7
/* Process the COOKIE packet, mp, directed at the listener 'sctp' */
sctp_t *
sctp_conn_request(sctp_t *sctp, mblk_t *mp, uint_t ifindex, uint_t ip_hdr_len,
    sctp_init_chunk_t *iack, ip_recv_attr_t *ira)
{
	sctp_t	*eager;
	ip6_t	*ip6h;
	int	err;
	conn_t	*connp, *econnp;
	sctp_stack_t	*sctps;
	struct sock_proto_props sopp;
	cred_t		*cr;
	pid_t		cpid;
	in6_addr_t	faddr, laddr;
	ip_xmit_attr_t	*ixa;

	/*
	 * No need to check for duplicates: this is the listener and we are
	 * holding the lock, so no new connection can be created from it.
	 * And since the fanout lookup already done could not find a match,
	 * there is no duplicate.
	 */
	ASSERT(OK_32PTR(mp->b_rptr));

	if ((eager = sctp_create_eager(sctp)) == NULL) {
		return (NULL);
	}

	connp = sctp->sctp_connp;
	sctps = sctp->sctp_sctps;
	econnp = eager->sctp_connp;

	if (connp->conn_policy != NULL) {
		/* Inherit the policy from the listener; use actions from ira */
		if (!ip_ipsec_policy_inherit(econnp, connp, ira)) {
			sctp_close_eager(eager);
			BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
			return (NULL);
		}
	}

	ip6h = (ip6_t *)mp->b_rptr;
	if (ira->ira_flags & IXAF_IS_IPV4) {
		ipha_t	*ipha;

		ipha = (ipha_t *)ip6h;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &laddr);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &faddr);
	} else {
		laddr = ip6h->ip6_dst;
		faddr = ip6h->ip6_src;
	}

	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		/*
		 * XXX need to fix the cached policy issue here.
		 * We temporarily set the conn_laddr/conn_faddr here so
		 * that IPsec can use it for the latched policy
		 * selector.  This is obviously wrong as SCTP can
		 * use different addresses...
		 */
		econnp->conn_laddr_v6 = laddr;
		econnp->conn_faddr_v6 = faddr;
		econnp->conn_saddr_v6 = laddr;
	}
	if (ipsec_conn_cache_policy(econnp,
	    (ira->ira_flags & IRAF_IS_IPV4) != 0) != 0) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}

	/* Save for getpeerucred */
	cr = ira->ira_cred;
	cpid = ira->ira_cpid;

	if (is_system_labeled()) {
		ip_xmit_attr_t *ixa = econnp->conn_ixa;

		ASSERT(ira->ira_tsl != NULL);

		/* Discard any old label */
		if (ixa->ixa_free_flags & IXA_FREE_TSL) {
			ASSERT(ixa->ixa_tsl != NULL);
			label_rele(ixa->ixa_tsl);
			ixa->ixa_free_flags &= ~IXA_FREE_TSL;
			ixa->ixa_tsl = NULL;
		}

		if ((connp->conn_mlp_type != mlptSingle ||
		    connp->conn_mac_mode != CONN_MAC_DEFAULT) &&
		    ira->ira_tsl != NULL) {
			/*
			 * If this is an MLP connection or a MAC-Exempt
			 * connection with an unlabeled node, packets are to be
			 * exchanged using the security label of the received
			 * Cookie packet instead of the server application's
			 * label.
			 * tsol_check_dest called from ip_set_destination
			 * might later update TSF_UNLABELED by replacing
			 * ixa_tsl with a new label.
			 */
			label_hold(ira->ira_tsl);
			ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
		} else {
			ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
		}
	}

	err = sctp_accept_comm(sctp, eager, mp, ip_hdr_len, iack);
	if (err != 0) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}

	ASSERT(eager->sctp_current->ixa != NULL);

	ixa = eager->sctp_current->ixa;
	if (!(ira->ira_flags & IXAF_IS_IPV4)) {
		ASSERT(!(ixa->ixa_flags & IXAF_IS_IPV4));

		if (IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_src) ||
		    IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_dst)) {
			eager->sctp_linklocal = 1;

			ixa->ixa_flags |= IXAF_SCOPEID_SET;
			ixa->ixa_scopeid = ifindex;
			econnp->conn_incoming_ifindex = ifindex;
		}
	}

	/*
	 * On a clustered node, send this notification to the clustering
	 * subsystem.
	 */
	if (cl_sctp_connect != NULL) {
		uchar_t	*slist;
		uchar_t	*flist;
		size_t	fsize;
		size_t	ssize;

		fsize = sizeof (in6_addr_t) * eager->sctp_nfaddrs;
		ssize = sizeof (in6_addr_t) * eager->sctp_nsaddrs;
		slist = kmem_alloc(ssize, KM_NOSLEEP);
		flist = kmem_alloc(fsize, KM_NOSLEEP);
		if (slist == NULL || flist == NULL) {
			if (slist != NULL)
				kmem_free(slist, ssize);
			if (flist != NULL)
				kmem_free(flist, fsize);
			sctp_close_eager(eager);
			BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
			SCTP_KSTAT(sctps, sctp_cl_connect);
			return (NULL);
		}
		/* The clustering module frees these lists */
		sctp_get_saddr_list(eager, slist, ssize);
		sctp_get_faddr_list(eager, flist, fsize);
		(*cl_sctp_connect)(econnp->conn_family, slist,
		    eager->sctp_nsaddrs, econnp->conn_lport, flist,
		    eager->sctp_nfaddrs, econnp->conn_fport, B_FALSE,
		    (cl_sctp_handle_t)eager);
	}

	/* Connection established, so send up the conn_ind */
	if ((eager->sctp_ulpd = sctp->sctp_ulp_newconn(sctp->sctp_ulpd,
	    (sock_lower_handle_t)eager, NULL, cr, cpid,
	    &eager->sctp_upcalls)) == NULL) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}
	ASSERT(SCTP_IS_DETACHED(eager));
	eager->sctp_detached = B_FALSE;
	bzero(&sopp, sizeof (sopp));
	sopp.sopp_flags = SOCKOPT_MAXBLK|SOCKOPT_WROFF;
	sopp.sopp_maxblk = strmsgsz;
	if (econnp->conn_family == AF_INET) {
		sopp.sopp_wroff = sctps->sctps_wroff_xtra +
		    sizeof (sctp_data_hdr_t) + sctp->sctp_hdr_len;
	} else {
		sopp.sopp_wroff = sctps->sctps_wroff_xtra +
		    sizeof (sctp_data_hdr_t) + sctp->sctp_hdr6_len;
	}
	eager->sctp_ulp_prop(eager->sctp_ulpd, &sopp);
	return (eager);
}
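
The clustering hook shows the usual all-or-nothing allocation pattern: allocate both address lists up front and, if either allocation fails, free whichever one succeeded before dropping the eager. A userland sketch of the same pattern, with malloc/free standing in for kmem_alloc/kmem_free with KM_NOSLEEP:

#include <stdlib.h>

/*
 * Sketch of the all-or-nothing allocation above: allocate both buffers,
 * and on partial failure release whichever one succeeded before bailing.
 */
static int
alloc_addr_lists(size_t ssize, size_t fsize, void **slistp, void **flistp)
{
	void *slist = malloc(ssize);
	void *flist = malloc(fsize);

	if (slist == NULL || flist == NULL) {
		free(slist);	/* free(NULL) is a no-op */
		free(flist);
		return (-1);	/* caller abandons the connection attempt */
	}
	*slistp = slist;
	*flistp = flist;
	return (0);
}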
Example #8
/*
 * OOTB (out of the blue) version of sctp_send_abort().
 * If iserror == 0, sends an abort. If iserror != 0, sends an error.
 */
void
sctp_ootb_send_abort(uint32_t vtag, uint16_t serror, char *details,
    size_t len, const mblk_t *inmp, int iserror, boolean_t tbit,
    ip_recv_attr_t *ira, ip_stack_t *ipst)
{
	uint32_t	ip_hdr_len;
	size_t		ahlen;
	ipha_t		*ipha = NULL;
	ip6_t		*ip6h = NULL;
	sctp_hdr_t	*insctph;
	int		i;
	uint16_t	port;
	ssize_t		alen;
	int		isv4;
	mblk_t		*mp;
	netstack_t	*ns = ipst->ips_netstack;
	sctp_stack_t	*sctps = ns->netstack_sctp;
	ip_xmit_attr_t	ixas;

	bzero(&ixas, sizeof (ixas));

	isv4 = (IPH_HDR_VERSION(inmp->b_rptr) == IPV4_VERSION);
	ip_hdr_len = ira->ira_ip_hdr_length;
	ahlen = ip_hdr_len + sizeof (sctp_hdr_t);

	/*
	 * If this is a labeled system, then check to see if we're allowed to
	 * send a response to this particular sender.  If not, then just drop.
	 */
	if (is_system_labeled() && !tsol_can_reply_error(inmp, ira))
		return;

	mp = allocb(ahlen + sctps->sctps_wroff_xtra, BPRI_MED);
	if (mp == NULL) {
		return;
	}
	mp->b_rptr += sctps->sctps_wroff_xtra;
	mp->b_wptr = mp->b_rptr + ahlen;
	bcopy(inmp->b_rptr, mp->b_rptr, ahlen);

	/*
	 * We follow the logic in tcp_xmit_early_reset() in that we skip
	 * reversing source route (i.e. replace all IP options with EOL).
	 */
	if (isv4) {
		ipaddr_t	v4addr;

		ipha = (ipha_t *)mp->b_rptr;
		for (i = IP_SIMPLE_HDR_LENGTH; i < (int)ip_hdr_len; i++)
			mp->b_rptr[i] = IPOPT_EOL;
		/* Swap addresses */
		ipha->ipha_length = htons(ahlen);
		v4addr = ipha->ipha_src;
		ipha->ipha_src = ipha->ipha_dst;
		ipha->ipha_dst = v4addr;
		ipha->ipha_ident = 0;
		ipha->ipha_ttl = (uchar_t)sctps->sctps_ipv4_ttl;

		ixas.ixa_flags = IXAF_BASIC_SIMPLE_V4;
	} else {
		in6_addr_t	v6addr;

		ip6h = (ip6_t *)mp->b_rptr;
		/* Remove any extension headers assuming partial overlay */
		if (ip_hdr_len > IPV6_HDR_LEN) {
			uint8_t	*to;

			to = mp->b_rptr + ip_hdr_len - IPV6_HDR_LEN;
			ovbcopy(ip6h, to, IPV6_HDR_LEN);
			mp->b_rptr += ip_hdr_len - IPV6_HDR_LEN;
			ip_hdr_len = IPV6_HDR_LEN;
			ip6h = (ip6_t *)mp->b_rptr;
			ip6h->ip6_nxt = IPPROTO_SCTP;
			ahlen = ip_hdr_len + sizeof (sctp_hdr_t);
		}
		ip6h->ip6_plen = htons(ahlen - IPV6_HDR_LEN);
		v6addr = ip6h->ip6_src;
		ip6h->ip6_src = ip6h->ip6_dst;
		ip6h->ip6_dst = v6addr;
		ip6h->ip6_hops = (uchar_t)sctps->sctps_ipv6_hoplimit;

		ixas.ixa_flags = IXAF_BASIC_SIMPLE_V6;
		if (IN6_IS_ADDR_LINKSCOPE(&ip6h->ip6_dst)) {
			ixas.ixa_flags |= IXAF_SCOPEID_SET;
			ixas.ixa_scopeid = ira->ira_ruifindex;
		}
	}
	insctph = (sctp_hdr_t *)(mp->b_rptr + ip_hdr_len);

	/* Swap ports.  Verification tag is reused. */
	port = insctph->sh_sport;
	insctph->sh_sport = insctph->sh_dport;
	insctph->sh_dport = port;
	insctph->sh_verf = vtag;

	/* Link in the abort chunk */
	if ((alen = sctp_link_abort(mp, serror, details, len, iserror, tbit))
	    < 0) {
		freemsg(mp);
		return;
	}

	ixas.ixa_pktlen = ahlen + alen;
	ixas.ixa_ip_hdr_length = ip_hdr_len;

	if (isv4) {
		ipha->ipha_length = htons(ixas.ixa_pktlen);
	} else {
		ip6h->ip6_plen = htons(ixas.ixa_pktlen - IPV6_HDR_LEN);
	}

	ixas.ixa_protocol = IPPROTO_SCTP;
	ixas.ixa_zoneid = ira->ira_zoneid;
	ixas.ixa_ipst = ipst;
	ixas.ixa_ifindex = 0;

	SCTPS_BUMP_MIB(sctps, sctpAborted);

	if (is_system_labeled()) {
		ASSERT(ira->ira_tsl != NULL);

		ixas.ixa_tsl = ira->ira_tsl;	/* A multi-level responder */
	}

	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		/*
		 * Apply IPsec based on how IPsec was applied to
		 * the packet that was out of the blue.
		 */
		if (!ipsec_in_to_out(ira, &ixas, mp, ipha, ip6h)) {
			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
			/* Note: mp already consumed and ip_drop_packet done */
			return;
		}
	} else {
		/*
		 * This is in clear. The abort message we are building
		 * here should go out in clear, independent of our policy.
		 */
		ixas.ixa_flags |= IXAF_NO_IPSEC;
	}

	(void) ip_output_simple(mp, &ixas);
	ixa_cleanup(&ixas);
}
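
Because an out-of-the-blue packet has no association, the reply headers are built by reflecting the offending packet itself: addresses and ports are swapped in place and the verification tag is reused. A tiny sketch of that reflection over hypothetical trimmed-down headers:

#include <stdint.h>

/* Hypothetical trimmed-down headers, just the fields swapped above. */
struct ip4_hdr {
	uint32_t src;
	uint32_t dst;
};

struct sctp_common_hdr {
	uint16_t sport;
	uint16_t dport;
	uint32_t verf;
};

/*
 * Sketch of turning a received OOTB packet's headers into reply headers:
 * swap the IP addresses, swap the SCTP ports, and reuse the verification
 * tag chosen by the caller.
 */
static void
reflect_headers(struct ip4_hdr *iph, struct sctp_common_hdr *sh, uint32_t vtag)
{
	uint32_t a = iph->src;
	uint16_t p = sh->sport;

	iph->src = iph->dst;
	iph->dst = a;
	sh->sport = sh->dport;
	sh->dport = p;
	sh->verf = vtag;
}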
Example #9
/*
 * If iserror == 0, sends an abort. If iserror != 0, sends an error.
 */
void
sctp_send_abort(sctp_t *sctp, uint32_t vtag, uint16_t serror, char *details,
    size_t len, mblk_t *inmp, int iserror, boolean_t tbit, ip_recv_attr_t *ira)
{

	mblk_t		*hmp;
	uint32_t	ip_hdr_len;
	ipha_t		*iniph;
	ipha_t		*ahiph = NULL;
	ip6_t		*inip6h;
	ip6_t		*ahip6h = NULL;
	sctp_hdr_t	*sh;
	sctp_hdr_t	*insh;
	size_t		ahlen;
	uchar_t		*p;
	ssize_t		alen;
	int		isv4;
	conn_t		*connp = sctp->sctp_connp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	ip_xmit_attr_t	*ixa;

	isv4 = (IPH_HDR_VERSION(inmp->b_rptr) == IPV4_VERSION);
	if (isv4) {
		ahlen = sctp->sctp_hdr_len;
	} else {
		ahlen = sctp->sctp_hdr6_len;
	}

	/*
	 * If this is a labeled system, then check to see if we're allowed to
	 * send a response to this particular sender.  If not, then just drop.
	 */
	if (is_system_labeled() && !tsol_can_reply_error(inmp, ira))
		return;

	hmp = allocb(sctps->sctps_wroff_xtra + ahlen, BPRI_MED);
	if (hmp == NULL) {
		/* XXX no resources */
		return;
	}

	/* copy in the IP / SCTP header */
	p = hmp->b_rptr + sctps->sctps_wroff_xtra;
	hmp->b_rptr = p;
	hmp->b_wptr = p + ahlen;
	if (isv4) {
		bcopy(sctp->sctp_iphc, p, sctp->sctp_hdr_len);
		/*
		 * Composite is likely incomplete at this point, so pull
		 * info from the incoming IP / SCTP headers.
		 */
		ahiph = (ipha_t *)p;
		iniph = (ipha_t *)inmp->b_rptr;
		ip_hdr_len = IPH_HDR_LENGTH(inmp->b_rptr);

		sh = (sctp_hdr_t *)(p + sctp->sctp_ip_hdr_len);
		ASSERT(OK_32PTR(sh));

		insh = (sctp_hdr_t *)((uchar_t *)iniph + ip_hdr_len);
		ASSERT(OK_32PTR(insh));

		/* Copy in the peer's IP addr */
		ahiph->ipha_dst = iniph->ipha_src;
		ahiph->ipha_src = iniph->ipha_dst;
	} else {
		bcopy(sctp->sctp_iphc6, p, sctp->sctp_hdr6_len);
		ahip6h = (ip6_t *)p;
		inip6h = (ip6_t *)inmp->b_rptr;
		ip_hdr_len = ip_hdr_length_v6(inmp, inip6h);

		sh = (sctp_hdr_t *)(p + sctp->sctp_ip_hdr6_len);
		ASSERT(OK_32PTR(sh));

		insh = (sctp_hdr_t *)((uchar_t *)inip6h + ip_hdr_len);
		ASSERT(OK_32PTR(insh));

		/* Copy in the peer's IP addr */
		ahip6h->ip6_dst = inip6h->ip6_src;
		ahip6h->ip6_src = inip6h->ip6_dst;
	}

	/* Fill in the holes in the SCTP common header */
	sh->sh_sport = insh->sh_dport;
	sh->sh_dport = insh->sh_sport;
	sh->sh_verf = vtag;

	/* Link in the abort chunk */
	if ((alen = sctp_link_abort(hmp, serror, details, len, iserror, tbit))
	    < 0) {
		freemsg(hmp);
		return;
	}

	/*
	 * Base the transmission on any routing-related socket options
	 * that have been set on the listener/connection.
	 */
	ixa = conn_get_ixa_exclusive(connp);
	if (ixa == NULL) {
		freemsg(hmp);
		return;
	}
	ixa->ixa_flags &= ~IXAF_VERIFY_PMTU;

	ixa->ixa_pktlen = ahlen + alen;
	if (isv4) {
		ixa->ixa_flags |= IXAF_IS_IPV4;
		ahiph->ipha_length = htons(ixa->ixa_pktlen);
		ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr_len;
	} else {
		ixa->ixa_flags &= ~IXAF_IS_IPV4;
		ahip6h->ip6_plen = htons(ixa->ixa_pktlen - IPV6_HDR_LEN);
		ixa->ixa_ip_hdr_length = sctp->sctp_ip_hdr6_len;
	}

	SCTPS_BUMP_MIB(sctps, sctpAborted);
	BUMP_LOCAL(sctp->sctp_obchunks);

	if (is_system_labeled() && ixa->ixa_tsl != NULL) {
		ASSERT(ira->ira_tsl != NULL);

		ixa->ixa_tsl = ira->ira_tsl;	/* A multi-level responder */
	}

	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		/*
		 * Apply IPsec based on how IPsec was applied to
		 * the packet that caused the abort.
		 */
		if (!ipsec_in_to_out(ira, ixa, hmp, ahiph, ahip6h)) {
			ip_stack_t *ipst = sctps->sctps_netstack->netstack_ip;

			BUMP_MIB(&ipst->ips_ip_mib, ipIfStatsOutDiscards);
			/* Note: mp already consumed and ip_drop_packet done */
			ixa_refrele(ixa);
			return;
		}
	} else {
		ixa->ixa_flags |= IXAF_NO_IPSEC;
	}

	BUMP_LOCAL(sctp->sctp_opkts);
	BUMP_LOCAL(sctp->sctp_obchunks);

	(void) ip_output_simple(hmp, ixa);
	ixa_refrele(ixa);
}