Example #1
0
static int
sctp_kstat_update(kstat_t *kp, int rw)
{
	sctp_named_kstat_t	*sctpkp;
	sctp_t			*sctp, *sctp_prev;
	zoneid_t		zoneid;

	if (kp == NULL || kp->ks_data == NULL)
		return (EIO);

	if (rw == KSTAT_WRITE)
		return (EACCES);

	zoneid = getzoneid();

	/*
	 * Get the number of current associations and gather each
	 * association's individual statistics.
	 */
	SET_MIB(sctp_mib.sctpCurrEstab, 0);
	sctp = gsctp;
	sctp_prev = NULL;
	mutex_enter(&sctp_g_lock);
	while (sctp != NULL) {
		mutex_enter(&sctp->sctp_reflock);
		if (sctp->sctp_condemned) {
			mutex_exit(&sctp->sctp_reflock);
			sctp = list_next(&sctp_g_list, sctp);
			continue;
		}
		sctp->sctp_refcnt++;
		mutex_exit(&sctp->sctp_reflock);
		mutex_exit(&sctp_g_lock);
		if (sctp_prev != NULL)
			SCTP_REFRELE(sctp_prev);
		if (sctp->sctp_connp->conn_zoneid != zoneid)
			goto next_sctp;
		if (sctp->sctp_state == SCTPS_ESTABLISHED ||
		    sctp->sctp_state == SCTPS_SHUTDOWN_PENDING ||
		    sctp->sctp_state == SCTPS_SHUTDOWN_RECEIVED) {
			BUMP_MIB(&sctp_mib, sctpCurrEstab);
		}

		if (sctp->sctp_opkts) {
			UPDATE_MIB(&sctp_mib, sctpOutSCTPPkts,
			    sctp->sctp_opkts);
			sctp->sctp_opkts = 0;
		}

		if (sctp->sctp_obchunks) {
			UPDATE_MIB(&sctp_mib, sctpOutCtrlChunks,
			    sctp->sctp_obchunks);
			sctp->sctp_obchunks = 0;
		}

		if (sctp->sctp_odchunks) {
			UPDATE_MIB(&sctp_mib, sctpOutOrderChunks,
			    sctp->sctp_odchunks);
			sctp->sctp_odchunks = 0;
		}

		if (sctp->sctp_oudchunks) {
			UPDATE_MIB(&sctp_mib, sctpOutUnorderChunks,
			    sctp->sctp_oudchunks);
			sctp->sctp_oudchunks = 0;
		}

		if (sctp->sctp_rxtchunks) {
			UPDATE_MIB(&sctp_mib, sctpRetransChunks,
			    sctp->sctp_rxtchunks);
			sctp->sctp_rxtchunks = 0;
		}

		if (sctp->sctp_ipkts) {
			UPDATE_MIB(&sctp_mib, sctpInSCTPPkts, sctp->sctp_ipkts);
			sctp->sctp_ipkts = 0;
		}

		if (sctp->sctp_ibchunks) {
			UPDATE_MIB(&sctp_mib, sctpInCtrlChunks,
			    sctp->sctp_ibchunks);
			sctp->sctp_ibchunks = 0;
		}

		if (sctp->sctp_idchunks) {
			UPDATE_MIB(&sctp_mib, sctpInOrderChunks,
			    sctp->sctp_idchunks);
			sctp->sctp_idchunks = 0;
		}

		if (sctp->sctp_iudchunks) {
			UPDATE_MIB(&sctp_mib, sctpInUnorderChunks,
			    sctp->sctp_iudchunks);
			sctp->sctp_iudchunks = 0;
		}

		if (sctp->sctp_fragdmsgs) {
			UPDATE_MIB(&sctp_mib, sctpFragUsrMsgs,
			    sctp->sctp_fragdmsgs);
			sctp->sctp_fragdmsgs = 0;
		}

		if (sctp->sctp_reassmsgs) {
			UPDATE_MIB(&sctp_mib, sctpReasmUsrMsgs,
			    sctp->sctp_reassmsgs);
			sctp->sctp_reassmsgs = 0;
		}

next_sctp:
		sctp_prev = sctp;
		mutex_enter(&sctp_g_lock);
		sctp = list_next(&sctp_g_list, sctp);
	}
	mutex_exit(&sctp_g_lock);
	if (sctp_prev != NULL)
		SCTP_REFRELE(sctp_prev);

	/* Copy data from the SCTP MIB */
	sctpkp = (sctp_named_kstat_t *)kp->ks_data;

	/* These are from global ndd params. */
	sctpkp->sctpRtoMin.value.ui32 = sctp_rto_ming;
	sctpkp->sctpRtoMax.value.ui32 = sctp_rto_maxg;
	sctpkp->sctpRtoInitial.value.ui32 = sctp_rto_initialg;
	sctpkp->sctpValCookieLife.value.ui32 = sctp_cookie_life;
	sctpkp->sctpMaxInitRetr.value.ui32 = sctp_max_init_retr;

	sctpkp->sctpCurrEstab.value.i32 = sctp_mib.sctpCurrEstab;
	sctpkp->sctpActiveEstab.value.i32 = sctp_mib.sctpActiveEstab;
	sctpkp->sctpPassiveEstab.value.i32 = sctp_mib.sctpPassiveEstab;
	sctpkp->sctpAborted.value.i32 = sctp_mib.sctpAborted;
	sctpkp->sctpShutdowns.value.i32 = sctp_mib.sctpShutdowns;
	sctpkp->sctpOutOfBlue.value.i32 = sctp_mib.sctpOutOfBlue;
	sctpkp->sctpChecksumError.value.i32 = sctp_mib.sctpChecksumError;
	sctpkp->sctpOutCtrlChunks.value.i64 = sctp_mib.sctpOutCtrlChunks;
	sctpkp->sctpOutOrderChunks.value.i64 = sctp_mib.sctpOutOrderChunks;
	sctpkp->sctpOutUnorderChunks.value.i64 = sctp_mib.sctpOutUnorderChunks;
	sctpkp->sctpRetransChunks.value.i64 = sctp_mib.sctpRetransChunks;
	sctpkp->sctpOutAck.value.i32 = sctp_mib.sctpOutAck;
	sctpkp->sctpOutAckDelayed.value.i32 = sctp_mib.sctpOutAckDelayed;
	sctpkp->sctpOutWinUpdate.value.i32 = sctp_mib.sctpOutWinUpdate;
	sctpkp->sctpOutFastRetrans.value.i32 = sctp_mib.sctpOutFastRetrans;
	sctpkp->sctpOutWinProbe.value.i32 = sctp_mib.sctpOutWinProbe;
	sctpkp->sctpInCtrlChunks.value.i64 = sctp_mib.sctpInCtrlChunks;
	sctpkp->sctpInOrderChunks.value.i64 = sctp_mib.sctpInOrderChunks;
	sctpkp->sctpInUnorderChunks.value.i64 = sctp_mib.sctpInUnorderChunks;
	sctpkp->sctpInAck.value.i32 = sctp_mib.sctpInAck;
	sctpkp->sctpInDupAck.value.i32 = sctp_mib.sctpInDupAck;
	sctpkp->sctpInAckUnsent.value.i32 = sctp_mib.sctpInAckUnsent;
	sctpkp->sctpFragUsrMsgs.value.i64 = sctp_mib.sctpFragUsrMsgs;
	sctpkp->sctpReasmUsrMsgs.value.i64 = sctp_mib.sctpReasmUsrMsgs;
	sctpkp->sctpOutSCTPPkts.value.i64 = sctp_mib.sctpOutSCTPPkts;
	sctpkp->sctpInSCTPPkts.value.i64 = sctp_mib.sctpInSCTPPkts;
	sctpkp->sctpInInvalidCookie.value.i32 = sctp_mib.sctpInInvalidCookie;
	sctpkp->sctpTimRetrans.value.i32 = sctp_mib.sctpTimRetrans;
	sctpkp->sctpTimRetransDrop.value.i32 = sctp_mib.sctpTimRetransDrop;
	sctpkp->sctpTimHeartBeatProbe.value.i32 =
	    sctp_mib.sctpTimHeartBeatProbe;
	sctpkp->sctpTimHeartBeatDrop.value.i32 = sctp_mib.sctpTimHeartBeatDrop;
	sctpkp->sctpListenDrop.value.i32 = sctp_mib.sctpListenDrop;
	sctpkp->sctpInClosed.value.i32 = sctp_mib.sctpInClosed;

	return (0);
}
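For context, a ks_update routine like sctp_kstat_update() only takes effect once it is hooked onto an installed kstat. The fragment below is a hypothetical registration sketch using the standard kstat(9F) interfaces; sctp_kstat_sketch_init() and its exact placement are assumptions, not the actual illumos initialization code.

/*
 * Hypothetical registration sketch: create a named kstat sized to
 * sctp_named_kstat_t, hook sctp_kstat_update() as the ks_update
 * routine, and install it.  Initializing the individual
 * kstat_named_t names/types from a template is omitted here.
 */
static kstat_t *sctp_mibkp;

static void
sctp_kstat_sketch_init(void)
{
	sctp_mibkp = kstat_create("sctp", 0, "sctp", "mib2",
	    KSTAT_TYPE_NAMED,
	    sizeof (sctp_named_kstat_t) / sizeof (kstat_named_t), 0);
	if (sctp_mibkp == NULL)
		return;

	sctp_mibkp->ks_update = sctp_kstat_update;
	kstat_install(sctp_mibkp);
}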
Example #2
0
/* ARGSUSED */
void
ip_fanout_sctp(mblk_t *mp, ill_t *recv_ill, ipha_t *ipha,
    uint32_t ports, uint_t flags, boolean_t mctl_present, boolean_t ip_policy,
    uint_t ipif_seqid, zoneid_t zoneid)
{
	sctp_t *sctp;
	boolean_t isv4;
	conn_t *connp;
	mblk_t *first_mp;
	ip6_t *ip6h;
	in6_addr_t map_src, map_dst;
	in6_addr_t *src, *dst;

	first_mp = mp;
	if (mctl_present) {
		mp = first_mp->b_cont;
		ASSERT(mp != NULL);
	}

	/* Assume IP provides aligned packets - otherwise toss */
	if (!OK_32PTR(mp->b_rptr)) {
		BUMP_MIB(&ip_mib, ipInDiscards);
		freemsg(first_mp);
		return;
	}

	if (IPH_HDR_VERSION(ipha) == IPV6_VERSION) {
		ip6h = (ip6_t *)ipha;
		src = &ip6h->ip6_src;
		dst = &ip6h->ip6_dst;
		isv4 = B_FALSE;
	} else {
		ip6h = NULL;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &map_src);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &map_dst);
		src = &map_src;
		dst = &map_dst;
		isv4 = B_TRUE;
	}
	if ((connp = sctp_fanout(src, dst, ports, ipif_seqid, zoneid, mp)) ==
	    NULL) {
		ip_fanout_sctp_raw(first_mp, recv_ill, ipha, isv4,
		    ports, mctl_present, flags, ip_policy,
		    ipif_seqid, zoneid);
		return;
	}
	sctp = CONN2SCTP(connp);

	/* Found a client; up it goes */
	BUMP_MIB(&ip_mib, ipInDelivers);

	/*
	 * We check some fields in conn_t without holding a lock.
	 * This should be fine.
	 */
	if (CONN_INBOUND_POLICY_PRESENT(connp) || mctl_present) {
		first_mp = ipsec_check_inbound_policy(first_mp, connp,
		    ipha, NULL, mctl_present);
		if (first_mp == NULL) {
			SCTP_REFRELE(sctp);
			return;
		}
	}

	/* Initiate IPPF processing for fastpath */
	if (IPP_ENABLED(IPP_LOCAL_IN)) {
		ip_process(IPP_LOCAL_IN, &mp,
		    recv_ill->ill_phyint->phyint_ifindex);
		if (mp == NULL) {
			SCTP_REFRELE(sctp);
			if (mctl_present)
				freeb(first_mp);
			return;
		} else if (mctl_present) {
			/*
			 * ip_process might return a new mp.
			 */
			ASSERT(first_mp != mp);
			first_mp->b_cont = mp;
		} else {
			first_mp = mp;
		}
	}

	if (connp->conn_recvif || connp->conn_recvslla ||
	    connp->conn_ipv6_recvpktinfo) {
		int in_flags = 0;

		if (connp->conn_recvif || connp->conn_ipv6_recvpktinfo) {
			in_flags = IPF_RECVIF;
		}
		if (connp->conn_recvslla) {
			in_flags |= IPF_RECVSLLA;
		}
		if (isv4) {
			mp = ip_add_info(mp, recv_ill, in_flags);
		} else {
			mp = ip_add_info_v6(mp, recv_ill, &ip6h->ip6_dst);
		}
		if (mp == NULL) {
			SCTP_REFRELE(sctp);
			if (mctl_present)
				freeb(first_mp);
			return;
		} else if (mctl_present) {
			/*
			 * ip_add_info might return a new mp.
			 */
			ASSERT(first_mp != mp);
			first_mp->b_cont = mp;
		} else {
			first_mp = mp;
		}
	}

	mutex_enter(&sctp->sctp_lock);
	if (sctp->sctp_running) {
		if (mctl_present)
			mp->b_prev = first_mp;
		if (!sctp_add_recvq(sctp, mp, B_FALSE)) {
			BUMP_MIB(&ip_mib, ipInDiscards);
			freemsg(first_mp);
		}
		mutex_exit(&sctp->sctp_lock);
	} else {
		sctp->sctp_running = B_TRUE;
		mutex_exit(&sctp->sctp_lock);

		mutex_enter(&sctp->sctp_recvq_lock);
		if (sctp->sctp_recvq != NULL) {
			if (mctl_present)
				mp->b_prev = first_mp;
			if (!sctp_add_recvq(sctp, mp, B_TRUE)) {
				BUMP_MIB(&ip_mib, ipInDiscards);
				freemsg(first_mp);
			}
			mutex_exit(&sctp->sctp_recvq_lock);
			WAKE_SCTP(sctp);
		} else {
			mutex_exit(&sctp->sctp_recvq_lock);
			sctp_input_data(sctp, mp, (mctl_present ? first_mp :
			    NULL));
			WAKE_SCTP(sctp);
			sctp_process_sendq(sctp);
		}
	}
	SCTP_REFRELE(sctp);
}
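The first_mp/mp juggling above follows the usual M_CTL convention: when mctl_present is set, first_mp is a control mblk (e.g. IPsec information) and the SCTP packet rides on its b_cont, and stashing first_mp in mp->b_prev before queueing lets the deferred recvq consumer recover the whole chain. Below is a minimal sketch of that layout using only standard mblk_t fields; sctp_mctl_data_sketch() itself is hypothetical.

/*
 * Sketch of the mblk layout assumed by ip_fanout_sctp() when an M_CTL
 * block precedes the data.  Returns the data mblk; illustrative only.
 */
static mblk_t *
sctp_mctl_data_sketch(mblk_t *first_mp, boolean_t mctl_present)
{
	mblk_t *data_mp = first_mp;

	if (mctl_present) {
		/* first_mp is the M_CTL block; the SCTP packet follows. */
		data_mp = first_mp->b_cont;
		ASSERT(data_mp != NULL);
		/*
		 * Remember the chain head so a deferred consumer (the
		 * recvq drain) can recover and free the control block.
		 */
		data_mp->b_prev = first_mp;
	}
	return (data_mp);
}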
Example #3
0
/*
 * Walk the SCTP global list and refrele the ire for this ipif.
 * This is called when an address goes down, so that we release any
 * reference to the ire associated with this address.  Additionally, for
 * any association for which this was the only/last address in its source
 * list, we don't kill the association; if no address is added
 * subsequently, or if this one does not come back up, the association
 * will die a natural death (i.e. time out).
 */
void
sctp_ire_cache_flush(ipif_t *ipif)
{
	sctp_t			*sctp;
	sctp_t			*sctp_prev = NULL;
	sctp_faddr_t		*fp;
	conn_t			*connp;
	ire_t			*ire;

	sctp = gsctp;
	mutex_enter(&sctp_g_lock);
	while (sctp != NULL) {
		mutex_enter(&sctp->sctp_reflock);
		if (sctp->sctp_condemned) {
			mutex_exit(&sctp->sctp_reflock);
			sctp = list_next(&sctp_g_list, sctp);
			continue;
		}
		sctp->sctp_refcnt++;
		mutex_exit(&sctp->sctp_reflock);
		mutex_exit(&sctp_g_lock);
		if (sctp_prev != NULL)
			SCTP_REFRELE(sctp_prev);

		RUN_SCTP(sctp);
		connp = sctp->sctp_connp;
		mutex_enter(&connp->conn_lock);
		ire = connp->conn_ire_cache;
		if (ire != NULL && ire->ire_ipif == ipif) {
			connp->conn_ire_cache = NULL;
			mutex_exit(&connp->conn_lock);
			IRE_REFRELE_NOTR(ire);
		} else {
			mutex_exit(&connp->conn_lock);
		}
		/* check for ires cached in faddr */
		for (fp = sctp->sctp_faddrs; fp != NULL; fp = fp->next) {
			/*
			 * If this ipif is being used as the source address
			 * we need to update it as well, else we will end
			 * up using the dead source address.
			 */
			ire = fp->ire;
			if (ire != NULL && ire->ire_ipif == ipif) {
				fp->ire = NULL;
				IRE_REFRELE_NOTR(ire);
			}
			/*
			 * This may result in setting the fp as unreachable,
			 * i.e. if all the source addresses are down. In
			 * that case the assoc. would timeout.
			 */
			if (IN6_ARE_ADDR_EQUAL(&ipif->ipif_v6lcl_addr,
			    &fp->saddr)) {
				sctp_set_saddr(sctp, fp);
				if (fp == sctp->sctp_current &&
				    fp->state != SCTP_FADDRS_UNREACH) {
					sctp_set_faddr_current(sctp, fp);
				}
			}
		}
		WAKE_SCTP(sctp);
		sctp_prev = sctp;
		mutex_enter(&sctp_g_lock);
		sctp = list_next(&sctp_g_list, sctp);
	}
	mutex_exit(&sctp_g_lock);
	if (sctp_prev != NULL)
		SCTP_REFRELE(sctp_prev);
}
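Examples #1, #3, and #4 share the same traversal idiom: hold sctp_g_lock only long enough to pin the next non-condemned association with a reference, drop the lock while working on it, and release the previous association once the next one is held. A stripped-down sketch of that idiom follows; sctp_do_work() is a hypothetical stand-in for the per-association processing.

/*
 * Sketch of the global-list walk idiom shared by the functions above.
 */
static void
sctp_walk_sketch(void)
{
	sctp_t	*sctp, *sctp_prev = NULL;

	sctp = gsctp;
	mutex_enter(&sctp_g_lock);
	while (sctp != NULL) {
		mutex_enter(&sctp->sctp_reflock);
		if (sctp->sctp_condemned) {
			/* Skip associations already being torn down. */
			mutex_exit(&sctp->sctp_reflock);
			sctp = list_next(&sctp_g_list, sctp);
			continue;
		}
		sctp->sctp_refcnt++;		/* pin this association */
		mutex_exit(&sctp->sctp_reflock);
		mutex_exit(&sctp_g_lock);

		/* The previous association can now be released safely. */
		if (sctp_prev != NULL)
			SCTP_REFRELE(sctp_prev);

		sctp_do_work(sctp);		/* hypothetical per-assoc work */

		sctp_prev = sctp;
		mutex_enter(&sctp_g_lock);
		sctp = list_next(&sctp_g_list, sctp);
	}
	mutex_exit(&sctp_g_lock);
	if (sctp_prev != NULL)
		SCTP_REFRELE(sctp_prev);
}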
Example #4
0
/*
 * Exported routine for extracting active SCTP associations.
 * Like TCP, we terminate the walk if the callback returns non-zero.
 */
int
cl_sctp_walk_list(int (*cl_callback)(cl_sctp_info_t *, void *), void *arg,
    boolean_t cansleep)
{
	sctp_t		*sctp;
	sctp_t		*sctp_prev;
	cl_sctp_info_t	cl_sctpi;
	uchar_t		*slist;
	uchar_t		*flist;

	sctp = gsctp;
	sctp_prev = NULL;
	mutex_enter(&sctp_g_lock);
	while (sctp != NULL) {
		size_t	ssize;
		size_t	fsize;

		mutex_enter(&sctp->sctp_reflock);
		if (sctp->sctp_condemned || sctp->sctp_state <= SCTPS_LISTEN) {
			mutex_exit(&sctp->sctp_reflock);
			sctp = list_next(&sctp_g_list, sctp);
			continue;
		}
		sctp->sctp_refcnt++;
		mutex_exit(&sctp->sctp_reflock);
		mutex_exit(&sctp_g_lock);
		if (sctp_prev != NULL)
			SCTP_REFRELE(sctp_prev);
		RUN_SCTP(sctp);
		ssize = sizeof (in6_addr_t) * sctp->sctp_nsaddrs;
		fsize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;

		slist = kmem_alloc(ssize, cansleep ? KM_SLEEP : KM_NOSLEEP);
		flist = kmem_alloc(fsize, cansleep ? KM_SLEEP : KM_NOSLEEP);
		if (slist == NULL || flist == NULL) {
			WAKE_SCTP(sctp);
			if (slist != NULL)
				kmem_free(slist, ssize);
			if (flist != NULL)
				kmem_free(flist, fsize);
			SCTP_REFRELE(sctp);
			return (1);
		}
		cl_sctpi.cl_sctpi_version = CL_SCTPI_V1;
		sctp_get_saddr_list(sctp, slist, ssize);
		sctp_get_faddr_list(sctp, flist, fsize);
		cl_sctpi.cl_sctpi_nladdr = sctp->sctp_nsaddrs;
		cl_sctpi.cl_sctpi_nfaddr = sctp->sctp_nfaddrs;
		cl_sctpi.cl_sctpi_family = sctp->sctp_family;
		cl_sctpi.cl_sctpi_ipversion = sctp->sctp_ipversion;
		cl_sctpi.cl_sctpi_state = sctp->sctp_state;
		cl_sctpi.cl_sctpi_lport = sctp->sctp_lport;
		cl_sctpi.cl_sctpi_fport = sctp->sctp_fport;
		cl_sctpi.cl_sctpi_handle = (cl_sctp_handle_t)sctp;
		WAKE_SCTP(sctp);
		cl_sctpi.cl_sctpi_laddrp = slist;
		cl_sctpi.cl_sctpi_faddrp = flist;
		if ((*cl_callback)(&cl_sctpi, arg) != 0) {
			kmem_free(slist, ssize);
			kmem_free(flist, fsize);
			SCTP_REFRELE(sctp);
			return (1);
		}
		/* list will be freed by cl_callback */
		sctp_prev = sctp;
		mutex_enter(&sctp_g_lock);
		sctp = list_next(&sctp_g_list, sctp);
	}
	mutex_exit(&sctp_g_lock);
	if (sctp_prev != NULL)
		SCTP_REFRELE(sctp_prev);
	return (0);
}
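Because the walker hands ownership of the two address lists to the callback on success (see the comment above), the callback must free them itself. The sketch below is a hypothetical cl_callback illustrating that contract; the element counts in cl_sctpi_nladdr and cl_sctpi_nfaddr recover the allocation sizes.

/* ARGSUSED */
static int
cl_sctp_print_assoc(cl_sctp_info_t *sctpi, void *arg)
{
	size_t	ssize = sizeof (in6_addr_t) * sctpi->cl_sctpi_nladdr;
	size_t	fsize = sizeof (in6_addr_t) * sctpi->cl_sctpi_nfaddr;

	/* ... inspect cl_sctpi_state, ports, and the address lists ... */

	/* The walker transferred ownership of both lists to us. */
	kmem_free(sctpi->cl_sctpi_laddrp, ssize);
	kmem_free(sctpi->cl_sctpi_faddrp, fsize);

	return (0);	/* non-zero would terminate the walk */
}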
/*
 * Connect to a peer - this function inserts the sctp in the
 * bind and conn fanouts, sends the INIT, and replies to the client
 * with an OK ack.
 */
int
sctp_connect(sctp_t *sctp, const struct sockaddr *dst, uint32_t addrlen,
    cred_t *cr, pid_t pid)
{
	sin_t		*sin;
	sin6_t		*sin6;
	in6_addr_t	dstaddr;
	in_port_t	dstport;
	mblk_t		*initmp;
	sctp_tf_t	*tbf;
	sctp_t		*lsctp;
	char		buf[INET6_ADDRSTRLEN];
	int		sleep = sctp->sctp_cansleep ? KM_SLEEP : KM_NOSLEEP;
	int		err;
	sctp_faddr_t	*cur_fp;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	conn_t		*connp = sctp->sctp_connp;
	uint_t		scope_id = 0;
	ip_xmit_attr_t	*ixa;

	/*
	 * Determine the packet type based on the type of address passed
	 * in; the request should contain an IPv4 or IPv6 address.
	 * Make sure that the connection's address family matches the
	 * family of the address passed down.
	 */
	if (addrlen < sizeof (sin_t)) {
		return (EINVAL);
	}
	switch (dst->sa_family) {
	case AF_INET:
		sin = (sin_t *)dst;

		/* Check for attempt to connect to non-unicast */
		if (CLASSD(sin->sin_addr.s_addr) ||
		    (sin->sin_addr.s_addr == INADDR_BROADCAST)) {
			ip0dbg(("sctp_connect: non-unicast\n"));
			return (EINVAL);
		}
		if (connp->conn_ipv6_v6only)
			return (EAFNOSUPPORT);

		/* convert to v6 mapped */
		/* Check for attempt to connect to INADDR_ANY */
		if (sin->sin_addr.s_addr == INADDR_ANY)  {
			struct in_addr v4_addr;
			/*
			 * SunOS 4.x and 4.3 BSD allow an application
			 * to connect a TCP socket to INADDR_ANY.
			 * When they do this, the kernel picks the
			 * address of one interface and uses it
			 * instead.  The kernel usually ends up
			 * picking the address of the loopback
			 * interface.  This is an undocumented feature.
			 * However, we provide the same thing here
			 * in case any TCP apps that use this feature
			 * are being ported to SCTP...
			 */
			v4_addr.s_addr = htonl(INADDR_LOOPBACK);
			IN6_INADDR_TO_V4MAPPED(&v4_addr, &dstaddr);
		} else {
			IN6_INADDR_TO_V4MAPPED(&sin->sin_addr, &dstaddr);
		}
		dstport = sin->sin_port;
		break;
	case AF_INET6:
		sin6 = (sin6_t *)dst;
		/* Check for attempt to connect to non-unicast. */
		if ((addrlen < sizeof (sin6_t)) ||
		    IN6_IS_ADDR_MULTICAST(&sin6->sin6_addr)) {
			ip0dbg(("sctp_connect: non-unicast\n"));
			return (EINVAL);
		}
		if (connp->conn_ipv6_v6only &&
		    IN6_IS_ADDR_V4MAPPED(&sin6->sin6_addr)) {
			return (EAFNOSUPPORT);
		}
		/* check for attempt to connect to unspec */
		if (IN6_IS_ADDR_UNSPECIFIED(&sin6->sin6_addr)) {
			dstaddr = ipv6_loopback;
		} else {
			dstaddr = sin6->sin6_addr;
			if (IN6_IS_ADDR_LINKLOCAL(&dstaddr)) {
				sctp->sctp_linklocal = 1;
				scope_id = sin6->sin6_scope_id;
			}
		}
		dstport = sin6->sin6_port;
		connp->conn_flowinfo = sin6->sin6_flowinfo;
		break;
	default:
		dprint(1, ("sctp_connect: unknown family %d\n",
		    dst->sa_family));
		return (EAFNOSUPPORT);
	}

	(void) inet_ntop(AF_INET6, &dstaddr, buf, sizeof (buf));
	dprint(1, ("sctp_connect: attempting connect to %s...\n", buf));

	RUN_SCTP(sctp);

	if (connp->conn_family != dst->sa_family ||
	    (connp->conn_state_flags & CONN_CLOSING)) {
		WAKE_SCTP(sctp);
		return (EINVAL);
	}

	/* We update our cred/cpid based on the caller of connect */
	if (connp->conn_cred != cr) {
		crhold(cr);
		crfree(connp->conn_cred);
		connp->conn_cred = cr;
	}
	connp->conn_cpid = pid;

	/* Cache things in conn_ixa without any refhold */
	ixa = connp->conn_ixa;
	ixa->ixa_cred = cr;
	ixa->ixa_cpid = pid;
	if (is_system_labeled()) {
		/* We need to restart with a label based on the cred */
		ip_xmit_attr_restore_tsl(ixa, ixa->ixa_cred);
	}

	switch (sctp->sctp_state) {
	case SCTPS_IDLE: {
		struct sockaddr_storage	ss;

		/*
		 * We support a quick connect capability here, allowing
		 * clients to transition directly from IDLE to COOKIE_WAIT.
		 * sctp_bindi will pick an unused port, insert the connection
		 * in the bind hash and transition to BOUND state. SCTP
		 * picks and uses what it considers the optimal local address
		 * set (just like specifying INADDR_ANY to bind()).
		 */
		dprint(1, ("sctp_connect: idle, attempting bind...\n"));
		ASSERT(sctp->sctp_nsaddrs == 0);

		bzero(&ss, sizeof (ss));
		ss.ss_family = connp->conn_family;
		WAKE_SCTP(sctp);
		if ((err = sctp_bind(sctp, (struct sockaddr *)&ss,
		    sizeof (ss))) != 0) {
			return (err);
		}
		RUN_SCTP(sctp);
		/* FALLTHRU */
	}

	case SCTPS_BOUND:
		ASSERT(sctp->sctp_nsaddrs > 0);

		/* do the connect */
		/* XXX check for attempt to connect to self */
		connp->conn_fport = dstport;

		ASSERT(sctp->sctp_iphc);
		ASSERT(sctp->sctp_iphc6);

		/*
		 * Don't allow this connection to completely duplicate
		 * an existing connection.
		 *
		 * Ensure that the duplicate check and insertion is atomic.
		 */
		sctp_conn_hash_remove(sctp);
		tbf = &sctps->sctps_conn_fanout[SCTP_CONN_HASH(sctps,
		    connp->conn_ports)];
		mutex_enter(&tbf->tf_lock);
		lsctp = sctp_lookup(sctp, &dstaddr, tbf, &connp->conn_ports,
		    SCTPS_COOKIE_WAIT);
		if (lsctp != NULL) {
			/* found a duplicate connection */
			mutex_exit(&tbf->tf_lock);
			SCTP_REFRELE(lsctp);
			WAKE_SCTP(sctp);
			return (EADDRINUSE);
		}

		/*
		 * OK; set up the peer addr (this may grow after we get
		 * the INIT ACK from the peer with additional addresses).
		 */
		if ((err = sctp_add_faddr(sctp, &dstaddr, sleep,
		    B_FALSE)) != 0) {
			mutex_exit(&tbf->tf_lock);
			WAKE_SCTP(sctp);
			return (err);
		}
		cur_fp = sctp->sctp_faddrs;
		ASSERT(cur_fp->ixa != NULL);

		/* No valid src addr, return. */
		if (cur_fp->state == SCTP_FADDRS_UNREACH) {
			mutex_exit(&tbf->tf_lock);
			WAKE_SCTP(sctp);
			return (EADDRNOTAVAIL);
		}

		sctp->sctp_primary = cur_fp;
		sctp->sctp_current = cur_fp;
		sctp->sctp_mss = cur_fp->sfa_pmss;
		sctp_conn_hash_insert(tbf, sctp, 1);
		mutex_exit(&tbf->tf_lock);

		ixa = cur_fp->ixa;
		ASSERT(ixa->ixa_cred != NULL);

		if (scope_id != 0) {
			ixa->ixa_flags |= IXAF_SCOPEID_SET;
			ixa->ixa_scopeid = scope_id;
		} else {
			ixa->ixa_flags &= ~IXAF_SCOPEID_SET;
		}

		/* initialize composite headers */
		if ((err = sctp_set_hdraddrs(sctp)) != 0) {
			sctp_conn_hash_remove(sctp);
			WAKE_SCTP(sctp);
			return (err);
		}

		if ((err = sctp_build_hdrs(sctp, KM_SLEEP)) != 0) {
			sctp_conn_hash_remove(sctp);
			WAKE_SCTP(sctp);
			return (err);
		}

		/*
		 * Turn off the don't fragment bit on the (only) faddr,
		 * so that if one of the messages exchanged during the
		 * initialization sequence exceeds the path mtu, it
		 * at least has a chance to get there. SCTP does no
		 * fragmentation of initialization messages.  The DF bit
		 * will be turned on again in sctp_send_cookie_echo()
		 * (but the cookie echo will still be sent with the df bit
		 * off).
		 */
		cur_fp->df = B_FALSE;

		/* Mark this address as alive */
		cur_fp->state = SCTP_FADDRS_ALIVE;

		/* Send the INIT to the peer */
		SCTP_FADDR_TIMER_RESTART(sctp, cur_fp, cur_fp->rto);
		sctp->sctp_state = SCTPS_COOKIE_WAIT;
		/*
		 * sctp_init_mp() could result in modifying the source
		 * address list, so take the hash lock.
		 */
		mutex_enter(&tbf->tf_lock);
		initmp = sctp_init_mp(sctp, cur_fp);
		if (initmp == NULL) {
			mutex_exit(&tbf->tf_lock);
			/*
			 * It may happen that all the source addresses
			 * (loopback/link local) are removed.  In that case,
			 * fail the connect.
			 */
			if (sctp->sctp_nsaddrs == 0) {
				sctp_conn_hash_remove(sctp);
				SCTP_FADDR_TIMER_STOP(cur_fp);
				WAKE_SCTP(sctp);
				return (EADDRNOTAVAIL);
			}

			/* Otherwise, let the retransmission timer retry */
			WAKE_SCTP(sctp);
			goto notify_ulp;
		}
		mutex_exit(&tbf->tf_lock);

		/*
		 * On a clustered node, send this notification to the clustering
		 * subsystem.
		 */
		if (cl_sctp_connect != NULL) {
			uchar_t		*slist;
			uchar_t		*flist;
			size_t		ssize;
			size_t		fsize;

			fsize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;
			ssize = sizeof (in6_addr_t) * sctp->sctp_nsaddrs;
			slist = kmem_alloc(ssize, KM_SLEEP);
			flist = kmem_alloc(fsize, KM_SLEEP);
			/* The clustering module frees the lists */
			sctp_get_saddr_list(sctp, slist, ssize);
			sctp_get_faddr_list(sctp, flist, fsize);
			(*cl_sctp_connect)(connp->conn_family, slist,
			    sctp->sctp_nsaddrs, connp->conn_lport,
			    flist, sctp->sctp_nfaddrs, connp->conn_fport,
			    B_TRUE, (cl_sctp_handle_t)sctp);
		}
		ASSERT(ixa->ixa_cred != NULL);
		ASSERT(ixa->ixa_ire != NULL);

		(void) conn_ip_output(initmp, ixa);
		BUMP_LOCAL(sctp->sctp_opkts);
		WAKE_SCTP(sctp);

notify_ulp:
		sctp_set_ulp_prop(sctp);

		return (0);
	default:
		ip0dbg(("sctp_connect: invalid state. %d\n", sctp->sctp_state));
		WAKE_SCTP(sctp);
		return (EINVAL);
	}
}
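For illustration, a socket-layer caller would hand sctp_connect() a filled-in sockaddr plus the caller's credential and pid. The fragment below is a hypothetical sketch for an IPv4 peer at 192.0.2.1; obtaining the sctp_t, cred_t, and pid is outside the scope of the example.

/*
 * Hypothetical caller sketch: connect an existing association to
 * 192.0.2.1:36412 (example address and port).
 */
static int
sctp_connect_sketch(sctp_t *sctp, cred_t *cr, pid_t pid)
{
	sin_t	sin;

	bzero(&sin, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(36412);			/* example port */
	sin.sin_addr.s_addr = htonl(0xC0000201);	/* 192.0.2.1 */

	return (sctp_connect(sctp, (struct sockaddr *)&sin,
	    sizeof (sin), cr, pid));
}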