Example #1
/*
 * Called from sctp_input_data() to add one error chunk to the error
 * chunks list.  The error chunks list will be processed at the end
 * of sctp_input_data() by calling sctp_process_err().
 */
void
sctp_add_err(sctp_t *sctp, uint16_t serror, void *details, size_t len,
    sctp_faddr_t *dest)
{
	sctp_stack_t *sctps = sctp->sctp_sctps;
	mblk_t *emp;
	uint32_t emp_len;
	uint32_t mss;
	mblk_t *sendmp;
	sctp_faddr_t *fp;

	emp = sctp_make_err(sctp, serror, details, len);
	if (emp == NULL)
		return;
	emp_len = MBLKL(emp);
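	/*
	 * All queued error chunks share one destination: reuse the one
	 * recorded on the existing list, otherwise tag the new chunk
	 * with the requested destination.
	 */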
	if (sctp->sctp_err_chunks != NULL) {
		fp = SCTP_CHUNK_DEST(sctp->sctp_err_chunks);
	} else {
		fp = dest;
		SCTP_SET_CHUNK_DEST(emp, dest);
	}
	mss = fp->sf_pmss;

	/*
	 * If the current output packet cannot include the new error chunk,
	 * send out the current packet and then add the new error chunk
	 * to the new output packet.
	 */
	if (sctp->sctp_err_len + emp_len > mss) {
		if ((sendmp = sctp_make_mp(sctp, fp, 0)) == NULL) {
			SCTP_KSTAT(sctps, sctp_send_err_failed);
			/* Just free the latest error chunk. */
			freeb(emp);
			return;
		}
		sendmp->b_cont = sctp->sctp_err_chunks;
		sctp_set_iplen(sctp, sendmp, fp->sf_ixa);
		(void) conn_ip_output(sendmp, fp->sf_ixa);
		BUMP_LOCAL(sctp->sctp_opkts);

		sctp->sctp_err_chunks = emp;
		sctp->sctp_err_len = emp_len;
		SCTP_SET_CHUNK_DEST(emp, dest);
	} else {
		if (sctp->sctp_err_chunks != NULL)
			linkb(sctp->sctp_err_chunks, emp);
		else
			sctp->sctp_err_chunks = emp;
		sctp->sctp_err_len += emp_len;
	}
	/* Assume that we will send it out... */
	BUMP_LOCAL(sctp->sctp_obchunks);
}
Example #2
/*
 * Called from sctp_input_data() to send out error chunks created during
 * the processing of all the chunks in an incoming packet.
 */
void
sctp_process_err(sctp_t *sctp)
{
	sctp_stack_t *sctps = sctp->sctp_sctps;
	mblk_t *errmp;
	mblk_t *sendmp;
	sctp_faddr_t *fp;

	ASSERT(sctp->sctp_err_chunks != NULL);
	errmp = sctp->sctp_err_chunks;
	fp = SCTP_CHUNK_DEST(errmp);
	if ((sendmp = sctp_make_mp(sctp, fp, 0)) == NULL) {
		SCTP_KSTAT(sctps, sctp_send_err_failed);
		freemsg(errmp);
		goto done;
	}
	sendmp->b_cont = errmp;
	sctp_set_iplen(sctp, sendmp, fp->sf_ixa);
	(void) conn_ip_output(sendmp, fp->sf_ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
done:
	sctp->sctp_err_chunks = NULL;
	sctp->sctp_err_len = 0;
}
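/*
 * Illustrative sketch, not part of the original source: how a caller in
 * the input path might pair the two routines above.  The helper name and
 * the way the cause data is obtained are hypothetical; only sctp_add_err()
 * and sctp_process_err() come from the code shown.
 */
static void
example_report_error(sctp_t *sctp, uint16_t cause, void *details, size_t dlen,
    sctp_faddr_t *fp)
{
	/* Queue the error cause; it accumulates on sctp->sctp_err_chunks. */
	sctp_add_err(sctp, cause, details, dlen, fp);

	/* Flush all queued error chunks to the peer in a single packet. */
	if (sctp->sctp_err_chunks != NULL)
		sctp_process_err(sctp);
}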
Example #3
/* ARGSUSED */
void
sctp_conn_reclaim(void *arg)
{
	netstack_handle_t nh;
	netstack_t *ns;
	sctp_stack_t *sctps;
	extern pgcnt_t lotsfree, needfree;

	if (!sctp_do_reclaim)
		return;

	/*
	 * The reclaim function may be called even when the system is not
	 * really under memory pressure.
	 */
	if (freemem >= lotsfree + needfree)
		return;

	netstack_next_init(&nh);
	while ((ns = netstack_next(&nh)) != NULL) {
		int i;
		int64_t tot_assoc = 0;

		/*
		 * During boot time, the first netstack_t is created and
		 * initialized before SCTP has registered with the netstack
		 * framework.  If this reclaim function is called before SCTP
		 * has finished its initialization, netstack_next() will
		 * return the first netstack_t (since its netstack_flags is
		 * not NSF_UNINIT).  And its netstack_sctp will be NULL.  We
		 * need to catch it.
		 *
		 * All subsequent netstack_t creation will not have this
		 * problem since the initialization is not finished until SCTP
		 * has finished its own sctp_stack_t initialization.  Hence
		 * netstack_next() will not return one with NULL netstack_sctp.
		 */
		if ((sctps = ns->netstack_sctp) == NULL) {
			netstack_rele(ns);
			continue;
		}

		/*
		 * Even if the system is under memory pressure, the reason may
		 * not be because of SCTP activity.  Check the number of
		 * associations in each stack.  If the number exceeds the
		 * threshold (maxusers), turn on defensive mode.
		 */
		for (i = 0; i < sctps->sctps_sc_cnt; i++)
			tot_assoc += sctps->sctps_sc[i]->sctp_sc_assoc_cnt;
		if (tot_assoc < maxusers) {
			netstack_rele(ns);
			continue;
		}

		mutex_enter(&sctps->sctps_reclaim_lock);
		if (!sctps->sctps_reclaim) {
			sctps->sctps_reclaim = B_TRUE;
			sctps->sctps_reclaim_tid = timeout(sctp_reclaim_timer,
			    sctps, MSEC_TO_TICK(sctps->sctps_reclaim_period));
			SCTP_KSTAT(sctps, sctp_reclaim_cnt);
		}
		mutex_exit(&sctps->sctps_reclaim_lock);
		netstack_rele(ns);
	}
	netstack_next_fini(&nh);
}
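/*
 * Illustrative sketch, not part of the original source: sctp_conn_reclaim()
 * has the void (*)(void *) shape of a kmem cache reclaim callback, so it
 * would typically be registered when a connection cache is created.  The
 * cache name, object size and NULL constructor/destructor below are
 * placeholders, not what the SCTP module actually uses.
 */
static kmem_cache_t *example_conn_cache;

static void
example_conn_cache_init(void)
{
	example_conn_cache = kmem_cache_create("example_sctp_conn_cache",
	    sizeof (sctp_t), 0, NULL, NULL,
	    sctp_conn_reclaim,	/* called back under memory pressure */
	    NULL, NULL, 0);
}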
/*
 * Sets the address parameters given in the INIT chunk into sctp's
 * faddrs; if psctp is non-NULL, copies psctp's saddrs. If there are
 * no address parameters in the INIT chunk, a single faddr is created
 * from the ip hdr at the beginning of pkt.
 * If there already are existing addresses hanging from sctp, merge
 * them in; if the old info contains addresses which are not present
 * in this new info, get rid of them, and clean the pointers of any
 * messages which have such an address as their target address.
 *
 * We also re-adjust the source address list here since the list may
 * contain more than what is actually part of the association. If
 * we get here from sctp_send_cookie_echo(), we are on the active
 * side and psctp will be NULL and ich will be the INIT-ACK chunk.
 * If we get here from sctp_accept_comm(), ich will be the INIT chunk
 * and psctp will be the listening endpoint.
 *
 * INIT processing: When processing the INIT we inherit the src address
 * list from the listener. For a loopback or linklocal association, we
 * delete the list and just take the address from the IP header (since
 * that's how we created the INIT-ACK). Additionally, for loopback we
 * ignore the address params in the INIT. For determining which address
 * types were sent in the INIT-ACK we follow the same logic as in
 * creating the INIT-ACK. We delete addresses of the type that are not
 * supported by the peer.
 *
 * INIT-ACK processing: When processing the INIT-ACK since we had not
 * included addr params for loopback or linklocal addresses when creating
 * the INIT, we just use the address from the IP header. Further, for
 * loopback we ignore the addr param list. We mark addresses of the
 * type not supported by the peer as unconfirmed.
 *
 * In case of INIT processing we look for supported address types in the
 * supported address param, if present. In both cases the address type in
 * the IP header is supported as well as types for addresses in the param
 * list, if any.
 *
 * Once we have the supported address types, sctp_check_saddr() runs
 * through the source address list and deletes, or marks as unconfirmed,
 * addresses of types not supported by the peer.
 *
 * Returns 0 on success, sys errno on failure
 */
int
sctp_get_addrparams(sctp_t *sctp, sctp_t *psctp, mblk_t *pkt,
    sctp_chunk_hdr_t *ich, uint_t *sctp_options)
{
	sctp_init_chunk_t	*init;
	ipha_t			*iph;
	ip6_t			*ip6h;
	in6_addr_t		hdrsaddr[1];
	in6_addr_t		hdrdaddr[1];
	sctp_parm_hdr_t		*ph;
	ssize_t			remaining;
	int			isv4;
	int			err;
	sctp_faddr_t		*fp;
	int			supp_af = 0;
	boolean_t		check_saddr = B_TRUE;
	in6_addr_t		curaddr;
	sctp_stack_t		*sctps = sctp->sctp_sctps;
	conn_t			*connp = sctp->sctp_connp;

	if (sctp_options != NULL)
		*sctp_options = 0;

	/* extract the address from the IP header */
	isv4 = (IPH_HDR_VERSION(pkt->b_rptr) == IPV4_VERSION);
	if (isv4) {
		iph = (ipha_t *)pkt->b_rptr;
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_src, hdrsaddr);
		IN6_IPADDR_TO_V4MAPPED(iph->ipha_dst, hdrdaddr);
		supp_af |= PARM_SUPP_V4;
	} else {
		ip6h = (ip6_t *)pkt->b_rptr;
		hdrsaddr[0] = ip6h->ip6_src;
		hdrdaddr[0] = ip6h->ip6_dst;
		supp_af |= PARM_SUPP_V6;
	}

	/*
	 * Unfortunately, we can't delay this because adding an faddr
	 * looks for the presence of the source address (from the ire
	 * for the faddr) in the source address list. We could have
	 * delayed this if, say, this was a loopback/linklocal connection.
	 * Now, we just end up nuking this list and taking the addr from
	 * the IP header for loopback/linklocal.
	 */
	if (psctp != NULL && psctp->sctp_nsaddrs > 0) {
		ASSERT(sctp->sctp_nsaddrs == 0);

		err = sctp_dup_saddrs(psctp, sctp, KM_NOSLEEP);
		if (err != 0)
			return (err);
	}
	/*
	 * We will add the faddr before parsing the address list as this
	 * might be a loopback connection and we would not have to
	 * go through the list.
	 *
	 * Make sure the header's addr is in the list
	 */
	fp = sctp_lookup_faddr(sctp, hdrsaddr);
	if (fp == NULL) {
		/* not included; add it now */
		err = sctp_add_faddr(sctp, hdrsaddr, KM_NOSLEEP, B_TRUE);
		if (err != 0)
			return (err);

		/* sctp_faddrs will be the hdr addr */
		fp = sctp->sctp_faddrs;
	}
	/* Save the addr the INIT was sent to; clustering may need it below */
	if (cl_sctp_assoc_change != NULL && psctp == NULL)
		curaddr = sctp->sctp_current->faddr;

	/* make the header addr the primary */
	sctp->sctp_primary = fp;
	sctp->sctp_current = fp;
	sctp->sctp_mss = fp->sfa_pmss;

	/* For loopback connections & linklocal get address from the header */
	if (sctp->sctp_loopback || sctp->sctp_linklocal) {
		if (sctp->sctp_nsaddrs != 0)
			sctp_free_saddrs(sctp);
		if ((err = sctp_saddr_add_addr(sctp, hdrdaddr, 0)) != 0)
			return (err);
		/* For loopback ignore address list */
		if (sctp->sctp_loopback)
			return (0);
		check_saddr = B_FALSE;
	}

	/* Walk the params in the INIT [ACK], pulling out addr params */
	remaining = ntohs(ich->sch_len) - sizeof (*ich) -
	    sizeof (sctp_init_chunk_t);
	if (remaining < sizeof (*ph)) {
		if (check_saddr) {
			sctp_check_saddr(sctp, supp_af, psctp == NULL ?
			    B_FALSE : B_TRUE, hdrdaddr);
		}
		ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
		return (0);
	}

	init = (sctp_init_chunk_t *)(ich + 1);
	ph = (sctp_parm_hdr_t *)(init + 1);

	/* params will have already been byteordered when validating */
	while (ph != NULL) {
		if (ph->sph_type == htons(PARM_SUPP_ADDRS)) {
			int		plen;
			uint16_t	*p;
			uint16_t	addrtype;

			ASSERT(psctp != NULL);
			plen = ntohs(ph->sph_len);
			p = (uint16_t *)(ph + 1);
			while (plen > 0) {
				addrtype = ntohs(*p);
				switch (addrtype) {
				case PARM_ADDR6:
					supp_af |= PARM_SUPP_V6;
					break;
				case PARM_ADDR4:
					supp_af |= PARM_SUPP_V4;
					break;
				default:
					break;
				}
				p++;
				plen -= sizeof (*p);
			}
		} else if (ph->sph_type == htons(PARM_ADDR4)) {
			if (remaining >= PARM_ADDR4_LEN) {
				in6_addr_t addr;
				ipaddr_t ta;

				supp_af |= PARM_SUPP_V4;
				/*
				 * Screen out broad/multicasts & loopback.
				 * If the endpoint only accepts v6 addresses,
				 * go to the next one.
				 *
				 * Subnet broadcast check is done in
				 * sctp_add_faddr().  If the address is
				 * a broadcast address, it won't be added.
				 */
				bcopy(ph + 1, &ta, sizeof (ta));
				if (ta == 0 ||
				    ta == INADDR_BROADCAST ||
				    ta == htonl(INADDR_LOOPBACK) ||
				    CLASSD(ta) || connp->conn_ipv6_v6only) {
					goto next;
				}
				IN6_INADDR_TO_V4MAPPED((struct in_addr *)
				    (ph + 1), &addr);

				/* Check for duplicate. */
				if (sctp_lookup_faddr(sctp, &addr) != NULL)
					goto next;

				/* OK, add it to the faddr set */
				err = sctp_add_faddr(sctp, &addr, KM_NOSLEEP,
				    B_FALSE);
				/* Something is wrong...  Try the next one. */
				if (err != 0)
					goto next;
			}
		} else if (ph->sph_type == htons(PARM_ADDR6) &&
		    connp->conn_family == AF_INET6) {
			/* A v4 socket should not take v6 addresses. */
			if (remaining >= PARM_ADDR6_LEN) {
				in6_addr_t *addr6;

				supp_af |= PARM_SUPP_V6;
				addr6 = (in6_addr_t *)(ph + 1);
				/*
				 * Screen out link locals, mcast, loopback
				 * and bogus v6 address.
				 */
				if (IN6_IS_ADDR_LINKLOCAL(addr6) ||
				    IN6_IS_ADDR_MULTICAST(addr6) ||
				    IN6_IS_ADDR_LOOPBACK(addr6) ||
				    IN6_IS_ADDR_V4MAPPED(addr6)) {
					goto next;
				}
				/* Check for duplicate. */
				if (sctp_lookup_faddr(sctp, addr6) != NULL)
					goto next;

				err = sctp_add_faddr(sctp,
				    (in6_addr_t *)(ph + 1), KM_NOSLEEP,
				    B_FALSE);
				/* Something is wrong...  Try the next one. */
				if (err != 0)
					goto next;
			}
		} else if (ph->sph_type == htons(PARM_FORWARD_TSN)) {
			if (sctp_options != NULL)
				*sctp_options |= SCTP_PRSCTP_OPTION;
		} /* else; skip */

next:
		ph = sctp_next_parm(ph, &remaining);
	}
	if (check_saddr) {
		sctp_check_saddr(sctp, supp_af, psctp == NULL ? B_FALSE :
		    B_TRUE, hdrdaddr);
	}
	ASSERT(sctp_saddr_lookup(sctp, hdrdaddr, 0) != NULL);
	/*
	 * We have the right address list now, update clustering's
	 * knowledge because when we sent the INIT we had just added
	 * the address the INIT was sent to.
	 */
	if (psctp == NULL && cl_sctp_assoc_change != NULL) {
		uchar_t	*alist;
		size_t	asize;
		uchar_t	*dlist;
		size_t	dsize;

		asize = sizeof (in6_addr_t) * sctp->sctp_nfaddrs;
		alist = kmem_alloc(asize, KM_NOSLEEP);
		if (alist == NULL) {
			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
			return (ENOMEM);
		}
		/*
		 * Just include the address the INIT was sent to in the
		 * delete list and send the entire faddr list. We could
		 * do it differently (i.e., include all the addresses in the
		 * add list even if it contains the original address OR
		 * remove the original address from the add list etc.), but
		 * this seems reasonable enough.
		 */
		dsize = sizeof (in6_addr_t);
		dlist = kmem_alloc(dsize, KM_NOSLEEP);
		if (dlist == NULL) {
			kmem_free(alist, asize);
			SCTP_KSTAT(sctps, sctp_cl_assoc_change);
			return (ENOMEM);
		}
		bcopy(&curaddr, dlist, sizeof (curaddr));
		sctp_get_faddr_list(sctp, alist, asize);
		(*cl_sctp_assoc_change)(connp->conn_family, alist, asize,
		    sctp->sctp_nfaddrs, dlist, dsize, 1, SCTP_CL_PADDR,
		    (cl_sctp_handle_t)sctp);
		/* alist and dlist will be freed by the clustering module */
	}
	return (0);
}
Example #5
/*
 * Add a list of addresses to a sctp_t.
 */
int
sctp_bind_add(sctp_t *sctp, const void *addrs, uint32_t addrcnt,
    boolean_t caller_hold_lock, in_port_t port)
{
	int		err = 0;
	boolean_t	do_asconf = B_FALSE;

	if (!caller_hold_lock)
		RUN_SCTP(sctp);

	if (sctp->sctp_state > SCTPS_ESTABLISHED) {
		if (!caller_hold_lock)
			WAKE_SCTP(sctp);
		return (EINVAL);
	}

	if (sctp->sctp_state > SCTPS_LISTEN) {
		/*
		 * Do these checks now rather than having to undo the
		 * add later if any of them fails.
		 */
		if (!sctp_addip_enabled || !sctp->sctp_understands_asconf ||
		    !sctp->sctp_understands_addip) {
			if (!caller_hold_lock)
				WAKE_SCTP(sctp);
			return (EINVAL);
		}
		do_asconf = B_TRUE;
	}
	/*
	 * On a clustered node, for an inaddr_any bind, we will pass the list
	 * of all the addresses in the global list, minus any address on the
	 * loopback interface, and expect the clustering subsystem to give us
	 * the correct list for the 'port'. For explicit binds we give the
	 * list of addresses and the clustering module validates it for the
	 * 'port'.
	 *
	 * On a non-clustered node, cl_sctp_check_addrs will be NULL and
	 * we proceed as usual.
	 */
	if (cl_sctp_check_addrs != NULL) {
		uchar_t		*addrlist = NULL;
		size_t		size = 0;
		int		unspec = 0;
		boolean_t	do_listen;
		uchar_t		*llist = NULL;
		size_t		lsize = 0;

		/*
		 * If we are adding addresses after listening, but before
		 * an association is established, we need to update the
		 * clustering module with this info.
		 */
		do_listen = !do_asconf && sctp->sctp_state > SCTPS_BOUND &&
		    cl_sctp_listen != NULL;

		err = sctp_get_addrlist(sctp, addrs, &addrcnt, &addrlist,
		    &unspec, &size);
		if (err != 0) {
			ASSERT(addrlist == NULL);
			ASSERT(addrcnt == 0);
			ASSERT(size == 0);
			if (!caller_hold_lock)
				WAKE_SCTP(sctp);
			SCTP_KSTAT(sctp_cl_check_addrs);
			return (err);
		}
		ASSERT(addrlist != NULL);
		(*cl_sctp_check_addrs)(sctp->sctp_family, port, &addrlist,
		    size, &addrcnt, unspec == 1);
		if (addrcnt == 0) {
			/* We free the list */
			kmem_free(addrlist, size);
			if (!caller_hold_lock)
				WAKE_SCTP(sctp);
			return (EINVAL);
		}
		if (do_listen) {
			lsize = sizeof (in6_addr_t) * addrcnt;
			llist = kmem_alloc(lsize, KM_SLEEP);
		}
		err = sctp_valid_addr_list(sctp, addrlist, addrcnt, llist,
		    lsize);
		if (err == 0 && do_listen) {
			(*cl_sctp_listen)(sctp->sctp_family, llist,
			    addrcnt, sctp->sctp_lport);
			/* list will be freed by the clustering module */
		} else if (err != 0 && llist != NULL) {
			kmem_free(llist, lsize);
		}
		/* free the list we allocated */
		kmem_free(addrlist, size);
	} else {
		err = sctp_valid_addr_list(sctp, addrs, addrcnt, NULL, 0);
	}
	if (err != 0) {
		if (!caller_hold_lock)
			WAKE_SCTP(sctp);
		return (err);
	}
	/* Need to send ASCONF messages */
	if (do_asconf) {
		err = sctp_add_ip(sctp, addrs, addrcnt);
		if (err != 0) {
			sctp_del_saddr_list(sctp, addrs, addrcnt, B_FALSE);
			if (!caller_hold_lock)
				WAKE_SCTP(sctp);
			return (err);
		}
	}
	if (!caller_hold_lock)
		WAKE_SCTP(sctp);
	if (do_asconf)
		sctp_process_sendq(sctp);
	return (0);
}
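/*
 * Illustrative sketch, not part of the original source: adding a single
 * IPv4 address to an existing endpoint with sctp_bind_add().  It assumes
 * that the address list is an array of sockaddr_in structures for an
 * AF_INET endpoint and that the port is already in network byte order;
 * the helper name is hypothetical.
 */
static int
example_bind_one_v4(sctp_t *sctp, ipaddr_t newaddr, in_port_t port)
{
	struct sockaddr_in sin;

	bzero(&sin, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_port = port;
	sin.sin_addr.s_addr = newaddr;

	/* The caller does not already hold the sctp lock. */
	return (sctp_bind_add(sctp, &sin, 1, B_FALSE, port));
}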
void
sctp_wput_asconf(sctp_t *sctp, sctp_faddr_t *fp)
{
#define	SCTP_SET_SENT_FLAG(mp)	((mp)->b_flag = SCTP_CHUNK_FLAG_SENT)

	mblk_t 			*mp;
	mblk_t			*ipmp;
	uint32_t 		*snp;
	sctp_parm_hdr_t		*ph;
	boolean_t		isv4;
	sctp_stack_t		*sctps = sctp->sctp_sctps;
	boolean_t		saddr_set;

	if (sctp->sctp_cchunk_pend || sctp->sctp_cxmit_list == NULL ||
	    /* Queue it for later transmission if not yet established */
	    sctp->sctp_state < SCTPS_ESTABLISHED) {
		ip2dbg(("sctp_wput_asconf: cchunk pending? (%d) or null "\
		    "sctp_cxmit_list? (%s) or incorrect state? (%x)\n",
		    sctp->sctp_cchunk_pend, sctp->sctp_cxmit_list == NULL ?
		    "yes" : "no", sctp->sctp_state));
		return;
	}

	if (fp == NULL)
		fp = sctp->sctp_current;

	/* OK to send */
	ipmp = sctp_make_mp(sctp, fp, 0);
	if (ipmp == NULL) {
		SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
		SCTP_KSTAT(sctps, sctp_send_asconf_failed);
		return;
	}
	mp = sctp->sctp_cxmit_list;
	/* Fill in the mandatory Address Parameter TLV */
	isv4 = (fp != NULL) ? fp->isv4 : sctp->sctp_current->isv4;
	ph = (sctp_parm_hdr_t *)(mp->b_rptr + sizeof (sctp_chunk_hdr_t) +
	    sizeof (uint32_t));
	if (isv4) {
		ipha_t		*ipha = (ipha_t *)ipmp->b_rptr;
		in6_addr_t	ipaddr;
		ipaddr_t	addr4;

		ph->sph_type = htons(PARM_ADDR4);
		ph->sph_len = htons(PARM_ADDR4_LEN);
		if (ipha->ipha_src != INADDR_ANY) {
			bcopy(&ipha->ipha_src, ph + 1, IP_ADDR_LEN);
		} else {
			ipaddr = sctp_get_valid_addr(sctp, B_FALSE, &saddr_set);
			/*
			 * All the addresses are down.
			 * Maybe we will have better luck next time.
			 */
			if (!saddr_set) {
				SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
				freeb(ipmp);
				return;
			}
			IN6_V4MAPPED_TO_IPADDR(&ipaddr, addr4);
			bcopy(&addr4, ph + 1, IP_ADDR_LEN);
		}
	} else {
		ip6_t		*ip6 = (ip6_t *)ipmp->b_rptr;
		in6_addr_t	ipaddr;

		ph->sph_type = htons(PARM_ADDR6);
		ph->sph_len = htons(PARM_ADDR6_LEN);
		if (!IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_src)) {
			bcopy(&ip6->ip6_src, ph + 1, IPV6_ADDR_LEN);
		} else {
			ipaddr = sctp_get_valid_addr(sctp, B_TRUE, &saddr_set);
			/*
			 * All the addresses are down.
			 * Maybe we will have better luck next time.
			 */
			if (!saddr_set) {
				SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
				freeb(ipmp);
				return;
			}
			bcopy(&ipaddr, ph + 1, IPV6_ADDR_LEN);
		}
	}

	/* Don't exceed CWND; bail out if we cannot dup the chunk to send */
	if ((MBLKL(mp) > (fp->cwnd - fp->suna)) ||
	    ((mp = dupb(sctp->sctp_cxmit_list)) == NULL)) {
		SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
		freeb(ipmp);
		return;
	}

	/* Set the serial number now, if sending for the first time */
	if (!SCTP_CHUNK_WANT_REXMIT(mp)) {
		snp = (uint32_t *)(mp->b_rptr + sizeof (sctp_chunk_hdr_t));
		*snp = htonl(sctp->sctp_lcsn++);
	}
	SCTP_CHUNK_CLEAR_FLAGS(mp);
	fp->suna += MBLKL(mp);
	/* Attach the header and send the chunk */
	ipmp->b_cont = mp;
	sctp->sctp_cchunk_pend = 1;

	SCTP_SET_SENT_FLAG(sctp->sctp_cxmit_list);
	SCTP_SET_CHUNK_DEST(sctp->sctp_cxmit_list, fp);
	sctp_set_iplen(sctp, ipmp, fp->ixa);
	(void) conn_ip_output(ipmp, fp->ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
	SCTP_FADDR_RC_TIMER_RESTART(sctp, fp, fp->rto);
#undef	SCTP_SET_SENT_FLAG
}
void
sctp_input_asconf(sctp_t *sctp, sctp_chunk_hdr_t *ch, sctp_faddr_t *fp)
{
	const dispatch_t	*dp;
	mblk_t			*hmp;
	mblk_t			*mp;
	uint32_t		*idp;
	uint32_t		*hidp;
	ssize_t			rlen;
	sctp_parm_hdr_t		*ph;
	sctp_chunk_hdr_t	*ach;
	int			cont;
	int			act;
	uint16_t		plen;
	uchar_t			*alist = NULL;
	size_t			asize = 0;
	uchar_t			*dlist = NULL;
	size_t			dsize = 0;
	uchar_t			*aptr = NULL;
	uchar_t			*dptr = NULL;
	int			acount = 0;
	int			dcount = 0;
	sctp_stack_t		*sctps = sctp->sctp_sctps;

	ASSERT(ch->sch_id == CHUNK_ASCONF);

	idp = (uint32_t *)(ch + 1);
	rlen = ntohs(ch->sch_len) - sizeof (*ch) - sizeof (*idp);

	if (rlen < 0 || rlen < sizeof (*idp)) {
		/* nothing there; bail out */
		return;
	}

	/* Check for duplicates */
	*idp = ntohl(*idp);
	if (*idp == (sctp->sctp_fcsn + 1)) {
		act = 1;
	} else if (*idp == sctp->sctp_fcsn) {
		act = 0;
	} else {
		/* stale or malicious packet; drop */
		return;
	}

	/* Create the ASCONF_ACK header */
	hmp = sctp_make_mp(sctp, fp, sizeof (*ach) + sizeof (*idp));
	if (hmp == NULL) {
		/* Let the peer retransmit */
		SCTP_KSTAT(sctps, sctp_send_asconf_ack_failed);
		return;
	}
	ach = (sctp_chunk_hdr_t *)hmp->b_wptr;
	ach->sch_id = CHUNK_ASCONF_ACK;
	ach->sch_flags = 0;
	/* Set the length later */
	hidp = (uint32_t *)(ach + 1);
	*hidp = htonl(*idp);
	hmp->b_wptr = (uchar_t *)(hidp + 1);

	/* Move to the Address Parameter */
	ph = (sctp_parm_hdr_t *)(idp + 1);
	if (rlen <= ntohs(ph->sph_len)) {
		freeb(hmp);
		return;
	}

	/*
	 * We already have the association here, so this address parameter
	 * doesn't seem to be very useful.  Should we make sure it is part
	 * of the association and send an error if not?
	 * Ignore it for now.
	 */
	rlen -= ntohs(ph->sph_len);
	ph = (sctp_parm_hdr_t *)((char *)ph + ntohs(ph->sph_len));

	/*
	 * We need to pre-allocate buffer before processing the ASCONF
	 * chunk. We don't want to fail allocating buffers after processing
	 * the ASCONF chunk. So, we walk the list and get the number of
	 * addresses added and/or deleted.
	 */
	if (cl_sctp_assoc_change != NULL) {
		sctp_parm_hdr_t	*oph = ph;
		ssize_t		orlen = rlen;

		/*
		 * This is not very efficient, but there is no better way of
		 * doing it.  It should be fine since normally the param list
		 * will not be very long.
		 */
		while (orlen > 0) {
			/* Sanity checks */
			if (orlen < sizeof (*oph))
				break;
			plen = ntohs(oph->sph_len);
			if (plen < sizeof (*oph) || plen > orlen)
				break;
			if (oph->sph_type == htons(PARM_ADD_IP))
				acount++;
			if (oph->sph_type == htons(PARM_DEL_IP))
				dcount++;
			oph = sctp_next_parm(oph, &orlen);
			if (oph == NULL)
				break;
		}
		if (acount > 0 || dcount > 0) {
			if (acount > 0) {
				asize = sizeof (in6_addr_t) * acount;
				alist = kmem_alloc(asize, KM_NOSLEEP);
				if (alist == NULL) {
					freeb(hmp);
					SCTP_KSTAT(sctps, sctp_cl_assoc_change);
					return;
				}
			}
			if (dcount > 0) {
				dsize = sizeof (in6_addr_t) * dcount;
				dlist = kmem_alloc(dsize, KM_NOSLEEP);
				if (dlist == NULL) {
					if (acount > 0)
						kmem_free(alist, asize);
					freeb(hmp);
					SCTP_KSTAT(sctps, sctp_cl_assoc_change);
					return;
				}
			}
			aptr = alist;
			dptr = dlist;
			/*
			 * We will get the actual count when we process
			 * the chunk.
			 */
			acount = 0;
			dcount = 0;
		}
	}
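	/*
	 * Walk the remaining parameters, dispatching each ASCONF request;
	 * any reply parameters returned are linked onto the ASCONF-ACK.
	 */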
	cont = 1;
	while (rlen > 0 && cont) {
		in6_addr_t	addr;

		/* Sanity checks */
		if (rlen < sizeof (*ph))
			break;
		plen = ntohs(ph->sph_len);
		if (plen < sizeof (*ph) || plen > rlen) {
			break;
		}
		idp = (uint32_t *)(ph + 1);
		dp = sctp_lookup_asconf_dispatch(ntohs(ph->sph_type));
		ASSERT(dp);
		if (dp->asconf) {
			mp = dp->asconf(sctp, ph, *idp, fp, &cont, act, &addr);
			if (cont == -1) {
				/*
				 * Not even enough memory to create
				 * an out-of-resources error. Free
				 * everything and return; the peer
				 * should retransmit.
				 */
				freemsg(hmp);
				if (alist != NULL)
					kmem_free(alist, asize);
				if (dlist != NULL)
					kmem_free(dlist, dsize);
				return;
			}
			if (mp != NULL) {
				linkb(hmp, mp);
			} else if (act != 0) {
				/* update the add/delete list */
				if (cl_sctp_assoc_change != NULL) {
					if (ph->sph_type ==
					    htons(PARM_ADD_IP)) {
						ASSERT(alist != NULL);
						bcopy(&addr, aptr,
						    sizeof (addr));
						aptr += sizeof (addr);
						acount++;
					} else if (ph->sph_type ==
					    htons(PARM_DEL_IP)) {
						ASSERT(dlist != NULL);
						bcopy(&addr, dptr,
						    sizeof (addr));
						dptr += sizeof (addr);
						dcount++;
					}
				}
			}
		}
		ph = sctp_next_parm(ph, &rlen);
		if (ph == NULL)
			break;
	}

	/*
	 * Update clustering's state for this assoc. Note acount/dcount
	 * could be zero (i.e. if the add/delete address(es) were not
 * processed successfully). Regardless, if asize/dsize is > 0,
	 * it is the clustering module's responsibility to free the lists.
	 */
	if (cl_sctp_assoc_change != NULL) {
		(*cl_sctp_assoc_change)(sctp->sctp_connp->conn_family,
		    alist, asize,
		    acount, dlist, dsize, dcount, SCTP_CL_PADDR,
		    (cl_sctp_handle_t)sctp);
		/* alist and dlist will be freed by the clustering module */
	}
	/* Now that the params have been processed, increment the fcsn */
	if (act) {
		sctp->sctp_fcsn++;
	}
	BUMP_LOCAL(sctp->sctp_obchunks);

	if (fp->isv4)
		ach->sch_len = htons(msgdsize(hmp) - sctp->sctp_hdr_len);
	else
		ach->sch_len = htons(msgdsize(hmp) - sctp->sctp_hdr6_len);

	sctp_set_iplen(sctp, hmp, fp->ixa);
	(void) conn_ip_output(hmp, fp->ixa);
	BUMP_LOCAL(sctp->sctp_opkts);
	sctp_validate_peer(sctp);
}
/* Process the COOKIE packet, mp, directed at the listener 'sctp' */
sctp_t *
sctp_conn_request(sctp_t *sctp, mblk_t *mp, uint_t ifindex, uint_t ip_hdr_len,
    sctp_init_chunk_t *iack, ip_recv_attr_t *ira)
{
	sctp_t	*eager;
	ip6_t	*ip6h;
	int	err;
	conn_t	*connp, *econnp;
	sctp_stack_t	*sctps;
	struct sock_proto_props sopp;
	cred_t		*cr;
	pid_t		cpid;
	in6_addr_t	faddr, laddr;
	ip_xmit_attr_t	*ixa;

	/*
	 * No need to check for duplicate as this is the listener
	 * and we are holding the lock.  This means that no new
	 * connection can be created out of it.  And since the
	 * fanout already done cannot find a match, it means that
	 * there is no duplicate.
	 */
	ASSERT(OK_32PTR(mp->b_rptr));

	if ((eager = sctp_create_eager(sctp)) == NULL) {
		return (NULL);
	}

	connp = sctp->sctp_connp;
	sctps = sctp->sctp_sctps;
	econnp = eager->sctp_connp;

	if (connp->conn_policy != NULL) {
		/* Inherit the policy from the listener; use actions from ira */
		if (!ip_ipsec_policy_inherit(econnp, connp, ira)) {
			sctp_close_eager(eager);
			BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
			return (NULL);
		}
	}

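	/* Extract the local and peer addresses from the IP header. */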
	ip6h = (ip6_t *)mp->b_rptr;
	if (ira->ira_flags & IXAF_IS_IPV4) {
		ipha_t	*ipha;

		ipha = (ipha_t *)ip6h;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &laddr);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &faddr);
	} else {
		laddr = ip6h->ip6_dst;
		faddr = ip6h->ip6_src;
	}

	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		/*
		 * XXX need to fix the cached policy issue here.
		 * We temporarily set the conn_laddr/conn_faddr here so
		 * that IPsec can use it for the latched policy
		 * selector.  This is obviously wrong as SCTP can
		 * use different addresses...
		 */
		econnp->conn_laddr_v6 = laddr;
		econnp->conn_faddr_v6 = faddr;
		econnp->conn_saddr_v6 = laddr;
	}
	if (ipsec_conn_cache_policy(econnp,
	    (ira->ira_flags & IRAF_IS_IPV4) != 0) != 0) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}

	/* Save for getpeerucred */
	cr = ira->ira_cred;
	cpid = ira->ira_cpid;

	if (is_system_labeled()) {
		ip_xmit_attr_t *ixa = econnp->conn_ixa;

		ASSERT(ira->ira_tsl != NULL);

		/* Discard any old label */
		if (ixa->ixa_free_flags & IXA_FREE_TSL) {
			ASSERT(ixa->ixa_tsl != NULL);
			label_rele(ixa->ixa_tsl);
			ixa->ixa_free_flags &= ~IXA_FREE_TSL;
			ixa->ixa_tsl = NULL;
		}

		if ((connp->conn_mlp_type != mlptSingle ||
		    connp->conn_mac_mode != CONN_MAC_DEFAULT) &&
		    ira->ira_tsl != NULL) {
			/*
			 * If this is an MLP connection or a MAC-Exempt
			 * connection with an unlabeled node, packets are to be
			 * exchanged using the security label of the received
			 * Cookie packet instead of the server application's
			 * label.
			 * tsol_check_dest called from ip_set_destination
			 * might later update TSF_UNLABELED by replacing
			 * ixa_tsl with a new label.
			 */
			label_hold(ira->ira_tsl);
			ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
		} else {
			ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
		}
	}

	err = sctp_accept_comm(sctp, eager, mp, ip_hdr_len, iack);
	if (err != 0) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}

	ASSERT(eager->sctp_current->ixa != NULL);

	ixa = eager->sctp_current->ixa;
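	/*
	 * For an IPv6 association involving a link-local address, remember
	 * the receiving interface as the scope id for outbound packets.
	 */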
	if (!(ira->ira_flags & IXAF_IS_IPV4)) {
		ASSERT(!(ixa->ixa_flags & IXAF_IS_IPV4));

		if (IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_src) ||
		    IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_dst)) {
			eager->sctp_linklocal = 1;

			ixa->ixa_flags |= IXAF_SCOPEID_SET;
			ixa->ixa_scopeid = ifindex;
			econnp->conn_incoming_ifindex = ifindex;
		}
	}

	/*
	 * On a clustered node, send this notification to the clustering
	 * subsystem.
	 */
	if (cl_sctp_connect != NULL) {
		uchar_t	*slist;
		uchar_t	*flist;
		size_t	fsize;
		size_t	ssize;

		fsize = sizeof (in6_addr_t) * eager->sctp_nfaddrs;
		ssize = sizeof (in6_addr_t) * eager->sctp_nsaddrs;
		slist = kmem_alloc(ssize, KM_NOSLEEP);
		flist = kmem_alloc(fsize, KM_NOSLEEP);
		if (slist == NULL || flist == NULL) {
			if (slist != NULL)
				kmem_free(slist, ssize);
			if (flist != NULL)
				kmem_free(flist, fsize);
			sctp_close_eager(eager);
			BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
			SCTP_KSTAT(sctps, sctp_cl_connect);
			return (NULL);
		}
		/* The clustering module frees these lists */
		sctp_get_saddr_list(eager, slist, ssize);
		sctp_get_faddr_list(eager, flist, fsize);
		(*cl_sctp_connect)(econnp->conn_family, slist,
		    eager->sctp_nsaddrs, econnp->conn_lport, flist,
		    eager->sctp_nfaddrs, econnp->conn_fport, B_FALSE,
		    (cl_sctp_handle_t)eager);
	}

	/* Connection established, so send up the conn_ind */
	if ((eager->sctp_ulpd = sctp->sctp_ulp_newconn(sctp->sctp_ulpd,
	    (sock_lower_handle_t)eager, NULL, cr, cpid,
	    &eager->sctp_upcalls)) == NULL) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}
	ASSERT(SCTP_IS_DETACHED(eager));
	eager->sctp_detached = B_FALSE;
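	/* Tell the socket layer the max blk size and write-side header room */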
	bzero(&sopp, sizeof (sopp));
	sopp.sopp_flags = SOCKOPT_MAXBLK|SOCKOPT_WROFF;
	sopp.sopp_maxblk = strmsgsz;
	if (econnp->conn_family == AF_INET) {
		sopp.sopp_wroff = sctps->sctps_wroff_xtra +
		    sizeof (sctp_data_hdr_t) + sctp->sctp_hdr_len;
	} else {
		sopp.sopp_wroff = sctps->sctps_wroff_xtra +
		    sizeof (sctp_data_hdr_t) + sctp->sctp_hdr6_len;
	}
	eager->sctp_ulp_prop(eager->sctp_ulpd, &sopp);
	return (eager);
}
Example #9
mblk_t *
sctp_init_mp(sctp_t *sctp, sctp_faddr_t *fp)
{
	mblk_t			*mp;
	uchar_t			*p;
	size_t			initlen;
	sctp_init_chunk_t	*icp;
	sctp_chunk_hdr_t	*chp;
	uint16_t		schlen;
	int			supp_af;
	sctp_stack_t		*sctps = sctp->sctp_sctps;
	conn_t			*connp = sctp->sctp_connp;

	if (connp->conn_family == AF_INET) {
		supp_af = PARM_SUPP_V4;
	} else {
		if (sctp->sctp_connp->conn_ipv6_v6only)
			supp_af = PARM_SUPP_V6;
		else
			supp_af = PARM_SUPP_V6 | PARM_SUPP_V4;
	}
	initlen = sizeof (*chp) + sizeof (*icp);
	if (sctp->sctp_send_adaptation) {
		initlen += (sizeof (sctp_parm_hdr_t) + sizeof (uint32_t));
	}
	initlen += sctp_supaddr_param_len(sctp);
	initlen += sctp_addr_params(sctp, supp_af, NULL, B_TRUE);
	if (sctp->sctp_prsctp_aware && sctps->sctps_prsctp_enabled)
		initlen += sctp_options_param_len(sctp, SCTP_PRSCTP_OPTION);
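
	/*
	 * initlen now covers the chunk header, the fixed INIT fields and
	 * the optional parameters; the parameters are laid out below in
	 * the same order their lengths were added up here.
	 */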

	/*
	 * This could be an INIT retransmission in which case sh_verf may
	 * be non-zero; zero it out just to be sure.
	 */
	sctp->sctp_sctph->sh_verf = 0;
	sctp->sctp_sctph6->sh_verf = 0;

	mp = sctp_make_mp(sctp, fp, initlen);
	if (mp == NULL) {
		SCTP_KSTAT(sctps, sctp_send_init_failed);
		return (NULL);
	}
	/* sctp_make_mp could have discovered we have no usable sources */
	if (sctp->sctp_nsaddrs == 0) {
		freemsg(mp);
		SCTP_KSTAT(sctps, sctp_send_init_failed);
		return (NULL);
	}

	/* Lay in a new INIT chunk, starting with the chunk header */
	chp = (sctp_chunk_hdr_t *)mp->b_wptr;
	chp->sch_id = CHUNK_INIT;
	chp->sch_flags = 0;
	schlen = (uint16_t)initlen;
	U16_TO_ABE16(schlen, &(chp->sch_len));

	mp->b_wptr += initlen;

	icp = (sctp_init_chunk_t *)(chp + 1);
	icp->sic_inittag = sctp->sctp_lvtag;
	U32_TO_ABE32(sctp->sctp_rwnd, &(icp->sic_a_rwnd));
	U16_TO_ABE16(sctp->sctp_num_ostr, &(icp->sic_outstr));
	U16_TO_ABE16(sctp->sctp_num_istr, &(icp->sic_instr));
	U32_TO_ABE32(sctp->sctp_ltsn, &(icp->sic_inittsn));

	p = (uchar_t *)(icp + 1);

	/* Adaptation layer param */
	p += sctp_adaptation_code_param(sctp, p);

	/* Add supported address types parameter */
	p += sctp_supaddr_param(sctp, p);

	/* Add address parameters */
	p += sctp_addr_params(sctp, supp_af, p, B_FALSE);

	/* Add Forward-TSN-Supported param */
	if (sctp->sctp_prsctp_aware && sctps->sctps_prsctp_enabled)
		p += sctp_options_param(sctp, p, SCTP_PRSCTP_OPTION);

	BUMP_LOCAL(sctp->sctp_obchunks);

	sctp_set_iplen(sctp, mp, fp->sf_ixa);

	return (mp);
}
Example #10
void
sctp_user_abort(sctp_t *sctp, mblk_t *data)
{
	mblk_t *mp;
	int len, hdrlen;
	char *cause;
	sctp_faddr_t *fp = sctp->sctp_current;
	ip_xmit_attr_t	*ixa = fp->sf_ixa;
	sctp_stack_t	*sctps = sctp->sctp_sctps;

	/*
	 * No notification is needed if the connection is not yet set up;
	 * just call sctp_clean_death() to reclaim resources.
	 * Any pending connect call(s) will error out.
	 */
	if (sctp->sctp_state < SCTPS_COOKIE_WAIT) {
		sctp_clean_death(sctp, ECONNABORTED);
		return;
	}

	mp = sctp_make_mp(sctp, fp, 0);
	if (mp == NULL) {
		SCTP_KSTAT(sctps, sctp_send_user_abort_failed);
		return;
	}

	/*
	 * Create abort chunk.
	 */
	if (data) {
		if (fp->sf_isv4) {
			hdrlen = sctp->sctp_hdr_len;
		} else {
			hdrlen = sctp->sctp_hdr6_len;
		}
		hdrlen += sizeof (sctp_chunk_hdr_t) + sizeof (sctp_parm_hdr_t);
		cause = (char *)data->b_rptr;
		len = data->b_wptr - data->b_rptr;

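		/* Truncate the cause data so the ABORT fits in one PMTU */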
		if (len + hdrlen > fp->sf_pmss) {
			len = fp->sf_pmss - hdrlen;
		}
	} else {
		cause = NULL;
		len = 0;
	}
	/*
	 * Since it is a user abort, we should have the sctp_t and hence
	 * the correct verification tag.  So we should not set the T-bit
	 * in the ABORT.
	 */
	if ((len = sctp_link_abort(mp, SCTP_ERR_USER_ABORT, cause, len, 0,
	    B_FALSE)) < 0) {
		freemsg(mp);
		return;
	}
	SCTPS_BUMP_MIB(sctps, sctpAborted);
	BUMP_LOCAL(sctp->sctp_opkts);
	BUMP_LOCAL(sctp->sctp_obchunks);

	sctp_set_iplen(sctp, mp, ixa);
	ASSERT(ixa->ixa_ire != NULL);
	ASSERT(ixa->ixa_cred != NULL);

	(void) conn_ip_output(mp, ixa);

	sctp_assoc_event(sctp, SCTP_COMM_LOST, 0, NULL);
	sctp_clean_death(sctp, ECONNABORTED);
}