Example No. 1
void
cred2uclabel(const cred_t *cr, bslabel_t *labelp)
{
	ts_label_t	*tslp;

	if ((tslp = crgetlabel(cr)) != NULL)
		bcopy(&tslp->tsl_label, labelp, sizeof (bslabel_t));
}
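
As a usage note, here is a minimal kernel-context sketch; the wrapper function is hypothetical, while CRED() is the usual handle on the current thread's credential (the header comment in Example No. 2 mentions it). Since cred2uclabel() copies nothing when the cred carries no label, the caller should clear the buffer first:

static void
example_copy_label(void)
{
	bslabel_t label;

	/* cred2uclabel() leaves the buffer untouched for unlabeled creds. */
	bzero(&label, sizeof (label));
	cred2uclabel(CRED(), &label);
}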
Example No. 2
/*
 * Convert a credential into a "ucred".  Allow the caller to specify
 * an aligned buffer, e.g., in an mblk, so we don't have to allocate
 * memory and copy it twice.
 *
 * This function may call cred2ucaud(), which calls CRED(). Since this
 * can be called from an interrupt thread, the receiver's cred (rcr) is needed
 * to determine whether audit info should be included.
 */
struct ucred_s *
cred2ucred(const cred_t *cr, pid_t pid, void *buf, const cred_t *rcr)
{
	struct ucred_s *uc;
	uint32_t realsz = ucredminsize(cr);
	ts_label_t *tslp = is_system_labeled() ? crgetlabel(cr) : NULL;

	/* The structure isn't always completely filled in, so zero it */
	if (buf == NULL) {
		uc = kmem_zalloc(realsz, KM_SLEEP);
	} else {
		bzero(buf, realsz);
		uc = buf;
	}
	uc->uc_size = realsz;
	uc->uc_pid = pid;
	uc->uc_projid = cr->cr_projid;
	uc->uc_zoneid = crgetzoneid(cr);

	if (REMOTE_PEER_CRED(cr)) {
		/*
		 * Other than label, the rest of cred info about a
		 * remote peer isn't available. Copy the label directly
		 * after the header where we generally copy the prcred.
		 * That's why we use sizeof (struct ucred_s).  The other
		 * offset fields are initialized to 0.
		 */
		uc->uc_labeloff = tslp == NULL ? 0 : sizeof (struct ucred_s);
	} else {
		uc->uc_credoff = UCRED_CRED_OFF;
		uc->uc_privoff = UCRED_PRIV_OFF;
		uc->uc_audoff = UCRED_AUD_OFF;
		uc->uc_labeloff = tslp == NULL ? 0 : UCRED_LABEL_OFF;
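
		/*
		 * Layout note (annotation, inferred from the UCRED_*_OFF
		 * macros used above, not quoted from ucred.h): the ucred_s
		 * header sits at offset 0, followed by the prcred, the
		 * privilege sets, the audit data, and the label; each
		 * uc_*off field records where its piece begins.
		 */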

		cred2prcred(cr, UCCRED(uc));
		cred2prpriv(cr, UCPRIV(uc));

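		/*
		 * Annotation: audoff is a file-scope global in this file,
		 * believed to be nonzero only when auditing was enabled at
		 * boot; when it is zero, no audit data is appended and
		 * uc_audoff is cleared here.
		 */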
		if (audoff == 0 || cred2ucaud(cr, UCAUD(uc), rcr) != 0)
			uc->uc_audoff = 0;
	}
	if (tslp != NULL)
		bcopy(&tslp->tsl_label, UCLABEL(uc), sizeof (bslabel_t));

	return (uc);
}
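
For context, the ucred built here is what userland receives through ucred_get(3C) and getpeerucred(3C). Below is a minimal sketch using those documented libc calls; the error handling and printed fields are illustrative only:

#include <ucred.h>
#include <unistd.h>
#include <stdio.h>

int
main(void)
{
	ucred_t *uc = NULL;

	/* Ask the kernel for our own credential snapshot. */
	if ((uc = ucred_get(getpid())) == NULL) {
		perror("ucred_get");
		return (1);
	}
	(void) printf("euid=%d zoneid=%d\n",
	    (int)ucred_geteuid(uc), (int)ucred_getzoneid(uc));
	ucred_free(uc);
	return (0);
}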
Example No. 3
int
sctp_listen(sctp_t *sctp)
{
	sctp_tf_t	*tf;
	sctp_stack_t	*sctps = sctp->sctp_sctps;
	conn_t		*connp = sctp->sctp_connp;

	RUN_SCTP(sctp);
	/*
	 * TCP handles a listen() that increases the backlog; we need to
	 * check whether that case should be handled here too.
	 */
	if (sctp->sctp_state > SCTPS_BOUND ||
	    (sctp->sctp_connp->conn_state_flags & CONN_CLOSING)) {
		WAKE_SCTP(sctp);
		return (EINVAL);
	}

	/* Do an anonymous bind for unbound socket doing listen(). */
	if (sctp->sctp_nsaddrs == 0) {
		struct sockaddr_storage ss;
		int ret;

		bzero(&ss, sizeof (ss));
		ss.ss_family = connp->conn_family;

		WAKE_SCTP(sctp);
		if ((ret = sctp_bind(sctp, (struct sockaddr *)&ss,
		    sizeof (ss))) != 0)
			return (ret);
		RUN_SCTP(sctp);
	}

	/* Cache things in the ixa without any refhold */
	ASSERT(!(connp->conn_ixa->ixa_free_flags & IXA_FREE_CRED));
	connp->conn_ixa->ixa_cred = connp->conn_cred;
	connp->conn_ixa->ixa_cpid = connp->conn_cpid;
	if (is_system_labeled())
		connp->conn_ixa->ixa_tsl = crgetlabel(connp->conn_cred);

	sctp->sctp_state = SCTPS_LISTEN;
	(void) random_get_pseudo_bytes(sctp->sctp_secret, SCTP_SECRET_LEN);
	sctp->sctp_last_secret_update = ddi_get_lbolt64();
	bzero(sctp->sctp_old_secret, SCTP_SECRET_LEN);

	/*
	 * If there is an association limit, allocate and initialize
	 * the counter struct.  Note that since listen can be called
	 * multiple times, the struct may already have been allocated.
	 */
	if (!list_is_empty(&sctps->sctps_listener_conf) &&
	    sctp->sctp_listen_cnt == NULL) {
		sctp_listen_cnt_t *slc;
		uint32_t ratio;

		ratio = sctp_find_listener_conf(sctps,
		    ntohs(connp->conn_lport));
		if (ratio != 0) {
			uint32_t mem_ratio, tot_buf;

			slc = kmem_alloc(sizeof (sctp_listen_cnt_t), KM_SLEEP);
			/*
			 * Calculate the connection limit based on
			 * the configured ratio and maxusers.  Maxusers
			 * are calculated based on memory size,
			 * ~ 1 user per MB.  Note that the conn_rcvbuf
			 * and conn_sndbuf may change after a
			 * connection is accepted.  So what we have
			 * is only an approximation.
			 */
			if ((tot_buf = connp->conn_rcvbuf +
			    connp->conn_sndbuf) < MB) {
				mem_ratio = MB / tot_buf;
				slc->slc_max = maxusers / ratio * mem_ratio;
			} else {
				mem_ratio = tot_buf / MB;
				slc->slc_max = maxusers / ratio / mem_ratio;
			}
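			/*
			 * Worked example (annotation, inputs assumed): with
			 * ratio = 2, maxusers = 2048 (~2GB at ~1 user per MB)
			 * and tot_buf = 256KB, the first branch runs:
			 * mem_ratio = MB / 256KB = 4 and
			 * slc_max = 2048 / 2 * 4 = 4096.  With tot_buf = 4MB
			 * the second branch gives mem_ratio = 4 and
			 * slc_max = 2048 / 2 / 4 = 256.
			 */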
			/* At least we should allow some associations! */
			if (slc->slc_max < sctp_min_assoc_listener)
				slc->slc_max = sctp_min_assoc_listener;
			slc->slc_cnt = 1;
			slc->slc_drop = 0;
			sctp->sctp_listen_cnt = slc;
		}
	}

	tf = &sctps->sctps_listen_fanout[SCTP_LISTEN_HASH(
	    ntohs(connp->conn_lport))];
	sctp_listen_hash_insert(tf, sctp);

	WAKE_SCTP(sctp);
	return (0);
}
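
A hedged userland sketch of the path that ends in sctp_listen(): a one-to-one SCTP socket is created, optionally bound, and listen(2) drives the kernel code above. The port number is illustrative; skipping bind(2) exercises the anonymous-bind path (sctp_nsaddrs == 0):

#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>
#include <stdio.h>

int
main(void)
{
	struct sockaddr_in sin;
	int fd;

	/* One-to-one style SCTP socket. */
	if ((fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP)) == -1) {
		perror("socket");
		return (1);
	}

	(void) memset(&sin, 0, sizeof (sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(9999);	/* illustrative port */
	if (bind(fd, (struct sockaddr *)&sin, sizeof (sin)) == -1) {
		perror("bind");
		return (1);
	}

	if (listen(fd, 5) == -1) {
		perror("listen");
		return (1);
	}
	return (0);
}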
Example No. 4
/* Process the COOKIE packet, mp, directed at the listener 'sctp' */
sctp_t *
sctp_conn_request(sctp_t *sctp, mblk_t *mp, uint_t ifindex, uint_t ip_hdr_len,
    sctp_init_chunk_t *iack, ip_recv_attr_t *ira)
{
	sctp_t	*eager;
	ip6_t	*ip6h;
	int	err;
	conn_t	*connp, *econnp;
	sctp_stack_t	*sctps;
	struct sock_proto_props sopp;
	cred_t		*cr;
	pid_t		cpid;
	in6_addr_t	faddr, laddr;
	ip_xmit_attr_t	*ixa;

	/*
	 * No need to check for a duplicate: this is the listener and
	 * we are holding the lock, so no new connection can be created
	 * out of it.  And since the fanout lookup already done found
	 * no match, there is no duplicate.
	 */
	ASSERT(OK_32PTR(mp->b_rptr));

	if ((eager = sctp_create_eager(sctp)) == NULL) {
		return (NULL);
	}

	connp = sctp->sctp_connp;
	sctps = sctp->sctp_sctps;
	econnp = eager->sctp_connp;

	if (connp->conn_policy != NULL) {
		/* Inherit the policy from the listener; use actions from ira */
		if (!ip_ipsec_policy_inherit(econnp, connp, ira)) {
			sctp_close_eager(eager);
			BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
			return (NULL);
		}
	}

	ip6h = (ip6_t *)mp->b_rptr;
	if (ira->ira_flags & IRAF_IS_IPV4) {
		ipha_t	*ipha;

		ipha = (ipha_t *)ip6h;
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_dst, &laddr);
		IN6_IPADDR_TO_V4MAPPED(ipha->ipha_src, &faddr);
	} else {
		laddr = ip6h->ip6_dst;
		faddr = ip6h->ip6_src;
	}

	if (ira->ira_flags & IRAF_IPSEC_SECURE) {
		/*
		 * XXX need to fix the cached policy issue here.
		 * We temporarily set the conn_laddr/conn_faddr here so
		 * that IPsec can use it for the latched policy
		 * selector.  This is obviously wrong as SCTP can
		 * use different addresses...
		 */
		econnp->conn_laddr_v6 = laddr;
		econnp->conn_faddr_v6 = faddr;
		econnp->conn_saddr_v6 = laddr;
	}
	if (ipsec_conn_cache_policy(econnp,
	    (ira->ira_flags & IRAF_IS_IPV4) != 0) != 0) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}

	/* Save for getpeerucred */
	cr = ira->ira_cred;
	cpid = ira->ira_cpid;

	if (is_system_labeled()) {
		ip_xmit_attr_t *ixa = econnp->conn_ixa;

		ASSERT(ira->ira_tsl != NULL);

		/* Discard any old label */
		if (ixa->ixa_free_flags & IXA_FREE_TSL) {
			ASSERT(ixa->ixa_tsl != NULL);
			label_rele(ixa->ixa_tsl);
			ixa->ixa_free_flags &= ~IXA_FREE_TSL;
			ixa->ixa_tsl = NULL;
		}

		if ((connp->conn_mlp_type != mlptSingle ||
		    connp->conn_mac_mode != CONN_MAC_DEFAULT) &&
		    ira->ira_tsl != NULL) {
			/*
			 * If this is an MLP connection or a MAC-Exempt
			 * connection with an unlabeled node, packets are to be
			 * exchanged using the security label of the received
			 * Cookie packet instead of the server application's
			 * label.
			 * tsol_check_dest called from ip_set_destination
			 * might later update TSF_UNLABELED by replacing
			 * ixa_tsl with a new label.
			 */
			label_hold(ira->ira_tsl);
			ip_xmit_attr_replace_tsl(ixa, ira->ira_tsl);
		} else {
			ixa->ixa_tsl = crgetlabel(econnp->conn_cred);
		}
	}

	err = sctp_accept_comm(sctp, eager, mp, ip_hdr_len, iack);
	if (err != 0) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}

	ASSERT(eager->sctp_current->ixa != NULL);

	ixa = eager->sctp_current->ixa;
	if (!(ira->ira_flags & IRAF_IS_IPV4)) {
		ASSERT(!(ixa->ixa_flags & IXAF_IS_IPV4));

		if (IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_src) ||
		    IN6_IS_ADDR_LINKLOCAL(&ip6h->ip6_dst)) {
			eager->sctp_linklocal = 1;

			ixa->ixa_flags |= IXAF_SCOPEID_SET;
			ixa->ixa_scopeid = ifindex;
			econnp->conn_incoming_ifindex = ifindex;
		}
	}

	/*
	 * On a clustered node, send this notification to the clustering
	 * subsystem.
	 */
	if (cl_sctp_connect != NULL) {
		uchar_t	*slist;
		uchar_t	*flist;
		size_t	fsize;
		size_t	ssize;

		fsize = sizeof (in6_addr_t) * eager->sctp_nfaddrs;
		ssize = sizeof (in6_addr_t) * eager->sctp_nsaddrs;
		slist = kmem_alloc(ssize, KM_NOSLEEP);
		flist = kmem_alloc(fsize, KM_NOSLEEP);
		if (slist == NULL || flist == NULL) {
			if (slist != NULL)
				kmem_free(slist, ssize);
			if (flist != NULL)
				kmem_free(flist, fsize);
			sctp_close_eager(eager);
			BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
			SCTP_KSTAT(sctps, sctp_cl_connect);
			return (NULL);
		}
		/* The clustering module frees these lists */
		sctp_get_saddr_list(eager, slist, ssize);
		sctp_get_faddr_list(eager, flist, fsize);
		(*cl_sctp_connect)(econnp->conn_family, slist,
		    eager->sctp_nsaddrs, econnp->conn_lport, flist,
		    eager->sctp_nfaddrs, econnp->conn_fport, B_FALSE,
		    (cl_sctp_handle_t)eager);
	}

	/* Connection established, so send up the conn_ind */
	if ((eager->sctp_ulpd = sctp->sctp_ulp_newconn(sctp->sctp_ulpd,
	    (sock_lower_handle_t)eager, NULL, cr, cpid,
	    &eager->sctp_upcalls)) == NULL) {
		sctp_close_eager(eager);
		BUMP_MIB(&sctps->sctps_mib, sctpListenDrop);
		return (NULL);
	}
	ASSERT(SCTP_IS_DETACHED(eager));
	eager->sctp_detached = B_FALSE;
	bzero(&sopp, sizeof (sopp));
	sopp.sopp_flags = SOCKOPT_MAXBLK|SOCKOPT_WROFF;
	sopp.sopp_maxblk = strmsgsz;
	if (econnp->conn_family == AF_INET) {
		sopp.sopp_wroff = sctps->sctps_wroff_xtra +
		    sizeof (sctp_data_hdr_t) + sctp->sctp_hdr_len;
	} else {
		sopp.sopp_wroff = sctps->sctps_wroff_xtra +
		    sizeof (sctp_data_hdr_t) + sctp->sctp_hdr6_len;
	}
	eager->sctp_ulp_prop(eager->sctp_ulpd, &sopp);
	return (eager);
}
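
The slist/flist handling above follows a common kernel idiom: allocate several KM_NOSLEEP buffers, and on partial failure free whichever ones succeeded before bailing out. Here is a standalone sketch of that idiom; the helper name and parameters are illustrative, not from the source:

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/errno.h>

/* Allocate two buffers, unwinding cleanly if either allocation fails. */
static int
alloc_pair(size_t asize, size_t bsize, uchar_t **ap, uchar_t **bp)
{
	uchar_t *a = kmem_alloc(asize, KM_NOSLEEP);
	uchar_t *b = kmem_alloc(bsize, KM_NOSLEEP);

	if (a == NULL || b == NULL) {
		/* Free whichever allocation succeeded. */
		if (a != NULL)
			kmem_free(a, asize);
		if (b != NULL)
			kmem_free(b, bsize);
		return (ENOMEM);
	}
	*ap = a;
	*bp = b;
	return (0);
}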