Example #1
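pktsched_get_pkt_sfb_vars() hands back pointers to the SFB hash and flag fields kept in an mbuf's packet header. The _CASSERT() compile-time assertions confirm that both private fields are exactly 32 bits wide before their addresses are returned as uint32_t pointers.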
uint32_t *
pktsched_get_pkt_sfb_vars(pktsched_pkt_t *pkt, uint32_t **sfb_flags)
{
	uint32_t *hashp = NULL;

	switch (pkt->pktsched_ptype) {
	case QP_MBUF: {
		struct mbuf *m = (struct mbuf *)pkt->pktsched_pkt;
		struct pkthdr *pkth = &m->m_pkthdr;

		_CASSERT(sizeof (pkth->pkt_mpriv_hash) == sizeof (uint32_t));
		_CASSERT(sizeof (pkth->pkt_mpriv_flags) == sizeof (uint32_t));

		*sfb_flags = &pkth->pkt_mpriv_flags;
		hashp = &pkth->pkt_mpriv_hash;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (hashp);
}
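All of these examples rely on _CASSERT() for compile-time checks. The following is a minimal sketch of how such a macro can be written in plain C; MY_CASSERT, example_hdr, and my_cassert_demo are hypothetical names, and XNU's actual _CASSERT definition may differ (it could, for instance, map to C11 _Static_assert).

#include <stdint.h>

/*
 * Hypothetical stand-in for _CASSERT(): the switch statement carries two
 * case labels, 0 and the value of the constant expression x.  If x
 * evaluates to 0 (false) at compile time, the labels collide and the
 * build fails; if x is nonzero, the switch compiles away to nothing.
 */
#define MY_CASSERT(x)	switch (0) { case 0: case (x): break; }

struct example_hdr {
	uint32_t pkt_flowid;	/* hypothetical field, mirrors the examples above */
};

void
my_cassert_demo(void)
{
	/* Build breaks if pkt_flowid ever stops being 32 bits wide. */
	MY_CASSERT(sizeof (((struct example_hdr *)0)->pkt_flowid) ==
	    sizeof (uint32_t));
}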
Example #2
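red_init() creates the zone allocator backing RED (Random Early Detection) queue state and panics if the zone cannot be created. The _CASSERT() checks verify that the RED-specific ECN flags carry the same values as the generic classq flags, so the two sets can be used interchangeably.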
void
red_init(void)
{
	_CASSERT(REDF_ECN4 == CLASSQF_ECN4);
	_CASSERT(REDF_ECN6 == CLASSQF_ECN6);

	red_size = sizeof (red_t);
	red_zone = zinit(red_size, RED_ZONE_MAX * red_size,
	    0, RED_ZONE_NAME);
	if (red_zone == NULL) {
		panic("%s: failed allocating %s", __func__, RED_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(red_zone, Z_EXPAND, TRUE);
	zone_change(red_zone, Z_CALLERACCT, TRUE);
}
Example #3
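pktsched_alloc_fcentry() allocates a flow-advisory entry and copies the flow source and flow ID out of the mbuf packet header; _CASSERT() guarantees that the flow-ID fields of the two structures have the same size before the assignment.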
struct flowadv_fcentry *
pktsched_alloc_fcentry(pktsched_pkt_t *pkt, struct ifnet *ifp, int how)
{
#pragma unused(ifp)
	struct flowadv_fcentry *fce = NULL;

	switch (pkt->pktsched_ptype) {
	case QP_MBUF: {
		struct mbuf *m = (struct mbuf *)pkt->pktsched_pkt;

		fce = flowadv_alloc_entry(how);
		if (fce == NULL)
			break;

		_CASSERT(sizeof (m->m_pkthdr.pkt_flowid) ==
		    sizeof (fce->fce_flowid));

		fce->fce_flowsrc_type = m->m_pkthdr.pkt_flowsrc;
		fce->fce_flowid = m->m_pkthdr.pkt_flowid;
		break;
	}

	default:
		VERIFY(0);
		/* NOTREACHED */
	}

	return (fce);
}
Example #4
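sfb_init() sets up three zones for SFB (Stochastic Fair Blue): the main struct sfb state, the bin arrays, and the flow-control lists, panicking if any of them cannot be created. As in the RED case, the _CASSERT() checks tie the SFB ECN flag values to the generic classq ones.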
void
sfb_init(void)
{
	_CASSERT(SFBF_ECN4 == CLASSQF_ECN4);
	_CASSERT(SFBF_ECN6 == CLASSQF_ECN6);

	sfb_size = sizeof (struct sfb);
	sfb_zone = zinit(sfb_size, SFB_ZONE_MAX * sfb_size,
	    0, SFB_ZONE_NAME);
	if (sfb_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_zone, Z_EXPAND, TRUE);
	zone_change(sfb_zone, Z_CALLERACCT, TRUE);

	sfb_bins_size = sizeof (*((struct sfb *)0)->sfb_bins);
	sfb_bins_zone = zinit(sfb_bins_size, SFB_BINS_ZONE_MAX * sfb_bins_size,
	    0, SFB_BINS_ZONE_NAME);
	if (sfb_bins_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_BINS_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_bins_zone, Z_EXPAND, TRUE);
	zone_change(sfb_bins_zone, Z_CALLERACCT, TRUE);

	sfb_fcl_size = sizeof (*((struct sfb *)0)->sfb_fc_lists);
	sfb_fcl_zone = zinit(sfb_fcl_size, SFB_FCL_ZONE_MAX * sfb_fcl_size,
	    0, SFB_FCL_ZONE_NAME);
	if (sfb_fcl_zone == NULL) {
		panic("%s: failed allocating %s", __func__, SFB_FCL_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(sfb_fcl_zone, Z_EXPAND, TRUE);
	zone_change(sfb_fcl_zone, Z_CALLERACCT, TRUE);
}
Example #5
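frag6_init() initializes the IPv6 fragment reassembly queue, its lock, and the reassembly limits. The _CASSERT() checks ensure that the queue structures fit inside a single mbuf, since that is how ip6q_alloc() and ip6af_alloc() allocate them.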
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
	/* ip6q_alloc() uses mbufs for IPv6 fragment queue structures */
	_CASSERT(sizeof (struct ip6q) <= _MLEN);
	/* ip6af_alloc() uses mbufs for IPv6 fragment queue structures */
	_CASSERT(sizeof (struct ip6asfrag) <= _MLEN);

	/* IPv6 fragment reassembly queue lock */
	ip6qlock_grp_attr  = lck_grp_attr_alloc_init();
	ip6qlock_grp = lck_grp_alloc_init("ip6qlock", ip6qlock_grp_attr);
	ip6qlock_attr = lck_attr_alloc_init();
	lck_mtx_init(&ip6qlock, ip6qlock_grp, ip6qlock_attr);

	lck_mtx_lock(&ip6qlock);
	/* Initialize IPv6 reassembly queue. */
	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;

	/* same limits as IPv4 */
	ip6_maxfragpackets = nmbclusters / 32;
	ip6_maxfrags = ip6_maxfragpackets * 2;
	ip6q_updateparams();
	lck_mtx_unlock(&ip6qlock);
}
Example #6
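in6_dinit() attaches and then initializes the PF_INET6 protocol switch entries. The long run of _CASSERT() checks verifies that struct ip6protosw and struct protosw have identical sizes and field offsets, which is what makes the (struct protosw *) casts in the loops below safe.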
/* Initialize the PF_INET6 domain, and add in the pre-defined protos */
void
in6_dinit(struct domain *dp)
{
	struct ip6protosw *pr;
	int i;

	VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
	VERIFY(inet6domain == NULL);

	inet6domain = dp;

	_CASSERT(sizeof (struct protosw) == sizeof (struct ip6protosw));
	_CASSERT(offsetof(struct ip6protosw, pr_entry) ==
	    offsetof(struct protosw, pr_entry));
	_CASSERT(offsetof(struct ip6protosw, pr_domain) ==
	    offsetof(struct protosw, pr_domain));
	_CASSERT(offsetof(struct ip6protosw, pr_protosw) ==
	    offsetof(struct protosw, pr_protosw));
	_CASSERT(offsetof(struct ip6protosw, pr_type) ==
	    offsetof(struct protosw, pr_type));
	_CASSERT(offsetof(struct ip6protosw, pr_protocol) ==
	    offsetof(struct protosw, pr_protocol));
	_CASSERT(offsetof(struct ip6protosw, pr_flags) ==
	    offsetof(struct protosw, pr_flags));
	_CASSERT(offsetof(struct ip6protosw, pr_input) ==
	    offsetof(struct protosw, pr_input));
	_CASSERT(offsetof(struct ip6protosw, pr_output) ==
	    offsetof(struct protosw, pr_output));
	_CASSERT(offsetof(struct ip6protosw, pr_ctlinput) ==
	    offsetof(struct protosw, pr_ctlinput));
	_CASSERT(offsetof(struct ip6protosw, pr_ctloutput) ==
	    offsetof(struct protosw, pr_ctloutput));
	_CASSERT(offsetof(struct ip6protosw, pr_usrreqs) ==
	    offsetof(struct protosw, pr_usrreqs));
	_CASSERT(offsetof(struct ip6protosw, pr_init) ==
	    offsetof(struct protosw, pr_init));
	_CASSERT(offsetof(struct ip6protosw, pr_drain) ==
	    offsetof(struct protosw, pr_drain));
	_CASSERT(offsetof(struct ip6protosw, pr_sysctl) ==
	    offsetof(struct protosw, pr_sysctl));
	_CASSERT(offsetof(struct ip6protosw, pr_lock) ==
	    offsetof(struct protosw, pr_lock));
	_CASSERT(offsetof(struct ip6protosw, pr_unlock) ==
	    offsetof(struct protosw, pr_unlock));
	_CASSERT(offsetof(struct ip6protosw, pr_getlock) ==
	    offsetof(struct protosw, pr_getlock));
	_CASSERT(offsetof(struct ip6protosw, pr_filter_head) ==
	    offsetof(struct protosw, pr_filter_head));
	_CASSERT(offsetof(struct ip6protosw, pr_old) ==
	    offsetof(struct protosw, pr_old));

	/*
	 * Attach first, then initialize.  ip6_init() needs raw IP6 handler.
	 */
	for (i = 0, pr = &inet6sw[0]; i < in6_proto_count; i++, pr++)
		net_add_proto((struct protosw *)pr, dp, 0);
	for (i = 0, pr = &inet6sw[0]; i < in6_proto_count; i++, pr++)
		net_init_proto((struct protosw *)pr, dp);

	inet6_domain_mutex = dp->dom_mtx;
}