Example #1
static int
priq_stat_sc(struct priq_if *pif, cqrq_stat_sc_t *sr)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(sr->sc == MBUF_SC_UNSPEC || MBUF_VALID_SC(sr->sc));

	i = MBUF_SCIDX(sr->sc);
	VERIFY(i < IFCQ_SC_MAX);

	cl = ifq->ifcq_disc_slots[i].cl;
	sr->packets = qlen(&cl->cl_q);
	sr->bytes = qsize(&cl->cl_q);

	return (0);
}
Example #2
/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
struct mbuf *
priq_dequeue(struct priq_if *pif, cqdq_op_t op)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	struct mbuf *m;
	u_int32_t pri, len;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (pif->pif_bitmap == 0) {
		/* no active class; nothing to dequeue */
		return (NULL);
	}
	VERIFY(!IFCQ_IS_EMPTY(ifq));

	/* highest set bit in the bitmap = highest-priority active class */
	pri = pktsched_fls(pif->pif_bitmap) - 1;	/* zero based */
	VERIFY(pri < PRIQ_MAXPRI);
	cl = pif->pif_classes[pri];
	VERIFY(cl != NULL && !qempty(&cl->cl_q));

	if (op == CLASSQDQ_POLL)
		return (priq_pollq(cl));

	m = priq_getq(cl);
	VERIFY(m != NULL);	/* qalg must be work conserving */
	len = m_pktlen(m);

	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, len);
	if (qempty(&cl->cl_q)) {
		cl->cl_period++;
		/* class is now inactive; indicate it as such */
		pktsched_bit_clr(pri, &pif->pif_bitmap);
	}
	PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len);
	IFCQ_XMIT_ADD(ifq, 1, len);

	return (m);
}
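
A minimal caller-side sketch of the POLL/REMOVE contract documented above. It
is not part of the source: the function name and the resource-reservation step
are hypothetical, and the IFCQ lock is assumed to be held already, as
priq_dequeue() asserts.

static struct mbuf *
driver_dequeue_sketch(struct priq_if *pif)
{
	struct mbuf *peek, *m;

	peek = priq_dequeue(pif, CLASSQDQ_POLL);	/* head stays queued */
	if (peek == NULL)
		return (NULL);

	/* ... e.g. reserve TX descriptors for m_pktlen(peek) bytes ... */

	m = priq_dequeue(pif, CLASSQDQ_REMOVE);	/* same packet, per contract */
	VERIFY(m == peek);
	return (m);
}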
Example #3
File: pktsched.c Project: onlynone/xnu
int
pktsched_teardown(struct ifclassq *ifq)
{
	int error = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if_qflush(ifq->ifcq_ifp, 1);
	VERIFY(IFCQ_IS_EMPTY(ifq));

	ifq->ifcq_flags &= ~IFCQF_ENABLED;

	switch (ifq->ifcq_type) {
	case PKTSCHEDT_NONE:
		break;

#if PKTSCHED_PRIQ
	case PKTSCHEDT_PRIQ:
		error = priq_teardown_ifclassq(ifq);
		break;
#endif /* PKTSCHED_PRIQ */

	case PKTSCHEDT_TCQ:
		error = tcq_teardown_ifclassq(ifq);
		break;

	case PKTSCHEDT_QFQ:
		error = qfq_teardown_ifclassq(ifq);
		break;

	case PKTSCHEDT_FQ_CODEL:
		error = fq_if_teardown_ifclassq(ifq);
		break;
	default:
		error = ENXIO;
		break;
	}
	return (error);
}
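
Note: pktsched_setup() in Example #12 below calls this teardown before
installing a new scheduler type, and verifies afterwards that the type has
reverted to PKTSCHEDT_NONE.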
Example #4
/* discard all the queued packets on the interface */
void
fairq_purge(struct fairq_if *fif)
{
	struct fairq_class *cl;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

	for (pri = 0; pri <= fif->fif_maxpri; pri++) {
		if ((cl = fif->fif_classes[pri]) != NULL && cl->cl_head)
			fairq_purgeq(fif, cl, 0, NULL, NULL);
	}
#if !PF_ALTQ
	/*
	 * This assertion is safe to be made only when PF_ALTQ is not
	 * configured; otherwise, IFCQ_LEN represents the sum of the
	 * packets managed by ifcq_disc and altq_disc instances, which
	 * is possible when transitioning between the two.
	 */
	VERIFY(IFCQ_LEN(fif->fif_ifq) == 0);
#endif /* !PF_ALTQ */
}
Example #5
/*
 * priq_enqueue_ifclassq is an enqueue function to be registered to
 * (*ifcq_enqueue) in struct ifclassq.
 */
static int
priq_enqueue_ifclassq(struct ifclassq *ifq, struct mbuf *m)
{
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!(m->m_flags & M_PKTHDR)) {
		/* should not happen */
		log(LOG_ERR, "%s: packet does not have pkthdr\n",
		    if_name(ifq->ifcq_ifp));
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (ENOBUFS);
	}

	i = MBUF_SCIDX(mbuf_get_service_class(m));
	VERIFY(i < IFCQ_SC_MAX);

	return (priq_enqueue(ifq->ifcq_disc,
	    ifq->ifcq_disc_slots[i].cl, m, m_pftag(m)));
}
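
The registration itself happens in priq_setup_ifclassq() (Example #11 below),
which passes this handler to ifclassq_attach() alongside the dequeue and
request handlers.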
Example #6
int
priq_get_class_stats(struct priq_if *pif, u_int32_t qid,
    struct priq_classstats *sp)
{
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	if ((cl = priq_clh_to_clp(pif, qid)) == NULL)
		return (EINVAL);

	sp->class_handle = cl->cl_handle;
	sp->priority = cl->cl_pri;
	sp->qlength = qlen(&cl->cl_q);
	sp->qlimit = qlimit(&cl->cl_q);
	sp->period = cl->cl_period;
	sp->xmitcnt = cl->cl_xmitcnt;
	sp->dropcnt = cl->cl_dropcnt;

	sp->qtype = qtype(&cl->cl_q);
	sp->qstate = qstate(&cl->cl_q);
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_getstats(cl->cl_red, &sp->red[0]);
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_getstats(cl->cl_rio, &sp->red[0]);
#endif /* CLASSQ_RIO */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_getstats(cl->cl_blue, &sp->blue);
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_getstats(cl->cl_sfb, &sp->sfb);

	return (0);
}
Example #7
static int
priq_suspendq(struct priq_if *pif, struct priq_class *cl)
{
	struct ifclassq *ifq = pif->pif_ifq;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		err = rio_suspendq(cl->cl_rio, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		err = red_suspendq(cl->cl_red, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		err = blue_suspendq(cl->cl_blue, &cl->cl_q, TRUE);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb != NULL) {
			err = sfb_suspendq(cl->cl_sfb, &cl->cl_q, TRUE);
		} else {
			VERIFY(cl->cl_flags & PRCF_LAZY);
			err = ENXIO;	/* delayed throttling */
		}
	}

	if (err == 0 || err == ENXIO)
		qstate(&cl->cl_q) = QS_SUSPENDED;

	return (err);
}
Example #8
static int
qfq_destroy_locked(struct qfq_if *qif)
{
	int i;

	IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq);

	(void) qfq_clear_interface(qif);

	VERIFY(qif->qif_class_tbl != NULL);
	_FREE(qif->qif_class_tbl, M_DEVBUF);
	qif->qif_class_tbl = NULL;

	VERIFY(qif->qif_groups != NULL);
	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		struct qfq_group *grp = qif->qif_groups[i];

		if (grp != NULL) {
			VERIFY(grp->qfg_slots != NULL);
			_FREE(grp->qfg_slots, M_DEVBUF);
			grp->qfg_slots = NULL;
			_FREE(grp, M_DEVBUF);
			qif->qif_groups[i] = NULL;
		}
	}
	_FREE(qif->qif_groups, M_DEVBUF);
	qif->qif_groups = NULL;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s scheduler destroyed\n",
		    if_name(QFQIF_IFP(qif)), qfq_style(qif));
	}

	zfree(qfq_zone, qif);

	return (0);
}
Example #9
static int
fairq_class_destroy(struct fairq_if *fif, struct fairq_class *cl)
{
	struct ifclassq *ifq = fif->fif_ifq;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (cl->cl_head)
		fairq_purgeq(fif, cl, 0, NULL, NULL);

	fif->fif_classes[cl->cl_pri] = NULL;
	if (fif->fif_poll_cache == cl)
		fif->fif_poll_cache = NULL;
	if (fif->fif_maxpri == cl->cl_pri) {
		for (pri = cl->cl_pri; pri >= 0; pri--)
			if (fif->fif_classes[pri] != NULL) {
				fif->fif_maxpri = pri;
				break;
			}
		if (pri < 0)
			fif->fif_maxpri = -1;
	}

	if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
		if (cl->cl_qtype == Q_RIO)
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (cl->cl_qtype == Q_RED)
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (cl->cl_qtype == Q_BLUE)
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		cl->cl_qtype = Q_DROPTAIL;
		cl->cl_qstate = QS_RUNNING;
	}

	if (fif->fif_default == cl)
		fif->fif_default = NULL;

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s destroyed qid=%d pri=%d\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif),
		    cl->cl_handle, cl->cl_pri);
	}

	_FREE(cl->cl_buckets, M_DEVBUF);
	cl->cl_head = NULL;	/* sanity */
	cl->cl_polled = NULL;	/* sanity */
	cl->cl_buckets = NULL;	/* sanity */

	zfree(fairq_cl_zone, cl);

	return (0);
}
Example #10
static struct fairq_class *
fairq_class_create(struct fairq_if *fif, int pri, u_int32_t qlimit,
    u_int64_t bandwidth, u_int32_t nbuckets, int flags, u_int64_t hogs_m1,
    u_int64_t lssc_m1, u_int64_t lssc_d, u_int64_t lssc_m2, u_int32_t qid)
{
#pragma unused(lssc_d, lssc_m2)
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct fairq_class *cl;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

	/* Sanitize flags unless internally configured */
	if (fif->fif_flags & FAIRQIFF_ALTQ)
		flags &= FARF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & FARF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & FARF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & FARF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RED &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RIO &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_BLUE &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}

	if (bandwidth == 0 || (bandwidth / 8) == 0) {
		log(LOG_ERR, "%s: %s invalid data rate %llu\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif), bandwidth);
		return (NULL);
	}

	if (nbuckets == 0)
		nbuckets = 256;
	if (nbuckets > FAIRQ_MAX_BUCKETS)
		nbuckets = FAIRQ_MAX_BUCKETS;
	/* round up to a power of 2 (for such n, n ^ (n-1) == (n << 1) - 1) */
	while ((nbuckets ^ (nbuckets - 1)) != ((nbuckets << 1) - 1))
		++nbuckets;

	ifq = fif->fif_ifq;
	ifp = FAIRQIF_IFP(fif);

	if ((cl = fif->fif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (cl->cl_head)
			fairq_purgeq(fif, cl, 0, NULL, NULL);
#if CLASSQ_RIO
		if (cl->cl_qtype == Q_RIO)
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (cl->cl_qtype == Q_RED)
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (cl->cl_qtype == Q_BLUE)
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		cl->cl_qtype = Q_DROPTAIL;
		cl->cl_qstate = QS_RUNNING;
	} else {
		cl = zalloc(fairq_cl_zone);
		if (cl == NULL)
			goto err_ret;
		bzero(cl, fairq_cl_size);
		cl->cl_nbuckets = nbuckets;
		cl->cl_nbucket_mask = nbuckets - 1;

		cl->cl_buckets = _MALLOC(sizeof (struct fairq_bucket) *
		    cl->cl_nbuckets, M_DEVBUF, M_WAITOK|M_ZERO);
		if (cl->cl_buckets == NULL)
			goto err_buckets;
		cl->cl_head = NULL;
	}

	fif->fif_classes[pri] = cl;
	if (flags & FARF_DEFAULTCLASS)
		fif->fif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;	/* use default */
	}
	cl->cl_qlimit = qlimit;
	for (i = 0; i < cl->cl_nbuckets; ++i) {
		_qinit(&cl->cl_buckets[i].queue, Q_DROPTAIL, qlimit);
	}
	cl->cl_bandwidth = bandwidth / 8;	/* cvt to bytes per second */
	cl->cl_qtype = Q_DROPTAIL;
	cl->cl_qstate = QS_RUNNING;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > fif->fif_maxpri)
		fif->fif_maxpri = pri;
	cl->cl_fif = fif;
	cl->cl_handle = qid;
	cl->cl_hogs_m1 = hogs_m1 / 8;
	cl->cl_lssc_m1 = lssc_m1 / 8;	/* NOT YET USED */
	cl->cl_bw_current = 0;

	if (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		cl->cl_qflags = 0;
		if (flags & FARF_ECN) {
			if (flags & FARF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & FARF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & FARF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & FARF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & FARF_FLOWCTL) {
			if (flags & FARF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & FARF_CLEARDSCP) {
			if (flags & FARF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 *	events and recompute pkttime accordingly.
		 */
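		/* pkttime: time to transmit one MTU-sized packet, in nsec */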
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RIO
		if (flags & FARF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				cl->cl_qtype = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (flags & FARF_RED) {
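			/* 10%/30% of the class qlimit as RED min/max thresholds */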
			cl->cl_red = red_alloc(ifp, 0, 0,
			    cl->cl_qlimit * 10/100,
			    cl->cl_qlimit * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				cl->cl_qtype = Q_RED;
		}
#endif /* CLASSQ_RED */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & FARF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				cl->cl_qtype = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & FARF_SFB) {
			if (!(cl->cl_flags & FARF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    cl->cl_qlimit, cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & FARF_LAZY))
				cl->cl_qtype = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), fairq_style(fif),
		    cl->cl_handle, cl->cl_pri, cl->cl_qlimit, flags, FARF_BITS);
	}

	return (cl);

err_buckets:
	if (cl->cl_buckets != NULL)
		_FREE(cl->cl_buckets, M_DEVBUF);
err_ret:
	if (cl != NULL) {
		if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
			if (cl->cl_qtype == Q_RIO)
				rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
			if (cl->cl_qtype == Q_RED)
				red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
			if (cl->cl_qtype == Q_BLUE)
				blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
			if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
				sfb_destroy(cl->cl_sfb);
			cl->cl_qalg.ptr = NULL;
			cl->cl_qtype = Q_DROPTAIL;
			cl->cl_qstate = QS_RUNNING;
		}
		zfree(fairq_cl_zone, cl);
	}
	return (NULL);
}
Example #11
int
priq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	struct priq_class *cl0, *cl1, *cl2, *cl3, *cl4;
	struct priq_class *cl5, *cl6, *cl7, *cl8, *cl9;
	struct priq_if *pif;
	u_int32_t maxlen = 0, qflags = 0;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);

	if (flags & PKTSCHEDF_QALG_RED)
		qflags |= PRCF_RED;
	if (flags & PKTSCHEDF_QALG_RIO)
		qflags |= PRCF_RIO;
	if (flags & PKTSCHEDF_QALG_BLUE)
		qflags |= PRCF_BLUE;
	if (flags & PKTSCHEDF_QALG_SFB)
		qflags |= PRCF_SFB;
	if (flags & PKTSCHEDF_QALG_ECN)
		qflags |= PRCF_ECN;
	if (flags & PKTSCHEDF_QALG_FLOWCTL)
		qflags |= PRCF_FLOWCTL;

	pif = priq_alloc(ifp, M_WAITOK, FALSE);
	if (pif == NULL)
		return (ENOMEM);

	if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
		maxlen = if_sndq_maxlen;

	if ((err = priq_add_queue(pif, 0, maxlen,
	    qflags | PRCF_LAZY, SCIDX_BK_SYS, &cl0)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 1, maxlen,
	    qflags | PRCF_LAZY, SCIDX_BK, &cl1)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 2, maxlen,
	    qflags | PRCF_DEFAULTCLASS, SCIDX_BE, &cl2)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 3, maxlen,
	    qflags | PRCF_LAZY, SCIDX_RD, &cl3)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 4, maxlen,
	    qflags | PRCF_LAZY, SCIDX_OAM, &cl4)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 5, maxlen,
	    qflags | PRCF_LAZY, SCIDX_AV, &cl5)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 6, maxlen,
	    qflags | PRCF_LAZY, SCIDX_RV, &cl6)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 7, maxlen,
	    qflags | PRCF_LAZY, SCIDX_VI, &cl7)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 8, maxlen,
	    qflags | PRCF_LAZY, SCIDX_VO, &cl8)) != 0)
		goto cleanup;

	if ((err = priq_add_queue(pif, 9, maxlen,
	    qflags, SCIDX_CTL, &cl9)) != 0)
		goto cleanup;

	err = ifclassq_attach(ifq, PKTSCHEDT_PRIQ, pif,
	    priq_enqueue_ifclassq, priq_dequeue_ifclassq, NULL,
	    priq_request_ifclassq);

	/* cache these for faster lookup */
	if (err == 0) {
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK_SYS;
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0;

		ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK;
		ifq->ifcq_disc_slots[SCIDX_BK].cl = cl1;

		ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_BE].cl = cl2;

		ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_RD;
		ifq->ifcq_disc_slots[SCIDX_RD].cl = cl3;

		ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_OAM;
		ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl4;

		ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_AV;
		ifq->ifcq_disc_slots[SCIDX_AV].cl = cl5;

		ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_RV;
		ifq->ifcq_disc_slots[SCIDX_RV].cl = cl6;

		ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_VI].cl = cl7;

		ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO;
		ifq->ifcq_disc_slots[SCIDX_VO].cl = cl8;

		ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_CTL;
		ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl9;
	}

cleanup:
	if (err != 0)
		(void) priq_destroy_locked(pif);

	return (err);
}
Example #12
File: pktsched.c Project: Algozjb/xnu
int
pktsched_setup(struct ifclassq *ifq, u_int32_t scheduler, u_int32_t sflags)
{
	int error = 0;
	u_int32_t qflags = sflags;
	u_int32_t rflags;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	VERIFY(machclk_freq != 0);

	/* Nothing to do unless the scheduler type changes */
	if (ifq->ifcq_type == scheduler)
		return (0);

	qflags &= (PKTSCHEDF_QALG_RED | PKTSCHEDF_QALG_RIO |
	    PKTSCHEDF_QALG_BLUE | PKTSCHEDF_QALG_SFB);

	/* These are mutually exclusive */
	if (qflags != 0 &&
	    qflags != PKTSCHEDF_QALG_RED && qflags != PKTSCHEDF_QALG_RIO &&
	    qflags != PKTSCHEDF_QALG_BLUE && qflags != PKTSCHEDF_QALG_SFB) {
		panic("%s: RED|RIO|BLUE|SFB mutually exclusive\n", __func__);
		/* NOTREACHED */
	}

	/*
	 * Remember the flags that need to be restored upon success, as
	 * they may be cleared when we tear down existing scheduler.
	 */
	rflags = (ifq->ifcq_flags & IFCQF_ENABLED);

	if (ifq->ifcq_type != PKTSCHEDT_NONE) {
		(void) pktsched_teardown(ifq);

		/* Teardown should have succeeded */
		VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);
		VERIFY(ifq->ifcq_disc == NULL);
		VERIFY(ifq->ifcq_enqueue == NULL);
		VERIFY(ifq->ifcq_dequeue == NULL);
		VERIFY(ifq->ifcq_dequeue_sc == NULL);
		VERIFY(ifq->ifcq_request == NULL);
	}

	switch (scheduler) {
#if PKTSCHED_PRIQ
	case PKTSCHEDT_PRIQ:
		error = priq_setup_ifclassq(ifq, sflags);
		break;
#endif /* PKTSCHED_PRIQ */

	case PKTSCHEDT_TCQ:
		error = tcq_setup_ifclassq(ifq, sflags);
		break;

	case PKTSCHEDT_QFQ:
		error = qfq_setup_ifclassq(ifq, sflags);
		break;

	default:
		error = ENXIO;
		break;
	}

	if (error == 0)
		ifq->ifcq_flags |= rflags;

	return (error);
}
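
A hedged caller sketch, not from the source: the function name is hypothetical,
while the type and flag constants are the ones referenced in this file. It
switches an interface queue to QFQ with SFB plus flow control, with the IFCQ
lock held as pktsched_setup() asserts.

static void
scheduler_switch_sketch(struct ifclassq *ifq)
{
	int err;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	err = pktsched_setup(ifq, PKTSCHEDT_QFQ,
	    PKTSCHEDF_QALG_SFB | PKTSCHEDF_QALG_FLOWCTL);
	if (err != 0)
		log(LOG_ERR, "%s: scheduler switch failed (%d)\n",
		    __func__, err);
}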
Example #13
static struct qfq_class *
qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit,
    u_int32_t flags, u_int32_t maxsz, u_int32_t qid, classq_pkt_type_t ptype)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct qfq_group *grp;
	struct qfq_class *cl;
	u_int32_t w;			/* approximated weight */
	int i, tbl_i;

	IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq);

	if (qif->qif_classes >= qif->qif_maxclasses) {
		log(LOG_ERR, "%s: %s out of classes! (max %d)\n",
		    if_name(QFQIF_IFP(qif)), qfq_style(qif),
		    qif->qif_maxclasses);
		return (NULL);
	}

	ifq = qif->qif_ifq;
	ifp = QFQIF_IFP(qif);

	cl = zalloc(qfq_cl_zone);
	if (cl == NULL)
		return (NULL);

	bzero(cl, qfq_cl_size);

	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;  /* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype);
	cl->cl_qif = qif;
	cl->cl_flags = flags;
	cl->cl_handle = qid;

	/*
	 * Find a free slot in the class table.  If the slot matching
	 * the lower bits of qid is free, use this slot.  Otherwise,
	 * use the first free slot.
	 */
	i = qid % qif->qif_maxclasses;
	if (qif->qif_class_tbl[i] == NULL) {
		qif->qif_class_tbl[i] = cl;
	} else {
		for (i = 0; i < qif->qif_maxclasses; i++) {
			if (qif->qif_class_tbl[i] == NULL) {
				qif->qif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == qif->qif_maxclasses) {
			zfree(qfq_cl_zone, cl);
			return (NULL);
		}
	}
	tbl_i = i;	/* remember the slot actually used, for error unwind */

	w = weight;
	VERIFY(w > 0 && w <= QFQ_MAX_WEIGHT);
	cl->cl_lmax = maxsz;
	cl->cl_inv_w = (QFQ_ONE_FP / w);
	w = (QFQ_ONE_FP / cl->cl_inv_w);
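	/*
	 * recomputing w from the truncated inverse gives the weight the
	 * fixed-point representation can actually deliver; that effective
	 * value, not the requested one, is charged to the weight sum
	 */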
	VERIFY(qif->qif_wsum + w <= QFQ_MAX_WSUM);

	i = qfq_calc_index(cl, cl->cl_inv_w, cl->cl_lmax);
	VERIFY(i <= QFQ_MAX_INDEX);
	grp = qif->qif_groups[i];
	if (grp == NULL) {
		grp = _MALLOC(sizeof (*grp), M_DEVBUF, M_WAITOK|M_ZERO);
		if (grp != NULL) {
			grp->qfg_index = i;
			grp->qfg_slot_shift =
			    QFQ_MTU_SHIFT + QFQ_FRAC_BITS - (QFQ_MAX_INDEX - i);
			grp->qfg_slots = _MALLOC(sizeof (struct qfq_class *) *
			    qif->qif_maxslots, M_DEVBUF, M_WAITOK|M_ZERO);
			if (grp->qfg_slots == NULL) {
				log(LOG_ERR, "%s: %s unable to allocate group "
				    "slots for index %d\n", if_name(ifp),
				    qfq_style(qif), i);
			}
		} else {
			log(LOG_ERR, "%s: %s unable to allocate group for "
			    "qid=%d\n", if_name(ifp), qfq_style(qif),
			    cl->cl_handle);
		}
		if (grp == NULL || grp->qfg_slots == NULL) {
			/* undo the class table insertion done earlier */
			qif->qif_class_tbl[tbl_i] = NULL;
			if (grp != NULL)
				_FREE(grp, M_DEVBUF);
			zfree(qfq_cl_zone, cl);
			return (NULL);
		} else {
			qif->qif_groups[i] = grp;
		}
	}
	cl->cl_grp = grp;
	qif->qif_wsum += w;
	/* XXX cl->cl_S = qif->qif_V; ? */
	/* XXX compute qif->qif_i_wsum */

	qif->qif_classes++;

	if (flags & QFCF_DEFAULTCLASS)
		qif->qif_default = cl;

	if (flags & QFCF_SFB) {
		cl->cl_qflags = 0;
		if (flags & QFCF_ECN) {
			cl->cl_qflags |= SFBF_ECN;
		}
		if (flags & QFCF_FLOWCTL) {
			cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & QFCF_DELAYBASED) {
			cl->cl_qflags |= SFBF_DELAYBASED;
		}
		if (!(cl->cl_flags & QFCF_LAZY))
			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
		if (cl->cl_sfb != NULL || (cl->cl_flags & QFCF_LAZY))
			qtype(&cl->cl_q) = Q_SFB;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d grp=%d weight=%d "
		    "qlimit=%d flags=%b\n", if_name(ifp), qfq_style(qif),
		    cl->cl_handle, cl->cl_grp->qfg_index, weight, qlimit,
		    flags, QFCF_BITS);
	}

	return (cl);
}
Example #14
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, u_int32_t qlimit,
    int flags, u_int32_t qid)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* Sanitize flags unless internally configured */
	if (pif->pif_flags & PRIQIFF_ALTQ)
		flags &= PRCF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & PRCF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & PRCF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & PRCF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RED &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RIO &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_BLUE &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}

	ifq = pif->pif_ifq;
	ifp = PRIQIF_IFP(pif);

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (!qempty(&cl->cl_q))
			priq_purgeq(pif, cl, 0, NULL, NULL);
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	} else {
		cl = zalloc(priq_cl_zone);
		if (cl == NULL)
			return (NULL);

		bzero(cl, priq_cl_size);
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;  /* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit);
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

	if (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		cl->cl_qflags = 0;
		if (flags & PRCF_ECN) {
			if (flags & PRCF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & PRCF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & PRCF_FLOWCTL) {
			if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & PRCF_CLEARDSCP) {
			if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 *	events and recompute pkttime accordingly.
		 */
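		/* pkttime: time to transmit one MTU-sized packet, in nsec */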
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RED
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    qlimit(&cl->cl_q) * 10/100,
			    qlimit(&cl->cl_q) * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				qtype(&cl->cl_q) = Q_RED;
		}
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				qtype(&cl->cl_q) = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & PRCF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				qtype(&cl->cl_q) = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & PRCF_SFB) {
			if (!(cl->cl_flags & PRCF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & PRCF_LAZY))
				qtype(&cl->cl_q) = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, qlimit, flags, PRCF_BITS);
	}

	return (cl);
}
Example #15
/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
struct mbuf *
fairq_dequeue(struct fairq_if *fif, cqdq_op_t op)
{
	struct ifclassq *ifq = fif->fif_ifq;
	struct fairq_class *cl;
	struct fairq_class *best_cl;
	struct mbuf *best_m;
	struct mbuf *m;
	u_int64_t cur_time = read_machclk();
	u_int32_t best_scale;
	u_int32_t scale;
	int pri;
	int hit_limit;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (IFCQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		return (NULL);
	}

	if (fif->fif_poll_cache && op == CLASSQDQ_REMOVE) {
		best_cl = fif->fif_poll_cache;
		m = fairq_getq(best_cl, cur_time);
		fif->fif_poll_cache = NULL;
		if (m != NULL) {
			IFCQ_DEC_LEN(ifq);
			IFCQ_DEC_BYTES(ifq, m_pktlen(m));
			IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
			PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1, m_pktlen(m));
		}
	} else {
		best_cl = NULL;
		best_m = NULL;
		best_scale = 0xFFFFFFFFU;

		for (pri = fif->fif_maxpri; pri >= 0; pri--) {
			if ((cl = fif->fif_classes[pri]) == NULL)
				continue;
			if ((cl->cl_flags & FARF_HAS_PACKETS) == 0)
				continue;
			m = fairq_pollq(cl, cur_time, &hit_limit);
			if (m == NULL) {
				cl->cl_flags &= ~FARF_HAS_PACKETS;
				continue;
			}

			/*
			 * We can halt the search immediately if the queue
			 * did not hit its bandwidth limit.
			 */
			if (hit_limit == 0) {
				best_cl = cl;
				best_m = m;
				break;
			}

			/*
			 * Otherwise calculate the scale factor and select
			 * the queue with the lowest scale factor.  This
			 * apportions any unused bandwidth weighted by
			 * the relative bandwidth specification; e.g. a
			 * class running at 40% of its configured rate
			 * (scale 40) is preferred over one at 90%.
			 */
			scale = cl->cl_bw_current * 100 / cl->cl_bandwidth;
			if (scale < best_scale) {
				best_cl = cl;
				best_m = m;
				best_scale = scale;
			}
		}

		if (op == CLASSQDQ_POLL) {
			fif->fif_poll_cache = best_cl;
			m = best_m;
		} else if (best_cl != NULL) {
			m = fairq_getq(best_cl, cur_time);
			if (m != NULL) {
				IFCQ_DEC_LEN(ifq);
				IFCQ_DEC_BYTES(ifq, m_pktlen(m));
				IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
				PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1,
				    m_pktlen(m));
			}
		} else {
			m = NULL;
		}
	}
	return (m);
}
Example #16
static void
priq_purgeq(struct priq_if *pif, struct priq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t cnt = 0, len = 0, qlen;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if ((qlen = qlen(&cl->cl_q)) == 0) {
		VERIFY(!pktsched_bit_tst(cl->cl_pri, &pif->pif_bitmap));
		goto done;
	}

	/* become regular mutex before freeing mbufs */
	IFCQ_CONVERT_LOCK(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);

	if (cnt > 0) {
		VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

		PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
		IFCQ_DROP_ADD(ifq, cnt, len);

		VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
		IFCQ_LEN(ifq) -= cnt;

		if (qempty(&cl->cl_q))
			pktsched_bit_clr(cl->cl_pri, &pif->pif_bitmap);

		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
			    if_name(PRIQIF_IFP(pif)), priq_style(pif),
			    cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
			    cnt, len, flow);
		}
	}
done:
	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}
Example #17
static inline int
priq_addq(struct priq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct priq_if *pif = cl->cl_pif;
	struct ifclassq *ifq = pif->pif_ifq;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_addq(cl->cl_rio, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_addq(cl->cl_red, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_addq(cl->cl_blue, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb == NULL) {
			struct ifnet *ifp = PRIQIF_IFP(pif);

			VERIFY(cl->cl_flags & PRCF_LAZY);
			cl->cl_flags &= ~PRCF_LAZY;
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				qtype(&cl->cl_q) = Q_DROPTAIL;
				cl->cl_flags &= ~PRCF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    priq_style(pif), cl->cl_handle,
				    cl->cl_pri);
			} else if (pif->pif_throttle != IFNET_THROTTLE_OFF) {
				/* if there's pending throttling, set it */
				cqrq_throttle_t tr = { 1, pif->pif_throttle };
				int err = priq_throttle(pif, &tr);

				if (err == EALREADY)
					err = 0;
				if (err != 0) {
					tr.level = IFNET_THROTTLE_OFF;
					(void) priq_throttle(pif, &tr);
				}
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &cl->cl_q, m, t));
	} else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) {
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&cl->cl_q, m);

	return (0);
}
Example #18
int
priq_enqueue(struct priq_if *pif, struct priq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t pri;
	int len, ret;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(cl == NULL || cl->cl_pif == pif);

	if (cl == NULL) {
#if PF_ALTQ
		cl = priq_clh_to_clp(pif, t->pftag_qid);
#else /* !PF_ALTQ */
		cl = priq_clh_to_clp(pif, 0);
#endif /* !PF_ALTQ */
		if (cl == NULL) {
			cl = pif->pif_default;
			if (cl == NULL) {
				IFCQ_CONVERT_LOCK(ifq);
				m_freem(m);
				return (ENOBUFS);
			}
		}
	}
	pri = cl->cl_pri;
	VERIFY(pri < PRIQ_MAXPRI);

	len = m_pktlen(m);

	ret = priq_addq(cl, m, t);
	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
		} else {
			VERIFY(ret == CLASSQEQ_DROPPED ||
			    ret == CLASSQEQ_DROPPED_FC ||
			    ret == CLASSQEQ_DROPPED_SP);
			/* packet has been freed in priq_addq */
			PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
			IFCQ_DROP_ADD(ifq, 1, len);
			switch (ret) {
			case CLASSQEQ_DROPPED:
				return (ENOBUFS);
			case CLASSQEQ_DROPPED_FC:
				return (EQFULL);
			case CLASSQEQ_DROPPED_SP:
				return (EQSUSPENDED);
			}
			/* NOT REACHED */
		}
	}
	IFCQ_INC_LEN(ifq);
	IFCQ_INC_BYTES(ifq, len);

	/* class is now active; indicate it as such */
	if (!pktsched_bit_tst(pri, &pif->pif_bitmap))
		pktsched_bit_set(pri, &pif->pif_bitmap);

	/* successfully queued. */
	return (ret);
}
Example #19
static inline int
fairq_addq(struct fairq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct ifclassq *ifq = cl->cl_fif->fif_ifq;
	fairq_bucket_t *b;
	u_int32_t hash = m->m_pkthdr.pkt_flowid;
	u_int32_t hindex;
	u_int64_t bw;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * If the packet carries no flow ID to hash on, put it on the end
	 * of our queue.  XXX this can result in out-of-order delivery.
	 */
	if (hash == 0) {
		if (cl->cl_head)
			b = cl->cl_head->prev;
		else
			b = &cl->cl_buckets[0];
	} else {
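		/* nbuckets is a power of 2, so the mask is a cheap modulo */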
		hindex = (hash & cl->cl_nbucket_mask);
		b = &cl->cl_buckets[hindex];
	}

	/*
	 * Add the bucket to the end of the circular list of active buckets.
	 *
	 * As a special case we add the bucket to the beginning of the list
	 * instead of the end if it was not previously on the list and if
	 * its traffic is less then the hog level.
	 */
	if (b->in_use == 0) {
		b->in_use = 1;
		if (cl->cl_head == NULL) {
			cl->cl_head = b;
			b->next = b;
			b->prev = b;
		} else {
			b->next = cl->cl_head;
			b->prev = cl->cl_head->prev;
			b->prev->next = b;
			b->next->prev = b;

			if (b->bw_delta && cl->cl_hogs_m1) {
				bw = b->bw_bytes * machclk_freq / b->bw_delta;
				if (bw < cl->cl_hogs_m1)
					cl->cl_head = b;
			}
		}
	}

#if CLASSQ_RIO
	if (cl->cl_qtype == Q_RIO)
		return (rio_addq(cl->cl_rio, &b->queue, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (cl->cl_qtype == Q_RED)
		return (red_addq(cl->cl_red, &b->queue, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (cl->cl_qtype == Q_BLUE)
		return (blue_addq(cl->cl_blue, &b->queue, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (cl->cl_qtype == Q_SFB) {
		if (cl->cl_sfb == NULL) {
			struct ifnet *ifp = FAIRQIF_IFP(cl->cl_fif);

			VERIFY(cl->cl_flags & FARF_LAZY);
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    cl->cl_qlimit, cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				cl->cl_qtype = Q_DROPTAIL;
				cl->cl_flags &= ~FARF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    fairq_style(cl->cl_fif), cl->cl_handle,
				    cl->cl_pri);
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &b->queue, m, t));
	} else if (qlen(&b->queue) >= qlimit(&b->queue)) {
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & FARF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&b->queue, m);

	return (0);
}
Example #20
int
tcq_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags)
{
	struct ifnet *ifp = ifq->ifcq_ifp;
	struct tcq_class *cl0, *cl1, *cl2, *cl3;
	struct tcq_if *tif;
	u_int32_t maxlen = 0, qflags = 0;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);

	if (flags & PKTSCHEDF_QALG_RED)
		qflags |= TQCF_RED;
	if (flags & PKTSCHEDF_QALG_RIO)
		qflags |= TQCF_RIO;
	if (flags & PKTSCHEDF_QALG_BLUE)
		qflags |= TQCF_BLUE;
	if (flags & PKTSCHEDF_QALG_SFB)
		qflags |= TQCF_SFB;
	if (flags & PKTSCHEDF_QALG_ECN)
		qflags |= TQCF_ECN;
	if (flags & PKTSCHEDF_QALG_FLOWCTL)
		qflags |= TQCF_FLOWCTL;
	if (flags & PKTSCHEDF_QALG_DELAYBASED)
		qflags |= TQCF_DELAYBASED;

	tif = tcq_alloc(ifp, M_WAITOK, FALSE);
	if (tif == NULL)
		return (ENOMEM);

	if ((maxlen = IFCQ_MAXLEN(ifq)) == 0)
		maxlen = if_sndq_maxlen;

	if ((err = tcq_add_queue(tif, 0, maxlen,
	    qflags | TQCF_LAZY, SCIDX_BK, &cl0)) != 0)
		goto cleanup;

	if ((err = tcq_add_queue(tif, 1, maxlen,
	    qflags | TQCF_DEFAULTCLASS, SCIDX_BE, &cl1)) != 0)
		goto cleanup;

	if ((err = tcq_add_queue(tif, 2, maxlen,
	    qflags | TQCF_LAZY, SCIDX_VI, &cl2)) != 0)
		goto cleanup;

	if ((err = tcq_add_queue(tif, 3, maxlen,
	    qflags, SCIDX_VO, &cl3)) != 0)
		goto cleanup;

	err = ifclassq_attach(ifq, PKTSCHEDT_TCQ, tif,
	    tcq_enqueue_ifclassq, NULL, tcq_dequeue_tc_ifclassq,
	    tcq_request_ifclassq);

	/* cache these for faster lookup */
	if (err == 0) {
		/* Map {BK_SYS,BK} to TC_BK */
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].qid = SCIDX_BK;
		ifq->ifcq_disc_slots[SCIDX_BK_SYS].cl = cl0;

		ifq->ifcq_disc_slots[SCIDX_BK].qid = SCIDX_BK;
		ifq->ifcq_disc_slots[SCIDX_BK].cl = cl0;

		/* Map {BE,RD,OAM} to TC_BE */
		ifq->ifcq_disc_slots[SCIDX_BE].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_BE].cl = cl1;

		ifq->ifcq_disc_slots[SCIDX_RD].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_RD].cl = cl1;

		ifq->ifcq_disc_slots[SCIDX_OAM].qid = SCIDX_BE;
		ifq->ifcq_disc_slots[SCIDX_OAM].cl = cl1;

		/* Map {AV,RV,VI} to TC_VI */
		ifq->ifcq_disc_slots[SCIDX_AV].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_AV].cl = cl2;

		ifq->ifcq_disc_slots[SCIDX_RV].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_RV].cl = cl2;

		ifq->ifcq_disc_slots[SCIDX_VI].qid = SCIDX_VI;
		ifq->ifcq_disc_slots[SCIDX_VI].cl = cl2;

		/* Map {VO,CTL} to TC_VO */
		ifq->ifcq_disc_slots[SCIDX_VO].qid = SCIDX_VO;
		ifq->ifcq_disc_slots[SCIDX_VO].cl = cl3;

		ifq->ifcq_disc_slots[SCIDX_CTL].qid = SCIDX_VO;
		ifq->ifcq_disc_slots[SCIDX_CTL].cl = cl3;
	}

cleanup:
	if (err != 0)
		(void) tcq_destroy_locked(tif);

	return (err);
}