Example #1
0
/*
 * fairq_addq: enqueue mbuf "m" on class "cl", distributing flows across
 * the class's hash buckets via the packet's flow id.  Returns 0 on
 * success, or a CLASSQEQ_*/AQM return code from the underlying queue
 * discipline.  Called with the interface classq lock held; the lock may
 * be converted via IFCQ_CONVERT_LOCK() on the slow paths below.
 */
static inline int
fairq_addq(struct fairq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct ifclassq *ifq = cl->cl_fif->fif_ifq;
	fairq_bucket_t *b;
	u_int32_t hash = m->m_pkthdr.pkt_flowid;
	u_int32_t hindex;
	u_int64_t bw;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * If the packet doesn't have any keep state put it on the end of
	 * our queue.  XXX this can result in out of order delivery.
	 */
	if (hash == 0) {
		/* No flow id: use the tail bucket, or bucket 0 when idle. */
		if (cl->cl_head)
			b = cl->cl_head->prev;
		else
			b = &cl->cl_buckets[0];
	} else {
		/* cl_nbuckets is a power of 2, so masking selects a bucket. */
		hindex = (hash & cl->cl_nbucket_mask);
		b = &cl->cl_buckets[hindex];
	}

	/*
	 * Add the bucket to the end of the circular list of active buckets.
	 *
	 * As a special case we add the bucket to the beginning of the list
	 * instead of the end if it was not previously on the list and if
	 * its traffic is less then the hog level.
	 */
	if (b->in_use == 0) {
		b->in_use = 1;
		if (cl->cl_head == NULL) {
			/* First active bucket: circular list of just "b". */
			cl->cl_head = b;
			b->next = b;
			b->prev = b;
		} else {
			/* Insert before the head, i.e. at the list tail. */
			b->next = cl->cl_head;
			b->prev = cl->cl_head->prev;
			b->prev->next = b;
			b->next->prev = b;

			/*
			 * Non-hog flows (measured rate below cl_hogs_m1)
			 * become the new head so they are serviced sooner.
			 */
			if (b->bw_delta && cl->cl_hogs_m1) {
				bw = b->bw_bytes * machclk_freq / b->bw_delta;
				if (bw < cl->cl_hogs_m1)
					cl->cl_head = b;
			}
		}
	}

	/* Hand off to the configured queueing discipline, if any. */
#if CLASSQ_RIO
	if (cl->cl_qtype == Q_RIO)
		return (rio_addq(cl->cl_rio, &b->queue, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (cl->cl_qtype == Q_RED)
		return (red_addq(cl->cl_red, &b->queue, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (cl->cl_qtype == Q_BLUE)
		return (blue_addq(cl->cl_blue, &b->queue, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (cl->cl_qtype == Q_SFB) {
		if (cl->cl_sfb == NULL) {
			/* SFB allocation was deferred to first enqueue. */
			struct ifnet *ifp = FAIRQIF_IFP(cl->cl_fif);

			VERIFY(cl->cl_flags & FARF_LAZY);
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    cl->cl_qlimit, cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				cl->cl_qtype = Q_DROPTAIL;
				cl->cl_flags &= ~FARF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    fairq_style(cl->cl_fif), cl->cl_handle,
				    cl->cl_pri);
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &b->queue, m, t));
	} else if (qlen(&b->queue) >= qlimit(&b->queue)) {
		/* Drop-tail: bucket queue is full, free and report drop. */
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & FARF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&b->queue, m);

	return (0);
}
Example #2
0
/*
 * Create the FAIRQ class at priority "pri" on scheduler instance "fif",
 * or reconfigure the class already present at that priority (its queues
 * are purged and any AQM state destroyed before reuse).
 *
 * bandwidth and hogs_m1/lssc_m1 are given in bits per second and are
 * converted to bytes per second below; lssc_d and lssc_m2 are accepted
 * but currently unused.  nbuckets defaults to 256, is clamped to
 * FAIRQ_MAX_BUCKETS, and is rounded up to a power of 2.  Returns the
 * class, or NULL on bad parameters or allocation failure.  Called with
 * the interface classq lock held.
 */
static struct fairq_class *
fairq_class_create(struct fairq_if *fif, int pri, u_int32_t qlimit,
    u_int64_t bandwidth, u_int32_t nbuckets, int flags, u_int64_t hogs_m1,
    u_int64_t lssc_m1, u_int64_t lssc_d, u_int64_t lssc_m2, u_int32_t qid)
{
#pragma unused(lssc_d, lssc_m2)
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct fairq_class *cl;
	u_int32_t i;

	IFCQ_LOCK_ASSERT_HELD(fif->fif_ifq);

	/* Sanitize flags unless internally configured */
	if (fif->fif_flags & FAIRQIFF_ALTQ)
		flags &= FARF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & FARF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & FARF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & FARF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}
#endif /* !CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RED &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_RIO &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_BLUE &&
	    (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) != FARF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif));
		return (NULL);
	}

	/* Reject rates that would round to 0 bytes/sec after the /8 below. */
	if (bandwidth == 0 || (bandwidth / 8) == 0) {
		log(LOG_ERR, "%s: %s invalid data rate %llu\n",
		    if_name(FAIRQIF_IFP(fif)), fairq_style(fif), bandwidth);
		return (NULL);
	}

	if (nbuckets == 0)
		nbuckets = 256;
	if (nbuckets > FAIRQ_MAX_BUCKETS)
		nbuckets = FAIRQ_MAX_BUCKETS;
	/* enforce power-of-2 size (the XOR test holds only for powers of 2) */
	while ((nbuckets ^ (nbuckets - 1)) != ((nbuckets << 1) - 1))
		++nbuckets;

	ifq = fif->fif_ifq;
	ifp = FAIRQIF_IFP(fif);

	if ((cl = fif->fif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (cl->cl_head)
			fairq_purgeq(fif, cl, 0, NULL, NULL);
		/* Tear down any previously configured AQM before reuse. */
#if CLASSQ_RIO
		if (cl->cl_qtype == Q_RIO)
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (cl->cl_qtype == Q_RED)
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (cl->cl_qtype == Q_BLUE)
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		cl->cl_qtype = Q_DROPTAIL;
		cl->cl_qstate = QS_RUNNING;
	} else {
		cl = zalloc(fairq_cl_zone);
		if (cl == NULL)
			goto err_ret;
		bzero(cl, fairq_cl_size);
		cl->cl_nbuckets = nbuckets;
		/* Power-of-2 bucket count makes (hash & mask) a valid index. */
		cl->cl_nbucket_mask = nbuckets - 1;

		cl->cl_buckets = _MALLOC(sizeof (struct fairq_bucket) *
		    cl->cl_nbuckets, M_DEVBUF, M_WAITOK|M_ZERO);
		if (cl->cl_buckets == NULL)
			goto err_buckets;
		cl->cl_head = NULL;
	}

	fif->fif_classes[pri] = cl;
	if (flags & FARF_DEFAULTCLASS)
		fif->fif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;	/* use default */
	}
	cl->cl_qlimit = qlimit;
	/* Each flow bucket gets its own drop-tail queue of size qlimit. */
	for (i = 0; i < cl->cl_nbuckets; ++i) {
		_qinit(&cl->cl_buckets[i].queue, Q_DROPTAIL, qlimit);
	}
	cl->cl_bandwidth = bandwidth / 8;	/* cvt to bytes per second */
	cl->cl_qtype = Q_DROPTAIL;
	cl->cl_qstate = QS_RUNNING;
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > fif->fif_maxpri)
		fif->fif_maxpri = pri;
	cl->cl_fif = fif;
	cl->cl_handle = qid;
	cl->cl_hogs_m1 = hogs_m1 / 8;	/* cvt to bytes per second */
	cl->cl_lssc_m1 = lssc_m1 / 8;	/* NOT YET USED */
	cl->cl_bw_current = 0;

	if (flags & (FARF_RED|FARF_RIO|FARF_BLUE|FARF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		/* Translate class flags into discipline-specific AQM flags. */
		cl->cl_qflags = 0;
		if (flags & FARF_ECN) {
			if (flags & FARF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & FARF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & FARF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & FARF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & FARF_FLOWCTL) {
			if (flags & FARF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & FARF_CLEARDSCP) {
			if (flags & FARF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 *	events and recompute pkttime accordingly.
		 */
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RIO
		if (flags & FARF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				cl->cl_qtype = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (flags & FARF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    cl->cl_qlimit * 10/100,
			    cl->cl_qlimit * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				cl->cl_qtype = Q_RED;
		}
#endif /* CLASSQ_RED */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & FARF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				cl->cl_qtype = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & FARF_SFB) {
			/* With FARF_LAZY, SFB is allocated on first enqueue. */
			if (!(cl->cl_flags & FARF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    cl->cl_qlimit, cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & FARF_LAZY))
				cl->cl_qtype = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), fairq_style(fif),
		    cl->cl_handle, cl->cl_pri, cl->cl_qlimit, flags, FARF_BITS);
	}

	return (cl);

	/*
	 * Error paths.  With the current gotos, err_buckets is entered with
	 * cl_buckets == NULL and err_ret with cl == NULL, so the guards and
	 * AQM teardown below are defensive rather than reachable today.
	 */
err_buckets:
	if (cl->cl_buckets != NULL)
		_FREE(cl->cl_buckets, M_DEVBUF);
err_ret:
	if (cl != NULL) {
		if (cl->cl_qalg.ptr != NULL) {
#if CLASSQ_RIO
			if (cl->cl_qtype == Q_RIO)
				rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
			if (cl->cl_qtype == Q_RED)
				red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
			if (cl->cl_qtype == Q_BLUE)
				blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
			if (cl->cl_qtype == Q_SFB && cl->cl_sfb != NULL)
				sfb_destroy(cl->cl_sfb);
			cl->cl_qalg.ptr = NULL;
			cl->cl_qtype = Q_DROPTAIL;
			cl->cl_qstate = QS_RUNNING;
		}
		zfree(fairq_cl_zone, cl);
	}
	return (NULL);
}
Example #3
0
/*
 * priq_addq: enqueue mbuf "m" on class "cl"'s queue, delegating to the
 * active queueing discipline (RIO/RED/BLUE/SFB) when one is configured,
 * and falling back to plain drop-tail otherwise.  Returns 0 on success
 * or a CLASSQEQ_*/AQM return code.  Called with the interface classq
 * lock held; the lock may be converted on the slow paths.
 */
static inline int
priq_addq(struct priq_class *cl, struct mbuf *m, struct pf_mtag *t)
{
	struct priq_if *pif = cl->cl_pif;
	struct ifclassq *ifq = pif->pif_ifq;

	IFCQ_LOCK_ASSERT_HELD(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		return (rio_addq(cl->cl_rio, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		return (red_addq(cl->cl_red, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		return (blue_addq(cl->cl_blue, &cl->cl_q, m, t));
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q)) {
		if (cl->cl_sfb == NULL) {
			/* SFB allocation was deferred to first enqueue. */
			struct ifnet *ifp = PRIQIF_IFP(pif);

			VERIFY(cl->cl_flags & PRCF_LAZY);
			cl->cl_flags &= ~PRCF_LAZY;
			IFCQ_CONVERT_LOCK(ifq);

			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb == NULL) {
				/* fall back to droptail */
				qtype(&cl->cl_q) = Q_DROPTAIL;
				cl->cl_flags &= ~PRCF_SFB;
				cl->cl_qflags &= ~(SFBF_ECN | SFBF_FLOWCTL);

				log(LOG_ERR, "%s: %s SFB lazy allocation "
				    "failed for qid=%d pri=%d, falling back "
				    "to DROPTAIL\n", if_name(ifp),
				    priq_style(pif), cl->cl_handle,
				    cl->cl_pri);
			} else if (pif->pif_throttle != IFNET_THROTTLE_OFF) {
				/* if there's pending throttling, set it */
				cqrq_throttle_t tr = { 1, pif->pif_throttle };
				int err = priq_throttle(pif, &tr);

				if (err == EALREADY)
					err = 0;
				if (err != 0) {
					/* Could not apply; force throttle off. */
					tr.level = IFNET_THROTTLE_OFF;
					(void) priq_throttle(pif, &tr);
				}
			}
		}
		if (cl->cl_sfb != NULL)
			return (sfb_addq(cl->cl_sfb, &cl->cl_q, m, t));
	} else if (qlen(&cl->cl_q) >= qlimit(&cl->cl_q)) {
		/* Drop-tail: class queue is full, free and report drop. */
		IFCQ_CONVERT_LOCK(ifq);
		m_freem(m);
		return (CLASSQEQ_DROPPED);
	}

#if PF_ECN
	if (cl->cl_flags & PRCF_CLEARDSCP)
		write_dsfield(m, t, 0);
#endif /* PF_ECN */

	_addq(&cl->cl_q, m);

	return (0);
}
Example #4
0
/*
 * Create (and link in) a new QFQ class on scheduler instance "qif".
 *
 * weight  - service share, 1..QFQ_MAX_WEIGHT (rounded to the closest
 *           representable inverse weight below)
 * qlimit  - per-class queue limit; 0 selects the ifclassq/default limit
 * flags   - QFCF_* class flags (SFB/ECN/flow-control/lazy/default)
 * maxsz   - maximum packet size, used to compute the group index
 * qid     - caller-chosen queue id; also seeds the class table slot
 * ptype   - classq packet type passed through to _qinit()
 *
 * Returns the new class, or NULL on failure (class table full, group or
 * class allocation failure).  Called with the interface classq lock held.
 *
 * Fix: the group-allocation failure path used to clear
 * qif_class_tbl[qid % qif_maxclasses] unconditionally.  When the class
 * had been inserted into a fallback slot (first-free-slot search), that
 * cleared the wrong entry: it could NULL out another class's slot and
 * left a dangling pointer to the freed class in the table.  The slot
 * actually used is now recorded and cleared on failure.
 */
static struct qfq_class *
qfq_class_create(struct qfq_if *qif, u_int32_t weight, u_int32_t qlimit,
    u_int32_t flags, u_int32_t maxsz, u_int32_t qid, classq_pkt_type_t ptype)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct qfq_group *grp;
	struct qfq_class *cl;
	u_int32_t w;			/* approximated weight */
	int i;
	int tbl_idx;			/* class table slot holding cl */

	IFCQ_LOCK_ASSERT_HELD(qif->qif_ifq);

	if (qif->qif_classes >= qif->qif_maxclasses) {
		log(LOG_ERR, "%s: %s out of classes! (max %d)\n",
		    if_name(QFQIF_IFP(qif)), qfq_style(qif),
		    qif->qif_maxclasses);
		return (NULL);
	}

	ifq = qif->qif_ifq;
	ifp = QFQIF_IFP(qif);

	cl = zalloc(qfq_cl_zone);
	if (cl == NULL)
		return (NULL);

	bzero(cl, qfq_cl_size);

	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;  /* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit, ptype);
	cl->cl_qif = qif;
	cl->cl_flags = flags;
	cl->cl_handle = qid;

	/*
	 * Find a free slot in the class table.  If the slot matching
	 * the lower bits of qid is free, use this slot.  Otherwise,
	 * use the first free slot.  Record the slot actually used so
	 * the failure path below can undo exactly this insertion.
	 */
	i = qid % qif->qif_maxclasses;
	if (qif->qif_class_tbl[i] == NULL) {
		qif->qif_class_tbl[i] = cl;
	} else {
		for (i = 0; i < qif->qif_maxclasses; i++) {
			if (qif->qif_class_tbl[i] == NULL) {
				qif->qif_class_tbl[i] = cl;
				break;
			}
		}
		if (i == qif->qif_maxclasses) {
			zfree(qfq_cl_zone, cl);
			return (NULL);
		}
	}
	tbl_idx = i;

	w = weight;
	VERIFY(w > 0 && w <= QFQ_MAX_WEIGHT);
	cl->cl_lmax = maxsz;
	/* Round the weight to the closest representable inverse weight. */
	cl->cl_inv_w = (QFQ_ONE_FP / w);
	w = (QFQ_ONE_FP / cl->cl_inv_w);
	VERIFY(qif->qif_wsum + w <= QFQ_MAX_WSUM);

	/* Map (inverse weight, max packet size) to a scheduling group. */
	i = qfq_calc_index(cl, cl->cl_inv_w, cl->cl_lmax);
	VERIFY(i <= QFQ_MAX_INDEX);
	grp = qif->qif_groups[i];
	if (grp == NULL) {
		/* First class in this group; allocate the group lazily. */
		grp = _MALLOC(sizeof (*grp), M_DEVBUF, M_WAITOK|M_ZERO);
		if (grp != NULL) {
			grp->qfg_index = i;
			grp->qfg_slot_shift =
			    QFQ_MTU_SHIFT + QFQ_FRAC_BITS - (QFQ_MAX_INDEX - i);
			grp->qfg_slots = _MALLOC(sizeof (struct qfq_class *) *
			    qif->qif_maxslots, M_DEVBUF, M_WAITOK|M_ZERO);
			if (grp->qfg_slots == NULL) {
				log(LOG_ERR, "%s: %s unable to allocate group "
				    "slots for index %d\n", if_name(ifp),
				    qfq_style(qif), i);
			}
		} else {
			log(LOG_ERR, "%s: %s unable to allocate group for "
			    "qid=%d\n", if_name(ifp), qfq_style(qif),
			    cl->cl_handle);
		}
		if (grp == NULL || grp->qfg_slots == NULL) {
			/*
			 * Undo the class table insertion using the recorded
			 * slot; cl may have landed in a fallback slot, not
			 * (qid % qif_maxclasses).
			 */
			qif->qif_class_tbl[tbl_idx] = NULL;
			if (grp != NULL)
				_FREE(grp, M_DEVBUF);
			zfree(qfq_cl_zone, cl);
			return (NULL);
		} else {
			qif->qif_groups[i] = grp;
		}
	}
	cl->cl_grp = grp;
	qif->qif_wsum += w;
	/* XXX cl->cl_S = qif->qif_V; ? */
	/* XXX compute qif->qif_i_wsum */

	qif->qif_classes++;

	if (flags & QFCF_DEFAULTCLASS)
		qif->qif_default = cl;

	if (flags & QFCF_SFB) {
		/* Translate class flags into SFB AQM flags. */
		cl->cl_qflags = 0;
		if (flags & QFCF_ECN) {
			cl->cl_qflags |= SFBF_ECN;
		}
		if (flags & QFCF_FLOWCTL) {
			cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & QFCF_DELAYBASED) {
			cl->cl_qflags |= SFBF_DELAYBASED;
		}
		/* With QFCF_LAZY, SFB is allocated on first enqueue. */
		if (!(cl->cl_flags & QFCF_LAZY))
			cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
			    qlimit(&cl->cl_q), cl->cl_qflags);
		if (cl->cl_sfb != NULL || (cl->cl_flags & QFCF_LAZY))
			qtype(&cl->cl_q) = Q_SFB;
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d grp=%d weight=%d "
		    "qlimit=%d flags=%b\n", if_name(ifp), qfq_style(qif),
		    cl->cl_handle, cl->cl_grp->qfg_index, weight, qlimit,
		    flags, QFCF_BITS);
	}

	return (cl);
}
Example #5
0
/*
 * Create the PRIQ class at priority "pri" on scheduler instance "pif",
 * or reconfigure the class already present at that priority (its queue
 * is purged and any AQM state destroyed before reuse).
 *
 * qlimit of 0 (or above the ifclassq maximum) selects the ifclassq /
 * default limit.  flags select at most one of RED/RIO/BLUE/SFB plus
 * ECN/flow-control/DSCP-clear modifiers.  Returns the class, or NULL
 * on bad parameters or allocation failure.  Called with the interface
 * classq lock held.
 */
static struct priq_class *
priq_class_create(struct priq_if *pif, int pri, u_int32_t qlimit,
    int flags, u_int32_t qid)
{
	struct ifnet *ifp;
	struct ifclassq *ifq;
	struct priq_class *cl;

	IFCQ_LOCK_ASSERT_HELD(pif->pif_ifq);

	/* Sanitize flags unless internally configured */
	if (pif->pif_flags & PRIQIFF_ALTQ)
		flags &= PRCF_USERFLAGS;

#if !CLASSQ_RED
	if (flags & PRCF_RED) {
		log(LOG_ERR, "%s: %s RED not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RED */

#if !CLASSQ_RIO
	if (flags & PRCF_RIO) {
		log(LOG_ERR, "%s: %s RIO not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_RIO */

#if !CLASSQ_BLUE
	if (flags & PRCF_BLUE) {
		log(LOG_ERR, "%s: %s BLUE not available!\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}
#endif /* !CLASSQ_BLUE */

	/* These are mutually exclusive */
	if ((flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RED &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_RIO &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_BLUE &&
	    (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) != PRCF_SFB) {
		log(LOG_ERR, "%s: %s more than one RED|RIO|BLUE|SFB\n",
		    if_name(PRIQIF_IFP(pif)), priq_style(pif));
		return (NULL);
	}

	ifq = pif->pif_ifq;
	ifp = PRIQIF_IFP(pif);

	if ((cl = pif->pif_classes[pri]) != NULL) {
		/* modify the class instead of creating a new one */
		if (!qempty(&cl->cl_q))
			priq_purgeq(pif, cl, 0, NULL, NULL);
		/* Tear down any previously configured AQM before reuse. */
#if CLASSQ_RIO
		if (q_is_rio(&cl->cl_q))
			rio_destroy(cl->cl_rio);
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
		if (q_is_red(&cl->cl_q))
			red_destroy(cl->cl_red);
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
		if (q_is_blue(&cl->cl_q))
			blue_destroy(cl->cl_blue);
#endif /* CLASSQ_BLUE */
		if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
			sfb_destroy(cl->cl_sfb);
		cl->cl_qalg.ptr = NULL;
		qtype(&cl->cl_q) = Q_DROPTAIL;
		qstate(&cl->cl_q) = QS_RUNNING;
	} else {
		cl = zalloc(priq_cl_zone);
		if (cl == NULL)
			return (NULL);

		bzero(cl, priq_cl_size);
	}

	pif->pif_classes[pri] = cl;
	if (flags & PRCF_DEFAULTCLASS)
		pif->pif_default = cl;
	if (qlimit == 0 || qlimit > IFCQ_MAXLEN(ifq)) {
		qlimit = IFCQ_MAXLEN(ifq);
		if (qlimit == 0)
			qlimit = DEFAULT_QLIMIT;  /* use default */
	}
	_qinit(&cl->cl_q, Q_DROPTAIL, qlimit);
	cl->cl_flags = flags;
	cl->cl_pri = pri;
	if (pri > pif->pif_maxpri)
		pif->pif_maxpri = pri;
	cl->cl_pif = pif;
	cl->cl_handle = qid;

	if (flags & (PRCF_RED|PRCF_RIO|PRCF_BLUE|PRCF_SFB)) {
#if CLASSQ_RED || CLASSQ_RIO
		u_int64_t ifbandwidth = ifnet_output_linkrate(ifp);
		int pkttime;
#endif /* CLASSQ_RED || CLASSQ_RIO */

		/* Translate class flags into discipline-specific AQM flags. */
		cl->cl_qflags = 0;
		if (flags & PRCF_ECN) {
			if (flags & PRCF_BLUE)
				cl->cl_qflags |= BLUEF_ECN;
			else if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_ECN;
			else if (flags & PRCF_RED)
				cl->cl_qflags |= REDF_ECN;
			else if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_ECN;
		}
		if (flags & PRCF_FLOWCTL) {
			if (flags & PRCF_SFB)
				cl->cl_qflags |= SFBF_FLOWCTL;
		}
		if (flags & PRCF_CLEARDSCP) {
			if (flags & PRCF_RIO)
				cl->cl_qflags |= RIOF_CLEARDSCP;
		}
#if CLASSQ_RED || CLASSQ_RIO
		/*
		 * XXX: RED & RIO should be watching link speed and MTU
		 *	events and recompute pkttime accordingly.
		 */
		if (ifbandwidth < 8)
			pkttime = 1000 * 1000 * 1000; /* 1 sec */
		else
			pkttime = (int64_t)ifp->if_mtu * 1000 * 1000 * 1000 /
			    (ifbandwidth / 8);

		/* Test for exclusivity {RED,RIO,BLUE,SFB} was done above */
#if CLASSQ_RED
		if (flags & PRCF_RED) {
			cl->cl_red = red_alloc(ifp, 0, 0,
			    qlimit(&cl->cl_q) * 10/100,
			    qlimit(&cl->cl_q) * 30/100,
			    cl->cl_qflags, pkttime);
			if (cl->cl_red != NULL)
				qtype(&cl->cl_q) = Q_RED;
		}
#endif /* CLASSQ_RED */
#if CLASSQ_RIO
		if (flags & PRCF_RIO) {
			cl->cl_rio =
			    rio_alloc(ifp, 0, NULL, cl->cl_qflags, pkttime);
			if (cl->cl_rio != NULL)
				qtype(&cl->cl_q) = Q_RIO;
		}
#endif /* CLASSQ_RIO */
#endif /* CLASSQ_RED || CLASSQ_RIO */
#if CLASSQ_BLUE
		if (flags & PRCF_BLUE) {
			cl->cl_blue = blue_alloc(ifp, 0, 0, cl->cl_qflags);
			if (cl->cl_blue != NULL)
				qtype(&cl->cl_q) = Q_BLUE;
		}
#endif /* CLASSQ_BLUE */
		if (flags & PRCF_SFB) {
			/* With PRCF_LAZY, SFB is allocated on first enqueue. */
			if (!(cl->cl_flags & PRCF_LAZY))
				cl->cl_sfb = sfb_alloc(ifp, cl->cl_handle,
				    qlimit(&cl->cl_q), cl->cl_qflags);
			if (cl->cl_sfb != NULL || (cl->cl_flags & PRCF_LAZY))
				qtype(&cl->cl_q) = Q_SFB;
		}
	}

	if (pktsched_verbose) {
		log(LOG_DEBUG, "%s: %s created qid=%d pri=%d qlimit=%d "
		    "flags=%b\n", if_name(ifp), priq_style(pif),
		    cl->cl_handle, cl->cl_pri, qlimit, flags, PRCF_BITS);
	}

	return (cl);
}