Example #1
/*
 * priq_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
priq_dequeue(struct ifaltq *ifq, int op)
{
	struct priq_if	*pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	struct mbuf *m;
	int pri;

	IFQ_LOCK_ASSERT(ifq);

	if (IFQ_IS_EMPTY(ifq))
		/* no packet in the queue */
		return (NULL);

	for (pri = pif->pif_maxpri;  pri >= 0; pri--) {
		if ((cl = pif->pif_classes[pri]) != NULL &&
		    !qempty(cl->cl_q)) {
			if (op == ALTDQ_POLL)
				return (priq_pollq(cl));

			m = priq_getq(cl);
			if (m != NULL) {
				IFQ_DEC_LEN(ifq);
				if (qempty(cl->cl_q))
					cl->cl_period++;
				PKTCNTR_ADD(&cl->cl_xmitcnt, m_pktlen(m));
			}
			return (m);
		}
	}
	return (NULL);
}
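A minimal sketch, assuming a conventional BSD driver transmit loop, of how the ALTDQ_POLL/ALTDQ_REMOVE contract above is exercised from the driver side: IFQ_POLL() peeks at the head packet so the driver can check transmit resources first, and the IFQ_DEQUEUE() that follows must hand back the same mbuf. example_start() and example_tx_ready() are hypothetical names.

static void
example_start(struct ifnet *ifp)
{
	struct mbuf *m;

	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);	/* peek; nothing is removed */
		if (m == NULL)
			break;
		if (!example_tx_ready(ifp))	/* hypothetical capacity check */
			break;			/* packet stays on the queue */
		IFQ_DEQUEUE(&ifp->if_snd, m);	/* must return the same mbuf */
		/* ... hand m to the hardware ring ... */
	}
}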
Example #2
static int
codel_request(struct ifaltq *ifq, int req, void *arg)
{
	struct codel_if	*cif = (struct codel_if *)ifq->altq_disc;
	struct mbuf *m;

	IFQ_LOCK_ASSERT(ifq);

	switch (req) {
	case ALTRQ_PURGE:
		if (!ALTQ_IS_ENABLED(cif->cif_ifq))
			break;

		if (qempty(cif->cl_q))
			break;

		while ((m = _getq(cif->cl_q)) != NULL) {
			PKTCNTR_ADD(&cif->cl_stats.cl_dropcnt, m_pktlen(m));
			m_freem(m);
			IFQ_DEC_LEN(cif->cif_ifq);
		}
		cif->cif_ifq->ifq_len = 0;
		break;
	}

	return (0);
}
Example #3
int
fairq_enqueue(struct fairq_if *fif, struct fairq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
	struct ifclassq *ifq = fif->fif_ifq;
	int len, ret;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(cl == NULL || cl->cl_fif == fif);

	if (cl == NULL) {
#if PF_ALTQ
		cl = fairq_clh_to_clp(fif, t->pftag_qid);
#else /* !PF_ALTQ */
		cl = fairq_clh_to_clp(fif, 0);
#endif /* !PF_ALTQ */
		if (cl == NULL) {
			cl = fif->fif_default;
			if (cl == NULL) {
				IFCQ_CONVERT_LOCK(ifq);
				m_freem(m);
				return (ENOBUFS);
			}
		}
	}

	cl->cl_flags |= FARF_HAS_PACKETS;
	len = m_pktlen(m);

	ret = fairq_addq(cl, m, t);
	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
		} else {
			VERIFY(ret == CLASSQEQ_DROPPED ||
			    ret == CLASSQEQ_DROPPED_FC ||
			    ret == CLASSQEQ_DROPPED_SP);

			/* packet has been freed in fairq_addq */
			PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
			IFCQ_DROP_ADD(ifq, 1, len);
			switch (ret) {
			case CLASSQEQ_DROPPED:
				return (ENOBUFS);
			case CLASSQEQ_DROPPED_FC:
				return (EQFULL);
			case CLASSQEQ_DROPPED_SP:
				return (EQSUSPENDED);
			}
			/* NOT REACHED */
		}
	}
	IFCQ_INC_LEN(ifq);
	IFCQ_INC_BYTES(ifq, len);

	/* successfully queued. */
	return (ret);
}
Example #4
static void
tcq_purgeq(struct tcq_if *tif, struct tcq_class *cl, u_int32_t flow,
    u_int32_t *packets, u_int32_t *bytes)
{
	struct ifclassq *ifq = tif->tif_ifq;
	u_int32_t cnt = 0, len = 0, qlen;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if ((qlen = qlen(&cl->cl_q)) == 0)
		goto done;

	/* become regular mutex before freeing mbufs */
	IFCQ_CONVERT_LOCK(ifq);

#if CLASSQ_RIO
	if (q_is_rio(&cl->cl_q))
		rio_purgeq(cl->cl_rio, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RIO */
#if CLASSQ_RED
	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_RED */
#if CLASSQ_BLUE
	if (q_is_blue(&cl->cl_q))
		blue_purgeq(cl->cl_blue, &cl->cl_q, flow, &cnt, &len);
	else
#endif /* CLASSQ_BLUE */
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);

	if (cnt > 0) {
		VERIFY(qlen(&cl->cl_q) == (qlen - cnt));

		PKTCNTR_ADD(&cl->cl_dropcnt, cnt, len);
		IFCQ_DROP_ADD(ifq, cnt, len);

		VERIFY(((signed)IFCQ_LEN(ifq) - cnt) >= 0);
		IFCQ_LEN(ifq) -= cnt;

		if (pktsched_verbose) {
			log(LOG_DEBUG, "%s: %s purge qid=%d pri=%d "
			    "qlen=[%d,%d] cnt=%d len=%d flow=0x%x\n",
			    if_name(TCQIF_IFP(tif)), tcq_style(tif),
			    cl->cl_handle, cl->cl_pri, qlen, qlen(&cl->cl_q),
			    cnt, len, flow);
		}
	}
done:
	if (packets != NULL)
		*packets = cnt;
	if (bytes != NULL)
		*bytes = len;
}
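Note how the conditional AQM dispatch above composes: every #if arm ends with a bare else, so whatever subset of RIO/RED/BLUE is compiled in collapses into one if/else ladder that falls through to _flushq_flow(). With only CLASSQ_RED defined, for instance, the preprocessed code reduces to:

	if (q_is_red(&cl->cl_q))
		red_purgeq(cl->cl_red, &cl->cl_q, flow, &cnt, &len);
	else
	if (q_is_sfb(&cl->cl_q) && cl->cl_sfb != NULL)
		sfb_purgeq(cl->cl_sfb, &cl->cl_q, flow, &cnt, &len);
	else
		_flushq_flow(&cl->cl_q, flow, &cnt, &len);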
Example #5
/*
 * priq_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
priq_enqueue(struct ifaltq_subque *ifsq, struct mbuf *m,
    struct altq_pktattr *pktattr)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	int error;
	int len;

	if (ifsq_get_index(ifsq) != PRIQ_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		m_freem(m);
		return ENOBUFS;
	}

	crit_enter();

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		if_printf(ifq->altq_ifp, "altq: packet does not have pkthdr\n");
		m_freem(m);
		error = ENOBUFS;
		goto done;
	}

	if (m->m_pkthdr.fw_flags & PF_MBUF_STRUCTURE)
		cl = clh_to_clp(pif, m->m_pkthdr.pf.qid);
	else
		cl = NULL;
	if (cl == NULL) {
		cl = pif->pif_default;
		if (cl == NULL) {
			m_freem(m);
			error = ENOBUFS;
			goto done;
		}
	}
	cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (priq_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in priq_addq. */
		PKTCNTR_ADD(&cl->cl_dropcnt, len);
		error = ENOBUFS;
		goto done;
	}
	ifsq->ifq_len++;
	error = 0;
done:
	crit_exit();
	return (error);
}
Example #6
static int
cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct rm_class	*cl;
	struct m_tag	*t;
	int		 len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
#if defined(__NetBSD__) || defined(__OpenBSD__)\
    || (defined(__FreeBSD__) && __FreeBSD_version >= 501113)
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
#else
		printf("altq: packet for %s%d does not have pkthdr\n",
		    ifq->altq_ifp->if_name, ifq->altq_ifp->if_unit);
#endif
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = m_tag_find(m, PACKET_TAG_PF_QID, NULL)) != NULL)
		cl = clh_to_clp(cbqp, ((struct altq_tag *)(t+1))->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL) {
		cl = cbqp->ifnp.default_;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->pktattr_ = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->pktattr_ = NULL;
	len = m_pktlen(m);
	if (rmc_queue_packet(cl, m) != 0) {
		/* drop occurred.  some mbuf was freed in rmc_queue_packet. */
		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
		return (ENOBUFS);
	}

	/* successfully queued. */
	++cbqp->cbq_qlen;
	IFQ_INC_LEN(ifq);
	return (0);
}
Example #7
/*
 * priq_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
priq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct priq_if	*pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	struct pf_mtag *t;
	int len;

	IFQ_LOCK_ASSERT(ifq);

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
#if defined(__NetBSD__) || defined(__OpenBSD__)\
    || (defined(__FreeBSD__) && __FreeBSD_version >= 501113)
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
#else
		printf("altq: packet for %s%d does not have pkthdr\n",
		    ifq->altq_ifp->if_name, ifq->altq_ifp->if_unit);
#endif
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = pf_find_mtag(m)) != NULL)
		cl = clh_to_clp(pif, t->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL) {
		cl = pif->pif_default;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (priq_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in priq_addq. */
		PKTCNTR_ADD(&cl->cl_dropcnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);

	/* successfully queued. */
	return (0);
}
Example #8
static void
priq_purgeq(struct priq_class *cl)
{
	struct mbuf *m;

	if (qempty(cl->cl_q))
		return;

	while ((m = _getq(cl->cl_q)) != NULL) {
		PKTCNTR_ADD(&cl->cl_dropcnt, m_pktlen(m));
		m_freem(m);
	}
	KKASSERT(qlen(cl->cl_q) == 0);
}
Example #9
/*
 * priq_enqueue is an enqueue function to be registered to
 * (*altq_enqueue) in struct ifaltq.
 */
static int
priq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	struct priq_if	*pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	struct m_tag *t;
	int len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	cl = NULL;
	if ((t = m_tag_find(m, PACKET_TAG_ALTQ_QID, NULL)) != NULL)
		cl = clh_to_clp(pif, ((struct altq_tag *)(t+1))->qid);
#ifdef ALTQ3_COMPAT
	else if ((ifq->altq_flags & ALTQF_CLASSIFY) && pktattr != NULL)
		cl = pktattr->pattr_class;
#endif
	if (cl == NULL) {
		cl = pif->pif_default;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
	}
#ifdef ALTQ3_COMPAT
	if (pktattr != NULL)
		cl->cl_pktattr = pktattr;  /* save proto hdr used by ECN */
	else
#endif
		cl->cl_pktattr = NULL;
	len = m_pktlen(m);
	if (priq_addq(cl, m) != 0) {
		/* drop occurred.  mbuf was freed in priq_addq. */
		PKTCNTR_ADD(&cl->cl_dropcnt, len);
		return (ENOBUFS);
	}
	IFQ_INC_LEN(ifq);

	/* successfully queued. */
	return (0);
}
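The m_tag_find() lookup above depends on the tag payload sitting directly after the struct m_tag header, hence the (struct altq_tag *)(t + 1) cast. A hedged sketch of the producing side, i.e. how a classifier could attach the qid tag this function consumes; attach_qid_tag() is a hypothetical helper, while m_tag_get()/m_tag_prepend() are the standard mbuf tag calls:

static int
attach_qid_tag(struct mbuf *m, u_int32_t qid)
{
	struct m_tag *t;
	struct altq_tag *atag;

	t = m_tag_get(PACKET_TAG_ALTQ_QID, sizeof(*atag), M_NOWAIT);
	if (t == NULL)
		return (ENOMEM);
	atag = (struct altq_tag *)(t + 1);	/* payload follows the header */
	bzero(atag, sizeof(*atag));		/* fields besides qid unused here */
	atag->qid = qid;
	m_tag_prepend(m, t);
	return (0);
}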
Example #10
/*
 * priq_dequeue is a dequeue function to be registered to
 * (*altq_dequeue) in struct ifaltq.
 *
 * note: ALTDQ_POLL returns the next packet without removing the packet
 *	from the queue.  ALTDQ_REMOVE is a normal dequeue operation.
 *	ALTDQ_REMOVE must return the same packet if called immediately
 *	after ALTDQ_POLL.
 */
static struct mbuf *
priq_dequeue(struct ifaltq_subque *ifsq, struct mbuf *mpolled, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct priq_if *pif = (struct priq_if *)ifq->altq_disc;
	struct priq_class *cl;
	struct mbuf *m;
	int pri;

	if (ifsq_get_index(ifsq) != PRIQ_SUBQ_INDEX) {
		/*
		 * Race happened, the unrelated subqueue was
		 * picked during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return NULL;
	}

	if (ifsq_is_empty(ifsq)) {
		/* no packet in the queue */
		KKASSERT(mpolled == NULL);
		return (NULL);
	}

	crit_enter();
	m = NULL;
	for (pri = pif->pif_maxpri;  pri >= 0; pri--) {
		if ((cl = pif->pif_classes[pri]) != NULL && !qempty(cl->cl_q)) {
			if (op == ALTDQ_POLL) {
				m = priq_pollq(cl);
				break;
			}

			m = priq_getq(cl);
			if (m != NULL) {
				ifsq->ifq_len--;
				if (qempty(cl->cl_q))
					cl->cl_period++;
				PKTCNTR_ADD(&cl->cl_xmitcnt, m_pktlen(m));
			}
			break;
		}
	}
	crit_exit();
	KKASSERT(mpolled == NULL || mpolled == m);
	return (m);
}
Example #11
/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
struct mbuf *
priq_dequeue(struct priq_if *pif, cqdq_op_t op)
{
	struct ifclassq *ifq = pif->pif_ifq;
	struct priq_class *cl;
	struct mbuf *m;
	u_int32_t pri, len;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (pif->pif_bitmap == 0) {
		/* no active class; nothing to dequeue */
		return (NULL);
	}
	VERIFY(!IFCQ_IS_EMPTY(ifq));

	pri = pktsched_fls(pif->pif_bitmap) - 1;	/* zero based */
	VERIFY(pri < PRIQ_MAXPRI);
	cl = pif->pif_classes[pri];
	VERIFY(cl != NULL && !qempty(&cl->cl_q));

	if (op == CLASSQDQ_POLL)
		return (priq_pollq(cl));

	m = priq_getq(cl);
	VERIFY(m != NULL);	/* qalg must be work conserving */
	len = m_pktlen(m);

	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, len);
	if (qempty(&cl->cl_q)) {
		cl->cl_period++;
		/* class is now inactive; indicate it as such */
		pktsched_bit_clr(pri, &pif->pif_bitmap);
	}
	PKTCNTR_ADD(&cl->cl_xmitcnt, 1, len);
	IFCQ_XMIT_ADD(ifq, 1, len);

	return (m);
}
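The bitmap lookup above replaces the descending for-loop of the earlier priq_dequeue variants: pktsched_fls() returns the 1-based index of the most-significant set bit, so the highest active priority is found in constant time. A small userland illustration, with fls32() standing in for pktsched_fls():

#include <stdio.h>
#include <stdint.h>

/* stand-in for pktsched_fls(): 1-based index of the MSB, 0 for 0 */
static uint32_t
fls32(uint32_t v)
{
	return ((v == 0) ? 0 : 32 - __builtin_clz(v));
}

int
main(void)
{
	uint32_t bitmap = 0x24;	/* classes at pri 2 and pri 5 are active */

	/* mirrors: pri = pktsched_fls(pif->pif_bitmap) - 1 */
	printf("highest active pri = %u\n", fls32(bitmap) - 1);	/* 5 */
	return (0);
}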
Example #12
static struct mbuf *
codel_dequeue(struct ifaltq *ifq, int op)
{
	struct codel_if *cif = (struct codel_if *)ifq->altq_disc;
	struct mbuf *m;

	IFQ_LOCK_ASSERT(ifq);

	if (IFQ_IS_EMPTY(ifq))
		return (NULL);

	if (op == ALTDQ_POLL)
		return (qhead(cif->cl_q));

	m = codel_getq(&cif->codel, cif->cl_q);
	if (m != NULL) {
		IFQ_DEC_LEN(ifq);
		PKTCNTR_ADD(&cif->cl_stats.cl_xmitcnt, m_pktlen(m));
		return (m);
	}

	return (NULL);
}
Example #13
static int
cbq_enqueue(struct ifaltq *ifq, struct mbuf *m, struct altq_pktattr *pktattr)
{
	cbq_state_t	*cbqp = (cbq_state_t *)ifq->altq_disc;
	struct rm_class	*cl;
	int		 len;

	/* grab class set by classifier */
	if ((m->m_flags & M_PKTHDR) == 0) {
		/* should not happen */
		printf("altq: packet for %s does not have pkthdr\n",
		    ifq->altq_ifp->if_xname);
		m_freem(m);
		return (ENOBUFS);
	}
	if ((cl = clh_to_clp(cbqp, m->m_pkthdr.pf.qid)) == NULL) {
		cl = cbqp->ifnp.default_;
		if (cl == NULL) {
			m_freem(m);
			return (ENOBUFS);
		}
		cl->pktattr_ = NULL;
	}

	len = m_pktlen(m);
	if (rmc_queue_packet(cl, m) != 0) {
		/* drop occurred.  some mbuf was freed in rmc_queue_packet. */
		PKTCNTR_ADD(&cl->stats_.drop_cnt, len);
		return (ENOBUFS);
	}

	/* successfully queued. */
	++cbqp->cbq_qlen;
	IFQ_INC_LEN(ifq);
	return (0);
}
Example #14
/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
struct mbuf *
fairq_dequeue(struct fairq_if *fif, cqdq_op_t op)
{
	struct ifclassq *ifq = fif->fif_ifq;
	struct fairq_class *cl;
	struct fairq_class *best_cl;
	struct mbuf *best_m;
	struct mbuf *m;
	u_int64_t cur_time = read_machclk();
	u_int32_t best_scale;
	u_int32_t scale;
	int pri;
	int hit_limit;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (IFCQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		return (NULL);
	}

	if (fif->fif_poll_cache && op == CLASSQDQ_REMOVE) {
		best_cl = fif->fif_poll_cache;
		m = fairq_getq(best_cl, cur_time);
		fif->fif_poll_cache = NULL;
		if (m != NULL) {
			IFCQ_DEC_LEN(ifq);
			IFCQ_DEC_BYTES(ifq, m_pktlen(m));
			IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
			PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1, m_pktlen(m));
		}
	} else {
		best_cl = NULL;
		best_m = NULL;
		best_scale = 0xFFFFFFFFU;

		for (pri = fif->fif_maxpri;  pri >= 0; pri--) {
			if ((cl = fif->fif_classes[pri]) == NULL)
				continue;
			if ((cl->cl_flags & FARF_HAS_PACKETS) == 0)
				continue;
			m = fairq_pollq(cl, cur_time, &hit_limit);
			if (m == NULL) {
				cl->cl_flags &= ~FARF_HAS_PACKETS;
				continue;
			}

			/*
			 * We can halt the search immediately if the queue
			 * did not hit its bandwidth limit.
			 */
			if (hit_limit == 0) {
				best_cl = cl;
				best_m = m;
				break;
			}

			/*
			 * Otherwise calculate the scale factor and select
			 * the queue with the lowest scale factor.  This
			 * apportions any unused bandwidth weighted by
			 * the relative bandwidth specification.
			 */
			scale = cl->cl_bw_current * 100 / cl->cl_bandwidth;
			if (scale < best_scale) {
				best_cl = cl;
				best_m = m;
				best_scale = scale;
			}
		}

		if (op == CLASSQDQ_POLL) {
			fif->fif_poll_cache = best_cl;
			m = best_m;
		} else if (best_cl != NULL) {
			m = fairq_getq(best_cl, cur_time);
			if (m != NULL) {
				IFCQ_DEC_LEN(ifq);
				IFCQ_DEC_BYTES(ifq, m_pktlen(m));
				IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
				PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1,
				    m_pktlen(m));
			}
		} else {
			m = NULL;
		}
	}
	return (m);
}
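When every candidate class has hit its bandwidth limit, the scale factor computed above (cl_bw_current * 100 / cl_bandwidth) measures how far over its configured share each class is running, and the lowest value wins. A worked illustration with made-up numbers:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	/* illustrative numbers only, not taken from the source */
	uint32_t a_cur = 120, a_bw = 100;	/* class A: 20% over its share */
	uint32_t b_cur = 300, b_bw = 400;	/* class B: under its share */

	/* mirrors: scale = cl->cl_bw_current * 100 / cl->cl_bandwidth */
	uint32_t a_scale = a_cur * 100 / a_bw;	/* 120 */
	uint32_t b_scale = b_cur * 100 / b_bw;	/*  75 */

	/* the lower scale wins, so spare bandwidth is apportioned by the
	 * relative bandwidth specification, as the comment above says */
	printf("dequeue from class %s\n", (a_scale < b_scale) ? "A" : "B");
	return (0);
}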
Example #15
int
priq_enqueue(struct priq_if *pif, struct priq_class *cl, struct mbuf *m,
    struct pf_mtag *t)
{
	struct ifclassq *ifq = pif->pif_ifq;
	u_int32_t pri;
	int len, ret;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(cl == NULL || cl->cl_pif == pif);

	if (cl == NULL) {
#if PF_ALTQ
		cl = priq_clh_to_clp(pif, t->pftag_qid);
#else /* !PF_ALTQ */
		cl = priq_clh_to_clp(pif, 0);
#endif /* !PF_ALTQ */
		if (cl == NULL) {
			cl = pif->pif_default;
			if (cl == NULL) {
				IFCQ_CONVERT_LOCK(ifq);
				m_freem(m);
				return (ENOBUFS);
			}
		}
	}
	pri = cl->cl_pri;
	VERIFY(pri < PRIQ_MAXPRI);

	len = m_pktlen(m);

	ret = priq_addq(cl, m, t);
	if (ret != 0) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
		} else {
			VERIFY(ret == CLASSQEQ_DROPPED ||
			    ret == CLASSQEQ_DROPPED_FC ||
			    ret == CLASSQEQ_DROPPED_SP);
			/* packet has been freed in priq_addq */
			PKTCNTR_ADD(&cl->cl_dropcnt, 1, len);
			IFCQ_DROP_ADD(ifq, 1, len);
			switch (ret) {
			case CLASSQEQ_DROPPED:
				return (ENOBUFS);
			case CLASSQEQ_DROPPED_FC:
				return (EQFULL);
			case CLASSQEQ_DROPPED_SP:
				return (EQSUSPENDED);
			}
			/* NOT REACHED */
		}
	}
	IFCQ_INC_LEN(ifq);
	IFCQ_INC_BYTES(ifq, len);

	/* class is now active; indicate it as such */
	if (!pktsched_bit_tst(pri, &pif->pif_bitmap))
		pktsched_bit_set(pri, &pif->pif_bitmap);

	/* successfully queued. */
	return (ret);
}
Example #16
struct mbuf *
codel_getq(struct codel *c, class_queue_t *q)
{
	struct mbuf	*m;
	u_int64_t	 now;
	int		 drop;

	if ((m = _getq(q)) == NULL) {
		c->vars.dropping = 0;
		return (m);
	}

	now = read_machclk();
	drop = codel_should_drop(c, q, m, now);
	if (c->vars.dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			c->vars.dropping = 0;
		} else if (codel_time_after_eq(now, c->vars.drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (c->vars.dropping &&
			    codel_time_after_eq(now, c->vars.drop_next)) {
				c->vars.count++; /* don't care about a possible
						  * wrap, since there is no
						  * divide anymore */
				codel_Newton_step(&c->vars);
				/* TODO ECN */
				PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
				m_freem(m);
				m = _getq(q);
				if (!codel_should_drop(c, q, m, now))
					/* leave dropping state */
					c->vars.dropping = 0;
				else
					/* and schedule the next drop */
					c->vars.drop_next =
					    codel_control_law(c->vars.drop_next,
						c->params.interval,
						c->vars.rec_inv_sqrt);
			}
		}
	} else if (drop) {
		/* TODO ECN */
		PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
		m_freem(m);

		m = _getq(q);
		drop = codel_should_drop(c, q, m, now);

		c->vars.dropping = 1;
		/* if min went above target close to when we last went below
		 * it, assume that the drop rate that controlled the queue on
		 * the last cycle is a good starting point to control it now.
		 */
		if (codel_time_before(now - c->vars.drop_next,
		    16 * c->params.interval)) {
			c->vars.count = (c->vars.count - c->vars.lastcount) | 1;
			/* we don't care if the rec_inv_sqrt approximation
			 * is not very precise:
			 * the next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(&c->vars);
		} else {
			c->vars.count = 1;
			c->vars.rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		c->vars.lastcount = c->vars.count;
		c->vars.drop_next = codel_control_law(now, c->params.interval,
		    c->vars.rec_inv_sqrt);
	}

	return (m);
}
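The fixed-point count/rec_inv_sqrt machinery above approximates CoDel's control law (RFC 8289): while in the dropping state the next drop is scheduled at drop_next = now + interval / sqrt(count), and each codel_Newton_step() refines rec_inv_sqrt ~ 1/sqrt(count) so the fast path needs neither a divide nor a square root. A floating-point sketch of the schedule being approximated:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* floating-point model of the control law the code above approximates:
 * drop_next = t + interval / sqrt(count); drop spacing shrinks as count
 * grows, which is what drives the while loop in codel_getq() */
static double
control_law(double t, double interval, uint32_t count)
{
	return (t + interval / sqrt((double)count));
}

int
main(void)
{
	double interval = 100.0;	/* ms; CoDel's default interval */
	uint32_t count;

	/* successive drop offsets: 100.0, 70.7, 57.7, 50.0 ms */
	for (count = 1; count <= 4; count++)
		printf("count=%u  next drop at +%.1f ms\n",
		    count, control_law(0.0, interval, count));
	return (0);
}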