Example #1
int
codel_addq(struct codel *c, class_queue_t *q, struct mbuf *m)
{
	struct m_tag *mtag;
	uint64_t *enqueue_time;

	if (qlen(q) < qlimit(q)) {
		/* Reuse an existing CoDel tag, or allocate one for this mbuf. */
		mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
		if (mtag == NULL)
			mtag = m_tag_alloc(MTAG_CODEL, 0, sizeof(uint64_t),
			    M_NOWAIT);
		if (mtag == NULL) {
			m_freem(m);
			return (-1);
		}
		/* Stamp the packet with its enqueue time for the dequeue path. */
		enqueue_time = (uint64_t *)(mtag + 1);
		*enqueue_time = read_machclk();
		m_tag_prepend(m, mtag);
		_addq(q, m);
		return (0);
	}
	/* The queue is over its limit: count the drop and free the packet. */
	c->drop_overlimit++;
	m_freem(m);

	return (-1);
}
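codel_addq() stamps each queued packet with its enqueue time in an mbuf tag so that the dequeue path can later compute the packet's sojourn time. The helper below is a minimal sketch, not code from the source above: it assumes the same kernel headers and the tag layout used by codel_addq(), and its name is made up for illustration.

/*
 * Hypothetical helper: read back the timestamp stored by codel_addq()
 * and return the packet's sojourn time.  This mirrors what the CoDel
 * dequeue path is expected to do.
 */
static uint64_t
codel_sojourn_time(struct mbuf *m, uint64_t now)
{
	struct m_tag *mtag;

	mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
	if (mtag == NULL)
		return (0);	/* untagged packet: no timestamp available */
	return (now - *(uint64_t *)(mtag + 1));
}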
Example #2
struct mbuf *
tbr_dequeue(struct ifaltq_subque *ifsq, int op)
{
	struct ifaltq *ifq = ifsq->ifsq_altq;
	struct tb_regulator *tbr;
	struct mbuf *m;
	int64_t interval;
	uint64_t now;

	if (ifsq_get_index(ifsq) != ALTQ_SUBQ_INDEX_DEFAULT) {
		/*
		 * A race occurred: an unrelated subqueue was picked
		 * during the packet scheduler transition.
		 */
		ifsq_classic_request(ifsq, ALTRQ_PURGE, NULL);
		return (NULL);
	}

	crit_enter();
	tbr = ifq->altq_tbr;
	if (op == ALTDQ_REMOVE && tbr->tbr_lastop == ALTDQ_POLL) {
		/* if this is a remove after poll, bypass tbr check */
	} else {
		/* update the token only when it is non-positive */
		if (tbr->tbr_token <= 0) {
			now = read_machclk();
			interval = now - tbr->tbr_last;
			if (interval >= tbr->tbr_filluptime)
				tbr->tbr_token = tbr->tbr_depth;
			else {
				tbr->tbr_token += interval * tbr->tbr_rate;
				if (tbr->tbr_token > tbr->tbr_depth)
					tbr->tbr_token = tbr->tbr_depth;
			}
			tbr->tbr_last = now;
		}
		/* if the token is still non-positive, don't allow a dequeue */
		if (tbr->tbr_token <= 0) {
			crit_exit();
			return (NULL);
		}
	}

	if (ifq_is_enabled(ifq))
		m = (*ifsq->ifsq_dequeue)(ifsq, op);
	else
		m = ifsq_classic_dequeue(ifsq, op);

	if (m != NULL && op == ALTDQ_REMOVE)
		tbr->tbr_token -= TBR_SCALE(m_pktlen(m));
	tbr->tbr_lastop = op;
	crit_exit();
	return (m);
}
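tbr_dequeue() refills the bucket lazily: tbr_rate is the fixed-point number of bytes earned per machclk tick, so the credit gained since the last refill is interval * tbr_rate, clamped to tbr_depth, and every removed packet costs TBR_SCALE(m_pktlen(m)). The standalone userland sketch below reproduces that arithmetic; the TBR_SHIFT value, the 1 GHz clock, and the profile numbers are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

#define TBR_SHIFT	29			/* assumed ALTQ fixed-point shift */
#define TBR_SCALE(x)	((int64_t)(x) << TBR_SHIFT)
#define TBR_UNSCALE(x)	((x) >> TBR_SHIFT)

int
main(void)
{
	int64_t machclk_freq = 1000000000LL;	/* assume a 1 GHz machine clock */
	int64_t rate, depth, token, interval;

	/* 100 Mbit/s expressed as scaled bytes per machclk tick */
	rate = TBR_SCALE(100000000 / 8) / machclk_freq;
	depth = TBR_SCALE(64 * 1024);		/* 64 KB bucket */
	token = 0;				/* bucket just ran dry */

	interval = machclk_freq / 1000;		/* 1 ms since the last refill */
	token += interval * rate;
	if (token > depth)
		token = depth;

	/* roughly 12500 bytes of credit earned in 1 ms at 100 Mbit/s */
	printf("%lld bytes of credit\n", (long long)TBR_UNSCALE(token));
	return (0);
}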
Example #3
/*
 * set a token bucket regulator.
 * if the specified rate is zero, the token bucket regulator is deleted.
 */
static int
tbr_set_locked(struct ifaltq *ifq, struct tb_profile *profile)
{
	struct tb_regulator *tbr, *otbr;

	if (machclk_freq == 0)
		init_machclk();
	if (machclk_freq == 0) {
		kprintf("%s: no cpu clock available!\n", __func__);
		return (ENXIO);
	}

	if (profile->rate == 0) {
		/* delete this tbr */
		if ((tbr = ifq->altq_tbr) == NULL)
			return (ENOENT);
		ifq->altq_tbr = NULL;
		kfree(tbr, M_ALTQ);
		return (0);
	}

	tbr = kmalloc(sizeof(*tbr), M_ALTQ, M_WAITOK | M_ZERO);
	tbr->tbr_rate = TBR_SCALE(profile->rate / 8) / machclk_freq;
	tbr->tbr_depth = TBR_SCALE(profile->depth);
	if (tbr->tbr_rate > 0)
		tbr->tbr_filluptime = tbr->tbr_depth / tbr->tbr_rate;
	else
		tbr->tbr_filluptime = 0xffffffffffffffffLL;
	tbr->tbr_token = tbr->tbr_depth;
	tbr->tbr_last = read_machclk();
	tbr->tbr_lastop = ALTDQ_REMOVE;

	otbr = ifq->altq_tbr;
	ifq->altq_tbr = tbr;	/* set the new tbr */

	if (otbr != NULL)
		kfree(otbr, M_ALTQ);
	else if (tbr_timer == 0) {
		callout_reset_bycpu(&tbr_callout, 1, tbr_timeout, NULL, 0);
		tbr_timer = 1;
	}
	return (0);
}
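tbr_set_locked() expects a tb_profile whose rate is given in bits per second (hence profile->rate / 8) and whose depth is the bucket size in bytes; a zero rate removes the regulator. The caller below is a hypothetical sketch: the wrapper name and the numbers are made up, and it assumes the caller already holds whatever lock the "_locked" suffix implies.

/*
 * Hypothetical caller: install a 100 Mbit/s regulator with a 64 KB
 * bucket, then delete it again by setting a zero rate.  Only the
 * tb_profile fields used above (rate in bits/s, depth in bytes) are
 * relied upon.
 */
static int
example_install_tbr(struct ifaltq *ifq)
{
	struct tb_profile profile;
	int error;

	profile.rate = 100 * 1000 * 1000;	/* 100 Mbit/s */
	profile.depth = 64 * 1024;		/* 64 KB bucket */
	error = tbr_set_locked(ifq, &profile);
	if (error != 0)
		return (error);

	profile.rate = 0;			/* zero rate deletes the tbr */
	return (tbr_set_locked(ifq, &profile));
}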
Example #4
/*
 * note: CLASSQDQ_POLL returns the next packet without removing the packet
 *	from the queue.  CLASSQDQ_REMOVE is a normal dequeue operation.
 *	CLASSQDQ_REMOVE must return the same packet if called immediately
 *	after CLASSQDQ_POLL.
 */
struct mbuf *
fairq_dequeue(struct fairq_if *fif, cqdq_op_t op)
{
	struct ifclassq *ifq = fif->fif_ifq;
	struct fairq_class *cl;
	struct fairq_class *best_cl;
	struct mbuf *best_m;
	struct mbuf *m;
	u_int64_t cur_time = read_machclk();
	u_int32_t best_scale;
	u_int32_t scale;
	int pri;
	int hit_limit;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (IFCQ_IS_EMPTY(ifq)) {
		/* no packet in the queue */
		return (NULL);
	}

	if (fif->fif_poll_cache && op == CLASSQDQ_REMOVE) {
		best_cl = fif->fif_poll_cache;
		m = fairq_getq(best_cl, cur_time);
		fif->fif_poll_cache = NULL;
		if (m != NULL) {
			IFCQ_DEC_LEN(ifq);
			IFCQ_DEC_BYTES(ifq, m_pktlen(m));
			IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
			PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1, m_pktlen(m));
		}
	} else {
		best_cl = NULL;
		best_m = NULL;
		best_scale = 0xFFFFFFFFU;

		for (pri = fif->fif_maxpri;  pri >= 0; pri--) {
			if ((cl = fif->fif_classes[pri]) == NULL)
				continue;
			if ((cl->cl_flags & FARF_HAS_PACKETS) == 0)
				continue;
			m = fairq_pollq(cl, cur_time, &hit_limit);
			if (m == NULL) {
				cl->cl_flags &= ~FARF_HAS_PACKETS;
				continue;
			}

			/*
			 * We can halt the search immediately if the queue
			 * did not hit its bandwidth limit.
			 */
			if (hit_limit == 0) {
				best_cl = cl;
				best_m = m;
				break;
			}

			/*
			 * Otherwise calculate the scale factor and select
			 * the queue with the lowest scale factor.  This
			 * apportions any unused bandwidth weighted by
			 * the relative bandwidth specification.
			 */
			scale = cl->cl_bw_current * 100 / cl->cl_bandwidth;
			if (scale < best_scale) {
				best_cl = cl;
				best_m = m;
				best_scale = scale;
			}
		}

		if (op == CLASSQDQ_POLL) {
			fif->fif_poll_cache = best_cl;
			m = best_m;
		} else if (best_cl != NULL) {
			m = fairq_getq(best_cl, cur_time);
			if (m != NULL) {
				IFCQ_DEC_LEN(ifq);
				IFCQ_DEC_BYTES(ifq, m_pktlen(m));
				IFCQ_XMIT_ADD(ifq, 1, m_pktlen(m));
				PKTCNTR_ADD(&best_cl->cl_xmitcnt, 1,
				    m_pktlen(m));
			}
		} else {
			m = NULL;
		}
	}
	return (m);
}
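When every eligible class has hit its bandwidth limit, fairq_dequeue() scores each one with scale = cl_bw_current * 100 / cl_bandwidth and dequeues from the class with the lowest score, i.e. the class least over its configured share, which spreads the leftover bandwidth in proportion to the configured weights. The standalone sketch below, with made-up class names and rates, illustrates that selection.

#include <stdint.h>
#include <stdio.h>

struct demo_class {
	const char	*name;
	uint32_t	 bw_current;	/* measured throughput, bytes/s */
	uint32_t	 bandwidth;	/* configured share, bytes/s */
};

int
main(void)
{
	struct demo_class cls[] = {
		{ "bulk",        150000, 100000 },	/* 150% of its share */
		{ "interactive",  60000,  50000 },	/* 120% of its share */
	};
	uint32_t best_scale = 0xFFFFFFFFU, scale;
	const char *best = NULL;
	int i;

	for (i = 0; i < 2; i++) {
		scale = cls[i].bw_current * 100 / cls[i].bandwidth;
		if (scale < best_scale) {
			best_scale = scale;
			best = cls[i].name;
		}
	}
	/* prints: dequeue from interactive (scale 120) */
	printf("dequeue from %s (scale %u)\n", best, best_scale);
	return (0);
}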
Example #5
struct mbuf *
codel_getq(struct codel *c, class_queue_t *q)
{
	struct mbuf	*m;
	u_int64_t	 now;
	int		 drop;

	if ((m = _getq(q)) == NULL) {
		c->vars.dropping = 0;
		return (m);
	}

	now = read_machclk();
	drop = codel_should_drop(c, q, m, now);
	if (c->vars.dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			c->vars.dropping = 0;
		} else if (codel_time_after_eq(now, c->vars.drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (c->vars.dropping &&
			    codel_time_after_eq(now, c->vars.drop_next)) {
				c->vars.count++; /* don't care about a possible
						  * wrap, since there is no
						  * divide any more */
				codel_Newton_step(&c->vars);
				/* TODO ECN */
				PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
				m_freem(m);
				m = _getq(q);
				if (!codel_should_drop(c, q, m, now))
					/* leave dropping state */
					c->vars.dropping = 0;
				else
					/* and schedule the next drop */
					c->vars.drop_next =
					    codel_control_law(c->vars.drop_next,
						c->params.interval,
						c->vars.rec_inv_sqrt);
			}
		}
	} else if (drop) {
		/* TODO ECN */
		PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
		m_freem(m);

		m = _getq(q);
		drop = codel_should_drop(c, q, m, now);

		c->vars.dropping = 1;
		/* if min went above target close to when we last went below it,
		 * assume that the drop rate that controlled the queue on the
		 * last cycle is a good starting point to control it now.
		 */
		if (codel_time_before(now - c->vars.drop_next,
		    16 * c->params.interval)) {
			c->vars.count = (c->vars.count - c->vars.lastcount) | 1;
			/* we don't care if the rec_inv_sqrt approximation
			 * is not very precise:
			 * the next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(&c->vars);
		} else {
			c->vars.count = 1;
			c->vars.rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		c->vars.lastcount = c->vars.count;
		c->vars.drop_next = codel_control_law(now, c->params.interval,
		    c->vars.rec_inv_sqrt);
	}

	return (m);
}
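codel_control_law() schedules the next drop roughly interval / sqrt(count) after the previous one; rec_inv_sqrt is a fixed-point approximation of 1/sqrt(count) that codel_Newton_step() refines on every round so the fast path never divides. The userland sketch below uses floating point instead of the kernel's fixed-point form, purely to show how the drop spacing shrinks as count grows.

#include <math.h>
#include <stdio.h>

int
main(void)
{
	const double interval_ms = 100.0;	/* CoDel's usual default interval */
	int count;

	/* spacing: 100.0, 70.7, 57.7, 50.0, 44.7 ms for count = 1..5 */
	for (count = 1; count <= 5; count++)
		printf("drop %d: next drop in %.1f ms\n",
		    count, interval_ms / sqrt((double)count));
	return (0);
}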