Example #1
/* Borrow from codel_should_drop in Linux kernel */
static bool codel_should_mark(const struct sk_buff *skb,
			      struct wfq_class *cl,
			      s64 now_ns)
{
	bool ok_to_mark;
	codel_time_t now = ns_to_codel_time(now_ns);

	cl->ldelay = ns_to_codel_time(now_ns - skb->tstamp.tv64);

	if (codel_time_before(cl->ldelay, (codel_time_t)wfq_codel_target) ||
	    cl->len_bytes <= wfq_max_pkt_bytes)
	{
		/* went below - stay below for at least interval */
		cl->first_above_time = 0;
		return false;
	}

	ok_to_mark = false;
	if (cl->first_above_time == 0)
	{
		/* just went above from below. If we stay above
		 * for at least interval we'll say it's ok to mark
		 */
		cl->first_above_time = now + wfq_codel_interval;
	}
	else if (codel_time_after(now, cl->first_above_time))
	{
		ok_to_mark = true;
	}

	return ok_to_mark;
}
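
The codel_time_t helpers used above are not part of this example. The sketch below follows the Linux kernel's include/net/codel.h (time held as a 32-bit value in 1024 ns units, compared with wrap-safe signed differences); ns_to_codel_time() itself is an assumption, since only codel_should_drop() was borrowed.

/* Sketch of the time helpers, modelled on include/net/codel.h.
 * ns_to_codel_time() is assumed to be a plain shift into CoDel time units.
 */
typedef u32 codel_time_t;

#define CODEL_SHIFT 10			/* one CoDel time unit = 1024 ns */

static inline codel_time_t ns_to_codel_time(s64 ns)
{
	return (codel_time_t)(ns >> CODEL_SHIFT);
}

/* Wrap-safe comparisons (the kernel versions also typecheck their arguments) */
#define codel_time_after(a, b)		((s32)((a) - (b)) > 0)
#define codel_time_after_eq(a, b)	((s32)((a) - (b)) >= 0)
#define codel_time_before(a, b)		codel_time_after(b, a)
#define codel_time_before_eq(a, b)	codel_time_after_eq(b, a)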
Example #2
/* CoDel ECN marking. Borrow from codel_dequeue in Linux kernel */
static void codel_marking(struct sk_buff *skb, struct wfq_class *cl)
{
	s64 now_ns = ktime_get_ns();
	codel_time_t now = ns_to_codel_time(now_ns);
	bool mark = codel_should_mark(skb, cl, now_ns);

	if (cl->marking)
	{
		if (!mark)
		{
			/* sojourn time below target - leave marking state */
			cl->marking = false;
		}
		else if (codel_time_after_eq(now, cl->mark_next))
		{
			/* It's time for the next mark */
			cl->count++;
			codel_Newton_step(cl);
			cl->mark_next = codel_control_law(cl->mark_next,
							  wfq_codel_interval,
							  cl->rec_inv_sqrt);
			INET_ECN_set_ce(skb);
		}
	}
	else if (mark)
	{
		u32 delta;

		INET_ECN_set_ce(skb);
		cl->marking = true;
		/* if min went above target close to when we last went below it
		 * assume that the drop rate that controlled the queue on the
		 * last cycle is a good starting point to control it now.
		 */
		delta = cl->count - cl->lastcount;
		if (delta > 1 &&
		    codel_time_before(now - cl->mark_next,
				      (codel_time_t)wfq_codel_interval * 16))
		{
			cl->count = delta;
			/* we dont care if rec_inv_sqrt approximation
			 * is not very precise :
			 * Next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(cl);
		}
		else
		{
			cl->count = 1;
			cl->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		cl->lastcount = cl->count;
		cl->mark_next = codel_control_law(now,
						  wfq_codel_interval,
						  cl->rec_inv_sqrt);
	}
}
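
codel_Newton_step() and codel_control_law() are likewise not shown here; presumably they mirror the kernel's codel.h with the CoDel state kept in struct wfq_class. A sketch under that assumption (the Q0.32 fixed-point layout and REC_INV_SQRT_SHIFT are taken from the kernel header):

/* Sketch, assuming the helpers follow include/net/codel.h with the state
 * kept in struct wfq_class (cl->count, cl->rec_inv_sqrt).
 */
#define REC_INV_SQRT_BITS	(8 * sizeof(u16))	 /* stored bits of rec_inv_sqrt */
#define REC_INV_SQRT_SHIFT	(32 - REC_INV_SQRT_BITS) /* shift up to a Q0.32 value */

/* One Newton-Raphson step towards rec_inv_sqrt ~= 1/sqrt(count):
 * x' = x * (3 - count * x^2) / 2, done in 32-bit fixed point.
 */
static void codel_Newton_step(struct wfq_class *cl)
{
	u32 invsqrt = ((u32)cl->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
	u64 val = (3LL << 32) - ((u64)cl->count * invsqrt2);

	val >>= 2;	/* avoid overflow in the following multiply */
	val = (val * invsqrt) >> (32 - 2 + 1);

	cl->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}

/* Next mark time: t + interval / sqrt(count), computed as a multiply by the
 * Q0.32 reciprocal square root so that no sqrt() or divide is needed.
 */
static codel_time_t codel_control_law(codel_time_t t,
				      codel_time_t interval,
				      u32 rec_inv_sqrt)
{
	return t + (codel_time_t)(((u64)interval *
				   (rec_inv_sqrt << REC_INV_SQRT_SHIFT)) >> 32);
}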
Example #3
static int
codel_should_drop(struct codel *c, class_queue_t *q, struct mbuf *m,
    u_int64_t now)
{
	struct m_tag *mtag;
	uint64_t *enqueue_time;

	if (m == NULL) {
		c->vars.first_above_time = 0;
		return (0);
	}

	mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
	if (mtag == NULL) {
		/* Only one warning per second. */
		if (ppsratecheck(&c->last_log, &c->last_pps, 1))
			printf("%s: could not found the packet mtag!\n",
			    __func__);
		c->vars.first_above_time = 0;
		return (0);
	}
	enqueue_time = (uint64_t *)(mtag + 1);
	c->vars.ldelay = now - *enqueue_time;
	c->stats.maxpacket = MAX(c->stats.maxpacket, m_pktlen(m));

	if (codel_time_before(c->vars.ldelay, c->params.target) ||
	    qsize(q) <= c->stats.maxpacket) {
		/* went below - stay below for at least interval */
		c->vars.first_above_time = 0;
		return (0);
	}
	if (c->vars.first_above_time == 0) {
		/* just went above from below. If we stay above
		 * for at least interval we'll say it's ok to drop
		 */
		c->vars.first_above_time = now + c->params.interval;
		return (0);
	}
	if (codel_time_after(now, c->vars.first_above_time))
		return (1);

	return (0);
}
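
codel_should_drop() relies on the enqueue path having prepended an mtag that carries the enqueue timestamp. Below is a minimal sketch of that side, modelled on codel_addq() in ALTQ's altq_codel.c; the helper name is invented and the tail-drop statistics are omitted.

/* Sketch of the matching enqueue side (modelled on codel_addq() in
 * altq_codel.c): stamp the packet with its enqueue time so that
 * codel_should_drop() can later compute the sojourn time.
 */
static int
codel_enqueue_sketch(struct codel *c, class_queue_t *q, struct mbuf *m)
{
	struct m_tag *mtag;
	uint64_t *enqueue_time;

	if (qlen(q) >= qlimit(q)) {
		/* queue full - tail drop (real code also updates drop stats) */
		m_freem(m);
		return (-1);
	}

	mtag = m_tag_locate(m, MTAG_CODEL, 0, NULL);
	if (mtag == NULL)
		mtag = m_tag_alloc(MTAG_CODEL, 0, sizeof(uint64_t), M_NOWAIT);
	if (mtag == NULL) {
		m_freem(m);
		return (-1);
	}
	enqueue_time = (uint64_t *)(mtag + 1);
	*enqueue_time = read_machclk();
	m_tag_prepend(m, mtag);

	_addq(q, m);
	return (0);
}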
Example #4
struct mbuf *
codel_getq(struct codel *c, class_queue_t *q)
{
	struct mbuf	*m;
	u_int64_t	 now;
	int		 drop;

	if ((m = _getq(q)) == NULL) {
		c->vars.dropping = 0;
		return (m);
	}

	now = read_machclk();
	drop = codel_should_drop(c, q, m, now);
	if (c->vars.dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			c->vars.dropping = 0;
		} else if (codel_time_after_eq(now, c->vars.drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (c->vars.dropping &&
			    codel_time_after_eq(now, c->vars.drop_next)) {
				c->vars.count++; /* don't care of possible wrap
						  * since there is no more
						  * divide */
				codel_Newton_step(&c->vars);
				/* TODO ECN */
				PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
				m_freem(m);
				m = _getq(q);
				if (!codel_should_drop(c, q, m, now))
					/* leave dropping state */
					c->vars.dropping = 0;
				else
					/* and schedule the next drop */
					c->vars.drop_next =
					    codel_control_law(c->vars.drop_next,
						c->params.interval,
						c->vars.rec_inv_sqrt);
			}
		}
	} else if (drop) {
		/* TODO ECN */
		PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
		m_freem(m);

		m = _getq(q);
		drop = codel_should_drop(c, q, m, now);

		c->vars.dropping = 1;
		/* if min went above target close to when we last went below it
		 * assume that the drop rate that controlled the queue on the
		 * last cycle is a good starting point to control it now.
		 */
		if (codel_time_before(now - c->vars.drop_next,
		    16 * c->params.interval)) {
			c->vars.count = (c->vars.count - c->vars.lastcount) | 1;
			/* we dont care if rec_inv_sqrt approximation
			 * is not very precise :
			 * Next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(&c->vars);
		} else {
			c->vars.count = 1;
			c->vars.rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		c->vars.lastcount = c->vars.count;
		c->vars.drop_next = codel_control_law(now, c->params.interval,
		    c->vars.rec_inv_sqrt);
	}

	return (m);
}
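
Both the marking and the dropping variants implement the same control law, next = t + interval/sqrt(count), with the square root replaced by a Q0.32 reciprocal square root that is refined by one Newton step per mark/drop. The standalone user-space snippet below (not taken from either source) compares the fixed-point result against plain floating point:

/* Standalone illustration only: the rec_inv_sqrt machinery approximates
 * interval / sqrt(count).  Here rec_inv_sqrt is a full Q0.32 value.
 * Build with: cc -o codel_law codel_law.c -lm
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t interval = 100000;	/* arbitrary interval in CoDel time units */
	uint32_t rec_inv_sqrt = ~0U;	/* ~1.0 in Q0.32, i.e. 1/sqrt(1) */
	uint32_t count;

	for (count = 1; count <= 16; count++) {
		/* one Newton step: x' = x * (3 - count * x^2) / 2 */
		uint32_t x2 = ((uint64_t)rec_inv_sqrt * rec_inv_sqrt) >> 32;
		uint64_t val = (3ULL << 32) - (uint64_t)count * x2;

		val >>= 2;	/* avoid overflow in the next multiply */
		val = (val * rec_inv_sqrt) >> (32 - 2 + 1);
		rec_inv_sqrt = (uint32_t)val;

		/* control law increment: interval scaled by 1/sqrt(count) */
		printf("count=%2u fixed=%6u float=%8.1f\n", (unsigned)count,
		    (unsigned)(((uint64_t)interval * rec_inv_sqrt) >> 32),
		    interval / sqrt((double)count));
	}
	return (0);
}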