/* CoDel ECN marking. Borrowed from codel_dequeue() in the Linux kernel. */
static void codel_marking(struct sk_buff *skb, struct wfq_class *cl)
{
	s64 now_ns = ktime_get_ns();
	codel_time_t now = ns_to_codel_time(now_ns);
	bool mark = codel_should_mark(skb, cl, now_ns);

	if (cl->marking) {
		if (!mark) {
			/* sojourn time below target - leave marking state */
			cl->marking = false;
		} else if (codel_time_after_eq(now, cl->mark_next)) {
			/* It's time for the next mark */
			cl->count++;
			codel_Newton_step(cl);
			cl->mark_next = codel_control_law(cl->mark_next,
							  wfq_codel_interval,
							  cl->rec_inv_sqrt);
			INET_ECN_set_ce(skb);
		}
	} else if (mark) {
		u32 delta;

		INET_ECN_set_ce(skb);
		cl->marking = true;
		/* If min went above target close to when we last went below
		 * it, assume that the mark rate that controlled the queue on
		 * the last cycle is a good starting point to control it now.
		 */
		delta = cl->count - cl->lastcount;
		if (delta > 1 &&
		    codel_time_before(now - cl->mark_next,
				      (codel_time_t)wfq_codel_interval * 16)) {
			cl->count = delta;
			/* We don't care if the rec_inv_sqrt approximation
			 * is not very precise:
			 * the next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(cl);
		} else {
			cl->count = 1;
			cl->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		cl->lastcount = cl->count;
		cl->mark_next = codel_control_law(now, wfq_codel_interval,
						  cl->rec_inv_sqrt);
	}
}
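Here `codel_should_mark()` is the marking counterpart of the kernel's `codel_should_drop()`: it answers whether the packet's sojourn time has stayed above the CoDel target for at least one full interval. The excerpt calls it without defining it, so the sketch below only illustrates that standard test; `wfq_codel_target_ns`, `wfq_codel_interval_ns`, the per-class `first_above_time_ns` field, and the `wfq_get_enqueue_ns()` timestamp accessor are assumed names, not part of the original code, which may store the enqueue timestamp differently.

/*
 * Hypothetical sketch of codel_should_mark(): the standard CoDel test
 * "has the minimum sojourn time stayed above target for a full
 * interval?", adapted from codel_should_drop() to marking.
 */
static bool codel_should_mark(const struct sk_buff *skb,
			      struct wfq_class *cl, s64 now_ns)
{
	/* wfq_get_enqueue_ns() stands in for however the enqueue
	 * timestamp is actually stamped on the skb at enqueue time.
	 */
	s64 sojourn_ns = now_ns - wfq_get_enqueue_ns(skb);

	if (sojourn_ns < wfq_codel_target_ns) {
		/* Below target: forget any pending first_above_time */
		cl->first_above_time_ns = 0;
		return false;
	}
	if (cl->first_above_time_ns == 0) {
		/* Just crossed the target from below; only act if we
		 * stay above it for at least one interval.
		 */
		cl->first_above_time_ns = now_ns + wfq_codel_interval_ns;
		return false;
	}
	return now_ns >= cl->first_above_time_ns;
}

For comparison, the FreeBSD ALTQ implementation below applies the same state machine at dequeue time, dropping packets instead of marking them.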
struct mbuf *
codel_getq(struct codel *c, class_queue_t *q)
{
	struct mbuf	*m;
	u_int64_t	 now;
	int		 drop;

	if ((m = _getq(q)) == NULL) {
		c->vars.dropping = 0;
		return (m);
	}

	now = read_machclk();
	drop = codel_should_drop(c, q, m, now);
	if (c->vars.dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			c->vars.dropping = 0;
		} else if (codel_time_after_eq(now, c->vars.drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (c->vars.dropping &&
			    codel_time_after_eq(now, c->vars.drop_next)) {
				c->vars.count++; /* don't care about a
						  * possible wrap since there
						  * is no more divide */
				codel_Newton_step(&c->vars);
				/* TODO ECN */
				PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
				m_freem(m);
				m = _getq(q);
				if (!codel_should_drop(c, q, m, now))
					/* leave dropping state */
					c->vars.dropping = 0;
				else
					/* and schedule the next drop */
					c->vars.drop_next = codel_control_law(
					    c->vars.drop_next,
					    c->params.interval,
					    c->vars.rec_inv_sqrt);
			}
		}
	} else if (drop) {
		/* TODO ECN */
		PKTCNTR_ADD(&c->stats.drop_cnt, m_pktlen(m));
		m_freem(m);

		m = _getq(q);
		drop = codel_should_drop(c, q, m, now);
		c->vars.dropping = 1;

		/* If min went above target close to when we last went below
		 * it, assume that the drop rate that controlled the queue on
		 * the last cycle is a good starting point to control it now.
		 */
		if (codel_time_before(now - c->vars.drop_next,
		    16 * c->params.interval)) {
			c->vars.count =
			    (c->vars.count - c->vars.lastcount) | 1;
			/* We don't care if the rec_inv_sqrt approximation
			 * is not very precise:
			 * the next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(&c->vars);
		} else {
			c->vars.count = 1;
			c->vars.rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		c->vars.lastcount = c->vars.count;
		c->vars.drop_next = codel_control_law(now, c->params.interval,
		    c->vars.rec_inv_sqrt);
	}

	return (m);
}
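Both excerpts rely on the same two fixed-point helpers, `codel_Newton_step()` and `codel_control_law()`, which are not shown. For reference, the Linux implementation (include/net/codel.h, the source the first excerpt borrows from) refines a 16-bit reciprocal square root of `count` with one Newton-Raphson iteration and schedules the next drop or mark at `t + interval/sqrt(count)`. Below is a self-contained sketch along those lines, with a pared-down `struct codel_vars` standing in for the kernel/ALTQ state:

#include <stdint.h>

#define REC_INV_SQRT_BITS	(8 * sizeof(uint16_t))		/* 16 */
#define REC_INV_SQRT_SHIFT	(32 - REC_INV_SQRT_BITS)	/* stored in 16 bits */

struct codel_vars {
	uint32_t count;		/* drops/marks since entering drop state */
	uint16_t rec_inv_sqrt;	/* top 16 bits of a Q0.32 estimate of
				 * 1/sqrt(count) */
};

/*
 * One Newton-Raphson iteration refining rec_inv_sqrt toward 1/sqrt(count):
 *	new = old * (3 - count * old^2) / 2
 * carried out in Q0.32 fixed point, so no division is needed at runtime.
 */
static void
codel_Newton_step(struct codel_vars *vars)
{
	uint32_t invsqrt = (uint32_t)vars->rec_inv_sqrt << REC_INV_SQRT_SHIFT;
	uint32_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;
	uint64_t val = (3ULL << 32) - ((uint64_t)vars->count * invsqrt2);

	val >>= 2;	/* avoid overflow in the multiply below */
	val = (val * invsqrt) >> (32 - 2 + 1);
	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}

/*
 * Schedule the next drop/mark at t + interval/sqrt(count): multiplying by
 * the Q0.32 reciprocal square root replaces the division. The interval is
 * assumed to fit in 32 bits of whatever time unit t uses.
 */
static uint64_t
codel_control_law(uint64_t t, uint64_t interval, uint32_t rec_inv_sqrt)
{
	return (t + (uint32_t)(((uint64_t)interval *
	    (rec_inv_sqrt << REC_INV_SQRT_SHIFT)) >> 32));
}

Keeping 1/sqrt(count) as a running fixed-point estimate is what lets both the drop and the mark schedulers stay division-free on the fast path; as the comments in both excerpts note, a rough estimate is fine because each subsequent Newton step converges quadratically.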