Example #1
0
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		s64 now;
		s64 toks;
		s64 ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = ktime_get_ns();
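		/* Tokens earned since the last send: the elapsed time, capped at
		 * the configured burst (q->buffer); both are expressed in ns of
		 * transmission time at the token rate.
		 */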
		toks = min_t(s64, now - q->t_c, q->buffer);

		if (tbf_peak_present(q)) {
			ptoks = toks + q->ptokens;
			if (ptoks > q->mtu)
				ptoks = q->mtu;
			ptoks -= (s64) psched_l2t_ns(&q->peak, len);
		}
		toks += q->tokens;
		if (toks > q->buffer)
			toks = q->buffer;
		toks -= (s64) psched_l2t_ns(&q->rate, len);

		if ((toks|ptoks) >= 0) {
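			/* Both the rate bucket and (if used) the peak bucket stay
			 * non-negative after charging this packet, so send it now.
			 */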
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			qdisc_qstats_backlog_dec(sch, skb);
			sch->q.qlen--;
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   now + max_t(long, -toks, -ptoks));
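		/* The timer armed above fires once the more depleted bucket has
		 * refilled enough to cover the head packet.
		 */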

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		qdisc_qstats_overlimit(sch);
	}
	return NULL;
}
Example #2
0
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
	struct tbf_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->ops->peek(q->qdisc);

	if (skb) {
		psched_time_t now;
		long toks;
		long ptoks = 0;
		unsigned int len = qdisc_pkt_len(skb);

		now = psched_get_time();
		toks = psched_tdiff_bounded(now, q->t_c, q->buffer);

		if (q->P_tab) {
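			/* A peak rate table is configured: the packet must also fit
			 * the peak bucket, whose depth is bounded by one MTU's worth
			 * of tokens (q->mtu, in time units).
			 */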
			ptoks = toks + q->ptokens;
			if (ptoks > (long)q->mtu)
				ptoks = q->mtu;
			ptoks -= L2T_P(q, len);
		}
		toks += q->tokens;
		if (toks > (long)q->buffer)
			toks = q->buffer;
		toks -= L2T(q, len);

		if ((toks|ptoks) >= 0) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

			q->t_c = now;
			q->tokens = toks;
			q->ptokens = ptoks;
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog,
					now + max_t(long, -toks, -ptoks));

		/* Maybe we have a shorter packet in the queue,
		   which can be sent now. It sounds cool,
		   but it is wrong in principle.
		   We MUST NOT reorder packets under these circumstances.

		   Really, if we split the flow into independent
		   subflows, it would be a very good solution.
		   This is the main idea of all FQ algorithms
		   (cf. CSZ, HPFQ, HFSC)
		 */

		sch->qstats.overlimits++;
	}
	return NULL;
}
Example #3
0
static struct sk_buff *prio_dequeue(struct Qdisc *sch)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	int prio;

	for (prio = 0; prio < q->bands; prio++) {
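		/* Strict priority: band 0 is served first; a lower-priority band
		 * only gets a turn when every higher-priority band is empty.
		 */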
		struct Qdisc *qdisc = q->queues[prio];
		struct sk_buff *skb = qdisc_dequeue_peeked(qdisc);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			return skb;
		}
	}
	return NULL;
}
Example #4
0
static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (qdisc_is_throttled(sch))
		return NULL;

	skb = q->qdisc->ops->peek(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb = netem_skb_cb(skb);
		psched_time_t now = psched_get_time();

		/* has the packet's scheduled send time arrived yet? */
		if (cb->time_to_send <= now) {
			skb = qdisc_dequeue_peeked(q->qdisc);
			if (unlikely(!skb))
				return NULL;

#ifdef CONFIG_NET_CLS_ACT
			/*
			 * If it's at ingress let's pretend the delay is
			 * from the network (tstamp will be updated).
			 */
			if (G_TC_FROM(skb->tc_verd) & AT_INGRESS)
				skb->tstamp.tv64 = 0;
#endif

			sch->q.qlen--;
			qdisc_unthrottled(sch);
			qdisc_bstats_update(sch, skb);
			return skb;
		}

		qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);
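		/* Not due yet: the watchdog wakes the qdisc at the packet's
		 * scheduled departure time.
		 */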
	}

	return NULL;
}
Example #5
0
static struct sk_buff *wfq_dequeue(struct Qdisc *sch)
{
        struct wfq_sched_data *q = qdisc_priv(sch);
        int i, weight;
        struct wfq_class *cl = NULL;
        u64 min_time;
        struct sk_buff *skb = NULL;
        struct sk_buff *next_pkt = NULL;
        unsigned int len;
        s64 bucket_ns = (s64)l2t_ns(&q->rate, wfq_bucket_bytes);
        s64 result, now;
        int prio = prio_schedule(q);

        if (prio < 0)
                return NULL;

        /* Find the active queue with the smallest head finish time */
        for (i = 0; i < wfq_max_queues; i++)
        {
                if (q->queues[i].prio != prio || q->queues[i].len_bytes == 0)
                        continue;

                if (!cl || wfq_time_before(q->queues[i].head_fin_time,
                                           min_time))
                {
                        cl = &q->queues[i];
                        min_time = cl->head_fin_time;
                }
        }

        /* prio_schedule() reported a backlogged priority, so a matching class
         * should exist; guard against a NULL class anyway */
        if (unlikely(!cl))
                return NULL;

        /* get head packet */
        skb = cl->qdisc->ops->peek(cl->qdisc);
        if (unlikely(!skb))
                return NULL;

        len = skb_size(skb);
        now = ktime_get_ns();
        result = tbf_schedule(len, q, now);
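        /* tbf_schedule() reports the token balance (in ns) that would remain
         * after sending this packet; a negative balance means the bucket
         * cannot cover it yet (see the check below) */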

        /* We don't have enough tokens */
        if (result < 0)
        {
                /* result < 0 here, so (now - result) is the absolute time at
                 * which enough tokens will have accumulated; the hrtimer is
                 * armed in absolute mode */
                qdisc_watchdog_schedule_ns(&q->watchdog, now - result, true);
                qdisc_qstats_overlimit(sch);
                return NULL;
        }

        skb = qdisc_dequeue_peeked(cl->qdisc);
        if (unlikely(!skb))
                return NULL;

        q->sum_len_bytes -= len;
        sch->q.qlen--;
        cl->len_bytes -= len;
        q->prio_len_bytes[prio] -= len;

        /* Set the head_fin_time for the remaining head packet */
        if (cl->len_bytes > 0)
        {
                /* Get the current head packet */
                next_pkt = cl->qdisc->ops->peek(cl->qdisc);
                weight = wfq_queue_weight[cl->id];
                if (likely(next_pkt && weight))
                {
                        len = skb_size(next_pkt);
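                        /* advance the class's virtual finish time by the new
                         * head packet's length scaled down by its weight */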
                        cl->head_fin_time += div_u64((u64)len, (u32)weight);
                        if (wfq_time_before(q->virtual_time[prio],
                                            cl->head_fin_time))
                                q->virtual_time[prio] = cl->head_fin_time;
                }
        }

        /* Token bucket bookkeeping: record this dequeue time and cap the
         * leftover tokens at one bucket's worth */
        q->time_ns = now;
        q->tokens = min_t(s64, result, bucket_ns);
        qdisc_unthrottled(sch);
        qdisc_bstats_update(sch, skb);

        /* TCN */
        if (wfq_ecn_scheme == wfq_tcn)
                tcn_marking(skb);
        /* CoDel */
        else if (wfq_ecn_scheme == wfq_codel)
                codel_marking(skb, cl);
        /* queue-length based ECN marking at dequeue */
        else if (wfq_enable_dequeue_ecn == wfq_enable)
                wfq_qlen_marking(skb, q, cl);

        return skb;
}