Example #1
0
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				printk(KERN_WARNING "GRED: Unable to relocate "
				       "VQ 0x%x after dequeue, screwing up "
				       "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= skb->len;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->parms);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
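For context, a dequeue function like this is only ever invoked through the qdisc's ops table. A minimal sketch of how it is wired up, loosely following the usual net/sched registration pattern (fields beyond .enqueue/.dequeue are illustrative, not copied from sch_gred.c):

/* Illustrative registration sketch; the exact field set varies by
 * kernel version. */
static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		= "gred",
	.priv_size	= sizeof(struct gred_sched),
	.enqueue	= gred_enqueue,
	.dequeue	= gred_dequeue,
	.peek		= qdisc_peek_head,
	.owner		= THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}
module_init(gred_module_init);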
Example #2
0
/* dequeue doesn't actually dequeue until the release command is
 * received. */
static struct sk_buff *queue_dequeue(struct Qdisc *sch)
{
	struct queue_sched_data *q = qdisc_priv(sch);
	struct sk_buff *peek;
	/*
	struct timeval tv;

	if (!q->stop) {
		do_gettimeofday(&tv);
		printk("packet dequeued at %lu.%06lu\n", tv.tv_sec, tv.tv_usec);
	}
	*/

	if (sch->flags & TCQ_F_THROTTLED)
		return NULL;

	peek = (struct sk_buff *)((sch->q).next);

	/* this pointer comparison may be shady */
	if (peek == q->stop) {
		/*
		do_gettimeofday(&tv);
		printk("stop packet at %lu.%06lu\n", tv.tv_sec, tv.tv_usec);
		*/

		/* this is the tail of the last round. Release it and block the queue */
		sch->flags |= TCQ_F_THROTTLED;
		return NULL;
	}

	return qdisc_dequeue_head(sch);
}
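The "release command" mentioned in the leading comment would arrive from outside this function. A hypothetical release path (queue_release is an assumed name, not from the source; __netif_schedule() is the standard helper for kicking a qdisc, and the pattern mirrors what qdisc_watchdog does when a throttle expires) might look like:

/* Hypothetical release hook: unblock the queue and kick the qdisc so
 * that dequeue runs again. Assumed name and placement. */
static void queue_release(struct Qdisc *sch)
{
	sch->flags &= ~TCQ_F_THROTTLED;
	__netif_schedule(sch);
}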
Example #3
0
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}
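Note the switch from skb->len in Example #1 to qdisc_pkt_len(skb) here: for GSO packets the length the qdisc accounted at enqueue time can differ from skb->len, so the qdisc layer caches it in the skb control block. The helper is essentially (paraphrased from include/net/sch_generic.h):

/* Paraphrased: returns the packet length recorded at enqueue time,
 * which is what the backlog counters were charged with. */
static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}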
Example #4
0
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb == NULL && !red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}
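red_start_of_idle_period() simply timestamps the moment the queue went empty, so the next enqueue can decay the average queue length to account for the idle time. A sketch of the helpers, paraphrased from include/net/red.h (in kernels of this vintage the state lived inside struct red_parms):

/* Paraphrased sketch of the RED idle-period helpers. */
static inline void red_start_of_idle_period(struct red_parms *p)
{
	p->qidlestart = psched_get_time();
}

static inline int red_is_idling(const struct red_parms *p)
{
	return p->qidlestart != PSCHED_PASTPERFECT;
}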
Example #5
0
static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sk_buff *skb_head;
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (likely(skb_queue_len(&sch->q) < q->limit))
		return qdisc_enqueue_tail(skb, sch);

	/* queue full, remove one skb to fulfill the limit */
	skb_head = qdisc_dequeue_head(sch);
	sch->bstats.bytes -= qdisc_pkt_len(skb_head);
	sch->bstats.packets--;
	sch->qstats.drops++;
	kfree_skb(skb_head);

	qdisc_enqueue_tail(skb, sch);

	return NET_XMIT_CN;
}
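Returning NET_XMIT_CN ("congestion notification") rather than NET_XMIT_DROP signals that an older packet was discarded to make room, while the skb just handed in was accepted. The distinction matters to the sender: paraphrasing the helper in include/linux/netdevice.h, CN is not surfaced as an errno the way a drop is:

/* Paraphrased: NET_XMIT_CN does not become an error for the sender. */
#define net_xmit_errno(e)	((e) != NET_XMIT_CN ? -ENOBUFS : 0)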
Example #6
0
static struct mbuf *
generic_qdisc_dequeue(struct Qdisc *qdisc)
{
	struct mbuf *m = qdisc_dequeue_head(qdisc);

	if (!m) {
		return NULL;
	}

	if (unlikely(m->priority == NM_MAGIC_PRIORITY_TXQE)) {
		/* nm_os_generic_xmit_frame() asked us for an event on this mbuf.
		 * We have to set the priority to the normal TX token, so that
		 * generic_ndo_start_xmit can pass it to the driver. */
		m->priority = NM_MAGIC_PRIORITY_TX;
		ND(5, "Event met, notify %p", m);
		netmap_generic_irq(NA(qdisc_dev(qdisc)),
				   skb_get_queue_mapping(m), NULL);
	}

	ND(5, "Dequeuing mbuf, len %u", qdisc_qlen(qdisc));

	return m;
}