static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				/* WRED mode shares one set of RED state across
				 * all virtual queues, so the idle period starts
				 * only when the whole qdisc backlog drains.
				 */
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}
static unsigned int gred_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		unsigned int len = qdisc_pkt_len(skb);
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "while dropping, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= len;
			q->stats.other++;

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		qdisc_drop(skb, sch);
		return len;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return 0;
}
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			if (net_ratelimit())
				pr_warning("GRED: Unable to relocate VQ 0x%x "
					   "after dequeue, screwing up "
					   "backlog.\n", tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (!q->backlog && !gred_wred_mode(t))
				red_start_of_idle_period(&q->vars);
		}

		return skb;
	}

	if (gred_wred_mode(t) && !red_is_idling(&t->wred_set))
		red_start_of_idle_period(&t->wred_set);

	return NULL;
}
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb == NULL && !red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return skb;
}
static unsigned int choke_drop(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (len > 0)
		q->stats.other++;
	else {
		/* Nothing left to drop: the queue is empty, so start
		 * the RED idle period if it is not already running.
		 */
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}

	return len;
}
static unsigned int red_drop(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);

	skb = qdisc_dequeue_tail(sch);
	if (skb) {
		/* Use qdisc_pkt_len(), not skb->len, so the returned byte
		 * count matches the backlog accounting (GSO packets carry
		 * their length in the qdisc cb).
		 */
		unsigned int len = qdisc_pkt_len(skb);

		q->stats.other++;
		qdisc_drop(skb, sch);
		return len;
	}

	if (!red_is_idling(&q->parms))
		red_start_of_idle_period(&q->parms);

	return 0;
}
static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		/* Empty ring buffer: begin the RED idle period. */
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	/* Random drops leave NULL holes in the ring; advance q->head
	 * past them so it always points at a valid packet (or q->tail).
	 */
	choke_zap_head_holes(q);
	--sch->q.qlen;
	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}