static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			sch->qstats.overlimits++;
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				sch->qstats.overlimits++;
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		sch->qstats.backlog += qdisc_pkt_len(skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	sch->qstats.drops++;
	kfree_skb(skb);
	return NET_XMIT_DROP;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}
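/*
 * The variant below appears to be the same enqueue path rebased onto the
 * newer Qdisc enqueue API (Linux 4.8+), in which ->enqueue() takes an extra
 * "struct sk_buff **to_free" list so dropped packets can be freed in bulk
 * after the root qdisc lock is released. Stats updates go through the
 * qdisc_qstats_*() helpers and drops through qdisc_drop(skb, sch, to_free).
 * The external-classifier branch (q->filter_list / choke_classify) is also
 * absent in this version.
 */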
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}