static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	/* If the selected queue has length q->limit, this means that
	 * all other queues are empty and that we do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (q->qs[x].qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += skb->len;
	__skb_queue_tail(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (sfq_q_enqueue(skb, q, SFQ_TAIL)) {
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}
	sch->qstats.backlog += skb->len;
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
static int sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int drop_len;

	sch->qstats.backlog += skb->len;
	if ((drop_len = sfq_q_enqueue(skb, q, SFQ_HEAD))) {
		sch->qstats.backlog -= drop_len;
		sch->qstats.drops++;
		return NET_XMIT_CN;
	}
	if (++sch->q.qlen <= q->limit) {
		sch->qstats.requeues++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
static int sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	int ret;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	sch->qstats.backlog += skb->len;
	__skb_queue_head(&q->qs[x], skb);
	/* If the selected queue has length q->limit+1, this means that
	 * all other queues are empty and we do simple tail drop.
	 * This packet is still requeued at the head of the queue; the
	 * tail packet is dropped.
	 */
	if (q->qs[x].qlen > q->limit) {
		skb = q->qs[x].prev;
		__skb_unlink(skb, &q->qs[x]);
		sch->qstats.drops++;
		sch->qstats.backlog -= skb->len;
		kfree_skb(skb);
		return NET_XMIT_CN;
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}

	if (++sch->q.qlen <= q->limit) {
		sch->qstats.requeues++;
		return 0;
	}

	sch->qstats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen;
	struct red_parms *p = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}
	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = prandom_u32();
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch);

		slot_queue_add(slot, skb);
		return NET_XMIT_CN;
	}

enqueue:
	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let the upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If the selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let the upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}