static int sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	__skb_queue_head(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen < q->limit - 1)
		return 0;

	sch->stats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
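The "flow is new" branch above, repeated in each enqueue/requeue variant below, threads active slots into a circular singly linked list through q->next[], with q->tail naming the most recently activated slot and next[tail] the round-robin head. A minimal standalone sketch of just that list discipline (DEPTH and the slot numbers are made up for the demo; this is not kernel code):

#include <stdio.h>

#define DEPTH 8			/* stands in for SFQ_DEPTH */

static unsigned next[DEPTH];	/* next[x]: successor of slot x in the ring */
static unsigned tail = DEPTH;	/* DEPTH doubles as "no active flow yet" */

/* Mirror of the "flow is new" branch: link slot x into the ring. */
static void link_new_flow(unsigned x)
{
	if (tail == DEPTH) {	/* first active flow: a ring of one */
		next[x] = x;
	} else {		/* splice x in right after the current tail */
		next[x] = next[tail];
		next[tail] = x;
	}
	tail = x;
}

int main(void)
{
	link_new_flow(3);
	link_new_flow(5);
	link_new_flow(1);

	/* One full round starting at the head: prints slots 3, 5, 1,
	 * i.e. flows are served in activation order. */
	unsigned x = next[tail];
	for (int i = 0; i < 3; i++) {
		printf("visit slot %u\n", x);
		x = next[x];
	}
	return 0;
}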
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS;
			/* fall through: stolen/queued packets also return 0 */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through: stolen/queued packets also return 0 */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
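Both variants return bucket+1 so that 0 can signal "drop", and report through *qerr whether that drop should be counted against the qdisc. Neither caller is shown in this section; the following is a hedged reconstruction of the caller-side contract for the second variant (the function name and stats update are assumptions, modeled on the enqueue paths below):

/* Hypothetical caller sketch, assuming the newer flag-style *qerr. */
static int sfq_enqueue_classified(struct sk_buff *skb, struct Qdisc *sch)
{
	int ret = NET_XMIT_SUCCESS;
	unsigned int hash = sfq_classify(skb, sch, &ret);

	if (hash == 0) {
		/* No class matched, or a tc action shot/stole the packet.
		 * Count a drop only when __NET_XMIT_BYPASS is set; stolen
		 * or queued packets were consumed by the action. */
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;	/* back to a 0-based bucket index for q->ht[] */
	/* ... proceed as in the enqueue paths below ... */
	return NET_XMIT_SUCCESS;
}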
static int sfq_q_enqueue(struct sk_buff *skb, struct sfq_sched_data *q,
			 int end)
{
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == q->depth) {
		q->ht[hash] = x = q->dep[q->depth].next;
		q->hash[x] = hash;
	}

	if (end == SFQ_TAIL) {
		/* If selected queue has length q->limit, this means that
		 * all other queues are empty and that we do simple tail drop,
		 * i.e. drop _this_ packet.
		 */
		if (q->qs[x].qlen >= q->limit) {
			unsigned int drop_len = skb->len;

			kfree_skb(skb);
			return drop_len;
		}
		__skb_queue_tail(&q->qs[x], skb);
	} else { /* end == SFQ_HEAD */
		__skb_queue_head(&q->qs[x], skb);
		/* If selected queue has length q->limit+1, this means that
		 * all other queues are empty and we do simple tail drop.
		 * This packet is still requeued at head of queue, tail packet
		 * is dropped.
		 */
		if (q->qs[x].qlen > q->limit) {
			unsigned int drop_len;

			skb = q->qs[x].prev;
			drop_len = skb->len;
			__skb_unlink(skb, &q->qs[x]);
			kfree_skb(skb);
			return drop_len;
		}
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == q->depth) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	return 0;
}
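sfq_q_enqueue() reports a drop by returning the dropped packet's byte count (0 on success), leaving queue-length and backlog bookkeeping to its caller. A hypothetical wrapper showing that contract for the SFQ_TAIL case (the wrapper name and exact stats updates are assumptions, modeled on the sfq_enqueue() variant later in this section):

static int sfq_enqueue_via_helper(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int drop_len;

	/* In SFQ_TAIL mode a non-zero return means the helper freed
	 * _this_ packet (simple tail drop), so skb must not be touched
	 * afterwards; the byte count is there for backlog accounting. */
	drop_len = sfq_q_enqueue(skb, q, SFQ_TAIL);
	if (drop_len) {
		sch->qstats.drops++;
		return NET_XMIT_CN;
	}
	sch->q.qlen++;
	sch->qstats.backlog += skb->len;
	return NET_XMIT_SUCCESS;
}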
static int sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	sch->qstats.backlog += skb->len;
	__skb_queue_head(&q->qs[x], skb);
	/* If selected queue has length q->limit+1, this means that
	 * all other queues are empty and we do simple tail drop.
	 * This packet is still requeued at head of queue, tail packet
	 * is dropped.
	 */
	if (q->qs[x].qlen > q->limit) {
		skb = q->qs[x].prev;
		__skb_unlink(skb, &q->qs[x]);
		sch->qstats.drops++;
		sch->qstats.backlog -= skb->len;
		kfree_skb(skb);
		return NET_XMIT_CN;
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}

	if (++sch->q.qlen <= q->limit) {
		sch->qstats.requeues++;
		return 0;
	}

	sch->qstats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	/* If selected queue has length q->limit, this means that
	 * all other queues are empty and that we do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (q->qs[x].qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += skb->len;
	__skb_queue_tail(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}

	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;

	__skb_queue_head_init(&list);

	/* First pass: drain every active slot into a private list,
	 * resetting the slot's RED state and hash-table entry. */
	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	/* Second pass: re-enqueue each skb under the new perturbation. */
	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_decrease_qlen(sch, dropped);
}
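sfq_hash() is not shown in this section; it mixes a flow key derived from the packet with q->perturbation. The standalone toy below (the hash is a stand-in, not the kernel's) shows why changing the perturbation remaps a flow to a different bucket, which is exactly what sfq_rehash() has to compensate for: packets already queued under the old bucket would otherwise be dequeued out of order relative to the same flow's new arrivals.

#include <stdio.h>

#define DIVISOR 1024	/* stands in for the hash-table size */

/* Demo-only mixer; the real sfq_hash() is different. */
static unsigned toy_hash(unsigned flow_key, unsigned perturbation)
{
	unsigned h = flow_key ^ perturbation;

	h ^= h >> 16;
	h *= 0x45d9f3bu;	/* arbitrary odd multiplier, demo only */
	h ^= h >> 16;
	return h % DIVISOR;
}

int main(void)
{
	unsigned flow = 0xc0a80001u;	/* pretend flow key */

	/* Same flow, two perturbation values: almost always two
	 * different buckets, hence the rehash above. */
	printf("old bucket: %u\n", toy_hash(flow, 0x12345678u));
	printf("new bucket: %u\n", toy_hash(flow, 0x9abcdef0u));
	return 0;
}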