static int sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = (struct sfq_sched_data *)sch->data;
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}
	__skb_queue_head(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen < q->limit - 1)
		return 0;

	sch->stats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
static int sfq_q_enqueue(struct sk_buff *skb, struct sfq_sched_data *q, int end)
{
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == q->depth) {
		q->ht[hash] = x = q->dep[q->depth].next;
		q->hash[x] = hash;
	}

	if (end == SFQ_TAIL) {
		/* If selected queue has length q->limit, this means that
		 * all other queues are empty and that we do simple tail drop,
		 * i.e. drop _this_ packet.
		 */
		if (q->qs[x].qlen >= q->limit) {
			unsigned int drop_len = skb->len;

			kfree_skb(skb);
			return drop_len;
		}
		__skb_queue_tail(&q->qs[x], skb);
	} else { /* end == SFQ_HEAD */
		__skb_queue_head(&q->qs[x], skb);
		/* If selected queue has length q->limit+1, this means that
		 * all other queues are empty and we do simple tail drop.
		 * This packet is still requeued at head of queue, tail packet
		 * is dropped.
		 */
		if (q->qs[x].qlen > q->limit) {
			unsigned int drop_len;

			skb = q->qs[x].prev;
			drop_len = skb->len;
			__skb_unlink(skb, &q->qs[x]);
			kfree_skb(skb);
			return drop_len;
		}
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == q->depth) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}

	return 0;
}
static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	/* If selected queue has length q->limit, do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (slot->qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		q->tail = slot;
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	return (qlen != slot->qlen) ? NET_XMIT_CN : NET_XMIT_SUCCESS;
}
static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	int ret;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret == NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	/* If selected queue has length q->limit, this means that
	 * all other queues are empty and that we do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (q->qs[x].qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += skb->len;
	__skb_queue_tail(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
		return 0;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
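/* Illustrative sketch (not part of the qdisc above): the "new flow" branch
 * links index x into a circular singly-linked ring of active flows via
 * q->next[] and q->tail, so the dequeue path can round-robin over flows.
 * The standalone toy below shows the same tail-insert pattern on plain
 * arrays; flow_ring, ring_add and RING_EMPTY are hypothetical names used
 * only for this sketch, not kernel identifiers.
 */
#include <stdio.h>

#define RING_EMPTY	0xffff

struct flow_ring {
	unsigned short next[16];	/* next[i] == successor of flow i */
	unsigned short tail;		/* last active flow, RING_EMPTY if none */
};

/* Insert flow x as the new tail of the circular ring. */
static void ring_add(struct flow_ring *r, unsigned short x)
{
	if (r->tail == RING_EMPTY) {	/* first active flow: points to itself */
		r->next[x] = x;
	} else {			/* splice x in right after the current tail */
		r->next[x] = r->next[r->tail];
		r->next[r->tail] = x;
	}
	r->tail = x;
}

int main(void)
{
	struct flow_ring r = { .tail = RING_EMPTY };
	unsigned short i, cur;

	ring_add(&r, 3);
	ring_add(&r, 7);
	ring_add(&r, 1);

	/* Walk one full round starting at the head (the flow after the tail). */
	cur = r.next[r.tail];
	for (i = 0; i < 3; i++) {
		printf("%u ", cur);	/* prints: 3 7 1 */
		cur = r.next[cur];
	}
	printf("\n");
	return 0;
}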
static int sfq_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned hash = sfq_hash(q, skb);
	sfq_index x;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	sch->qstats.backlog += skb->len;
	__skb_queue_head(&q->qs[x], skb);
	/* If selected queue has length q->limit+1, this means that
	 * all other queues are empty and we do simple tail drop.
	 * This packet is still requeued at head of queue, tail packet
	 * is dropped.
	 */
	if (q->qs[x].qlen > q->limit) {
		skb = q->qs[x].prev;
		__skb_unlink(skb, &q->qs[x]);
		sch->qstats.drops++;
		sch->qstats.backlog -= skb->len;
		kfree_skb(skb);
		return NET_XMIT_CN;
	}

	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->tail = x;
			q->next[x] = x;
			q->allot[x] = q->quantum;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
			q->tail = x;
		}
	}
	if (++sch->q.qlen <= q->limit) {
		sch->qstats.requeues++;
		return 0;
	}

	sch->qstats.drops++;
	sfq_drop(sch);
	return NET_XMIT_CN;
}
/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_decrease_qlen(sch, dropped);
}
static int sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							     &slot->vars,
							     slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			sch->qstats.overlimits++;
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			sch->qstats.overlimits++;
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch);

		slot_queue_add(slot, skb);
		return NET_XMIT_CN;
	}

enqueue:
	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * This might sound unfair for a new flow to wait after old ones,
		 * but we could end up servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}
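/* Illustrative sketch (not part of the qdisc above): the congestion_drop path
 * uses a head-drop policy - when the slot is full, the oldest packet is
 * evicted and the new one is queued, and only the length difference is
 * charged to the backlog. The standalone toy below, with hypothetical names
 * (toy_queue, toy_enqueue) and plain integers for packet lengths, sketches
 * that accounting under those assumptions; it is not the kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

#define TOY_DEPTH 4

struct toy_queue {
	unsigned int len[TOY_DEPTH];	/* packet lengths, oldest first */
	unsigned int qlen;		/* number of queued packets */
	unsigned int backlog;		/* sum of queued packet lengths */
};

/* Enqueue a packet of 'len' bytes; head-drop the oldest one when full. */
static bool toy_enqueue(struct toy_queue *q, unsigned int len, bool headdrop)
{
	unsigned int i;

	if (q->qlen >= TOY_DEPTH) {
		if (!headdrop)
			return false;	/* tail drop: reject the new packet */
		/* head drop: evict the oldest, keep the new one.
		 * The kernel folds the two updates below into a single
		 * signed delta (old_len - new_len).
		 */
		q->backlog -= q->len[0];
		q->backlog += len;
		for (i = 1; i < q->qlen; i++)
			q->len[i - 1] = q->len[i];
		q->len[q->qlen - 1] = len;
		return true;
	}
	q->len[q->qlen++] = len;
	q->backlog += len;
	return true;
}

int main(void)
{
	struct toy_queue q = { .qlen = 0, .backlog = 0 };
	unsigned int pkts[] = { 100, 200, 300, 400, 500 };
	unsigned int i;

	for (i = 0; i < 5; i++)
		toy_enqueue(&q, pkts[i], true);
	/* 100 was head-dropped; backlog = 200+300+400+500 = 1400 */
	printf("qlen=%u backlog=%u\n", q.qlen, q.backlog);
	return 0;
}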