static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
{
	struct rb_node **p, *parent;
	struct sock *sk = skb->sk;
	struct rb_root *root;
	struct fq_flow *f;
	int band;

	/* warning: no starvation prevention... */
	band = prio2band[skb->priority & TC_PRIO_MAX];
	if (unlikely(band == 0))
		return &q->internal;

	if (unlikely(!sk)) {
		/* By forcing low order bit to 1, we make sure to not
		 * collide with a local flow (socket pointers are word aligned)
		 */
		sk = (struct sock *)(skb_get_rxhash(skb) | 1L);
	}

	root = &q->fq_root[hash_32((u32)(long)sk, q->fq_trees_log)];

	/* Opportunistically garbage-collect inactive flows once the table
	 * is crowded and at least half the tracked flows are idle.
	 */
	if (q->flows >= (2U << q->fq_trees_log) &&
	    q->inactive_flows > q->flows/2)
		fq_gc(q, root, sk);

	p = &root->rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		f = container_of(parent, struct fq_flow, fq_node);
		if (f->sk == sk) {
			/* socket might have been reallocated, so check
			 * if its sk_hash is the same.
			 * If not, we need to refill credit
			 * with initial quantum
			 */
			if (unlikely(skb->sk &&
				     f->socket_hash != sk->sk_hash)) {
				f->credit = q->initial_quantum;
				f->socket_hash = sk->sk_hash;
			}
			return f;
		}
		if (f->sk > sk)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/* No matching flow: allocate a new one, falling back to the
	 * internal queue if memory is tight.
	 */
	f = kmem_cache_zalloc(fq_flow_cachep, GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!f)) {
		q->stat_allocation_errors++;
		return &q->internal;
	}
	fq_flow_set_detached(f);
	f->sk = sk;
	if (skb->sk)
		f->socket_hash = sk->sk_hash;
	f->credit = q->initial_quantum;

	rb_link_node(&f->fq_node, parent, p);
	rb_insert_color(&f->fq_node, root);

	q->flows++;
	q->inactive_flows++;
	return f;
}
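/*
 * Hedged sketch (not part of sch_fq): a standalone demonstration of why
 * fq_classify() can safely overload the rbtree key for socketless packets.
 * Heap-allocated socket pointers are at least word aligned, so their low
 * bit is always 0; forcing bit 0 of the rxhash-derived key to 1 guarantees
 * the synthetic key can never equal a real socket pointer in the same tree.
 * All names below are local to this sketch.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	void *sock_like = malloc(64);			/* word aligned, so bit 0 is clear */
	uint32_t rxhash = 0x12345678u;			/* stand-in for a packet flow hash */
	uintptr_t synthetic = (uintptr_t)rxhash | 1UL;	/* low bit forced to 1 */

	assert(((uintptr_t)sock_like & 1UL) == 0);	/* real pointer: bit 0 clear */
	assert((synthetic & 1UL) == 1UL);		/* synthetic key: bit 0 set */
	assert(synthetic != (uintptr_t)sock_like);	/* hence the keys cannot collide */

	printf("pointer key %p vs synthetic key %#lx can never collide\n",
	       sock_like, (unsigned long)synthetic);
	free(sock_like);
	return 0;
}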
static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfb_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int i;
	u32 p_min = ~0;
	u32 minqlen = ~0;
	u32 r, slot, salt, sfbhash;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (unlikely(sch->q.qlen >= q->limit)) {
		sch->qstats.overlimits++;
		q->stats.queuedrop++;
		goto drop;
	}

	if (q->rehash_interval > 0) {
		unsigned long limit = q->rehash_time + q->rehash_interval;

		if (unlikely(time_after(jiffies, limit))) {
			/* Rehash period elapsed: make the warmed-up
			 * alternate set of bins the active one.
			 */
			sfb_swap_slot(q);
			q->rehash_time = jiffies;
		} else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
				    time_after(jiffies, limit - q->warmup_time))) {
			/* Start feeding the alternate bins ahead of the swap */
			q->double_buffering = true;
		}
	}

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!sfb_classify(skb, q, &ret, &salt))
			goto other_drop;
	} else {
		salt = skb_get_rxhash(skb);
	}

	slot = q->slot;

	sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
	if (!sfbhash)
		sfbhash = 1;
	sfb_skb_cb(skb)->hashes[slot] = sfbhash;

	/* Walk one bucket per level, Bloom-filter style: successive chunks
	 * of the hash select the bucket, and the flow's estimated state is
	 * the minimum over all of its buckets.
	 */
	for (i = 0; i < SFB_LEVELS; i++) {
		u32 hash = sfbhash & SFB_BUCKET_MASK;
		struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

		sfbhash >>= SFB_BUCKET_SHIFT;
		if (b->qlen == 0)
			decrement_prob(b, q);
		else if (b->qlen >= q->bin_size)
			increment_prob(b, q);
		if (minqlen > b->qlen)
			minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}

	slot ^= 1;
	sfb_skb_cb(skb)->hashes[slot] = 0;

	if (unlikely(minqlen >= q->max)) {
		sch->qstats.overlimits++;
		q->stats.bucketdrop++;
		goto drop;
	}

	if (unlikely(p_min >= SFB_MAX_PROB)) {
		/* Inelastic flow */
		if (q->double_buffering) {
			/* Mirror the accounting into the warming-up bins */
			sfbhash = jhash_1word(salt, q->bins[slot].perturbation);
			if (!sfbhash)
				sfbhash = 1;
			sfb_skb_cb(skb)->hashes[slot] = sfbhash;

			for (i = 0; i < SFB_LEVELS; i++) {
				u32 hash = sfbhash & SFB_BUCKET_MASK;
				struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

				sfbhash >>= SFB_BUCKET_SHIFT;
				if (b->qlen == 0)
					decrement_prob(b, q);
				else if (b->qlen >= q->bin_size)
					increment_prob(b, q);
			}
		}
		if (sfb_rate_limit(skb, q)) {
			sch->qstats.overlimits++;
			q->stats.penaltydrop++;
			goto drop;
		}
		goto enqueue;
	}
	/* (excerpt ends here; the probabilistic mark/drop decision and the
	 * enqueue, drop and other_drop labels follow in the full function)
	 */
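/*
 * Hedged sketch (not kernel code): the core SFB accounting used above,
 * reduced to a standalone program. A flow hash is sliced into independent
 * per-level bucket indices (Bloom-filter style), and the flow's state is
 * the minimum over its buckets, so only flows whose buckets are all
 * saturated get treated as inelastic. The constants, struct layout and
 * helper below are local to this sketch, not the kernel's.
 */
#include <stdint.h>
#include <stdio.h>

#define LEVELS       4
#define BUCKET_SHIFT 4
#define BUCKET_MASK  ((1u << BUCKET_SHIFT) - 1)	/* 16 buckets per level */

struct bucket {
	uint32_t qlen;		/* packets currently hashed to this bucket */
	uint32_t p_mark;	/* marking probability, fixed point */
};

static struct bucket bins[LEVELS][BUCKET_MASK + 1];

/* Enqueue-path walk: bump one bucket per level and return the minimum
 * marking probability and queue length seen across the levels.
 */
static uint32_t classify(uint32_t hash, uint32_t *minqlen)
{
	uint32_t p_min = ~0u;

	*minqlen = ~0u;
	for (int i = 0; i < LEVELS; i++) {
		struct bucket *b = &bins[i][hash & BUCKET_MASK];

		hash >>= BUCKET_SHIFT;	/* next level uses the next hash chunk */
		b->qlen++;
		if (*minqlen > b->qlen)
			*minqlen = b->qlen;
		if (p_min > b->p_mark)
			p_min = b->p_mark;
	}
	return p_min;
}

int main(void)
{
	uint32_t minqlen;
	uint32_t p_min = classify(0xdeadbeefu, &minqlen);

	/* Fresh bins: every bucket starts at qlen 0 / p_mark 0, so this
	 * flow is seen as perfectly elastic (p_min == 0, minqlen == 1).
	 */
	printf("p_min=%u minqlen=%u\n", p_min, minqlen);
	return 0;
}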