static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	/* Tail-drop if the whole qdisc is at its packet limit. */
	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch);

	f = fq_classify(skb, q);
	/* Per-flow limit; the internal flow (high-priority traffic) is exempt. */
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		/* Refill credit only if the flow has been idle long enough. */
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
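/*
 * The "overwrites f->age" note above is explained by struct fq_flow in
 * net/sched/sch_fq.c: tail and age share a union, since a detached
 * (empty) flow has no tail skb to track. Abridged sketch; the field
 * names match the kernel source, but the full struct has more members:
 */
struct fq_flow {
	struct sk_buff	*head;		/* first skb queued on this flow */
	union {
		struct sk_buff	*tail;	/* last skb, while flow has packets */
		unsigned long	age;	/* jiffies when flow went empty (gc) */
	};
	/* ... rb-tree nodes, sk, qlen, credit, etc. omitted ... */
};
/*
 * Hence the credit-refill test that reads f->age must run before
 * flow_queue_add() sets f->tail and clobbers the timestamp.
 */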
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch);
	}

	f->qlen++;
	flow_queue_add(f, skb);

	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	/* Backlog is accounted by hand here; no qdisc_qstats_backlog_inc() yet. */
	sch->qstats.backlog += qdisc_pkt_len(skb);
	if (fq_flow_is_detached(f)) {
		fq_flow_add_tail(&q->new_flows, f);
		/* Unconditional refill: no flow_refill_delay check in this variant. */
		if (q->quantum > f->credit)
			f->credit = q->quantum;
		q->inactive_flows--;
		qdisc_unthrottled(sch);
	}
	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
		qdisc_unthrottled(sch);
	}
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
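/*
 * qdisc_unthrottled() appears only in this older variant; it clears the
 * throttled state so a newly active flow can be dequeued without waiting
 * for the qdisc watchdog to fire. A sketch of the helper as defined in
 * include/net/sch_generic.h of that era (assumption: the exact body may
 * differ slightly by version; it was removed from later kernels):
 */
static inline void qdisc_unthrottled(struct Qdisc *qdisc)
{
	clear_bit(__QDISC_STATE_THROTTLED, &qdisc->state);
}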
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		      struct sk_buff **to_free)
{
	struct fq_sched_data *q = qdisc_priv(sch);
	struct fq_flow *f;

	if (unlikely(sch->q.qlen >= sch->limit))
		return qdisc_drop(skb, sch, to_free);

	f = fq_classify(skb, q);
	if (unlikely(f->qlen >= q->flow_plimit && f != &q->internal)) {
		q->stat_flows_plimit++;
		return qdisc_drop(skb, sch, to_free);
	}

	f->qlen++;
	if (skb_is_retransmit(skb))
		q->stat_tcp_retrans++;
	qdisc_qstats_backlog_inc(sch, skb);
	if (fq_flow_is_detached(f)) {
		struct sock *sk = skb->sk;

		fq_flow_add_tail(&q->new_flows, f);
		if (time_after(jiffies, f->age + q->flow_refill_delay))
			f->credit = max_t(u32, f->credit, q->quantum);
		/* Tell the stack that FQ paces this socket. */
		if (sk && q->rate_enable) {
			if (unlikely(smp_load_acquire(&sk->sk_pacing_status) !=
				     SK_PACING_FQ))
				smp_store_release(&sk->sk_pacing_status,
						  SK_PACING_FQ);
		}
		q->inactive_flows--;
	}

	/* Note: this overwrites f->age */
	flow_queue_add(f, skb);

	if (unlikely(f == &q->internal)) {
		q->stat_internal_packets++;
	}
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}
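/*
 * The acquire/release pair above advertises FQ as the pacer for this
 * socket, letting TCP skip its own internal pacing. Sketch of the
 * consumer side, as in net/ipv4/tcp_output.c of roughly the same era
 * (assumption: later kernels restructured this check):
 */
static bool tcp_needs_internal_pacing(const struct sock *sk)
{
	return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
}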