/*
 * This function is both preempt and irq safe. The former is due to explicit
 * preemption disable. The latter is guaranteed by the fact that the slow path
 * is explicitly protected by an irq-safe spinlock whereas the fast path uses
 * this_cpu_add which is irq-safe by definition. Hence there is no need to muck
 * with irq state before calling this one.
 */
void percpu_counter_add_batch(struct percpu_counter *fbc, int64_t amount, int32_t batch)
{
	int64_t count;

	preempt_disable();
	count = __this_cpu_read(*fbc->counters) + amount;
	if (count >= batch || count <= -batch) {
		unsigned long flags;

		spin_lock_irqsave(&fbc->lock, flags);
		fbc->count += count;
		__this_cpu_sub(*fbc->counters, count - amount);
		spin_unlock_irqrestore(&fbc->lock, flags);
	} else {
		this_cpu_add(*fbc->counters, amount);
	}
	preempt_enable();
}
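For context, callers rarely invoke the batched helper directly. A minimal sketch, assuming the usual percpu_counter API, of the plain percpu_counter_add() wrapper: it simply forwards the kernel's default percpu_counter_batch threshold, so hot-path callers stay on the cheap this_cpu_add() branch most of the time (int64_t is used here only to match the typedef style of the excerpt above).

/* Sketch of the common wrapper; percpu_counter_batch is the default
 * batch size, so most additions never take the spinlocked slow path.
 */
static inline void percpu_counter_add(struct percpu_counter *fbc, int64_t amount)
{
	percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}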
static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
			      struct sk_buff **to_free)
{
	int band = prio2band[skb->priority & TC_PRIO_MAX];
	struct pfifo_fast_priv *priv = qdisc_priv(qdisc);
	struct skb_array *q = band2list(priv, band);
	unsigned int pkt_len = qdisc_pkt_len(skb);
	int err;

	err = skb_array_produce(q, skb);

	if (unlikely(err))
		return qdisc_drop_cpu(skb, qdisc, to_free);

	qdisc_qstats_cpu_qlen_inc(qdisc);
	/* Note: skb can not be used after skb_array_produce(),
	 * so we better not use qdisc_qstats_cpu_backlog_inc()
	 */
	this_cpu_add(qdisc->cpu_qstats->backlog, pkt_len);
	return NET_XMIT_SUCCESS;
}
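The note inside pfifo_fast_enqueue() is worth unpacking: qdisc_qstats_cpu_backlog_inc() reads the packet length from the skb itself, which is no longer safe once the skb has been handed to skb_array_produce(), because the consumer may already own (or have freed) it. A rough sketch of that helper, with the exact signature treated as an assumption, shows why the length is cached in pkt_len beforehand and then added with a bare this_cpu_add():

/* Approximate shape of the helper the comment avoids: it dereferences
 * the skb (via qdisc_pkt_len()) to learn the length, so it must not be
 * called after the skb has been produced into the array.
 */
static inline void qdisc_qstats_cpu_backlog_inc(struct Qdisc *sch,
						const struct sk_buff *skb)
{
	this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}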