static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
{
        unsigned int size = sk_filter_len(fp);

        atomic_sub(size, &sk->sk_omem_alloc);
        call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
}
void rcu_barrier_bh(void)
{
        struct rcu_synchronize rcu;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
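Both this barrier and synchronize_rcu_bh() below lean on the same helper pair: a struct rcu_synchronize that couples an rcu_head to a completion, and a wakeme_after_rcu() callback that fires the completion once the grace period has elapsed. A minimal sketch of that pair, assuming the historical kernel/rcupdate.c definitions:

#include <linux/completion.h>
#include <linux/rcupdate.h>

/* Sketch: couples a grace-period callback to a completion (assumed
 * to match the historical kernel/rcupdate.c definitions). */
struct rcu_synchronize {
        struct rcu_head head;
        struct completion completion;
};

/* Awaken the sleeper embedded in the same rcu_synchronize. */
static void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu =
                container_of(head, struct rcu_synchronize, head);

        complete(&rcu->completion);
}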
/**
 * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
 *
 * Control will return to the caller some time after a full rcu_bh grace
 * period has elapsed, in other words after all currently executing rcu_bh
 * read-side critical sections have completed.  RCU read-side critical
 * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
 * and may be nested.
 */
void synchronize_rcu_bh(void)
{
        struct rcu_synchronize rcu;

        if (rcu_blocking_is_gp())
                return;

        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu_bh(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
}
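For context, here is a minimal sketch of the update-side pattern synchronize_rcu_bh() serves: publish a replacement pointer, wait out all rcu_read_lock_bh() readers, then free the old version. struct my_config, global_cfg, and both helpers are hypothetical names, not from the source:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_config {
        int value;
};

static struct my_config __rcu *global_cfg;

/* Reader: softirq-safe read-side critical section. */
static int read_config_value(void)
{
        struct my_config *cfg;
        int val = 0;

        rcu_read_lock_bh();
        cfg = rcu_dereference_bh(global_cfg);
        if (cfg)
                val = cfg->value;
        rcu_read_unlock_bh();
        return val;
}

/* Updater: publish the new version, wait out old readers, then free. */
static void update_config(struct my_config *new_cfg)
{
        struct my_config *old_cfg;

        old_cfg = rcu_dereference_protected(global_cfg, 1);
        rcu_assign_pointer(global_cfg, new_cfg);
        synchronize_rcu_bh();   /* all rcu_read_lock_bh() readers done */
        kfree(old_cfg);
}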
/*
 * Called with preemption disabled, and from cross-cpu IRQ context.
 */
static void rcu_barrier_func(void *type)
{
        int cpu = smp_processor_id();
        struct rcu_head *head = &per_cpu(rcu_barrier_head, cpu);

        atomic_inc(&rcu_barrier_cpu_count);
        switch ((enum rcu_barrier)type) {
        case RCU_BARRIER_STD:
                call_rcu(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_BH:
                call_rcu_bh(head, rcu_barrier_callback);
                break;
        case RCU_BARRIER_SCHED:
                call_rcu_sched(head, rcu_barrier_callback);
                break;
        }
}
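The rcu_barrier_callback() queued on each CPU above is not shown here. A sketch of its likely counterpart, assuming the historical scheme in which rcu_barrier_cpu_count and an rcu_barrier_completion (both file-scope state in that era's kernel/rcupdate.c) track the outstanding per-CPU callbacks:

/* Sketch (assumed): the last per-CPU callback to run completes the
 * barrier, waking whoever is blocked in rcu_barrier*(). */
static void rcu_barrier_callback(struct rcu_head *notused)
{
        if (atomic_dec_and_test(&rcu_barrier_cpu_count))
                complete(&rcu_barrier_completion);
}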
static void instance_put(struct nfulnl_instance *inst)
{
        if (inst && refcount_dec_and_test(&inst->use))
                call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
static inline void dnrt_drop(struct dn_route *rt)
{
        dst_release(&rt->u.dst);
        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
static inline void dnrt_free(struct dn_route *rt)
{
        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}
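All of the call_rcu_bh() sites above follow the same shape: embed a struct rcu_head in the object being freed and let the callback recover the enclosing object with container_of() once the grace period ends. A minimal self-contained sketch of that pattern; struct my_entry and its helpers are hypothetical:

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_entry {
        int key;
        struct rcu_head rcu;
};

/* Grace-period callback: recover the object and free it. */
static void my_entry_free_rcu(struct rcu_head *head)
{
        struct my_entry *e = container_of(head, struct my_entry, rcu);

        kfree(e);
}

static void my_entry_put(struct my_entry *e)
{
        /* Defer the free until all rcu_read_lock_bh() readers are done. */
        call_rcu_bh(&e->rcu, my_entry_free_rcu);
}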