/*
 * Note that the calling CPU has reached a point where it might be
 * rescheduled: with interrupts disabled, record an end-of-batch
 * (quiescent) point for this CPU.
 */
void rcu_note_might_resched(void)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcu_eob(rcu_cpu());
	raw_local_irq_restore(flags);
}
/*
 * Insert an RCU callback onto the calling CPU's list of 'current batch'
 * callbacks.  Lockless version, can be invoked anywhere except under NMI.
 */
void call_rcu_sched(struct rcu_head *cb, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;
	struct rcu_data *rd;
	struct rcu_list *cblist;
	int which;

	cb->func = func;
	cb->next = NULL;

	raw_local_irq_save(flags);
	smp_mb();

	rd = &rcu_data[rcu_cpu()];
	which = ACCESS_ONCE(rcu_which);
	cblist = &rd->cblist[which];

	/* The following is not NMI-safe, therefore call_rcu()
	 * cannot be invoked under NMI. */
	rcu_list_add(cblist, cb);
	rd->nqueued++;

	smp_mb();
	raw_local_irq_restore(flags);
}
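/*
 * Illustrative usage sketch (not part of JRCU itself): a caller typically
 * embeds a struct rcu_head in its own object and passes a callback that
 * reclaims the object once a grace period has elapsed.  The struct and
 * function names below are hypothetical, and kfree() assumes
 * <linux/slab.h>.
 */
struct foo {
	int data;
	struct rcu_head rcu;
};

static void foo_reclaim(struct rcu_head *rcu)
{
	struct foo *fp = container_of(rcu, struct foo, rcu);

	kfree(fp);
}

static void foo_retire(struct foo *fp)
{
	/* Defer the kfree() until all pre-existing readers are done. */
	call_rcu_sched(&fp->rcu, foo_reclaim);
}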
/*
 * If this unlock ends the outermost preempt-disabled section, the CPU is
 * leaving its read-side critical section, so record a quiescent point
 * before re-enabling preemption.
 */
void jrcu_read_unlock(void)
{
	if (preempt_count() == 1)
		rcu_eob(rcu_cpu());
	preempt_enable();
}
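/*
 * For context, a minimal sketch of the matching read-side entry, under the
 * assumption that a JRCU read-side critical section is simply a
 * preemption-disabled region (which is why the unlock above treats
 * preempt_count() == 1 as the outermost level).  The function name is
 * shown for illustration only:
 */
void jrcu_read_lock(void)
{
	preempt_disable();
}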
void __rcu_preempt_sub(void)
{
	rcu_eob(rcu_cpu());
}