/**
 * context_tracking_user_exit - Inform the context tracking that the CPU is
 *                              exiting userspace mode and entering the kernel.
 *
 * This function must be called after we entered the kernel from userspace,
 * before any use of an RCU read side critical section. This potentially includes
 * any high level kernel code like syscalls, exceptions, signal handling, etc...
 *
 * This call supports re-entrancy. This way it can be called from any exception
 * handler without needing to know if we came from userspace or not.
 */
void context_tracking_user_exit(void)
{
	unsigned long flags;

	if (!static_key_false(&context_tracking_enabled))
		return;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) == IN_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			/*
			 * We are going to run code that may use RCU. Inform
			 * RCU core about that (ie: we may need the tick again).
			 */
			rcu_user_exit();
			vtime_user_exit(current);
			trace_user_exit(0);
		}
		__this_cpu_write(context_tracking.state, IN_KERNEL);
	}
	local_irq_restore(flags);
}
/**
 * context_tracking_user_enter - Inform the context tracking that the CPU is going to
 *                               enter userspace mode.
 *
 * This function must be called right before we switch from the kernel
 * to userspace, when it's guaranteed the remaining kernel instructions
 * to execute won't use any RCU read side critical section because this
 * function sets RCU in extended quiescent state.
 */
void context_tracking_user_enter(void)
{
	unsigned long flags;

	/*
	 * Repeat the user_enter() check here because some archs may be calling
	 * this from asm and if no CPU needs context tracking, they shouldn't
	 * go further. Repeat the check here until they support the static key
	 * check.
	 */
	if (!static_key_false(&context_tracking_enabled))
		return;

	/*
	 * Some contexts may involve an exception occurring in an irq,
	 * leading to that nesting:
	 * rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
	 * This would mess up the dyntick_nesting count though. And rcu_irq_*()
	 * helpers are enough to protect RCU uses inside the exception. So
	 * just return immediately if we detect we are in an IRQ.
	 */
	if (in_interrupt())
		return;

	/* Kernel threads aren't supposed to go to userspace */
	WARN_ON_ONCE(!current->mm);

	local_irq_save(flags);
	if (__this_cpu_read(context_tracking.state) != IN_USER) {
		if (__this_cpu_read(context_tracking.active)) {
			trace_user_enter(0);
			/*
			 * At this stage, only low level arch entry code remains and
			 * then we'll run in userspace. We can assume there won't be
			 * any RCU read-side critical section until the next call to
			 * user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
			 * on the tick.
			 */
			vtime_user_enter(current);
			rcu_user_enter();
		}
		/*
		 * Even if context tracking is disabled on this CPU, because it's outside
		 * the full dynticks mask for example, we still have to keep track of the
		 * context transitions and states to prevent inconsistency on those of
		 * other CPUs.
		 * If a task triggers an exception in userspace, sleeps in the exception
		 * handler and then migrates to another CPU, that new CPU must know where
		 * the exception returns by the time we call exception_exit().
		 * This information can only be provided by the previous CPU when it called
		 * exception_enter().
		 * OTOH we can spare the calls to vtime and RCU when context_tracking.active
		 * is false because we know that CPU is not tickless.
		 */
		__this_cpu_write(context_tracking.state, IN_USER);
	}
	local_irq_restore(flags);
}
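The comment above refers to exception_enter()/exception_exit(). As a rough illustration of how an arch exception handler pairs with these context tracking hooks, a hedged sketch is shown below; do_example_fault() is a made-up handler name, not code from this file.

/*
 * Hypothetical sketch: exception_enter() saves the previous context state
 * (calling the user exit path when we came from userspace), and
 * exception_exit() restores it on the way out, even if the task slept and
 * migrated to another CPU in between.
 */
static void do_example_fault(struct pt_regs *regs)
{
	enum ctx_state prev_state = exception_enter();

	/* ... actual fault handling, may sleep and use RCU ... */

	exception_exit(prev_state);
}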
int main(int argc, char *argv[])
{
	jump_label_init();

	if (static_key_false(&key)) {
		printf("%s\t\tFAIL\n", __FILE__);
		return 1;
	} else {
		printf("%s\t\tOK\n", __FILE__);
		return 0;
	}
}
int main(int argc, char *argv[])
{
	int ret = 0;

	jump_label_init();

	/* Key starts out false: the static branch must not be taken yet. */
	if (static_key_false(&key))
		++ret;
	else
		ret += 0;

	static_key_slow_inc(&key);

	/* After the increment, the static branch must now be taken. */
	if (static_key_false(&key))
		ret += 0;
	else
		++ret;

	if (ret)
		printf("%s\t\tFAIL\n", __FILE__);
	else
		printf("%s\t\tOK\n", __FILE__);

	return 0;
}
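Both tests assume that the key they exercise is defined elsewhere in the harness. A minimal declaration sketch using the old struct static_key API (an assumption for illustration, not the harness's actual code) would be:

/* Assumed definition: a static key that starts out false/disabled. */
struct static_key key = STATIC_KEY_INIT_FALSE;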
/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline u64 steal_account_process_time(u64 maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;
		steal = min(steal, maxtime);
		account_steal_time(steal);
		this_rq()->prev_steal_time += steal;

		return steal;
	}
#endif
	return 0;
}
static __always_inline bool steal_account_process_tick(void)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		u64 steal, st = 0;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		st = steal_ticks(steal);
		this_rq()->prev_steal_time += st * TICK_NSEC;

		account_steal_time(st);
		return st;
	}
#endif
	return false;
}
/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		cputime_t steal_cputime;
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		steal_cputime = min(nsecs_to_cputime(steal), maxtime);
		account_steal_time(steal_cputime);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);

		return steal_cputime;
	}
#endif
	return 0;
}
static inline void vmmr0_mmu_audit(struct vmmr0_vcpu *vcpu, int point)
{
	if (static_key_false((&mmu_audit_key)))
		__vmmr0_mmu_audit(vcpu, point);
}
int sched_clock_stable(void)
{
	return static_key_false(&__sched_clock_stable);
}
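For context, a key like __sched_clock_stable is flipped elsewhere with the slow-path helpers. The sketch below shows that general pattern only; it is an assumption for illustration and not the kernel's exact implementation, which also has to synchronize with the tick and clock updates.

/* Minimal sketch: enable/disable the stable-clock fast path at runtime. */
void set_sched_clock_stable(void)
{
	if (!static_key_false(&__sched_clock_stable))
		static_key_slow_inc(&__sched_clock_stable);
}

void clear_sched_clock_stable(void)
{
	if (static_key_false(&__sched_clock_stable))
		static_key_slow_dec(&__sched_clock_stable);
}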
static inline void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
{
	if (static_key_false((&mmu_audit_key)))
		__kvm_mmu_audit(vcpu, point);
}