static __init int jump_label_test(void)
{
	int i;

	/* Run the toggle sequence twice to verify the transitions are repeatable. */
	for (i = 0; i < 2; i++) {
		WARN_ON(static_key_enabled(&sk_true.key) != true);
		WARN_ON(static_key_enabled(&sk_false.key) != false);

		WARN_ON(!static_branch_likely(&sk_true));
		WARN_ON(!static_branch_unlikely(&sk_true));
		WARN_ON(static_branch_likely(&sk_false));
		WARN_ON(static_branch_unlikely(&sk_false));

		static_branch_disable(&sk_true);
		static_branch_enable(&sk_false);

		WARN_ON(static_key_enabled(&sk_true.key) == true);
		WARN_ON(static_key_enabled(&sk_false.key) == false);

		WARN_ON(static_branch_likely(&sk_true));
		WARN_ON(static_branch_unlikely(&sk_true));
		WARN_ON(!static_branch_likely(&sk_false));
		WARN_ON(!static_branch_unlikely(&sk_false));

		static_branch_enable(&sk_true);
		static_branch_disable(&sk_false);
	}

	return 0;
}
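The sk_true and sk_false keys exercised above are not part of this snippet; a minimal sketch of how such keys could be declared, assuming the standard <linux/jump_label.h> macros (one key defaulting to enabled, one to disabled, matching the assertions in the first loop iteration):

#include <linux/jump_label.h>

/* Assumed declarations: sk_true starts enabled, sk_false starts disabled,
 * which is the state the first WARN_ON() pair above checks for. */
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);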
Example #2
static void __clear_sched_clock_stable(struct work_struct *work)
{
	struct sched_clock_data *scd = this_scd();

	/*
	 * Attempt to make the stable->unstable transition continuous.
	 *
	 * Trouble is, this is typically called from the TSC watchdog
	 * timer, which is late per definition. This means the tick
	 * values can already be screwy.
	 *
	 * Still do what we can.
	 */
	gtod_offset = (scd->tick_raw + raw_offset) - (scd->tick_gtod);

	printk(KERN_INFO "sched_clock: Marking unstable (%lld, %lld)<-(%lld, %lld)\n",
			scd->tick_gtod, gtod_offset,
			scd->tick_raw,  raw_offset);

	static_branch_disable(&__sched_clock_stable);
	tick_dep_set(TICK_DEP_BIT_CLOCK_UNSTABLE);
}
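For context, a minimal sketch of the other side of this transition, assuming the usual declaration and reader: __sched_clock_stable defaults to off, readers query it through static_branch_likely(), and the function above only has to flip the key off and record an offset so readers see a continuous clock:

#include <linux/jump_label.h>

/* Assumed declaration: the key defaults to "unstable" and is enabled
 * elsewhere once the TSC is deemed trustworthy. */
static DEFINE_STATIC_KEY_FALSE(__sched_clock_stable);

int sched_clock_stable(void)
{
	/* Patched to a plain jump/nop, so the stable fast path costs nothing. */
	return static_branch_likely(&__sched_clock_stable);
}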
Example #3
void __init native_pv_lock_init(void)
{
	/* Bare metal: no hypervisor is present, so disable the key that
	 * gates the virtualized spinlock fallback path. */
	if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
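A minimal sketch of the consumer side, under the assumption that virt_spin_lock_key is a default-true key declared in the arch headers: when the CPU reports no hypervisor, the init function above disables the key, and any branch guarded by it is patched out on bare metal.

#include <linux/jump_label.h>

/* Assumed declaration, normally provided by the arch spinlock headers
 * with the definition living alongside native_pv_lock_init(). */
DECLARE_STATIC_KEY_TRUE(virt_spin_lock_key);

/* Hypothetical helper showing how the key would be consumed. */
static inline bool use_virt_spin_lock(void)
{
	return static_branch_likely(&virt_spin_lock_key);
}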