static int boost_notify(struct notifier_block *nb, unsigned long action,
			void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
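/*
 * For reference, a hedged sketch of the boost_set_msrs() helper the notifier
 * calls into.  This is an assumption based on the driver's usual pattern
 * (toggling the vendor-specific boost-disable bit across a cpumask), not
 * necessarily the exact implementation in this tree: it reads the relevant
 * MSR on every CPU in the mask into the preallocated 'msrs' array, flips the
 * disable bit, and writes the values back.
 */
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	default:
		return;
	}

	/* Read the current MSR value on each CPU in the mask. */
	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	/* Boost is active when the vendor's disable bit is clear. */
	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	/* Write the updated values back on the same CPUs. */
	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}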
static int _store_boost(int val)
{
	get_online_cpus();
	boost_set_msrs(val, cpu_online_mask);
	put_online_cpus();
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return 0;
}
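/*
 * Usage sketch: _store_boost() is typically driven from a sysfs store
 * callback that validates the user-supplied value before applying it.  The
 * wrapper below is illustrative only (the "cpb" attribute name and its
 * cpufreq sysfs hookup are assumptions), showing the expected
 * parse-then-apply flow.
 */
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!acpi_cpufreq_driver.boost_supported)
		return -EINVAL;

	/* Accept only "0" or "1" from userspace. */
	ret = kstrtoul(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	_store_boost((int) val);

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}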
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		acpi_cpufreq_driver.boost_supported = true;
		acpi_cpufreq_driver.boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
			       cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	}
}
/*
 * Variant of acpi_cpufreq_boost_init() for kernels where the cpufreq driver
 * struct is const-protected: the boost flags are written through
 * pax_open_kernel()/pax_close_kernel(), and the notifier is registered under
 * cpu_notifier_register_begin()/cpu_notifier_register_done() instead of
 * get_online_cpus()/put_online_cpus().
 */
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		pax_open_kernel();
		*(bool *)&acpi_cpufreq_driver.boost_supported = true;
		*(bool *)&acpi_cpufreq_driver.boost_enabled = boost_state(0);
		pax_close_kernel();

		cpu_notifier_register_begin();

		/* Force all MSRs to the same value */
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
			       cpu_online_mask);

		__register_cpu_notifier(&boost_nb);

		cpu_notifier_register_done();
	}
}
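/*
 * For completeness, hedged sketches of the two symbols both init variants
 * depend on: boost_state(), which samples whether boost is currently enabled
 * on a given CPU, and the boost_nb notifier block that routes hotplug events
 * to boost_notify().  These follow the vendor-specific MSR layout used above
 * and are assumptions about this tree, not verbatim copies.
 */
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		/* Boost is on when the turbo-disable bit is clear. */
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		/* Boost is on when the core-performance-boost-disable bit is clear. */
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

static struct notifier_block boost_nb = {
	.notifier_call = boost_notify,
};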