int disable_nonboot_cpus(void) { int cpu, first_cpu, error = 0; cpu_maps_update_begin(); first_cpu = cpumask_first(cpu_online_mask); /* * We take down all of the non-boot CPUs in one shot to avoid races * with the userspace trying to use the CPU hotplug at the same time */ cpumask_clear(frozen_cpus); printk("Disabling non-boot CPUs ...\n"); for_each_online_cpu(cpu) { if (cpu == first_cpu) continue; error = _cpu_down(cpu, 1); if (!error) cpumask_set_cpu(cpu, frozen_cpus); else { printk(KERN_ERR "Error taking CPU%d down: %d\n", cpu, error); break; } } if (!error) { BUG_ON(num_online_cpus() > 1); /* Make sure the CPUs won't be enabled by someone else */ cpu_hotplug_disabled = 1; } else { printk(KERN_ERR "Non-boot CPUs are not disabled\n"); } cpu_maps_update_done(); return error; }
/*
 * enable_nonboot_cpus - bring the CPUs recorded in frozen_cpus back online.
 *
 * Counterpart of disable_nonboot_cpus().  Re-enables regular hotplug first,
 * then onlines each frozen CPU; per-CPU failures are logged and the loop
 * continues with the remaining CPUs.
 *
 * NOTE(review): the original fragment was truncated — it contained a
 * "goto out" with no "out:" label and was missing the function's closing
 * brace.  The conventional tail (arch end-hook, frozen_cpus clear, the
 * "out:" label and the final cpu_maps_update_done()) has been restored;
 * confirm against the full file.
 */
void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		trace_suspend_resume(TPS("CPU_ON"), cpu, true);
		error = _cpu_up(cpu, 1);
		trace_suspend_resume(TPS("CPU_ON"), cpu, false);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_warn("Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had
 * been disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	/* Flag is only ever touched under cpu_add_remove_lock. */
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling
 * CPU hotplug when tasks are about to be frozen.  Also, don't allow the
 * freezer to continue until any currently running CPU hotplug operation
 * gets completed.
 *
 * 'cpu_hotplug_disabled' is modified under 'cpu_add_remove_lock', the same
 * lock the regular hotplug path holds for the whole of an operation.
 * Taking it here therefore blocks us (and hence the freezer) until any
 * in-flight hotplug operation has finished.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}
/**
 * unregister_cpu_notifier - remove @nb from the CPU hotplug notifier chain.
 * @nb:	notifier block to unregister
 *
 * The removal is serialized against running hotplug operations by taking
 * the map-update lock around the chain manipulation.
 */
void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
/*
 * cpu_hotplug_enable - undo one cpu_hotplug_disable() call.
 *
 * Decrements the refcount-style 'cpu_hotplug_disabled' under the
 * map-update lock; a negative result means an enable without a matching
 * disable, which is warned about.
 */
void cpu_hotplug_enable(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled--;
	WARN_ON(cpu_hotplug_disabled < 0);	/* unbalanced enable/disable */
	cpu_maps_update_done();
}
/*
 * Wait for currently running CPU hotplug operations to complete (if any)
 * and disable future CPU hotplug (from sysfs).  'cpu_add_remove_lock'
 * protects the 'cpu_hotplug_disabled' flag and is also taken by the
 * hotplug path before performing hotplug operations, so acquiring it here
 * guarantees mutual exclusion from any currently running operation.
 */
void cpu_hotplug_disable(void)
{
	cpu_maps_update_begin();
	/* Refcount-style: paired with the decrement in cpu_hotplug_enable(). */
	cpu_hotplug_disabled++;
	cpu_maps_update_done();
}
/*
 * cpu_up - bring a possible CPU online.
 * @cpu: logical CPU number to online.
 *
 * Returns 0 on success, -EINVAL if @cpu is not hot-addable, -EBUSY while
 * hotplug is administratively disabled, -ENOMEM/-EPERM or the error from
 * _cpu_up() otherwise.
 *
 * With CONFIG_MEMORY_HOTPLUG, the CPU's node (and its zonelists) are
 * onlined first so the new CPU has usable memory.  With the K3 hotplug
 * cpufreq governor, policy limits (gcpu_num_limit) can veto the online.
 */
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t *pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
			"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef CONFIG_MEMORY_HOTPLUG
	/* Online the CPU's memory node before the CPU itself. */
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	/* Build zonelists for a node that was just onlined (first zone unset). */
	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();
#ifdef CONFIG_CPU_FREQ_GOV_K3HOTPLUG
	/*
	 * Governor policy checks: a non-zero 'block' caps the online count
	 * at that value; otherwise 'max' applies.  Presumably enforced by
	 * the K3 hotplug governor — TODO confirm semantics of gcpu_num_limit.
	 */
	if ((gcpu_num_limit.block != 0) &&
	   (num_online_cpus() >= gcpu_num_limit.block)) {
		pr_err("[%s]cpu lock is %d can not hotplug cpu.\n",
			__func__, gcpu_num_limit.block);
		err = -EPERM;
		goto out;
	} else if ((gcpu_num_limit.block == 0) &&
		  (num_online_cpus() >= gcpu_num_limit.max)) {
		pr_err("[%s]cpu max is %d can not hotplug cpu.\n",
			__func__, gcpu_num_limit.max);
		err = -EPERM;
		goto out;
	}

	/* Only allow onlining CPUs in order (cpu == current online count). */
	if (cpu != (num_online_cpus())) {
		err = -EPERM;
		goto out;
	}
#endif

	/* Hotplug administratively disabled (e.g. during suspend). */
	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
/*
 * disable_nonboot_cpus - take down all online CPUs except the boot CPU.
 *
 * Exynos big.LITTLE variant: with CONFIG_ARM_EXYNOS_MP_CPUFREQ one CPU of
 * the non-boot cluster ('lated_cpu') is deliberately skipped in the main
 * loop and taken down last — presumably a cluster power-down ordering
 * requirement; confirm against the Exynos MP cpufreq driver.
 *
 * Successfully downed CPUs are recorded in frozen_cpus for the matching
 * enable path.  Returns 0 on success or the first _cpu_down() error; on
 * success cpu_hotplug_disabled is set to block further hotplug.
 */
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	int lated_cpu;
#endif

#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	/* Pick the "late" CPU from whichever cluster did NOT boot. */
	if (exynos_boot_cluster == CA7)
		lated_cpu = NR_CA7;
	else
		lated_cpu = NR_CA15;
#endif

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
		/* Skip the boot CPU and defer lated_cpu to the end. */
		if (cpu == first_cpu || cpu == lated_cpu)
#else
		if (cpu == first_cpu)
#endif
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
	/* Now take the deferred CPU down, if anything besides boot remains. */
	if (num_online_cpus() > 1) {
		error = _cpu_down(lated_cpu, 1);
		if (!error)
			cpumask_set_cpu(lated_cpu, frozen_cpus);
		else
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				lated_cpu, error);
	}
#endif

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}