/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
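For context, a hedged sketch of the caller side for the notifier-era variants above: mainline kernels of that period take cpu_add_remove_lock via cpu_maps_update_begin() in the exported cpu_down() before calling _cpu_down(), which is what the header comment requires. The exact body differs across the releases collected here; treat this as an illustration, not a verbatim excerpt.

/*
 * Sketch (assumption, based on notifier-era mainline): the public
 * cpu_down() holds cpu_add_remove_lock, taken by cpu_maps_update_begin(),
 * around the call to _cpu_down().
 */
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);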
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for the stop thread to go away; only the idle task is left. */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

#ifdef CONFIG_HTC_ACPU_DEBUG
	{
		/* HTC debug builds: back up this CPU's status via proc_comm. */
		unsigned int status = 0;

		msm_proc_comm(PCOM_BACKUP_CPU_STATUS,
			      (unsigned *)&cpu, (unsigned *)&status);
	}
#endif

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		set_cpu_active(cpu, true);

		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	/* Global switch that disallows taking CPUs offline. */
	if (skip_cpu_offline)
		return -EACCES;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/* Wait for the stop thread to go away; only the idle task is left. */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
			   enum cpuhp_state target)
{
	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
	int prev_state, ret = 0;
	bool hasdied = false;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	cpuhp_tasks_frozen = tasks_frozen;

	prev_state = st->state;
	st->target = target;
	/*
	 * If the current CPU state is in the range of the AP hotplug thread,
	 * then we need to kick the thread.
	 */
	if (st->state > CPUHP_TEARDOWN_CPU) {
		ret = cpuhp_kick_ap_work(cpu);
		/*
		 * The AP side has done the error rollback already. Just
		 * return the error code..
		 */
		if (ret)
			goto out;

		/*
		 * We might have stopped still in the range of the AP hotplug
		 * thread. Nothing to do anymore.
		 */
		if (st->state > CPUHP_TEARDOWN_CPU)
			goto out;
	}
	/*
	 * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
	 * to do the further cleanups.
	 */
	ret = cpuhp_down_callbacks(cpu, st, cpuhp_bp_states, target);
	if (ret && st->state > CPUHP_TEARDOWN_CPU && st->state < prev_state) {
		st->target = prev_state;
		st->rollback = true;
		cpuhp_kick_ap_work(cpu);
	}

	hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
	cpu_hotplug_done();
	/* This post dead nonsense must die */
	if (!ret && hasdied)
		cpu_notify_nofail(CPU_POST_DEAD, cpu);
	return ret;
}
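For the state-machine variant above, a hedged sketch of the matching caller: kernels of that era route the exported cpu_down() through a do_cpu_down() helper that asks _cpu_down() to drive the CPU to the CPUHP_OFFLINE target state. The exact shape varies between releases, so treat the names and body here as an assumption.

/*
 * Sketch (assumption): caller side for the cpuhp-state variant of
 * _cpu_down() above, requesting the CPUHP_OFFLINE target state.
 */
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}

int cpu_down(unsigned int cpu)
{
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}
EXPORT_SYMBOL(cpu_down);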
/*
 * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
 * callbacks when a state gets [un]installed at runtime.
 */
static void cpuhp_thread_fun(unsigned int cpu)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	int ret = 0;

	/*
	 * Paired with the mb() in cpuhp_kick_ap_work and
	 * cpuhp_invoke_ap_callback, so the work set is consistently visible.
	 */
	smp_mb();
	if (!st->should_run)
		return;

	st->should_run = false;

	/* Single callback invocation for [un]install ? */
	if (st->cb) {
		if (st->cb_state < CPUHP_AP_ONLINE) {
			local_irq_disable();
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
			local_irq_enable();
		} else {
			ret = cpuhp_invoke_callback(cpu, st->cb_state, st->cb);
		}
	} else if (st->rollback) {
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		undo_cpu_down(cpu, st, cpuhp_ap_states);
		/*
		 * This is a momentary workaround to keep the notifier users
		 * happy. Will go away once we got rid of the notifiers.
		 */
		cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
		st->rollback = false;
	} else {
		/* Cannot happen .... */
		BUG_ON(st->state < CPUHP_AP_ONLINE_IDLE);

		/* Regular hotplug work */
		if (st->state < st->target)
			ret = cpuhp_ap_online(cpu, st);
		else if (st->state > st->target)
			ret = cpuhp_ap_offline(cpu, st);
	}
	st->result = ret;
	complete(&st->done);
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.caller = current,
		.mod = mod,
		.hcpu = hcpu,
	};
	unsigned long timeout;
	unsigned long flags;
	struct task_struct *g, *p;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	set_cpu_active(cpu, false);

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		set_cpu_active(cpu, true);

		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		set_cpu_active(cpu, true);
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	timeout = jiffies + HZ;
	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu)) {
		msleep(1);
		if (time_after(jiffies, timeout)) {
			printk("%s: CPU%d not idle after offline. Running tasks:\n",
			       __func__, cpu);
			read_lock_irqsave(&tasklist_lock, flags);
			do_each_thread(g, p) {
				if (!p->se.on_rq || task_cpu(p) != cpu)
					continue;
				sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock_irqrestore(&tasklist_lock, flags);
			timeout = jiffies + HZ;
		}
	}

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* Requires cpu_add_remove_lock to be held */
static int _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		pr_warn("%s: attempt to take down CPU %u failed\n",
			__func__, cpu);
		goto out_release;
	}

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so wait for both.
	 *
	 * Do the sync before parking the smpboot threads to take care of the
	 * RCU boost case.
	 */
	if (IS_ENABLED(CONFIG_PREEMPT))
		synchronize_rcu_mult(call_rcu, call_rcu_sched);
	else
		synchronize_rcu();

	smpboot_park_threads(cpu);

	/*
	 * Prevent irq alloc/free while the dying cpu reorganizes the
	 * interrupt affinities.
	 */
	irq_lock_sparse();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */
	err = stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		irq_unlock_sparse();
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!per_cpu(cpu_dead_idle, cpu))
		cpu_relax();
	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
	per_cpu(cpu_dead_idle, cpu) = false;

	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
	irq_unlock_sparse();

	hotplug_cpu__broadcast_tick_pull(cpu);
	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	tick_cleanup_dead_cpu(cpu);
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int mycpu, err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};
	cpumask_var_t cpumask;
	cpumask_var_t cpumask_org;

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	/* Move the downtaker off the unplug cpu */
	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
		return -ENOMEM;
	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
		free_cpumask_var(cpumask);
		return -ENOMEM;
	}

	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
	set_cpus_allowed_ptr(current, cpumask);
	free_cpumask_var(cpumask);
	migrate_disable();
	mycpu = smp_processor_id();
	if (mycpu == cpu) {
		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
		migrate_enable();
		err = -EBUSY;
		goto restore_cpus;
	}

	cpu_hotplug_begin();
	err = cpu_unplug_begin(cpu);
	if (err) {
		printk("cpu_unplug_begin(%d) failed\n", cpu);
		goto out_cancel;
	}

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	__cpu_unplug_wait(cpu);
	smpboot_park_threads(cpu);

	/* Notifiers are done. Don't let any more tasks pin this CPU. */
	cpu_unplug_sync(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_unplug_done(cpu);
out_cancel:
	migrate_enable();
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
restore_cpus:
	set_cpus_allowed_ptr(current, cpumask_org);
	free_cpumask_var(cpumask_org);
	return err;
}
static int notify_dead(unsigned int cpu)
{
	cpu_notify_nofail(CPU_DEAD, cpu);
	check_for_tasks(cpu);
	return 0;
}