/*
 * take_cpu_down - stop_machine() callback to take the current CPU offline.
 *
 * Runs on the CPU being removed, with the machine stopped.  Fires the
 * CPU_DYING notifier chain, then shuts off interrupt handling on this CPU
 * via __cpu_disable().
 *
 * Returns 0 on success, or the negative error from __cpu_disable().
 * (The original discarded __cpu_disable()'s result and always returned 0,
 * reporting success even when the CPU failed to disable.)
 */
static int take_cpu_down(void *unused)
{
	void *hcpu = (void *)(long)smp_processor_id();
	int notifier_rc = notifier_call_chain(&cpu_chain, CPU_DYING, hcpu, NULL);
	int err;

	/* CPU_DYING callbacks must not fail: the machine is stopped. */
	BUG_ON(notifier_rc != NOTIFY_DONE);

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	return 0;
}
/*
 * Take this CPU down.  Executed on the dying CPU itself while the rest of
 * the machine is stopped.  Returns 0 on success or the negative error
 * from __cpu_disable().
 */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int ret;

	/* Shut off interrupt handling on this CPU before anything else. */
	ret = __cpu_disable();
	if (ret < 0)
		return ret;

	/* Let interested subsystems know this CPU is now dying. */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	return 0;
}
/* stop_machine() callback: runs on the CPU being taken offline. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	/* Cut off interrupt delivery to this CPU first. */
	int rc = __cpu_disable();

	if (rc < 0)
		return rc;

	/* Announce CPU_DYING (with the freezer modifier, if set). */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	return 0;
}
/*
 * Take the current CPU down; runs under stop_machine() on that CPU.
 * Returns 0 on success or the negative error from __cpu_disable().
 */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int ret;

	/* First make sure this CPU takes no further interrupts. */
	ret = __cpu_disable();
	if (ret < 0)
		return ret;

	/* Fire the CPU_DYING notifiers for this CPU. */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	/* We are the per-cpu stopper thread; park ourselves. */
	kthread_park(current);

	return 0;
}
/*
 * Take this CPU down.  Runs on the dying CPU with the machine stopped.
 * Returns 0 on success or the negative error from __cpu_disable().
 */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int ret;

	/* No further interrupts may be handled on this CPU. */
	ret = __cpu_disable();
	if (ret < 0)
		return ret;

	/* Notify the CPU_DYING chain. */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	/* Hand the do_timer duty over to another CPU. */
	tick_handover_do_timer();

	/* Park this CPU's stopper thread. */
	stop_machine_park((long)param->hcpu);

	return 0;
}
/*
 * Take this CPU down.  Runs on the dying CPU with the machine stopped.
 *
 * This variant gives the migration notifier an early chance to veto the
 * offline before interrupts are disabled.  Returns 0 on success, the
 * migration_call() status on veto, or the negative error from
 * __cpu_disable().
 */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Early CPU_DYING call into the migration code; may refuse. */
	err = migration_call(&migration_notifier, CPU_DYING | param->mod,
			     param->hcpu);
	if (err == NOTIFY_BAD) {
		/* was: unlevelled printk with a "donw" typo in the message */
		printk(KERN_WARNING "take_cpu_down: CPU%lu down failed!\n",
		       (long)param->hcpu);
		/*
		 * NOTE(review): NOTIFY_BAD is a positive value; callers of
		 * this stop_machine callback may expect a negative errno
		 * here — confirm against __stop_machine() users.
		 */
		return err;
	}

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Regular CPU_DYING notification for everyone else. */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	return 0;
}
/*
 * Take this CPU down.  stop_machine() callback executed on the CPU that
 * is going offline, with every other CPU spinning.
 *
 * Returns 0 on success or the negative error from __cpu_disable().
 */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	unsigned int cpu = (unsigned long)param->hcpu;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Fire the CPU_DYING notifiers (with freezer modifier, if any). */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	/* If the hotplug-initiating task is running here, push it away. */
	if (task_cpu(param->caller) == cpu)
		move_task_off_dead_cpu(cpu, param->caller);
	/* Force idle task to run as soon as we yield: it should immediately
	   notice cpu is offline and die quickly. */
	sched_idle_next();

	return 0;
}
/*
 * Take this CPU down.  stop_machine() callback executed on the CPU that
 * is going offline, with all other CPUs stopped.
 *
 * Walks the per-cpu hotplug state machine downward toward CPUHP_AP_OFFLINE
 * (or the requested target, whichever is higher), invoking each step's
 * teardown callback — these replace the former CPU_DYING notifiers.
 *
 * Returns 0 on success or the negative error from __cpu_disable().
 */
static int take_cpu_down(void *_param)
{
	struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
	/* Never step below CPUHP_AP_OFFLINE from this stopped context. */
	enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
	int err, cpu = smp_processor_id();

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Invoke the former CPU_DYING callbacks */
	for (; st->state > target; st->state--) {
		struct cpuhp_step *step = cpuhp_ap_states + st->state;

		cpuhp_invoke_callback(cpu, st->state, step->teardown);
	}
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park(cpu);
	return 0;
}