Example #1
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, ret, 1);

	return ret;
}
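
The comment above says _cpu_up() requires cpu_add_remove_lock to be held; in kernels of this vintage that lock is taken by the cpu_up() wrapper through cpu_maps_update_begin(). A minimal sketch of that caller, simplified from kernel/cpu.c of roughly the same period (the !cpu_possible() error message and the memory-hotplug handling are omitted here):

int __cpuinit cpu_up(unsigned int cpu)
{
	int err;

	if (!cpu_possible(cpu))
		return -EINVAL;

	cpu_maps_update_begin();	/* takes cpu_add_remove_lock */

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);		/* tasks_frozen == 0 on the normal path */

out:
	cpu_maps_update_done();		/* releases cpu_add_remove_lock */
	return err;
}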
Example #2
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct task_struct *idle;

	cpu_hotplug_begin();

	if (cpu_online(cpu)) {
		ret = 0;
		goto out;
	}

	if (!cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	idle = idle_thread_get(cpu);
	if (IS_ERR(idle)) {
		ret = PTR_ERR(idle);
		goto out;
	}

	ret = smpboot_create_threads(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Wake the per cpu threads */
	smpboot_unpark_threads(cpu);

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, ret, 1);

	return ret;
}
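
Example #2 differs from example #1 mainly in that the idle task for the target CPU is now pre-allocated and fetched with idle_thread_get() before __cpu_up(cpu, idle), and the per-cpu hotplug threads are handled through smpboot_create_threads()/smpboot_unpark_threads(). As a rough sketch of what idle_thread_get() looks like in kernel/smpboot.c of the same period (the per-cpu variable name and exact body are reconstructed from memory and may differ slightly):

struct task_struct *idle_thread_get(unsigned int cpu)
{
	struct task_struct *tsk = per_cpu(idle_threads, cpu);

	if (!tsk)
		return ERR_PTR(-ENOMEM);

	/* Re-initialize the cached idle task before handing it to __cpu_up(). */
	init_idle(tsk, cpu);
	return tsk;
}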
Example #3
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, err, 0);
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
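
As with _cpu_up(), the cpu_add_remove_lock requirement is met by the caller: the cpu_down() wrapper takes the lock via cpu_maps_update_begin() around _cpu_down(). A minimal sketch of that wrapper, simplified from kernel/cpu.c of the same era:

int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();	/* takes cpu_add_remove_lock */

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);	/* tasks_frozen == 0 on the normal path */

out:
	cpu_maps_update_done();		/* releases cpu_add_remove_lock */
	return err;
}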
Example #4
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}
	smpboot_park_threads(cpu);

	/*
	 * By now we've cleared cpu_active_mask, wait for all preempt-disabled
	 * and RCU users of this state to go away such that all new such users
	 * will observe it.
	 *
	 * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
	 * not imply sync_sched(), so explicitly call both.
	 */
#ifdef CONFIG_PREEMPT
	synchronize_sched();
#endif
	synchronize_rcu();

	/*
	 * So now all preempt/rcu users must observe !cpu_active().
	 */

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		smpboot_unpark_threads(cpu);
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	trace_sched_cpu_hotplug(cpu, err, 0);
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
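
Both _cpu_down() variants hand take_cpu_down() to __stop_machine() so the final teardown runs on the CPU being removed while every other CPU is held in stop-machine context. A rough sketch of that callback as it looked around this time (the exact body may have changed slightly between the two versions shown above):

static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	/* Let interested subsystems know this CPU is about to go away. */
	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}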