Example #1
0
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}
Example #2
0
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}
Example #3
0
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
    int ret, nr_calls = 0;
    void *hcpu = (void *)(long)cpu;
    unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
    struct task_struct *idle;

    if (cpu_online(cpu) || !cpu_present(cpu))
        return -EINVAL;

    cpu_hotplug_begin();

    idle = idle_thread_get(cpu);
    if (IS_ERR(idle)) {
        ret = PTR_ERR(idle);
        goto out;
    }

    ret = smpboot_create_threads(cpu);
    if (ret)
        goto out;

    ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
    if (ret) {
        nr_calls--;
        printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
               __func__, cpu);
        goto out_notify;
    }

    /* Arch-specific enabling code. */
    ret = __cpu_up(cpu);
    if (ret != 0)
        goto out_notify;
    BUG_ON(!cpu_online(cpu));

    /* Wake the per cpu threads */
    smpboot_unpark_threads(cpu);

    /* Now call notifier in preparation. */
    cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
    if (ret != 0)
        __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
    cpu_hotplug_done();
    trace_sched_cpu_hotplug(cpu, ret, 1);

    return ret;
}
Example #4
0
static int bringup_cpu(unsigned int cpu)
{
	struct task_struct *idle = idle_thread_get(cpu);
	int ret;

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle);
	if (ret) {
		cpu_notify(CPU_UP_CANCELED, cpu);
		return ret;
	}
	ret = bringup_wait_for_ap(cpu);
	BUG_ON(!cpu_online(cpu));
	return ret;
}
Example #5
0
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Park the stopper thread */
	kthread_park(current);
	return 0;
}
Example #6
0
/* Take this CPU down. */
static int take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	/* Give up timekeeping duties */
	tick_handover_do_timer();
	/* Park the stopper thread */
	stop_machine_park((long)param->hcpu);
	return 0;
}
Example #7
0
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	err = migration_call(&migration_notifier, CPU_DYING | param->mod,
			     param->hcpu);
	if (err == NOTIFY_BAD) {
		printk(KERN_WARNING "take_cpu_down: CPU%lu down failed!\n",
		       (unsigned long)param->hcpu);
		return err;
	}
	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}
Example #8
0
/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	unsigned int cpu = (unsigned long)param->hcpu;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);

	if (task_cpu(param->caller) == cpu)
		move_task_off_dead_cpu(cpu, param->caller);
	/* Force idle task to run as soon as we yield: it should
	   immediately notice cpu is offline and die quickly. */
	sched_idle_next();
	return 0;
}
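Every take_cpu_down() variant above keeps the same ordering: first make sure the CPU stops taking interrupts (__cpu_disable()), then fire the CPU_DYING notifiers, and only then hand over whatever duties remain (timekeeping, the stopper thread, or the caller's task, depending on the version). A small self-contained sketch of that ordering, with hypothetical helpers standing in for the kernel calls:

#include <stdio.h>

/* Hypothetical helpers modelling __cpu_disable(), cpu_notify(CPU_DYING)
 * and the hand-over work the later variants add after the notification. */
static int disable_interrupts(unsigned int cpu)
{
	printf("cpu%u: no longer handling interrupts\n", cpu);
	return 0;
}

static void notify_cpu_dying(unsigned int cpu)
{
	printf("cpu%u: CPU_DYING notifiers run\n", cpu);
}

static void hand_over_duties(unsigned int cpu)
{
	printf("cpu%u: timekeeping and stopper handed over\n", cpu);
}

/* Same ordering as the take_cpu_down() variants: bail out early if the CPU
 * cannot stop taking interrupts, otherwise notify and give up its duties. */
static int take_cpu_down_sketch(unsigned int cpu)
{
	int err;

	err = disable_interrupts(cpu);
	if (err < 0)
		return err;

	notify_cpu_dying(cpu);
	hand_over_duties(cpu);
	return 0;
}

int main(void)
{
	return take_cpu_down_sketch(3);
}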
Example #9
0
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

	#ifdef CONFIG_HTC_ACPU_DEBUG
	{
		unsigned int status = 1;
		msm_proc_comm(PCOM_BACKUP_CPU_STATUS, (unsigned*)&cpu, (unsigned*) &status);
	}
	#endif
out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}
Example #10
0
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	ret = smpboot_prepare(cpu);
	if (ret)
		goto out;

	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu, idle_thread_get(cpu));
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
out:
	cpu_hotplug_done();

	return ret;
}
Example #11
0
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);
#ifdef CONFIG_MT_LOAD_BALANCE_PROFILER
	mt_lbprof_update_state(cpu, MT_LBPROF_NO_TASK_STATE);
#endif

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}
Example #12
0
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}
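Examples #3 and #9-#12 are all variants of the same _cpu_up() skeleton: run the CPU_UP_PREPARE notifiers, attempt the arch-specific __cpu_up(), and on any failure replay the callbacks that already ran with CPU_UP_CANCELED before dropping the hotplug lock; they differ only in what extra setup (idle thread, smpboot threads, vendor debug hooks) happens in between. A minimal, self-contained sketch of that prepare / attempt / roll-back shape, using hypothetical prepare(), enable_cpu() and cancel() helpers rather than the kernel APIs:

#include <stdio.h>

/* Hypothetical stand-ins for __cpu_notify(CPU_UP_PREPARE), __cpu_up() and
 * __cpu_notify(CPU_UP_CANCELED); they only model success and failure. */
static int prepare(unsigned int cpu, int *nr_calls)
{
	*nr_calls = 3;			/* pretend three callbacks ran */
	return 0;
}

static int enable_cpu(unsigned int cpu)
{
	return cpu == 2 ? -1 : 0;	/* pretend CPU 2 fails to boot */
}

static void cancel(unsigned int cpu, int nr_calls)
{
	printf("cpu%u: rolling back %d prepare callbacks\n", cpu, nr_calls);
}

static int cpu_up_sketch(unsigned int cpu)
{
	int ret, nr_calls = 0;

	ret = prepare(cpu, &nr_calls);
	if (ret) {
		nr_calls--;		/* the failing callback cleans up after itself */
		goto out_notify;
	}

	ret = enable_cpu(cpu);		/* "arch-specific enabling code" */
	if (ret != 0)
		goto out_notify;

	printf("cpu%u: online\n", cpu);

out_notify:
	if (ret != 0)
		cancel(cpu, nr_calls);	/* what CPU_UP_CANCELED does in the examples */
	return ret;
}

int main(void)
{
	cpu_up_sketch(1);		/* succeeds */
	cpu_up_sketch(2);		/* fails in enable_cpu() and rolls back */
	return 0;
}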
Example #13
0
static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
Example #14
0
static int notify_online(unsigned int cpu)
{
	cpu_notify(CPU_ONLINE, cpu);
	return 0;
}
Example #15
0
static int notify_starting(unsigned int cpu)
{
	cpu_notify(CPU_STARTING, cpu);
	return 0;
}
Example #16
0
static int notify_dying(unsigned int cpu)
{
	cpu_notify(CPU_DYING, cpu);
	return 0;
}
Example #17
0
static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
{
	BUG_ON(cpu_notify(val, cpu));
}
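Examples #14-#16 show the transitional form in which each notifier event gets a small per-step wrapper (notify_starting, notify_online, notify_dying) so it can be driven from a table of hotplug steps instead of from open-coded cpu_notify() calls inside _cpu_up()/take_cpu_down(). A rough, self-contained illustration of that table-driven shape; the hotplug_step type and the step table here are hypothetical, not the kernel's actual cpuhp tables:

#include <stdio.h>

/* Hypothetical per-step callback table, loosely mirroring how wrappers like
 * notify_starting()/notify_online()/notify_dying() are meant to be invoked
 * one hotplug step at a time. */
struct hotplug_step {
	const char *name;
	int (*startup)(unsigned int cpu);
	int (*teardown)(unsigned int cpu);
};

static int notify_starting(unsigned int cpu)
{
	printf("cpu%u: CPU_STARTING\n", cpu);
	return 0;
}

static int notify_online(unsigned int cpu)
{
	printf("cpu%u: CPU_ONLINE\n", cpu);
	return 0;
}

static int notify_dying(unsigned int cpu)
{
	printf("cpu%u: CPU_DYING\n", cpu);
	return 0;
}

static const struct hotplug_step steps[] = {
	{ "notify:starting", notify_starting, notify_dying },
	{ "notify:online",   notify_online,   NULL },
};

int main(void)
{
	unsigned int cpu = 1;
	size_t i;

	/* Bring the CPU up: run every startup callback in order. */
	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		if (steps[i].startup)
			steps[i].startup(cpu);

	/* Take it down again: run teardown callbacks in reverse order. */
	for (i = sizeof(steps) / sizeof(steps[0]); i-- > 0; )
		if (steps[i].teardown)
			steps[i].teardown(cpu);

	return 0;
}

This per-step form is what eventually replaced the CPU_* notifier chains once the hotplug state machine landed.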