Example #1
int disable_nonboot_cpus(void)
{
    int cpu, first_cpu, error = 0;
#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
    int lated_cpu;
#endif

#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
    if (exynos_boot_cluster == CL_ZERO)
        lated_cpu = NR_CLUST0_CPUS;
    else
        lated_cpu = NR_CLUST1_CPUS;
#endif

    cpu_maps_update_begin();
    first_cpu = cpumask_first(cpu_online_mask);
    /*
     * We take down all of the non-boot CPUs in one shot to avoid races
     * with the userspace trying to use the CPU hotplug at the same time
     */
    cpumask_clear(frozen_cpus);

    printk("Disabling non-boot CPUs ...\n");
    for_each_online_cpu(cpu) {
#if defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
        if (cpu == first_cpu || cpu == lated_cpu)
#else
        if (cpu == first_cpu)
#endif
            continue;
        error = _cpu_down(cpu, 1);
        if (!error)
            cpumask_set_cpu(cpu, frozen_cpus);
        else {
            printk(KERN_ERR "Error taking CPU%d down: %d\n",
                   cpu, error);
            break;
        }
    }

#ifdef CONFIG_ARM_EXYNOS_MP_CPUFREQ
    if (num_online_cpus() > 1) {
        error = _cpu_down(lated_cpu, 1);
        if (!error)
            cpumask_set_cpu(lated_cpu, frozen_cpus);
        else
            printk(KERN_ERR "Error taking CPU%d down: %d\n",
                   lated_cpu, error);
    }
#endif
    if (!error) {
        BUG_ON(num_online_cpus() > 1);
        /* Make sure the CPUs won't be enabled by someone else */
        cpu_hotplug_disabled = 1;
    } else {
        printk(KERN_ERR "Non-boot CPUs are not disabled\n");
    }
    cpu_maps_update_done();
    return error;
}
Example #2
File: cpu.c Project: borkmann/kasan
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		pr_err("Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
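
The counterpart to the disable_nonboot_cpus() variants above is enable_nonboot_cpus(), which brings the CPUs parked in frozen_cpus back online on resume. The version below is a rough, from-memory sketch of how it looks in mainline kernels of this era; details vary across versions, so treat it as an illustration rather than a listing from any of these projects.

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	pr_info("Enabling non-boot CPUs ...\n");
	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			pr_info("CPU%d is up\n", cpu);
			continue;
		}
		pr_err("Error taking CPU%d up: %d\n", cpu, error);
	}
	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}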
Example #3
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	cpumask_clear(frozen_cpus);
	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
Example #4
static int cpu_down(unsigned int cpu)
{
	int err;

	/* Unlike Linux there is no lock, as there are no other callers
	 * and no other CPUs. */
	err = _cpu_down(cpu, 0);

	return err;
}
Example #5
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
Example #6
static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0, target);

out:
	cpu_maps_update_done();
	return err;
}
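
In kernels that use the cpuhp state machine, the exported cpu_down() is just a thin wrapper around the do_cpu_down() shown in Example #6. The wrapper below is a minimal sketch of that pattern, assuming a CPUHP_OFFLINE target state; it is not part of the listing above.

int cpu_down(unsigned int cpu)
{
	/* Drive the CPU all the way down through the hotplug state machine */
	return do_cpu_down(cpu, CPUHP_OFFLINE);
}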
Example #7
int __ref cpu_down(unsigned int cpu)
{
	int err;

	printk("Take CPU down - Begin\n");
	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	printk("Take CPU down - End\n");
	return err;
}
Example #8
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();
	sec_debug_task_log_msg(cpu, "cpu_down+");

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	sec_debug_task_log_msg(cpu, "cpu_down-");
	cpu_maps_update_done();
	return err;
}
Example #9
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	pr_info("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
		error = _cpu_down(cpu, 1);
		trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			pr_err("Error taking CPU%d down: %d\n", cpu, error);
			break;
		}
	}

	if (!error)
		BUG_ON(num_online_cpus() > 1);
	else
		pr_err("Non-boot CPUs are not disabled\n");

	/*
	 * Make sure the CPUs won't be enabled by someone else. We need to do
	 * this even in case of failure as all disable_nonboot_cpus() users are
	 * supposed to do enable_nonboot_cpus() on the failure path.
	 */
	cpu_hotplug_disabled++;

	cpu_maps_update_done();
	return error;
}
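
The comment in Example #9 implies a calling convention: cpu_hotplug_disabled is incremented even on failure because every caller of disable_nonboot_cpus() is expected to call enable_nonboot_cpus() afterwards, including on its error path. A minimal caller sketch under that assumption (the suspend-style flow and the example_enter_sleep() name are illustrative, not taken from the source):

static int example_enter_sleep(void)
{
	int error;

	error = disable_nonboot_cpus();
	if (error) {
		/* Undo cpu_hotplug_disabled++ even though offlining failed */
		enable_nonboot_cpus();
		return error;
	}

	/* Only the boot CPU is online here; enter the low-power state... */

	enable_nonboot_cpus();
	return 0;
}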
Example #10
int __ref cpu_down(unsigned int cpu)
{
	int err;

	trace_cpu_hotplug(cpu, POWER_CPU_DOWN_START);

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	trace_cpu_hotplug(cpu, POWER_CPU_DOWN_DONE);
	return err;
}
Example #11
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

#ifdef CONFIG_CPU_FREQ_GOV_K3HOTPLUG

	/* check whether cpu is locked or limited.*/
	if ((gcpu_num_limit.block != 0)
		&& (num_online_cpus() <= gcpu_num_limit.block)) {
		pr_err("[%s]cpu lock is %d can not hotplug cpu.\n",
			__func__, gcpu_num_limit.block);
		err = -EPERM;
		goto out;
	} else if ((gcpu_num_limit.block == 0)
				&& (num_online_cpus() <= gcpu_num_limit.min)) {
		pr_err("[%s]cpu min is %d can not hotplug cpu.\n",
			__func__, gcpu_num_limit.min);
		err = -EPERM;
		goto out;
	}

	if (cpu != (num_online_cpus() - 1)) {
		err = -EPERM;
		goto out;
	}
#endif


	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
Example #12
int __ref cpu_down(unsigned int cpu)
{
	int err;

	printk("Take CPU%u down - Begin\n", cpu);

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled || wifi_is_powering_onoff) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	
	printk("Take CPU%u down - End\n", cpu);
	return err;
}
Example #13
int __ref cpu_down(unsigned int cpu)
{
	int err;

	if (num_online_cpus() <= pm_qos_request(PM_QOS_CPU_ONLINE_MIN))
		return 0;

	cpu_maps_update_begin();
	sec_debug_task_log_msg(cpu, "cpudown+");

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	pr_info("_cpu_down ret=%d\n", err);
	sec_debug_task_log_msg(cpu, "cpudown-");
	cpu_maps_update_done();
	return err;
}