Example #1
static int __init bl_idle_init(void)
{
	int ret;
	struct device_node *root = of_find_node_by_path("/");

	if (!root)
		return -ENODEV;

	/*
	 * Initialize the driver just for a compliant set of machines
	 */
	if (!of_match_node(compatible_machine_match, root))
		return -ENODEV;
	/*
	 * For now the differentiation between little and big cores
	 * is based on the part number. A7 cores are considered little
	 * cores, A15 are considered big cores. This distinction may
	 * evolve in the future with a more generic matching approach.
	 */
	ret = bl_idle_driver_init(&bl_idle_little_driver,
				  ARM_CPU_PART_CORTEX_A7);
	if (ret)
		return ret;

	ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
	if (ret)
		goto out_uninit_little;

	/* Start at index 1, index 0 standard WFI */
	ret = dt_init_idle_driver(&bl_idle_big_driver, bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	/* Start at index 1, index 0 standard WFI */
	ret = dt_init_idle_driver(&bl_idle_little_driver,
				  bl_idle_state_match, 1);
	if (ret < 0)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_little_driver, NULL);
	if (ret)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_big_driver, NULL);
	if (ret)
		goto out_unregister_little;

	return 0;

out_unregister_little:
	cpuidle_unregister(&bl_idle_little_driver);
out_uninit_big:
	kfree(bl_idle_big_driver.cpumask);
out_uninit_little:
	kfree(bl_idle_little_driver.cpumask);

	return ret;
}
Example #2
static int __init exynos_init_cpuidle(void)
{
	int ret;

	ret = exynos_idle_state_init(&exynos64_idle_cluster0_driver, &hmp_fast_cpu_mask);
	if (ret) {
		pr_err("fail exynos_idle_state_init(cluster 0) ret = %d\n", ret);
		return ret;
	}

	cpuidle_profile_state_init(&exynos64_idle_cluster0_driver);

	exynos64_idle_cluster0_driver.safe_state_index = IDLE_C1;
	exynos64_idle_cluster0_driver.cpumask = &hmp_fast_cpu_mask;
	ret = cpuidle_register(&exynos64_idle_cluster0_driver, NULL);

	if (ret) {
		pr_err("fast cpu cpuidle_register fail ret = %d\n", ret);
		return ret;
	}

	ret = exynos_idle_state_init(&exynos64_idle_cluster1_driver, &hmp_slow_cpu_mask);
	if (ret) {
		pr_err("fail exynos_idle_state_init(cluster 1) ret = %d\n", ret);
		return ret;
	}

	exynos64_idle_cluster1_driver.safe_state_index = IDLE_C1;
	exynos64_idle_cluster1_driver.cpumask = &hmp_slow_cpu_mask;
	ret = cpuidle_register(&exynos64_idle_cluster1_driver, NULL);

	if (ret) {
		pr_err("slow cpu cpuidle_register fail ret = %d\n", ret);
		return ret;
	}

	/* TODO : SKIP idle correlation */

	register_pm_notifier(&exynos_cpuidle_notifier);
	register_reboot_notifier(&exynos_cpuidle_reboot_nb);

#if defined(CONFIG_EXYNOS_MARCH_DYNAMIC_CPU_HOTPLUG)
	fb_register_client(&fb_block);
#endif

	pr_info("%s, finish initialization of cpuidle\n", __func__);

	return 0;
}
Example #3
int __init imx6q_cpuidle_init(void)
{
	/* Set INT_MEM_CLK_LPM bit to get a reliable WAIT mode support */
	imx6_set_int_mem_clk_lpm(true);

	return cpuidle_register(&imx6q_cpuidle_driver, NULL);
}
Example #4
int __init tegra30_cpuidle_init(void)
{
#ifdef CONFIG_PM_SLEEP
	tegra_tear_down_cpu = tegra30_tear_down_cpu;
#endif
	return cpuidle_register(&tegra_idle_driver, NULL);
}
Example #5
int __init tegra114_cpuidle_init(void)
{
	if (!psci_smp_available())
		return cpuidle_register(&tegra_idle_driver, NULL);

	return 0;
}
Example #6
/*
 * mt8173_cpuidle_init
 *
 * Registers the MT8173 cpuidle driver with the cpuidle
 * framework after initializing the per-CPU idle back-end
 * data via the arch CPU operations.
 */
int __init mt8173_cpuidle_init(void)
{
	int cpu, ret;
	struct cpuidle_driver *drv = &mt8173_cpuidle_driver;

	/*
	 * Call arch CPU operations in order to initialize
	 * idle states suspend back-end specific data
	 */
	for_each_possible_cpu(cpu) {
		ret = cpu_init_idle(cpu);
		if (ret) {
			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
			return ret;
		}
	}

	ret = cpuidle_register(drv, NULL);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	return 0;
}
Example #7
int __init tegra20_cpuidle_init(void)
{
#ifdef CONFIG_PM_SLEEP
	tegra_tear_down_cpu = tegra20_tear_down_cpu;
#endif
	return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
}
Example #8
static int dbx500_cpuidle_probe(struct platform_device *pdev)
{
	/* Configure wake up reasons */
	prcmu_enable_wakeups(PRCMU_WAKEUP(ARM) | PRCMU_WAKEUP(RTC) |
			     PRCMU_WAKEUP(ABB));

	return cpuidle_register(&ux500_idle_driver, NULL);
}
Example #9
File: rk3288.c  Project: Astralix/kernel
static void __init rk3288_init_cpuidle(void)
{
	int ret;

	rk3288_cpuidle_driver.states[0].enter = rk3288_cpuidle_enter;
	ret = cpuidle_register(&rk3288_cpuidle_driver, NULL);
	if (ret)
		pr_err("%s: failed to register cpuidle driver: %d\n", __func__, ret);
}
Example #10
static int __init bl_idle_init(void)
{
	int ret;

	/*
	 * Initialize the driver just for a compliant set of machines
	 */
	if (!of_machine_is_compatible("arm,vexpress,v2p-ca15_a7"))
		return -ENODEV;
	/*
	 * For now the differentiation between little and big cores
	 * is based on the part number. A7 cores are considered little
	 * cores, A15 are considered big cores. This distinction may
	 * evolve in the future with a more generic matching approach.
	 */
	ret = bl_idle_driver_init(&bl_idle_little_driver,
				  ARM_CPU_PART_CORTEX_A7);
	if (ret)
		return ret;

	ret = bl_idle_driver_init(&bl_idle_big_driver, ARM_CPU_PART_CORTEX_A15);
	if (ret)
		goto out_uninit_little;

	ret = cpuidle_register(&bl_idle_little_driver, NULL);
	if (ret)
		goto out_uninit_big;

	ret = cpuidle_register(&bl_idle_big_driver, NULL);
	if (ret)
		goto out_unregister_little;

	return 0;

out_unregister_little:
	cpuidle_unregister(&bl_idle_little_driver);
out_uninit_big:
	kfree(bl_idle_big_driver.cpumask);
out_uninit_little:
	kfree(bl_idle_little_driver.cpumask);

	return ret;
}
Example #11
/* Initialize CPU idle by registering the idle states */
static int kirkwood_cpuidle_probe(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	ddr_operation_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ddr_operation_base))
		return PTR_ERR(ddr_operation_base);

	return cpuidle_register(&kirkwood_idle_driver, NULL);
}
Example #12
int __init sh_mobile_setup_cpuidle(void)
{
	if (sh_mobile_sleep_supported & SUSP_SH_SF)
		cpuidle_driver.states[1].disabled = false;

	if (sh_mobile_sleep_supported & SUSP_SH_STANDBY)
		cpuidle_driver.states[2].disabled = false;

	return cpuidle_register(&cpuidle_driver, NULL);
}
Example #13
int __init imx6sx_cpuidle_init(void)
{
	imx6_enable_rbc(false);
	/*
	 * set ARM power up/down timing to the fastest,
	 * sw2iso and sw can be set to one 32K cycle = 31us
	 * except for power up sw2iso which need to be
	 * larger than LDO ramp up time.
	 */
	imx_gpc_set_arm_power_up_timing(2, 1);
	imx_gpc_set_arm_power_down_timing(1, 1);

	return cpuidle_register(&imx6sx_cpuidle_driver, NULL);
}
Example #14
static int exynos_cpuidle_probe(struct platform_device *pdev)
{
	int ret;

	exynos_enter_aftr = (void *)(pdev->dev.platform_data);

	ret = cpuidle_register(&exynos_idle_driver, NULL);
	if (ret) {
		dev_err(&pdev->dev, "failed to register cpuidle driver\n");
		return ret;
	}

	return 0;
}
Example #15
static int exynos_cpuidle_probe(struct platform_device *pdev)
{
	int ret;

	if (IS_ENABLED(CONFIG_SMP) &&
	    (of_machine_is_compatible("samsung,exynos4210") ||
	     of_machine_is_compatible("samsung,exynos3250"))) {
		exynos_cpuidle_pdata = pdev->dev.platform_data;

		ret = cpuidle_register(&exynos_coupled_idle_driver,
				       cpu_possible_mask);
	} else {
		exynos_enter_aftr = (void *)(pdev->dev.platform_data);

		ret = cpuidle_register(&exynos_idle_driver, NULL);
	}

	if (ret) {
		dev_err(&pdev->dev, "failed to register cpuidle driver\n");
		return ret;
	}

	return 0;
}
Example #16
File: cpuidle.c  Project: ChineseDr/linux
static int __init davinci_cpuidle_probe(struct platform_device *pdev)
{
    struct davinci_cpuidle_config *pdata = pdev->dev.platform_data;

    if (!pdata) {
        dev_err(&pdev->dev, "cannot get platform data\n");
        return -ENOENT;
    }

    ddr2_reg_base = pdata->ddr2_ctlr_base;

    ddr2_pdown = pdata->ddr2_pdown;

    return cpuidle_register(&davinci_idle_driver, NULL);
}
Example #17
/*
 * mmp_cpuidle_init
 *
 * Registers the MMP/MCPM cpuidle driver with the cpuidle
 * framework with the valid set of states.
 */
static int __init mmp_cpuidle_init(void)
{
	struct cpuidle_driver *drv = &mmp_idle_driver;
	int i;

	drv->state_count = mcpm_plat_get_cpuidle_states(drv->states);
	if (drv->state_count < 0)
		return drv->state_count;

	for (i = 0; i < drv->state_count; i++) {
		if (!drv->states[i].enter)
			drv->states[i].enter = mmp_enter_powerdown;
	}

	return cpuidle_register(&mmp_idle_driver, NULL);
}
Example #18
/*
 * arm64_idle_sprd_init
 *
 * Registers the arm64 specific cpuidle driver with the cpuidle
 * framework. It relies on core code to parse the idle states
 * and initialize them using driver data structures accordingly.
 */
int __init arm64_idle_sprd_init(void)
{
	int i, ret;
	const char *entry_method;
	struct device_node *idle_states_node;
	const struct cpu_suspend_ops *suspend_init;
	struct cpuidle_driver *drv = &arm64_idle_driver;

	idle_states_node = of_find_node_by_path("/cpus/idle-states");
	if (!idle_states_node) {
		return -ENOENT;
	}
	if (of_property_read_string(idle_states_node, "entry-method",
				    &entry_method)) {
		pr_warn(" * %s missing entry-method property\n",
			    idle_states_node->full_name);
		of_node_put(idle_states_node);
		return -EOPNOTSUPP;
	}
	/*
	 * State at index 0 is standby wfi and considered standard
	 * on all ARM platforms. If in some platforms simple wfi
	 * can't be used as "state 0", DT bindings must be implemented
	 * to work around this issue and allow installing a special
	 * handler for idle state index 0.
	 */
	drv->states[0].exit_latency = 1;
	drv->states[0].target_residency = 1;
	drv->states[0].flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP;
	strncpy(drv->states[0].name, "ARM WFI", CPUIDLE_NAME_LEN);
	strncpy(drv->states[0].desc, "ARM WFI", CPUIDLE_DESC_LEN);

	drv->cpumask = (struct cpumask *) cpu_possible_mask;
	/*
	 * Start at index 1, request idle state nodes to be filled
	 */
	ret = of_init_idle_driver(drv, state_nodes, 1, true);
	if (ret) {
		return ret;
	}

	for (i = 0; i < drv->state_count; i++)
		drv->states[i].enter = arm_enter_idle_state;
	printk("[CPUIDLE]<arm64_idle_sprd_init> init OK. \n");
	return cpuidle_register(drv, NULL);
}
Example #19
static int __init pseries_processor_idle_init(void)
{
	int retval;

	retval = pseries_idle_probe();
	if (retval)
		return retval;

	pseries_cpuidle_driver_init();
	retval = cpuidle_register(&pseries_idle_driver, NULL);
	if (retval) {
		printk(KERN_DEBUG "Registration of pseries driver failed.\n");
		return retval;
	}

	register_cpu_notifier(&setup_hotplug_notifier);
	printk(KERN_DEBUG "pseries_idle_driver registered\n");
	return 0;
}
Example #20
/*
 * arm64_idle_init
 *
 * Registers the arm64 specific cpuidle driver with the cpuidle
 * framework. It relies on core code to parse the idle states
 * and initialize them using driver data structures accordingly.
 */
static int __init arm64_idle_init(void)
{
	int cpu, ret;
	struct cpuidle_driver *drv = &arm64_idle_driver;

	/*
	 * Initialize idle states data, starting at index 1.
	 * This driver is DT only, if no DT idle states are detected (ret == 0)
	 * let the driver initialization fail accordingly since there is no
	 * reason to initialize the idle driver if only wfi is supported.
	 */
	ret = dt_init_idle_driver(drv, arm64_idle_state_match, 1);
	if (ret <= 0) {
		if (ret)
			pr_err("failed to initialize idle states\n");
		return ret ? : -ENODEV;
	}

	/*
	 * Call arch CPU operations in order to initialize
	 * idle states suspend back-end specific data
	 */
	for_each_possible_cpu(cpu) {
		ret = cpu_init_idle(cpu);
		if (ret) {
			pr_err("CPU %d failed to init idle CPU ops\n", cpu);
			return ret;
		}
	}

	ret = cpuidle_register(drv, NULL);
	if (ret) {
		pr_err("failed to register cpuidle driver\n");
		return ret;
	}

	return 0;
}
Example #21
static int __init rockchip_ca9_cpuidle_init(void)
{
	struct device_node *np;
	int ret;

	if (!cpu_is_rockchip())
		return -ENODEV;
	if (read_cpuid_part() != ARM_CPU_PART_CORTEX_A9)
		return -ENODEV;
	np = of_find_compatible_node(NULL, NULL, "arm,cortex-a9-gic");
	if (!np)
		return -ENODEV;
	gic_cpu_base = of_iomap(np, 1);
	if (!gic_cpu_base) {
		pr_err("%s: failed to map gic cpu registers\n", __func__);
		return -EINVAL;
	}
	rockchip_ca9_cpuidle_driver.states[0].enter = rockchip_ca9_cpuidle_enter;
	ret = cpuidle_register(&rockchip_ca9_cpuidle_driver, NULL);
	if (ret)
		pr_err("%s: failed to register cpuidle driver: %d\n", __func__, ret);

	return ret;
}
Example #22
int __init tegra30_cpuidle_init(void)
{
	return cpuidle_register(&tegra_idle_driver, NULL);
}
Example #23
int __init imx6sl_cpuidle_init(void)
{
	return cpuidle_register(&imx6sl_cpuidle_driver, NULL);
}
Example #24
/* Initialize CPU idle by registering the idle states */
static int at91_cpuidle_probe(struct platform_device *dev)
{
	at91_standby = (void *)(dev->dev.platform_data);
	
	return cpuidle_register(&at91_idle_driver, NULL);
}
Example #25
int __init tegra20_cpuidle_init(void)
{
	return cpuidle_register(&tegra_idle_driver, cpu_possible_mask);
}
Example #26
static int calxeda_cpuidle_probe(struct platform_device *pdev)
{
	return cpuidle_register(&calxeda_idle_driver, NULL);
}
Example #27
static int __init s3c64xx_init_cpuidle(void)
{
	return cpuidle_register(&s3c64xx_cpuidle_driver, NULL);
}
Example #28
int __init mt8173_cpuidle_init(void)
{
	return cpuidle_register(&mt8173_cpuidle_driver, NULL);
}
Example #29
static int tegra_cpuidle_register(unsigned int cpu)
{
	struct cpuidle_driver *drv;
	struct cpuidle_state *state;

	drv = &per_cpu(cpuidle_drv, cpu);
	drv->name = driver_name;
	drv->owner = owner;
	drv->cpumask = &per_cpu(idle_mask, cpu);
	cpumask_set_cpu(cpu, drv->cpumask);
	drv->state_count = 0;

	state = &drv->states[CPUIDLE_STATE_CLKGATING];
	snprintf(state->name, CPUIDLE_NAME_LEN, "clock-gated");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU clock gated");
	state->exit_latency = 10;
	state->target_residency = 10;
	state->power_usage = 600;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_clock_gating;
#ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
	drv->safe_state_index = 0;
#endif
	drv->state_count++;

#ifdef CONFIG_PM_SLEEP
	state = &drv->states[CPUIDLE_STATE_POWERGATING];
	snprintf(state->name, CPUIDLE_NAME_LEN, "powered-down");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU power gated");
	state->exit_latency = tegra_cpu_power_good_time();
	state->target_residency = tegra_cpu_power_off_time() +
		tegra_cpu_power_good_time();
	if (state->target_residency < tegra_pd_min_residency)
		state->target_residency = tegra_pd_min_residency;
	state->power_usage = 100;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_pd;
	drv->state_count++;

	if (cpu == 0) {
		state = &drv->states[CPUIDLE_STATE_MC_CLK_STOP];
		snprintf(state->name, CPUIDLE_NAME_LEN, "mc-clock");
		snprintf(state->desc, CPUIDLE_DESC_LEN, "MC clock stop");
		state->exit_latency = tegra_cpu_power_good_time() +
			DRAM_SELF_REFRESH_EXIT_LATENCY;
		state->target_residency = tegra_cpu_power_off_time() +
			tegra_cpu_power_good_time() + DRAM_SELF_REFRESH_EXIT_LATENCY;
		if (state->target_residency < tegra_mc_clk_stop_min_residency())
			state->target_residency =
					tegra_mc_clk_stop_min_residency();
		state->power_usage = 0;
		state->flags = CPUIDLE_FLAG_TIME_VALID;
		state->enter = tegra_idle_enter_pd;
		state->disabled = true;
		drv->state_count++;
	}
#endif

	if (cpuidle_register(drv, NULL)) {
		pr_err("CPU%u: failed to register driver\n", cpu);
		return -EIO;
	}

	on_each_cpu_mask(drv->cpumask, tegra_cpuidle_setup_bctimer,
				(void *)CLOCK_EVT_NOTIFY_BROADCAST_ON, 1);

	return 0;
}
Example #30
static int gem5_cpuidle_init(void)
{
    return cpuidle_register(&gem5_idle_driver, NULL);
}
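All of the examples above hand a pre-populated struct cpuidle_driver to cpuidle_register(); the second argument is a coupled-CPUs mask and is NULL everywhere except the coupled exynos and Tegra20 cases. For orientation, here is a minimal sketch of what such a driver definition can look like, assuming a 32-bit ARM platform where ARM_CPUIDLE_WFI_STATE and cpu_do_idle() are available. The foo_* names, the latency numbers, and the low-power entry hook are illustrative assumptions, not taken from any of the examples above.

#include <linux/cpuidle.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cpuidle.h>		/* ARM_CPUIDLE_WFI_STATE */

/*
 * Hypothetical low-power entry hook; a real driver would enter the
 * platform's retention or power-down state here.
 */
static int foo_enter_lowpower(struct cpuidle_device *dev,
			      struct cpuidle_driver *drv, int index)
{
	cpu_do_idle();			/* placeholder for the real low-power entry */
	return index;
}

static struct cpuidle_driver foo_idle_driver = {
	.name = "foo_idle",
	.owner = THIS_MODULE,
	.states = {
		/* Index 0: the standard ARM WFI state provided by the arch code */
		[0] = ARM_CPUIDLE_WFI_STATE,
		[1] = {
			.enter			= foo_enter_lowpower,
			.exit_latency		= 300,	/* us, illustrative */
			.target_residency	= 1000,	/* us, illustrative */
			.name			= "lowpower",
			.desc			= "hypothetical low-power state",
		},
	},
	.state_count = 2,
};

static int __init foo_cpuidle_init(void)
{
	/* Second argument is the coupled-CPUs mask; NULL means no coupled states */
	return cpuidle_register(&foo_idle_driver, NULL);
}
device_initcall(foo_cpuidle_init);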