/* Initialize CPU idle by registering the idle states */ static int kirkwood_init_cpuidle(void) { struct cpuidle_device *device; cpuidle_register_driver(&kirkwood_idle_driver); device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); device->state_count = KIRKWOOD_MAX_STATES; /* Wait for interrupt state */ device->states[0].enter = kirkwood_enter_idle; device->states[0].exit_latency = 1; device->states[0].target_residency = 10000; device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[0].name, "WFI"); strcpy(device->states[0].desc, "Wait for interrupt"); /* Wait for interrupt and DDR self refresh state */ device->states[1].enter = kirkwood_enter_idle; device->states[1].exit_latency = 10; device->states[1].target_residency = 10000; device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[1].name, "DDR SR"); strcpy(device->states[1].desc, "WFI and DDR Self Refresh"); if (cpuidle_register_device(device)) { printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); return -EIO; } return 0; }
/*
 * msm_cpuidle_init - register the MSM cpuidle driver and per-CPU devices.
 *
 * Configures the platform idle states, registers the driver, then registers
 * one cpuidle device per possible CPU.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int __init msm_cpuidle_init(void)
{
	unsigned int cpu = 0;
	int ret = 0;

	msm_cpuidle_set_states();

	ret = cpuidle_register_driver(&msm_cpuidle_driver);
	if (ret) {
		pr_err("%s: failed to register cpuidle driver: %d\n",
			__func__, ret);
		/* Fix: previously execution fell through and registered
		 * devices against an unregistered driver. Bail out. */
		return ret;
	}

	for_each_possible_cpu(cpu) {
		struct cpuidle_device *dev = &per_cpu(msm_cpuidle_devs, cpu);

		dev->cpu = cpu;
		msm_cpuidle_set_cpu_statedata(dev);

		ret = cpuidle_register_device(dev);
		if (ret) {
			pr_err("%s: failed to register cpuidle device for "
				"cpu %u: %d\n", __func__, cpu, ret);
			return ret;
		}
	}

	return 0;
}
/* Initialize CPU idle by registering the idle states */ static int s5p6442_init_cpuidle(void) { struct cpuidle_device *device; cpuidle_register_driver(&s5p6442_idle_driver); device = &per_cpu(s5p6442_cpuidle_device, smp_processor_id()); device->state_count = 1; /* Wait for interrupt state */ device->states[0].enter = s5p6442_enter_idle_normal; device->states[0].exit_latency = 1; /* uS */ device->states[0].target_residency = 10000; device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[0].name, "IDLE"); strcpy(device->states[0].desc, "ARM clock gating - WFI"); if (cpuidle_register_device(device)) { printk(KERN_ERR "s5p6442_init_cpuidle: Failed registering\n"); return -EIO; } spin_lock_init(&idle2_lock); return 0; }
/*
 * tegra114_cpuidle_init - register the Tegra114 cpuidle driver and the
 * per-CPU cpuidle devices.
 *
 * Returns 0 on success or a negative errno on failure. On failure the
 * driver and any devices registered so far are unwound.
 */
int __init tegra114_cpuidle_init(void)
{
	int ret;
	unsigned int cpu;
	struct cpuidle_device *dev;
	struct cpuidle_driver *drv = &tegra_idle_driver;

	ret = cpuidle_register_driver(&tegra_idle_driver);
	if (ret) {
		pr_err("CPUidle driver registration failed\n");
		return ret;
	}

	for_each_possible_cpu(cpu) {
		dev = &per_cpu(tegra_idle_device, cpu);
		dev->cpu = cpu;
		dev->state_count = drv->state_count;
		ret = cpuidle_register_device(dev);
		if (ret) {
			unsigned int undo;

			pr_err("CPU%u: CPUidle device registration failed\n",
				cpu);
			/* Fix: previously the driver and the devices already
			 * registered were leaked on this error path. */
			for_each_possible_cpu(undo) {
				if (undo == cpu)
					break;
				cpuidle_unregister_device(
					&per_cpu(tegra_idle_device, undo));
			}
			cpuidle_unregister_driver(drv);
			return ret;
		}
	}

	return 0;
}
/*
 * shmobile_cpuidle_init - register the SH-Mobile cpuidle driver and its
 * single cpuidle device.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int shmobile_cpuidle_init(void)
{
	struct cpuidle_device *dev = &shmobile_cpuidle_dev;
	int ret;

	/* Fix: both registration results were previously ignored, so a
	 * failure went completely unnoticed. */
	ret = cpuidle_register_driver(cpuidle_drv);
	if (ret)
		return ret;

	dev->state_count = cpuidle_drv->state_count;

	ret = cpuidle_register_device(dev);
	if (ret) {
		cpuidle_unregister_driver(cpuidle_drv);
		return ret;
	}

	return 0;
}
/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver with the cpuidle
 * framework with the valid set of states: looks up the power domains,
 * packs every valid entry of omap3_power_states[] into the per-CPU
 * cpuidle device and registers it.
 *
 * Returns 0 on success, -EINVAL if no valid state is configured,
 * -EIO if device registration fails.
 */
int __init omap3_idle_init(void)
{
	int i, count = 0;
	struct omap3_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");

	omap_init_power_states();
	cpuidle_register_driver(&omap3_idle_driver);

	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

	for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) {
		cx = &omap3_power_states[i];
		/* States are packed: invalid table entries are skipped, so
		 * the device index (count) can lag the table index (i). */
		state = &dev->states[count];

		if (!cx->valid)
			continue;
		cpuidle_set_statedata(state, cx);
		state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
		state->target_residency = cx->threshold;
		state->flags = cx->flags;
		state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
			omap3_enter_idle_bm : omap3_enter_idle;
		if (cx->type == OMAP3_STATE_C1)
			dev->safe_state = state;
		/* Fix: bounded, always-terminated copies. sprintf() could
		 * overrun state->name, and strncpy() does not guarantee a
		 * NUL terminator when cx->desc fills the buffer. */
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", count+1);
		snprintf(state->desc, CPUIDLE_DESC_LEN, "%s", cx->desc);
		count++;
	}

	if (!count)
		return -EINVAL;
	dev->state_count = count;

	if (enable_off_mode)
		omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF);
	else
		omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET);

	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
			__func__);
		return -EIO;
	}

	return 0;
}
/*
 * omap3_idle_init - register the OMAP3 cpuidle driver and its C-states.
 *
 * Copies every valid entry of omap3_power_states[] into the per-CPU
 * cpuidle device, registers the device, creates the PM proc entry and
 * seeds the UART inactivity timestamps.
 *
 * Returns 0 on success, -EINVAL when no valid state was configured,
 * -EIO when device registration fails.
 */
static __init int omap3_idle_init(void)
{
	int i, count = 0;
	struct omap3_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;

	printk(KERN_INFO "OMAP CPU idle driver initializing.\n");

	/* NOTE(review): the driver registration result is not checked. */
	cpuidle_register_driver(&omap3_idle_driver);

	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

	for (i = 0; i < OMAP3_MAX_STATES; i++) {
		cx = &omap3_power_states[i];
		/* States are packed: invalid entries are skipped, so the
		 * device index (count) can lag the table index (i). */
		state = &dev->states[count];

		if (!cx->valid)
			continue;
		cpuidle_set_statedata(state, cx);
		state->exit_latency = cx->sleep_latency + cx->wakeup_latency;
		state->target_residency = cx->threshold;
		state->flags = cx->flags;
		/* Route through the bus-master variant when the state
		 * carries CPUIDLE_FLAG_CHECK_BM. */
		state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
			omap3_enter_idle_bm : omap3_enter_idle;
		if (cx->type == OMAP3_STATE_C3)
			dev->safe_state = state;
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", count+1);
		count++;
		BUG_ON(count == CPUIDLE_STATE_MAX); /* harsh... oh well */
	}

	if (!count)
		return -EINVAL; /* No valid states configured. */
	dev->state_count = count;

	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
			__FUNCTION__);
		return -EIO;
	}

	create_pmproc_entry();

	/* Initialize UART inactivity time */
	uart_inactivity_timeout = msecs_to_jiffies(UART_TIME_OUT);
	uart_last_awake = jiffies;

	return 0;
}
/*
 * tegra_cpuidle_register_device - allocate and register a cpuidle device
 * for @cpu.
 *
 * State 0 is LP3 (flow-controlled WFI); when CONFIG_PM_SLEEP is enabled a
 * second LP2 (CPU power-gate) state is added with latencies derived from
 * the board's power-good/power-off timings.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when
 * registration fails (the allocation is freed in that case).
 */
static int tegra_cpuidle_register_device(unsigned int cpu)
{
	struct cpuidle_device *dev;
	struct cpuidle_state *state;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	dev->state_count = 0;
	dev->cpu = cpu;

	/* LP3: architectural WFI, always safe. */
	state = &dev->states[0];
	snprintf(state->name, CPUIDLE_NAME_LEN, "LP3");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU flow-controlled");
	state->exit_latency = 10;
	state->target_residency = 10;
	state->power_usage = 600;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_lp3;
	dev->safe_state = state;
	dev->state_count++;

#ifdef CONFIG_PM_SLEEP
	/* LP2: CPU power-gated; residency is bounded below by the
	 * platform minimum. */
	state = &dev->states[1];
	snprintf(state->name, CPUIDLE_NAME_LEN, "LP2");
	snprintf(state->desc, CPUIDLE_DESC_LEN, "CPU power-gate");
	state->exit_latency = tegra_cpu_power_good_time();
	state->target_residency = tegra_cpu_power_off_time() +
		tegra_cpu_power_good_time();
	if (state->target_residency < tegra_lp2_min_residency)
		state->target_residency = tegra_lp2_min_residency;
	state->power_usage = 0;
	state->flags = CPUIDLE_FLAG_TIME_VALID;
	state->enter = tegra_idle_enter_lp2;
	dev->power_specified = 1;
	/* NOTE(review): this overwrites safe_state with the power-gating
	 * LP2 state rather than leaving it at LP3 (WFI) — looks
	 * suspicious for a "safe" fallback state; confirm intent. */
	dev->safe_state = state;
	dev->state_count++;
#endif

	dev->prepare = tegra_idle_prepare;

	if (cpuidle_register_device(dev)) {
		pr_err("CPU%u: failed to register idle device\n", cpu);
		kfree(dev);
		return -EIO;
	}
	per_cpu(idle_devices, cpu) = dev;
	return 0;
}
/*
 * kirkwood_init_cpuidle - register the Kirkwood cpuidle driver and the
 * per-CPU cpuidle device (the states live in the driver).
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int kirkwood_init_cpuidle(void)
{
	struct cpuidle_device *device;
	int ret;

	device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id());
	device->state_count = KIRKWOOD_MAX_STATES;

	/* Fix: the driver registration result was previously ignored. */
	ret = cpuidle_register_driver(&kirkwood_idle_driver);
	if (ret)
		return ret;

	if (cpuidle_register_device(device)) {
		printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n");
		/* Fix: don't leave the driver registered on failure. */
		cpuidle_unregister_driver(&kirkwood_idle_driver);
		return -EIO;
	}
	return 0;
}
/**
 * omap4_idle_init - Init routine for OMAP4 idle
 *
 * Registers the OMAP4 specific cpuidle driver with the cpuidle
 * framework with the valid set of states.
 *
 * NOTE(review): this chunk appears truncated — the closing of the
 * function (final return) is not visible here.
 */
int __init omap4_idle_init(void)
{
	int cpu_id, i, count = 0;
	struct omap4_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	cpu1_pd = pwrdm_lookup("cpu1_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");

	omap_init_power_states();
	/* NOTE(review): driver registration result is not checked. */
	cpuidle_register_driver(&omap4_idle_driver);

	for_each_cpu(cpu_id, cpu_online_mask) {
		pr_err("CPUidle for CPU%d registered\n", cpu_id);
		dev = &per_cpu(omap4_idle_dev, cpu_id);
		dev->cpu = cpu_id;
		/* Restart packing for each CPU's device. */
		count = 0;
		for (i = OMAP4_STATE_C1; i < OMAP4_MAX_STATES; i++) {
			cx = &omap4_power_states[i];
			/* Invalid entries are skipped, so count can lag i. */
			state = &dev->states[count];

			if (!cx->valid)
				continue;
			cpuidle_set_statedata(state, cx);
			state->exit_latency = cx->sleep_latency +
				cx->wakeup_latency;
			state->target_residency = cx->threshold;
			state->flags = cx->flags;
			if (cx->type == OMAP4_STATE_C1)
				dev->safe_state = state;
			state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ?
				omap4_enter_idle_bm : omap4_enter_idle;
			sprintf(state->name, "C%d", count+1);
			strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
			count++;
		}
		if (!count)
			return -EINVAL;
		dev->state_count = count;
		if (cpuidle_register_device(dev)) {
			printk(KERN_ERR "%s: CPUidle register device failed\n",
				__func__);
			return -EIO;
		}
	}
int omap3_idle_init(void) { int i, count = 0; struct omap3_processor_cx *cx; struct cpuidle_state *state; struct cpuidle_device *dev; omap_init_power_states(); cpuidle_register_driver(&omap3_idle_driver); dev = &per_cpu(omap3_idle_dev, smp_processor_id()); for (i = 0; i < OMAP3_MAX_STATES; i++) { cx = &omap3_power_states[i]; state = &dev->states[count]; if (!cx->valid) continue; cpuidle_set_statedata(state, cx); state->exit_latency = cx->sleep_latency + cx->wakeup_latency; state->target_residency = cx->threshold; state->flags = cx->flags; state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ? omap3_enter_idle_bm : omap3_enter_idle; if (cx->type == OMAP3_STATE_C2) dev->safe_state = state; sprintf(state->name, "C%d", count+1); count++; } if (!count) return -EINVAL; dev->state_count = count; if (cpuidle_register_device(dev)) { printk(KERN_ERR "%s: CPUidle register device failed\n", __FUNCTION__); return -EIO; } #ifdef CONFIG_PROC_FS create_pmproc_entry(); #endif /* #ifdef CONFIG_PROC_FS */ /* Initialize UART inactivity time */ awake_time_end = jiffies + msecs_to_jiffies(UART_TIME_OUT); return 0; }
/*
 * s3c64xx_init_cpuidle - copy the state table into the driver, then
 * register the cpuidle driver and its single device.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init s3c64xx_init_cpuidle(void)
{
	int ret;

	memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
	       sizeof(s3c64xx_cpuidle_set));

	/* Fix: the driver registration result was previously ignored. */
	ret = cpuidle_register_driver(&s3c64xx_cpuidle_driver);
	if (ret) {
		pr_err("Failed to register cpuidle driver: %d\n", ret);
		return ret;
	}

	ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
	if (ret) {
		pr_err("Failed to register cpuidle device: %d\n", ret);
		/* Fix: don't leave the driver registered on failure. */
		cpuidle_unregister_driver(&s3c64xx_cpuidle_driver);
		return ret;
	}
	return 0;
}
/*
 * acpi_processor_power_init - set up ACPI C-state power management for a
 * processor and register its cpuidle device.
 *
 * On the first call only, DMI quirks are applied and max_cstate is
 * clamped. When the FADT advertises _CST control (and "nocst" is not
 * set), the BIOS is notified of _CST capability via the SMI command
 * port. The cpuidle device is registered only when the processor
 * actually supports power management (pr->flags.power).
 *
 * Returns 0 on success (including the disabled-by-boot-param case),
 * -EINVAL for a NULL processor, -EIO when cpuidle registration fails.
 */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
		struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;	/* one-time global init guard */

	if (disabled_by_idle_boot_param())
		return 0;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	/* Tell the BIOS we can handle _CST, if it asked to be told. */
	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command,
				       acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			/* Non-fatal: fall through with default C-states. */
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that we use previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
	}
	return 0;
}
/*
 * mv_cpu_idle_write - procfs write handler controlling Kirkwood deep idle.
 *
 * Recognized commands (matched by prefix of the written buffer):
 *   "enable"  - lazily register the cpuidle device once, then enable it;
 *   "disable" - disable the cpuidle device;
 *   "test"    - mask all interrupts except UART0, enter deep idle once,
 *               then restore the saved interrupt masks.
 *
 * Returns @count (all input consumed) on success, -EIO if the lazy
 * device registration fails.
 *
 * NOTE(review): @buffer is a user-supplied buffer compared with strncmp
 * without an explicit bound against @count — assumes the core passes a
 * NUL-terminated kernel copy; confirm against the proc write plumbing.
 */
static int mv_cpu_idle_write(struct file *file, const char *buffer,
		unsigned long count, void *data)
{
	MV_U32 regs[4];	/* saved interrupt mask registers */

	/* Reading / Writing from system controller internal registers */
	if (!strncmp (buffer, "enable", strlen("enable"))) {
		/* Register the device once, on first enable. */
		if(device_registered == 0) {
			device_registered = 1;
			if (cpuidle_register_device(kirkwood_cpu_idle_device)) {
				printk(KERN_ERR
				       "mv_cpu_idle_write: Failed registering\n");
				return -EIO;
			}
		}
		cpuidle_enable_device(kirkwood_cpu_idle_device);
	} else if (!strncmp (buffer, "disable", strlen("disable"))) {
		cpuidle_disable_device(kirkwood_cpu_idle_device);
	} else if (!strncmp (buffer, "test", strlen("test"))) {
		/* Store Interrupt mask registers. */
		regs[0] = MV_REG_READ(MV_IRQ_MASK_LOW_REG);
		regs[1] = MV_REG_READ(MV_IRQ_MASK_HIGH_REG);
		regs[2] = MV_REG_READ(MV_IRQ_MASK_ERROR_REG);

		/* Disable all interrupts . */
		MV_REG_WRITE(MV_IRQ_MASK_LOW_REG, 0x0);
		MV_REG_WRITE(MV_IRQ_MASK_HIGH_REG, 0x0);
		MV_REG_WRITE(MV_IRQ_MASK_ERROR_REG, 0x0);

		/* Enable only the UART interrupt. */
		MV_REG_BIT_SET(MV_IRQ_MASK_HIGH_REG,
			       1 << (UART_IRQ_NUM(0) - 32));

		printk(KERN_INFO "Press any key to leave deep idle:");

		mv_kw2_cpu_idle_enter();

		/* Restore Interrupt mask registers. */
		MV_REG_WRITE(MV_IRQ_MASK_LOW_REG, regs[0]);
		MV_REG_WRITE(MV_IRQ_MASK_HIGH_REG, regs[1]);
		MV_REG_WRITE(MV_IRQ_MASK_ERROR_REG, regs[2]);
	}

	return count;
}
/*
 * shmobile_cpuidle_init - hook the common enter routine into every driver
 * state, apply the optional platform setup callback, then register the
 * driver and its single cpuidle device.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int shmobile_cpuidle_init(void)
{
	struct cpuidle_device *dev = &shmobile_cpuidle_dev;
	struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
	int i, ret;

	for (i = 0; i < CPUIDLE_STATE_MAX; i++)
		drv->states[i].enter = shmobile_cpuidle_enter;

	if (shmobile_cpuidle_setup)
		shmobile_cpuidle_setup(drv);

	/* Fix: both registration results were previously ignored. */
	ret = cpuidle_register_driver(drv);
	if (ret)
		return ret;

	dev->state_count = drv->state_count;

	ret = cpuidle_register_device(dev);
	if (ret) {
		cpuidle_unregister_driver(drv);
		return ret;
	}

	return 0;
}
/* Initialize CPU idle by registering the idle states */ static int kw_cpuidle_probe(struct platform_device *pdev) { struct cpuidle_device *device; cpuidle_register_driver(&kirkwood_idle_driver); device = &per_cpu(kirkwood_cpuidle_device, smp_processor_id()); device->state_count = KIRKWOOD_MAX_STATES; /* Wait for interrupt state */ device->states[0].enter = kirkwood_enter_idle; device->states[0].exit_latency = 1; device->states[0].target_residency = 100; device->states[0].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[0].name, "WFI"); strcpy(device->states[0].desc, "Wait for interrupt"); /* CPU Deep Idle state */ device->states[1].enter = kirkwood_enter_idle; device->states[1].exit_latency = 10; device->states[1].target_residency = 5000; device->states[1].flags = CPUIDLE_FLAG_TIME_VALID; strcpy(device->states[1].name, "DEEP IDLE"); strcpy(device->states[1].desc, "CPU Deep Idle"); #if 0 if (cpuidle_register_device(device)) { printk(KERN_ERR "kirkwood_init_cpuidle: Failed registering\n"); return -EIO; } #endif kirkwood_cpu_idle_device = device; #ifdef CONFIG_MV_PMU_PROC /* Create proc entry. */ cpu_idle_proc = create_proc_entry("cpu_idle", 0666, mv_pm_proc_entry); cpu_idle_proc->read_proc = mv_cpu_idle_read; cpu_idle_proc->write_proc = mv_cpu_idle_write; cpu_idle_proc->nlink = 1; #endif /* CONFIG_MV_PMU_PROC */ return 0; }
/*
 * sunxi_init_cpuidle - initialize the sun8i idle states and register the
 * cpuidle driver plus one device per possible CPU.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init sunxi_init_cpuidle(void)
{
	int cpu, ret;
	struct cpuidle_device *device;

	sun8i_cpuidle_state_init();
	sunxi_idle_driver.safe_state_index = 0;

	/* Fix: the driver registration result was previously ignored. */
	ret = cpuidle_register_driver(&sunxi_idle_driver);
	if (ret)
		return ret;

	for_each_possible_cpu(cpu) {
		device = &per_cpu(sunxi_cpuidle_device, cpu);
		device->cpu = cpu;
		if (cpuidle_register_device(device)) {
			/* Fix: the original message had a stray comma after
			 * the newline ("failed\n,"). */
			printk(KERN_ERR "CPUidle register device failed\n");
			cpuidle_unregister_driver(&sunxi_idle_driver);
			return -EIO;
		}
	}

	return 0;
}
/*
 * wmt_cpuidle_driver_init - validate the environment, register the WMT
 * cpuidle driver, then populate and register the per-CPU device with two
 * states: WFI, and WFI with the ZAC clock disabled.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int __init wmt_cpuidle_driver_init(void)
{
	struct cpuidle_device *device = NULL;
	int ret;

	if (wmt_cpuidle_check_env()) {
		printk(KERN_WARNING "wmt_cpuidle check env failed!\n");
		return -EINVAL;
	}

	/* Fix: the driver registration result was previously ignored. */
	ret = cpuidle_register_driver(&wmt_cpuidle_driver);
	if (ret)
		return ret;

	device = &per_cpu(wmt_cpuidle_device, smp_processor_id());
	device->state_count = WMT_CPU_IDLE_MAX_STATES;

	/* Wait for interrupt state */
	device->states[0].enter = wmt_enter_idle;
	device->states[0].exit_latency = 1;
	device->states[0].target_residency = 10000;
	device->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
	strcpy(device->states[0].name, "WFI");
	strcpy(device->states[0].desc, "Wait for interrupt");

	/* Wait for interrupt and DDR self refresh state */
	device->states[1].enter = wmt_enter_idle;
	device->states[1].exit_latency = 10;
	device->states[1].target_residency = 10000;
	device->states[1].flags = CPUIDLE_FLAG_TIME_VALID;
	strcpy(device->states[1].name, "ZAC_OFF");
	strcpy(device->states[1].desc, "WFI and disable ZAC clock");

	if (cpuidle_register_device(device)) {
		printk(KERN_ERR "wmt_cpuidle_driver_init: Failed registering\n");
		/* Fix: don't leave the driver registered on failure. */
		cpuidle_unregister_driver(&wmt_cpuidle_driver);
		return -EIO;
	}

	printk(KERN_INFO "WMT cpuidle driver register\n");
	return 0;
}
/**
 * cpuidle_init - Init routine for v9r1 idle
 *
 * Registers the v9r1 specific cpuidle driver to the cpuidle
 * framework with the valid set of states.
 *
 * NOTE(review): this chunk appears truncated — the function's closing
 * (final return) is not visible here.
 */
int __init cpuidle_init(void)
{
	struct cpuidle_device *dev;
	int i, max_cpuidle_state, cpu_id;

	/* NOTE(review): driver registration result is not checked. */
	cpuidle_register_driver(&idle_driver);
	wdt_base = ioremap(REG_BASE_WD0, REG_WD0_IOSIZE);
	gp_cpuidle_state = (unsigned int *)MEM_AXI_CPU_IDLE_ADDR;
	/* Publish the initial C4 state plus the validity magic. */
	*gp_cpuidle_state = (CPU_IDLE_C4 << CPUIDLE_STATE_START_BIT) |
		(CPU_IDLE_STAT_VALID << CPUIDLE_STATE_MAGIC_START_BIT);/*init*/
	for_each_cpu(cpu_id, cpu_online_mask) {
		dev = &per_cpu(idle_dev, cpu_id);
		dev->cpu = cpu_id;
		printk(KERN_ERR "cpu=%d\n", dev->cpu);
		/* Only CPU0 gets the full state set. */
		if (cpu_id == 0)
			dev->state_count = ARRAY_SIZE(cpuidle_set);
		else
			dev->state_count = 1;	/* Support IDLE only */
		max_cpuidle_state = dev->state_count;
		for (i = 0; i < max_cpuidle_state; i++) {
			memcpy(&dev->states[i], &cpuidle_set[i],
				sizeof(struct cpuidle_state));
		}
		dev->safe_state = &dev->states[0];
		if (cpuidle_register_device(dev)) {
			cpuidle_unregister_driver(&idle_driver);
			printk(KERN_ERR "CPUidle register device failed\n,");
			return -EIO;
		}
	}
/* pseries_idle_devices_init() * allocate, initialize and register cpuidle device */ static int pseries_idle_devices_init(void) { int i; struct cpuidle_driver *drv = &pseries_idle_driver; struct cpuidle_device *dev; pseries_cpuidle_devices = alloc_percpu(struct cpuidle_device); if (pseries_cpuidle_devices == NULL) return -ENOMEM; for_each_possible_cpu(i) { dev = per_cpu_ptr(pseries_cpuidle_devices, i); dev->state_count = drv->state_count; dev->cpu = i; if (cpuidle_register_device(dev)) { printk(KERN_DEBUG \ "cpuidle_register_device %d failed!\n", i); return -EIO; } } return 0; }
/** * cpuidle_register: registers the driver and the cpu devices with the * coupled_cpus passed as parameter. This function is used for all common * initialization pattern there are in the arch specific drivers. The * devices is globally defined in this file. * * @drv : a valid pointer to a struct cpuidle_driver * @coupled_cpus: a cpumask for the coupled states * * Returns 0 on success, < 0 otherwise */ int cpuidle_register(struct cpuidle_driver *drv, const struct cpumask *const coupled_cpus) { int ret, cpu; struct cpuidle_device *device; ret = cpuidle_register_driver(drv); if (ret) { pr_err("failed to register cpuidle driver\n"); return ret; } for_each_possible_cpu(cpu) { device = &per_cpu(cpuidle_dev, cpu); device->cpu = cpu; #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED /* * On multiplatform for ARM, the coupled idle states could * enabled in the kernel even if the cpuidle driver does not * use it. Note, coupled_cpus is a struct copy. */ if (coupled_cpus) device->coupled_cpus = *coupled_cpus; #endif ret = cpuidle_register_device(device); if (!ret) continue; pr_err("Failed to register cpuidle device for cpu%d\n", cpu); cpuidle_unregister(drv); break; } return ret; }
/*
 * exynos_init_cpuidle - initialize the cluster-0 idle states, register the
 * driver and one cpuidle device per online CPU.
 *
 * NOTE(review): this chunk appears truncated — the function's closing
 * (final return) is not visible here.
 */
static int __init exynos_init_cpuidle(void)
{
	int cpuid, ret;
	struct cpuidle_device *device;

	ret = exynos_idle_state_init(&exynos64_idle_cluster0_driver,
					cpu_online_mask);
	if (ret)
		return ret;

	cpuidle_profile_state_init(&exynos64_idle_cluster0_driver);

	exynos64_idle_cluster0_driver.safe_state_index = IDLE_C1;

	ret = cpuidle_register_driver(&exynos64_idle_cluster0_driver);
	if (ret) {
		pr_err("CPUidle register device failed\n");
		return ret;
	}

	for_each_cpu(cpuid, cpu_online_mask) {
		device = &per_cpu(exynos_cpuidle_device, cpuid);
		device->cpu = cpuid;
		device->state_count =
			exynos64_idle_cluster0_driver.state_count;

		/* Big core will not change idle time correlation factor */
		/* NOTE(review): bit 2 of the cpu id selects the big
		 * cluster here — confirm against the SoC topology. */
		if (cpuid & 0x4)
			device->skip_idle_correlation = true;
		else
			device->skip_idle_correlation = false;

		if (cpuidle_register_device(device)) {
			printk(KERN_ERR "CPUidle register device failed\n,");
			return -EIO;
		}
	}
/*
 * acpi_processor_power_init - set up ACPI C-state power management for a
 * processor, register its cpuidle device and (optionally) its procfs
 * "power" entry.
 *
 * First-call-only work: honor "idle=halt" by clamping max_cstate to 1,
 * apply DMI quirks and clamp max_cstate. When the FADT advertises _CST
 * control (and "nocst" is not set), the BIOS is notified of _CST
 * capability via the SMI command port.
 *
 * Returns 0 on success (including the boot-override case), -EINVAL for a
 * NULL processor, -EIO when cpuidle or procfs registration fails.
 */
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
		struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;	/* one-time global init guard */
#ifdef CONFIG_ACPI_PROCFS
	struct proc_dir_entry *entry = NULL;
#endif

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	/* Tell the BIOS we can handle _CST, if it asked to be told. */
	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command,
				       acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			/* Non-fatal: fall through with default C-states. */
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that we use previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
	}
#ifdef CONFIG_ACPI_PROCFS
	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
#endif
	return 0;
}
/**
 * omap3_idle_init - Init routine for OMAP3 idle
 *
 * Registers the OMAP3 specific cpuidle driver to the cpuidle
 * framework with the valid set of states: looks up the power domains,
 * fills C1..C7 via _fill_cstate() and registers the per-CPU device.
 *
 * Returns 0 on success, -EIO when device registration fails.
 */
int __init omap3_idle_init(void)
{
	struct cpuidle_device *dev;
	struct omap3_idle_statedata *cx;

	mpu_pd = pwrdm_lookup("mpu_pwrdm");
	core_pd = pwrdm_lookup("core_pwrdm");
	per_pd = pwrdm_lookup("per_pwrdm");
	cam_pd = pwrdm_lookup("cam_pwrdm");

	/* NOTE(review): the driver registration result is not checked. */
	cpuidle_register_driver(&omap3_idle_driver);
	dev = &per_cpu(omap3_idle_dev, smp_processor_id());

	/* C1 . MPU WFI + Core active */
	cx = _fill_cstate(dev, 0, "MPU ON + CORE ON");
	(&dev->states[0])->enter = omap3_enter_idle;
	dev->safe_state = &dev->states[0];
	cx->valid = 1;	/* C1 is always valid */
	cx->mpu_state = PWRDM_POWER_ON;
	cx->core_state = PWRDM_POWER_ON;

	/* C2 . MPU WFI + Core inactive */
	cx = _fill_cstate(dev, 1, "MPU ON + CORE ON");
	cx->mpu_state = PWRDM_POWER_ON;
	cx->core_state = PWRDM_POWER_ON;

	/* C3 . MPU CSWR + Core inactive */
	cx = _fill_cstate(dev, 2, "MPU RET + CORE ON");
	cx->mpu_state = PWRDM_POWER_RET;
	cx->core_state = PWRDM_POWER_ON;

	/* C4 . MPU OFF + Core inactive */
	cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON");
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_ON;

	/* C5 . MPU RET + Core RET */
	cx = _fill_cstate(dev, 4, "MPU RET + CORE RET");
	cx->mpu_state = PWRDM_POWER_RET;
	cx->core_state = PWRDM_POWER_RET;

	/* C6 . MPU OFF + Core RET */
	cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET");
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_RET;

	/* C7 . MPU OFF + Core OFF */
	cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF");
	/*
	 * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot
	 * enable OFF mode in a stable form for previous revisions.
	 * We disable C7 state as a result.
	 */
	if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) {
		cx->valid = 0;
		pr_warn("%s: core off state C7 disabled due to i583\n",
				__func__);
	}
	cx->mpu_state = PWRDM_POWER_OFF;
	cx->core_state = PWRDM_POWER_OFF;

	dev->state_count = OMAP3_NUM_STATES;
	if (cpuidle_register_device(dev)) {
		printk(KERN_ERR "%s: CPUidle register device failed\n",
		       __func__);
		return -EIO;
	}

	return 0;
}