/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	ktime_t kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	local_irq_enable();
	cx->usage++;
	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	ktime_t kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return 0;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	local_irq_enable();
	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	return idle_time;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
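/*
 * Illustrative sketch, not part of the original excerpt: the variants above
 * that look up the C-state via per_cpu(acpi_cstate[index], dev->cpu) assume
 * a per-CPU array of acpi_processor_cx pointers defined elsewhere in
 * processor_idle.c.  The declaration below is an assumption inferred from
 * that usage, shown only so the lookup reads as self-contained here.
 */
static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
		      acpi_cstate);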
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver with cpuidle state information
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			return -EINVAL;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);

	sched_clock_idle_wakeup_event(0);

	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 * @timer_bc: Whether or not to change timer mode to broadcast
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx, bool timer_bc)
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 0);
}
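/*
 * Illustrative sketch, not part of the original excerpt: the bus-master
 * arbitration bookkeeping used by acpi_idle_enter_bm() above lives in
 * file-scope statics elsewhere in processor_idle.c.  The declarations
 * below are an assumption inferred from how c3_lock and c3_cpu_count are
 * used above (a raw spinlock protecting a plain counter), shown so the
 * helper is readable in isolation.
 */
static DEFINE_RAW_SPINLOCK(c3_lock);
static int c3_cpu_count;	/* online CPUs currently sitting in C3 */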
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
/**
 * acpi_idle_enter - enter the suggested ACPI idle state (or a safer one)
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Falls back to C1 or the driver's safe state when bus-master activity
 * or the current system state makes the requested C-state unsuitable.
 */
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx, true);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
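/*
 * Illustrative sketch, not part of the original excerpt: how an enter
 * callback with the (dev, drv, index) signature above is handed to the
 * cpuidle core.  In the kernel this wiring happens inside
 * acpi_processor_setup_cpuidle_states(); the hypothetical helper below is
 * a simplified assumption of that step, with per-state setup and error
 * handling omitted.
 */
static void acpi_idle_wire_enter_sketch(struct cpuidle_driver *drv)
{
	int i;

	/* Point every registered C-state at the common entry path above. */
	for (i = 0; i < drv->state_count; i++)
		drv->states[i].enter = acpi_idle_enter;
}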
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __this_cpu_read(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return -EINVAL;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return -EINVAL;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @drv: cpuidle driver containing state data
 * @index: the index of suggested state
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
		struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (drv->safe_state_index >= 0) {
			return drv->states[drv->safe_state_index].enter(dev,
						drv, drv->safe_state_index);
		} else {
			acpi_safe_halt();
			return -EBUSY;
		}
	}

	if (cx->entry_method == ACPI_CSTATE_FFH) {
		if (current_set_polling_and_test())
			return -EINVAL;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	sched_clock_idle_wakeup_event(0);

	lapic_timer_state_broadcast(pr, cx, 0);
	return index;
}
/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev, int index)
{
	struct acpi_processor *pr;
	struct cpuidle_state_usage *state_usage = &dev->states_usage[index];
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state_usage);
	ktime_t kt1, kt2;
	s64 idle_time;
	s64 sleep_ticks = 0;

	pr = __get_cpu_var(processors);
	dev->last_residency = 0;

	if (unlikely(!pr))
		return -EINVAL;

	local_irq_disable();

	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return -EBUSY;
	}

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();
	}

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return -EINVAL;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	sleep_ticks = us_to_pm_timer_ticks(idle_time);

	/* Update device last_residency */
	dev->last_residency = (int)idle_time;

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks * PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return index;
}