/*
 * Dump the C-state statistics for @cpu to the console.
 *
 * A consistent snapshot of per-state residency/usage counters is taken
 * under power->stat_lock, then the residency of the state the CPU is
 * currently in is extrapolated up to "now" outside the lock, so the
 * printed totals reflect the moment of the dump.
 */
static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power)
{
    uint64_t idle_res = 0, idle_usage = 0;
    uint64_t last_state_update_tick, current_tick, current_stime;
    uint64_t usage[ACPI_PROCESSOR_MAX_POWER] = { 0 };
    uint64_t res_tick[ACPI_PROCESSOR_MAX_POWER] = { 0 };
    unsigned int i;
    signed int last_state_idx;  /* -1 when no last state is recorded */

    printk("==cpu%d==\n", cpu);
    last_state_idx = power->last_state ? power->last_state->idx : -1;
    printk("active state:\t\tC%d\n", last_state_idx);
    printk("max_cstate:\t\tC%d\n", max_cstate);
    printk("states:\n");

    /*
     * Snapshot all counters plus the current tick/time under the lock so
     * the per-state values are mutually consistent.
     */
    spin_lock_irq(&power->stat_lock);
    current_tick = cpuidle_get_tick();
    current_stime = NOW();
    for ( i = 1; i < power->count; i++ )
    {
        res_tick[i] = power->states[i].time;
        usage[i] = power->states[i].usage;
    }
    last_state_update_tick = power->last_state_update_tick;
    spin_unlock_irq(&power->stat_lock);

    /* Credit the still-accumulating residency to the current state. */
    if ( last_state_idx >= 0 )
    {
        res_tick[last_state_idx] += ticks_elapsed(last_state_update_tick,
                                                  current_tick);
        usage[last_state_idx]++;
    }

    for ( i = 1; i < power->count; i++ )
    {
        idle_usage += usage[i];
        idle_res += tick_to_ns(res_tick[i]);

        printk((last_state_idx == i) ? " *" : " ");
        printk("C%d:\t", i);
        printk("type[C%d] ", power->states[i].type);
        printk("latency[%03d] ", power->states[i].latency);
        printk("usage[%08"PRIu64"] ", usage[i]);
        printk("method[%5s] ",
               acpi_cstate_method_name[power->states[i].entry_method]);
        printk("duration[%"PRIu64"]\n", tick_to_ns(res_tick[i]));
    }

    /* C0 time is whatever wall-clock time was not spent in C1..Cn. */
    printk((last_state_idx == 0) ? " *" : " ");
    printk("C0:\tusage[%08"PRIu64"] duration[%"PRIu64"]\n",
           usage[0] + idle_usage, current_stime - idle_res);

    print_hw_residencies(cpu);
}
/*
 * Dump the C-state statistics for @cpu to the console (legacy variant).
 *
 * NOTE(review): this is a second definition of print_acpi_power — the
 * variant above snapshots the counters under power->stat_lock, while this
 * one reads states[].time/usage with no locking, so concurrently updated
 * counters may be read torn. Presumably only one of the two is compiled
 * in; confirm and drop the superseded copy.
 */
static void print_acpi_power(uint32_t cpu, struct acpi_processor_power *power)
{
    uint32_t i, idle_usage = 0;
    uint64_t res, idle_res = 0;

    printk("==cpu%d==\n", cpu);
    printk("active state:\t\tC%d\n",
           power->last_state ? power->last_state->idx : -1);
    printk("max_cstate:\t\tC%d\n", max_cstate);
    printk("states:\n");

    for ( i = 1; i < power->count; i++ )
    {
        /* Unlocked reads of live counters — see NOTE(review) above. */
        res = tick_to_ns(power->states[i].time);
        idle_usage += power->states[i].usage;
        idle_res += res;

        printk((power->last_state && power->last_state->idx == i) ? " *" : " ");
        printk("C%d:\t", i);
        printk("type[C%d] ", power->states[i].type);
        printk("latency[%03d] ", power->states[i].latency);
        printk("usage[%08d] ", power->states[i].usage);
        printk("method[%5s] ",
               acpi_cstate_method_name[power->states[i].entry_method]);
        printk("duration[%"PRId64"]\n", res);
    }

    /* C0 time is whatever wall-clock time was not spent in C1..Cn. */
    printk(" C0:\tusage[%08d] duration[%"PRId64"]\n",
           idle_usage, NOW() - idle_res);

    print_hw_residencies(cpu);
}
/*
 * Fold one completed idle-state entry into the per-CPU statistics:
 * bump the state's usage count and, for a genuine (positive-length)
 * sleep, accumulate its tick residency and record the last residency
 * in microseconds.
 */
static inline void acpi_update_idle_stats(struct acpi_processor_power *power,
                                          struct acpi_processor_cx *cx,
                                          int64_t sleep_ticks)
{
    /* Caller runs with interrupts off, so a plain spin lock suffices. */
    spin_lock(&power->stat_lock);

    cx->usage++;
    if ( sleep_ticks > 0 )
    {
        cx->time += sleep_ticks;
        power->last_residency = tick_to_ns(sleep_ticks) / 1000UL;
    }

    spin_unlock(&power->stat_lock);
}
/*
 * Account a completed idle period delimited by the tick timestamps
 * @before/@after: bump the state's usage count and, when the elapsed
 * tick count is positive, accumulate its residency and record the last
 * residency in microseconds.
 */
void update_idle_stats(struct acpi_processor_power *power,
                       struct acpi_processor_cx *cx,
                       uint64_t before, uint64_t after)
{
    int64_t slept = ticks_elapsed(before, after);

    /* Caller runs with interrupts off, so a plain spin lock suffices. */
    spin_lock(&power->stat_lock);

    cx->usage++;
    if ( slept > 0 )
    {
        cx->time += slept;
        power->last_residency = tick_to_ns(slept) / 1000UL;
    }

    spin_unlock(&power->stat_lock);
}
/*
 * Per-CPU idle entry point: ask the cpuidle governor for a target
 * C-state, perform the hardware entry (with the bus-master / LAPIC-timer
 * bookkeeping deep states require), then account the time slept and let
 * the governor reflect on the outcome.
 */
static void acpi_processor_idle(void)
{
    struct acpi_processor_power *power = processor_powers[smp_processor_id()];
    struct acpi_processor_cx *cx = NULL;
    int next_state;
    int64_t sleep_ticks = 0;
    uint64_t t1, t2 = 0;
    u32 exp = 0, pred = 0;
    u32 irq_traced[4] = { 0 };

    /*
     * Pick a state only when C-states are enabled, this CPU has power
     * info, no urgent vCPU needs low latency, and the governor selects a
     * non-C0 state. Demote a C3 choice when bus-master activity forbids
     * it, and clamp to the max_cstate command-line limit.
     */
    if ( max_cstate > 0 && power && !sched_has_urgent_vcpu() &&
         (next_state = cpuidle_current_governor->select(power)) > 0 )
    {
        cx = &power->states[next_state];
        if ( power->flags.bm_check && acpi_idle_bm_check() &&
             cx->type == ACPI_STATE_C3 )
            cx = power->safe_state;
        if ( cx->idx > max_cstate )
            cx = &power->states[max_cstate];
        menu_get_trace_data(&exp, &pred);
    }
    if ( !cx )
    {
        /* No usable C-state: fall back to the saved idle routine or HLT. */
        if ( pm_idle_save )
            pm_idle_save();
        else
            safe_halt();
        return;
    }

    cpufreq_dbs_timer_suspend();

    sched_tick_suspend();
    /* sched_tick_suspend() can raise TIMER_SOFTIRQ. Process it now. */
    process_pending_softirqs();

    /*
     * Interrupts must be disabled during bus mastering calculations and
     * for C2/C3 transitions.
     */
    local_irq_disable();

    /* Re-check haltability now that softirqs ran; bail out if work arrived. */
    if ( !cpu_is_haltable(smp_processor_id()) )
    {
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    /* Erratum workaround: avoid C3-type entry when an EOI is pending. */
    if ( (cx->type == ACPI_STATE_C3) && errata_c6_eoi_workaround() )
        cx = power->safe_state;

    power->last_state = cx;

    /*
     * Sleep:
     * ------
     * Invoke the current Cx state to put the processor to sleep.
     */
    switch ( cx->type )
    {
    case ACPI_STATE_C1:
    case ACPI_STATE_C2:
        if ( cx->type == ACPI_STATE_C1 || local_apic_timer_c2_ok )
        {
            /* Get start time (ticks) */
            t1 = get_tick();
            /* Trace cpu idle entry */
            TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);
            /* Invoke C2 */
            acpi_idle_do_entry(cx);
            /* Get end time (ticks) */
            t2 = get_tick();
            trace_exit_reason(irq_traced);
            /* Trace cpu idle exit */
            TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                     irq_traced[0], irq_traced[1],
                     irq_traced[2], irq_traced[3]);
            /* Re-enable interrupts */
            local_irq_enable();
            /* Compute time (ticks) that we were actually asleep */
            sleep_ticks = ticks_elapsed(t1, t2);
            break;
        }
        /*
         * Fall through: C2 with a LAPIC timer that stops in C2 needs the
         * same timer off/on handling as C3.
         */
    case ACPI_STATE_C3:
        /*
         * Before invoking C3, be aware that TSC/APIC timer may be
         * stopped by H/W. Without carefully handling of TSC/APIC stop issues,
         * deep C state can't work correctly.
         */
        /* preparing APIC stop */
        lapic_timer_off();

        /* Get start time (ticks) */
        t1 = get_tick();
        /* Trace cpu idle entry */
        TRACE_4D(TRC_PM_IDLE_ENTRY, cx->idx, t1, exp, pred);

        /*
         * disable bus master
         * bm_check implies we need ARB_DIS
         * !bm_check implies we need cache flush
         * bm_control implies whether we can do ARB_DIS
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if ( power->flags.bm_check && power->flags.bm_control )
        {
            spin_lock(&c3_cpu_status.lock);
            if ( ++c3_cpu_status.count == num_online_cpus() )
            {
                /*
                 * All CPUs are trying to go to C3
                 * Disable bus master arbitration
                 */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
            }
            spin_unlock(&c3_cpu_status.lock);
        }
        else if ( !power->flags.bm_check )
        {
            /* SMP with no shared cache... Invalidate cache */
            ACPI_FLUSH_CPU_CACHE();
        }

        /* Invoke C3 */
        acpi_idle_do_entry(cx);

        if ( power->flags.bm_check && power->flags.bm_control )
        {
            /* Enable bus master arbitration */
            spin_lock(&c3_cpu_status.lock);
            acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
            c3_cpu_status.count--;
            spin_unlock(&c3_cpu_status.lock);
        }

        /* Get end time (ticks) */
        t2 = get_tick();

        /* recovering TSC */
        cstate_restore_tsc();
        trace_exit_reason(irq_traced);
        /* Trace cpu idle exit */
        TRACE_6D(TRC_PM_IDLE_EXIT, cx->idx, t2,
                 irq_traced[0], irq_traced[1],
                 irq_traced[2], irq_traced[3]);

        /* Re-enable interrupts */
        local_irq_enable();
        /* recovering APIC */
        lapic_timer_on();
        /* Compute time (ticks) that we were actually asleep */
        sleep_ticks = ticks_elapsed(t1, t2);
        break;

    default:
        /* Unknown state type: undo the suspend work and return. */
        local_irq_enable();
        sched_tick_resume();
        cpufreq_dbs_timer_resume();
        return;
    }

    /*
     * NOTE(review): these counters are updated without power->stat_lock,
     * unlike acpi_update_idle_stats()/update_idle_stats() — confirm
     * whether this path predates the lock or should take it too.
     */
    cx->usage++;
    if ( sleep_ticks > 0 )
    {
        power->last_residency = tick_to_ns(sleep_ticks) / 1000UL;
        cx->time += sleep_ticks;
    }

    sched_tick_resume();
    cpufreq_dbs_timer_resume();

    /* Give the governor feedback on the residency actually achieved. */
    if ( cpuidle_current_governor->reflect )
        cpuidle_current_governor->reflect(power);
}