/*
 * /proc read handler: print the name, accumulated residency time, and
 * entry count of each operating point, one per line.
 */

static int read_proc_dpm_opt_stats(char *page, char **start, off_t offset,
				   int count, int *eof, void *data)
{
	int len = 0;
	struct dpm_opt *opt;
	struct list_head *p;
	dpm_md_time_t total_time;

	if (dpm_lock_interruptible())
		return -ERESTARTSYS;

	if (!dpm_enabled) {
		dpm_unlock();
		len += sprintf(page + len, "DPM IS DISABLED\n");
		*eof = 1;
		return len;
	}

	for (p = dpm_opts.next; p != &dpm_opts; p = p->next) {
		opt = list_entry(p, struct dpm_opt, list);
		len += sprintf(page + len, "%s", opt->name);

		/* Credit the active operating point with the time
		   accumulated since it was last charged. */

		total_time = opt->stats.total_time;
		if (opt == dpm_active_opt)
			total_time += dpm_md_time() - opt->stats.start_time;

		len += sprintf_u64(page + len, 0, " ", total_time);
		len += sprintf_u64(page + len, 0, " ", opt->stats.count);
		len += sprintf(page + len, "\n");
	}

	dpm_unlock();
	*eof = 1;
	return len;
}
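/*
 * Fold the timestamps of the just-completed idle pass into the maximum
 * latencies tracked in idle_lats.
 */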
static void latency_stats(struct dpm_idle_parms *idle_parms)
{
	u32 latency;

	/* Latency from idle exit back to the idle task. */

	if (idle_parms->entry_time && idle_parms->exit_time) {
		latency = dpm_md_time() - idle_parms->exit_time;
		if (latency > idle_lats.max_latency_to_idle_task)
			idle_lats.max_latency_to_idle_task = latency;
	}

	if (idle_parms->entry_time) {

		/* Latency from the start of the idle task to idle entry. */

		latency = idle_parms->entry_time - idle_parms->start_time;
		if (latency > idle_lats.max_latency_to_idle)
			idle_lats.max_latency_to_idle = latency;

		/* Latency from the final interrupt check to idle entry. */

		if (idle_parms->irq_check_time) {
			latency = idle_parms->entry_time -
				idle_parms->irq_check_time;
			if (latency > idle_lats.max_cs_to_idle)
				idle_lats.max_cs_to_idle = latency;
		}
	}
}
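/*
 * An interrupt became pending before we could enter idle: count the
 * preemption and record an entry time with no exit time.
 */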
static void preempt_idle(struct dpm_idle_parms *idle_parms)
{
	incr_stat(idle_preemptions);
	stat_entry_time(idle_parms, dpm_md_time());
	stat_exit_time(idle_parms, 0);
}
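/*
 * Begin a new idle pass: clear the per-pass timestamps and record the
 * start time.
 */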
static void stat_start_time(struct dpm_idle_parms *idle_parms)
{
	idle_parms->entry_time = 0;
	idle_parms->exit_time = 0;
	idle_parms->start_time = dpm_md_time();
	idle_parms->irq_check_time = 0;
}
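/*
 * Idle by moving from the idle-task operating point to the idle
 * operating point.  If a simple frequency scale works in both
 * directions, scale down around the wait-for-interrupt; otherwise fall
 * back to full operating point changes, which may be slow.
 */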
static void full_idle(struct dpm_idle_parms *idle_parms,
		      struct dpm_opt *idle_task_opt, struct dpm_opt *idle_opt)
{
	dpm_fscaler idle_fscaler, idle_task_fscaler;

	if (fscaleable(&idle_task_opt->md_opt, &idle_opt->md_opt) &&
	    fscaleable(&idle_opt->md_opt, &idle_task_opt->md_opt)) {

		idle_fscaler = compute_fscaler(&idle_task_opt->md_opt,
					       &idle_opt->md_opt);
		idle_task_fscaler = compute_fscaler(&idle_opt->md_opt,
						    &idle_task_opt->md_opt);

		/* In case we've spent so much time getting ready that an
		   interrupt is already pending, we can preempt the idle. */

		if (return_from_idle_immediate()) {
			preempt_idle(idle_parms);
			return;
		}

		stat_irq_check_time(idle_parms, dpm_md_time());

		dpm_quick_enter_state(DPM_IDLE_STATE);
#ifdef CONFIG_DPM_OPT_STATS
		dpm_update_stats(&idle_opt->stats, &idle_task_opt->stats);
#endif
		idle_fscaler(&idle_opt->md_opt.regs);

		if (basic_idle(idle_parms))
			incr_stat(full_idles);
		else
			incr_stat(idle_preemptions);

		idle_task_fscaler(&idle_task_opt->md_opt.regs);
		dpm_quick_enter_state(DPM_IDLE_TASK_STATE);
#ifdef CONFIG_DPM_OPT_STATS
		dpm_update_stats(&idle_task_opt->stats, &idle_opt->stats);
#endif
	} else {

		/* If you're concerned at all about interrupt latency you
		   don't want to be here.  The current policy requires a
		   voltage scale or some other long-latency operation to
		   move between idle and idle-task. */

		dpm_set_os(DPM_IDLE_STATE);
		if (basic_idle(idle_parms))
			incr_stat(inefficient_idles);
		else
			incr_stat(idle_preemptions);
		dpm_set_os(DPM_IDLE_TASK_STATE);
	}
}