static void full_idle(struct dpm_idle_parms *idle_parms,
                      struct dpm_opt *idle_task_opt, struct dpm_opt *idle_opt)
{
        dpm_fscaler idle_fscaler, idle_task_fscaler;

        if (fscaleable(&idle_task_opt->md_opt, &idle_opt->md_opt) &&
            fscaleable(&idle_opt->md_opt, &idle_task_opt->md_opt)) {

                /* In case we've spent so much time getting ready that an
                   interrupt is already pending we can preempt the idle. */

                idle_fscaler = compute_fscaler(&idle_task_opt->md_opt,
                                               &idle_opt->md_opt);
                idle_task_fscaler = compute_fscaler(&idle_opt->md_opt,
                                                    &idle_task_opt->md_opt);

                if (return_from_idle_immediate()) {
                        preempt_idle(idle_parms);
                        return;
                }

                stat_irq_check_time(idle_parms, dpm_md_time());
                dpm_quick_enter_state(DPM_IDLE_STATE);
#ifdef CONFIG_DPM_OPT_STATS
                dpm_update_stats(&idle_opt->stats, &idle_task_opt->stats);
#endif
                idle_fscaler(&idle_opt->md_opt.regs);

                if (basic_idle(idle_parms))
                        incr_stat(full_idles);
                else
                        incr_stat(idle_preemptions);

                idle_task_fscaler(&idle_task_opt->md_opt.regs);
                dpm_quick_enter_state(DPM_IDLE_TASK_STATE);
#ifdef CONFIG_DPM_OPT_STATS
                dpm_update_stats(&idle_task_opt->stats, &idle_opt->stats);
#endif
        } else {

                /* If you're concerned at all about interrupt latency you
                   don't want to be here.  The current policy requires a
                   voltage scale or some other long-latency operation to
                   move between idle and idle-task. */

                dpm_set_os(DPM_IDLE_STATE);

                if (basic_idle(idle_parms))
                        incr_stat(inefficient_idles);
                else
                        incr_stat(idle_preemptions);

                dpm_set_os(DPM_IDLE_TASK_STATE);
        }
}
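/*
 * Context for the fscaler calls above (illustrative note, not from this
 * file): a dpm_fscaler is assumed to be a pointer to a routine that
 * performs a frequency-only change using the register image of the target
 * operating point, roughly:
 *
 *      typedef void (*dpm_fscaler)(struct dpm_regs *regs);
 *
 * compute_fscaler() is assumed to select such a routine for moving between
 * the two md_opt register images, which is why full_idle() first checks
 * fscaleable() in both directions before committing to the fast path.
 */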
/* Idle without changing the operating point: only the DPM state
   accounting changes around the actual wait-for-interrupt. */
static inline void quick_idle(struct dpm_idle_parms *idle_parms)
{
        dpm_quick_enter_state(DPM_IDLE_STATE);

        if (basic_idle(idle_parms))
                incr_stat(quick_idles);
        else
                incr_stat(idle_preemptions);

        dpm_quick_enter_state(DPM_IDLE_TASK_STATE);
}
/* Main DPM idle entry point.  Interrupts are disabled across the idle
   decision; if no reschedule is pending, choose between a quick idle
   (no operating-point change needed or possible right now) and a full,
   frequency-scaled idle. */
void dpm_idle(void)
{
        unsigned long flags;
        struct dpm_idle_parms *idle_parms = &dpm_idle_parms;
        struct dpm_opt *idle_task_opt, *idle_opt;

        current->dpm_state = DPM_NO_STATE;
        dpm_set_os(DPM_IDLE_TASK_STATE);
        dpm_md_idle_set_parms(&idle_parms->md);

#ifdef EXTREME_WORST_CASE
        flush_instruction_cache();
        flush_dcache_all();
        local_flush_tlb_all();
#endif

        critical_save_and_cli(flags);

        if (!current->need_resched) {
                incr_stat(idles);
                stat_start_time(idle_parms);

                if (!dpm_enabled) {
                        basic_idle(idle_parms);
                } else if (dpm_active_state != DPM_IDLE_TASK_STATE) {
                        incr_stat(interrupted_idles);
                } else {
                        idle_task_opt = dpm_active_policy->
                                classes[DPM_IDLE_TASK_STATE]->opt;
                        idle_opt = dpm_active_policy->
                                classes[DPM_IDLE_STATE]->opt;

                        if ((dpm_active_opt != idle_task_opt) ||
                            (idle_task_opt == idle_opt) ||
                            dpm_trylock()) {
                                quick_idle(idle_parms);
                        } else {
                                dpm_unlock();
                                full_idle(idle_parms, idle_task_opt, idle_opt);
                        }
                }
                latency_stats(idle_parms);
        }
        critical_restore_flags(flags);
}
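/*
 * Illustrative sketch (not part of the original source): dpm_idle() is
 * meant to take the place of the plain wait-for-interrupt step in the
 * architecture idle loop.  The loop below is an assumption for
 * illustration only; the real hook point and names are platform-specific.
 */
#if 0   /* example only, not compiled */
void cpu_idle(void)
{
        for (;;) {
                while (!current->need_resched)
                        dpm_idle();     /* power-managed idle */
                schedule();
        }
}
#endif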
int card_group_operate_key_deal(void)
{
#if 0
        int key = 0;
        extern int g_key_val;

        key = g_key_val;
        g_key_val = 0;
        if (key != 0)
                return key;

        basic_idle(&main_basic);
        socket_interrupt();
        //if (a0106_has_pool_card()) {
        //        return 'a';
        //}
#endif
        return 0;
}