Example #1
0
/*
 * full_idle - enter the idle operating point for the duration of the idle,
 * then restore the idle-task operating point on the way out.
 *
 * @idle_parms:    per-CPU idle bookkeeping (stats, md parameters).
 * @idle_task_opt: operating point for the DPM_IDLE_TASK_STATE.
 * @idle_opt:      operating point for the DPM_IDLE_STATE.
 *
 * If the two operating points are mutually reachable by frequency scaling
 * alone, the transition is done with precomputed fscalers around
 * basic_idle(); otherwise we fall back to full dpm_set_os() transitions,
 * which may involve long-latency operations (e.g. voltage scaling).
 *
 * NOTE(review): appears to be called with interrupts disabled from
 * dpm_idle() — confirm; the "interrupt already pending" preemption check
 * below only makes sense in that context.
 */
static void
full_idle(struct dpm_idle_parms *idle_parms,
	  struct dpm_opt *idle_task_opt, struct dpm_opt *idle_opt)
{
	dpm_fscaler idle_fscaler, idle_task_fscaler;

	/* Fast path: both directions of the transition can be handled by
	   frequency scaling only. */
	if (fscaleable(&idle_task_opt->md_opt, &idle_opt->md_opt) &&
	    fscaleable(&idle_opt->md_opt, &idle_task_opt->md_opt)) {

		/* In case we've spent so much time getting ready that an
		   interrupt is already pending we can preempt the idle.  */

		/* Precompute both fscalers BEFORE the pending-interrupt
		   check, so the check covers the setup cost too. */
		idle_fscaler = compute_fscaler(&idle_task_opt->md_opt, 
					       &idle_opt->md_opt);

		idle_task_fscaler = compute_fscaler(&idle_opt->md_opt, 
						    &idle_task_opt->md_opt);

		if (return_from_idle_immediate()) {
			preempt_idle(idle_parms);
			return;
		}
		stat_irq_check_time(idle_parms, dpm_md_time());
			
		/* Scale down: idle-task -> idle operating point. */
		dpm_quick_enter_state(DPM_IDLE_STATE);
#ifdef CONFIG_DPM_OPT_STATS
		dpm_update_stats(&idle_opt->stats, &idle_task_opt->stats);
#endif
		idle_fscaler(&idle_opt->md_opt.regs);
		/* basic_idle() returns nonzero for a completed idle,
		   zero when the idle was preempted. */
		if (basic_idle(idle_parms))
			incr_stat(full_idles);
		else
			incr_stat(idle_preemptions);
		/* Scale back up: idle -> idle-task operating point. */
		idle_task_fscaler(&idle_task_opt->md_opt.regs);
		dpm_quick_enter_state(DPM_IDLE_TASK_STATE);
#ifdef CONFIG_DPM_OPT_STATS
		dpm_update_stats(&idle_task_opt->stats, &idle_opt->stats);
#endif

	} else {

		/* If you're concerned at all about interrupt latency you don't
		   want to be here.  The current policy requires a voltage
		   scale or some other long-latency operation to move between
		   idle and idle-task. */

		dpm_set_os(DPM_IDLE_STATE);
		if (basic_idle(idle_parms))
			incr_stat(inefficient_idles);
		else
			incr_stat(idle_preemptions);
		dpm_set_os(DPM_IDLE_TASK_STATE);
	}
}
Example #2
0
/*
 * dpm_idle - power-management-aware idle entry point.
 *
 * Moves the OS into DPM_IDLE_TASK_STATE, then, with interrupts disabled,
 * decides how to idle:
 *   - DPM disabled:                    plain basic_idle().
 *   - active state isn't idle-task:    count an interrupted idle, skip.
 *   - idle and idle-task share an operating point, the active operating
 *     point is stale, or the DPM lock is contended:  quick_idle().
 *   - otherwise:                       full_idle() with a real operating-
 *                                      point transition.
 *
 * Interrupts remain disabled from critical_save_and_cli() until
 * critical_restore_flags(); the idle primitives are expected to re-enable
 * them as appropriate while waiting.
 */
void
dpm_idle(void)
{
	unsigned long flags;
	struct dpm_idle_parms *idle_parms = &dpm_idle_parms;
	struct dpm_opt *idle_task_opt, *idle_opt;

	/* Detach the idle task from any explicit task state and enter
	   the idle-task operating state. */
	current->dpm_state = DPM_NO_STATE;
	dpm_set_os(DPM_IDLE_TASK_STATE);

	dpm_md_idle_set_parms(&idle_parms->md);
		
#ifdef EXTREME_WORST_CASE
	/* Deliberately pessimize caches/TLB to measure worst-case
	   idle-exit latency. */
	flush_instruction_cache();
	flush_dcache_all();
	local_flush_tlb_all();
#endif

	critical_save_and_cli(flags);

	/* Only idle if nothing became runnable while we were setting up. */
	if (!current->need_resched) {

		incr_stat(idles);
		stat_start_time(idle_parms);

		if (!dpm_enabled) {

			basic_idle(idle_parms);

		} else if (dpm_active_state != DPM_IDLE_TASK_STATE) {

			/* An interrupt changed the state after
			   dpm_set_os() above. */
			incr_stat(interrupted_idles);

		} else {
			idle_task_opt = dpm_active_policy-> 
				classes[DPM_IDLE_TASK_STATE]->opt;
			idle_opt = dpm_active_policy-> 
				classes[DPM_IDLE_STATE]->opt;

			/* Take the cheap path when no operating-point
			   change is possible or the DPM lock is busy;
			   note dpm_trylock() nonzero means "not taken". */
			if ((dpm_active_opt != idle_task_opt) ||
			    (idle_task_opt == idle_opt) ||
			    dpm_trylock()) {

				quick_idle(idle_parms);

			} else {
				/* Lock acquired only to validate state;
				   drop it before the long transition. */
				dpm_unlock();
				full_idle(idle_parms, idle_task_opt, idle_opt);
			}
		}
		latency_stats(idle_parms);
	}
	critical_restore_flags(flags);
}
Example #3
0
/*
 * get_device_list - format the registered character devices into 'page',
 * then append the block-device list.
 *
 * @page: destination buffer (assumed large enough — typically one page;
 *        TODO confirm against callers, there is no bounds check here).
 *
 * Returns the total number of bytes written.  Runs under chrdevs_lock,
 * and (with CONFIG_DPM_NONPREEMPT) in the non-preemptible DPM state for
 * the duration of the scan.
 */
int get_device_list(char * page)
{
	int major;
	int len = sprintf(page, "Character devices:\n");

	read_lock(&chrdevs_lock);
#ifdef CONFIG_DPM_NONPREEMPT
	dpm_set_os(DPM_NONPREEMPT_STATE);
#endif
	for (major = 0; major < MAX_CHRDEV; major++) {
		if (!chrdevs[major].fops)
			continue;
		len += sprintf(page + len, "%3d %s\n", major,
			       chrdevs[major].name);
	}
#ifdef CONFIG_DPM_NONPREEMPT
	dpm_set_os(current->dpm_state);
#endif
	read_unlock(&chrdevs_lock);

	return len + get_blkdev_list(page + len);
}
Example #4
0
/*
 * dpm_set_task_state - set the DPM task state of a process.
 *
 * @pid:        target process id; 0 means the calling process.
 * @task_state: encoded state: -(DPM_TASK_STATE_LIMIT + 1) clears the
 *              state (DPM_NO_STATE); values with |v| <= the limit are
 *              offsets from DPM_TASK_STATE; anything else is rejected.
 *
 * Returns 0 on success, -EINVAL for an out-of-range state, -ENOENT if
 * no task with that pid exists.  Every outcome is traced.
 */
static int
dpm_set_task_state(pid_t pid, dpm_state_t task_state)
{
	struct task_struct *task;

	/* Decode the user-visible encoding into an absolute state. */
	if (task_state == -(DPM_TASK_STATE_LIMIT + 1)) {
		task_state = DPM_NO_STATE;
	} else if (abs(task_state) > DPM_TASK_STATE_LIMIT) {
		dpm_trace(DPM_TRACE_SET_TASK_STATE, pid, task_state, -EINVAL);
		return -EINVAL;
	} else {
		task_state += DPM_TASK_STATE;
	}

	read_lock(&tasklist_lock);

	task = (pid == 0) ? current : find_task_by_pid(pid);
	if (!task) {
		read_unlock(&tasklist_lock);
		dpm_trace(DPM_TRACE_SET_TASK_STATE, pid, task_state, -ENOENT);
		return -ENOENT;
	}

	task->dpm_state = task_state;
	read_unlock(&tasklist_lock);

	dpm_trace(DPM_TRACE_SET_TASK_STATE, pid, task_state, 0);

	/* Changing our own state: have the OS pick it up immediately. */
	if (pid == 0)
		dpm_set_os(task->dpm_state);

	return 0;
}