Example no. 1
void usecase_update_governor_state(void)
{
	bool cancel_work = false;

	/*
	 * usecase_mutex must be unlocked to let delayed_usecase_work()
	 * exit safely. Protect this function with its own mutex so it is
	 * not executed by multiple threads at that point.
	 */
	mutex_lock(&state_mutex);
	mutex_lock(&usecase_mutex);

	if (uc_master_enable && (usecase_conf[UX500_UC_AUTO].enable ||
		usecase_conf[UX500_UC_USER].enable)) {
		/*
		 * Usecases are enabled. If we are in early suspend put
		 * governor to work.
		 */
		if ((is_early_suspend ||
			(usecase_conf[UX500_UC_USER].enable &&
			usecase_conf[UX500_UC_USER].force_usecase)) &&
			!is_work_scheduled) {
			schedule_delayed_work_on(0, &work_usecase,
				msecs_to_jiffies(CPULOAD_MEAS_DELAY));
			is_work_scheduled = true;
		} else if (!is_early_suspend && is_work_scheduled) {
			/* Exiting from early suspend. */
			cancel_work = true;
		}

	} else if (is_work_scheduled) {
		/* No usecase enabled or governor is not enabled. */
		cancel_work = true;
	}

	if (cancel_work) {
		/*
		 * usecase_mutex is used by delayed_usecase_work(), so it must
		 * be unlocked before we cancel the work (see the userspace
		 * sketch after this function).
		 */
		mutex_unlock(&usecase_mutex);
		cancel_delayed_work_sync(&work_usecase);
		mutex_lock(&usecase_mutex);

		is_work_scheduled = false;

		/*
		 * Set the default settings before exiting,
		 * except when a forced usecase is enabled.
		 */
		if (!(usecase_conf[UX500_UC_USER].enable &&
			usecase_conf[UX500_UC_USER].force_usecase))
			set_cpu_config(UX500_UC_NORMAL);
	}

	mutex_unlock(&usecase_mutex);
	mutex_unlock(&state_mutex);
}
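The unlock/relock around cancel_delayed_work_sync() above exists because delayed_usecase_work() takes usecase_mutex itself, so cancelling the work synchronously while still holding that mutex would deadlock. Below is a minimal userspace sketch of the same pattern built on pthreads; the names (work_lock, worker, work_done) are illustrative only and not part of the driver.

/* Userspace sketch: release the lock the worker needs before waiting
 * for the worker to finish, mirroring the unlock before
 * cancel_delayed_work_sync() above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static bool work_done;

static void *worker(void *arg)
{
	/* Like delayed_usecase_work(), the worker needs work_lock. */
	pthread_mutex_lock(&work_lock);
	work_done = true;
	pthread_mutex_unlock(&work_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_mutex_lock(&work_lock);
	pthread_create(&t, NULL, worker, NULL);

	/* Drop work_lock before waiting; joining while holding it would
	 * deadlock, exactly like calling cancel_delayed_work_sync() with
	 * usecase_mutex held. */
	pthread_mutex_unlock(&work_lock);
	pthread_join(t, NULL);

	pthread_mutex_lock(&work_lock);
	printf("work_done = %d\n", work_done);
	pthread_mutex_unlock(&work_lock);
	return 0;
}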
/* Workqueue callbacks that apply a cpu configuration from process context. */
void disable_cpu_config(struct work_struct *work)
{
	set_cpu_config(0);
}

void enable_cpu_config(struct work_struct *work)
{
	set_cpu_config(1);
}
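disable_cpu_config() and enable_cpu_config() have the work_func_t signature, so they are meant to be bound to work items. A hypothetical wiring could look like the sketch below; disable_cpu_config_work, enable_cpu_config_work and example_suspend_notify() are made-up names for illustration, not part of the driver.

#include <linux/workqueue.h>

/* Hypothetical work items bound to the two handlers above. */
static DECLARE_WORK(disable_cpu_config_work, disable_cpu_config);
static DECLARE_WORK(enable_cpu_config_work, enable_cpu_config);

/* Illustrative trigger: queue one of the handlers depending on whether
 * the system is entering or leaving suspend. */
static void example_suspend_notify(bool entering_suspend)
{
	if (entering_suspend)
		schedule_work(&disable_cpu_config_work);
	else
		schedule_work(&enable_cpu_config_work);
}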
Example no. 4
static void delayed_usecase_work(struct work_struct *work)
{
	unsigned long avg, load, trend, balance;
	bool inc_perf = false;
	bool dec_perf = false;
	u32 irqs_per_s;

	/* determine loadavg */
	avg = determine_loadavg();
	hp_printk("loadavg = %lu lower th %lu upper th %lu\n",
					avg, lower_threshold, upper_threshold);

	/* determine instant load */
	load = determine_cpu_load();
	hp_printk("cpu instant load = %lu max %lu\n", load, max_instant);

	/* determine load trend */
	trend = determine_cpu_load_trend();
	hp_printk("cpu load trend = %lu min %lu unbal %lu\n",
					trend, min_trend, trend_unbalance);

	/* determine load balancing */
	balance = determine_cpu_balance_trend();
	hp_printk("load balancing trend = %lu min %lu\n",
					balance, max_unbalance);

	irqs_per_s = get_num_interrupts_per_s();

	/* Don't let the configuration change in the middle of our calculations. */
	mutex_lock(&usecase_mutex);

	/* detect "instant" load increase */
	if (load > max_instant || irqs_per_s > exit_irq_per_s) {
		inc_perf = true;
	} else if (!usecase_conf[UX500_UC_USER].enable &&
			usecase_conf[UX500_UC_AUTO].enable) {
		/* detect high loadavg use case */
		if (avg > upper_threshold)
			inc_perf = true;
		/* detect idle use case */
		else if (trend < min_trend)
			dec_perf = true;
		/* detect unbalanced low cpu load use case */
		else if ((balance > max_unbalance) && (trend < trend_unbalance))
			dec_perf = true;
		/* detect low loadavg use case */
		else if (avg < lower_threshold)
			dec_perf = true;
		/*
		 * All user use cases disabled, current load not triggering
		 * any change.
		 */
		else if (user_config_updated)
			dec_perf = true;
	} else {
		dec_perf = true;
	}

	/*
	 * set_cpu_config() will not update the config unless it has been
	 * changed.
	 */
	if (dec_perf) {
		if (usecase_conf[UX500_UC_USER].enable)
			set_cpu_config(UX500_UC_USER);
		else if (usecase_conf[UX500_UC_AUTO].enable)
			set_cpu_config(UX500_UC_AUTO);
	} else if (inc_perf &&
		!(usecase_conf[UX500_UC_USER].enable &&
		usecase_conf[UX500_UC_USER].force_usecase)) {
		set_cpu_config(UX500_UC_NORMAL);
	}

	mutex_unlock(&usecase_mutex);

	/* reprogram the scheduled work */
	schedule_delayed_work_on(0, &work_usecase,
				msecs_to_jiffies(CPULOAD_MEAS_DELAY));
}
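The decision tree in delayed_usecase_work() can be read as a small pure function over the measured values. The sketch below extracts that logic so it can be exercised in userspace; the threshold numbers are made up, the user-usecase and user_config_updated branches are collapsed into a single auto_enabled flag, and all names here are illustrative rather than the driver's.

#include <stdbool.h>
#include <stdio.h>

struct load_sample {
	unsigned long avg, load, trend, balance;
	unsigned int irqs_per_s;
};

/* Illustrative thresholds, not the driver's tuned values. */
static const unsigned long max_instant = 85;
static const unsigned long upper_threshold = 25;
static const unsigned long lower_threshold = 10;
static const unsigned long min_trend = 5;
static const unsigned long max_unbalance = 210;
static const unsigned long trend_unbalance = 40;
static const unsigned int exit_irq_per_s = 1000;

/* Returns +1 to raise performance, -1 to lower it, 0 to leave it alone. */
static int classify(const struct load_sample *s, bool auto_enabled)
{
	if (s->load > max_instant || s->irqs_per_s > exit_irq_per_s)
		return 1;	/* "instant" load spike */
	if (!auto_enabled)
		return -1;	/* no automatic usecase: fall back to the enabled config */
	if (s->avg > upper_threshold)
		return 1;	/* sustained high loadavg */
	if (s->trend < min_trend)
		return -1;	/* idle */
	if (s->balance > max_unbalance && s->trend < trend_unbalance)
		return -1;	/* unbalanced low cpu load */
	if (s->avg < lower_threshold)
		return -1;	/* sustained low loadavg */
	return 0;
}

int main(void)
{
	struct load_sample busy = { 40, 90, 50, 100, 200 };
	struct load_sample idle = { 3, 10, 2, 100, 50 };

	printf("busy -> %d, idle -> %d\n",
	       classify(&busy, true), classify(&idle, true));
	return 0;
}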