/* Example 1 */
/*
 * Sum the smoothed (trend) load of all online CPUs.
 *
 * Each CPU keeps a ring buffer of its last LOAD_MONITOR load samples in
 * per_cpu(hotplug_info); the trend for a CPU is the arithmetic mean of
 * those samples.  Returns the sum of the per-cpu trends (0..100 per cpu).
 */
static unsigned long determine_cpu_load_trend(void)
{
    int i, k;
    unsigned long total_load = 0;

    /* Get cpu load of each cpu */
    for_each_online_cpu(i) {
        unsigned int load = 0;
        struct hotplug_cpu_info *info;

        info = &per_cpu(hotplug_info, i);

        /* Average the last LOAD_MONITOR samples for this cpu. */
        for (k = 0; k < LOAD_MONITOR; k++)
            load += info->load[k];

        load /= LOAD_MONITOR;

#ifdef CONFIG_DEBUG_PRINTK
        hp_printk("cpu %d load trend %u\n", i, load);
#endif

        total_load += load;
    }

    return total_load;
}
/* Example 2 */
/*
 * Compute the average instantaneous cpu load across all online CPUs.
 *
 * For each CPU, load is the percentage of wall time NOT spent idle since
 * the previous invocation (deltas tracked via prev_cpu_wall/prev_cpu_idle
 * in per_cpu(hotplug_info)).  The sample is also pushed into the per-cpu
 * load[] ring buffer consumed by determine_cpu_load_trend().
 *
 * Returns the sum of per-cpu loads divided by num_online_cpus()
 * (0..100 on average; CPUs with no elapsed wall time are skipped).
 */
static unsigned long determine_cpu_load(void)
{
    int i;
    unsigned long total_load = 0;

    /* get cpu load of each cpu */
    for_each_online_cpu(i) {
        unsigned int load;
        unsigned int idle_time, wall_time;
        cputime64_t cur_wall_time, cur_idle_time;
        struct hotplug_cpu_info *info;

        info = &per_cpu(hotplug_info, i);

        /* update both cur_idle_time and cur_wall_time */
        cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

        /* how much wall time has passed since last iteration? */
        wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                    info->prev_cpu_wall);
        info->prev_cpu_wall = cur_wall_time;

        /* how much idle time has passed since last iteration? */
        idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                    info->prev_cpu_idle);
        info->prev_cpu_idle = cur_idle_time;

        /*
         * Skip bogus samples: no elapsed wall time (would divide by
         * zero) or idle exceeding wall time (counter glitch/wrap).
         */
        if (unlikely(!wall_time || wall_time < idle_time))
            continue;

        /* load is the percentage of time not spent in idle */
        load = 100 * (wall_time - idle_time) / wall_time;
        /* record sample in the trend ring buffer */
        info->load[info->idx++] = load;
        if (info->idx >= LOAD_MONITOR)
            info->idx = 0;

#ifdef CONFIG_DEBUG_PRINTK
        hp_printk("cpu %d load %u ", i, load);
#endif

        total_load += load;
    }

    return total_load / num_online_cpus();
}
/* Example 3 */
/*
 * Periodic work item driving the automatic usecase governor.
 *
 * Gathers the current load metrics (loadavg, instantaneous cpu load,
 * load trend, load-balancing trend, interrupt rate), decides whether
 * performance should be raised or lowered, applies the matching cpu
 * configuration under usecase_mutex, and reschedules itself after
 * CPULOAD_MEAS_DELAY ms.
 */
static void delayed_usecase_work(struct work_struct *work)
{
	unsigned long loadavg, instant_load, load_trend, balance_trend;
	bool raise_perf = false;
	bool lower_perf = false;
	u32 irq_rate;

	/* determine loadavg  */
	loadavg = determine_loadavg();
	hp_printk("loadavg = %lu lower th %lu upper th %lu\n",
					loadavg, lower_threshold, upper_threshold);

	/* determine instant load */
	instant_load = determine_cpu_load();
	hp_printk("cpu instant load = %lu max %lu\n", instant_load, max_instant);

	/* determine load trend */
	load_trend = determine_cpu_load_trend();
	hp_printk("cpu load trend = %lu min %lu unbal %lu\n",
					load_trend, min_trend, trend_unbalance);

	/* determine load balancing */
	balance_trend = determine_cpu_balance_trend();
	hp_printk("load balancing trend = %lu min %lu\n",
					balance_trend, max_unbalance);

	irq_rate = get_num_interrupts_per_s();

	/* Dont let configuration change in the middle of our calculations. */
	mutex_lock(&usecase_mutex);

	/* detect "instant" load increase */
	if (instant_load > max_instant || irq_rate > exit_irq_per_s) {
		raise_perf = true;
	} else if (!usecase_conf[UX500_UC_USER].enable &&
			usecase_conf[UX500_UC_AUTO].enable) {
		if (loadavg > upper_threshold) {
			/* high loadavg use case */
			raise_perf = true;
		} else if (load_trend < min_trend) {
			/* idle use case */
			lower_perf = true;
		} else if (balance_trend > max_unbalance &&
				load_trend < trend_unbalance) {
			/* unbalanced low cpu load use case */
			lower_perf = true;
		} else if (loadavg < lower_threshold) {
			/* low loadavg use case */
			lower_perf = true;
		} else if (user_config_updated) {
			/*
			 * All user use cases disabled, current load not
			 * triggering any change.
			 */
			lower_perf = true;
		}
	} else {
		lower_perf = true;
	}

	/*
	 * set_cpu_config() will not update the config unless it has been
	 * changed.
	 */
	if (lower_perf) {
		if (usecase_conf[UX500_UC_USER].enable)
			set_cpu_config(UX500_UC_USER);
		else if (usecase_conf[UX500_UC_AUTO].enable)
			set_cpu_config(UX500_UC_AUTO);
	} else if (raise_perf &&
		!(usecase_conf[UX500_UC_USER].enable &&
		usecase_conf[UX500_UC_USER].force_usecase)) {
		set_cpu_config(UX500_UC_NORMAL);
	}

	mutex_unlock(&usecase_mutex);

	/* reprogramm scheduled work */
	schedule_delayed_work_on(0, &work_usecase,
				msecs_to_jiffies(CPULOAD_MEAS_DELAY));

}