static inline u64 get_cpu_idle_time_greenmax(unsigned int cpu, u64 *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
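The helpers above fall back to a jiffy-based estimate whenever get_cpu_idle_time_us() returns -1ULL (i.e. NO_HZ idle accounting is unavailable). For reference, this is roughly what that get_cpu_idle_time_jiffy() fallback looked like in the older ondemand/conservative governors; the exact cputime helpers differ between kernel versions, so treat it as an illustrative sketch rather than the canonical implementation.

/* Illustrative sketch only -- reconstructed from the old ondemand governor. */
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu, cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
				  kstat_cpu(cpu).cpustat.system);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}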
Example #2
/*
 * Class:     edu_wayne_cs_bugu_util_NativeLib
 * Method:    getCPUIdleTime
 * Signature: (I)J
 */
JNIEXPORT jlong JNICALL Java_edu_wayne_cs_bugu_util_NativeLib_getCPUIdleTime
  (JNIEnv * env, jobject obj, jint cpu_num){
	jint i;
	jlong idletime = 0;
	for( i = 0; i < cpu_num; i++)
	{
		idletime += get_cpu_idle_time_us(i, NULL);
	}

	return idletime;
}
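Note that get_cpu_idle_time_us() is an in-kernel symbol, so a JNI wrapper like the one above only works when it is backed by kernel code; a purely userspace profiler would normally read the idle column of /proc/stat instead. A minimal userspace sketch of that approach follows (the helper name and error handling are illustrative, not part of the original library):

#include <stdio.h>

/* Hypothetical helper: return the aggregate idle time (in USER_HZ ticks)
 * from the first "cpu" line of /proc/stat, or -1 on error. */
static long long read_proc_stat_idle(void)
{
	unsigned long long user, nice, system, idle;
	FILE *fp = fopen("/proc/stat", "r");

	if (fp == NULL)
		return -1;
	if (fscanf(fp, "cpu %llu %llu %llu %llu",
		   &user, &nice, &system, &idle) != 4)
		idle = (unsigned long long)-1;
	fclose(fp);

	return (long long)idle;
}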
Example #3
static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		return get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
Example #4
static void cpuload_timer(struct work_struct *work)
{
	unsigned int i, avg_load, max_load, load = 0;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
						tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
						tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			idle_time = wall_time;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		if (tmp_info->load > load)
			load = tmp_info->load;
	}
	max_load = load;
	avg_load = load / num_online_cpus();

	if (high_transition == 0) {
		if (max_load > trans_load) {
			cancel_delayed_work_sync(&busfreq_work);
			high_transition = 1;
			sampling_rate = HZ/25;  //40ms
		}
	} else {
		if (max_load <= trans_load) {
			cancel_delayed_work_sync(&busfreq_work);
			high_transition = 0;
			sampling_rate = HZ/10; //100ms
		}
	}

	queue_delayed_work_on(0, busfreq_wq, &busfreq_work, 0);

	if (hybrid == 1)
		queue_delayed_work_on(0, cpuload_wq, &cpuload_work, HZ/25);
	else
		queue_delayed_work_on(0, cpuload_wq, &dummy_work, HZ);

}
Example #5
static u64 get_idle_time(int cpu)
{
	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ so we can rely on cpustat.idle */
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	else
		idle = usecs_to_cputime64(idle_time);

	return idle;
}
Example #6
static u64 get_idle_time(int cpu)
{
	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ so we can rely on cpustat.idle */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
#else
		idle = kstat_cpu(cpu).cpustat.idle;
#endif
	else
		idle = usecs_to_cputime64(idle_time);

	return idle;
}
Example #7
static u64 get_idle_time(int cpu)
{
	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	else
		idle = usecs_to_cputime64(idle_time);

	return idle;
}
Example #8
static cputime64_t get_idle_time(int cpu)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);
	cputime64_t idle;

	if (idle_time == -1ULL) {
		/* !NO_HZ so we can rely on cpustat.idle */
		idle = kstat_cpu(cpu).cpustat.idle;
		idle = cputime64_add(idle, arch_idle_time(cpu));
	} else
		idle = usecs_to_cputime64(idle_time);

	return idle;
}
Example #9
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
#ifdef CONFIG_ARCH_HI6XXX
		tuners->od_6xxx_up_threshold = HI6XXX_FREQUENCY_UP_THRESHOLD;
		tuners->od_6xxx_down_threshold = HI6XXX_FREQUENCY_DOWN_THRESHOLD;
#endif
		/*
		 * In nohz/micro accounting case we set the minimum frequency
		 * not depending on HZ, but fixed (very low). The deferred
		 * timer might skip some samples if idle/sleeping as needed.
		*/
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
			DEF_FREQUENCY_DOWN_DIFFERENTIAL;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	mutex_init(&dbs_data->mutex);
	return 0;
}
Example #10
static u64 get_idle_time(int cpu)
{
	u64 idle, idle_usecs = -1ULL;

	if (cpu_online(cpu))
		idle_usecs = get_cpu_idle_time_us(cpu, NULL);

	if (idle_usecs == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	else
		idle = idle_usecs * NSEC_PER_USEC;

	return idle;
}
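This variant checks cpu_online() before calling get_cpu_idle_time_us() and multiplies by NSEC_PER_USEC because, in kernels of this vintage, kcpustat values are kept in nanoseconds. The iowait counterpart in the same proc code follows the identical pattern; the sketch below is reconstructed from memory of that era's fs/proc/stat.c, so check it against the target tree.

static u64 get_iowait_time(int cpu)
{
	u64 iowait, iowait_usecs = -1ULL;

	if (cpu_online(cpu))
		iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);

	if (iowait_usecs == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
		iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
	else
		iowait = iowait_usecs * NSEC_PER_USEC;

	return iowait;
}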
Example #11
/*lint --e{551,713}*/
static cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)/*lint !e551*/
{

    u64 idle_time = get_cpu_idle_time_us((int)cpu, NULL);/*lint !e530 !e712 */
    /*lint --e{501} */
    if (idle_time == -1ULL)
    {
        return get_cpu_idle_time_jiffy(cpu, wall);
    }
    else
    {
        idle_time += get_cpu_iowait_time_us((int)cpu, wall);
    }

    return idle_time;
}
Example #12
static u64 get_idle_time(int cpu)
{
#ifdef CONFIG_MTK_IDLE_TIME_FIX
	u64 idle, idle_time = get_cpu_idle_time_us_wo_cpuoffline(cpu, NULL);
#else
	u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL);
#endif

	if (idle_time == -1ULL)
		/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	else
		idle = usecs_to_cputime64(idle_time);

	return idle;
}
Example #13
static unsigned long determine_cpu_load(void)
{
    int i;
    unsigned long total_load = 0;

    /* get cpu load of each cpu */
    for_each_online_cpu(i) {
        unsigned int load;
        unsigned int idle_time, wall_time;
        cputime64_t cur_wall_time, cur_idle_time;
        struct hotplug_cpu_info *info;

        info = &per_cpu(hotplug_info, i);

        /* update both cur_idle_time and cur_wall_time */
        cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

        /* how much wall time has passed since last iteration? */
        wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                    info->prev_cpu_wall);
        info->prev_cpu_wall = cur_wall_time;

        /* how much idle time has passed since last iteration? */
        idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                    info->prev_cpu_idle);
        info->prev_cpu_idle = cur_idle_time;

        if (unlikely(!wall_time || wall_time < idle_time))
            continue;

        /* load is the percentage of time not spent in idle */
        load = 100 * (wall_time - idle_time) / wall_time;
        info->load[info->idx++] = load;
        if (info->idx >= LOAD_MONITOR)
            info->idx = 0;

#ifdef CONFIG_DEBUG_PRINTK
        hp_printk("cpu %d load %u ", i, load);
#endif

        total_load += load;
    }

    return total_load / num_online_cpus();
}
Example #14
unsigned long long mtprof_get_cpu_idle(int cpu)
{
	u64 unused = 0, idle_time = 0;

	idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
	{	
		return get_cpu_idle_time_jiffy(cpu, &unused);
	}
	else
	{	
		idle_time += get_cpu_iowait_time_us(cpu, &unused);
	}
	printk("update time is %llu\n", unused);

	return idle_time;
}
Example #15
/*
 * Choose the cpu frequency based off the load. For now choose the minimum
 * frequency that will satisfy the load, which is not always the lowest power.
 */
static unsigned int cpufreq_interactive_calc_freq(unsigned int cpu)
{
	unsigned int delta_time;
	unsigned int idle_time;
	unsigned int cpu_load;
	u64 current_wall_time;
	u64 current_idle_time;

	current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);

	idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
	delta_time = (unsigned int) current_wall_time - freq_change_time;

	cpu_load = 100 * (delta_time - idle_time) / delta_time;

	return policy->cur * cpu_load / 100;
}
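To make the arithmetic concrete: if 20,000 us of wall time elapsed since the last frequency change and 5,000 us of it was idle, cpu_load is 75, so a current frequency of 1,000,000 kHz maps to a target of 750,000 kHz. Below is a standalone sketch of the same calculation, with an added guard against a zero or inconsistent delta that the original omits.

/* Hypothetical illustration of the load math above; not part of the governor. */
static unsigned int calc_target_freq(unsigned int cur_freq_khz,
				     unsigned int delta_time_us,
				     unsigned int idle_time_us)
{
	unsigned int cpu_load;

	if (delta_time_us == 0 || idle_time_us > delta_time_us)
		return cur_freq_khz;	/* avoid division by zero / underflow */

	cpu_load = 100 * (delta_time_us - idle_time_us) / delta_time_us;
	return cur_freq_khz * cpu_load / 100;
}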
Example #16
static void init_cpu_load_trend(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct hotplug_cpu_info *info;
		int j;

		info = &per_cpu(hotplug_info, i);

		info->prev_cpu_idle = get_cpu_idle_time_us(i,
						&(info->prev_cpu_wall));
		info->prev_cpu_io = get_cpu_iowait_time_us(i,
						&(info->prev_cpu_wall));

		for (j = 0; j < LOAD_MONITOR; j++) {
			info->load[j] = 100;
		}
		info->idx = 0;
	}
}
Example #17
/*
 * Choose the cpu frequency based off the load. For now choose the minimum
 * frequency that will satisfy the load, which is not always the lowest power.
 */
static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu)
{
	unsigned int delta_time;
	unsigned int idle_time;
	unsigned int cpu_load;
	unsigned int newfreq;
	u64 current_wall_time;
	u64 current_idle_time;

	current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);

	idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
	delta_time = (unsigned int) current_wall_time - freq_change_time;

	cpu_load = 100 * (delta_time - idle_time) / delta_time;

	if (cpu_load > 25) newfreq = policy->max;
	else newfreq = policy->cur * cpu_load / 100;

	return newfreq;
}
Example #18
static void cpufreq_idle(void)
{
	struct timer_list *t;
	u64 *cpu_time_in_idle;
	u64 *cpu_idle_exit_time;

	pm_idle_old();

	if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
			return;

	/* Timer to fire in 1-2 ticks, jiffie aligned. */
	t = &per_cpu(cpu_timer, smp_processor_id());
	cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
	cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());

	if (timer_pending(t) == 0) {
		*cpu_time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), cpu_idle_exit_time);
		mod_timer(t, jiffies + 2);
	}
}
Example #19
static u64 get_idle_time(int cpu)
{
	u64 idle, idle_time = -1ULL;

	/* FIXME: the idle time from get_cpu_idle_time_us() is reset when the CPU is hot-plugged.
	 *        Use cpustat[CPUTIME_IDLE] instead; it is less accurate, but stable. */
#if 0
	if (cpu_online(cpu))
		idle_time = get_cpu_idle_time_us(cpu, NULL);
#endif

	if (idle_time == -1ULL)
		idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
	else
		idle = usecs_to_cputime64(idle_time);

	// FIXME: this idle value has a bug on our shark platform:
	//        it is not monotonically increasing
	if (DEBUG_PRINT)
		printk("acedebug: get_idle_time: cpu=%d, idle=%llu\n", cpu, idle);

	return idle;
}
Example #20
static int init_monitor(void)
{
	unsigned int i;
	TIME_STEP = sampling_rate * HZ / 1000;
	printk("[Monitor] Monitor init\n");

	pgdata = kmalloc(sizeof(struct cpu_monitor_info_s), GFP_KERNEL);
	if (pgdata == NULL)
		return -ENOMEM;
	memset(pgdata, 0x00, sizeof(struct cpu_monitor_info_s));

	/* Initialize cpu_monitor_info_s pgdata */

	for (i = 0; i < NUM_CPUS; ++i)
	{
		pgdata->cpu_info[i].cpuid = i;
		/* each cpu records its own previous wall time */
		pgdata->cpu_info[i].prev_cpu_idle = get_cpu_idle_time_us(i, &(pgdata->cpu_info[i].prev_cpu_wall));
	}

	register_timer(pgdata, TIME_STEP);

	return 0;
}
Example #21
/*
 * Choose the cpu frequency based off the load. For now choose the minimum
 * frequency that will satisfy the load, which is not always the lowest power.
 */
static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu)
{
	unsigned int delta_time;
	unsigned int idle_time;
	unsigned int cpu_load;
	unsigned int newfreq;
	u64 current_wall_time;
	u64 current_idle_time;

	current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);

	idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
	delta_time = (unsigned int) current_wall_time - freq_change_time;

	cpu_load = 100 * (delta_time - idle_time) / delta_time;

	if (cpu_load > 98) newfreq = policy->max;
	else newfreq = policy->cur * cpu_load / 100;
// Addition by Huexxx...
	if (newfreq < 300000) newfreq = 300000;	
// End of Huexxx's addition
	return newfreq;
}
Example #22
static int od_init(struct dbs_data *dbs_data)
{
	struct od_dbs_tuners *tuners;
	u64 idle_time;
	int cpu;

	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
	if (!tuners) {
		pr_err("%s: kzalloc failed\n", __func__);
		return -ENOMEM;
	}

	cpu = get_cpu();
	idle_time = get_cpu_idle_time_us(cpu, NULL);
	put_cpu();
	if (idle_time != -1ULL) {
		/* Idle micro accounting is supported. Use finer thresholds */
		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
	} else {
		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;

		/* For correct statistics, we need 10 ticks for each measure */
		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
			jiffies_to_usecs(10);
	}

	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
	tuners->ignore_nice_load = 0;
	tuners->powersave_bias = default_powersave_bias;
	tuners->io_is_busy = should_io_be_busy();

	dbs_data->tuners = tuners;
	mutex_init(&dbs_data->mutex);
	return 0;
}
Example #23
/* We use the same work function to scale up and down */
static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask = work_cpumask;

	for_each_cpu(cpu, tmp_mask) {
		if (!suspended && (target_freq >= freq_threshold || target_freq == policy->max)) {
			if (policy->cur < 400000) {
				// avoid quick jump from lowest to highest
				target_freq = resume_speed;
			}
			if (nr_running() == 1) {
				cpumask_clear_cpu(cpu, &work_cpumask);
				return;
			}
			__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
		} else {
			if (!suspended) {
				target_freq = cpufreq_interactivex_calc_freq(cpu);
				__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_L);
			} else { // special care when suspended
				if (target_freq > suspendfreq) {
					__cpufreq_driver_target(policy, suspendfreq, CPUFREQ_RELATION_H);
				} else {
					target_freq = cpufreq_interactivex_calc_freq(cpu);
					if (target_freq < policy->cur)
						__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
				}
			}
		}
		freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
		cpumask_clear_cpu(cpu, &work_cpumask);
	}
}
Example #24
static void hotplug_timer(struct work_struct *work)
{
	struct cpu_hotplug_info tmp_hotplug_info[4];
	int i;
	unsigned int load = 0;
	unsigned int cpu_rq_min=0;
	unsigned long nr_rq_min = -1UL;
	unsigned int select_off_cpu = 0;
	enum flag flag_hotplug;

	mutex_lock(&hotplug_lock);

	// exit if we turned off dynamic hotplug by tegrak
	// cancel the timer
	if (!hotplug_on) {
		if (!second_core_on && cpu_online(1) == 1)
			cpu_down(1);
		goto off_hotplug;
	}

	if (user_lock == 1)
		goto no_hotplug;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(hotplug_cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int)cputime64_sub(cur_idle_time,
							tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int)cputime64_sub(cur_wall_time,
							tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			goto no_hotplug;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		load += tmp_info->load;
		/*find minimum runqueue length*/
		tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);

		if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
			nr_rq_min = tmp_hotplug_info[i].nr_running;

			cpu_rq_min = i;
		}
	}

	for (i = NUM_CPUS - 1; i > 0; --i) {
		if (cpu_online(i) == 0) {
			select_off_cpu = i;
			break;
		}
	}

	/* standalone hotplug */
	flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);

	/*do not ever hotplug out CPU 0*/
	if((cpu_rq_min == 0) && (flag_hotplug == HOTPLUG_OUT))
		goto no_hotplug;

	/*cpu hotplug*/
	if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
		DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
		cpu_up(select_off_cpu);
		DBG_PRINT("cpu%d on\n", select_off_cpu);
		hotpluging_rate = CHECK_DELAY * 4;
	} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
		DBG_PRINT("cpu%d turning off!\n", cpu_rq_min);
		cpu_down(cpu_rq_min);
		DBG_PRINT("cpu%d off!\n", cpu_rq_min);
		hotpluging_rate = CHECK_DELAY;
	} 

no_hotplug:
	//printk("hotplug_timer done.\n");

	queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);
off_hotplug:

	mutex_unlock(&hotplug_lock);
}
Example #25
static void hotplug_timer(struct work_struct *work)
{
	struct cpu_hotplug_info tmp_hotplug_info[4];
	int i;
	unsigned int load = 0;
	unsigned int cpu_rq_min=0;
	unsigned long nr_rq_min = -1UL;
	unsigned int select_off_cpu = 0;
	enum flag flag_hotplug;

	mutex_lock(&hotplug_lock);

	if (user_lock == 1)
		goto no_hotplug;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(hotplug_cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int)cputime64_sub(cur_idle_time,
							tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int)cputime64_sub(cur_wall_time,
							tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			goto no_hotplug;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		load += tmp_info->load;
		/*find minimum runqueue length*/
		tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);

		if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
			nr_rq_min = tmp_hotplug_info[i].nr_running;

			cpu_rq_min = i;
		}
	}

	for (i = NUM_CPUS - 1; i > 0; --i) {
		if (cpu_online(i) == 0) {
			select_off_cpu = i;
			break;
		}
	}

	/* standalone hotplug */
	flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);

	/*cpu hotplug*/
	if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
#endif
		cpu_up(select_off_cpu);
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d on\n", select_off_cpu);
#endif
		hotpluging_rate = CHECK_DELAY * 4;
	} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d turning off!\n", cpu_rq_min);
#endif
		cpu_down(cpu_rq_min);
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d off!\n", cpu_rq_min);
#endif
		hotpluging_rate = CHECK_DELAY;
	} 

no_hotplug:

	queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);

	mutex_unlock(&hotplug_lock);
}
Example #26
static void hotplug_timer(struct work_struct *work)
{
	extern unsigned int sysctl_sched_olord_period;
	unsigned int i, load = 0;
	int offline_target = -1, online_target = -1;
	struct cpu_time_info *tmp_info;
	cputime64_t cur_wall_time, cur_idle_time;
	unsigned int idle_time, wall_time;
	
	printk(KERN_INFO "%u\n", sysctl_sched_olord_period);
	
	mutex_lock(&hotplug_lock);
	
	/* Find the target CPUs for online and offline */
	for (i = 0; i < (sizeof cpus / sizeof (int)); i++){

		//printk(KERN_INFO "cpus[%u]: %u\n", i, cpus[i]);

		if(cpu_online(cpus[i])){
			offline_target = cpus[i];
			break;
		}
		else
			online_target = cpus[i];
	}
	
	//printk(KERN_INFO "offline: %d, online %d\n", offline_target, online_target);
	
	
	/* Calculate load */
	tmp_info = &per_cpu(hotplug_cpu_time, offline_target);

	cur_idle_time = get_cpu_idle_time_us(offline_target, &cur_wall_time);

	/* Use cputime64_sub for older kernels */
	//idle_time = (unsigned int)cputime64_sub(cur_idle_time,
	//		tmp_info->prev_cpu_idle);
	idle_time = (unsigned int)(cur_idle_time - tmp_info->prev_cpu_idle);

	tmp_info->prev_cpu_idle = cur_idle_time;

	/* Use cputime64_sub for older kernels */
	//wall_time = (unsigned int)cputime64_sub(cur_wall_time,
	//		tmp_info->prev_cpu_wall);
	wall_time = (cur_wall_time - tmp_info->prev_cpu_wall);

	tmp_info->prev_cpu_wall = cur_wall_time;

	if (wall_time < idle_time)
		goto no_hotplug;

	load = 100 * (wall_time - idle_time) / wall_time;

	//printk(KERN_INFO "Load %u\n", load);
	
	/* Offline */
	if (((load < trans_load_l_inuse)) &&
	    (num_online_cpus() > 1) && (offline_target > 0)) {
		//printk(KERN_INFO "load: %u cpu %u turning off\n", load, offline_target);
		cpu_down(offline_target);
		hotpluging_rate = CHECK_DELAY;
		 
	/* Online */
	} else if (((load > trans_load_h_inuse)) &&
		(num_present_cpus() > num_online_cpus()) &&
		   (online_target != -1)) {
		//printk(KERN_INFO "load: %u cpu %u turning on\n", load, online_target);
		cpu_up(online_target);
		hotpluging_rate = CHECK_DELAY * 10;
	}
		
no_hotplug:

	mutex_unlock(&hotplug_lock);

	/* If we're being removed, don't queue more work */
	if (likely(die == 0))
		queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);

}
Example #27
unsigned long long mtprof_get_cpu_idle(int cpu)
{
    unsigned long long *unused = 0;
    return get_cpu_idle_time_us(cpu, unused);
}
Example #28
static void cpufreq_interactivex_timer(unsigned long data)
{
	u64 delta_idle;
	u64 update_time;
	u64 *cpu_time_in_idle;
	u64 *cpu_idle_exit_time;
	struct timer_list *t;

	u64 now_idle = get_cpu_idle_time_us(data,
						&update_time);


	cpu_time_in_idle = &per_cpu(time_in_idle, data);
	cpu_idle_exit_time = &per_cpu(idle_exit_time, data);

	if (update_time == *cpu_idle_exit_time)
		return;

	delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);

	/* Scale up if there were no idle cycles since coming out of idle */
	if (delta_idle == 0) {
		if (policy->cur == policy->max)
			return;

		if (nr_running() < 1)
			return;

		target_freq = policy->max;

		cpumask_set_cpu(data, &work_cpumask);
		queue_work(up_wq, &freq_scale_work);
		return;
	}

	/*
	 * There is a window where the cpu utilization can go from low to high
	 * between timer expirations: delta_idle will be > 0 and the cpu will
	 * be 100% busy, preventing idle from running, and this timer from
	 * firing. So set up another timer to fire and check cpu utilization.
	 * Do not set up the timer if there is no scheduled work.
	 */
	t = &per_cpu(cpu_timer, data);
	if (!timer_pending(t) && nr_running() > 0) {
			*cpu_time_in_idle = get_cpu_idle_time_us(
					data, cpu_idle_exit_time);
			mod_timer(t, jiffies + 2);
	}

	if (policy->cur == policy->min)
		return;

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
		return;

	target_freq = policy->min;
	cpumask_set_cpu(data, &work_cpumask);
	queue_work(down_wq, &freq_scale_work);
}
Example #29
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time) {
		dbgpr("timer %d: no valid idle exit sample\n", (int) data);
		goto exit;
	}

#if DEBUG
	if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
		dbgpr("timer %d: late by %d ticks\n",
		      (int) data, jiffies - pcpu->cpu_timer.expires);
#endif

	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000) {
		dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data,
		      delta_time, idle_exit_time, pcpu->timer_run_time);
		goto rearm;
	}

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int) cputime64_sub(now_idle,
						 pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  pcpu->freq_change_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Combine short-term load (since last idle timer started or timer
	 * function re-armed itself) and long-term load (since last frequency
	 * change) to determine new target frequency
	 */
	new_freq = cpufreq_interactive_get_target(cpu_load, load_since_change,
						  pcpu->policy);

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		dbgpr("timer %d: cpufreq_frequency_table_target error\n", (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq == new_freq)
	{
		dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq);
		goto rearm_if_notmax;
	}

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
		    min_sample_time) {
			dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
			goto rearm;
		}
	}

	dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq);

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
#if DEBUG
		up_request_time = ktime_to_us(ktime_get());
#endif
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling) {
				dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data);
				goto exit;
			}

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer, jiffies + 2);
		dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time);
	}

exit:
	return;
}