Example #1
/* Calculate the CPU load (in percent) accumulated since the previous run */
unsigned int rkusb_calc_laod(unsigned long data)
{
	unsigned int idle_ticks, total_ticks;
	unsigned int load = 0;
	unsigned int j;
	int next = (int)data;

	idle_ticks = UINT_MAX;
	{
		cputime64_t total_idle_ticks;
		cputime64_t total_total_ticks;
		unsigned int tmp_idle_ticks;

		j = 0;
		/* wall-clock ticks elapsed since the previous run */
		total_total_ticks = jiffies64_to_cputime64(get_jiffies_64());
		total_ticks = (unsigned int) cputime64_sub(total_total_ticks, last_total_ticks);
		last_total_ticks = get_jiffies_64();

		/* idle ticks elapsed since the previous run */
		total_idle_ticks = rkusb_get_cpu_idle_time(j);
		tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks, last_idle_ticks);
		last_idle_ticks = total_idle_ticks;
		if (tmp_idle_ticks < idle_ticks)
			idle_ticks = tmp_idle_ticks;
	}
	if (likely(total_ticks > idle_ticks))
		load = (100 * (total_ticks - idle_ticks)) / total_ticks;

	/* re-arm the timer to run again in 'next' seconds */
	mod_timer(&cal_load_timer, jiffies + (next * HZ));
	S_ERR("%s:load=%d,tick=%d\n", __func__, load, next);
	return load;
}
Example #2
void get_cpu_info(unsigned long arg)
{
	unsigned int i;
	unsigned int frequency;
	cputime64_t curr_idle_time, curr_wall_time;
	unsigned int delta_wall_time, delta_idle_time;
	unsigned int cpu_load;
	
	struct cpu_monitor_info_s *pdata = (struct cpu_monitor_info_s *)arg;

	/* CPU frequency */
	frequency = cpufreq_quick_get(0);
	printk("[Monitor] cpu frequency: %u\n", frequency);

	for (i = 0; i < NUM_CPUS; ++i) {
		/* CPU load */
		curr_idle_time = get_cpu_idle_time_us(i, &curr_wall_time);
		delta_wall_time = (unsigned int) cputime64_sub(curr_wall_time, 
				pdata->cpu_info[i].prev_cpu_wall);
		pdata->cpu_info[i].prev_cpu_wall = curr_wall_time;

		delta_idle_time = (unsigned int) cputime64_sub(curr_idle_time, 
				pdata->cpu_info[i].prev_cpu_idle);
		pdata->cpu_info[i].prev_cpu_idle = curr_idle_time;

		if (delta_wall_time == 0 || delta_idle_time > delta_wall_time)
			cpu_load = 0;	/* discard an empty or bogus sample */
		else
			cpu_load = 100 * (delta_wall_time - delta_idle_time) / delta_wall_time;
		printk("[Monitor] cpu %u load: %u\n", i, cpu_load);
	}
	
	if (g_counter < 10)
		register_timer(pdata, TIME_STEP);
}
Example #3
static void cpuload_timer(struct work_struct *work)
{
	unsigned int i, avg_load, max_load, load = 0;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(cpu_time, i);

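		/*
		 * get_cpu_idle_time_us() returns this CPU's cumulative idle time in
		 * microseconds and reports the current wall time through cur_wall_time.
		 */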
		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int) cputime64_sub(cur_idle_time,
						tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int) cputime64_sub(cur_wall_time,
						tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		/* clamp so a racy sample cannot underflow the subtraction */
		if (wall_time < idle_time)
			idle_time = wall_time;

		if (wall_time)
			tmp_info->load = 100 * (wall_time - idle_time) / wall_time;
		else
			tmp_info->load = 0;

		/* remember the busiest CPU seen in this sample */
		if (tmp_info->load > load)
			load = tmp_info->load;
	}
	max_load = load;
	avg_load = load / num_online_cpus();

	if (high_transition == 0) {
		if (max_load > trans_load) {
			cancel_delayed_work_sync(&busfreq_work);
			high_transition = 1;
			sampling_rate = HZ/25;  //40ms
		}
	} else {
		if (max_load <= trans_load) {
			cancel_delayed_work_sync(&busfreq_work);
			high_transition = 0;
			sampling_rate = HZ/10; //100ms
		}
	}

	queue_delayed_work_on(0, busfreq_wq, &busfreq_work, 0);

	if (hybrid == 1)
		queue_delayed_work_on(0, cpuload_wq, &cpuload_work, HZ/25);
	else
		queue_delayed_work_on(0, cpuload_wq, &dummy_work, HZ);

}
Example #4
static unsigned long determine_cpu_load(void)
{
    int i;
    unsigned long total_load = 0;

    /* get cpu load of each cpu */
    for_each_online_cpu(i) {
        unsigned int load;
        unsigned int idle_time, wall_time;
        cputime64_t cur_wall_time, cur_idle_time;
        struct hotplug_cpu_info *info;

        info = &per_cpu(hotplug_info, i);

        /* update both cur_idle_time and cur_wall_time */
        cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

        /* how much wall time has passed since last iteration? */
        wall_time = (unsigned int) cputime64_sub(cur_wall_time,
                    info->prev_cpu_wall);
        info->prev_cpu_wall = cur_wall_time;

        /* how much idle time has passed since last iteration? */
        idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                    info->prev_cpu_idle);
        info->prev_cpu_idle = cur_idle_time;

        if (unlikely(!wall_time || wall_time < idle_time))
            continue;

        /* load is the percentage of time not spent in idle */
        load = 100 * (wall_time - idle_time) / wall_time;
        info->load[info->idx++] = load;
        if (info->idx >= LOAD_MONITOR)
            info->idx = 0;

#ifdef CONFIG_DEBUG_PRINTK
        hp_printk("cpu %d load %u ", i, load);
#endif

        total_load += load;
    }

    return total_load / num_online_cpus();
}
Example #5
File: system.c  Project: twobob/KK_kernel
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
	cputime64_t idle_time;
	cputime64_t cur_jiffies;
	cputime64_t busy_time;

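	/* idle is approximated as total wall-clock jiffies minus every accounted busy bucket */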
	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);

	busy_time = cputime64_add(busy_time,
			kstat_cpu(cpu).cpustat.nice);
	idle_time = cputime64_sub(cur_jiffies, busy_time);
	return jiffies_to_usecs(idle_time);
}
static void emc_last_stats_update(int last_sel)
{
	unsigned long flags;
	u64 cur_jiffies = get_jiffies_64();

	spin_lock_irqsave(&emc_stats.spinlock, flags);

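	/* credit the jiffies elapsed since the last update to the previously selected table entry */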
	if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
		emc_stats.time_at_clock[emc_stats.last_sel] = cputime64_add(
			emc_stats.time_at_clock[emc_stats.last_sel],
			cputime64_sub(cur_jiffies, emc_stats.last_update));

	emc_stats.last_update = cur_jiffies;

	if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
		emc_stats.clkchange_count++;
		emc_stats.last_sel = last_sel;
	}
	spin_unlock_irqrestore(&emc_stats.spinlock, flags);
}
Example #7
static inline cputime64_t rkusb_get_cpu_idle_time(unsigned int cpu)
{
	cputime64_t idle_time;
	cputime64_t cur_jiffies;
	cputime64_t busy_time;
	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);

	busy_time = cputime64_add(busy_time,	kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_jiffies, busy_time);
	S_INFO("%s:jiffies=%Ld,busy=%Ld,idle=%Ld" , __FILE__,
	        cur_jiffies,busy_time,idle_time);
	return idle_time;
}
static void hp_stats_update(unsigned int cpu, bool up)
{
    u64 cur_jiffies = get_jiffies_64();
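    /* the low bit of up_down_count records whether the CPU was online at the last update */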
    bool was_up = hp_stats[cpu].up_down_count & 0x1;

    if (was_up)
        hp_stats[cpu].time_up_total = cputime64_add(
                                          hp_stats[cpu].time_up_total, cputime64_sub(
                                              cur_jiffies, hp_stats[cpu].last_update));

    if (was_up != up) {
        hp_stats[cpu].up_down_count++;
        if ((hp_stats[cpu].up_down_count & 0x1) != up) {
            /* FIXME: sysfs user space CPU control breaks stats */
            pr_err("tegra hotplug stats out of sync with %s CPU%d",
                   (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
                   (cpu < CONFIG_NR_CPUS) ?  cpu : 0);
            hp_stats[cpu].up_down_count ^=  0x1;
        }
    }
    hp_stats[cpu].last_update = cur_jiffies;
}
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t idle_time;
	cputime64_t cur_wall_time;
	cputime64_t busy_time;

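	/* busy time is the sum of all non-idle cpustat buckets; idle is wall time minus busy */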
	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
			kstat_cpu(cpu).cpustat.system);

	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.nice);

	idle_time = cputime64_sub(cur_wall_time, busy_time);
	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);

	return (cputime64_t)jiffies_to_usecs(idle_time);
}
Example #10
static void cpufreq_interactivex_timer(unsigned long data)
{
	u64 delta_idle;
	u64 update_time;
	u64 *cpu_time_in_idle;
	u64 *cpu_idle_exit_time;
	struct timer_list *t;

	u64 now_idle = get_cpu_idle_time_us(data,
						&update_time);


	cpu_time_in_idle = &per_cpu(time_in_idle, data);
	cpu_idle_exit_time = &per_cpu(idle_exit_time, data);

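	/* if no wall time has passed since the recorded idle exit, the sample is empty */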
	if (update_time == *cpu_idle_exit_time)
		return;

	delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);

	/* Scale up if there were no idle cycles since coming out of idle */
	if (delta_idle == 0) {
		if (policy->cur == policy->max)
			return;

		if (nr_running() < 1)
			return;

		target_freq = policy->max;

		cpumask_set_cpu(data, &work_cpumask);
		queue_work(up_wq, &freq_scale_work);
		return;
	}

	/*
	 * There is a window where CPU utilization can go from low to high
	 * between timer expirations: delta_idle will be > 0 while the CPU is
	 * 100% busy, preventing idle from running and this timer from
	 * firing.  So set up another timer to recheck CPU utilization.
	 * Do not set up the timer if there is no scheduled work.
	 */
	t = &per_cpu(cpu_timer, data);
	if (!timer_pending(t) && nr_running() > 0) {
			*cpu_time_in_idle = get_cpu_idle_time_us(
					data, cpu_idle_exit_time);
			mod_timer(t, jiffies + 2);
	}

	if (policy->cur == policy->min)
		return;

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
		return;

	target_freq = policy->min;
	cpumask_set_cpu(data, &work_cpumask);
	queue_work(down_wq, &freq_scale_work);
}
Example #11
static void hotplug_timer(struct work_struct *work)
{
	struct cpu_hotplug_info tmp_hotplug_info[4];
	int i;
	unsigned int load = 0;
	unsigned int cpu_rq_min = 0;
	unsigned long nr_rq_min = -1UL;
	unsigned int select_off_cpu = 0;
	enum flag flag_hotplug;

	mutex_lock(&hotplug_lock);

	// if dynamic hotplug was switched off (tegrak option), cancel the timer and exit
	if (!hotplug_on) {
		if (!second_core_on && cpu_online(1) == 1)
			cpu_down(1);
		goto off_hotplug;
	}

	if (user_lock == 1)
		goto no_hotplug;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(hotplug_cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int)cputime64_sub(cur_idle_time,
							tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int)cputime64_sub(cur_wall_time,
							tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			goto no_hotplug;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		load += tmp_info->load;
		/*find minimum runqueue length*/
		tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);

		if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
			nr_rq_min = tmp_hotplug_info[i].nr_running;

			cpu_rq_min = i;
		}
	}

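	/* pick the highest-numbered offline CPU as the candidate to bring online */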
	for (i = NUM_CPUS - 1; i > 0; --i) {
		if (cpu_online(i) == 0) {
			select_off_cpu = i;
			break;
		}
	}

	/* standalone hotplug */
	flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);

	/* do not ever hotplug out CPU 0 */
	if ((cpu_rq_min == 0) && (flag_hotplug == HOTPLUG_OUT))
		goto no_hotplug;

	/*cpu hotplug*/
	if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
		DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
		cpu_up(select_off_cpu);
		DBG_PRINT("cpu%d on\n", select_off_cpu);
		hotpluging_rate = CHECK_DELAY * 4;
	} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
		DBG_PRINT("cpu%d turning off!\n", cpu_rq_min);
		cpu_down(cpu_rq_min);
		DBG_PRINT("cpu%d off!\n", cpu_rq_min);
		hotpluging_rate = CHECK_DELAY;
	} 

no_hotplug:
	//printk("hotplug_timer done.\n");

	queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);
off_hotplug:

	mutex_unlock(&hotplug_lock);
}
static void hotplug_timer(struct work_struct *work)
{
	struct cpu_hotplug_info tmp_hotplug_info[4];
	int i;
	unsigned int load = 0;
	unsigned int cpu_rq_min = 0;
	unsigned long nr_rq_min = -1UL;
	unsigned int select_off_cpu = 0;
	enum flag flag_hotplug;

	mutex_lock(&hotplug_lock);

	if (user_lock == 1)
		goto no_hotplug;

	for_each_online_cpu(i) {
		struct cpu_time_info *tmp_info;
		cputime64_t cur_wall_time, cur_idle_time;
		unsigned int idle_time, wall_time;

		tmp_info = &per_cpu(hotplug_cpu_time, i);

		cur_idle_time = get_cpu_idle_time_us(i, &cur_wall_time);

		idle_time = (unsigned int)cputime64_sub(cur_idle_time,
							tmp_info->prev_cpu_idle);
		tmp_info->prev_cpu_idle = cur_idle_time;

		wall_time = (unsigned int)cputime64_sub(cur_wall_time,
							tmp_info->prev_cpu_wall);
		tmp_info->prev_cpu_wall = cur_wall_time;

		if (wall_time < idle_time)
			goto no_hotplug;

		tmp_info->load = 100 * (wall_time - idle_time) / wall_time;

		load += tmp_info->load;
		/*find minimum runqueue length*/
		tmp_hotplug_info[i].nr_running = get_cpu_nr_running(i);

		if (i && nr_rq_min > tmp_hotplug_info[i].nr_running) {
			nr_rq_min = tmp_hotplug_info[i].nr_running;

			cpu_rq_min = i;
		}
	}

	for (i = NUM_CPUS - 1; i > 0; --i) {
		if (cpu_online(i) == 0) {
			select_off_cpu = i;
			break;
		}
	}

	/* standalone hotplug */
	flag_hotplug = standalone_hotplug(load, nr_rq_min, cpu_rq_min);

	/*cpu hotplug*/
	if (flag_hotplug == HOTPLUG_IN && cpu_online(select_off_cpu) == CPU_OFF) {
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d turning on!\n", select_off_cpu);
#endif
		cpu_up(select_off_cpu);
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d on\n", select_off_cpu);
#endif
		hotpluging_rate = CHECK_DELAY * 4;
	} else if (flag_hotplug == HOTPLUG_OUT && cpu_online(cpu_rq_min) == CPU_ON) {
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d turning off!\n", cpu_rq_min);
#endif
		cpu_down(cpu_rq_min);
#ifndef PRODUCT_SHIP
		DBG_PRINT("cpu%d off!\n", cpu_rq_min);
#endif
		hotpluging_rate = CHECK_DELAY;
	} 

no_hotplug:

	queue_delayed_work_on(0, hotplug_wq, &hotplug_work, hotpluging_rate);

	mutex_unlock(&hotplug_lock);
}
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time) {
		dbgpr("timer %d: no valid idle exit sample\n", (int) data);
		goto exit;
	}

#if DEBUG
	if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
		dbgpr("timer %d: late by %d ticks\n",
		      (int) data, jiffies - pcpu->cpu_timer.expires);
#endif

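	/* short-term sample: idle and wall time consumed since this timer was armed */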
	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000) {
		dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data,
		      delta_time, idle_exit_time, pcpu->timer_run_time);
		goto rearm;
	}

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

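	/* long-term sample: idle and wall time accumulated since the last frequency change */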
	delta_idle = (unsigned int) cputime64_sub(now_idle,
						 pcpu->freq_change_time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  pcpu->freq_change_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Combine short-term load (since last idle timer started or timer
	 * function re-armed itself) and long-term load (since last frequency
	 * change) to determine new target frequency
	 */
	new_freq = cpufreq_interactive_get_target(cpu_load, load_since_change,
						  pcpu->policy);

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		dbgpr("timer %d: cpufreq_frequency_table_target error\n", (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	if (pcpu->target_freq == new_freq) {
		dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq);
		goto rearm_if_notmax;
	}

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (new_freq < pcpu->target_freq) {
		if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
		    min_sample_time) {
			dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
			goto rearm;
		}
	}

	dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq);

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
#if DEBUG
		up_request_time = ktime_to_us(ktime_get());
#endif
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling) {
				dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data);
				goto exit;
			}

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer, jiffies + 2);
		dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time);
	}

exit:
	return;
}