Example no. 1
0
static int uptime_proc_show(struct seq_file *m, void *v)
{
	struct timespec uptime;
	struct timespec idle;
	u64 idletime;
	u64 nsec;
	u32 rem;
	int i;

	idletime = 0;
	for_each_possible_cpu(i)
		idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];

	do_posix_clock_monotonic_gettime(&uptime);
	monotonic_to_bootbased(&uptime);
	nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
	idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
	idle.tv_nsec = rem;
	seq_printf(m, "%lu.%02lu %lu.%02lu\n",
			(unsigned long) uptime.tv_sec,
			(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
			(unsigned long) idle.tv_sec,
			(idle.tv_nsec / (NSEC_PER_SEC / 100)));
	return 0;
}
Example no. 2
0
/*
 * Credit the time elapsed since the last update to the current
 * frequency's time_in_state bucket for @cpu, then record the new
 * timestamp.  Returns 0 (also when the per-cpu table is absent).
 *
 * Fixes vs. previous version:
 *  - NULL-check the per-cpu table before dereferencing it.
 *  - Debug printks: terminate each message with '\n' (otherwise
 *    consecutive lines are concatenated), use %llu for the unsigned
 *    64-bit last_time, and print an actual time_in_state element
 *    rather than converting the array pointer itself.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);

	#ifdef CONFIG_NC_DEBUG
	/* NOTE(review): stat is read outside the lock here; debug-only. */
	printk(KERN_INFO "============== CPUFREQ STATS\n");
	printk(KERN_INFO "CPUFREQ STATS: cpu: %u\n", stat->cpu);
	printk(KERN_INFO "CPUFREQ STATS: total_trans: %u\n", stat->total_trans);
	printk(KERN_INFO "CPUFREQ STATS: last_time: %llu\n",
	       (unsigned long long) stat->last_time);
	printk(KERN_INFO "CPUFREQ STATS: max_state: %u\n", stat->max_state);
	printk(KERN_INFO "CPUFREQ STATS: state_num: %u\n", stat->state_num);
	printk(KERN_INFO "CPUFREQ STATS: last_index: %u\n", stat->last_index);
	if (stat->time_in_state)
		printk(KERN_INFO "CPUFREQ STATS: time_in_state: %llu\n",
		       (unsigned long long) cputime64_to_jiffies64(
				stat->time_in_state[stat->last_index]));
	printk(KERN_INFO "CPUFREQ STATS: *freq_table: %p\n", &stat->freq_table);
	#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	printk(KERN_INFO "CPUFREQ STATS: *trans_table: %p\n", &stat->trans_table);
	#endif
	printk(KERN_INFO "============= END CPUFREQ STATS\n");
	#endif

	return 0;
}
/*
 * Sample the wall/idle/iowait counters for cpu @j, diff them against
 * the values cached in its greenmax_info slot, and store the resulting
 * load percentage in cur_cpu_load.  Leaves the cached load untouched
 * when the wall-time delta is zero or smaller than the idle delta.
 */
static inline void cpufreq_greenmax_calc_load(int j)
{
	struct greenmax_info_s *info = &per_cpu(greenmax_info, j);
	u64 wall_now, idle_now, iowait_now;
	unsigned int delta_wall, delta_idle, delta_iowait;

	idle_now = get_cpu_idle_time_greenmax(j, &wall_now);
	iowait_now = get_cpu_iowait_time(j, &wall_now);

	delta_wall = wall_now - info->prev_cpu_wall;
	info->prev_cpu_wall = wall_now;

	delta_idle = idle_now - info->prev_cpu_idle;
	info->prev_cpu_idle = idle_now;

	delta_iowait = iowait_now - info->prev_cpu_iowait;
	info->prev_cpu_iowait = iowait_now;

	if (ignore_nice) {
		/* Treat nice time accrued since last sample as idle. */
		u64 nice_delta = kcpustat_cpu(j).cpustat[CPUTIME_NICE] -
				 info->prev_cpu_nice;
		unsigned long nice_jiffies =
			(unsigned long) cputime64_to_jiffies64(nice_delta);

		info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];

		delta_idle += jiffies_to_usecs(nice_jiffies);
	}

	/*
	 * For the purpose of ondemand, waiting for disk IO is an
	 * indication that you're performance critical, and not that
	 * the system is actually idle. So subtract the iowait time
	 * from the cpu idle time.
	 */
	if (io_is_busy && delta_idle >= delta_iowait)
		delta_idle -= delta_iowait;

	if (unlikely(!delta_wall || delta_wall < delta_idle))
		return;

	info->cur_cpu_load = 100 * (delta_wall - delta_idle) / delta_wall;
}
Example no. 4
0
/*
 * Return the load percentage of @cpu over the interval since the last
 * call, based on wall/idle/iowait counter deltas cached in the per-cpu
 * cpuload structure.  Returns 0 when the wall-time delta is zero or
 * smaller than the idle delta.
 */
static unsigned int calc_cur_load(unsigned int cpu)
{
	struct cpu_load_data *data = &per_cpu(cpuload, cpu);
	u64 wall_now, idle_now, iowait_now;
	unsigned int d_wall, d_idle, d_iowait;

	idle_now = get_cpu_idle_time(cpu, &wall_now);
	iowait_now = get_cpu_iowait_time(cpu, &wall_now);

	d_wall = (unsigned int) (wall_now - data->prev_cpu_wall);
	data->prev_cpu_wall = wall_now;

	d_idle = (unsigned int) (idle_now - data->prev_cpu_idle);
	data->prev_cpu_idle = idle_now;

	d_iowait = (unsigned int) (iowait_now - data->prev_cpu_iowait);
	data->prev_cpu_iowait = iowait_now;

	if (ignore_nice) {
		/* Count nice time accrued since the last sample as idle. */
		u64 nice_delta = kcpustat_cpu(cpu).cpustat[CPUTIME_NICE] -
				 data->prev_cpu_nice;
		unsigned long nice_jiffies =
			(unsigned long) cputime64_to_jiffies64(nice_delta);

		data->prev_cpu_nice = kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

		d_idle += jiffies_to_usecs(nice_jiffies);
	}

	/* iowait counts as busy, not idle, for governor purposes. */
	if (io_is_busy && d_idle >= d_iowait)
		d_idle -= d_iowait;

	if (unlikely(!d_wall || d_wall < d_idle))
		return 0;

	return 100 * (d_wall - d_idle) / d_wall;
}