Example #1
static int loadavg_proc_show(struct seq_file *m, void *v)
{
	unsigned long avnrun[3], nr_runnable = 0;
	struct cpumask cpus_allowed;
	int i;

	rcu_read_lock();
	if (task_in_nonroot_cpuacct(current) &&
		in_noninit_pid_ns(current->nsproxy->pid_ns)) {

		get_avenrun_from_tsk(current, avnrun, FIXED_1/200, 0);

		cpumask_copy(&cpus_allowed, cpu_possible_mask);
		if (task_subsys_state(current, cpuset_subsys_id)) {
			memset(&cpus_allowed, 0, sizeof(cpus_allowed));
			get_tsk_cpu_allowed(current, &cpus_allowed);
		}

		for_each_cpu_and(i, cpu_possible_mask, &cpus_allowed)
			nr_runnable += task_ca_running(current, i);

	} else {
		get_avenrun(avnrun, FIXED_1/200, 0);
		nr_runnable = nr_running();
	}
	rcu_read_unlock();

	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
		nr_runnable, nr_threads,
		task_active_pid_ns(current)->last_pid);
	return 0;
}
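The avenrun[] values printed above are fixed-point numbers with FSHIFT fractional bits; LOAD_INT()/LOAD_FRAC() split them into the familiar "N.NN" form, and the FIXED_1/200 passed to get_avenrun() rounds the two-digit fraction instead of truncating it. A minimal userspace sketch of that decoding, using the standard kernel definitions (the sample value is made up):

#include <stdio.h>

/* Fixed-point load-average helpers as defined in the kernel headers. */
#define FSHIFT		11			/* bits of fractional precision */
#define FIXED_1		(1 << FSHIFT)		/* 1.0 in fixed point */
#define LOAD_INT(x)	((x) >> FSHIFT)
#define LOAD_FRAC(x)	LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

int main(void)
{
	/* Hypothetical avenrun[0] sample: 1.75 expressed in fixed point. */
	unsigned long avnrun = 7 * FIXED_1 / 4;

	/* FIXED_1/200 (~0.005) is the rounding offset loadavg_proc_show()
	 * passes to get_avenrun() above. */
	avnrun += FIXED_1 / 200;

	printf("%lu.%02lu\n", LOAD_INT(avnrun), LOAD_FRAC(avnrun));	/* prints 1.75 */
	return 0;
}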
Example #2
/* We use the same work function to scale up and down */
static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
{
	unsigned int cpu;
	unsigned int newtarget;
	cpumask_t tmp_mask = work_cpumask;
	newtarget = FREQ_THRESHOLD;

	for_each_cpu(cpu, tmp_mask) {
		if (!suspended) {
			if (target_freq == policy->max) {
				if (nr_running() == 1) {
					cpumask_clear_cpu(cpu, &work_cpumask);
					return;
				}
//				__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
				__cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H);
			} else {
				target_freq = cpufreq_interactivex_calc_freq(cpu);
				__cpufreq_driver_target(policy, target_freq,
							CPUFREQ_RELATION_L);
			}
		}
		freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
		cpumask_clear_cpu(cpu, &work_cpumask);
	}
}
static void rq_work_fn(struct work_struct *work)
{
	int64_t time_diff = 0;
	int64_t nr_run = 0;
	unsigned long flags = 0;
	int64_t cur_time = ktime_to_ns(ktime_get());

	spin_lock_irqsave(&rq_data->lock, flags);

	if (rq_data->last_time == 0)
		rq_data->last_time = cur_time;
	if (rq_data->nr_run_avg == 0)
		rq_data->total_time = 0;

	nr_run = nr_running() * 100;
	time_diff = cur_time - rq_data->last_time;
	do_div(time_diff, 1000 * 1000);

	if (time_diff != 0 && rq_data->total_time != 0) {
		nr_run = (nr_run * time_diff) +
			(rq_data->nr_run_avg * rq_data->total_time);
		do_div(nr_run, rq_data->total_time + time_diff);
	}
	rq_data->nr_run_avg = nr_run;
	rq_data->total_time += time_diff;
	rq_data->last_time = cur_time;

	if (rq_data->update_rate != 0)
		queue_delayed_work(rq_data->nr_run_wq, &rq_data->work,
				   msecs_to_jiffies(rq_data->update_rate));

	spin_unlock_irqrestore(&rq_data->lock, flags);
}
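rq_work_fn() maintains a time-weighted running average of nr_running() * 100: each new sample is blended with the previous average, weighted by how long each one covered. A minimal userspace sketch of the same update rule (the struct, helper name, and units are illustrative, not kernel API):

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical state and update helper mirroring the blend in rq_work_fn():
 * new_avg = (sample * dt + old_avg * total) / (total + dt).
 */
struct rq_avg {
	uint64_t total_time_ms;
	uint64_t nr_run_avg;	/* runnable tasks * 100 */
};

static void rq_avg_update(struct rq_avg *ra, uint64_t sample_x100,
			  uint64_t time_diff_ms)
{
	if (time_diff_ms && ra->total_time_ms)
		sample_x100 = (sample_x100 * time_diff_ms +
			       ra->nr_run_avg * ra->total_time_ms) /
			      (ra->total_time_ms + time_diff_ms);

	ra->nr_run_avg = sample_x100;
	ra->total_time_ms += time_diff_ms;
}

int main(void)
{
	struct rq_avg ra = { 0, 0 };

	rq_avg_update(&ra, 2 * 100, 10);	/* 2 tasks runnable for 10 ms */
	rq_avg_update(&ra, 5 * 100, 10);	/* 5 tasks runnable for 10 ms */
	printf("nr_run_avg = %llu (%.2f tasks)\n",
	       (unsigned long long)ra.nr_run_avg, ra.nr_run_avg / 100.0);
	return 0;
}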
static void update_rq_stats(void)
{
	unsigned long jiffy_gap = 0;
	unsigned int rq_avg = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);

	jiffy_gap = jiffies - rq_info.rq_poll_last_jiffy;
	if (jiffy_gap >= rq_info.rq_poll_jiffies) {
		if (!rq_info.rq_avg)
			rq_info.rq_poll_total_jiffies = 0;

		rq_avg = nr_running() * 10;

		if (rq_info.rq_poll_total_jiffies) {
			rq_avg = (rq_avg * jiffy_gap) +
				(rq_info.rq_avg *
				 rq_info.rq_poll_total_jiffies);
			do_div(rq_avg,
			       rq_info.rq_poll_total_jiffies + jiffy_gap);
		}

		rq_info.rq_avg = rq_avg;
		rq_info.rq_poll_total_jiffies += jiffy_gap;
		rq_info.rq_poll_last_jiffy = jiffies;
	}

	spin_unlock_irqrestore(&rq_lock, flags);
}
static void rq_work_fn(struct work_struct *work)
{
	int64_t time_diff = 0;
	int64_t rq_avg = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);

	if (!rq_info.last_time)
		rq_info.last_time = ktime_to_ns(ktime_get());
	if (!rq_info.rq_avg)
		rq_info.total_time = 0;

	rq_avg = nr_running() * 10;
	time_diff = ktime_to_ns(ktime_get()) - rq_info.last_time;
	do_div(time_diff, (1000 * 1000));

	if (time_diff && rq_info.total_time) {
		rq_avg = (rq_avg * time_diff) +
			(rq_info.rq_avg * rq_info.total_time);
		do_div(rq_avg, rq_info.total_time + time_diff);
	}

	rq_info.rq_avg = (unsigned int)rq_avg;

	/* Set the next poll */
	if (rq_info.rq_poll_ms)
		queue_delayed_work(msm_stats_wq, &rq_info.rq_work,
			msecs_to_jiffies(rq_info.rq_poll_ms));

	rq_info.total_time += time_diff;
	rq_info.last_time = ktime_to_ns(ktime_get());

	spin_unlock_irqrestore(&rq_lock, flags);
}
/* We use the same work function to scale up and down */
static void cpufreq_greenmax_freq_change(struct greenmax_info_s *this_greenmax) {
	unsigned int cpu;
	unsigned int new_freq = 0;
	unsigned int old_freq;
	int ramp_dir;
	struct cpufreq_policy *policy;
	unsigned int relation = CPUFREQ_RELATION_L;

	ramp_dir = this_greenmax->ramp_dir;
	old_freq = this_greenmax->old_freq;
	policy = this_greenmax->cur_policy;
	cpu = this_greenmax->cpu;

	dprintk(GREENMAX_DEBUG_ALG, "%d: %s\n", old_freq, __func__);

	if (old_freq != policy->cur) {
		// frequency was changed by someone else?
		dprintk(GREENMAX_DEBUG_ALG, "%d: frequency changed by 3rd party to %d\n",
				old_freq, policy->cur);
		new_freq = old_freq;
	} else if (ramp_dir > 0 && nr_running() > 1) {
		// ramp up logic:
		if (old_freq < this_greenmax->ideal_speed)
			new_freq = this_greenmax->ideal_speed;
		else if (ramp_up_step) {
			new_freq = old_freq + ramp_up_step;
			relation = CPUFREQ_RELATION_H;
		} else {
			new_freq = policy->max;
			relation = CPUFREQ_RELATION_H;
		}
	} else if (ramp_dir < 0) {
		// ramp down logic:
		if (old_freq > this_greenmax->ideal_speed) {
			new_freq = this_greenmax->ideal_speed;
			relation = CPUFREQ_RELATION_H;
		} else if (ramp_down_step)
			new_freq = old_freq - ramp_down_step;
		else {
			// Load heuristics: Adjust new_freq such that, assuming a linear
			// scaling of load vs. frequency, the load in the new frequency
			// will be max_cpu_load:
			new_freq = old_freq * this_greenmax->cur_cpu_load / max_cpu_load;
			if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?!
				new_freq = old_freq - 1;
		}
	}

	if (new_freq != 0) {
		target_freq(policy, this_greenmax, new_freq, old_freq, relation);
	}

	this_greenmax->ramp_dir = 0;
}
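The ramp-down fallback above assumes CPU load scales roughly linearly with frequency, so the governor picks the frequency at which the current amount of work would land at max_cpu_load. A small standalone sketch of that heuristic, with made-up numbers rather than the governor's tunables:

#include <stdio.h>

/*
 * Sketch of the greenmax ramp-down heuristic: assuming load scales
 * linearly with frequency, pick the frequency at which the current
 * work would produce max_cpu_load.  All values are illustrative.
 */
int main(void)
{
	unsigned int old_freq = 1200000;	/* kHz */
	unsigned int cur_cpu_load = 35;		/* percent at old_freq */
	unsigned int max_cpu_load = 70;		/* load we are willing to run at */
	unsigned int new_freq;

	new_freq = old_freq * cur_cpu_load / max_cpu_load;
	if (new_freq > old_freq)	/* only possible with inverted tunables */
		new_freq = old_freq - 1;

	printf("ramp down: %u kHz -> %u kHz\n", old_freq, new_freq);	/* 600000 */
	return 0;
}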
static int loadavg_proc_show(struct seq_file *m, void *v)
{
	unsigned long avnrun[3];

	get_avenrun(avnrun, FIXED_1/200, 0);

	seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
		LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
		LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
		LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
		nr_running(), nr_threads,
		task_active_pid_ns(current)->last_pid);
	return 0;
}
Example #8
static int loadavg_read_proc(char *page, char **start, off_t off,
				 int count, int *eof, void *data)
{
	int a, b, c;
	int len;

	a = avenrun[0] + (FIXED_1/200);
	b = avenrun[1] + (FIXED_1/200);
	c = avenrun[2] + (FIXED_1/200);
	len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		nr_running(), nr_threads, last_pid);
	return proc_calc_metrics(page, start, off, count, eof, len);
}
Example #9
static inline enum flag
standalone_hotplug(unsigned int load, unsigned long nr_rq_min, unsigned int cpu_rq_min)
{
	unsigned int cur_freq;
	unsigned int nr_online_cpu;
	unsigned int avg_load;
	/*load threshold*/
	unsigned int threshold[CPULOAD_TABLE][2] = {
		{0, trans_load_h0},
		{trans_load_l1, trans_load_h1},
#if (NR_CPUS > 2)
		{trans_load_l2, trans_load_h2},
		{trans_load_l3, 100},
#endif
		{0, 0}
	};


	cur_freq = clk_get_rate(clk_get(NULL, "armclk")) / 1000;

	nr_online_cpu = num_online_cpus();

	avg_load = (unsigned int)((cur_freq * load) / max_performance);

	if (nr_online_cpu > 1 && (avg_load < threshold[nr_online_cpu - 1][0] ||
				  cur_freq <= freq_min)) {

		return HOTPLUG_OUT;
		/* If total nr_running is less than the number of online CPUs, do not hotplug in */
	} else if (nr_running() > nr_online_cpu &&
		   avg_load > threshold[nr_online_cpu - 1][1] && cur_freq > freq_min) {

		return HOTPLUG_IN;

	} else if (nr_online_cpu > 1 && nr_rq_min < trans_rq) {

		struct cpu_time_info *tmp_info;

		tmp_info = &per_cpu(hotplug_cpu_time, cpu_rq_min);
		/*If CPU(cpu_rq_min) load is less than trans_load_rq, hotplug-out*/
		if (tmp_info->load < trans_load_rq)
			return HOTPLUG_OUT;
	}

	return HOTPLUG_NOP;
}
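standalone_hotplug() compares a frequency-normalized load against its per-core-count thresholds: the raw load is scaled by the current clock against the board's maximum performance, so being 50% busy at half speed counts the same as 25% busy at full speed. A minimal sketch of that normalization (the frequencies and max_performance value are illustrative):

#include <stdio.h>

/*
 * Sketch of the avg_load computation in standalone_hotplug().
 * Numbers below are made up; max_performance stands in for the
 * driver's notion of peak capacity.
 */
int main(void)
{
	unsigned long long max_performance = 1400000;	/* kHz at the top OPP */
	unsigned long long cur_freq = 700000;		/* kHz right now */
	unsigned long long load = 50;			/* percent at cur_freq */
	unsigned int avg_load;

	avg_load = (unsigned int)(cur_freq * load / max_performance);
	printf("normalized load: %u%%\n", avg_load);	/* 25 */
	return 0;
}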
Example #10
static int loadavg_proc_show(struct seq_file *m, void *v)
{
	int a, b, c;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		a = avenrun[0] + (FIXED_1/200);
		b = avenrun[1] + (FIXED_1/200);
		c = avenrun[2] + (FIXED_1/200);
	} while (read_seqretry(&xtime_lock, seq));

	seq_printf(m, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		nr_running(), nr_threads,
		task_active_pid_ns(current)->last_pid);
	return 0;
}
Example #11
static unsigned int calculate_thread_stats(void)
{
	unsigned int avg_nr_run = nr_running();
	unsigned int nr_run;
	unsigned int threshold_size;

	if (!eco_mode_active) {
		threshold_size =  ARRAY_SIZE(nr_run_thresholds_full);
		nr_run_hysteresis = 8;
		nr_fshift = 3;
#ifdef DEBUG_INTELLI_PLUG
		pr_info("intelliplug: full mode active!");
#endif
	}
	else {
		threshold_size =  ARRAY_SIZE(nr_run_thresholds_eco);
		nr_run_hysteresis = 4;
		nr_fshift = 1;
#ifdef DEBUG_INTELLI_PLUG
		pr_info("intelliplug: eco mode active!");
#endif
	}

	for (nr_run = 1; nr_run < threshold_size; nr_run++) {
		unsigned int nr_threshold;
		if (!eco_mode_active)
			nr_threshold = nr_run_thresholds_full[nr_run - 1];
		else
			nr_threshold = nr_run_thresholds_eco[nr_run - 1];

		if (nr_run_last <= nr_run)
			nr_threshold += nr_run_hysteresis;
		if (avg_nr_run <= (nr_threshold << (FSHIFT - nr_fshift)))
			break;
	}
	nr_run_last = nr_run;

	return nr_run;
}
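calculate_thread_stats() walks a threshold table to pick a target core count, adding a hysteresis margin for counts it has already reached so the result does not flap; thresholds are stored with nr_fshift fractional bits and shifted up to the run-queue average's FSHIFT precision before comparison. A standalone sketch of that walk, assuming the averaged run-queue depth is kept in FSHIFT fixed point and using illustrative tunables rather than the driver's defaults:

#include <stdio.h>

#define FSHIFT		11	/* fixed-point precision of the run-queue average */

/* Illustrative tunables: thresholds in units of half a task (nr_fshift = 1). */
static unsigned int nr_run_thresholds[] = { 5, 7, 9 };	/* 2.5, 3.5, 4.5 tasks */
static unsigned int nr_run_hysteresis = 4;		/* 2 extra tasks before adding a core */
static unsigned int nr_fshift = 1;
static unsigned int nr_run_last = 1;

static unsigned int calc_target_cores(unsigned int avg_nr_run)
{
	unsigned int nr_run;

	for (nr_run = 1; nr_run <= 3; nr_run++) {
		unsigned int threshold = nr_run_thresholds[nr_run - 1];

		/* Raise the bar for core counts at or above the current one. */
		if (nr_run_last <= nr_run)
			threshold += nr_run_hysteresis;
		if (avg_nr_run <= (threshold << (FSHIFT - nr_fshift)))
			break;
	}
	nr_run_last = nr_run;
	return nr_run;	/* desired number of online cores, 1..4 */
}

int main(void)
{
	unsigned int avg = 3 << FSHIFT;	/* 3.0 runnable tasks on average */

	nr_run_last = 1;	/* coming from one core: hysteresis keeps us at 1 */
	printf("from 1 core:  %u\n", calc_target_cores(avg));
	nr_run_last = 4;	/* coming down from four cores: settle at 2 */
	printf("from 4 cores: %u\n", calc_target_cores(avg));
	return 0;
}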
/* We use the same work function to scale up and down */
static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask = work_cpumask;

	for_each_cpu(cpu, tmp_mask) {
		if (!suspended && (target_freq >= freq_threshold || target_freq == policy->max)) {
			if (policy->cur < 400000) {
				// avoid quick jump from lowest to highest
				target_freq = resume_speed;
			}
			if (nr_running() == 1) {
				cpumask_clear_cpu(cpu, &work_cpumask);
				return;
			}
			__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
		} else {
			if (!suspended) {
				target_freq = cpufreq_interactivex_calc_freq(cpu);
				__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_L);
			} else { // special care when suspended
				if (target_freq > suspendfreq) {
					__cpufreq_driver_target(policy, suspendfreq, CPUFREQ_RELATION_H);
				} else {
					target_freq = cpufreq_interactivex_calc_freq(cpu);
					if (target_freq < policy->cur)
						__cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
				}
			}
		}
		freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
		cpumask_clear_cpu(cpu, &work_cpumask);
	}
}
static void dbgpr(char *fmt, ...)
{
	va_list args;
	int n;
	unsigned long flags;

	spin_lock_irqsave(&dbgpr_lock, flags);
	n = dbgbufe;
	va_start(args, fmt);
	vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
	va_end(args);
	dbgbuf[n].cpu = smp_processor_id();
	dbgbuf[n].run = nr_running();
	dbgbuf[n].jiffy = jiffies;

	if (++dbgbufe >= NDBGLNS)
		dbgbufe = 0;

	if (dbgbufe == dbgbufs)
		if (++dbgbufs >= NDBGLNS)
			dbgbufs = 0;

	spin_unlock_irqrestore(&dbgpr_lock, flags);
}
Example #14
static void hotplug_decision_work_fn(struct work_struct *work)
{
        unsigned int running, disable_load, sampling_rate, enable_load, avg_running = 0;
        unsigned int online_cpus, available_cpus, i, j;
#if DEBUG
        unsigned int k;
#endif

        online_cpus = num_online_cpus();
        available_cpus = CPUS_AVAILABLE;
        disable_load = DISABLE_LOAD_THRESHOLD * online_cpus;
        enable_load = ENABLE_LOAD_THRESHOLD * online_cpus;
        /*
         * Multiply nr_running() by 100 so we don't have to
         * use fp division to get the average.
         */
        running = nr_running() * 100;

        history[index] = running;

#if DEBUG
        pr_info("online_cpus is: %d\n", online_cpus);
        pr_info("enable_load is: %d\n", enable_load);
        pr_info("disable_load is: %d\n", disable_load);
        pr_info("index is: %d\n", index);
        pr_info("running is: %d\n", running);
#endif

        /*
         * Use a circular buffer to calculate the average load
         * over the sampling periods.
         * This will absorb load spikes of short duration where
         * we don't want additional cores to be onlined because
         * the cpufreq driver should take care of those load spikes.
         */
        for (i = 0, j = index; i < SAMPLING_PERIODS; i++, j--) {
                avg_running += history[j];
                if (unlikely(j == 0))
                        j = INDEX_MAX_VALUE;
        }

        /*
         * If we are at the end of the buffer, return to the beginning.
         */
        if (unlikely(index++ == INDEX_MAX_VALUE))
                index = 0;

#if DEBUG
        pr_info("array contents: ");
        for (k = 0; k < SAMPLING_PERIODS; k++) {
                 pr_info("%d: %d\t",k, history[k]);
        }
        pr_info("\n");
        pr_info("avg_running before division: %d\n", avg_running);
#endif

        avg_running = avg_running / SAMPLING_PERIODS;

#if DEBUG
        pr_info("average_running is: %d\n", avg_running);
#endif

        if (likely(!(flags & HOTPLUG_DISABLED))) {
                if (unlikely((avg_running >= ENABLE_ALL_LOAD_THRESHOLD) && (online_cpus < available_cpus))) {
                        pr_info("auto_hotplug: Onlining all CPUs, avg running: %d\n", avg_running);
                        /*
                         * Flush any delayed offlining work from the workqueue.
                         * No point in having expensive unnecessary hotplug transitions.
                         * We still online after flushing, because load is high enough to
                         * warrant it.
                         * We set the paused flag so the sampling can continue but no more
                         * hotplug events will occur.
                         */
                        flags |= HOTPLUG_PAUSED;
                        if (delayed_work_pending(&hotplug_offline_work))
                                cancel_delayed_work(&hotplug_offline_work);
                        schedule_work(&hotplug_online_all_work);
                        return;
                } else if (flags & HOTPLUG_PAUSED) {
                        schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
                        return;
                } else if ((avg_running >= enable_load) && (online_cpus < available_cpus)) {
                        pr_info("auto_hotplug: Onlining single CPU, avg running: %d\n", avg_running);
                        if (delayed_work_pending(&hotplug_offline_work))
                                cancel_delayed_work(&hotplug_offline_work);
                        schedule_work(&hotplug_online_single_work);
                        return;
                } else if ((avg_running <= disable_load) && (min_online_cpus < online_cpus)) {
                        /* Only queue a cpu_down() if there isn't one already pending */
                        if (!(delayed_work_pending(&hotplug_offline_work))) {
                                pr_info("auto_hotplug: Offlining CPU, avg running: %d\n", avg_running);
                                schedule_delayed_work_on(0, &hotplug_offline_work, HZ);
                        }
                        /* If boostpulse is active, clear the flags */
                        if (flags & BOOSTPULSE_ACTIVE) {
                                flags &= ~BOOSTPULSE_ACTIVE;
                                pr_info("auto_hotplug: Clearing boostpulse flags\n");
                        }
                }
        }

        /*
         * Reduce the sampling rate dynamically based on online cpus.
         */
        sampling_rate = MIN_SAMPLING_RATE;
#if DEBUG
        pr_info("sampling_rate is: %d\n", jiffies_to_msecs(sampling_rate));
#endif
        schedule_delayed_work_on(0, &hotplug_decision_work, sampling_rate);

}
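hotplug_decision_work_fn() smooths nr_running() * 100 through a ring buffer of the last SAMPLING_PERIODS samples and makes hotplug decisions on the average, so a one-sample spike cannot online a core by itself. A minimal sketch of that smoothing (the period count and sample values are made up):

#include <stdio.h>

/*
 * Samples of nr_running() * 100 go into a circular history[] and the
 * decision value is their average, as in hotplug_decision_work_fn().
 */
#define SAMPLING_PERIODS	10

static unsigned int history[SAMPLING_PERIODS];
static unsigned int idx;

static unsigned int record_sample(unsigned int running_x100)
{
	unsigned int i, avg = 0;

	history[idx] = running_x100;
	idx = (idx + 1) % SAMPLING_PERIODS;	/* wrap around the ring */

	for (i = 0; i < SAMPLING_PERIODS; i++)
		avg += history[i];

	return avg / SAMPLING_PERIODS;
}

int main(void)
{
	unsigned int n;

	for (n = 0; n < SAMPLING_PERIODS; n++)
		record_sample(2 * 100);			/* steady 2 runnable tasks */
	printf("steady average:     %u\n", record_sample(2 * 100));	/* 200 */
	printf("after 8-task spike: %u\n", record_sample(8 * 100));	/* 260 */
	return 0;
}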
Example #15
File: proc_misc.c  Project: kzlin129/tt-gpl
static int show_stat(struct seq_file *p, void *v)
{
	int i;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	u64 sum = 0;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	jif = - wall_to_monotonic.tv_sec;
	if (wall_to_monotonic.tv_nsec)
		--jif;

	for_each_cpu(i) {
		int j;

		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		for (j = 0 ; j < NR_IRQS ; j++)
			sum += kstat_cpu(i).irqs[j];
	}

	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal));
	for_each_online_cpu(i) {

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA)
	for (i = 0; i < NR_IRQS; i++)
		seq_printf(p, " %u", kstat_irqs(i));
#endif

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	return 0;
}
Example #16
static void cpufreq_interactivex_timer(unsigned long data)
{
	u64 delta_idle;
	u64 update_time;
	u64 *cpu_time_in_idle;
	u64 *cpu_idle_exit_time;
	struct timer_list *t;

	u64 now_idle = get_cpu_idle_time_us(data, &update_time);

	cpu_time_in_idle = &per_cpu(time_in_idle, data);
	cpu_idle_exit_time = &per_cpu(idle_exit_time, data);

	if (update_time == *cpu_idle_exit_time)
		return;

	delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);

	/* Scale up if there were no idle cycles since coming out of idle */
	if (delta_idle == 0) {
		if (policy->cur == policy->max)
			return;

		if (nr_running() < 1)
			return;

		target_freq = policy->max;

		cpumask_set_cpu(data, &work_cpumask);
		queue_work(up_wq, &freq_scale_work);
		return;
	}

	/*
	 * There is a window where if the cpu utlization can go from low to high
	 * between the timer expiring, delta_idle will be > 0 and the cpu will
	 * be 100% busy, preventing idle from running, and this timer from
	 * firing. So setup another timer to fire to check cpu utlization.
	 * Do not setup the timer if there is no scheduled work.
	 */
	t = &per_cpu(cpu_timer, data);
	if (!timer_pending(t) && nr_running() > 0) {
			*cpu_time_in_idle = get_cpu_idle_time_us(
					data, cpu_idle_exit_time);
			mod_timer(t, jiffies + 2);
	}

	if (policy->cur == policy->min)
		return;

	/*
	 * Do not scale down unless we have been at this frequency for the
	 * minimum sample time.
	 */
	if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
		return;

	target_freq = policy->min;
	cpumask_set_cpu(data, &work_cpumask);
	queue_work(down_wq, &freq_scale_work);
}
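The scale-up test above keys off the per-CPU idle counter: if it did not advance between two timer runs, the CPU was 100% busy in that window and the governor jumps straight to policy->max. A tiny sketch of that test (the idle-counter readings are made-up microsecond values, not get_cpu_idle_time_us() output):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: returns 1 when no idle time accrued since the
 * previous reading, i.e. the CPU was fully busy in that window. */
static uint64_t prev_idle_us;

static int cpu_was_fully_busy(uint64_t now_idle_us)
{
	uint64_t delta_idle = now_idle_us - prev_idle_us;

	prev_idle_us = now_idle_us;
	return delta_idle == 0;
}

int main(void)
{
	prev_idle_us = 500000;
	printf("busy window: %d\n", cpu_was_fully_busy(500000));	/* 1 -> go to max */
	printf("idle window: %d\n", cpu_was_fully_busy(500800));	/* 0 -> re-evaluate */
	return 0;
}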
Example #17
File: stat.c  Project: 383530895/linux
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	u64 user, nice, system, idle, iowait, irq, softirq, steal;
	u64 guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;

	user = nice = system = idle = iowait =
		irq = softirq = steal = 0;
	guest = guest_nice = 0;
	getboottime(&boottime);
	jif = boottime.tv_sec;

	for_each_possible_cpu(i) {
		user += kcpustat_cpu(i).cpustat[CPUTIME_USER];
		nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE];
		system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
		idle += get_idle_time(i);
		iowait += get_iowait_time(i);
		irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
		softirq += kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
		steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
		guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
		guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
		sum += kstat_cpu_irqs_sum(i);
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

	seq_puts(p, "cpu ");
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
	seq_putc(p, '\n');

	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kcpustat_cpu(i).cpustat[CPUTIME_USER];
		nice = kcpustat_cpu(i).cpustat[CPUTIME_NICE];
		system = kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM];
		idle = get_idle_time(i);
		iowait = get_iowait_time(i);
		irq = kcpustat_cpu(i).cpustat[CPUTIME_IRQ];
		softirq = kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ];
		steal = kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
		guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
		guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
		seq_printf(p, "cpu%d", i);
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
		seq_putc(p, '\n');
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	for_each_irq_nr(j)
		seq_put_decimal_ull(p, ' ', kstat_irqs(j));

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
	seq_putc(p, '\n');

	return 0;
}
Example #18
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = guest_nice = cputime64_zero;
	getboottime(&boottime);
	jif = boottime.tv_sec;

	for_each_possible_cpu(i) {
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		guest_nice = cputime64_add(guest_nice,
			kstat_cpu(i).cpustat.guest_nice);
		sum += kstat_cpu_irqs_sum(i);
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

	seq_printf(p, "cpu  %llu %llu %llu %llu %llu %llu %llu %llu %llu"
	"%llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest),
		(unsigned long long)cputime64_to_clock_t(guest_nice));
	for_each_online_cpu(i) {

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		guest_nice = kstat_cpu(i).cpustat.guest_nice;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu"
			"%llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest),
			(unsigned long long)cputime64_to_clock_t(guest_nice));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	for_each_irq_nr(j)
		seq_printf(p, " %u", kstat_irqs(j));

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
	seq_putc(p, '\n');

	return 0;
}