static int uptime_proc_show(struct seq_file *m, void *v) { struct timespec calc_uptime; struct timespec calc_idle; int i; cputime_t calc_idletime = cputime_zero; if (!uptime) { do_posix_clock_monotonic_gettime(&calc_uptime); monotonic_to_bootbased(&calc_uptime); for_each_possible_cpu(i) calc_idletime += cputime64_add(calc_idletime, kstat_cpu(i).cpustat.idle); cputime_to_timespec(calc_idletime, &calc_idle); } else { calc_uptime.tv_sec = uptime * HZ + jiffies - startjiffies; calc_uptime.tv_nsec = 0; calc_idle.tv_sec = idletime * HZ + jiffies - startjiffies; calc_idle.tv_nsec = 0; } seq_printf(m, "%lu.%02lu %lu.%02lu\n", (unsigned long) calc_uptime.tv_sec, (calc_uptime.tv_nsec / (NSEC_PER_SEC / 100)), (unsigned long) calc_idle.tv_sec, (calc_idle.tv_nsec / (NSEC_PER_SEC / 100))); return 0; }
static int cpufreq_stats_update(unsigned int cpu) { struct cpufreq_stats *stat; unsigned long long cur_time; cur_time = get_jiffies_64(); spin_lock(&cpufreq_stats_lock); stat = per_cpu(cpufreq_stats_table, cpu); if (stat->time_in_state) stat->time_in_state[stat->last_index] = cputime64_add(stat->time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); stat->last_time = cur_time; spin_unlock(&cpufreq_stats_lock); #ifdef CONFIG_NC_DEBUG printk(KERN_INFO "============== CPUFREQ STATS"); printk(KERN_INFO "CPUFREQ STATS: cpu: %u", stat->cpu); printk(KERN_INFO "CPUFREQ STATS: total_trans: %u", stat->total_trans); printk(KERN_INFO "CPUFREQ STATS: last_time: %lld", stat->last_time); printk(KERN_INFO "CPUFREQ STATS: max_state: %u", stat->max_state); printk(KERN_INFO "CPUFREQ STATS: state_num: %u", stat->state_num); printk(KERN_INFO "CPUFREQ STATS: last_index: %u", stat->last_index); printk(KERN_INFO "CPUFREQ STATS: time_in_state: %llu", cputime64_to_jiffies64(stat->time_in_state)); printk(KERN_INFO "CPUFREQ STATS: *freq_table: %p", &stat->freq_table); #ifdef CONFIG_CPU_FREQ_STAT_DETAILS printk(KERN_INFO "CPUFREQ STATS: *trans_table: %p", &stat->trans_table); #endif printk(KERN_INFO "============= END CPUFREQ STATS"); #endif return 0; }
static int cpufreq_stats_update(unsigned int cpu) { struct cpufreq_stats *stat; struct all_cpufreq_stats *all_stat; unsigned long long cur_time; cur_time = get_jiffies_64(); spin_lock(&cpufreq_stats_lock); stat = per_cpu(cpufreq_stats_table, cpu); all_stat = per_cpu(all_cpufreq_stats, cpu); if (!stat) { spin_unlock(&cpufreq_stats_lock); return 0; } if (stat->time_in_state) { stat->time_in_state[stat->last_index] = cputime64_add(stat->time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); if (all_stat) all_stat->time_in_state[stat->last_index] += cur_time - stat->last_time; } stat->last_time = cur_time; spin_unlock(&cpufreq_stats_lock); return 0; }
/*
 * update_busfreq_stat - debug-only bookkeeping: charge the jiffies elapsed
 * since data->last_time to the bus-frequency state 'index'.  Compiles to a
 * no-op unless BUSFREQ_DEBUG is defined.
 */
void update_busfreq_stat(struct busfreq_data *data, unsigned int index)
{
#ifdef BUSFREQ_DEBUG
	unsigned long long now = get_jiffies_64();

	data->time_in_state[index] =
		cputime64_add(data->time_in_state[index],
			      cputime_sub(now, data->last_time));
	data->last_time = now;
#endif
}
/*
 * get_cpu_idle_time - derive a cpu's idle time as wall time minus busy
 * time (user+system+irq+softirq+steal+nice), then convert to usecs.
 * Note iowait is deliberately not counted as busy here.
 */
static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
{
	cputime64_t wall = jiffies64_to_cputime64(get_jiffies_64());
	cputime64_t busy;

	busy = cputime64_add(kstat_cpu(cpu).cpustat.user,
			     kstat_cpu(cpu).cpustat.system);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.irq);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.softirq);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.steal);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.nice);

	return jiffies_to_usecs(cputime64_sub(wall, busy));
}
/*
 * rkusb_get_cpu_idle_time - like get_cpu_idle_time() but returns the idle
 * time in jiffies (no usec conversion) and logs wall/busy/idle via S_INFO.
 */
static inline cputime64_t rkusb_get_cpu_idle_time(unsigned int cpu)
{
	cputime64_t wall = jiffies64_to_cputime64(get_jiffies_64());
	cputime64_t busy;
	cputime64_t idle;

	busy = cputime64_add(kstat_cpu(cpu).cpustat.user,
			     kstat_cpu(cpu).cpustat.system);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.irq);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.softirq);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.steal);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.nice);

	idle = cputime64_sub(wall, busy);
	S_INFO("%s:jiffies=%Ld,busy=%Ld,idle=%Ld", __FILE__, wall, busy, idle);
	return idle;
}
static int cpufreq_stats_update(unsigned int cpu) { struct cpufreq_stats *stat; unsigned long long cur_time; cur_time = get_jiffies_64(); spin_lock(&cpufreq_stats_lock); stat = per_cpu(cpufreq_stats_table, cpu); if (!stat) { spin_unlock(&cpufreq_stats_lock); return 0; } if (stat->time_in_state) stat->time_in_state[stat->last_index] = cputime64_add(stat->time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); if (cpu == 0) cpu0_time_in_state[stat->last_index] = cputime64_add(cpu0_time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); else if (cpu == 1) cpu1_time_in_state[stat->last_index] = cputime64_add(cpu1_time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); #ifdef CONFIG_QUAD_CORES_SOC_STAT else if (cpu == 2) cpu2_time_in_state[stat->last_index] = cputime64_add(cpu2_time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); else if (cpu == 3) cpu3_time_in_state[stat->last_index] = cputime64_add(cpu3_time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); #endif stat->last_time = cur_time; spin_unlock(&cpufreq_stats_lock); return 0; }
/*
 * get_cpu_idle_time_jiffy - compute idle time as wall minus busy
 * (user+system+irq+softirq+steal+nice), returning the result in usecs.
 * If @wall is non-NULL it receives the current wall time, also in usecs.
 */
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	cputime64_t now = jiffies64_to_cputime64(get_jiffies_64());
	cputime64_t busy;

	busy = cputime64_add(kstat_cpu(cpu).cpustat.user,
			     kstat_cpu(cpu).cpustat.system);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.irq);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.softirq);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.steal);
	busy = cputime64_add(busy, kstat_cpu(cpu).cpustat.nice);

	if (wall)
		*wall = (cputime64_t)jiffies_to_usecs(now);

	return (cputime64_t)jiffies_to_usecs(cputime64_sub(now, busy));
}
/*
 * get_idle_time - per-cpu idle time in cputime units.  Prefers the
 * tickless accounting (get_cpu_idle_time_us); when that reports -1ULL
 * (NO_HZ unavailable) it falls back to the tick-based cpustat.idle plus
 * any architecture-specific idle contribution.
 */
static cputime64_t get_idle_time(int cpu)
{
	u64 idle_us = get_cpu_idle_time_us(cpu, NULL);
	cputime64_t idle;

	if (idle_us != -1ULL)
		return usecs_to_cputime64(idle_us);

	/* !NO_HZ so we can rely on cpustat.idle */
	idle = kstat_cpu(cpu).cpustat.idle;
	return cputime64_add(idle, arch_idle_time(cpu));
}
/*
 * cpufreq_stats_update - charge the jiffies elapsed since last_time to the
 * frequency index this cpu last switched to, under cpufreq_stats_lock.
 * Returns 0.
 */
static int cpufreq_stats_update (unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = cpufreq_stats_table[cpu];
	/*
	 * BUG FIX: every other variant of this function in the file guards
	 * against a missing table entry; without this, stat->time_in_state
	 * below is a NULL-pointer dereference under the spinlock when the
	 * entry has not been created yet (or was freed).
	 */
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/*
 * emc_last_stats_update - account the time spent at the previous EMC clock
 * selection and record the new one.  Only indices below
 * TEGRA_EMC_TABLE_MAX_SIZE are tracked; out-of-range values leave the
 * accumulation (resp. the stored selection) untouched.  IRQ-safe via
 * emc_stats.spinlock.
 */
static void emc_last_stats_update(int last_sel)
{
	u64 now = get_jiffies_64();
	unsigned long flags;

	spin_lock_irqsave(&emc_stats.spinlock, flags);

	if (emc_stats.last_sel < TEGRA_EMC_TABLE_MAX_SIZE)
		emc_stats.time_at_clock[emc_stats.last_sel] = cputime64_add(
			emc_stats.time_at_clock[emc_stats.last_sel],
			cputime64_sub(now, emc_stats.last_update));

	emc_stats.last_update = now;

	if (last_sel < TEGRA_EMC_TABLE_MAX_SIZE) {
		emc_stats.clkchange_count++;
		emc_stats.last_sel = last_sel;
	}

	spin_unlock_irqrestore(&emc_stats.spinlock, flags);
}
static int uptime_proc_show(struct seq_file *m, void *v) { struct timespec uptime; struct timespec idle; int i; cputime_t idletime = cputime_zero; for_each_possible_cpu(i) idletime = cputime64_add(idletime, kstat_cpu(i).cpustat.idle); do_posix_clock_monotonic_gettime(&uptime); monotonic_to_bootbased(&uptime); cputime_to_timespec(idletime, &idle); seq_printf(m, "%lu.%02lu %lu.%02lu\n", (unsigned long) uptime.tv_sec, (uptime.tv_nsec / (NSEC_PER_SEC / 100)), (unsigned long) idle.tv_sec, (idle.tv_nsec / (NSEC_PER_SEC / 100))); return 0; }
/*
 * hp_stats_update - hotplug statistics: if this cpu slot was "up" (tracked
 * by the low bit of up_down_count), charge the elapsed jiffies to its
 * time_up_total; on an up/down transition bump up_down_count and
 * re-synchronize the parity bit if userspace changed cpu state behind our
 * back.
 */
static void hp_stats_update(unsigned int cpu, bool up)
{
	u64 now = get_jiffies_64();
	bool was_up = hp_stats[cpu].up_down_count & 0x1;

	if (was_up)
		hp_stats[cpu].time_up_total = cputime64_add(
			hp_stats[cpu].time_up_total,
			cputime64_sub(now, hp_stats[cpu].last_update));

	if (was_up != up) {
		hp_stats[cpu].up_down_count++;
		/* After the increment the parity bit must equal 'up';
		 * if not, an external actor toggled the cpu. */
		if ((hp_stats[cpu].up_down_count & 0x1) != up) {
			/* FIXME: sysfs user space CPU control breaks stats */
			pr_err("tegra hotplug stats out of sync with %s CPU%d",
			       (cpu < CONFIG_NR_CPUS) ? "G" : "LP",
			       (cpu < CONFIG_NR_CPUS) ? cpu : 0);
			hp_stats[cpu].up_down_count ^= 0x1;
		}
	}

	hp_stats[cpu].last_update = now;
}
static int cpufreq_stats_update(unsigned int cpu) { struct cpufreq_stats *stat; unsigned long long cur_time; cur_time = get_jiffies_64(); spin_lock(&cpufreq_stats_lock); stat = per_cpu(cpufreq_stats_table, cpu); #if defined (CONFIG_MACH_SAMSUNG_P5) if(!stat) { spin_unlock(&cpufreq_stats_lock); return -1; } #endif if (stat->time_in_state && stat->last_index >= 0) stat->time_in_state[stat->last_index] = cputime64_add(stat->time_in_state[stat->last_index], cputime_sub(cur_time, stat->last_time)); stat->last_time = cur_time; spin_unlock(&cpufreq_stats_lock); return 0; }
/*
 * cpufreq_stats_update - charge elapsed jiffies to the current frequency
 * index, under cpufreq_stats_lock.  No-op (returns 0) for offline cpus or
 * when the per-cpu stats table entry is absent.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	unsigned long long cur_time;

	if (cpu_is_offline(cpu))
		return 0;
	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	/*
	 * BUG FIX: last_index is unsigned in the mainline cpufreq_stats
	 * struct, so "last_index >= 0" was always true and the -1
	 * "uninitialized" sentinel was never caught; cast to int so the
	 * sentinel actually skips the accounting.  (Verify against this
	 * tree's struct definition.)
	 */
	if (stat->time_in_state && (int)stat->last_index >= 0)
		stat->time_in_state[stat->last_index] =
			cputime64_add(stat->time_in_state[stat->last_index],
				      cputime_sub(cur_time, stat->last_time));
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/* Sync fs/proc/stat.c to caculate all cpu statistics */ static void get_all_cpu_stat(struct cpu_usage_stat *cpu_stat) { int i; cputime64_t user, nice, system, idle, iowait, irq, softirq, steal; cputime64_t guest, guest_nice; if (!cpu_stat) return; user = nice = system = idle = iowait = irq = softirq = steal = cputime64_zero; guest = guest_nice = cputime64_zero; for_each_possible_cpu(i) { user = cputime64_add(user, kstat_cpu(i).cpustat.user); nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice); system = cputime64_add(system, kstat_cpu(i).cpustat.system); idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle); idle = cputime64_add(idle, arch_idle_time(i)); iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait); irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq); softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq); steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal); guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest); guest_nice = cputime64_add(guest_nice, kstat_cpu(i).cpustat.guest_nice); } cpu_stat->user = user; cpu_stat->nice = nice; cpu_stat->system = system; cpu_stat->softirq = softirq; cpu_stat->irq = irq; cpu_stat->idle = idle; cpu_stat->iowait = iowait; cpu_stat->steal = steal; cpu_stat->guest = guest; cpu_stat->guest_nice = guest_nice; }
/*
 * show_stat - seq_file show handler for /proc/stat (pre-tickless kernel).
 * Emits the aggregate "cpu" line, one "cpuN" line per online cpu, the
 * "intr" line with per-irq counts, and the ctxt/btime/processes/
 * procs_running/procs_blocked footer.  Values are cputime64 converted to
 * clock ticks for userspace.
 */
static int show_stat(struct seq_file *p, void *v)
{
	int i;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	u64 sum = 0;

	user = nice = system = idle = iowait = irq = softirq = steal = cputime64_zero;
	/* Boot time (btime) in seconds: negate the wall-to-monotonic
	 * offset, rounding down when there is a nanosecond remainder. */
	jif = - wall_to_monotonic.tv_sec;
	if (wall_to_monotonic.tv_nsec)
		--jif;

	/* Aggregate totals across cpus, plus the grand irq count. */
	for_each_cpu(i) {
		int j;
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		for (j = 0 ; j < NR_IRQS ; j++)
			sum += kstat_cpu(i).irqs[j];
	}

	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal));

	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		seq_printf(p, "cpu%d %llu %llu %llu %llu %llu %llu %llu %llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* Per-irq counts; skipped on arches where NR_IRQS is huge. */
#if !defined(CONFIG_PPC64) && !defined(CONFIG_ALPHA)
	for (i = 0; i < NR_IRQS; i++)
		seq_printf(p, " %u", kstat_irqs(i));
#endif

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	return 0;
}
/*
 * show_stat - seq_file show handler for /proc/stat (tickless-aware
 * kernel).  Emits the aggregate "cpu" line, a "cpuN" line per online cpu,
 * the "intr" line with per-irq counts, the ctxt/btime/processes/
 * procs_running/procs_blocked footer, and the "softirq" line with
 * per-softirq totals.  Idle/iowait come from get_idle_time()/
 * get_iowait_time() so NO_HZ idle accounting is respected.
 */
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = guest_nice = cputime64_zero;
	/* btime: boot time in seconds since the epoch. */
	getboottime(&boottime);
	jif = boottime.tv_sec;

	/* Aggregate totals across all possible cpus, plus the grand irq
	 * count and per-softirq counters. */
	for_each_possible_cpu(i) {
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, get_idle_time(i));
		iowait = cputime64_add(iowait, get_iowait_time(i));
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		guest_nice = cputime64_add(guest_nice,
			kstat_cpu(i).cpustat.guest_nice);
		sum += kstat_cpu_irqs_sum(i);
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

	seq_puts(p, "cpu ");
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
	seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
	seq_putc(p, '\n');

	for_each_online_cpu(i) {
		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = get_idle_time(i);
		iowait = get_iowait_time(i);
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		guest_nice = kstat_cpu(i).cpustat.guest_nice;
		seq_printf(p, "cpu%d", i);
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(user));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(nice));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(system));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(idle));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(iowait));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(irq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(softirq));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(steal));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest));
		seq_put_decimal_ull(p, ' ', cputime64_to_clock_t(guest_nice));
		seq_putc(p, '\n');
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	for_each_irq_nr(j)
		seq_put_decimal_ull(p, ' ', kstat_irqs(j));

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_put_decimal_ull(p, ' ', per_softirq_sums[i]);
	seq_putc(p, '\n');

	return 0;
}