void htc_show_interrupt(int i) { struct irqaction *action; unsigned long flags; if (i < NR_IRQS) { raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; if (!(kstat_irqs_cpu(i, 0)) || previous_irqs[i] == (kstat_irqs_cpu(i, 0))) goto unlock; printk("%3d:", i); printk("%6u\t", kstat_irqs_cpu(i, 0)-previous_irqs[i]); printk("%s", action->name); for (action = action->next; action; action = action->next) printk(KERN_INFO ", %s", action->name); printk("\n"); previous_irqs[i] = kstat_irqs_cpu(i, 0); unlock: raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { if (previous_irqs[NR_IRQS] == irq_err_count) return; printk("Err: %lud\n", irq_err_count-previous_irqs[NR_IRQS]); previous_irqs[NR_IRQS] = irq_err_count; } }
int show_interrupts(struct seq_file *p, void *v) { unsigned long flags, any_count = 0; int i = *(loff_t *) v, j; struct irqaction *action; struct irq_desc *desc; if (i > nr_irqs) return 0; if (i == nr_irqs) return show_other_interrupts(p); /* print header */ if (i == 0) { seq_printf(p, " "); for_each_online_cpu(j) seq_printf(p, "CPU%-8d", j); seq_putc(p, '\n'); } desc = irq_to_desc(i); if (!desc) return 0; spin_lock_irqsave(&desc->lock, flags); #ifndef CONFIG_SMP any_count = kstat_irqs(i); #else for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); #endif action = desc->action; if (!action && !any_count) goto out; seq_printf(p, "%3d: ", i); #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(i)); #else for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #endif seq_printf(p, " %8s", desc->chip->name); seq_printf(p, "-%-8s", desc->name); if (action) { seq_printf(p, " %s", action->name); while ((action = action->next) != NULL) seq_printf(p, ", %s", action->name); } seq_putc(p, '\n'); out: spin_unlock_irqrestore(&desc->lock, flags); return 0; }
int show_interrupts(struct seq_file *p, void *v) { unsigned long flags, any_count = 0; int i = *(loff_t *)v, j, prec; struct irqaction *action; struct irq_desc *desc; struct irq_data *data; struct irq_chip *chip; if (i > nr_irqs) return 0; for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) j *= 10; if (i == nr_irqs) return show_other_interrupts(p, prec); if (i == 0) { seq_printf(p, "%*s", prec + 8, ""); for_each_online_cpu(j) seq_printf(p, "CPU%-8d", j); seq_putc(p, '\n'); } desc = irq_to_desc(i); if (!desc) return 0; data = irq_get_irq_data(i); chip = irq_data_get_irq_chip(data); raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; if (!action && !any_count) goto out; seq_printf(p, "%*d: ", prec, i); for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, " %14s", chip->name); seq_printf(p, "-%-8s", desc->name); if (action) { seq_printf(p, " %s", action->name); while ((action = action->next) != NULL) seq_printf(p, ", %s", action->name); } seq_putc(p, '\n'); out: raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; }
static u32 get_num_interrupts_per_s(void) { int cpu; int i; u64 num_irqs = 0; ktime_t now; static ktime_t last; unsigned int delta; u32 irqs = 0; now = ktime_get(); for_each_possible_cpu(cpu) { for (i = 0; i < NR_IRQS; i++) num_irqs += kstat_irqs_cpu(i, cpu); } pr_debug("%s: total num irqs: %lld, previous %lld\n", __func__, num_irqs, old_num_irqs); if (old_num_irqs > 0) { delta = (u32)ktime_to_ms(ktime_sub(now, last)) / 1000; delta = (delta > 0) ? delta : 1; irqs = ((u32)(num_irqs - old_num_irqs)) / delta; } old_num_irqs = num_irqs; last = now; pr_debug("delta irqs per sec:%d\n", irqs); return irqs; }
int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, j; struct irqaction *action; unsigned long flags; if (i < NR_IRQS) { spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; seq_printf(p, "%3d: ", i); for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); seq_printf(p, " %8s", irq_desc[i].chip->name); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) seq_printf(p, " %s", action->name); seq_putc(p, '\n'); skip: spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) seq_printf(p, "%10u ", cpu_pda[j].__nmi_count); seq_printf(p, " CORE Non Maskable Interrupt\n"); seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); } return 0; }
/*
 * show_interrupts() - /proc/interrupts seq_file show callback.
 * Prints a CPU header on row 0 and one row per IRQ that has a
 * registered action (counts per CPU, chip typename, action names).
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction * action;
	unsigned long flags;

	/* Column header naming each online CPU, printed once. */
	if (i == 0) {
		seq_printf(p, " ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ",j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;	/* no handler: suppress the row */
		seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->typename);
		seq_printf(p, "  %s", action->name);
		/* Remaining actions sharing this IRQ line. */
		for (action=action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
	/* NOTE(review): SOURCE is truncated here — the body of this
	 * i == NR_IRQS branch is not visible in this chunk; left as-is
	 * rather than guessing at the missing summary code. */
void mt_save_irq_counts(void) { int irq, cpu; unsigned long flags; //do not refresh data in 20ms if(sched_clock() - mt_save_irq_count_time < 20000000) return; spin_lock_irqsave(&mt_irq_count_lock, flags); if(smp_processor_id() != 0){ //only record by CPU#0 spin_unlock_irqrestore(&mt_irq_count_lock, flags); return; } mt_save_irq_count_time = sched_clock(); for(cpu = 0; cpu < num_possible_cpus(); cpu++){ for(irq = 0; irq< NR_IRQS; irq++){ mt_irq_count[cpu][irq]=kstat_irqs_cpu(irq, cpu); } } #ifdef CONFIG_SMP for(cpu = 0; cpu < num_possible_cpus(); cpu++){ for(irq = 0; irq< NR_IPI; irq++){ mt_local_irq_count[cpu][irq] = __get_irq_stat(cpu, ipi_irqs[irq]); } } #endif spin_unlock_irqrestore(&mt_irq_count_lock, flags); }
static void usb_load(struct work_struct *work) { int cpu; unsigned int num_irqs = 0; static unsigned int old_num_irqs = UINT_MAX; for_each_online_cpu(cpu) num_irqs += kstat_irqs_cpu(IRQ_DB8500_USBOTG, cpu); if ((num_irqs > old_num_irqs) && (num_irqs - old_num_irqs) > USB_LIMIT) { prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, "usb", 125); if (!usb_pm_qos_is_latency_0) { usb_pm_qos_latency = pm_qos_add_request(PM_QOS_CPU_DMA_LATENCY, 0); usb_pm_qos_is_latency_0 = true; } } else { prcmu_qos_update_requirement(PRCMU_QOS_ARM_OPP, "usb", 25); if (usb_pm_qos_is_latency_0) { pm_qos_remove_request(usb_pm_qos_latency); usb_pm_qos_is_latency_0 = false; } } old_num_irqs = num_irqs; schedule_delayed_work_on(0, &work_usb_workaround, msecs_to_jiffies(USB_PROBE_DELAY)); }
/*
 * perfctr_irq() - NMI watchdog tick driven by the performance-counter
 * overflow interrupt.  If the per-CPU IRQ-0 count has not advanced for
 * 5*nmi_hz consecutive ticks (and nothing "touched" the watchdog), the
 * CPU is considered locked up and die_nmi() is called.
 */
notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs)
{
	unsigned int sum, touched = 0;
	int cpu = smp_processor_id();

	clear_softint(1 << irq);
	pcr_ops->write(PCR_PIC_PRIV);

	local_cpu_data().__nmi_count++;

	/* Let registered NMI notifiers claim this event first. */
	if (notify_die(DIE_NMI, "nmi", regs, 0,
		       pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP)
		touched = 1;

	/* Progress indicator: per-CPU count of IRQ 0 (timer — TODO
	 * confirm; the IRQ-0 meaning is established outside this chunk). */
	sum = kstat_irqs_cpu(0, cpu);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/* No progress since last tick: count toward lockup. */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
			die_nmi("BUG: NMI Watchdog detected LOCKUP",
				regs, panic_on_timeout);
	} else {
		/* Progress seen (or touched): reset the alert counter. */
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}
	/* Re-arm the counter for the next watchdog period. */
	if (nmi_usable) {
		write_pic(picl_value(nmi_hz));
		pcr_ops->write(pcr_enable);
	}
}
static void wlan_load(struct work_struct *work) { int cpu; unsigned int num_irqs = 0; static unsigned int old_num_irqs = UINT_MAX; for_each_online_cpu(cpu) num_irqs += kstat_irqs_cpu(IRQ_DB8500_SDMMC1, cpu); if ((num_irqs > old_num_irqs) && (num_irqs - old_num_irqs) > wlan_limit) { if (wlan_arm_khz) prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ, "wlan", wlan_arm_khz); prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "wlan", PRCMU_QOS_MAX_VALUE); prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "wlan", PRCMU_QOS_MAX_VALUE); if (!wlan_pm_qos_is_latency_0) { /* * The wake up latency is set to 0 to prevent * the system from going to sleep. This improves * the wlan throughput in DMA mode. * The wake up latency from sleep adds ~5% overhead * for TX in some cases. * This change doesn't increase performance for wlan * PIO since the CPU usage prevents sleep in this mode. */ pm_qos_add_request(&wlan_pm_qos_latency, PM_QOS_CPU_DMA_LATENCY, 0); wlan_pm_qos_is_latency_0 = true; } } else { prcmu_qos_update_requirement(PRCMU_QOS_ARM_KHZ, "wlan", PRCMU_QOS_DEFAULT_VALUE); prcmu_qos_update_requirement(PRCMU_QOS_APE_OPP, "wlan", PRCMU_QOS_DEFAULT_VALUE); prcmu_qos_update_requirement(PRCMU_QOS_DDR_OPP, "wlan", PRCMU_QOS_DEFAULT_VALUE); if (wlan_pm_qos_is_latency_0) { pm_qos_remove_request(&wlan_pm_qos_latency); wlan_pm_qos_is_latency_0 = false; } } old_num_irqs = num_irqs; schedule_delayed_work_on(0, &work_wlan_workaround, msecs_to_jiffies(wlan_probe_delay)); }
/*
 * sun3_int7() - level-7 interrupt handler; advances the front-panel
 * LED pattern once every 2000 interrupts.
 */
static irqreturn_t sun3_int7(int irq, void *dev_id)
{
	unsigned int cnt = kstat_irqs_cpu(irq, 0);

	/* Step through an 8-entry pattern, one step per 2000 hits. */
	if (!(cnt % 2000))
		sun3_leds(led_pattern[cnt % 16000 / 2000]);
	return IRQ_HANDLED;
}
int show_interrupts(struct seq_file *p, void *v) { #ifdef CONFIG_SMP int j; #endif int irq = *(loff_t *) v; struct irqaction * action; unsigned long flags; #ifdef CONFIG_SMP if (irq == 0) { seq_puts(p, " "); for_each_online_cpu(j) seq_printf(p, "CPU%d ", j); seq_putc(p, '\n'); } #endif if (irq < ACTUAL_NR_IRQS) { raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); action = irq_desc[irq].action; if (!action) goto unlock; seq_printf(p, "%3d: ", irq); #ifndef CONFIG_SMP seq_printf(p, "%10u ", kstat_irqs(irq)); #else for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(irq, j)); #endif seq_printf(p, " %14s", irq_desc[irq].chip->name); seq_printf(p, " %c%s", (action->flags & IRQF_DISABLED)?'+':' ', action->name); for (action=action->next; action; action = action->next) { seq_printf(p, ", %c%s", (action->flags & IRQF_DISABLED)?'+':' ', action->name); } seq_putc(p, '\n'); unlock: raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } else if (irq == ACTUAL_NR_IRQS) { #ifdef CONFIG_SMP seq_puts(p, "IPI: "); for_each_online_cpu(j) seq_printf(p, "%10lu ", cpu_data[j].ipi_count); seq_putc(p, '\n'); #endif seq_printf(p, "ERR: %10lu\n", irq_err_count); } return 0; }
/*
 * Display interrupt management information through /proc/interrupts
 */
int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j, cpu;
	struct irqaction *action;
	unsigned long flags;

	switch (i) {
		/* display column title bar naming CPUs */
	case 0:
		seq_printf(p, "           ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
		break;

		/* display information rows, one per active CPU */
		/* NOTE(review): the range starts at 1, so IRQ 0 itself is
		 * never listed — case 0 only prints the header.  Presumably
		 * IRQ 0 is reserved on this platform; confirm. */
	case 1 ... NR_IRQS - 1:
		spin_lock_irqsave(&irq_desc[i].lock, flags);

		action = irq_desc[i].action;
		if (action) {
			seq_printf(p, "%3d: ", i);
			for_each_present_cpu(cpu)
				seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
			/* Chip name plus the current GxICR priority level. */
			seq_printf(p, " %14s.%u",
				   irq_desc[i].chip->name,
				   (GxICR(i) & GxICR_LEVEL) >>
				   GxICR_LEVEL_SHIFT);
			seq_printf(p, "  %s", action->name);

			for (action = action->next;
			     action;
			     action = action->next)
				seq_printf(p, ", %s", action->name);

			seq_putc(p, '\n');
		}

		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
		break;

		/* polish off with NMI and error counters */
	case NR_IRQS:
		seq_printf(p, "NMI: ");
		for (j = 0; j < NR_CPUS; j++)
			if (cpu_online(j))
				seq_printf(p, "%10u ", nmi_count(j));
		seq_putc(p, '\n');

		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
		break;
	}

	return 0;
}
static irqreturn_t sun3_int5(int irq, void *dev_id) { unsigned int cnt; #ifdef CONFIG_SUN3 intersil_clear(); #endif #ifdef CONFIG_SUN3 intersil_clear(); #endif xtime_update(1); update_process_times(user_mode(get_irq_regs())); cnt = kstat_irqs_cpu(irq, 0); if (!(cnt % 20)) sun3_leds(led_pattern[cnt % 160 / 20]); return IRQ_HANDLED; }
/*
 * per_cpu_count_show() - sysfs show: comma-separated per-CPU interrupt
 * counts for this irq_desc, e.g. "3,0,12,0\n".
 */
static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int irq = desc->irq_data.irq;
	const char *sep = "";	/* empty before the first value */
	ssize_t len = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "%s%u",
				 sep, kstat_irqs_cpu(irq, cpu));
		sep = ",";
	}

	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
/*
 * sun3_int5() - level-5 (timer) interrupt handler: acknowledges the
 * Intersil clock, pulses the level-5 enable, runs tick bookkeeping and
 * steps the front-panel LEDs.
 */
static irqreturn_t sun3_int5(int irq, void *dev_id)
{
	unsigned long flags;
	unsigned int cnt;

	local_irq_save(flags);
#ifdef CONFIG_SUN3
	intersil_clear();
#endif
	/* Toggle the level-5 enable.  NOTE(review): presumably this
	 * re-arms the edge on the interrupt controller — confirm against
	 * the sun3 hardware docs before touching the ordering here. */
	sun3_disable_irq(5);
	sun3_enable_irq(5);
#ifdef CONFIG_SUN3
	/* Second clear after the enable pulse; possibly intentional to
	 * drop an interrupt latched during the toggle — TODO confirm. */
	intersil_clear();
#endif
	xtime_update(1);
	update_process_times(user_mode(get_irq_regs()));
	cnt = kstat_irqs_cpu(irq, 0);
	/* Step through an 8-entry LED pattern, one step per 20 ticks. */
	if (!(cnt % 20))
		sun3_leds(led_pattern[cnt % 160 / 20]);
	local_irq_restore(flags);
	return IRQ_HANDLED;
}
/*
 * mt_show_current_irq_counts() - dump, via pr_err, the current per-CPU
 * interrupt counts and the delta against the last snapshot taken into
 * irq_count_mon/ipi_count_mon.  Held under mt_irq_count_lock so the
 * snapshot arrays cannot change mid-dump.
 */
void mt_show_current_irq_counts(void)
{
	int irq, cpu, count;
	unsigned long flags;
	unsigned long long t_cur, t_diff = 0;

	t_cur = sched_clock();
	spin_lock_irqsave(&mt_irq_count_lock, flags);
	pr_err("=========================================\nIRQ Status:\n");
	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		/* Time since this CPU's counters were last snapshotted. */
		t_diff = t_cur - per_cpu(save_irq_count_time, cpu);
		pr_err("Current irq counts record at [%llu] ns.(last at %llu, diff:+%llu ns)\n",
			t_cur, per_cpu(save_irq_count_time, cpu),
			usec_high(t_diff));
		pr_err("CPU%d state:%s\n",
			cpu, cpu_online(cpu) ? "online" : "offline");
		for (irq = 0; irq < nr_irqs && irq < MAX_NR_IRQS; irq++) {
			count = kstat_irqs_cpu(irq, cpu);
			/* Only IRQs that have ever fired on this CPU. */
			if (count != 0)
				pr_err(" IRQ[%3d:%14s] = %8d, (+%d times in %lld us)\n",
					irq, isr_name(irq), count,
					count - per_cpu(irq_count_mon,
							cpu).irqs[irq],
					usec_high(t_diff));
		}
	}
#ifdef CONFIG_SMP
	/* Same dump for the architecture IPIs, tracked separately. */
	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		pr_err("Local IRQ on CPU#%d:\n", cpu);
		for (irq = 0; irq < NR_IPI; irq++) {
			count = __get_irq_stat(cpu, ipi_irqs[irq]);
			if (count != 0)
				pr_err(" IRQ[%2d: IPI] = %8d,(+%d times in %lld us)\n",
					irq, count,
					count - per_cpu(ipi_count_mon,
							cpu).ipis[irq],
					usec_high(t_diff));
		}
	}
#endif
	spin_unlock_irqrestore(&mt_irq_count_lock, flags);
}
/* for MTK fiq debug log mechanism*/
/*
 * mt_aee_show_current_irq_counts() - same dump as
 * mt_show_current_irq_counts() but routed through aee_wdt_printf for
 * the watchdog/FIQ path.  The spinlock calls are deliberately left
 * commented out: this runs in a context where taking the lock could
 * deadlock (NOTE(review): presumably FIQ/WDT — confirm).
 */
static void mt_aee_show_current_irq_counts(void)
{
	int irq, cpu, count;
	unsigned long long t_cur, t_diff;

	t_cur = sched_clock();
	/* spin_lock_irqsave(&mt_irq_count_lock, flags); */
	aee_wdt_printf("\nIRQ Status\n");
	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		/* Time since this CPU's counters were last snapshotted. */
		t_diff = t_cur - per_cpu(save_irq_count_time, cpu);
		aee_wdt_printf("Dur:%lld us,(now:%lld,last:%lld)\n",
			usec_high(t_diff), usec_high(t_cur),
			usec_high(per_cpu(save_irq_count_time, cpu)));
		aee_wdt_printf("CPU%d state:%s\n",
			cpu, cpu_online(cpu) ? "online" : "offline");
		for (irq = 0; irq < nr_irqs && irq < MAX_NR_IRQS; irq++) {
			count = kstat_irqs_cpu(irq, cpu);
			/* Print delta since snapshot, then total. */
			if (count != 0)
				aee_wdt_printf(" %d:%s +%d(%d)\n",
					irq, isr_name(irq),
					count - per_cpu(irq_count_mon,
							cpu).irqs[irq],
					count);
		}
	}
#ifdef CONFIG_SMP
	/* Same dump for the architecture IPIs, tracked separately. */
	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		aee_wdt_printf("CPU#%d:\n", cpu);
		for (irq = 0; irq < NR_IPI; irq++) {
			count = __get_irq_stat(cpu, ipi_irqs[irq]);
			if (count != 0)
				aee_wdt_printf(" %d:IPI +%d(%d)\n",
					irq,
					count - per_cpu(ipi_count_mon,
							cpu).ipis[irq],
					count);
		}
	}
#endif
	/* spin_unlock_irqrestore(&mt_irq_count_lock, flags); */
}
int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, j; unsigned long flags; if (i == 0) { seq_puts(p, " "); for_each_online_cpu(j) seq_printf(p, " CPU%d", j); #ifdef PARISC_IRQ_CR16_COUNTS seq_printf(p, " [min/avg/max] (CPU cycle counts)"); #endif seq_putc(p, '\n'); } if (i < NR_IRQS) { struct irqaction *action; atomic_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; seq_printf(p, "%3d: ", i); #ifdef CONFIG_SMP for_each_online_cpu(j) seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); #else seq_printf(p, "%10u ", kstat_irqs(i)); #endif seq_printf(p, " %14s", irq_desc[i].chip->typename); #ifndef PARISC_IRQ_CR16_COUNTS seq_printf(p, " %s", action->name); while ((action = action->next)) seq_printf(p, ", %s", action->name); #else for ( ;action; action = action->next) { unsigned int k, avg, min, max; min = max = action->cr16_hist[0]; for (avg = k = 0; k < PARISC_CR16_HIST_SIZE; k++) { int hist = action->cr16_hist[k]; if (hist) { avg += hist; } else break; if (hist > max) max = hist; if (hist < min) min = hist; } avg /= k; seq_printf(p, " %s[%d/%d/%d]", action->name, min,avg,max); } #endif seq_putc(p, '\n'); skip: atomic_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; }
/*
 * show_stat() - /proc/stat seq_file show callback.  Emits the aggregate
 * "cpu" line, one "cpu%d" line per online CPU, the per-IRQ "intr" line,
 * context-switch/boot/process counters, and the per-softirq line.
 * Counter fields are in clock ticks (converted via cputime64_to_clock_t).
 */
static int show_stat(struct seq_file *p, void *v)
{
	int i, j;
	unsigned long jif;
	cputime64_t user, nice, system, idle, iowait, irq, softirq, steal;
	cputime64_t guest, guest_nice;
	u64 sum = 0;
	u64 sum_softirq = 0;
	unsigned int per_softirq_sums[NR_SOFTIRQS] = {0};
	struct timespec boottime;
	unsigned int per_irq_sum;

	user = nice = system = idle = iowait =
		irq = softirq = steal = cputime64_zero;
	guest = guest_nice = cputime64_zero;
	getboottime(&boottime);
	jif = boottime.tv_sec;	/* boot time in seconds, for "btime" */

	/* First pass: totals over all possible CPUs (offline ones keep
	 * their accumulated time, so "possible" not "online" here). */
	for_each_possible_cpu(i) {
		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
		nice = cputime64_add(nice, kstat_cpu(i).cpustat.nice);
		system = cputime64_add(system, kstat_cpu(i).cpustat.system);
		idle = cputime64_add(idle, kstat_cpu(i).cpustat.idle);
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = cputime64_add(iowait, kstat_cpu(i).cpustat.iowait);
		irq = cputime64_add(irq, kstat_cpu(i).cpustat.irq);
		softirq = cputime64_add(softirq, kstat_cpu(i).cpustat.softirq);
		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
		guest_nice = cputime64_add(guest_nice,
			kstat_cpu(i).cpustat.guest_nice);
		/* Total interrupt count for the "intr" header value. */
		for_each_irq_nr(j) {
			sum += kstat_irqs_cpu(j, i);
		}
		sum += arch_irq_stat_cpu(i);

		for (j = 0; j < NR_SOFTIRQS; j++) {
			unsigned int softirq_stat = kstat_softirqs_cpu(j, i);

			per_softirq_sums[j] += softirq_stat;
			sum_softirq += softirq_stat;
		}
	}
	sum += arch_irq_stat();

	/* Aggregate "cpu" line. */
	seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu "
		"%llu\n",
		(unsigned long long)cputime64_to_clock_t(user),
		(unsigned long long)cputime64_to_clock_t(nice),
		(unsigned long long)cputime64_to_clock_t(system),
		(unsigned long long)cputime64_to_clock_t(idle),
		(unsigned long long)cputime64_to_clock_t(iowait),
		(unsigned long long)cputime64_to_clock_t(irq),
		(unsigned long long)cputime64_to_clock_t(softirq),
		(unsigned long long)cputime64_to_clock_t(steal),
		(unsigned long long)cputime64_to_clock_t(guest),
		(unsigned long long)cputime64_to_clock_t(guest_nice));
	/* Second pass: one "cpu%d" line per online CPU. */
	for_each_online_cpu(i) {

		/* Copy values here to work around gcc-2.95.3, gcc-2.96 */
		user = kstat_cpu(i).cpustat.user;
		nice = kstat_cpu(i).cpustat.nice;
		system = kstat_cpu(i).cpustat.system;
		idle = kstat_cpu(i).cpustat.idle;
		idle = cputime64_add(idle, arch_idle_time(i));
		iowait = kstat_cpu(i).cpustat.iowait;
		irq = kstat_cpu(i).cpustat.irq;
		softirq = kstat_cpu(i).cpustat.softirq;
		steal = kstat_cpu(i).cpustat.steal;
		guest = kstat_cpu(i).cpustat.guest;
		guest_nice = kstat_cpu(i).cpustat.guest_nice;
		seq_printf(p,
			"cpu%d %llu %llu %llu %llu %llu %llu %llu %llu %llu "
			"%llu\n",
			i,
			(unsigned long long)cputime64_to_clock_t(user),
			(unsigned long long)cputime64_to_clock_t(nice),
			(unsigned long long)cputime64_to_clock_t(system),
			(unsigned long long)cputime64_to_clock_t(idle),
			(unsigned long long)cputime64_to_clock_t(iowait),
			(unsigned long long)cputime64_to_clock_t(irq),
			(unsigned long long)cputime64_to_clock_t(softirq),
			(unsigned long long)cputime64_to_clock_t(steal),
			(unsigned long long)cputime64_to_clock_t(guest),
			(unsigned long long)cputime64_to_clock_t(guest_nice));
	}
	seq_printf(p, "intr %llu", (unsigned long long)sum);

	/* sum again ? it could be updated? */
	/* Per-IRQ counts for the rest of the "intr" line; counters may
	 * have advanced since the first pass, so values can disagree
	 * slightly with the header total above. */
	for_each_irq_nr(j) {
		per_irq_sum = 0;
		for_each_possible_cpu(i)
			per_irq_sum += kstat_irqs_cpu(j, i);

		seq_printf(p, " %u", per_irq_sum);
	}

	seq_printf(p,
		"\nctxt %llu\n"
		"btime %lu\n"
		"processes %lu\n"
		"procs_running %lu\n"
		"procs_blocked %lu\n",
		nr_context_switches(),
		(unsigned long)jif,
		total_forks,
		nr_running(),
		nr_iowait());

	seq_printf(p, "softirq %llu", (unsigned long long)sum_softirq);

	for (i = 0; i < NR_SOFTIRQS; i++)
		seq_printf(p, " %u", per_softirq_sums[i]);
	seq_printf(p, "\n");

	return 0;
}