/*
 * Register every present CPU with the architecture's sysfs CPU layer
 * at boot time.  Always succeeds.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		arch_register_cpu(cpu);

	return 0;
}
/*
 * Register each present CPU's per-CPU topology device with the generic
 * CPU sysfs infrastructure.  Always succeeds.
 */
static int __init topology_init(void)
{
	int nr;

	for_each_present_cpu(nr)
		register_cpu(&per_cpu(cpu_topology, nr), nr);

	return 0;
}
/*
 * Register the static cpu_devices entry of every present CPU with the
 * generic CPU sysfs infrastructure.  Always succeeds.
 */
static int __init topology_init(void)
{
	int cpu;

	for_each_present_cpu(cpu)
		register_cpu(&cpu_devices[cpu], cpu);

	return 0;
}
/* * Display interrupt management information through /proc/interrupts */ int show_interrupts(struct seq_file *p, void *v) { int i = *(loff_t *) v, j, cpu; struct irqaction *action; unsigned long flags; switch (i) { /* display column title bar naming CPUs */ case 0: seq_printf(p, " "); for (j = 0; j < NR_CPUS; j++) if (cpu_online(j)) seq_printf(p, "CPU%d ", j); seq_putc(p, '\n'); break; /* display information rows, one per active CPU */ case 1 ... NR_IRQS - 1: spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { seq_printf(p, "%3d: ", i); for_each_present_cpu(cpu) seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu)); seq_printf(p, " %14s.%u", irq_desc[i].chip->name, (GxICR(i) & GxICR_LEVEL) >> GxICR_LEVEL_SHIFT); seq_printf(p, " %s", action->name); for (action = action->next; action; action = action->next) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); } spin_unlock_irqrestore(&irq_desc[i].lock, flags); break; /* polish off with NMI and error counters */ case NR_IRQS: seq_printf(p, "NMI: "); for (j = 0; j < NR_CPUS; j++) if (cpu_online(j)) seq_printf(p, "%10u ", nmi_count(j)); seq_putc(p, '\n'); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); break; } return 0; }
/*
 * Dump the per-CPU IPI delivery counters into a seq_file: one row per
 * IPI type, one column per present CPU, label width controlled by the
 * caller via @prec.
 */
void show_ipi_list(struct seq_file *p, int prec)
{
	unsigned int n, cpu;

	for (n = 0; n < NR_IPI; n++) {
		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", n,
			   prec >= 4 ? " " : "");

		for_each_present_cpu(cpu)
			seq_printf(p, "%10u ",
				   __get_irq_stat(cpu, ipi_irqs[n]));

		seq_printf(p, " %s\n", ipi_types[n]);
	}
}
/*
 * Register the NUMA nodes (when configured) and all present CPUs with
 * the sysfs topology layer at boot.  Always succeeds.
 */
static int __init topology_init(void)
{
	int nr;

#ifdef CONFIG_NUMA
	for_each_online_node(nr)
		register_one_node(nr);
#endif /* CONFIG_NUMA */

	for_each_present_cpu(nr)
		arch_register_cpu(nr);

	return 0;
}
void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Ask each present CPU to reset its own alert counter;
		 * the counter increment is not atomic, so we cannot
		 * safely clear them from here.
		 */
		for_each_present_cpu(cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
static int gator_events_block_start(void) { int cpu; for_each_present_cpu(cpu) per_cpu(new_data_avail, cpu) = true; // register tracepoints if (block_rq_wr_enabled || block_rq_rd_enabled) if (GATOR_REGISTER_TRACE(block_rq_complete)) goto fail_block_rq_exit; pr_debug("gator: registered block event tracepoints\n"); return 0; // unregister tracepoints on error fail_block_rq_exit: pr_err("gator: block event tracepoints failed to activate, please verify that tracepoints are enabled in the linux kernel\n"); return -1; }
/*
 * Wire up the per-CPU BPMP inbox interrupts, register the CPU-hotplug
 * notifier and pin each IRQ to its CPU.
 *
 * Returns 0 on success or the failing call's negative errno.  On
 * failure every IRQ that was already requested is released again, so
 * the function does not leak IRQ registrations (the original returned
 * early and left them requested).
 */
int bpmp_init_irq(void)
{
	long ch;
	int r;
	int i;

	for (i = 0; i < ARRAY_SIZE(cpu_irqs); i++) {
		ch = PER_CPU_IB_CH(i);
		r = request_irq(cpu_irqs[i], bpmp_inbox_irq, 0,
				"bpmp", (void *)ch);
		if (r)
			goto free_irqs;
	}

	r = register_cpu_notifier(&bpmp_cpu_nb);
	if (r)
		goto free_irqs;

	for_each_present_cpu(i)
		bpmp_irq_set_affinity(i);

	return 0;

free_irqs:
	/* release IRQs [0, i); dev_id must match the request_irq cookie */
	while (--i >= 0)
		free_irq(cpu_irqs[i], (void *)PER_CPU_IB_CH(i));
	return r;
}