static int nmi_timer_setup(void)
{
	int cpu, err;
	u64 period;

	/* clock cycles per tick: */
	period = (u64)cpu_khz * 1000;
	do_div(period, HZ);
	nmi_timer_attr.sample_period = period;

	cpu_notifier_register_begin();
	err = __register_cpu_notifier(&nmi_timer_cpu_nb);
	if (err)
		goto out;

	/* can't attach events to offline cpus: */
	for_each_online_cpu(cpu) {
		err = nmi_timer_start_cpu(cpu);
		if (err) {
			cpu_notifier_register_done();
			nmi_timer_shutdown();
			return err;
		}
	}

out:
	cpu_notifier_register_done();
	return err;
}
/*
 * One-time initialisation.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
static int zcomp_init(struct zcomp *comp)
{
	unsigned long cpu;
	int ret;

	comp->notifier.notifier_call = zcomp_cpu_notifier;

	comp->stream = alloc_percpu(struct zcomp_strm *);
	if (!comp->stream)
		return -ENOMEM;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu);
		if (ret == NOTIFY_BAD)
			goto cleanup;
	}
	__register_cpu_notifier(&comp->notifier);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/*
	 * We need to serialize save and setup for HT because the subsets
	 * of MSRs are distinct for the save and setup operations.
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}
int kvm_timer_hyp_init(void)
{
	struct device_node *np;
	unsigned int ppi;
	int err;

	timecounter = arch_timer_get_timecounter();
	if (!timecounter)
		return -ENODEV;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		kvm_err("kvm_arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	ppi = irq_of_parse_and_map(np, 2);
	if (!ppi) {
		kvm_err("kvm_arch_timer: no virtual timer interrupt\n");
		err = -EINVAL;
		goto out;
	}

	err = request_percpu_irq(ppi, kvm_arch_timer_handler,
				 "kvm guest timer", kvm_get_running_vcpus());
	if (err) {
		kvm_err("kvm_arch_timer: can't request interrupt %d (%d)\n",
			ppi, err);
		goto out;
	}

	host_vtimer_irq = ppi;

	err = __register_cpu_notifier(&kvm_timer_cpu_nb);
	if (err) {
		kvm_err("Cannot register timer CPU notifier\n");
		goto out_free;
	}

	wqueue = create_singlethread_workqueue("kvm_arch_timer");
	if (!wqueue) {
		err = -ENOMEM;
		goto out_free;
	}

	kvm_info("%s IRQ%d\n", np->name, ppi);
	on_each_cpu(kvm_timer_init_interrupt, NULL, 1);

	goto out;
out_free:
	free_percpu_irq(ppi, kvm_get_running_vcpus());
out:
	of_node_put(np);
	return err;
}
static int zswap_cpu_init(void)
{
	unsigned long cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu)
		if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK)
			goto cleanup;
	__register_cpu_notifier(&zswap_cpu_notifier_block);
	cpu_notifier_register_done();
	return 0;

cleanup:
	for_each_online_cpu(cpu)
		__zswap_cpu_notifier(CPU_UP_CANCELED, cpu);
	cpu_notifier_register_done();
	return -ENOMEM;
}
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();
		if (!msrs)
			return;

		acpi_cpufreq_driver.boost_supported = true;
		acpi_cpufreq_driver.boost_enabled = boost_state(0);

		cpu_notifier_register_begin();

		/* Force all MSRs to the same value */
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
			       cpu_online_mask);

		__register_cpu_notifier(&boost_nb);

		cpu_notifier_register_done();
	}
}
static int __init topology_init(void)
{
	int cpu;

	register_nodes();

	check_mmu_stats();

	cpu_notifier_register_begin();

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

		register_cpu(c, cpu);
		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	__register_cpu_notifier(&sysfs_cpu_nb);

	cpu_notifier_register_done();

	return 0;
}
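/*
 * Every call site above follows the same shape: take the hotplug
 * registration lock with cpu_notifier_register_begin(), initialise the
 * CPUs that are already online, register the notifier with
 * __register_cpu_notifier() so no hotplug event can slip between those
 * two steps, then drop the lock with cpu_notifier_register_done().
 * Below is a minimal sketch of that pattern; the foo_* names and the
 * notifier block are hypothetical, not taken from any file above.
 */
static int foo_prepare_cpu(int cpu)
{
	/* hypothetical per-CPU setup; always succeeds in this sketch */
	return 0;
}

static int foo_cpu_callback(struct notifier_block *nb,
			    unsigned long action, void *hcpu)
{
	/* react to CPU_UP_PREPARE, CPU_DEAD, ... as needed */
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
	.notifier_call = foo_cpu_callback,
};

static int __init foo_init(void)
{
	int cpu, err = 0;

	/*
	 * No CPU can come or go between setting up the online CPUs and
	 * registering the notifier; without the lock, a hotplug event in
	 * that window would be missed (or handled twice).
	 */
	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		err = foo_prepare_cpu(cpu);
		if (err)
			goto out;
	}
	__register_cpu_notifier(&foo_cpu_nb);
out:
	cpu_notifier_register_done();
	return err;
}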