static int nmi_timer_setup(void) { int cpu, err; u64 period; /* clock cycles per tick: */ period = (u64)cpu_khz * 1000; do_div(period, HZ); nmi_timer_attr.sample_period = period; cpu_notifier_register_begin(); err = __register_cpu_notifier(&nmi_timer_cpu_nb); if (err) goto out; /* can't attach events to offline cpus: */ for_each_online_cpu(cpu) { err = nmi_timer_start_cpu(cpu); if (err) { cpu_notifier_register_done(); nmi_timer_shutdown(); return err; } } out: cpu_notifier_register_done(); return err; }
static int zcomp_init(struct zcomp *comp) { unsigned long cpu; int ret; comp->notifier.notifier_call = zcomp_cpu_notifier; comp->stream = alloc_percpu(struct zcomp_strm *); if (!comp->stream) return -ENOMEM; cpu_notifier_register_begin(); for_each_online_cpu(cpu) { ret = __zcomp_cpu_notifier(comp, CPU_UP_PREPARE, cpu); if (ret == NOTIFY_BAD) goto cleanup; } __register_cpu_notifier(&comp->notifier); cpu_notifier_register_done(); return 0; cleanup: for_each_online_cpu(cpu) __zcomp_cpu_notifier(comp, CPU_UP_CANCELED, cpu); cpu_notifier_register_done(); return -ENOMEM; }
/*
 * Module init for the msr-safe interface: bring up the batch interface,
 * the MSR whitelist, the character device, its class, one device node
 * per online CPU, and finally the CPU hotplug notifier.  On failure,
 * teardown runs in strict reverse order via the goto chain.
 */
static int __init msr_init(void)
{
	int cpu = 0;
	int err = 0;

	err = msrbatch_init();
	if (err != 0) {
		pr_err("failed to initialize msrbatch\n");
		goto out;
	}

	err = msr_whitelist_init();
	if (err != 0) {
		pr_err("failed to initialize whitelist for msr\n");
		goto out_batch;
	}

	if (__register_chrdev(MSR_MAJOR, 0, num_possible_cpus(),
			      "cpu/msr", &msr_fops)) {
		pr_err("unable to get major %d for msr\n", MSR_MAJOR);
		err = -EBUSY;
		goto out_wlist;
	}

	msr_class = class_create(THIS_MODULE, "msr");
	if (IS_ERR(msr_class)) {
		err = PTR_ERR(msr_class);
		goto out_chrdev;
	}
	msr_class->devnode = msr_devnode;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		err = msr_device_create(cpu);
		if (err != 0)
			goto out_class;
	}
	__register_hotcpu_notifier(&msr_class_cpu_notifier);
	cpu_notifier_register_done();

	err = 0;
	goto out;

out_class:
	/* Destroy nodes for every online CPU, created or not. */
	cpu = 0;
	for_each_online_cpu(cpu)
		msr_device_destroy(cpu);
	cpu_notifier_register_done();
	class_destroy(msr_class);
out_chrdev:
	__unregister_chrdev(MSR_MAJOR, 0, num_possible_cpus(), "cpu/msr");
out_wlist:
	msr_whitelist_cleanup();
out_batch:
	msrbatch_cleanup();
out:
	return err;
}
/*
 * One-time setup of AMD Instruction-Based Sampling (IBS) perf support.
 *
 * Probes the CPU's IBS capabilities, programs the APIC extended-interrupt
 * LVT entry on all CPUs, registers the perf CPU notifier, and finally
 * registers the IBS perf PMUs.  Returns 0 on success, -ENODEV when the
 * CPU has no IBS support, or a negative errno on setup failure.
 */
static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	/* Bail out if no usable EILVT entry could be established. */
	if (!ibs_eilvt_valid())
		goto out;

	perf_ibs_pm_init();
	cpu_notifier_register_begin();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	/*
	 * Program the APIC on every other online CPU only after the
	 * barrier above guarantees they observe the published ibs_caps.
	 */
	smp_call_function(setup_APIC_ibs, NULL, 1);
	__perf_cpu_notifier(perf_ibs_cpu_notifier);
	cpu_notifier_register_done();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}
/*
 * One-time initialisation.
 *
 * Counts the debug registers, resets breakpoint state on all CPUs,
 * installs the debug fault handlers, and registers the hotplug notifier
 * plus the cpu_suspend restore hook so debug state survives CPU
 * offline/online and suspend cycles.  Always returns 0.
 */
static int __init arch_hw_breakpoint_init(void)
{
	core_num_brps = get_num_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps, core_num_wrps);

	cpu_notifier_register_begin();

	/*
	 * Reset the breakpoint resources. We assume that a halting
	 * debugger will leave the world in a nice state for us.
	 */
	/* Other CPUs first, then this CPU directly. */
	smp_call_function(hw_breakpoint_reset, NULL, 1);
	hw_breakpoint_reset(NULL);

	/* Register debug fault handlers. */
	hook_debug_fault_code(DBG_ESR_EVT_HWBP, breakpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-breakpoint handler");
	hook_debug_fault_code(DBG_ESR_EVT_HWWP, watchpoint_handler, SIGTRAP,
			      TRAP_HWBKPT, "hw-watchpoint handler");

	/* Register hotplug notifier. */
	__register_cpu_notifier(&hw_breakpoint_reset_nb);

	cpu_notifier_register_done();

	/* Register cpu_suspend hw breakpoint restore hook */
	cpu_suspend_set_dbg_restorer(hw_breakpoint_reset);

	return 0;
}
/*
 * Create the /proc/profile entry and its per-CPU hash tables, and hook
 * the profiling CPU hotplug callback.  No-op (returns 0) when profiling
 * is disabled.  Returns 0 on success or -ENOMEM on allocation failure.
 */
int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */
{
	struct proc_dir_entry *entry;
	int err = 0;

	if (!prof_on)
		return 0;

	cpu_notifier_register_begin();

	if (create_hash_tables()) {
		err = -ENOMEM;
		goto out;
	}

	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			    NULL, &proc_profile_operations);
	if (!entry) {
		/*
		 * Previously this path fell through with err still 0 and
		 * reported success even though the proc entry was never
		 * created and the notifier never registered.
		 */
		err = -ENOMEM;
		goto out;
	}
	proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t));
	__hotcpu_notifier(profile_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return err;
}
/*
 * Set up oprofile NMI-based profiling: allocate and clone the per-CPU
 * MSR shadow state, install the NMI handler, then enable counting on
 * every CPU and register the hotplug notifier.
 * Returns 0 on success or a negative errno (MSR state freed on failure).
 */
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	/* Clone CPU 0's counter/control layout to every other CPU. */
	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
					0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}
static int zswap_cpu_init(void) { unsigned long cpu; cpu_notifier_register_begin(); for_each_online_cpu(cpu) if (__zswap_cpu_notifier(CPU_UP_PREPARE, cpu) != NOTIFY_OK) goto cleanup; __register_cpu_notifier(&zswap_cpu_notifier_block); cpu_notifier_register_done(); return 0; cleanup: for_each_online_cpu(cpu) __zswap_cpu_notifier(CPU_UP_CANCELED, cpu); cpu_notifier_register_done(); return -ENOMEM; }
/*
 * Module init: register the cpuid character device, create its class
 * and one device node per online CPU, then hook CPU hotplug so nodes
 * track CPUs coming and going.  Teardown on failure is in reverse
 * order via the goto chain.
 */
static int __init cpuid_init(void)
{
	int cpu = 0;
	int err = 0;

	if (__register_chrdev(CPUID_MAJOR, 0, NR_CPUS,
			      "cpu/cpuid", &cpuid_fops)) {
		printk(KERN_ERR "cpuid: unable to get major %d for cpuid\n",
		       CPUID_MAJOR);
		err = -EBUSY;
		goto out;
	}

	cpuid_class = class_create(THIS_MODULE, "cpuid");
	if (IS_ERR(cpuid_class)) {
		err = PTR_ERR(cpuid_class);
		goto out_chrdev;
	}
	cpuid_class->devnode = cpuid_devnode;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		err = cpuid_device_create(cpu);
		if (err != 0)
			goto out_class;
	}
	__register_hotcpu_notifier(&cpuid_class_cpu_notifier);
	cpu_notifier_register_done();

	err = 0;
	goto out;

out_class:
	/* Remove nodes for every online CPU, created or not. */
	cpu = 0;
	for_each_online_cpu(cpu)
		cpuid_device_destroy(cpu);
	cpu_notifier_register_done();
	class_destroy(cpuid_class);
out_chrdev:
	__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
out:
	return err;
}
/*
 * Initialise the vsyscall page on every CPU and register the hotplug
 * callback so newly onlined CPUs are initialised too.  Always returns 0.
 */
static int __init vsyscall_init(void)
{
	cpu_notifier_register_begin();

	on_each_cpu(cpu_vsyscall_init, NULL, 1);
	/* notifier priority > KVM */
	__hotcpu_notifier(cpu_vsyscall_notifier, 30);

	cpu_notifier_register_done();

	return 0;
}
/*
 * Module exit: tear down the per-CPU msr device nodes, the class, the
 * character device, and the hotplug notifier — all under the notifier
 * registration lock so hotplug cannot race the teardown.
 */
static void __exit msr_exit(void)
{
	int c = 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(c)
		msr_device_destroy(c);
	class_destroy(msr_class);
	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
	__unregister_hotcpu_notifier(&msr_class_cpu_notifier);
	cpu_notifier_register_done();
}
/*
 * Module exit: remove the per-CPU cpuid device nodes, destroy the class
 * and character device, and drop the hotplug notifier — all under the
 * notifier registration lock so hotplug cannot race the teardown.
 */
static void __exit cpuid_exit(void)
{
	int c = 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(c)
		cpuid_device_destroy(c);
	class_destroy(cpuid_class);
	__unregister_chrdev(CPUID_MAJOR, 0, NR_CPUS, "cpu/cpuid");
	__unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
	cpu_notifier_register_done();
}
/*
 * Module init: register the msr character device, create its class and
 * one device node per online CPU, then hook CPU hotplug so nodes track
 * CPUs coming and going.  Teardown on failure is in reverse order via
 * the goto chain.
 */
static int __init msr_init(void)
{
	int cpu = 0;
	int err = 0;

	if (__register_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr", &msr_fops)) {
		pr_err("unable to get major %d for msr\n", MSR_MAJOR);
		err = -EBUSY;
		goto out;
	}

	msr_class = class_create(THIS_MODULE, "msr");
	if (IS_ERR(msr_class)) {
		err = PTR_ERR(msr_class);
		goto out_chrdev;
	}
	msr_class->devnode = msr_devnode;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		err = msr_device_create(cpu);
		if (err != 0)
			goto out_class;
	}
	__register_hotcpu_notifier(&msr_class_cpu_notifier);
	cpu_notifier_register_done();

	err = 0;
	goto out;

out_class:
	/* Remove nodes for every online CPU, created or not. */
	cpu = 0;
	for_each_online_cpu(cpu)
		msr_device_destroy(cpu);
	cpu_notifier_register_done();
	class_destroy(msr_class);
out_chrdev:
	__unregister_chrdev(MSR_MAJOR, 0, NR_CPUS, "cpu/msr");
out:
	return err;
}
/*
 * Tear down a zcomp: release every online CPU's compression stream and
 * the hotplug notifier under the registration lock, then free the
 * per-CPU pointer array and the zcomp itself.
 */
void zcomp_destroy(struct zcomp *comp)
{
	unsigned long id;

	cpu_notifier_register_begin();
	for_each_online_cpu(id)
		__zcomp_cpu_notifier(comp, CPU_UP_CANCELED, id);
	__unregister_cpu_notifier(&comp->notifier);
	cpu_notifier_register_done();

	free_percpu(comp->stream);
	kfree(comp);
}
/*
 * Module exit for the msr-safe interface: tear down the per-CPU device
 * nodes, class, character device, and hotplug notifier under the
 * registration lock, then clean up the whitelist and batch state.
 */
static void __exit msr_exit(void)
{
	int c = 0;

	cpu_notifier_register_begin();
	for_each_online_cpu(c)
		msr_device_destroy(c);
	class_destroy(msr_class);
	__unregister_chrdev(MSR_MAJOR, 0, num_possible_cpus(), "cpu/msr");
	__unregister_hotcpu_notifier(&msr_class_cpu_notifier);
	cpu_notifier_register_done();

	msr_whitelist_cleanup();
	msrbatch_cleanup();
}
/*
 * Stop NMI timer profiling: drop the hotplug notifier, then disable and
 * release the perf event on every possible CPU, clearing the per-CPU
 * slot before the event is freed.
 */
static void nmi_timer_shutdown(void)
{
	struct perf_event *ev;
	int c;

	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&nmi_timer_cpu_nb);
	for_each_possible_cpu(c) {
		ev = per_cpu(nmi_timer_events, c);
		if (!ev)
			continue;
		perf_event_disable(ev);
		per_cpu(nmi_timer_events, c) = NULL;
		perf_event_release_kernel(ev);
	}
	cpu_notifier_register_done();
}
/*
 * Create the sysfs topology device for every online CPU and register
 * the hotplug callback so future CPUs get one too.  Returns 0 on
 * success or the first topology_add_dev() error.
 */
static int __cpuinit topology_sysfs_init(void)
{
	int rc = 0;
	int cpu;

	cpu_notifier_register_begin();
	for_each_online_cpu(cpu) {
		rc = topology_add_dev(cpu);
		if (rc)
			goto done;
	}
	__hotcpu_notifier(topology_cpu_callback, 0);
done:
	cpu_notifier_register_done();
	return rc;
}
/*
 * Enable boost support when the CPU advertises CPB (AMD) or IDA (Intel):
 * allocate the MSR scratch array, record the current boost state, force
 * all CPUs' boost MSRs to the same value, and register the hotplug
 * notifier so newly onlined CPUs are synchronised too.
 */
static void __init acpi_cpufreq_boost_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_CPB) && !boot_cpu_has(X86_FEATURE_IDA))
		return;

	msrs = msrs_alloc();
	if (!msrs)
		return;

	acpi_cpufreq_driver.boost_supported = true;
	acpi_cpufreq_driver.boost_enabled = boost_state(0);

	cpu_notifier_register_begin();

	/* Force all MSRs to the same value */
	boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpu_online_mask);

	__register_cpu_notifier(&boost_nb);

	cpu_notifier_register_done();
}
/*
 * Register sysfs nodes for NUMA nodes and every possible CPU, marking
 * the currently-online ones, then hook the hotplug notifier so sysfs
 * tracks later online/offline transitions.  Always returns 0.
 */
static int __init topology_init(void)
{
	int i;

	register_nodes();

	check_mmu_stats();

	cpu_notifier_register_begin();
	for_each_possible_cpu(i) {
		struct cpu *dev = &per_cpu(cpu_devices, i);

		register_cpu(dev, i);
		if (cpu_online(i))
			register_cpu_online(i);
	}
	__register_cpu_notifier(&sysfs_cpu_nb);
	cpu_notifier_register_done();

	return 0;
}
/*
 * Tear down oprofile NMI profiling: stop counting on every CPU, drop
 * the hotplug notifier, unhook the NMI handler, and free the per-CPU
 * MSR shadow state.
 */
static void nmi_shutdown(void)
{
	struct op_msrs *msrs;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' & 'ctr_running' */
	get_online_cpus();
	on_each_cpu(nmi_cpu_shutdown, NULL, 1);
	nmi_enabled = 0;
	ctr_running = 0;
	__unregister_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	/* make variables visible to the nmi handler: */
	smp_mb();
	/* Only safe to unhook the handler once no CPU can enter it. */
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
	/* get_cpu_var/put_cpu_var pair pins this CPU across the shutdown. */
	msrs = &get_cpu_var(cpu_msrs);
	model->shutdown(msrs);
	free_msrs();
	put_cpu_var(cpu_msrs);
}