static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	cpu_notifier_register_begin();

	/* Use get/put_online_cpus() to protect 'nmi_enabled' */
	get_online_cpus();
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	__register_cpu_notifier(&oprofile_cpu_nb);
	put_online_cpus();

	cpu_notifier_register_done();

	return 0;
fail:
	free_msrs();
	return err;
}
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/oprofile:online",
				nmi_cpu_online, nmi_cpu_down_prep);
	if (err < 0)
		goto fail_nmi;
	cpuhp_nmi_online = err;
	return 0;
fail_nmi:
	unregister_nmi_handler(NMI_LOCAL, "oprofile");
fail:
	free_msrs();
	return err;
}
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		return -ENOMEM;

	/* Assume saved/restored counters are the same on all CPUs */
	err = model->fill_in_addresses(&per_cpu(cpu_msrs, 0));
	if (err)
		goto fail;

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	nmi_enabled = 0;
	ctr_running = 0;
	/* make variables visible to the nmi handler: */
	smp_mb();
	err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify,
				   0, "oprofile");
	if (err)
		goto fail;

	get_online_cpus();
	register_cpu_notifier(&oprofile_cpu_nb);
	nmi_enabled = 1;
	/* make nmi_enabled visible to the nmi handler: */
	smp_mb();
	on_each_cpu(nmi_cpu_setup, NULL, 1);
	put_online_cpus();

	return 0;
fail:
	free_msrs();
	return err;
}
static int nmi_setup(void)
{
	int err = 0;
	int cpu;

	if (!allocate_msrs())
		err = -ENOMEM;
	else if (!nmi_setup_mux())
		err = -ENOMEM;
	else
		err = register_die_notifier(&profile_exceptions_nb);

	if (err) {
		free_msrs();
		nmi_shutdown_mux();
		return err;
	}

	/* We need to serialize save and setup for HT because the subset
	 * of msrs are distinct for save and setup operations
	 */

	/* Assume saved/restored counters are the same on all CPUs */
	model->fill_in_addresses(&per_cpu(cpu_msrs, 0));

	for_each_possible_cpu(cpu) {
		if (!cpu)
			continue;

		memcpy(per_cpu(cpu_msrs, cpu).counters,
		       per_cpu(cpu_msrs, 0).counters,
		       sizeof(struct op_msr) * model->num_counters);

		memcpy(per_cpu(cpu_msrs, cpu).controls,
		       per_cpu(cpu_msrs, 0).controls,
		       sizeof(struct op_msr) * model->num_controls);

		mux_clone(cpu);
	}

	on_each_cpu(nmi_cpu_setup, NULL, 1);
	nmi_enabled = 1;

	return 0;
}