static void mux_clone(int cpu) { if (!has_mux()) return; memcpy(per_cpu(cpu_msrs, cpu).multiplex, per_cpu(cpu_msrs, 0).multiplex, sizeof(struct op_msr) * model->num_virt_counters); }
/*
 * Rotate every CPU's active hardware counters to the next multiplexed
 * set by IPI-ing nmi_cpu_switch() on each CPU.
 *
 * Returns 0 on success, -ENOSYS when the model has no multiplexing
 * support, -EINVAL when multiplexing is not currently needed/enabled.
 *
 * NOTE(review): a second definition of nmi_switch_event() appears later
 * in this file — the variant that additionally takes the CPU-hotplug
 * read lock and checks ctr_running before sending the IPI. Two
 * definitions of the same symbol will not compile; one of the two must
 * be removed (the hotplug-safe variant is the more robust one).
 */
static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	on_each_cpu(nmi_cpu_switch, NULL, 1);

	return 0;
}
/*
 * Free every CPU's multiplex MSR shadow array and reset the per-CPU
 * switch cursor. Pointers are NULLed after kfree() so a repeated
 * shutdown is harmless.
 */
static void nmi_shutdown_mux(void)
{
	int cpu;

	if (!has_mux())
		return;

	for_each_possible_cpu(cpu) {
		kfree(per_cpu(cpu_msrs, cpu).multiplex);
		per_cpu(cpu_msrs, cpu).multiplex = NULL;
		per_cpu(switch_index, cpu) = 0;
	}
}
/*
 * Rotate every CPU's active hardware counters to the next multiplexed
 * set. This variant serializes against CPU hotplug with
 * get_online_cpus()/put_online_cpus() and only sends the IPI while the
 * counters are actually running (ctr_running).
 *
 * Returns 0 on success, -ENOSYS when the model has no multiplexing
 * support, -EINVAL when multiplexing is not currently needed/enabled.
 *
 * NOTE(review): this duplicates the earlier nmi_switch_event()
 * definition in this file; two definitions of the same symbol will not
 * compile. Keep this hotplug-safe variant and delete the other.
 */
static int nmi_switch_event(void)
{
	if (!has_mux())
		return -ENOSYS;		/* not implemented */
	if (nmi_multiplex_on() < 0)
		return -EINVAL;		/* not necessary */

	get_online_cpus();
	if (ctr_running)
		on_each_cpu(nmi_cpu_switch, NULL, 1);
	put_online_cpus();

	return 0;
}
/*
 * Allocate a multiplex MSR shadow array for every possible CPU.
 *
 * Returns 1 on success, 0 on allocation failure (this file's
 * success/failure convention). On failure, buffers already allocated
 * for earlier CPUs are left in place — presumably the caller tears
 * them down via nmi_shutdown_mux(); confirm against the call site.
 */
static int nmi_setup_mux(void)
{
	size_t sz = sizeof(struct op_msr) * model->num_virt_counters;
	int cpu;

	if (!has_mux())
		return 1;

	for_each_possible_cpu(cpu) {
		struct op_msr *buf = kmalloc(sz, GFP_KERNEL);

		if (!buf)
			return 0;
		per_cpu(cpu_msrs, cpu).multiplex = buf;
	}

	return 1;
}
static void nmi_cpu_setup_mux(int cpu, struct op_msrs const * const msrs) { int i; struct op_msr *multiplex = msrs->multiplex; if (!has_mux()) return; for (i = 0; i < model->num_virt_counters; ++i) { if (counter_config[i].enabled) { multiplex[i].saved = -(u64)counter_config[i].count; } else { multiplex[i].saved = 0; } } per_cpu(switch_index, cpu) = 0; }
/*
 * Publish the counter-rotation callback to the oprofile core, but only
 * when the CPU model actually supports multiplexing.
 */
static inline void mux_init(struct oprofile_operations *ops)
{
	if (!has_mux())
		return;

	ops->switch_events = nmi_switch_event;
}