/*
 * After a CPU went down, cycle through all the others and rediscover.
 * Must run in process context.
 */
void cmci_rediscover(int dying)
{
	int banks;
	int cpu;
	cpumask_var_t old;

	if (!cmci_supported(&banks))
		return;
	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	for_each_online_cpu(cpu) {
		if (cpu == dying)
			continue;
		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
			continue;
		/* Recheck banks in case CPUs don't all have the same */
		if (cmci_supported(&banks))
			cmci_discover(banks, 0);
	}

	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}
static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}
/* After a CPU went down, cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}
/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
		return;

	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}
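/*
 * Note: __cmci_disable_bank() is used by cmci_disable_bank() and the
 * newer cmci_clear() below but is not part of this section. A minimal
 * sketch, assuming it follows the same per-bank MSR pattern as the
 * older cmci_clear() variant further down (names and constants are
 * taken from that variant, not verified against the rest of the file):
 */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	/* Caller is expected to hold cmci_discover_lock. */
	if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
		return;

	/* Clear the CMCI enable bit so another CPU can claim this bank. */
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, __get_cpu_var(mce_banks_owned));
}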
void cmci_disable_bank(int bank)
{
	int banks;
	unsigned long flags;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks, 1);
	/*
	 * For CPU #0 this runs with the APIC still disabled, but that's
	 * OK because only the vector is set up. We still do another
	 * check of the banks later for CPU #0 just to make sure
	 * not to miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}
/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;
	u64 val;

	if (!cmci_supported(&banks))
		return;

	spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
			continue;
		/* Disable CMCI */
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
		val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		__clear_bit(i, __get_cpu_var(mce_banks_owned));
	}
	spin_unlock_irqrestore(&cmci_discover_lock, flags);
}
/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks, 0);
}
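/*
 * Illustration only: the entry points above are meant to be driven
 * from the CPU hotplug path, and that wiring is not part of this
 * section. A hypothetical notifier, sketched purely from the comments
 * on cmci_clear(), cmci_reenable() and cmci_rediscover(), could tie
 * them together roughly as follows. mce_cpu_callback() and the two
 * *_on_cpu() wrappers are assumptions for this sketch, not names
 * taken from this file.
 */
static void cmci_clear_on_cpu(void *arg)
{
	cmci_clear();
}

static void cmci_reenable_on_cpu(void *arg)
{
	cmci_reenable();
}

static int mce_cpu_callback(struct notifier_block *nfb,
			    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/* Release this CPU's banks so other CPUs can claim them. */
		smp_call_function_single(cpu, cmci_clear_on_cpu, NULL, 1);
		break;
	case CPU_DOWN_FAILED:
		/* The CPU stays online after all: reclaim its banks. */
		smp_call_function_single(cpu, cmci_reenable_on_cpu, NULL, 1);
		break;
	case CPU_DEAD:
		/* Let the surviving CPUs pick up the orphaned banks. */
		cmci_rediscover();
		break;
	}
	return NOTIFY_OK;
}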