void
mca_cpu_init(void)
{
	unsigned int	i;

	/*
	 * The first (boot) processor is responsible for discovering the
	 * machine check architecture present on this machine.
	 */
	if (!mca_initialized) {
		mca_get_availability();
		mca_initialized = TRUE;
		simple_lock_init(&mca_lock, 0);
	}

	if (mca_MCA_present) {
		/* Enable all MCA features */
		if (mca_control_MSR_present)
			wrmsr64(IA32_MCG_CTL, IA32_MCG_CTL_ENABLE);

		switch (mca_family) {
		case 0x06:
			/* Enable all but mc0 */
			for (i = 1; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);

			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		case 0x0F:
			/* Enable all banks */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_CTL(i), 0xFFFFFFFFFFFFFFFFULL);

			/* Clear all errors */
			for (i = 0; i < mca_error_bank_count; i++)
				wrmsr64(IA32_MCi_STATUS(i), 0ULL);
			break;
		}
	}

	/* Enable machine check exception handling if available */
	if (mca_MCE_present) {
		set_cr4(get_cr4() | CR4_MCE);
	}
}
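/*
 * Note on the bank accessors above (a sketch, not definitions from this
 * file): the IA32_MCi_*(i) macros are assumed to follow the architectural
 * MCA layout, in which each error-reporting bank i occupies four
 * consecutive MSRs starting at IA32_MC0_CTL (0x400):
 *
 *	#define IA32_MC0_CTL		0x400
 *	#define IA32_MCi_CTL(i)		(IA32_MC0_CTL + 4*(i))
 *	#define IA32_MCi_STATUS(i)	(IA32_MC0_CTL + 4*(i) + 1)
 *	#define IA32_MCi_ADDR(i)	(IA32_MC0_CTL + 4*(i) + 2)
 *	#define IA32_MCi_MISC(i)	(IA32_MC0_CTL + 4*(i) + 3)
 *
 * Bank 0 is skipped in the family-0x06 path because, on P6-family parts,
 * IA32_MC0_CTL is typically owned by platform firmware and the SDM advises
 * the OS not to modify it.
 */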
static void
mca_save_state(mca_state_t *mca_state)
{
	mca_mci_bank_t	*bank;
	unsigned int	i;

	assert(!ml_get_interrupts_enabled() || get_preemption_level() > 0);

	if (mca_state == NULL)
		return;

	mca_state->mca_mcg_ctl = mca_control_MSR_present ?
					rdmsr64(IA32_MCG_CTL) : 0ULL;
	mca_state->mca_mcg_status.u64 = rdmsr64(IA32_MCG_STATUS);

	bank = (mca_mci_bank_t *) &mca_state->mca_error_bank[0];
	for (i = 0; i < mca_error_bank_count; i++, bank++) {
		bank->mca_mci_ctl        = rdmsr64(IA32_MCi_CTL(i));
		bank->mca_mci_status.u64 = rdmsr64(IA32_MCi_STATUS(i));
		if (!bank->mca_mci_status.bits.val)
			continue;
		bank->mca_mci_misc = (bank->mca_mci_status.bits.miscv) ?
					rdmsr64(IA32_MCi_MISC(i)) : 0ULL;
		bank->mca_mci_addr = (bank->mca_mci_status.bits.addrv) ?
					rdmsr64(IA32_MCi_ADDR(i)) : 0ULL;
	}

	/*
	 * If we're the first thread with MCA state, point our package to it
	 * and don't care about races.
	 */
	if (x86_package()->mca_state == NULL)
		x86_package()->mca_state = mca_state;
}
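/*
 * Usage sketch (hypothetical caller; `cpu_mca_state` is an assumed
 * per-CPU buffer allocated at CPU bring-up, not defined in this file):
 *
 *	void
 *	mca_check_save(void)
 *	{
 *		mca_save_state(current_cpu_datap()->cpu_mca_state);
 *	}
 *
 * mca_save_state() tolerates a NULL buffer, so a caller need not check
 * whether allocation succeeded. Note that only MISC and ADDR reads are
 * gated on the corresponding valid bits; CTL and STATUS are always read.
 */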