static void mcabank_clear(int banknum)
{
    uint64_t status;

    status = mca_rdmsr(MSR_IA32_MCx_STATUS(banknum));

    if (status & MCi_STATUS_ADDRV)
        mca_wrmsr(MSR_IA32_MCx_ADDR(banknum), 0x0ULL);
    if (status & MCi_STATUS_MISCV)
        mca_wrmsr(MSR_IA32_MCx_MISC(banknum), 0x0ULL);

    mca_wrmsr(MSR_IA32_MCx_STATUS(banknum), 0x0ULL);
}
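For reference, mcabank_clear() indexes the per-bank registers through the MSR_IA32_MCx_* macros. A minimal sketch of how those macros expand, assuming the standard layout from the Intel SDM, where bank n's four registers sit at consecutive MSR addresses starting at 0x400 + 4*n (the real definitions live in the MSR headers):

#define MSR_IA32_MC0_CTL        0x00000400
#define MSR_IA32_MCx_CTL(x)     (MSR_IA32_MC0_CTL + 4 * (x))      /* bank control  */
#define MSR_IA32_MCx_STATUS(x)  (MSR_IA32_MC0_CTL + 4 * (x) + 1)  /* error status  */
#define MSR_IA32_MCx_ADDR(x)    (MSR_IA32_MC0_CTL + 4 * (x) + 2)  /* error address */
#define MSR_IA32_MCx_MISC(x)    (MSR_IA32_MC0_CTL + 4 * (x) + 3)  /* extra info    */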
void mcheck_mca_clearbanks(struct mca_banks *bankmask)
{
    int i;
    uint64_t status;

    for (i = 0; i < 32 && i < nr_mce_banks; i++) {
        if (!mcabanks_test(i, bankmask))
            continue;
        status = mca_rdmsr(MSR_IA32_MCx_STATUS(i));
        if (!(status & MCi_STATUS_VAL))
            continue;
        mca_wrmsr(MSR_IA32_MCx_STATUS(i), 0x0ULL);
    }
}
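mcabanks_test() treats the mask as a plain bitmap with one bit per bank. A hedged sketch of what the structure and helpers look like, assuming Xen's usual bitmap primitives (the real definitions carry a few more helpers):

struct mca_banks {
    int num;                 /* number of banks the map covers */
    unsigned long *bank_map; /* one bit per bank */
};

static inline int mcabanks_test(int bit, struct mca_banks *banks)
{
    return test_bit(bit, banks->bank_map);
}

static inline void mcabanks_set(int bit, struct mca_banks *banks)
{
    set_bit(bit, banks->bank_map);
}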
void get_mce_status(void)
{
    int banks = 0;
    u64 cap = 0;
    u64 status = 0, val = 0;
    int i = 0;

    printk("Enter:%s\n", __func__);
    rdmsrl(MSR_IA32_MCG_CAP, cap);
    banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
    printk("banks:%x, cap:%llx\n", banks, cap);

    for (i = 0; i < banks; i++) {
        /* Keep the status in its own variable so the MISCV test below
         * is not clobbered by the MCi_ADDR read. */
        rdmsrl(MSR_IA32_MCx_STATUS(i), status);
        if (status & 0x8000000000000000ULL) {        /* bit 63: VAL */
            printk("i = %d, status:%llx ", i, status);
            if (status & 0x0400000000000000ULL) {    /* bit 58: ADDRV */
                rdmsrl(MSR_IA32_MCx_ADDR(i), val);
                printk("i=%d, addr:%llx ", i, val);
            }
            if (status & 0x0800000000000000ULL) {    /* bit 59: MISCV */
                rdmsrl(MSR_IA32_MCx_MISC(i), val);
                printk("i=%d, misc:%llx ", i, val);
            }
        }
    }
    rdmsrl(MSR_IA32_MCG_STATUS, val);
    printk("mcg_status:%llx\n", val);
    printk("\n");
}
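The magic constants tested above correspond to architectural MCi_STATUS bits; named masks make the intent clearer. The bit positions come from the Intel SDM:

#define MCI_STATUS_VAL   (1ULL << 63) /* register contains valid data */
#define MCI_STATUS_MISCV (1ULL << 59) /* MCi_MISC register is valid   */
#define MCI_STATUS_ADDRV (1ULL << 58) /* MCi_ADDR register is valid   */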
static struct mcinfo_bank *mca_init_bank(enum mca_source who,
                                         struct mc_info *mi, int bank)
{
    struct mcinfo_bank *mib;

    if (!mi)
        return NULL;

    mib = x86_mcinfo_reserve(mi, sizeof(struct mcinfo_bank));
    if (!mib) {
        mi->flags |= MCINFO_FLAGS_UNCOMPLETE;
        return NULL;
    }

    memset(mib, 0, sizeof(struct mcinfo_bank));
    mib->mc_status = mca_rdmsr(MSR_IA32_MCx_STATUS(bank));

    mib->common.type = MC_TYPE_BANK;
    mib->common.size = sizeof(struct mcinfo_bank);
    mib->mc_bank = bank;

    if (mib->mc_status & MCi_STATUS_MISCV)
        mib->mc_misc = mca_rdmsr(MSR_IA32_MCx_MISC(bank));

    if (mib->mc_status & MCi_STATUS_ADDRV) {
        mib->mc_addr = mca_rdmsr(MSR_IA32_MCx_ADDR(bank));

        if (mfn_valid(paddr_to_pfn(mib->mc_addr))) {
            struct domain *d;

            d = maddr_get_owner(mib->mc_addr);
            if (d != NULL && (who == MCA_POLLER ||
                              who == MCA_CMCI_HANDLER))
                mib->mc_domid = d->domain_id;
        }
    }

    if (who == MCA_CMCI_HANDLER) {
        mib->mc_ctrl2 = mca_rdmsr(MSR_IA32_MC0_CTL2 + bank);
        rdtscll(mib->mc_tsc);
    }

    return mib;
}
static void mca_init_bank(enum mca_source who, struct mc_info *mi, int bank)
{
    struct mcinfo_bank *mib;

    if (!mi)
        return;

    mib = x86_mcinfo_reserve(mi, sizeof(*mib));
    if (!mib) {
        mi->flags |= MCINFO_FLAGS_UNCOMPLETE;
        return;
    }

    mib->mc_status = mca_rdmsr(MSR_IA32_MCx_STATUS(bank));
    mib->common.type = MC_TYPE_BANK;
    mib->common.size = sizeof(struct mcinfo_bank);
    mib->mc_bank = bank;

    if (mib->mc_status & MCi_STATUS_MISCV)
        mib->mc_misc = mca_rdmsr(MSR_IA32_MCx_MISC(bank));
    if (mib->mc_status & MCi_STATUS_ADDRV)
        mib->mc_addr = mca_rdmsr(MSR_IA32_MCx_ADDR(bank));

    if ((mib->mc_status & MCi_STATUS_MISCV) &&
        (mib->mc_status & MCi_STATUS_ADDRV) &&
        (mc_check_addr(mib->mc_status, mib->mc_misc, MC_ADDR_PHYSICAL)) &&
        (who == MCA_POLLER || who == MCA_CMCI_HANDLER) &&
        (mfn_valid(paddr_to_pfn(mib->mc_addr)))) {
        struct domain *d;

        d = maddr_get_owner(mib->mc_addr);
        if (d)
            mib->mc_domid = d->domain_id;
    }

    if (who == MCA_CMCI_HANDLER) {
        mib->mc_ctrl2 = mca_rdmsr(MSR_IA32_MC0_CTL2 + bank);
        rdtscll(mib->mc_tsc);
    }
}
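The interesting change against the previous version is the mc_check_addr() gate: the owning domain is only looked up when MCi_MISC says MCi_ADDR holds a physical address. A hedged sketch of such a check, assuming the SDM encoding where MCi_MISC bits 8:6 carry the address mode (2 == physical); Xen's real helper may differ in detail:

#define MCI_MISC_ADDRMODE(misc) (((misc) >> 6) & 0x7)
#define MC_ADDR_PHYSICAL        2

static int mc_check_addr(uint64_t status, uint64_t misc, int addrtype)
{
    if (!(status & MCi_STATUS_ADDRV) || !(status & MCi_STATUS_MISCV))
        return 0;
    /* Only trust MCi_ADDR when MCi_MISC reports the requested mode. */
    return MCI_MISC_ADDRMODE(misc) == addrtype;
}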
int mcequirk_amd_apply(enum mcequirk_amd_flags flags)
{
    uint64_t val;

    switch ( flags )
    {
    case MCEQUIRK_K8_GART:
        /*
         * Enable error reporting for all errors except for GART
         * TBL walk error reporting, which trips off incorrectly
         * with AGP GART & 3ware & Cerberus.
         */
        wrmsrl(MSR_IA32_MCx_CTL(4), ~(1ULL << 10));
        wrmsrl(MSR_IA32_MCx_STATUS(4), 0ULL);
        break;

    case MCEQUIRK_F10_GART:
        if ( rdmsr_safe(MSR_AMD64_MCx_MASK(4), val) == 0 )
            wrmsr_safe(MSR_AMD64_MCx_MASK(4), val | (1 << 10));
        break;
    }

    return 0;
}
enum mcheck_type amd_mcheck_init(struct cpuinfo_x86 *ci)
{
    uint32_t i;
    enum mcequirk_amd_flags quirkflag = mcequirk_lookup_amd_quirkdata(ci);

    /* Assume that machine check support is available.
     * The minimum provided support is at least the K8. */
    mce_handler_init();
    x86_mce_vector_register(mcheck_cmn_handler);
    mce_need_clearbank_register(amd_need_clearbank_scan);

    for ( i = 0; i < nr_mce_banks; i++ )
    {
        if ( quirkflag == MCEQUIRK_K8_GART && i == 4 )
            mcequirk_amd_apply(quirkflag);
        else
        {
            /* Enable error reporting of all errors */
            wrmsrl(MSR_IA32_MCx_CTL(i), 0xffffffffffffffffULL);
            wrmsrl(MSR_IA32_MCx_STATUS(i), 0x0ULL);
        }
    }

    if ( ci->x86 == 0xf )
        return mcheck_amd_k8;

    if ( quirkflag == MCEQUIRK_F10_GART )
        mcequirk_amd_apply(quirkflag);

    x86_mce_callback_register(amd_f10_handler);
    mce_recoverable_register(mc_amd_recoverable_scan);
    mce_register_addrcheck(mc_amd_addrcheck);

    return mcheck_amd_famXX;
}
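mcequirk_lookup_amd_quirkdata() is essentially a table scan keyed on the CPU family. A hedged sketch of what that lookup might look like; the exact table layout below is an assumption, with contents mirroring the two quirks applied above:

static const struct mce_quirkdata {
    uint8_t cpu_family;
    enum mcequirk_amd_flags quirk;
} mce_amd_quirks[] = {
    { 0xf,  MCEQUIRK_K8_GART },  /* K8: suppress GART TBL walk errors  */
    { 0x10, MCEQUIRK_F10_GART }, /* Fam10h: mask them via MCx_MASK MSR */
};

static enum mcequirk_amd_flags
mcequirk_lookup_amd_quirkdata(struct cpuinfo_x86 *c)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(mce_amd_quirks); i++)
        if (c->x86 == mce_amd_quirks[i].cpu_family)
            return mce_amd_quirks[i].quirk;

    return 0; /* no quirk for this family */
}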
/* Add out_param clear_bank for the machine check handler's caller.
 * For recent Intel CPUs, whether to clear the error bank status needs to
 * be judged by the callback function defined above.
 */
mctelem_cookie_t mcheck_mca_logout(enum mca_source who,
                                   struct mca_banks *bankmask,
                                   struct mca_summary *sp,
                                   struct mca_banks *clear_bank)
{
    uint64_t gstatus, status;
    struct mcinfo_global *mig = NULL; /* on stack */
    mctelem_cookie_t mctc = NULL;
    bool_t uc = 0, pcc = 0, recover = 1, need_clear = 1;
    uint32_t mc_flags = 0;
    struct mc_info *mci = NULL;
    mctelem_class_t which = MC_URGENT; /* XXXgcc */
    int errcnt = 0;
    int i;

    gstatus = mca_rdmsr(MSR_IA32_MCG_STATUS);
    switch (who) {
    case MCA_MCE_SCAN:
        mc_flags = MC_FLAG_MCE;
        which = MC_URGENT;
        break;
    case MCA_POLLER:
    case MCA_RESET:
        mc_flags = MC_FLAG_POLLED;
        which = MC_NONURGENT;
        break;
    case MCA_CMCI_HANDLER:
        mc_flags = MC_FLAG_CMCI;
        which = MC_NONURGENT;
        break;
    default:
        BUG();
    }

    /* If no mc_recoverable_scan callback is registered,
     * this error is not recoverable.
     */
    recover = (mc_recoverable_scan) ? 1 : 0;

    for (i = 0; i < nr_mce_banks; i++) {
        /* Skip this bank if the corresponding bit in bankmask is clear. */
        if (!mcabanks_test(i, bankmask))
            continue;

        status = mca_rdmsr(MSR_IA32_MCx_STATUS(i));
        if (!(status & MCi_STATUS_VAL))
            continue; /* this bank has no valid telemetry */

        /* For the CMCI/MCE handler callers on recent Intel CPUs, we need
         * to decide whether to clear the bank from MCi_STATUS bits such
         * as OVER/UC/EN/PCC/S/AR.
         */
        if (mc_need_clearbank_scan)
            need_clear = mc_need_clearbank_scan(who, status);

        /* If this is the first bank with valid MCA data, then try to
         * reserve an entry from the urgent/nonurgent queue, depending on
         * whether we are called from an exception or a poller; this can
         * fail (for example, dom0 may not yet have consumed past
         * telemetry).
         */
        if (errcnt++ == 0) {
            if ((mctc = mctelem_reserve(which)) != NULL) {
                mci = mctelem_dataptr(mctc);
                mcinfo_clear(mci);
                mig = x86_mcinfo_reserve(mci, sizeof(*mig));
                /* mc_info should at least hold the global information. */
                ASSERT(mig);
                mca_init_global(mc_flags, mig);
                /* A hook here to get global extended MSRs. */
                {
                    if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
                        intel_get_extended_msrs(mig, mci);
                }
            }
        }

        /* Flag uncorrected errors. */
        if (!uc && ((status & MCi_STATUS_UC) != 0))
            uc = 1;

        /* Flag processor context corrupt. */
        if (!pcc && ((status & MCi_STATUS_PCC) != 0))
            pcc = 1;

        if (recover && uc)
            /* uc = 1, recover = 1: we need not panic. */
            recover = mc_recoverable_scan(status);

        mca_init_bank(who, mci, i);

        if (mc_callback_bank_extended)
            mc_callback_bank_extended(mci, i, status);

        /* By default, need_clear = 1. */
        if (who != MCA_MCE_SCAN && need_clear)
            /* Clear the bank now. */
            mcabank_clear(i);
        else if (who == MCA_MCE_SCAN && need_clear)
            mcabanks_set(i, clear_bank);

        wmb();
    }

    if (mig && errcnt > 0) {
        if (pcc)
            mig->mc_flags |= MC_FLAG_UNCORRECTABLE;
        else if (uc)
            mig->mc_flags |= MC_FLAG_RECOVERABLE;
        else
            mig->mc_flags |= MC_FLAG_CORRECTABLE;
    }

    if (sp) {
        sp->errcnt = errcnt;
        sp->ripv = (gstatus & MCG_STATUS_RIPV) != 0;
        sp->eipv = (gstatus & MCG_STATUS_EIPV) != 0;
        sp->uc = uc;
        sp->pcc = pcc;
        sp->recoverable = recover;
    }

    return mci != NULL ? mctc : NULL; /* may be NULL */
}
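For context, a hedged sketch of how a caller consumes the cookie and summary returned here. The commit/dismiss pattern is the core of it: mctelem_commit() hands the reserved entry over to the telemetry consumer, while mctelem_dismiss() releases it. The wrapper name below is illustrative; the real Xen paths (poller, CMCI, and MCE handlers) add locking and bank bookkeeping on top:

static void mce_poll_banks_sketch(struct mca_banks *bankmask)
{
    struct mca_summary bs;
    mctelem_cookie_t mctc;

    /* clear_bank is only consumed on the MCA_MCE_SCAN path,
     * so the poller can pass NULL. */
    mctc = mcheck_mca_logout(MCA_POLLER, bankmask, &bs, NULL);

    if (bs.errcnt && mctc != NULL)
        mctelem_commit(mctc);  /* telemetry worth keeping: publish it */
    else if (mctc != NULL)
        mctelem_dismiss(mctc); /* nothing useful was logged: release  */
}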