static bool has_pkg_state_counter(void)
{
        u64 tmp;

        return !rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &tmp) ||
               !rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &tmp) ||
               !rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &tmp) ||
               !rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &tmp);
}
/**
 * Worker for supdrvOSMsrProberModify.
 */
static DECLCALLBACK(void) supdrvLnxMsrProberModifyOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PSUPMSRPROBER       pReq    = (PSUPMSRPROBER)pvUser1;
    register uint32_t   uMsr    = pReq->u.In.uMsr;
    bool const          fFaster = pReq->u.In.enmOp == SUPMSRPROBEROP_MODIFY_FASTER;
    uint64_t            uBefore;
    uint64_t            uWritten;
    uint64_t            uAfter;
    int                 rcBefore, rcWrite, rcAfter, rcRestore;
    RTCCUINTREG         fOldFlags;

    /* Initialize result variables. */
    uBefore = uWritten = uAfter = 0;
    rcWrite = rcAfter = rcRestore = -EIO;

    /*
     * Do the job.
     */
    fOldFlags = ASMIntDisableFlags();
    ASMCompilerBarrier(); /* paranoia */
    if (!fFaster)
        ASMWriteBackAndInvalidateCaches();

    rcBefore = rdmsrl_safe(uMsr, &uBefore);
    if (rcBefore >= 0)
    {
        register uint64_t uRestore = uBefore;
        uWritten  = uRestore;
        uWritten &= pReq->u.In.uArgs.Modify.fAndMask;
        uWritten |= pReq->u.In.uArgs.Modify.fOrMask;

        rcWrite   = wrmsr_safe(uMsr, RT_LODWORD(uWritten), RT_HIDWORD(uWritten));
        rcAfter   = rdmsrl_safe(uMsr, &uAfter);
        rcRestore = wrmsr_safe(uMsr, RT_LODWORD(uRestore), RT_HIDWORD(uRestore));

        if (!fFaster)
        {
            ASMWriteBackAndInvalidateCaches();
            ASMReloadCR3();
            ASMNopPause();
        }
    }

    ASMCompilerBarrier(); /* paranoia */
    ASMSetFlags(fOldFlags);

    /*
     * Write out the results.
     */
    pReq->u.Out.uResults.Modify.uBefore    = uBefore;
    pReq->u.Out.uResults.Modify.uWritten   = uWritten;
    pReq->u.Out.uResults.Modify.uAfter     = uAfter;
    pReq->u.Out.uResults.Modify.fBeforeGp  = rcBefore  != 0;
    pReq->u.Out.uResults.Modify.fModifyGp  = rcWrite   != 0;
    pReq->u.Out.uResults.Modify.fAfterGp   = rcAfter   != 0;
    pReq->u.Out.uResults.Modify.fRestoreGp = rcRestore != 0;
    RT_ZERO(pReq->u.Out.uResults.Modify.afReserved);
}
static int is_efer_nx(void)
{
        unsigned long long efer = 0;

        rdmsrl_safe(MSR_EFER, &efer);
        return efer & EFER_NX;
}
static int __init amd_power_pmu_init(void)
{
        int ret;

        if (!x86_match_cpu(cpu_match))
                return 0;

        if (!boot_cpu_has(X86_FEATURE_ACC_POWER))
                return -ENODEV;

        cpu_pwr_sample_ratio = cpuid_ecx(0x80000007);

        if (rdmsrl_safe(MSR_F15H_CU_MAX_PWR_ACCUMULATOR, &max_cu_acc_power)) {
                pr_err("Failed to read max compute unit power accumulator MSR\n");
                return -ENODEV;
        }

        cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE,
                          "perf/x86/amd/power:online",
                          power_cpu_init, power_cpu_exit);

        ret = perf_pmu_register(&pmu_class, "power", -1);
        if (WARN_ON(ret)) {
                pr_warn("AMD Power PMU registration failed\n");
                return ret;
        }

        pr_info("AMD Power PMU detected\n");
        return ret;
}
static int __init msr_init(void)
{
        int i, j = 0;

        if (!boot_cpu_has(X86_FEATURE_TSC)) {
                pr_cont("no MSR PMU driver.\n");
                return 0;
        }

        /* Probe the MSRs. */
        for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
                u64 val;

                /*
                 * Virt sucks arse; you cannot tell if a R/O MSR is present :/
                 */
                if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
                        msr[i].attr = NULL;
        }

        /* List remaining MSRs in the sysfs attrs. */
        for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
                if (msr[i].attr)
                        events_attrs[j++] = &msr[i].attr->attr.attr;
        }
        events_attrs[j] = NULL;

        perf_pmu_register(&pmu_msr, "msr", -1);

        return 0;
}
/**
 * msr_read - Read an MSR with error handling
 * @msr: MSR to read
 * @m: value to read into
 *
 * Stores the read data in @m only on success; on failure the output
 * argument @m is left unchanged.
 */
int msr_read(u32 msr, struct msr *m)
{
        int err;
        u64 val;

        err = rdmsrl_safe(msr, &val);
        if (!err)
                m->q = val;

        return err;
}
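A minimal usage sketch for the helper above; the IA32_APERF read, the fallback value, and the caller name are illustrative assumptions rather than code from the original source.

/* Hypothetical caller: read IA32_APERF without an unchecked #GP.
 * Since msr_read() leaves @m untouched on failure, the zeroed
 * initializer doubles as the fallback value. */
static u64 read_aperf_or_zero(void)
{
        struct msr m = { .q = 0 };

        if (msr_read(MSR_IA32_APERF, &m))
                pr_debug("IA32_APERF not readable on this CPU\n");

        return m.q;
}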
static u64 pkg_state_counter(void)
{
        u64 val;
        u64 count = 0;

        static bool skip_c2;
        static bool skip_c3;
        static bool skip_c6;
        static bool skip_c7;

        if (!skip_c2) {
                if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val))
                        count += val;
                else
                        skip_c2 = true;
        }

        if (!skip_c3) {
                if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val))
                        count += val;
                else
                        skip_c3 = true;
        }

        if (!skip_c6) {
                if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val))
                        count += val;
                else
                        skip_c6 = true;
        }

        if (!skip_c7) {
                if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val))
                        count += val;
                else
                        skip_c7 = true;
        }

        return count;
}
static bool has_pkg_state_counter(void)
{
        u64 val;
        struct pkg_cstate_info *info = pkg_cstates;

        /* check if any one of the counter msrs exists */
        while (info->msr_index) {
                if (!rdmsrl_safe(info->msr_index, &val))
                        return true;
                info++;
        }

        return false;
}
static void shared_msr_update(unsigned slot, u32 msr)
{
        u64 value;
        unsigned int cpu = smp_processor_id();
        struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);

        /* only read, and nobody should modify it at this time,
         * so don't need lock */
        if (slot >= shared_msrs_global.nr) {
                printk(KERN_ERR "kvm: invalid MSR slot!");
                return;
        }
        rdmsrl_safe(msr, &value);
        smsr->values[slot].host = value;
        smsr->values[slot].curr = value;
}
static u64 pkg_state_counter(void)
{
        u64 val;
        u64 count = 0;
        struct pkg_cstate_info *info = pkg_cstates;

        while (info->msr_index) {
                if (!info->skip) {
                        if (!rdmsrl_safe(info->msr_index, &val))
                                count += val;
                        else
                                info->skip = true;
                }
                info++;
        }

        return count;
}
/*
 * Probe the cstate events and insert the available one into sysfs attrs.
 * Return false if there are no available events.
 */
static bool __init cstate_probe_msr(const unsigned long evmsk, int max,
                                    struct perf_cstate_msr *msr,
                                    struct attribute **attrs)
{
        bool found = false;
        unsigned int bit;
        u64 val;

        for (bit = 0; bit < max; bit++) {
                if (test_bit(bit, &evmsk) && !rdmsrl_safe(msr[bit].msr, &val)) {
                        *attrs++ = &msr[bit].attr->attr.attr;
                        found = true;
                } else {
                        msr[bit].attr = NULL;
                }
        }
        *attrs = NULL;

        return found;
}
int smigen_safe_rdmsr(unsigned msr, uint64 *val)
{
        return rdmsrl_safe(msr, val);
}
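A short usage sketch for this thin wrapper; the choice of MSR_SMI_COUNT and the surrounding caller are assumptions for illustration only, not part of the original module.

/* Hypothetical caller: sample the SMI count through the wrapper,
 * falling back to zero when the read faults (e.g. the MSR is absent). */
static uint64 smigen_read_smi_count(void)
{
        uint64 count = 0;

        if (smigen_safe_rdmsr(MSR_SMI_COUNT, &count))
                count = 0;      /* read faulted; keep the fallback value */

        return count;
}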