/**
 * Worker for supdrvOSMsrProberModify.
 */
static DECLCALLBACK(void) supdrvLnxMsrProberModifyOnCpu(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    PSUPMSRPROBER       pReq    = (PSUPMSRPROBER)pvUser1;
    register uint32_t   uMsr    = pReq->u.In.uMsr;
    bool const          fFaster = pReq->u.In.enmOp == SUPMSRPROBEROP_MODIFY_FASTER;
    uint64_t            uBefore;
    uint64_t            uWritten;
    uint64_t            uAfter;
    int                 rcBefore, rcWrite, rcAfter, rcRestore;
    RTCCUINTREG         fOldFlags;

    /* Initialize result variables. */
    uBefore = uWritten = uAfter    = 0;
    rcWrite = rcAfter  = rcRestore = -EIO;

    /*
     * Do the job.
     */
    fOldFlags = ASMIntDisableFlags();
    ASMCompilerBarrier(); /* paranoia */
    if (!fFaster)
        ASMWriteBackAndInvalidateCaches();

    rcBefore = rdmsrl_safe(uMsr, &uBefore);
    if (rcBefore >= 0)
    {
        register uint64_t uRestore = uBefore;
        uWritten  = uRestore;
        uWritten &= pReq->u.In.uArgs.Modify.fAndMask;
        uWritten |= pReq->u.In.uArgs.Modify.fOrMask;

        rcWrite   = wrmsr_safe(uMsr, RT_LODWORD(uWritten), RT_HIDWORD(uWritten));
        rcAfter   = rdmsrl_safe(uMsr, &uAfter);
        rcRestore = wrmsr_safe(uMsr, RT_LODWORD(uRestore), RT_HIDWORD(uRestore));

        if (!fFaster)
        {
            ASMWriteBackAndInvalidateCaches();
            ASMReloadCR3();
            ASMNopPause();
        }
    }

    ASMCompilerBarrier(); /* paranoia */
    ASMSetFlags(fOldFlags);

    /*
     * Write out the results.
     */
    pReq->u.Out.uResults.Modify.uBefore    = uBefore;
    pReq->u.Out.uResults.Modify.uWritten   = uWritten;
    pReq->u.Out.uResults.Modify.uAfter     = uAfter;
    pReq->u.Out.uResults.Modify.fBeforeGp  = rcBefore  != 0;
    pReq->u.Out.uResults.Modify.fModifyGp  = rcWrite   != 0;
    pReq->u.Out.uResults.Modify.fAfterGp   = rcAfter   != 0;
    pReq->u.Out.uResults.Modify.fRestoreGp = rcRestore != 0;
    RT_ZERO(pReq->u.Out.uResults.Modify.afReserved);
}
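/*
 * Hedged sketch (not part of the quoted sources): a plausible dispatcher for the
 * worker above, assuming IPRT's RTMpOnSpecific() API. The supdrvOSMsrProberModify
 * name matches the doc comment above, but this body is an illustration only.
 */
int VBOXCALL supdrvOSMsrProberModify(RTCPUID idCpu, PSUPMSRPROBER pReq)
{
    if (idCpu == NIL_RTCPUID)
    {
        /* NIL_RTCPUID means "any CPU": just run the worker right here. */
        supdrvLnxMsrProberModifyOnCpu(idCpu, pReq, NULL);
        return VINF_SUCCESS;
    }
    /* Otherwise run the request on the requested CPU via a cross-call. */
    return RTMpOnSpecific(idCpu, supdrvLnxMsrProberModifyOnCpu, pReq, NULL);
}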
static int write_pat(efx_qword_t *pat)
{
    int r = wrmsr_safe(MSR_IA32_CR_PAT, pat->u32[0], pat->u32[1]);

    if (r)
        return -EIO;
    return 0;
}
/*
 * Actually perform MSR operations.
 */
static int
cpuctl_do_msr(int cpu, cpuctl_msr_args_t *data, u_long cmd, struct thread *td)
{
    uint64_t reg;
    int is_bound = 0;
    int oldcpu;
    int ret;

    KASSERT(cpu >= 0 && cpu <= mp_maxid,
        ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));

    /*
     * Explicitly clear cpuid data to avoid returning stale info.
     */
    DPRINTF("[cpuctl,%d]: operating on MSR %#0x for %d cpu\n", __LINE__,
        data->msr, cpu);
#ifdef __i386__
    if ((cpu_feature & CPUID_MSR) == 0)
        return (ENODEV);
#endif
    oldcpu = td->td_oncpu;
    is_bound = cpu_sched_is_bound(td);
    set_cpu(cpu, td);
    if (cmd == CPUCTL_RDMSR) {
        data->data = 0;
        ret = rdmsr_safe(data->msr, &data->data);
    } else if (cmd == CPUCTL_WRMSR) {
        ret = wrmsr_safe(data->msr, data->data);
    } else if (cmd == CPUCTL_MSRSBIT) {
        critical_enter();
        ret = rdmsr_safe(data->msr, &reg);
        if (ret == 0)
            ret = wrmsr_safe(data->msr, reg | data->data);
        critical_exit();
    } else if (cmd == CPUCTL_MSRCBIT) {
        critical_enter();
        ret = rdmsr_safe(data->msr, &reg);
        if (ret == 0)
            ret = wrmsr_safe(data->msr, reg & ~data->data);
        critical_exit();
    } else
        panic("[cpuctl,%d]: unknown operation requested: %lu",
            __LINE__, cmd);
    restore_cpu(oldcpu, is_bound, td);
    return (ret);
}
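/*
 * Hedged sketch (not part of the quoted source): how a userland tool might reach
 * cpuctl_do_msr() through the cpuctl(4) character devices, assuming the
 * CPUCTL_RDMSR ioctl and the cpuctl_msr_args_t layout used above. The device
 * path format, helper name, and error handling are illustrative assumptions.
 */
#include <sys/ioctl.h>
#include <sys/cpuctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int
read_msr_on_cpu(int cpu, int msr, uint64_t *value)
{
    cpuctl_msr_args_t args = { .msr = msr };
    char path[32];
    int fd, error;

    snprintf(path, sizeof(path), "/dev/cpuctl%d", cpu);
    fd = open(path, O_RDONLY);
    if (fd < 0)
        return (-1);
    error = ioctl(fd, CPUCTL_RDMSR, &args);  /* handled by cpuctl_do_msr() */
    close(fd);
    if (error == 0)
        *value = args.data;
    return (error);
}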
static inline int wrmsr_eio(u32 reg, u32 eax, u32 edx)
{
    int err;

    err = wrmsr_safe(reg, eax, edx);
    if (err)
        err = -EIO;

    return err;
}
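/*
 * Hedged sketch (companion to wrmsr_eio() above, not quoted from the source): the
 * read-side wrapper one would expect next to it, assuming the Linux
 * rdmsr_safe(msr, &lo, &hi) form that takes pointers to the two halves.
 */
static inline int rdmsr_eio(u32 reg, u32 *eax, u32 *edx)
{
    int err;

    err = rdmsr_safe(reg, eax, edx);
    if (err)
        err = -EIO;  /* fold the #GP indication into -EIO, like wrmsr_eio() */

    return err;
}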
static void wr_new_pat(void *err)
{
    u32 new_pat_lo = (old_pat_lo[smp_processor_id()] & MLX4_PAT_MASK) |
                     MLX4_PAT_MOD;

    *(int *)err |= wrmsr_safe(X86_MSR_PAT_OFFSET, new_pat_lo,
                              old_pat_hi[smp_processor_id()]);
}
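/*
 * Hedged sketch (not part of the quoted driver code): the read side that
 * wr_new_pat()/wr_old_pat() rely on, i.e. snapshotting the original PAT MSR into
 * old_pat_lo[]/old_pat_hi[] on each CPU. The function name rd_old_pat is an
 * assumption; only the rdmsr_safe() call pattern is the point.
 */
static void rd_old_pat(void *err)
{
    *(int *)err |= rdmsr_safe(X86_MSR_PAT_OFFSET,
                              &old_pat_lo[smp_processor_id()],
                              &old_pat_hi[smp_processor_id()]);
}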
int VBOXCALL supdrvOSMsrProberWrite(uint32_t uMsr, RTCPUID idCpu, uint64_t uValue)
{
# ifdef SUPDRV_LINUX_HAS_SAFE_MSR_API
    int rc;

    if (idCpu == NIL_RTCPUID)
        rc = wrmsr_safe(uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue));
    else if (RTMpIsCpuOnline(idCpu))
        rc = wrmsr_safe_on_cpu(idCpu, uMsr, RT_LODWORD(uValue), RT_HIDWORD(uValue));
    else
        return VERR_CPU_OFFLINE;
    if (rc == 0)
        return VINF_SUCCESS;
    return VERR_ACCESS_DENIED;
# else
    return VERR_NOT_SUPPORTED;
# endif
}
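/*
 * Hedged sketch (not quoted from the source): the read-side counterpart to
 * supdrvOSMsrProberWrite() above, assuming the same SUPDRV_LINUX_HAS_SAFE_MSR_API
 * guard and the Linux rdmsr_safe()/rdmsr_safe_on_cpu() helpers. The body of the
 * real supdrvOSMsrProberRead() may differ.
 */
int VBOXCALL supdrvOSMsrProberRead(uint32_t uMsr, RTCPUID idCpu, uint64_t *puValue)
{
# ifdef SUPDRV_LINUX_HAS_SAFE_MSR_API
    uint32_t u32Low, u32High;
    int rc;

    if (idCpu == NIL_RTCPUID)
        rc = rdmsr_safe(uMsr, &u32Low, &u32High);
    else if (RTMpIsCpuOnline(idCpu))
        rc = rdmsr_safe_on_cpu(idCpu, uMsr, &u32Low, &u32High);
    else
        return VERR_CPU_OFFLINE;
    if (rc == 0)
    {
        *puValue = RT_MAKE_U64(u32Low, u32High);
        return VINF_SUCCESS;
    }
    return VERR_ACCESS_DENIED;
# else
    return VERR_NOT_SUPPORTED;
# endif
}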
static int apply_microcode(int cpu)
{
    unsigned long flags;
    struct ucode_cpu_info *uci = &per_cpu(ucode_cpu_info, cpu);
    uint32_t rev;
    struct microcode_amd *mc_amd = uci->mc.mc_amd;
    struct microcode_header_amd *hdr;
    int hw_err;

    /* We should bind the task to the CPU */
    BUG_ON(raw_smp_processor_id() != cpu);

    if ( mc_amd == NULL )
        return -EINVAL;

    hdr = mc_amd->mpb;
    if ( hdr == NULL )
        return -EINVAL;

    spin_lock_irqsave(&microcode_update_lock, flags);

    hw_err = wrmsr_safe(MSR_AMD_PATCHLOADER, (unsigned long)hdr);

    /* get patch id after patching */
    rdmsrl(MSR_AMD_PATCHLEVEL, rev);

    spin_unlock_irqrestore(&microcode_update_lock, flags);

    /* check current patch id and patch's id for match */
    if ( hw_err || (rev != hdr->patch_id) )
    {
        printk(KERN_ERR "microcode: CPU%d update from revision "
               "%#x to %#x failed\n", cpu, rev, hdr->patch_id);
        return -EIO;
    }

    printk(KERN_WARNING "microcode: CPU%d updated from revision %#x to %#x\n",
           cpu, uci->cpu_sig.rev, hdr->patch_id);

    uci->cpu_sig.rev = rev;

    return 0;
}
int mcequirk_amd_apply(enum mcequirk_amd_flags flags)
{
    uint64_t val;

    switch ( flags )
    {
    case MCEQUIRK_K8_GART:
        /*
         * Enable error reporting for all errors except for GART
         * TBL walk error reporting, which trips off incorrectly
         * with AGP GART & 3ware & Cerberus.
         */
        wrmsrl(MSR_IA32_MCx_CTL(4), ~(1ULL << 10));
        wrmsrl(MSR_IA32_MCx_STATUS(4), 0ULL);
        break;

    case MCEQUIRK_F10_GART:
        if ( rdmsr_safe(MSR_AMD64_MCx_MASK(4), val) == 0 )
            wrmsr_safe(MSR_AMD64_MCx_MASK(4), val | (1 << 10));
        break;
    }

    return 0;
}
/*
 * opt_cpuid_mask_ecx/edx: cpuid.1[ecx, edx] feature mask.
 * For example, E8400[Intel Core 2 Duo Processor series] ecx = 0x0008E3FD,
 * edx = 0xBFEBFBFF when executing CPUID.EAX = 1 normally. If you want to
 * 'rev down' to E8400, you can set these values in these Xen boot parameters.
 */
static void set_cpuidmask(const struct cpuinfo_x86 *c)
{
    static unsigned int msr_basic, msr_ext, msr_xsave;
    static enum { not_parsed, no_mask, set_mask } status;
    u64 msr_val;

    if (status == no_mask)
        return;

    if (status == set_mask)
        goto setmask;

    ASSERT((status == not_parsed) && (c == &boot_cpu_data));
    status = no_mask;

    if (!~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx &
           opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx &
           opt_cpuid_mask_xsave_eax))
        return;

    /* Only family 6 supports this feature. */
    if (c->x86 != 6) {
        printk("No CPUID feature masking support available\n");
        return;
    }

    switch (c->x86_model) {
    case 0x17: /* Yorkfield, Wolfdale, Penryn, Harpertown(DP) */
    case 0x1d: /* Dunnington(MP) */
        msr_basic = MSR_INTEL_MASK_V1_CPUID1;
        break;

    case 0x1a: /* Bloomfield, Nehalem-EP(Gainestown) */
    case 0x1e: /* Clarksfield, Lynnfield, Jasper Forest */
    case 0x1f: /* Something Nehalem-based - perhaps Auburndale/Havendale? */
    case 0x25: /* Arrandale, Clarksdale */
    case 0x2c: /* Gulftown, Westmere-EP */
    case 0x2e: /* Nehalem-EX(Beckton) */
    case 0x2f: /* Westmere-EX */
        msr_basic = MSR_INTEL_MASK_V2_CPUID1;
        msr_ext = MSR_INTEL_MASK_V2_CPUID80000001;
        break;

    case 0x2a: /* SandyBridge */
    case 0x2d: /* SandyBridge-E, SandyBridge-EN, SandyBridge-EP */
        msr_basic = MSR_INTEL_MASK_V3_CPUID1;
        msr_ext = MSR_INTEL_MASK_V3_CPUID80000001;
        msr_xsave = MSR_INTEL_MASK_V3_CPUIDD_01;
        break;
    }

    status = set_mask;

    if (~(opt_cpuid_mask_ecx & opt_cpuid_mask_edx)) {
        if (msr_basic)
            printk("Writing CPUID feature mask ecx:edx -> %08x:%08x\n",
                   opt_cpuid_mask_ecx, opt_cpuid_mask_edx);
        else
            printk("No CPUID feature mask available\n");
    }
    else
        msr_basic = 0;

    if (~(opt_cpuid_mask_ext_ecx & opt_cpuid_mask_ext_edx)) {
        if (msr_ext)
            printk("Writing CPUID extended feature mask ecx:edx -> %08x:%08x\n",
                   opt_cpuid_mask_ext_ecx, opt_cpuid_mask_ext_edx);
        else
            printk("No CPUID extended feature mask available\n");
    }
    else
        msr_ext = 0;

    if (~opt_cpuid_mask_xsave_eax) {
        if (msr_xsave)
            printk("Writing CPUID xsave feature mask eax -> %08x\n",
                   opt_cpuid_mask_xsave_eax);
        else
            printk("No CPUID xsave feature mask available\n");
    }
    else
        msr_xsave = 0;

 setmask:
    if (msr_basic &&
        wrmsr_safe(msr_basic,
                   ((u64)opt_cpuid_mask_edx << 32) | opt_cpuid_mask_ecx)) {
        msr_basic = 0;
        printk("Failed to set CPUID feature mask\n");
    }

    if (msr_ext &&
        wrmsr_safe(msr_ext,
                   ((u64)opt_cpuid_mask_ext_edx << 32) | opt_cpuid_mask_ext_ecx)) {
        msr_ext = 0;
        printk("Failed to set CPUID extended feature mask\n");
    }

    if (msr_xsave &&
        (rdmsr_safe(msr_xsave, msr_val) ||
         wrmsr_safe(msr_xsave,
                    (msr_val & (~0ULL << 32)) | opt_cpuid_mask_xsave_eax))) {
        msr_xsave = 0;
        printk("Failed to set CPUID xsave feature mask\n");
    }
}
/**
 * x86_acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int x86_acpi_suspend_lowlevel(void)
{
    struct wakeup_header *header =
        (struct wakeup_header *) __va(real_mode_header->wakeup_header);

    if (header->signature != WAKEUP_HEADER_SIGNATURE) {
        printk(KERN_ERR "wakeup header does not match\n");
        return -EINVAL;
    }

    header->video_mode = saved_video_mode;

    header->pmode_behavior = 0;

#ifndef CONFIG_64BIT
    native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

    /*
     * We have to check that we can write back the value, and not
     * just read it.  At least on 90 nm Pentium M (Family 6, Model
     * 13), reading an invalid MSR is not guaranteed to trap, see
     * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
     * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
     * nm process with 512-KB L2 Cache Specification Update".
     */
    if (!rdmsr_safe(MSR_EFER,
                    &header->pmode_efer_low,
                    &header->pmode_efer_high) &&
        !wrmsr_safe(MSR_EFER,
                    header->pmode_efer_low,
                    header->pmode_efer_high))
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

    header->pmode_cr0 = read_cr0();
    if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
        header->pmode_cr4 = read_cr4();
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
    }
    if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
                    &header->pmode_misc_en_low,
                    &header->pmode_misc_en_high) &&
        !wrmsr_safe(MSR_IA32_MISC_ENABLE,
                    header->pmode_misc_en_low,
                    header->pmode_misc_en_high))
        header->pmode_behavior |=
            (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
    header->realmode_flags = acpi_realmode_flags;
    header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
    header->pmode_entry = (u32)&wakeup_pmode_return;
    header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
    saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
    stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
    early_gdt_descr.address =
            (unsigned long)get_cpu_gdt_table(smp_processor_id());
    initial_gs = per_cpu_offset(smp_processor_id());
#endif
    initial_code = (unsigned long)wakeup_long64;
    saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

    do_suspend_lowlevel();
    return 0;
}
static void wr_old_pat(void *err)
{
    *(int *)err |= wrmsr_safe(X86_MSR_PAT_OFFSET,
                              old_pat_lo[smp_processor_id()],
                              old_pat_hi[smp_processor_id()]);
}
static void __wrmsr_safe_on_cpu(void *info)
{
    struct msr_info *rv = info;

    rv->err = wrmsr_safe(rv->msr_no, rv->l, rv->h);
}
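/*
 * Hedged sketch (not quoted from the source): how __wrmsr_safe_on_cpu() is
 * typically driven, assuming the Linux smp_call_function_single() API and the
 * struct msr_info fields used above. Details of the real wrmsr_safe_on_cpu()
 * vary between kernel versions.
 */
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
    struct msr_info rv;
    int err;

    memset(&rv, 0, sizeof(rv));
    rv.msr_no = msr_no;
    rv.l = l;
    rv.h = h;

    /* Run the wrmsr_safe() wrapper on the target CPU and wait for it. */
    err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

    return err ? err : rv.err;
}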