/*
 * Enable the extended processor state save/restore feature
 */
static inline void xstate_enable(void)
{
#ifndef CONFIG_L4
	set_in_cr4(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
#endif /* L4 */
}
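/*
 * For reference, a minimal sketch of the xsetbv() helper the snippets
 * here rely on: it writes a 64-bit value into the extended control
 * register selected by `index` via the XSETBV instruction
 * (EDX:EAX = value, ECX = index); XCR_XFEATURE_ENABLED_MASK selects
 * XCR0 (index 0). The exact wrapper differs between trees (some take
 * the value split into two 32-bit halves), so this is only an
 * illustrative version, not the definitive implementation.
 */
static inline void xsetbv(u32 index, u64 value)
{
	u32 eax = value;
	u32 edx = value >> 32;

	asm volatile("xsetbv" : : "a" (eax), "d" (edx), "c" (index));
}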
/*
 * Initialize the floating point unit.
 */
void
fpuinit(void)
{
	register_t saveintr;
	u_int mxcsr;
	u_short control;

	if (IS_BSP())
		fpuinit_bsp1();

	if (use_xsave) {
		load_cr4(rcr4() | CR4_XSAVE);
		xsetbv(XCR0, xsave_mask);
	}

	/*
	 * XCR0 shall be set up before CPU can report the save area size.
	 */
	if (IS_BSP())
		fpuinit_bsp2();

	/*
	 * It is too early for critical_enter() to work on AP.
	 */
	saveintr = intr_disable();
	stop_emulating();
	fninit();
	control = __INITIAL_FPUCW__;
	fldcw(control);
	mxcsr = __INITIAL_MXCSR__;
	ldmxcsr(mxcsr);
	start_emulating();
	intr_restore(saveintr);
}
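/*
 * A brief sketch of what the emulation toggles used above typically
 * expand to in BSD-style FPU code (shown here for illustration; the
 * macro definitions live elsewhere in the tree): CR0.TS is cleared so
 * FPU instructions can execute during initialization, then set again
 * so the next FPU use traps and the kernel can lazily restore the
 * owning thread's FPU context.
 */
#define	stop_emulating()	clts()				/* clear CR0.TS */
#define	start_emulating()	load_cr0(rcr0() | CR0_TS)	/* set CR0.TS */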
/*
 * Restore minimal FPU state after suspend:
 */
void fpu__resume_cpu(void)
{
	/*
	 * Restore XCR0 on xsave capable CPUs:
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}
/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
	if (!cpu_has_xsave || !xfeatures_mask)
		return;

	cr4_set_bits(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
}
/*
 * Enable the extended processor state save/restore feature.
 * Called once per CPU onlining.
 */
void fpu__init_cpu_xstate(void)
{
#ifndef CONFIG_L4
	if (!cpu_has_xsave || !xfeatures_mask)
		return;

	cr4_set_bits(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
#endif /* L4 */
}
/*
 * Enable the extended processor state save/restore feature
 */
void __cpuinit xsave_init(void)
{
	if (!cpu_has_xsave)
		return;

	set_in_cr4(X86_CR4_OSXSAVE);

	/*
	 * Enable all the features that the HW is capable of
	 * and the Linux kernel is aware of.
	 */
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
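/*
 * For context, a hedged sketch of how a mask like pcntxt_mask is
 * commonly derived before xsave_init() runs: CPUID leaf 0xD,
 * sub-leaf 0 reports the processor-supported xfeature bits in
 * EDX:EAX, which are then intersected with the features the kernel
 * knows how to handle. The helper name and XCNTXT_MASK are assumed
 * here for illustration only.
 */
static u64 compute_xfeature_mask(void)
{
	u32 eax, ebx, ecx, edx;

	cpuid_count(0x0000000d, 0, &eax, &ebx, &ecx, &edx);
	return (eax | ((u64)edx << 32)) & XCNTXT_MASK;
}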
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);

	/*
	 * segment registers
	 */
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();

	/*
	 * restore XCR0 for xsave capable CPUs.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	mtrr_ap_init();
	mcheck_init(&boot_cpu_data);
}
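/*
 * For orientation, a hedged, partial sketch of the save-side
 * counterpart that fills in the control-register fields consumed by
 * __restore_processor_state() above. The remaining fields (descriptor
 * tables, segment selectors, MSRs) are omitted here for brevity; this
 * is not the full routine.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
	ctxt->cr4 = read_cr4();
}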
/*
 * Enable the extended processor state save/restore feature
 */
static inline void xstate_enable(void)
{
	set_in_cr4(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}
/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
	uint64_t msr;

	/* Check for FXSR and SSE support and enable if available. */
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}

	if (cpu == 0) {
		/* Check if we are running in a hypervisor. */
		vmm_guest = detect_virtual();
		if (vmm_guest == VMM_GUEST_NONE &&
		    (cpu_feature2 & CPUID2_VMM))
			vmm_guest = VMM_GUEST_UNKNOWN;
	}

#if !defined(CPU_DISABLE_AVX)
	/* Check for XSAVE and AVX support and enable if available. */
	if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE) &&
	    (cpu_feature & CPUID_SSE)) {
		load_cr4(rcr4() | CR4_XSAVE);

		/* Adjust size of savefpu in npx.h before adding to mask. */
		xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE |
		    CPU_XFEATURE_YMM, 0);
		cpu_xsave = 1;
	}
#endif

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch((cpu_id & 0xFF0000)) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by yours truly
			 * (Matthew Dillon).  It is a bug where a sequence
			 * of 5 or more popq's + a retq, under involved
			 * deep recursion circumstances, can cause the %rsp
			 * to not be properly updated, almost always
			 * resulting in a seg-fault soon after.
			 *
			 * Do not install the workaround when we are running
			 * in a virtual machine.
			 */
			if (vmm_guest)
				break;

			msr = rdmsr(MSR_AMD_DE_CFG);
			if ((msr & 1) == 0) {
				if (cpu == 0)
					kprintf("Errata 721 workaround "
						"installed\n");
				msr |= 1;
				wrmsr(MSR_AMD_DE_CFG, msr);
			}
			break;
		}
	}

	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
#if 0 /* JG */
		pg_nx = PG_NX;
#endif
	}

	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)
		init_via();

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}