static int __cpuinit register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void __cpuinit xen_enable_sysenter(void)
{
	int ret;
	unsigned sysenter_feature;

#ifdef CONFIG_X86_32
	sysenter_feature = X86_FEATURE_SEP;
#else
	sysenter_feature = X86_FEATURE_SYSENTER32;
#endif

	if (!boot_cpu_has(sysenter_feature))
		return;

	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
	if (ret != 0)
		setup_clear_cpu_cap(sysenter_feature);
}
static int __init numaq_tsc_disable(void)
{
	if (num_online_nodes() > 1) {
		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
		setup_clear_cpu_cap(X86_FEATURE_TSC);
	}

	return 0;
}
/*
 * APIC command line parameters
 */
static int __init parse_lapic(char *arg)
{
	if (config_enabled(CONFIG_X86_32) && !arg)
		force_enable_local_apic = 1;
	else if (arg && !strncmp(arg, "notscdeadline", 13))
		setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);

	return 0;
}
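/*
 * Illustrative hookup, not part of the snippet above: a parser like
 * parse_lapic() is registered as an early boot parameter so it runs
 * before the APIC and TSC-deadline code consults the feature bits.
 * A minimal sketch, assuming the "lapic" parameter name:
 */
early_param("lapic", parse_lapic);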
void __cpuinit numaq_tsc_disable(void)
{
	if (!found_numaq)
		return;

	if (num_online_nodes() > 1) {
		printk(KERN_DEBUG "NUMAQ: disabling TSC\n");
		setup_clear_cpu_cap(X86_FEATURE_TSC);
	}
}
static __init int setup_nox2apic(char *str)
{
	if (x2apic_enabled()) {
		pr_warning("Bios already enabled x2apic, "
			   "can't enforce nox2apic");
		return 0;
	}

	setup_clear_cpu_cap(X86_FEATURE_X2APIC);
	return 0;
}
static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
{
	if (!boot_cpu_has(X86_FEATURE_CPUID) &&
	    !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) {
		if (fpu__probe_without_cpuid())
			setup_force_cpu_cap(X86_FEATURE_FPU);
		else
			setup_clear_cpu_cap(X86_FEATURE_FPU);
	}

#ifndef CONFIG_MATH_EMULATION
	if (!test_cpu_cap(&boot_cpu_data, X86_FEATURE_FPU)) {
		pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
}
void __cpuinit xen_enable_syscall(void)
{
#ifdef CONFIG_X86_64
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
		ret = register_callback(CALLBACKTYPE_syscall32,
					xen_syscall32_target);
		if (ret != 0)
			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
	}
#endif /* CONFIG_X86_64 */
}
void check_mpx_erratum(struct cpuinfo_x86 *c)
{
	if (forcempx)
		return;

	/*
	 * Turn off the MPX feature on CPUs where SMEP is not
	 * available or disabled.
	 *
	 * Works around Intel Erratum SKD046: "Branch Instructions
	 * May Initialize MPX Bound Registers Incorrectly".
	 *
	 * This might falsely disable MPX on systems without
	 * SMEP, like Atom processors without SMEP.  But there
	 * is no such hardware known at the moment.
	 */
	if (cpu_has(c, X86_FEATURE_MPX) && !cpu_has(c, X86_FEATURE_SMEP)) {
		setup_clear_cpu_cap(X86_FEATURE_MPX);
		pr_warn("x86/mpx: Disabling MPX since SMEP not present\n");
	}
}
static void early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) {
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		pr_warn("Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			set_sched_clock_stable();
	}

	/* Penwell and Cloverview have the TSC which doesn't sleep on S3 */
	if (c->x86 == 6) {
		switch (c->x86_model) {
		case 0x27:	/* Penwell */
		case 0x35:	/* Cloverview */
		case 0x4a:	/* Merrifield */
			set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC_S3);
			break;
		default:
			break;
		}
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15)
		if (msr_clear_bit(MSR_IA32_MISC_ENABLE,
				  MSR_IA32_MISC_ENABLE_FAST_STRING_BIT) > 0)
			pr_info("kmemcheck: Disabling fast string operations\n");
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			pr_info("Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}

	/*
	 * Intel Quark Core DevMan_001.pdf section 6.4.11
	 * "The operating system also is required to invalidate (i.e., flush)
	 * the TLB when any changes are made to any of the page table entries.
	 * The operating system must reload CR3 to cause the TLB to be flushed"
	 *
	 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h
	 * should be false so that __flush_tlb_all() causes CR3 instead of CR4.PGE
	 * to be modified.
	 */
	if (c->x86 == 5 && c->x86_model == 9) {
		pr_info("Disabling PGE capability bit\n");
		setup_clear_cpu_cap(X86_FEATURE_PGE);
	}

	if (c->cpuid_level >= 0x00000001) {
		u32 eax, ebx, ecx, edx;

		cpuid(0x00000001, &eax, &ebx, &ecx, &edx);
		/*
		 * If HTT (EDX[28]) is set EBX[16:23] contain the number of
		 * apicids which are reserved per package. Store the resulting
		 * shift value for the package management code.
		 */
		if (edx & (1U << 28))
			c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff);
	}
}
/*
 * Clear all of the X86_FEATURE_* bits that are unavailable
 * when the CPU has no XSAVE support.
 */
void fpu__xstate_clear_all_cpu_caps(void)
{
	setup_clear_cpu_cap(X86_FEATURE_XSAVE);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
	setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
	setup_clear_cpu_cap(X86_FEATURE_XSAVES);
	setup_clear_cpu_cap(X86_FEATURE_AVX);
	setup_clear_cpu_cap(X86_FEATURE_AVX2);
	setup_clear_cpu_cap(X86_FEATURE_AVX512F);
	setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
	setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
	setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
	setup_clear_cpu_cap(X86_FEATURE_AVX512DQ);
	setup_clear_cpu_cap(X86_FEATURE_AVX512BW);
	setup_clear_cpu_cap(X86_FEATURE_AVX512VL);
	setup_clear_cpu_cap(X86_FEATURE_MPX);
	setup_clear_cpu_cap(X86_FEATURE_XGETBV1);
	setup_clear_cpu_cap(X86_FEATURE_PKU);
}
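/*
 * Sketch of a typical caller, assumed for illustration and not part of
 * the snippet above: a "noxsave" boot-parameter handler disables every
 * XSAVE-dependent capability in one place via the helper. The handler
 * name and exact wiring are assumptions.
 */
static int __init x86_noxsave_setup(char *s)
{
	/* The option takes no argument; reject anything else. */
	if (strlen(s))
		return 0;

	fpu__xstate_clear_all_cpu_caps();

	return 1;	/* option consumed */
}
__setup("noxsave", x86_noxsave_setup);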
static void voyager_disable_tsc(void)
{
	/* Voyagers run their CPUs from independent clocks, so disable
	 * the TSC code because we can't sync them */
	setup_clear_cpu_cap(X86_FEATURE_TSC);
}
void __init pre_setup_arch_hook(void)
{
	/* Voyagers run their CPUs from independent clocks, so disable
	 * the TSC code because we can't sync them */
	setup_clear_cpu_cap(X86_FEATURE_TSC);
}
static __init int setup_disableapic(char *str)
{
	disable_apic = 1;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
	return 0;
}
/*
 * disable flag for tsc. Takes effect by clearing the TSC cpu flag
 * in cpu/common.c
 */
int __init notsc_setup(char *str)
{
	setup_clear_cpu_cap(X86_FEATURE_TSC);
	return 1;
}
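/*
 * Illustrative registration, assumed and not part of the snippet above:
 * __setup() wires the handler to the "notsc" boot option. Returning 1
 * from the handler tells the __setup machinery the option was consumed,
 * whereas early_param() handlers conventionally return 0.
 */
__setup("notsc", notsc_setup);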
static void __init lapic_disable(char *str)
{
	enable_local_apic = -1;
	setup_clear_cpu_cap(X86_FEATURE_APIC);
}
static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
{
	u64 misc_enable;

	/* Unmask CPUID levels if masked: */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
			c->cpuid_level = cpuid_eax(0);
			get_cpu_cap(c);
		}
	}

	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);

	if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
		unsigned lower_word;

		wrmsr(MSR_IA32_UCODE_REV, 0, 0);
		/* Required by the SDM */
		sync_core();
		rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
	}

	/*
	 * Atom erratum AAE44/AAF40/AAG38/AAH41:
	 *
	 * A race condition between speculative fetches and invalidating
	 * a large page.  This is worked around in microcode, but we
	 * need the microcode to have already been loaded... so if it is
	 * not, recommend a BIOS update and disable large pages.
	 */
	if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_mask <= 2 &&
	    c->microcode < 0x20e) {
		printk(KERN_WARNING "Atom PSE erratum detected, BIOS microcode update recommended\n");
		clear_cpu_cap(c, X86_FEATURE_PSE);
	}

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSENTER32);
#else
	/* Netburst reports 64 bytes clflush size, but does IO in 128 bytes */
	if (c->x86 == 15 && c->x86_cache_alignment == 64)
		c->x86_cache_alignment = 128;
#endif

	/* CPUID workaround for 0F33/0F34 CPU */
	if (c->x86 == 0xF && c->x86_model == 0x3 &&
	    (c->x86_mask == 0x3 || c->x86_mask == 0x4))
		c->x86_phys_bits = 36;

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states.
	 *
	 * It is also reliable across cores and sockets. (but not across
	 * cabinets - we turn it off in that case explicitly.)
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
		if (!check_tsc_unstable())
			sched_clock_stable = 1;
	}

	/*
	 * There is a known erratum on Pentium III and Core Solo
	 * and Core Duo CPUs.
	 * " Page with PAT set to WC while associated MTRR is UC
	 * may consolidate to UC "
	 * Because of this erratum, it is better to stick with
	 * setting WC in MTRR rather than using PAT on these CPUs.
	 *
	 * Enable PAT WC only on P4, Core 2 or later CPUs.
	 */
	if (c->x86 == 6 && c->x86_model < 15)
		clear_cpu_cap(c, X86_FEATURE_PAT);

#ifdef CONFIG_KMEMCHECK
	/*
	 * P4s have a "fast strings" feature which causes single-
	 * stepping REP instructions to only generate a #DB on
	 * cache-line boundaries.
	 *
	 * Ingo Molnar reported a Pentium D (model 6) and a Xeon
	 * (model 2) with the same problem.
	 */
	if (c->x86 == 15) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);

		if (misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING) {
			printk(KERN_INFO "kmemcheck: Disabling fast string operations\n");

			misc_enable &= ~MSR_IA32_MISC_ENABLE_FAST_STRING;
			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		}
	}
#endif

	/*
	 * If fast string is not enabled in IA32_MISC_ENABLE for any reason,
	 * clear the fast string and enhanced fast string CPU capabilities.
	 */
	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
		if (!(misc_enable & MSR_IA32_MISC_ENABLE_FAST_STRING)) {
			printk(KERN_INFO "Disabled fast string operations\n");
			setup_clear_cpu_cap(X86_FEATURE_REP_GOOD);
			setup_clear_cpu_cap(X86_FEATURE_ERMS);
		}
	}
}
static int __init x86_rdrand_setup(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_RDRAND);
	return 1;
}