/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;

	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR)
		init_via();
}
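/*
 * Illustrative only, not part of the kernel sources collected here: a
 * minimal user-space sketch of the feature test behind
 * "amd_feature & AMDID_NX".  AMDID_NX corresponds to CPUID leaf
 * 0x80000001, %edx bit 20 (the NX/XD capability that EFER.NXE turns on).
 * The bit position is from the CPUID specification; the program itself,
 * built on GCC/Clang <cpuid.h>, is an assumption for illustration.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Extended leaf 0x80000001: AMD-defined feature flags. */
	if (!__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx)) {
		printf("extended CPUID leaf not supported\n");
		return (1);
	}
	/* %edx bit 20 advertises the NX (no-execute) page bit. */
	printf("NX supported: %s\n", (edx & (1u << 20)) ? "yes" : "no");
	return (0);
}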
/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;
	uint32_t cr4;

	cr4 = rcr4();
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		cr4 |= CR4_FXSR | CR4_XMM;
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if (cpu_stdext_feature & CPUID_STDEXT_FSGSBASE)
		cr4 |= CR4_FSGSBASE;
	/*
	 * Postpone enabling the SMEP on the boot CPU until the page
	 * tables are switched from the boot loader identity mapping
	 * to the kernel tables.  The boot loader enables the U bit in
	 * its tables.
	 */
	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
		cr4 |= CR4_SMEP;
	load_cr4(cr4);
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	switch (cpu_vendor_id) {
	case CPU_VENDOR_AMD:
		init_amd();
		break;
	case CPU_VENDOR_CENTAUR:
		init_via();
		break;
	}
}
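/*
 * Illustrative only: cpu_stdext_feature tested above is CPUID leaf 7,
 * subleaf 0, %ebx ("structured extended features").  A minimal user-space
 * sketch probing the two bits this variant cares about, FSGSBASE (bit 0)
 * and SMEP (bit 7), using GCC/Clang <cpuid.h>.  The program and its output
 * format are assumptions for illustration.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 7 must be queried with the subleaf number in %ecx. */
	if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
		printf("CPUID leaf 7 not supported\n");
		return (1);
	}
	printf("FSGSBASE: %s\n", (ebx & (1u << 0)) ? "yes" : "no");
	printf("SMEP:     %s\n", (ebx & (1u << 7)) ? "yes" : "no");
	return (0);
}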
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
#if defined(I686_CPU) && defined(CPU_ATHLON_SSE_HACK)
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE.  These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];

				wrmsr(0xC0010015,
				    rdmsr(0xC0010015) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
#endif
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
#ifdef PAE
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
		}
#endif
		break;
#endif
	default:
		break;
	}
	enable_sse();

	/*
	 * CPUID with %eax = 1, %ebx returns
	 * Bits 15-8: CLFLUSH line size
	 *	(Value * 8 = cache line size in bytes)
	 */
	if ((cpu_feature & CPUID_CLFSH) != 0)
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;
	/*
	 * XXXKIB: (temporary) hack to work around traps generated
	 * when CLFLUSHing the APIC register window.
	 */
	TUNABLE_INT_FETCH("hw.clflush_disable", &hw_clflush_disable);
	if (cpu_vendor_id == CPU_VENDOR_INTEL && !(cpu_feature & CPUID_SS) &&
	    hw_clflush_disable == -1)
		cpu_feature &= ~CPUID_CLFSH;
	/*
	 * Allow the CLFLUSH feature to be disabled manually via the
	 * hw.clflush_disable tunable.  This may help a Xen guest on
	 * some AMD CPUs.
	 */
	if (hw_clflush_disable == 1)
		cpu_feature &= ~CPUID_CLFSH;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	/*
	 * The OS should flush the L1 cache by itself because no PC-98
	 * supports non-Intel CPUs.  Use the wbinvd instruction before a
	 * DMA transfer when need_pre_dma_flush = 1, and the invd
	 * instruction after a DMA transfer when need_post_dma_flush = 1.
	 * If your CPU upgrade product supports hardware cache control,
	 * you can add the CPU_UPGRADE_HW_CACHE option to your kernel
	 * configuration file.  This option eliminates unneeded cache
	 * flush instruction(s).
	 */
	if (cpu_vendor_id == CPU_VENDOR_CYRIX) {
		switch (cpu) {
#ifdef I486_CPU
		case CPU_486DLC:
			need_post_dma_flush = 1;
			break;
		case CPU_M1SC:
			need_pre_dma_flush = 1;
			break;
		case CPU_CY486DX:
			need_pre_dma_flush = 1;
#ifdef CPU_I486_ON_386
			need_post_dma_flush = 1;
#endif
			break;
#endif
		default:
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch (cpu_id & 0xFF0) {
		case 0x470:		/* Enhanced Am486DX2 WB */
		case 0x490:		/* Enhanced Am486DX4 WB */
		case 0x4F0:		/* Am5x86 WB */
			need_pre_dma_flush = 1;
			break;
		}
	} else if (cpu_vendor_id == CPU_VENDOR_IBM) {
		need_post_dma_flush = 1;
	} else {
#ifdef CPU_I486_ON_386
		need_pre_dma_flush = 1;
#endif
	}
#endif /* PC98 && !CPU_UPGRADE_HW_CACHE */
}
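/*
 * Illustrative only: a user-space version of the CLFLUSH line-size
 * computation above.  CPUID leaf 1 reports the CLFLUSH line size in
 * %ebx bits 15-8, in units of 8 bytes, and advertises the CLFLUSH
 * instruction itself in %edx bit 19 (CPUID_CLFSH).  The bit layout is
 * from the CPUID specification; the program is an assumption for
 * illustration.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return (1);
	if (edx & (1u << 19))	/* CLFLUSH supported */
		printf("CLFLUSH line size: %u bytes\n",
		    ((ebx >> 8) & 0xff) * 8);
	else
		printf("CLFLUSH not supported\n");
	return (0);
}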
/*
 * Initialize CPU control registers
 */
void
initializecpu(int cpu)
{
	uint64_t msr;

	/* Check for FXSR and SSE support and enable if available. */
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}

	if (cpu == 0) {
		/* Check if we are running in a hypervisor. */
		vmm_guest = detect_virtual();
		if (vmm_guest == VMM_GUEST_NONE &&
		    (cpu_feature2 & CPUID2_VMM))
			vmm_guest = VMM_GUEST_UNKNOWN;
	}

#if !defined(CPU_DISABLE_AVX)
	/* Check for XSAVE and AVX support and enable if available. */
	if ((cpu_feature2 & CPUID2_AVX) && (cpu_feature2 & CPUID2_XSAVE) &&
	    (cpu_feature & CPUID_SSE)) {
		load_cr4(rcr4() | CR4_XSAVE);

		/* Adjust size of savefpu in npx.h before adding to mask. */
		xsetbv(0, CPU_XFEATURE_X87 | CPU_XFEATURE_SSE |
		    CPU_XFEATURE_YMM, 0);
		cpu_xsave = 1;
	}
#endif

	if (cpu_vendor_id == CPU_VENDOR_AMD) {
		switch ((cpu_id & 0xFF0000)) {
		case 0x100000:
		case 0x120000:
			/*
			 * Errata 721 is the cpu bug found by yours truly
			 * (Matthew Dillon).  It is a bug where a sequence
			 * of 5 or more popq's + a retq, under involved
			 * deep recursion circumstances, can cause the %rsp
			 * to not be properly updated, almost always
			 * resulting in a seg-fault soon after.
			 *
			 * Do not install the workaround when we are running
			 * in a virtual machine.
			 */
			if (vmm_guest)
				break;

			msr = rdmsr(MSR_AMD_DE_CFG);
			if ((msr & 1) == 0) {
				if (cpu == 0)
					kprintf("Errata 721 workaround "
						"installed\n");
				msr |= 1;
				wrmsr(MSR_AMD_DE_CFG, msr);
			}
			break;
		}
	}

	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
#if 0 /* JG */
		pg_nx = PG_NX;
#endif
	}
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR &&
	    CPUID_TO_FAMILY(cpu_id) == 0x6 &&
	    CPUID_TO_MODEL(cpu_id) >= 0xf)
		init_via();

	TUNABLE_INT_FETCH("hw.clflush_enable", &hw_clflush_enable);
	if (cpu_feature & CPUID_CLFSH) {
		cpu_clflush_line_size = ((cpu_procinfo >> 8) & 0xff) * 8;

		if (hw_clflush_enable == 0 ||
		    ((hw_clflush_enable == -1) && vmm_guest))
			cpu_feature &= ~CPUID_CLFSH;
	}
}
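/*
 * Illustrative only: the AVX path above turns on CR4.XSAVE (OSXSAVE) and
 * sets the x87/SSE/YMM bits in XCR0.  From user space the resulting state
 * can be observed by checking CPUID.1:%ecx for OSXSAVE (bit 27) and AVX
 * (bit 28) and then reading XCR0 with xgetbv.  The xgetbv0() helper and
 * the output format are assumptions for illustration.
 */
#include <cpuid.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t
xgetbv0(void)
{
	uint32_t lo, hi;

	/* xgetbv with %ecx = 0 reads XCR0. */
	__asm__ volatile("xgetbv" : "=a"(lo), "=d"(hi) : "c"(0));
	return (((uint64_t)hi << 32) | lo);
}

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;
	uint64_t xcr0;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return (1);
	if ((ecx & (1u << 27)) == 0) {		/* OSXSAVE */
		printf("OS has not enabled XSAVE\n");
		return (0);
	}
	xcr0 = xgetbv0();
	/* XCR0 bit 0 = x87, bit 1 = SSE, bit 2 = YMM (AVX state). */
	printf("AVX usable: %s\n",
	    (ecx & (1u << 28)) && (xcr0 & 0x6) == 0x6 ? "yes" : "no");
	return (0);
}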
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I586_CPU
	case CPU_586:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_AMD:
#ifdef CPU_WT_ALLOC
			if (((cpu_id & 0x0f0) > 0) &&
			    ((cpu_id & 0x0f0) < 0x60) &&
			    ((cpu_id & 0x00f) > 3))
				enable_K5_wt_alloc();
			else if (((cpu_id & 0x0f0) > 0x80) ||
			    (((cpu_id & 0x0f0) == 0x80) &&
			    (cpu_id & 0x00f) > 0x07))
				enable_K6_2_wt_alloc();
			else if ((cpu_id & 0x0f0) > 0x50)
				enable_K6_wt_alloc();
#endif
			if ((cpu_id & 0xf0) == 0xa0)
				/*
				 * Make sure the TSC runs through
				 * suspension, otherwise we can't use
				 * it as timecounter
				 */
				wrmsr(0x1900, rdmsr(0x1900) | 0x20ULL);
			break;
		case CPU_VENDOR_CENTAUR:
			init_winchip();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		case CPU_VENDOR_RISE:
			init_rise();
			break;
		}
		break;
#endif
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		switch (cpu_vendor_id) {
		case CPU_VENDOR_INTEL:
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
			break;
#ifdef CPU_ATHLON_SSE_HACK
		case CPU_VENDOR_AMD:
			/*
			 * Sometimes the BIOS doesn't enable SSE instructions.
			 * According to AMD document 20734, the mobile
			 * Duron, the (mobile) Athlon 4 and the Athlon MP
			 * support SSE.  These correspond to cpu_id 0x66X
			 * or 0x67X.
			 */
			if ((cpu_feature & CPUID_XMM) == 0 &&
			    ((cpu_id & ~0xf) == 0x660 ||
			     (cpu_id & ~0xf) == 0x670 ||
			     (cpu_id & ~0xf) == 0x680)) {
				u_int regs[4];

				wrmsr(MSR_HWCR, rdmsr(MSR_HWCR) & ~0x08000);
				do_cpuid(1, regs);
				cpu_feature = regs[3];
			}
			break;
#endif
		case CPU_VENDOR_CENTAUR:
			init_via();
			break;
		case CPU_VENDOR_TRANSMETA:
			init_transmeta();
			break;
		}
#if defined(PAE) || defined(PAE_TABLES)
		if ((amd_feature & AMDID_NX) != 0) {
			uint64_t msr;

			msr = rdmsr(MSR_EFER) | EFER_NXE;
			wrmsr(MSR_EFER, msr);
			pg_nx = PG_NX;
			elf32_nxstack = 1;
		}
#endif
		break;
#endif
	default:
		break;
	}
#if defined(CPU_ENABLE_SSE)
	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
#endif
}
void
initializecpu(void)
{

	switch (cpu) {
#ifdef I486_CPU
	case CPU_BLUE:
		init_bluelightning();
		break;
	case CPU_486DLC:
		init_486dlc();
		break;
	case CPU_CY486DX:
		init_cy486dx();
		break;
	case CPU_M1SC:
		init_5x86();
		break;
#ifdef CPU_I486_ON_386
	case CPU_486:
		init_i486_on_386();
		break;
#endif
	case CPU_M1:
		init_6x86();
		break;
#endif /* I486_CPU */
#ifdef I686_CPU
	case CPU_M2:
		init_6x86MX();
		break;
	case CPU_686:
		if (cpu_vendor_id == CPU_VENDOR_INTEL) {
			switch (cpu_id & 0xff0) {
			case 0x610:
				init_ppro();
				break;
			case 0x660:
				init_mendocino();
				break;
			}
		} else if (cpu_vendor_id == CPU_VENDOR_AMD) {
			init_686_amd();
		} else if (cpu_vendor_id == CPU_VENDOR_CENTAUR) {
			switch (cpu_id & 0xff0) {
			case 0x690:
				if ((cpu_id & 0xf) < 3)
					break;
				/* fall through. */
			case 0x6a0:
			case 0x6d0:
			case 0x6f0:
				init_via();
				break;
			default:
				break;
			}
		}
		break;
#endif
	default:
		break;
	}
	enable_sse();
}
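/*
 * Illustrative only: cpu_id in the variants above is CPUID leaf 1 %eax,
 * and masks like (cpu_id & 0xff0) select the family (bits 11-8) and model
 * (bits 7-4) nibbles, e.g. 0x610 = family 6 model 1 (Pentium Pro) and
 * 0x660 = family 6 model 6 (Mendocino).  The decoder below, including the
 * extended family/model folding done by CPUID_TO_FAMILY()/CPUID_TO_MODEL()
 * in the x86_64 variant, is a sketch for illustration, not kernel code.
 */
#include <cpuid.h>
#include <stdio.h>

int
main(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int base_family, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return (1);
	base_family = (eax >> 8) & 0xf;
	family = base_family;
	model = (eax >> 4) & 0xf;
	/* Families 0x6 and 0xf fold in the extended family/model fields. */
	if (base_family == 0xf)
		family += (eax >> 20) & 0xff;
	if (base_family == 0xf || base_family == 0x6)
		model |= ((eax >> 16) & 0xf) << 4;
	printf("family 0x%x, model 0x%x, stepping 0x%x\n",
	    family, model, eax & 0xf);
	return (0);
}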