bool_t x86_cpuid_initialize(void) { cpu_identity_t *ci = x86_cpuid_get_identity(); struct family_model original; cpuid_001h_eax_t eax; cpuid_001h_ebx_t ebx; memset(ci, 0, sizeof(*ci)); /* First determine which vendor manufactured the CPU. */ x86_cpuid_fill_vendor_string(ci); /* Need both eax and ebx ouput values. */ eax.words[0] = x86_cpuid_eax(1, 0); ebx.words[0] = x86_cpuid_ebx(1, 0); /* We now use EAX for the family, model, stepping values, and EBX for the * brand index. Store the original values from CPUID_001H.EAX. */ original.family = cpuid_001h_eax_get_family(eax); original.model = cpuid_001h_eax_get_model(eax); ci->display.stepping = cpuid_001h_eax_get_stepping(eax); /* Also store extended family and model values used for adjustment */ ci->display.extended_family = cpuid_001h_eax_get_extended_family(eax); ci->display.extended_model = cpuid_001h_eax_get_extended_model(eax); /* Also store the brand index value given in EBX */ ci->display.brand = cpuid_001h_ebx_get_brand(ebx); if (strncmp(ci->vendor_string, X86_CPUID_VENDOR_STRING_INTEL, X86_CPUID_VENDOR_STRING_MAXLENGTH) == 0) { ci->vendor = X86_VENDOR_INTEL; x86_cpuid_intel_identity_initialize(ci, original); return true; } else if (strncmp(ci->vendor_string, X86_CPUID_VENDOR_STRING_AMD_LEGACY, X86_CPUID_VENDOR_STRING_MAXLENGTH) == 0 || strncmp(ci->vendor_string, X86_CPUID_VENDOR_STRING_AMD, X86_CPUID_VENDOR_STRING_MAXLENGTH) == 0) { ci->vendor = X86_VENDOR_AMD; x86_cpuid_amd_identity_initialize(ci, original); return true; } else { /* CPU from unsupported vendor. Examples could be Cyrix, Centaur, etc. * The old time x86 clones. Return false to the boot and let the upper * level caller decide what to do. */ ci->vendor = X86_VENDOR_OTHER; return false; } }
/** Extracts the vendor string from CPUID_000H.E[BCD]X.
 * Will be one of "GenuineIntel", "AMDisbetter!", "AuthenticAMD", "CentaurHauls"
 * etc. We don't support x86 CPUs from vendors other than AMD and Intel.
 */
BOOT_CODE static void x86_cpuid_fill_vendor_string(cpu_identity_t *ci)
{
    /* Validate ci BEFORE deriving any pointer from it: the original code
     * computed the aliasing pointer from ci->vendor_string first, which made
     * the NULL guard ineffective. */
    if (ci == NULL) {
        return;
    }

    MAY_ALIAS uint32_t *vendor_string32 = (uint32_t *)ci->vendor_string;

    /* The 12-byte vendor string is returned in EBX, EDX, ECX (in that order)
     * for CPUID leaf 0. */
    vendor_string32[0] = x86_cpuid_ebx(0, 0);
    vendor_string32[1] = x86_cpuid_edx(0, 0);
    vendor_string32[2] = x86_cpuid_ecx(0, 0);

    /* NUL-terminate; vendor_string is presumably sized MAXLENGTH + 1. */
    ci->vendor_string[X86_CPUID_VENDOR_STRING_MAXLENGTH] = '\0';
}
/*
 * Initialise the FPU for this machine.
 *
 * Returns true on success; false if a requested XSAVE feature, buffer size
 * or instruction variant is not supported by the hardware.
 */
BOOT_CODE bool_t Arch_initFpu(void)
{
    /* Enable FPU / SSE / SSE2 / SSE3 / SSSE3 / SSE4 Extensions. */
    write_cr4(read_cr4() | CR4_OSFXSR);

    /* Enable the FPU in general. */
    write_cr0((read_cr0() & ~CR0_EMULATION) | CR0_MONITOR_COPROC | CR0_NUMERIC_ERROR);
    enableFpu();

    /* Initialize the fpu state */
    finit();

    if (config_set(CONFIG_XSAVE)) {
        uint64_t xsave_features;
        uint32_t xsave_instruction;
        uint64_t desired_features = config_ternary(CONFIG_XSAVE, CONFIG_XSAVE_FEATURE_SET, 1);
        xsave_state_t *nullFpuState = (xsave_state_t *) &x86KSnullFpuState;

        /* create NULL state for FPU to be used by XSAVE variants */
        memzero(&x86KSnullFpuState, sizeof(x86KSnullFpuState));

        /* check for XSAVE support (CPUID.01H:ECX bit 26) */
        if (!(x86_cpuid_ecx(1, 0) & BIT(26))) {
            printf("XSAVE not supported\n");
            return false;
        }

        /* enable XSAVE support */
        write_cr4(read_cr4() | CR4_OSXSAVE);

        /* check feature mask reported by CPUID leaf 0DH, sub-leaf 0 */
        xsave_features = ((uint64_t)x86_cpuid_edx(0x0d, 0x0) << 32) | x86_cpuid_eax(0x0d, 0x0);
        if ((xsave_features & desired_features) != desired_features) {
            /* Both arguments cast to match %llx: the original passed a bare
             * uint64_t for the first conversion and a signed cast for the
             * second, which is undefined on LP64 targets. */
            printf("Requested feature mask is 0x%llx, but only 0x%llx supported\n",
                   (unsigned long long)desired_features,
                   (unsigned long long)xsave_features);
            return false;
        }

        /* enable feature mask */
        write_xcr0(desired_features);

        /* validate the xsave buffer size and instruction */
        if (x86_cpuid_ebx(0x0d, 0x0) > CONFIG_XSAVE_SIZE) {
            /* Message fixed: was "buffer set set to". */
            printf("XSAVE buffer set to %d, but needs to be at least %d\n",
                   CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
            return false;
        }
        if (x86_cpuid_ebx(0x0d, 0x0) < CONFIG_XSAVE_SIZE) {
            printf("XSAVE buffer set to %d, but only needs to be %d.\n"
                   "Warning: Memory may be wasted with larger than needed TCBs.\n",
                   CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
        }

        /* check if a specialized XSAVE instruction was requested
         * (feature bits from CPUID leaf 0DH, sub-leaf 1, EAX) */
        xsave_instruction = x86_cpuid_eax(0x0d, 0x1);
        if (config_set(CONFIG_XSAVE_XSAVEOPT)) {
            if (!(xsave_instruction & BIT(0))) {
                printf("XSAVEOPT requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVEC)) {
            if (!(xsave_instruction & BIT(1))) {
                printf("XSAVEC requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVES)) {
            if (!(xsave_instruction & BIT(3))) {
                printf("XSAVES requested, but not supported\n");
                return false;
            }

            /* AVX state from extended region should be in compacted format */
            nullFpuState->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;

            /* initialize the XSS MSR */
            x86_wrmsr(IA32_XSS_MSR, desired_features);
        }

        /* copy i387 FPU initial state from FPU */
        saveFpuState(&x86KSnullFpuState);
        nullFpuState->i387.mxcsr = MXCSR_INIT_VALUE;
    } else {
        /* Store the null fpu state */
        saveFpuState(&x86KSnullFpuState);
    }

    /* Set the FPU to lazy switch mode */
    disableFpu();

    return true;
}
/*
 * Initialise the FPU for this machine.
 *
 * Returns true on success; false if a requested XSAVE feature, buffer size
 * or instruction variant is not supported by the hardware.
 */
BOOT_CODE bool_t Arch_initFpu(void)
{
    /* Enable FPU / SSE / SSE2 / SSE3 / SSSE3 / SSE4 Extensions. */
    write_cr4(read_cr4() | CR4_OSFXSR);

    /* Enable the FPU in general. */
    write_cr0((read_cr0() & ~CR0_EMULATION) | CR0_MONITOR_COPROC | CR0_NUMERIC_ERROR);
    enableFpu();

    /* Initialize the fpu state */
    finit();

    if (config_set(CONFIG_XSAVE)) {
        uint64_t xsave_features;
        uint32_t xsave_instruction;
        uint64_t desired_features = config_default(CONFIG_XSAVE_FEATURE_SET, 1);

        /* check for XSAVE support (CPUID.01H:ECX bit 26) */
        if (!(x86_cpuid_ecx(1, 0) & BIT(26))) {
            printf("XSAVE not supported\n");
            return false;
        }

        /* enable XSAVE support */
        write_cr4(read_cr4() | CR4_OSXSAVE);

        /* check feature mask reported by CPUID leaf 0DH, sub-leaf 0 */
        xsave_features = ((uint64_t)x86_cpuid_edx(0x0d, 0x0) << 32) | x86_cpuid_eax(0x0d, 0x0);
        if ((xsave_features & desired_features) != desired_features) {
            /* Both arguments cast to match %llx: the original passed a bare
             * uint64_t for the first conversion and a signed cast for the
             * second, which is undefined on LP64 targets. */
            printf("Requested feature mask is 0x%llx, but only 0x%llx supported\n",
                   (unsigned long long)desired_features,
                   (unsigned long long)xsave_features);
            return false;
        }

        /* enable feature mask */
        write_xcr0(desired_features);

        /* validate the xsave buffer size and instruction */
        if (x86_cpuid_ebx(0x0d, 0x0) != CONFIG_XSAVE_SIZE) {
            /* Bug fix: the original printed x86_cpuid_ecx(0x0d, 0x0) here
             * even though the condition checks EBX, so the reported required
             * size was wrong. Also fixed "buffer set set to". */
            printf("XSAVE buffer set to %d, but should be %d\n",
                   CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
            return false;
        }

        /* check if a specialized XSAVE instruction was requested
         * (feature bits from CPUID leaf 0DH, sub-leaf 1, EAX) */
        xsave_instruction = x86_cpuid_eax(0x0d, 0x1);
        if (config_set(CONFIG_XSAVE_XSAVEOPT)) {
            if (!(xsave_instruction & BIT(0))) {
                printf("XSAVEOPT requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVEC)) {
            if (!(xsave_instruction & BIT(1))) {
                printf("XSAVEC requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVES)) {
            if (!(xsave_instruction & BIT(3))) {
                printf("XSAVES requested, but not supported\n");
                return false;
            }

            /* initialize the XSS MSR */
            x86_wrmsr(IA32_XSS_MSR, desired_features);
        }

        /* Load a NULL fpu state so that the idle thread ends up
         * with a sensible FPU state and we can optimize our
         * switch of it */
        memzero(&x86KSnullFpuState, sizeof(x86KSnullFpuState));
        loadFpuState(&x86KSnullFpuState);
    } else {
        /* Store the null fpu state */
        saveFpuState(&x86KSnullFpuState);
    }

    /* Set the FPU to lazy switch mode */
    disableFpu();

    return true;
}