/*
 * Switch the owner of the FPU to the given thread.
 *
 * The outgoing owner's FPU registers (if there is one) are spilled to its
 * TCB, then the incoming owner's registers are restored. Passing NULL
 * releases the FPU entirely, leaving it disabled for lazy switching.
 */
static void switchFpuOwner(tcb_t *new_owner)
{
    tcb_t *prev_owner = ia32KSfpuOwner;

    /* The FPU must be usable before we can touch its register file. */
    enableFpu();

    if (prev_owner != NULL) {
        saveFpuState(&prev_owner->tcbContext.fpuState);
    }

    if (new_owner != NULL) {
        loadFpuState(&new_owner->tcbContext.fpuState);
    } else {
        /* Nobody owns the FPU now; disable it so the next use traps. */
        disableFpu();
    }

    ia32KSfpuOwner = new_owner;
}
/** This and only this function initialises the CPU. * * It does NOT initialise any kernel state. * @return For the verification build, this currently returns true always. */ BOOT_CODE static bool_t init_cpu(void) { bool_t haveHWFPU; #ifdef CONFIG_ARCH_AARCH64 if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) { if (!checkTCR_EL2()) { return false; } } #endif activate_global_pd(); if (config_set(CONFIG_ARM_HYPERVISOR_SUPPORT)) { vcpu_boot_init(); } #ifdef CONFIG_HARDWARE_DEBUG_API if (!Arch_initHardwareBreakpoints()) { printf("Kernel built with CONFIG_HARDWARE_DEBUG_API, but this board doesn't " "reliably support it.\n"); return false; } #endif /* Setup kernel stack pointer. * On ARM SMP, the array index here is the CPU ID */ #ifndef CONFIG_ARCH_ARM_V6 word_t stack_top = ((word_t) kernel_stack_alloc[SMP_TERNARY(getCurrentCPUIndex(), 0)]) + BIT(CONFIG_KERNEL_STACK_BITS); #if defined(ENABLE_SMP_SUPPORT) && defined(CONFIG_ARCH_AARCH64) /* the least 12 bits are used to store logical core ID */ stack_top |= getCurrentCPUIndex(); #endif setKernelStack(stack_top); #endif /* CONFIG_ARCH_ARM_V6 */ #ifdef CONFIG_ARCH_AARCH64 /* initialise CPU's exception vector table */ setVtable((pptr_t)arm_vector_table); #endif /* CONFIG_ARCH_AARCH64 */ haveHWFPU = fpsimd_HWCapTest(); /* Disable FPU to avoid channels where a platform has an FPU but doesn't make use of it */ if (haveHWFPU) { disableFpu(); } #ifdef CONFIG_HAVE_FPU if (haveHWFPU) { if (!fpsimd_init()) { return false; } } else { printf("Platform claims to have FP hardware, but does not!"); return false; } #endif /* CONFIG_HAVE_FPU */ cpu_initLocalIRQController(); #ifdef CONFIG_ENABLE_BENCHMARKS armv_init_ccnt(); #endif /* CONFIG_ENABLE_BENCHMARKS */ /* Export selected CPU features for access by PL0 */ armv_init_user_access(); initTimer(); return true; }
/*
 * Initialise the FPU for this machine.
 *
 * Enables FPU/SSE support via CR0/CR4, then (when XSAVE is configured)
 * validates CPU support for the requested XSAVE feature set, buffer size
 * and save-instruction variant, and constructs the NULL FPU state used
 * for freshly created threads. Leaves the FPU disabled for lazy switching.
 */
BOOT_CODE bool_t Arch_initFpu(void)
{
    /* Enable FPU / SSE / SSE2 / SSE3 / SSSE3 / SSE4 Extensions. */
    write_cr4(read_cr4() | CR4_OSFXSR);

    /* Enable the FPU in general. */
    write_cr0((read_cr0() & ~CR0_EMULATION) | CR0_MONITOR_COPROC | CR0_NUMERIC_ERROR);
    enableFpu();

    /* Initialize the fpu state */
    finit();

    if (config_set(CONFIG_XSAVE)) {
        uint64_t xsave_features;
        uint32_t xsave_instruction;
        uint64_t desired_features = config_ternary(CONFIG_XSAVE, CONFIG_XSAVE_FEATURE_SET, 1);
        xsave_state_t *nullFpuState = (xsave_state_t *) &x86KSnullFpuState;

        /* create NULL state for FPU to be used by XSAVE variants */
        memzero(&x86KSnullFpuState, sizeof(x86KSnullFpuState));

        /* check for XSAVE support (CPUID.1:ECX bit 26) */
        if (!(x86_cpuid_ecx(1, 0) & BIT(26))) {
            printf("XSAVE not supported\n");
            return false;
        }
        /* enable XSAVE support */
        write_cr4(read_cr4() | CR4_OSXSAVE);
        /* check feature mask */
        xsave_features = ((uint64_t)x86_cpuid_edx(0x0d, 0x0) << 32) | x86_cpuid_eax(0x0d, 0x0);
        if ((xsave_features & desired_features) != desired_features) {
            /* Both arguments are cast: passing a raw uint64_t for %llx is a
             * format-specifier mismatch (undefined behaviour) on LP64 targets
             * where uint64_t is unsigned long. */
            printf("Requested feature mask is 0x%llx, but only 0x%llx supported\n",
                   (long long)desired_features, (long long)xsave_features);
            return false;
        }
        /* enable feature mask */
        write_xcr0(desired_features);

        /* validate the xsave buffer size and instruction */
        if (x86_cpuid_ebx(0x0d, 0x0) > CONFIG_XSAVE_SIZE) {
            printf("XSAVE buffer set to %d, but needs to be at least %d\n",
                   CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
            return false;
        }
        if (x86_cpuid_ebx(0x0d, 0x0) < CONFIG_XSAVE_SIZE) {
            printf("XSAVE buffer set to %d, but only needs to be %d.\n"
                   "Warning: Memory may be wasted with larger than needed TCBs.\n",
                   CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
        }

        /* check if a specialized XSAVE instruction was requested */
        xsave_instruction = x86_cpuid_eax(0x0d, 0x1);
        if (config_set(CONFIG_XSAVE_XSAVEOPT)) {
            if (!(xsave_instruction & BIT(0))) {
                printf("XSAVEOPT requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVEC)) {
            if (!(xsave_instruction & BIT(1))) {
                printf("XSAVEC requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVES)) {
            if (!(xsave_instruction & BIT(3))) {
                printf("XSAVES requested, but not supported\n");
                return false;
            }

            /* AVX state from extended region should be in compacted format */
            nullFpuState->header.xcomp_bv = XCOMP_BV_COMPACTED_FORMAT;

            /* initialize the XSS MSR */
            x86_wrmsr(IA32_XSS_MSR, desired_features);
        }

        /* copy i387 FPU initial state from FPU */
        saveFpuState(&x86KSnullFpuState);
        nullFpuState->i387.mxcsr = MXCSR_INIT_VALUE;
    } else {
        /* Store the null fpu state */
        saveFpuState(&x86KSnullFpuState);
    }

    /* Set the FPU to lazy switch mode */
    disableFpu();

    return true;
}
/*
 * Initialise the FPU for this machine.
 *
 * Enables FPU/SSE support via CR0/CR4, then (when XSAVE is configured)
 * validates CPU support for the requested XSAVE feature set, buffer size
 * and save-instruction variant, and loads a NULL FPU state for the idle
 * thread. Leaves the FPU disabled for lazy switching.
 */
BOOT_CODE bool_t Arch_initFpu(void)
{
    /* Enable FPU / SSE / SSE2 / SSE3 / SSSE3 / SSE4 Extensions. */
    write_cr4(read_cr4() | CR4_OSFXSR);

    /* Enable the FPU in general. */
    write_cr0((read_cr0() & ~CR0_EMULATION) | CR0_MONITOR_COPROC | CR0_NUMERIC_ERROR);
    enableFpu();

    /* Initialize the fpu state */
    finit();

    if (config_set(CONFIG_XSAVE)) {
        uint64_t xsave_features;
        uint32_t xsave_instruction;
        uint64_t desired_features = config_default(CONFIG_XSAVE_FEATURE_SET, 1);

        /* check for XSAVE support (CPUID.1:ECX bit 26) */
        if (!(x86_cpuid_ecx(1, 0) & BIT(26))) {
            printf("XSAVE not supported\n");
            return false;
        }
        /* enable XSAVE support */
        write_cr4(read_cr4() | CR4_OSXSAVE);
        /* check feature mask */
        xsave_features = ((uint64_t)x86_cpuid_edx(0x0d, 0x0) << 32) | x86_cpuid_eax(0x0d, 0x0);
        if ((xsave_features & desired_features) != desired_features) {
            /* Both arguments are cast: passing a raw uint64_t for %llx is a
             * format-specifier mismatch (undefined behaviour) on LP64 targets
             * where uint64_t is unsigned long. */
            printf("Requested feature mask is 0x%llx, but only 0x%llx supported\n",
                   (long long)desired_features, (long long)xsave_features);
            return false;
        }
        /* enable feature mask */
        write_xcr0(desired_features);

        /* validate the xsave buffer size and instruction */
        if (x86_cpuid_ebx(0x0d, 0x0) != CONFIG_XSAVE_SIZE) {
            /* Report the same EBX value that was checked above; the previous
             * code printed ECX (the maximum buffer size over all features)
             * instead, which is a different quantity. */
            printf("XSAVE buffer set to %d, but should be %d\n",
                   CONFIG_XSAVE_SIZE, x86_cpuid_ebx(0x0d, 0x0));
            return false;
        }

        /* check if a specialized XSAVE instruction was requested */
        xsave_instruction = x86_cpuid_eax(0x0d, 0x1);
        if (config_set(CONFIG_XSAVE_XSAVEOPT)) {
            if (!(xsave_instruction & BIT(0))) {
                printf("XSAVEOPT requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVEC)) {
            if (!(xsave_instruction & BIT(1))) {
                printf("XSAVEC requested, but not supported\n");
                return false;
            }
        } else if (config_set(CONFIG_XSAVE_XSAVES)) {
            if (!(xsave_instruction & BIT(3))) {
                printf("XSAVES requested, but not supported\n");
                return false;
            }

            /* initialize the XSS MSR */
            x86_wrmsr(IA32_XSS_MSR, desired_features);
        }

        /* Load a NULL fpu state so that the idle thread ends up
         * with a sensible FPU state and we can optimize our
         * switch of it */
        memzero(&x86KSnullFpuState, sizeof(x86KSnullFpuState));
        loadFpuState(&x86KSnullFpuState);
    } else {
        /* Store the null fpu state */
        saveFpuState(&x86KSnullFpuState);
    }

    /* Set the FPU to lazy switch mode */
    disableFpu();

    return true;
}