/*
 * Bare-metal boot entry: optionally bring up caches/TLB, DDR, PLL0 and a
 * basic UART (each step gated by its Kconfig option), then call main().
 *
 * NOTE(review): because of the #elif, when both the DCACHE and ICACHE
 * options are defined only the DCACHE branch runs -- confirm intended.
 */
void startup(void)
{
#if defined(CONFIG_BAREMETAL_ENABLE_DCACHE)
	/* The D-cache needs a valid TLB before it may be enabled */
	init_tlb(main_tlb);
	assign_tlb(main_tlb);
	enable_cache();
#elif defined(CONFIG_BAREMETAL_ENABLE_ICACHE)
	enable_cache();
#endif
#if defined(CONFIG_BAREMETAL_DDR_INIT)
	ddr_init();
#endif
#if defined(CONFIG_BAREMETAL_PLL0_INIT)
	pll0_init();
#endif
#if defined(CONFIG_BAREMETAL_UART_BASIC)
	uart_basic_init();
#endif
	main();
	/* If main() ever returns, stop the CPU */
	halt();
}
/*
 * Tear down cache-as-RAM on the BSP after migrating to the new (real RAM)
 * stack, re-enable normal caching of low RAM, then load and run ramstage.
 * Does not return; the final debug print indicates a hand-off failure.
 */
void cache_as_ram_new_stack (void)
{
	void *resume_backup_memory = NULL;

	/* Approximate top of the new stack: the address of a local variable */
	print_car_debug("Top about %08x ... Done\n",
		(uint32_t) &resume_backup_memory);
	print_car_debug("Disabling cache as ram now\n");
	disable_cache_as_ram_bsp();

	disable_cache();
	/* Enable cached (write-back) access to RAM from 0 to CONFIG_RAMTOP */
	set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
	enable_cache();

	if (acpi_is_wakeup_s3()) {
		/* S3 resume: find the CBMEM region holding the ramstage backup */
		resume_backup_memory = cbmem_find(CBMEM_ID_RESUME);
		print_car_debug("Resume backup memory location: %p\n",
			resume_backup_memory);
	}
	prepare_ramstage_region(resume_backup_memory);

	set_sysinfo_in_ram(1); // So other core0 could start to train mem

	/* Copy and execute ramstage */
	copy_and_run();
	/* We will not return */
	print_car_debug("should not be here -\n");
}
/*
 * Make the ramstage load region cacheable: with the cache temporarily
 * disabled, program variable MTRR 0 to cover 0..CONFIG_RAMTOP as
 * write-back, then re-enable the cache.
 */
static void cache_ramstage(void)
{
	disable_cache();
	set_var_mtrr(0, 0, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
	enable_cache();
}
/*
 * Romstage entry: program the refresh timer, enable CPU caching, set up
 * the southbridge UART for the console, and perform basic memory
 * controller initialization. Register values are chipset-specific magic.
 */
static void main(void)
{
	uint32_t tmp;

	post_code(0x05);

	/* Set timer1 to pulse generator 15us for memory refresh */
	outb(0x54, 0x43);
	outb(0x12, 0x41);

	/* CPU setup, romcc pukes on invd() */
	asm volatile ("invd");
	enable_cache();

	/* Set serial base */
	pci_write_config32(PCI_DEV(0,7,0), 0x54, 0x3f8);
	/* serial IRQ disable, LPC disable, COM2 goes to LPC, internal UART for COM1 */
	pci_write_config32(PCI_DEV(0,7,0), 0x50, 0x84101012);
	console_init();

	/* memory init */
	pci_write_config32(PCI_DEV(0,0,0), 0x68, 0x6c99f);
	pci_write_config32(PCI_DEV(0,0,0), 0x6c, 0x800451);
	pci_write_config32(PCI_DEV(0,0,0), 0x70, 0x4000003);

	/* memory phase/buffer strength for read and writes */
	tmp = pci_read_config32(PCI_DEV(0,0,0), 0x64);
	/* NOTE(review): 0x0FF00FFFF has 9 hex digits and truncates to
	 * 0xFF00FFFF in this 32-bit AND -- confirm the leading digit is a typo */
	tmp &= 0x0FF00FFFF;
	tmp |= 0x790000;
	pci_write_config32(PCI_DEV(0,0,0), 0x64, tmp);

	/* Route Cseg, Dseg, Eseg and Fseg to RAM */
	pci_write_config32(PCI_DEV(0,0,0), 0x84, 0x3ffffff0);
}
/*
 * Program a minimal MTRR setup used during ECC memory initialization:
 * clear all variable MTRRs, map 0..CONFIG_RAMTOP as write-back, set the
 * default memory type to write-combining, and raise TOP_MEM to 4GiB.
 */
static void set_init_ecc_mtrrs(void)
{
	msr_t msr;
	int i;

	disable_cache();

	/* First clear all of the msrs to be safe */
	for (i = 0; i < MTRR_COUNT; i++) {
		msr_t zero;
		zero.lo = zero.hi = 0;
		wrmsr(MTRR_PHYS_BASE(i), zero);
		wrmsr(MTRR_PHYS_MASK(i), zero);
	}

	/* Write back cache the first 1MB */
	msr.hi = 0x00000000;
	msr.lo = 0x00000000 | MTRR_TYPE_WRBACK;
	wrmsr(MTRR_PHYS_BASE(0), msr);
	/* Mask covers CONFIG_RAMTOP; 0x800 sets the MTRR valid bit */
	msr.hi = 0x000000ff;
	msr.lo = ~((CONFIG_RAMTOP) - 1) | 0x800;
	wrmsr(MTRR_PHYS_MASK(0), msr);

	/* Set the default type to write combining; 0xc00 enables MTRRs */
	msr.hi = 0x00000000;
	msr.lo = 0xc00 | MTRR_TYPE_WRCOMB;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);

	/* Set TOP_MEM to 4G */
	msr.hi = 0x00000001;
	msr.lo = 0x00000000;
	wrmsr(TOP_MEM, msr);

	enable_cache();
}
/*
 * Minimal mainboard bring-up for verstage: configure the L2 cache and
 * enable caching so vboot runs at full speed, then do the shared early
 * mainboard initialization.
 */
void verstage_mainboard_init(void)
{
	configure_l2_cache();
	enable_cache();
	early_mainboard_init();
}
/*
 * Per-CPU init for AMD family/model 10: set up MTRRs, clear the machine
 * check status banks, enable the local APIC, optionally enable extended
 * APIC/core-count features for multi-core parts, and lock SMM space.
 */
static void model_10_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 10 Init - a no-op.\n");

	u8 i;
	msr_t msr;
#if CONFIG_LOGICAL_CPUS
	u32 siblings;
#endif

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 6; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	enable_cache();

	/* Enable the local cpu apics */
	setup_lapic();

	/* Set the processor name string */
	// init_processor_name();

#if CONFIG_LOGICAL_CPUS
	/* Core count (minus one) from extended CPUID leaf 0x80000008 */
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
/*
 * Enable or disable the CPU cache according to the build-time USE_CACHE
 * switch. Fix: the definition used an empty parameter list `()`, which in
 * pre-C23 C declares an unprototyped function; `(void)` is the correct
 * prototype form and matches the other functions in this code base.
 */
void setup_cache(void)
{
#ifdef USE_CACHE
	enable_cache();
#else
	disable_cache();
#endif
}
/*
 * Copy the SMM handler into the A-segment (0xA0000). The fixed MTRR for
 * that range is temporarily redirected to DRAM so the copy lands in
 * memory rather than legacy VGA space; the original SYSCFG and fixed
 * MTRR values are restored afterwards.
 */
void smm_init(void)
{
	msr_t msr, syscfg_orig, mtrr_aseg_orig;

	/* Back up MSRs for later restore */
	syscfg_orig = rdmsr(SYSCFG_MSR);
	mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);

	/* MTRR changes don't like an enabled cache */
	disable_cache();

	msr = syscfg_orig;
	/* Allow changes to MTRR extended attributes */
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	/* turn the extended attributes off until we fix
	 * them so A0000 is routed to memory
	 */
	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	/* set DRAM access to 0xa0000 */
	msr.lo = 0x18181818;
	msr.hi = 0x18181818;
	wrmsr(MTRRfix16K_A0000_MSR, msr);

	/* enable the extended features */
	msr = syscfg_orig;
	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
	wrmsr(SYSCFG_MSR, msr);

	enable_cache();
	/* copy the real SMM handler */
	memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
	/* Flush the copy out to DRAM before switching the mapping back */
	wbinvd();
	disable_cache();

	/* Restore SYSCFG and MTRR */
	wrmsr(SYSCFG_MSR, syscfg_orig);
	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
	enable_cache();

	/* CPU MSR are set in CPU init */
}
/*
 * Cache the top 4MiB of the ROM window as write-protect via variable
 * MTRR 1, then enable variable MTRRs through the default-type MSR.
 */
static void enable_rom_caching(void)
{
	msr_t msr;

	disable_cache();
	/* Why only top 4MiB ? */
	set_var_mtrr(1, 0xffc00000, 4*1024*1024, MTRR_TYPE_WRPROT);
	enable_cache();

	/* Enable Variable MTRRs (bit 11); default memory type stays 0 (UC) */
	msr.hi = 0x00000000;
	msr.lo = 0x00000800;
	wrmsr(MTRRdefType_MSR, msr);
}
/*
 * Cache the top CACHE_ROM_SIZE bytes of the 4GiB address space (the ROM
 * window) as write-protect via variable MTRR 1, then enable variable
 * MTRRs through the default-type MSR.
 */
static void enable_rom_caching(void)
{
	msr_t msr;

	disable_cache();
	/* Base = 4GiB - CACHE_ROM_SIZE */
	set_var_mtrr(1, 0xffffffff - CACHE_ROM_SIZE + 1, CACHE_ROM_SIZE,
		MTRR_TYPE_WRPROT);
	enable_cache();

	/* Enable Variable MTRRs (bit 11); default memory type stays 0 (UC) */
	msr.hi = 0x00000000;
	msr.lo = 0x00000800;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
/*
 * Restore a previously-saved MTRR configuration: all variable MTRR
 * base/mask pairs, the AMD TOP_MEM/TOP_MEM2 registers, and the default
 * memory type. The cache is disabled around the MSR writes.
 */
static void restore_mtrr_state(struct mtrr_state *state)
{
	int i;
	disable_cache();

	for (i = 0; i < MTRR_COUNT; i++) {
		wrmsr(MTRRphysBase_MSR(i), state->mtrrs[i].base);
		wrmsr(MTRRphysMask_MSR(i), state->mtrrs[i].mask);
	}
	wrmsr(TOP_MEM, state->top_mem);
	wrmsr(TOP_MEM2, state->top_mem2);
	wrmsr(MTRRdefType_MSR, state->def_type);

	enable_cache();
}
/*
 * Restore a previously-saved MTRR configuration: all variable MTRR
 * base/mask pairs, the AMD TOP_MEM/TOP_MEM2 registers, and the default
 * memory type. The cache is disabled around the MSR writes.
 */
static void restore_mtrr_state(struct mtrr_state *state)
{
	int i;
	disable_cache();

	for (i = 0; i < MTRR_COUNT; i++) {
		wrmsr(MTRR_PHYS_BASE(i), state->mtrrs[i].base);
		wrmsr(MTRR_PHYS_MASK(i), state->mtrrs[i].mask);
	}
	wrmsr(TOP_MEM, state->top_mem);
	wrmsr(TOP_MEM2, state->top_mem2);
	wrmsr(MTRR_DEF_TYPE_MSR, state->def_type);

	enable_cache();
}
/*
 * Tear down cache-as-RAM after migrating to the new (real RAM) stack,
 * re-enable caching of low RAM, and hand off to ramstage. Never returns;
 * the final debug print indicates a failed hand-off.
 */
asmlinkage void cache_as_ram_new_stack(void)
{
	print_car_debug("Disabling cache as RAM now\n");
	disable_cache_as_ram_real(0);	// inline

	disable_cache();
	/* Enable cached access to RAM in the range 0M to CACHE_TMP_RAMTOP */
	set_var_mtrr(0, 0x00000000, CACHE_TMP_RAMTOP, MTRR_TYPE_WRBACK);
	enable_cache();

	/* On S3 resume this prepares the backup region as well */
	prepare_ramstage_region(acpi_is_wakeup_s3());

	set_sysinfo_in_ram(1); // So other core0 could start to train mem

	/* Copy and execute ramstage */
	copy_and_run();
	/* We will not return */
	print_car_debug("should not be here -\n");
}
/*
 * Early MTRR setup: hand the full list of fixed and variable MTRR MSR
 * addresses to do_early_mtrr_init() with the cache disabled around it.
 */
static inline void early_mtrr_init(void)
{
	/* Zero-terminated table of MTRR MSR addresses to initialize */
	static const unsigned long mtrr_msrs[] = {
		/* fixed mtrr */
		0x250, 0x258, 0x259,
		0x268, 0x269, 0x26A,
		0x26B, 0x26C, 0x26D,
		0x26E, 0x26F,
		/* var mtrr */
		0x200, 0x201, 0x202, 0x203,
		0x204, 0x205, 0x206, 0x207,
		0x208, 0x209, 0x20A, 0x20B,
		0x20C, 0x20D, 0x20E, 0x20F,
		/* NULL end of table */
		0
	};
	disable_cache();
	do_early_mtrr_init(mtrr_msrs);
	enable_cache();
}
/*
 * Shadow the BIOS region: save a copy of SHADOW_BASE..+SHADOW_LEN, make
 * the F0000/F8000 fixed-MTRR ranges write-through (writable) via the
 * SYSCFG mod-enable bits, copy the image back over the ROM window, then
 * restore the SYSCFG state. Sets bios_shadowed on completion.
 */
void ACPI::shadow_bios(void)
{
	printf("Shadowing BIOS");

	/* Save the current contents of the BIOS window */
	void *area = malloc(SHADOW_LEN);
	xassert(area);
	memcpy(area, (void *)SHADOW_BASE, SHADOW_LEN);

	// disable fixed MTRRs
	/* bits 18/19 of SYSCFG -- presumably FixDramEn/FixDramModEn; verify */
	uint64_t val = lib::rdmsr(MSR_SYSCFG);
	lib::wrmsr(MSR_SYSCFG, val | (3 << 18));

	disable_cache();
	lib::wrmsr(MSR_MTRR_FIX4K_F0000, FMTRR_WRITETHROUGH);
	lib::wrmsr(MSR_MTRR_FIX4K_F8000, FMTRR_WRITETHROUGH);
	enable_cache();

	// reenable fixed MTRRs
	lib::wrmsr(MSR_SYSCFG, val | (1 << 18));

	/* Write the saved image back through the now-writable window */
	memcpy((void *)SHADOW_BASE, area, SHADOW_LEN);
	free(area);

	printf("\n");
	bios_shadowed = 1;
}
// segment = get Nth segment of a multi-segment file static bool check_file_seg(struct MPContext *mpctx, struct demuxer **sources, int num_sources, unsigned char uid_map[][16], char *filename, int segment) { bool was_valid = false; struct demuxer_params params = { .matroska_wanted_uids = uid_map, .matroska_wanted_segment = segment, .matroska_was_valid = &was_valid, }; struct stream *s = stream_open(filename, mpctx->opts); if (!s) return false; struct demuxer *d = demux_open(s, "mkv", ¶ms, mpctx->opts); if (!d) { free_stream(s); return was_valid; } if (d->type == DEMUXER_TYPE_MATROSKA) { for (int i = 1; i < num_sources; i++) { if (sources[i]) continue; if (!memcmp(uid_map[i], d->matroska_data.segment_uid, 16)) { mp_msg(MSGT_CPLAYER, MSGL_INFO, "Match for source %d: %s\n", i, d->filename); if (enable_cache(mpctx, &s, &d, ¶ms) < 0) continue; sources[i] = d; return true; } } } free_demuxer(d); free_stream(s); return was_valid; }
/*
 * Minimal MTRR setup for the S3 resume path: disable the fixed-MTRR DRAM
 * bits (the later MTRR restore re-enables them), map 0..CACHE_TMP_RAMTOP
 * as write-back via variable MTRR 0, enable variable MTRRs only, and turn
 * the cache back on.
 */
static void set_resume_cache(void)
{
	msr_t msr;

	/* disable fixed mtrr for now, it will be enabled by mtrr restore */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~(SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrFixDramModEn);
	wrmsr(SYSCFG_MSR, msr);

	/* Enable cached access to RAM in the range 0M to CACHE_TMP_RAMTOP */
	msr.lo = 0 | MTRR_TYPE_WRBACK;
	msr.hi = 0;
	wrmsr(MTRR_PHYS_BASE(0), msr);
	msr.lo = ~(CACHE_TMP_RAMTOP - 1) | MTRR_PHYS_MASK_VALID;
	msr.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
	wrmsr(MTRR_PHYS_MASK(0), msr);

	/* Set the default memory type and disable fixed and enable variable MTRRs */
	msr.hi = 0;
	msr.lo = (1 << 11);
	wrmsr(MTRR_DEF_TYPE_MSR, msr);

	enable_cache();
}
/*
 * Minimal MTRR setup for the S3 resume path (older macro names): disable
 * the fixed-MTRR DRAM bits (the later MTRR restore re-enables them), map
 * 0..CONFIG_RAMTOP as write-back via variable MTRR 0, enable variable
 * MTRRs only, and turn the cache back on.
 */
static void set_resume_cache(void)
{
	msr_t msr;

	/* disable fixed mtrr for now, it will be enabled by mtrr restore */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~(SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrFixDramModEn);
	wrmsr(SYSCFG_MSR, msr);

	/* Enable caching for 0 - coreboot ram using variable mtrr */
	msr.lo = 0 | MTRR_TYPE_WRBACK;
	msr.hi = 0;
	wrmsr(MTRRphysBase_MSR(0), msr);
	msr.lo = ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid;
	msr.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
	wrmsr(MTRRphysMask_MSR(0), msr);

	/* Set the default memory type and disable fixed and enable variable MTRRs */
	msr.hi = 0;
	msr.lo = (1 << 11);
	wrmsr(MTRRdefType_MSR, msr);

	enable_cache();
}
/*
 * Per-CPU init for AMD family 10h: report the node/core id, set up MTRRs,
 * clear the machine check status banks, enable the local APIC, set the
 * processor name, enable multi-core features when configured, apply
 * erratum 343 and Cf8ExtCfg workarounds, program the per-core SMM base
 * (enabled or disabled depending on the SMI handler config), and finally
 * lock SMM with SMMLOCK.
 */
static void model_10xxx_init(device_t dev)
{
	u8 i;
	msr_t msr;
	struct node_core_id id;
#if CONFIG_LOGICAL_CPUS
	u32 siblings;
#endif

	id = get_node_core_id(read_nb_cfg_54());	/* nb_cfg_54 can not be set */
	printk(BIOS_DEBUG, "nodeid = %02d, coreid = %02d\n", id.nodeid, id.coreid);

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 5; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	enable_cache();

	/* Enable the local cpu apics */
	setup_lapic();

	/* Set the processor name string */
	init_processor_name();

#if CONFIG_LOGICAL_CPUS
	/* Core count (minus one) from extended CPUID leaf 0x80000008 */
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	msr = rdmsr(BU_CFG2_MSR);
	/* Clear ClLinesToNbDis */
	msr.lo &= ~(1 << 15);
	/* Clear bit 35 as per Erratum 343 */
	msr.hi &= ~(1 << (35-32));
	wrmsr(BU_CFG2_MSR, msr);

	if (IS_ENABLED(CONFIG_HAVE_SMI_HANDLER)) {
		printk(BIOS_DEBUG, "Initializing SMM ASeg memory\n");

		/* Set SMM base address for this CPU */
		msr = rdmsr(SMM_BASE_MSR);
		msr.lo = SMM_BASE - (lapicid() * 0x400);
		wrmsr(SMM_BASE_MSR, msr);

		/* Enable the SMM memory window */
		msr = rdmsr(SMM_MASK_MSR);
		msr.lo |= (1 << 0);	/* Enable ASEG SMRAM Range */
		wrmsr(SMM_MASK_MSR, msr);
	} else {
		printk(BIOS_DEBUG, "Disabling SMM ASeg memory\n");

		/* Set SMM base address for this CPU */
		msr = rdmsr(SMM_BASE_MSR);
		msr.lo = SMM_BASE - (lapicid() * 0x400);
		wrmsr(SMM_BASE_MSR, msr);

		/* Disable the SMM memory window */
		msr.hi = 0x0;
		msr.lo = 0x0;
		wrmsr(SMM_MASK_MSR, msr);
	}

	/* Set SMMLOCK to avoid exploits messing with SMM */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
/*
 * Turn on the CPU cache, posting code 0x60 and logging the action first
 * so the step is visible on the POST display and console.
 */
void x86_enable_cache(void)
{
	post_code(0x60);
	printk(BIOS_INFO, "Enabling cache\n");
	enable_cache();
}
//method to return CPU to regular operation int cpu_resume(int *fp) { enable_cache(); return 0; }
/*
 * Per-CPU SMM setup: copy the SMM handler into the A-segment once
 * (redirecting the fixed MTRR for 0xA0000 to DRAM during the copy), then
 * program this CPU's SMM base, enable the ASEG SMRAM window, and lock
 * SMM with SMMLOCK. Bails out early if SMM is already locked.
 */
void smm_init(void)
{
	msr_t msr;

	msr = rdmsr(HWCR_MSR);
	if (msr.lo & (1 << 0)) {
		// This sounds like a bug... ?
		printk(BIOS_DEBUG, "SMM is still locked from last boot, using old handler.\n");
		return;
	}

	/* Only copy SMM handler once, not once per CPU */
	if (!smm_handler_copied) {
		msr_t syscfg_orig, mtrr_aseg_orig;

		smm_handler_copied = 1;

		/* Back up MSRs for later restore */
		syscfg_orig = rdmsr(SYSCFG_MSR);
		mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);

		/* MTRR changes don't like an enabled cache */
		disable_cache();

		msr = syscfg_orig;
		/* Allow changes to MTRR extended attributes */
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		/* turn the extended attributes off until we fix
		 * them so A0000 is routed to memory
		 */
		msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		/* set DRAM access to 0xa0000 */
		/* A0000 is memory */
		msr.lo = 0x18181818;
		msr.hi = 0x18181818;
		wrmsr(MTRRfix16K_A0000_MSR, msr);

		/* enable the extended features */
		msr = syscfg_orig;
		msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
		msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
		wrmsr(SYSCFG_MSR, msr);

		enable_cache();
		/* copy the real SMM handler */
		memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
		/* Flush the copy to DRAM before restoring the mapping */
		wbinvd();

		/* Restore MTRR */
		disable_cache();

		/* Restore SYSCFG */
		wrmsr(SYSCFG_MSR, syscfg_orig);

		wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
		enable_cache();
	}

	/* But set SMM base address on all CPUs/cores */
	msr = rdmsr(SMM_BASE_MSR);
	msr.lo = SMM_BASE - (lapicid() * 0x400);
	wrmsr(SMM_BASE_MSR, msr);

	/* enable the SMM memory window */
	msr = rdmsr(SMM_MASK_MSR);
	msr.lo |= (1 << 0);	// Enable ASEG SMRAM Range
	wrmsr(SMM_MASK_MSR, msr);

	/* Set SMMLOCK to avoid exploits messing with SMM */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}
/*
 * The main configuration routine. Its implementation is hugely inspired by the
 * the same routine implementation in Solaris NSCD.
 *
 * Reads `fname` line by line, tokenizes each line with strbreak(), and
 * dispatches on the first character of the keyword. Returns 0 on success,
 * -1 on open failure or on the first syntax error (error_str/error_line
 * are set only in the syntax-error case).
 */
int parse_config_file(struct configuration *config,
	const char *fname, char const **error_str, int *error_line)
{
	FILE	*fin;
	char	buffer[255];
	char	*fields[128];
	int	field_count, line_num, value;
	int	res;

	TRACE_IN(parse_config_file);
	assert(config != NULL);
	assert(fname != NULL);

	fin = fopen(fname, "r");
	if (fin == NULL) {
		TRACE_OUT(parse_config_file);
		return (-1);
	}

	res = 0;
	line_num = 0;
	memset(buffer, 0, sizeof(buffer));
	while ((res == 0) && (fgets(buffer, sizeof(buffer) - 1, fin)
		!= NULL)) {
		/* NOTE(review): sizeof(fields) is a byte count, not an element
		 * count -- confirm strbreak()'s expected third argument. */
		field_count = strbreak(buffer, fields, sizeof(fields));
		++line_num;

		if (field_count == 0)
			continue;

		/* Dispatch on the first character of the keyword; each branch
		 * validates the field count and argument values before acting. */
		switch (fields[0][0]) {
		case '#':
		case '\0':
			continue;
		case 'e':
			if ((field_count == 3) &&
			(strcmp(fields[0], "enable-cache") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_yesno(fields[2])) != -1)) {
				enable_cache(config, fields[1], value);
				continue;
			}
			break;
		case 'd':
			/* debug-level is validated but its value is discarded;
			 * presumably accepted for compatibility -- verify. */
			if ((field_count == 2) &&
			(strcmp(fields[0], "debug-level") == 0) &&
			((value = get_number(fields[1], 0, 10)) != -1)) {
				continue;
			}
			break;
		case 'p':
			if ((field_count == 3) &&
			(strcmp(fields[0], "positive-time-to-live") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 0, -1)) != -1)) {
				set_positive_time_to_live(config,
					fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			/* NOTE(review): unlike its siblings, this branch does
			 * not call check_cachename(fields[1]) -- verify. */
			(strcmp(fields[0], "positive-confidence-threshold") == 0) &&
			((value = get_number(fields[2], 1, -1)) != -1)) {
				set_positive_confidence_threshold(config,
					fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			(strcmp(fields[0], "positive-policy") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_policy(fields[2])) != -1)) {
				set_positive_policy(config, fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			(strcmp(fields[0], "perform-actual-lookups") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_yesno(fields[2])) != -1)) {
				set_perform_actual_lookups(config, fields[1],
					value);
				continue;
			}
			break;
		case 'n':
			if ((field_count == 3) &&
			(strcmp(fields[0], "negative-time-to-live") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 0, -1)) != -1)) {
				set_negative_time_to_live(config,
					fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			/* NOTE(review): also lacks check_cachename -- verify. */
			(strcmp(fields[0], "negative-confidence-threshold") == 0) &&
			((value = get_number(fields[2], 1, -1)) != -1)) {
				set_negative_confidence_threshold(config,
					fields[1], value);
				continue;
			} else if ((field_count == 3) &&
			(strcmp(fields[0], "negative-policy") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_policy(fields[2])) != -1)) {
				set_negative_policy(config,
					fields[1], value);
				continue;
			}
			break;
		case 's':
			if ((field_count == 3) &&
			(strcmp(fields[0], "suggested-size") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 1, -1)) != -1)) {
				set_suggested_size(config, fields[1], value);
				continue;
			}
			break;
		case 't':
			if ((field_count == 2) &&
			(strcmp(fields[0], "threads") == 0) &&
			((value = get_number(fields[1], 1, -1)) != -1)) {
				set_threads_num(config, value);
				continue;
			}
			break;
		case 'k':
			if ((field_count == 3) &&
			(strcmp(fields[0], "keep-hot-count") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_number(fields[2], 0, -1)) != -1)) {
				set_keep_hot_count(config,
					fields[1], value);
				continue;
			}
			break;
		case 'c':
			if ((field_count == 3) &&
			(strcmp(fields[0], "check-files") == 0) &&
			(check_cachename(fields[1]) == 0) &&
			((value = get_yesno(fields[2])) != -1)) {
				check_files(config,
					fields[1], value);
				continue;
			}
			break;
		default:
			break;
		}

		/* No branch accepted the line: report a syntax error and stop */
		LOG_ERR_2("config file parser", "error in file "
			"%s on line %d", fname, line_num);
		*error_str = "syntax error";
		*error_line = line_num;
		res = -1;
	}
	fclose(fin);

	TRACE_OUT(parse_config_file);
	return (res);
}
/*
 * Per-CPU init for AMD family/model 12: set up MTRRs, clear the machine
 * check status banks, enable the local APIC, optionally enable extended
 * APIC/core-count features for multi-core parts, and lock SMM space.
 */
static void model_12_init(device_t dev)
{
	printk(BIOS_DEBUG, "Model 12 Init.\n");

	u8 i;
	msr_t msr;
#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	u32 siblings;
#endif

//	struct node_core_id id;
//	id = get_node_core_id(read_nb_cfg_54()); /* nb_cfg_54 can not be set */
//	printk(BIOS_DEBUG, "nodeid = %02d, coreid = %02d\n", id.nodeid, id.coreid);

	/* Turn on caching if we haven't already */
	x86_enable_cache();
	amd_setup_mtrrs();
	x86_mtrr_check();

	disable_cache();

	/* zero the machine check error status registers */
	msr.lo = 0;
	msr.hi = 0;
	for (i = 0; i < 5; i++) {
		wrmsr(MCI_STATUS + (i * 4), msr);
	}

	enable_cache();

	/* Enable the local CPU apics */
	setup_lapic();

	/* Set the processor name string */
	// init_processor_name();

#if IS_ENABLED(CONFIG_LOGICAL_CPUS)
	/* Core count (minus one) from extended CPUID leaf 0x80000008 */
	siblings = cpuid_ecx(0x80000008) & 0xff;

	if (siblings > 0) {
		msr = rdmsr_amd(CPU_ID_FEATURES_MSR);
		msr.lo |= 1 << 28;
		wrmsr_amd(CPU_ID_FEATURES_MSR, msr);

		msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
		msr.hi |= 1 << (33 - 32);
		wrmsr_amd(CPU_ID_EXT_FEATURES_MSR, msr);
	}
	printk(BIOS_DEBUG, "siblings = %02d, ", siblings);
#endif

	/* DisableCf8ExtCfg */
	msr = rdmsr(NB_CFG_MSR);
	msr.hi &= ~(1 << (46 - 32));
	wrmsr(NB_CFG_MSR, msr);

	/* Write protect SMM space with SMMLOCK. */
	msr = rdmsr(HWCR_MSR);
	msr.lo |= (1 << 0);
	wrmsr(HWCR_MSR, msr);
}