static void soc_init(void *data) { struct global_nvs_t *gnvs; /* Save VBT info and mapping */ if (locate_vbt(&vbt_rdev) != CB_ERR) vbt = rdev_mmap_full(&vbt_rdev); /* Snapshot the current GPIO IRQ polarities. FSP is setting a * default policy that doesn't honor boards' requirements. */ itss_snapshot_irq_polarities(GPIO_IRQ_START, GPIO_IRQ_END); fsp_silicon_init(); /* Restore GPIO IRQ polarities back to previous settings. */ itss_restore_irq_polarities(GPIO_IRQ_START, GPIO_IRQ_END); /* override 'enabled' setting in device tree if needed */ pcie_override_devicetree_after_silicon_init(); /* * Keep the P2SB device visible so it and the other devices are * visible in coreboot for driver support and PCI resource allocation. * There is a UPD setting for this, but it's more consistent to use * hide and unhide symmetrically. */ p2sb_unhide(); /* Allocate ACPI NVS in CBMEM */ gnvs = cbmem_add(CBMEM_ID_ACPI_GNVS, sizeof(*gnvs)); }
/*
 * Fill the FSP-M memory UPDs with the board's DQ/DQS routing, RCOMP
 * data, and the SPD blob mapped straight out of CBFS.
 */
void mainboard_memory_init_params(FSPM_UPD *mupd)
{
	FSP_M_CONFIG *cfg = &mupd->FspmConfig;
	struct region_device spd_rdev;
	u8 spd_index = 2;

	/* Board-specific DQ byte maps, DQS maps and RCOMP settings. */
	mainboard_fill_dq_map_ch0(&cfg->DqByteMapCh0);
	mainboard_fill_dq_map_ch1(&cfg->DqByteMapCh1);
	mainboard_fill_dqs_map_ch0(&cfg->DqsMapCpu2DramCh0);
	mainboard_fill_dqs_map_ch1(&cfg->DqsMapCpu2DramCh1);
	mainboard_fill_rcomp_res_data(&cfg->RcompResistor);
	mainboard_fill_rcomp_strength_data(&cfg->RcompTarget);

	cfg->DqPinsInterleaved = 0;
	cfg->CaVrefConfig = 0;	/* VREF_CA->CHA/CHB */
	cfg->ECT = 1;		/* Early Command Training Enabled */

	if (get_spd_cbfs_rdev(&spd_rdev, spd_index) < 0)
		die("spd.bin not found\n");

	cfg->MemorySpdDataLen = region_device_sz(&spd_rdev);
	/* Memory leak is ok since we have memory mapped boot media */
	cfg->MemorySpdPtr00 = (uintptr_t)rdev_mmap_full(&spd_rdev);

	cfg->RefClk = 0;	/* Auto Select CLK freq */
	/* Both channels share the same SPD data. */
	cfg->MemorySpdPtr10 = cfg->MemorySpdPtr00;
}
/*
 * FSP-M parameter callback: run console/board/devicetree setup, then
 * apply platform-wide UPD policy and hand FSP the variable MRC cache.
 */
void platform_fsp_memory_init_params_cb(FSPM_UPD *mupd, uint32_t version)
{
	struct region_device var_rdev;

	check_full_retrain(mupd);

	fill_console_params(mupd);

	if (CONFIG(SOC_INTEL_GLK))
		soc_memory_init_params(mupd);

	mainboard_memory_init_params(mupd);

	parse_devicetree_setting(mupd);

	/* Do NOT let FSP do any GPIO pad configuration */
	mupd->FspmConfig.PreMemGpioTablePtr = (uintptr_t) NULL;

	/*
	 * Tell CSE we do not need to use Ring Buffer Protocol (RBP) to
	 * fetch firmware for us if we are using memory-mapped SPI. This
	 * lets the CSE state machine transition to the next boot state,
	 * so that it can function as designed.
	 */
	mupd->FspmConfig.SkipCseRbp = CONFIG(BOOT_DEVICE_MEMORY_MAPPED);

	/*
	 * Converged Security Engine (CSE) has secure storage
	 * functionality reachable through the HECI2 device. Part of the
	 * S3 resume flow resets HECI2, which takes 136ms. coreboot does
	 * not use secure storage, so instruct FSP to skip that reset.
	 */
	mupd->FspmConfig.EnableS3Heci2 = 0;

	/*
	 * Apollolake splits the MRC cache into a constant and a variable
	 * part. The variable part holds parameters that change on cold
	 * boots (scrambler seed, some memory controller registers). The
	 * scrambler seed is vital for S3 resume: a wrong or missing key
	 * renders DRAM contents useless.
	 */
	if (mrc_cache_get_current(MRC_VARIABLE_DATA, version,
				  &var_rdev) == 0) {
		/* Assume leaking is ok. */
		assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));
		mupd->FspmConfig.VariableNvsBufferPtr =
			rdev_mmap_full(&var_rdev);
	}

	car_set_var(fsp_version, version);
}
/* Right now, the offsets for the MRC cache area are hard-coded in the * northbridge Kconfig if CONFIG_CHROMEOS is not set. In order to make * this more flexible, there are two of options: * - Have each mainboard Kconfig supply a hard-coded offset * - Use CBFS */ static u32 get_mrc_cache_region(struct mrc_data_container **mrc_region_ptr) { size_t region_size = 0; *mrc_region_ptr = NULL; if (IS_ENABLED(CONFIG_CHROMEOS)) { struct region_device rdev; if (fmap_locate_area_as_rdev("RW_MRC_CACHE", &rdev) == 0) { region_size = region_device_sz(&rdev); *mrc_region_ptr = rdev_mmap_full(&rdev); } } else { *mrc_region_ptr = cbfs_boot_map_with_leak("mrc.cache", CBFS_TYPE_MRC_CACHE, ®ion_size); } return region_size; }
/*
 * Look up the MAC address in the VPD (FMAP "RO_VPD" region on
 * CONFIG_CHROMEOS, CBFS "vpd.bin" otherwise) and program it into the
 * Realtek NIC via its I/O BAR. Falls back to a fixed default MAC if the
 * VPD cannot be found.
 */
static void program_mac_address(u16 io_base)
{
	void *vpd = NULL;
	size_t vpd_len = -1;

	/* Default MAC Address of A0:00:BA:D0:0B:AD */
	u32 mac_high = 0xD0BA00A0;	/* high dword of mac address */
	u32 mac_low = 0x0000AD0B;	/* low word of mac address as a dword */

	if (IS_ENABLED(CONFIG_CHROMEOS)) {
		struct region_device vpd_rdev;

		if (fmap_locate_area_as_rdev("RO_VPD", &vpd_rdev) == 0) {
			vpd = rdev_mmap_full(&vpd_rdev);
			if (vpd != NULL)
				vpd_len = region_device_sz(&vpd_rdev);
		}
	} else {
		vpd = cbfs_boot_map_with_leak("vpd.bin", CBFS_TYPE_RAW,
					      &vpd_len);
	}

	if (vpd == NULL)
		printk(BIOS_ERR, "LAN: VPD not found.\n");
	else
		get_mac_address(&mac_high, &mac_low, vpd, vpd_len);

	if (!io_base)
		return;

	printk(BIOS_DEBUG, "Realtek NIC io_base = 0x%04x\n", io_base);
	printk(BIOS_DEBUG, "Programming MAC Address\n");

	/* Disable register protection */
	outb(0xc0, io_base + 0x50);
	outl(mac_high, io_base);
	outl(mac_low, io_base + 0x04);
	/*
	 * NOTE(review): the decimal 54 below (= 0x36) is inconsistent
	 * with the hex offsets used for every other register access here;
	 * it may have been intended as 0x54 — confirm against the Realtek
	 * datasheet before changing. Preserved as-is.
	 */
	outb(0x60, io_base + 54);
	/* Enable register protection again */
	outb(0x00, io_base + 0x50);
}
/*
 * Point the FSP-M arch UPD at the cached memory training data, if any.
 * Leaves NvsBufferPtr NULL (forcing full retrain) when caching is
 * disabled, recovery policy demands retraining, no current cache entry
 * exists, mapping fails, or the TPM hash check rejects the data.
 */
static void fsp_fill_mrc_cache(FSPM_ARCH_UPD *arch_upd, uint32_t fsp_version)
{
	struct region_device cache_rdev;
	void *cache;

	arch_upd->NvsBufferPtr = NULL;

	if (!CONFIG(CACHE_MRC_SETTINGS))
		return;

	/*
	 * In recovery mode, force retraining when either no recovery MRC
	 * cache is provisioned or the memory retrain switch is set.
	 */
	if (vboot_recovery_mode_enabled()) {
		if (!CONFIG(HAS_RECOVERY_MRC_CACHE))
			return;
		if (vboot_recovery_mode_memory_retrain())
			return;
	}

	if (mrc_cache_get_current(MRC_TRAINING_DATA, fsp_version,
				  &cache_rdev) < 0)
		return;

	/* Assume boot device is memory mapped. */
	assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));

	cache = rdev_mmap_full(&cache_rdev);
	if (cache == NULL)
		return;

	if (CONFIG(FSP2_0_USES_TPM_MRC_HASH) &&
	    !mrc_cache_verify_hash(cache, region_device_sz(&cache_rdev)))
		return;

	/* MRC cache found */
	arch_upd->NvsBufferPtr = cache;

	printk(BIOS_SPEW, "MRC cache found, size %zx\n",
	       region_device_sz(&cache_rdev));
}
/*
 * When ELOG debugging is enabled, print msg and hexdump the contents of
 * the ELOG mirror device. No-op otherwise.
 */
static void elog_debug_dump_buffer(const char *msg)
{
	struct region_device *mirror;
	void *buf;

	if (!IS_ENABLED(CONFIG_ELOG_DEBUG))
		return;

	elog_debug(msg);

	mirror = mirror_dev_get();

	buf = rdev_mmap_full(mirror);
	if (buf == NULL)
		return;

	hexdump(buf, region_device_sz(mirror));

	rdev_munmap(mirror, buf);
}
/* common code */ static int mrc_cache_get_region(struct mrc_data_region *region) { if (IS_ENABLED(CONFIG_CHROMEOS)) { struct region_device rdev; if (fmap_locate_area_as_rdev("RW_MRC_CACHE", &rdev)) return -1; region->size = region_device_sz(&rdev); region->base = rdev_mmap_full(&rdev); if (region->base == NULL) return -1; return 0; } else { region->base = (void *)CONFIG_MRC_SETTINGS_CACHE_BASE; region->size = CONFIG_MRC_SETTINGS_CACHE_SIZE; } return 0; }
/* Handle the case when FSPM is running XIP. */ static enum cb_err load_fspm_xip(struct fsp_header *hdr, const struct region_device *rdev) { void *base; if (fsp_validate_component(hdr, rdev) != CB_SUCCESS) return CB_ERR; base = rdev_mmap_full(rdev); if ((uintptr_t)base != hdr->image_base) { printk(BIOS_CRIT, "FSPM XIP base does not match: %p vs %p\n", (void *)(uintptr_t)hdr->image_base, base); return CB_ERR; } /* * Since the component is XIP it's already in the address space. Thus, * there's no need to rdev_munmap(). */ return CB_SUCCESS; }
/* Entry point taken when romstage is called after a separate verstage. */ asmlinkage void *romstage_after_verstage(void) { /* Need to locate the current FSP_INFO_HEADER. The cache-as-ram * is still enabled. We can directly access work buffer here. */ FSP_INFO_HEADER *fih; struct prog fsp = PROG_INIT(PROG_REFCODE, "fsp.bin"); console_init(); if (prog_locate(&fsp)) { fih = NULL; printk(BIOS_ERR, "Unable to locate %s\n", prog_name(&fsp)); } else /* This leaks a mapping which this code assumes is benign as * the flash is memory mapped CPU's address space. */ fih = find_fsp((uintptr_t)rdev_mmap_full(prog_rdev(&fsp))); set_fih_car(fih); /* Return new stack value in ram back to assembly stub. */ return cache_as_ram_stage_main(fih); }
/*
 * Map a CBFS file of the given type from a named FMAP region.
 * If file_size is non-NULL it receives the file's size (0 on failure).
 * Returns the mapped file contents, or NULL when the region or file
 * cannot be found.
 */
static void *cbfs_locate_file_in_region(const char *region_name,
					const char *file_name,
					uint32_t file_type,
					uint32_t *file_size)
{
	struct region_device rdev;
	struct cbfsf fh;

	if (file_size != NULL)
		*file_size = 0;

	if (fmap_locate_area_as_rdev(region_name, &rdev) != 0) {
		printk(BIOS_DEBUG,"%s region not found while looking for %s\n",
		       region_name, file_name);
		return NULL;
	}

	if (cbfs_locate(&fh, &rdev, file_name, &file_type) != 0) {
		printk(BIOS_DEBUG, "%s file not found in %s region\n",
		       file_name, region_name);
		return NULL;
	}

	if (file_size != NULL)
		*file_size = region_device_sz(&fh.data);

	return rdev_mmap_full(&fh.data);
}
static int elog_scan_flash(void) { elog_debug("elog_scan_flash()\n"); void *mirror_buffer; const struct region_device *rdev = mirror_dev_get(); size_t size = region_device_sz(&nv_dev); /* Fill memory buffer by reading from SPI */ mirror_buffer = rdev_mmap_full(rdev); if (rdev_readat(&nv_dev, mirror_buffer, 0, size) != size) { rdev_munmap(rdev, mirror_buffer); printk(BIOS_ERR, "ELOG: NV read failure.\n"); return -1; } rdev_munmap(rdev, mirror_buffer); /* No writes have been done yet. */ elog_tandem_reset_last_write(); /* Check if the area is empty or not */ if (elog_is_buffer_clear(0)) { printk(BIOS_ERR, "ELOG: NV Buffer Cleared.\n"); return -1; } /* Indicate that header possibly written. */ elog_tandem_increment_last_write(elog_events_start()); /* Validate the header */ if (!elog_is_header_valid()) { printk(BIOS_ERR, "ELOG: NV Buffer Invalid.\n"); return -1; } return elog_update_event_buffer_state(); }
/*
 * Parse the uImage FIT, choose a configuration and extract images.
 *
 * Maps the payload's region device, loads the FIT, optionally unflattens
 * and fixes up the FDT, asks the architecture code to place the kernel,
 * FDT and initrd in free memory, then extracts the images. Every exit
 * path (success or failure) unmaps the payload mapping; failure paths
 * before extraction additionally clear the payload entry point so the
 * caller does not jump into a half-loaded image.
 */
void fit_payload(struct prog *payload)
{
	struct device_tree *dt = NULL;
	/* Target regions for the placed images; sizes filled in below. */
	struct region kernel = {0}, fdt = {0}, initrd = {0};
	void *data;

	data = rdev_mmap_full(prog_rdev(payload));

	if (data == NULL)
		return;

	printk(BIOS_INFO, "FIT: Examine payload %s\n", payload->name);

	/* Parse the FIT and pick the configuration node to boot. */
	struct fit_config_node *config = fit_load(data);

	if (!config || !config->kernel_node) {
		printk(BIOS_ERR, "ERROR: Could not load FIT\n");
		rdev_munmap(prog_rdev(payload), data);
		return;
	}

	if (config->fdt_node) {
		dt = fdt_unflatten(config->fdt_node->data);
		if (!dt) {
			printk(BIOS_ERR,
			       "ERROR: Failed to unflatten the FDT.\n");
			rdev_munmap(prog_rdev(payload), data);
			return;
		}

		dt_apply_fixups(dt);

		/* Insert coreboot specific information */
		add_cb_fdt_data(dt);

		/* Update device_tree */
#if defined(CONFIG_LINUX_COMMAND_LINE)
		fit_update_chosen(dt, (char *)CONFIG_LINUX_COMMAND_LINE);
#endif
		fit_update_memory(dt);
	}

	/* Collect infos for fit_payload_arch */
	kernel.size = config->kernel_node->size;
	fdt.size = dt ? dt_flat_size(dt) : 0;
	initrd.size = config->ramdisk_node ?
		config->ramdisk_node->size : 0;

	/* Invoke arch specific payload placement and fixups */
	if (!fit_payload_arch(payload, config, &kernel, &fdt, &initrd)) {
		printk(BIOS_ERR, "ERROR: Failed to find free memory region\n");
		bootmem_dump_ranges();
		rdev_munmap(prog_rdev(payload), data);
		return;
	}

	/* Load the images to given position */
	if (config->fdt_node) {
		/* Update device_tree */
		if (config->ramdisk_node)
			fit_add_ramdisk(dt, (void *)initrd.offset,
					initrd.size);
		/* Re-flatten the (possibly modified) tree into its region. */
		pack_fdt(&fdt, dt);
	}

	if (config->ramdisk_node &&
	    extract(&initrd, config->ramdisk_node)) {
		printk(BIOS_ERR, "ERROR: Failed to extract initrd\n");
		prog_set_entry(payload, NULL, NULL);
		rdev_munmap(prog_rdev(payload), data);
		return;
	}

	timestamp_add_now(TS_KERNEL_DECOMPRESSION);

	if (extract(&kernel, config->kernel_node)) {
		printk(BIOS_ERR, "ERROR: Failed to extract kernel\n");
		prog_set_entry(payload, NULL, NULL);
		rdev_munmap(prog_rdev(payload), data);
		return;
	}

	timestamp_add_now(TS_START_KERNEL);

	rdev_munmap(prog_rdev(payload), data);
}
/*
 * Butterfly mainboard init: locate the VPD blob, initialize the EC and
 * program the keyboard locale from VPD, then find the Realtek NIC and
 * program its MAC address and LED mode through its I/O BAR.
 */
static void mainboard_init(device_t dev)
{
	/* VPD location/length; 0 / (size_t)-1 mean "not found". */
	u32 search_address = 0x0;
	size_t search_length = -1;
	u16 io_base = 0;
	struct device *ethernet_dev = NULL;
	void *vpd_file;

	if (IS_ENABLED(CONFIG_CHROMEOS)) {
		/* ChromeOS builds carry the VPD in the FMAP "RO_VPD" area. */
		struct region_device rdev;

		if (fmap_locate_area_as_rdev("RO_VPD", &rdev) == 0) {
			vpd_file = rdev_mmap_full(&rdev);
			if (vpd_file != NULL) {
				search_length = region_device_sz(&rdev);
				search_address = (uintptr_t)vpd_file;
			}
		}
	} else {
		/* Non-ChromeOS builds ship the VPD as a CBFS file. */
		vpd_file = cbfs_boot_map_with_leak("vpd.bin", CBFS_TYPE_RAW,
						   &search_length);
		if (vpd_file) {
			search_address = (unsigned long)vpd_file;
		} else {
			search_length = -1;
			search_address = 0;
		}
	}

	/* Initialize the Embedded Controller */
	butterfly_ec_init();

	/* Program EC Keyboard locale based on VPD data */
	program_keyboard_type(search_address, search_length);

	/* Get NIC's IO base address */
	ethernet_dev = dev_find_device(BUTTERFLY_NIC_VENDOR_ID,
				       BUTTERFLY_NIC_DEVICE_ID,
				       dev);
	if (ethernet_dev != NULL) {
		/*
		 * BAR0 at config offset 0x10; mask the I/O-space
		 * indicator bit to get the port base.
		 */
		io_base = pci_read_config16(ethernet_dev, 0x10) & 0xfffe;

		/*
		 * Battery life time - LAN PCIe should enter ASPM L1 to save
		 * power when LAN connection is idle.
		 * enable CLKREQ: LAN pci config space 0x81h=01
		 */
		pci_write_config8(ethernet_dev, 0x81, 0x01);
	}

	if (io_base) {
		/* Program MAC address based on VPD data */
		program_mac_address(io_base, search_address, search_length);

		/*
		 * Program NIC LEDS
		 *
		 * RTL8105E Series EEPROM-Less Application Note,
		 * Section 5.6 LED Mode Configuration
		 *
		 * Step1: Write C0h to I/O register 0x50 via byte access to
		 *        disable 'register protection'
		 * Step2: Write xx001111b to I/O register 0x52 via byte access
		 *        (bit7 is LEDS1 and bit6 is LEDS0)
		 * Step3: Write 0x00 to I/O register 0x50 via byte access to
		 *        enable 'register protection'
		 */
		outb(0xc0, io_base + 0x50);	/* Disable protection */
		outb((BUTTERFLY_NIC_LED_MODE << 6) | 0x0f, io_base + 0x52);
		outb(0x00, io_base + 0x50);	/* Enable register protection */
	}
}