/*
 * Return a pointer to the active timestamp table, or NULL if timestamps
 * are disabled or no table can be located in this stage.
 *
 * Lookup order: the local timestamp cache (when it is in use), then the
 * CBMEM-backed table (when CBMEM is available in this stage).
 */
static struct timestamp_table *timestamp_table_get(void)
{
	/* MAYBE_STATIC: presumably static only in stages with writable
	 * globals, so the lookup result is cached across calls there —
	 * TODO confirm against the MAYBE_STATIC definition. */
	MAYBE_STATIC struct timestamp_table *ts_table = NULL;
	struct timestamp_cache *ts_cache;

	if (!timestamp_should_run())
		return NULL;

	/* Fast path: a previous call already resolved the table. */
	if (ts_table != NULL)
		return ts_table;

	ts_cache = timestamp_cache_get();

	/* No cache at all: fall back directly to CBMEM (if present). */
	if (ts_cache == NULL) {
		if (HAS_CBMEM)
			ts_table = cbmem_find(CBMEM_ID_TIMESTAMP);
		return ts_table;
	}

	/* Cache is required. */
	if (ts_cache->cache_state != TIMESTAMP_CACHE_NOT_NEEDED)
		return &ts_cache->table;

	/* Cache shouldn't be used but there's no backing store. */
	if (!HAS_CBMEM)
		return NULL;

	ts_table = cbmem_find(CBMEM_ID_TIMESTAMP);
	return ts_table;
}
/* Populate the ACPI global NVS area with this mainboard's settings. */
static void acpi_create_gnvs(global_nvs_t *gnvs)
{
	/* Processor count for the ACPI tables. */
	gnvs->pcnt = dev_count_cpu();

	/* USB ports: powered in S3, powered off in S5. */
	gnvs->s3u0 = 1;
	gnvs->s3u1 = 1;
	gnvs->s5u0 = 0;
	gnvs->s5u1 = 0;

	/* CBMEM TOC */
	gnvs->cmem = 0;

	/* Top of Low Memory (start of resource allocation) */
	gnvs->tolm = nc_read_top_of_low_memory();

	/* TPM Present */
	gnvs->tpmp = 1;

#if CONFIG_CHROMEOS
	chromeos_init_vboot(&gnvs->chromeos);
	/* Bayley Bay does not have a Chrome EC */
	gnvs->chromeos.vbt2 = ACTIVE_ECFW_RO;
#endif

	/* Update the mem console pointer. */
	gnvs->cbmc = (u32)cbmem_find(CBMEM_ID_CONSOLE);
}
/*
 * If this boot is an ACPI S3 resume, restore the SMM GNVS pointer, run the
 * optional mainboard resume hook, and jump to the OS wakeup vector.  On a
 * normal boot (no wakeup vector found) this function does nothing.
 */
void suspend_resume(void)
{
	void *wake_vec;

	/* If we happen to be resuming find wakeup vector and jump to OS. */
	wake_vec = acpi_find_wakeup_vector();
	if (wake_vec) {
#if CONFIG_HAVE_SMI_HANDLER
		/* The saved value is itself an address, stored as a u32. */
		u32 *gnvs_address = cbmem_find(CBMEM_ID_ACPI_GNVS);

		/* Restore GNVS pointer in SMM if found */
		if (gnvs_address && *gnvs_address) {
			printk(BIOS_DEBUG, "Restore GNVS pointer to 0x%08x\n",
			       *gnvs_address);
			smm_setup_structures((void *)*gnvs_address, NULL, NULL);
		}
#endif

		/* Call mainboard resume handler first, if defined. */
		if (mainboard_suspend_resume)
			mainboard_suspend_resume();

#if CONFIG_COVERAGE
		/* Flush coverage data before control leaves coreboot. */
		coverage_exit();
#endif
		post_code(POST_OS_RESUME);
		/* Does not return on success. */
		acpi_jump_to_wakeup(wake_vec);
	}
}
static unsigned long gma_write_acpi_tables(struct device *const dev, unsigned long current, struct acpi_rsdp *const rsdp) { igd_opregion_t *opregion = (igd_opregion_t *)current; global_nvs_t *gnvs; if (intel_gma_init_igd_opregion(opregion) != CB_SUCCESS) return current; current += sizeof(igd_opregion_t); /* GNVS has been already set up */ gnvs = cbmem_find(CBMEM_ID_ACPI_GNVS); if (gnvs) { /* IGD OpRegion Base Address */ gma_set_gnvs_aslb(gnvs, (uintptr_t)opregion); } else { printk(BIOS_ERR, "Error: GNVS table not found.\n"); } current = acpi_align_current(current); return current; }
void acpi_create_gnvs(struct global_nvs_t *gnvs) { const struct device *dev = PCH_DEV_LPC; const struct soc_intel_icelake_config *config = dev->chip_info; /* Set unknown wake source */ gnvs->pm1i = -1; /* CPU core count */ gnvs->pcnt = dev_count_cpu(); if (IS_ENABLED(CONFIG_CONSOLE_CBMEM)) /* Update the mem console pointer. */ gnvs->cbmc = (uintptr_t)cbmem_find(CBMEM_ID_CONSOLE); if (IS_ENABLED(CONFIG_CHROMEOS)) { /* Initialize Verified Boot data */ chromeos_init_chromeos_acpi(&(gnvs->chromeos)); if (IS_ENABLED(CONFIG_EC_GOOGLE_CHROMEEC)) { gnvs->chromeos.vbt2 = google_ec_running_ro() ? ACTIVE_ECFW_RO : ACTIVE_ECFW_RW; } else gnvs->chromeos.vbt2 = ACTIVE_ECFW_RO; } /* Enable DPTF based on mainboard configuration */ gnvs->dpte = config->dptf_enable; /* Fill in the Wifi Region id */ gnvs->cid1 = wifi_regulatory_domain(); /* Set USB2/USB3 wake enable bitmaps. */ gnvs->u2we = config->usb2_wake_enable_bitmap; gnvs->u3we = config->usb3_wake_enable_bitmap; }
/*
 * Bring up CBMEM on top of the FSP reserved memory region and create the
 * romstage handoff.  On a failed S3 CBMEM recovery the platform is reset so
 * it comes up cleanly instead of resuming with lost state.
 */
static void do_fsp_post_memory_init(bool s3wake, uint32_t fsp_version)
{
	struct range_entry fsp_mem;

	if (fsp_find_reserved_memory(&fsp_mem))
		die("Failed to find FSP_RESERVED_MEMORY_RESOURCE_HOB!\n");

	/* initialize cbmem by adding FSP reserved memory first thing */
	if (!s3wake) {
		cbmem_initialize_empty_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
					       range_entry_size(&fsp_mem));
	} else if (cbmem_initialize_id_size(CBMEM_ID_FSP_RESERVED_MEMORY,
					    range_entry_size(&fsp_mem))) {
		/* Recovery failed: CBMEM from the previous boot is gone. */
		if (CONFIG(HAVE_ACPI_RESUME)) {
			printk(BIOS_ERR, "Failed to recover CBMEM in S3 resume.\n");
			/* Failed S3 resume, reset to come up cleanly */
			/* FIXME: A "system" reset is likely enough: */
			full_reset();
		}
	}

	/* make sure FSP memory is reserved in cbmem */
	if (range_entry_base(&fsp_mem) !=
	    (uintptr_t)cbmem_find(CBMEM_ID_FSP_RESERVED_MEMORY))
		die("Failed to accommodate FSP reserved memory request!\n");

	save_memory_training_data(s3wake, fsp_version);

	/* Create romstage handoff information */
	romstage_handoff_init(s3wake);
}
/*
 * Log the reason for the current boot to the event log: developer mode,
 * recovery mode (including the recovery reason when it can be determined),
 * or nothing for a normal boot.
 */
void elog_add_boot_reason(void)
{
	if (developer_mode_enabled()) {
		elog_add_event(ELOG_TYPE_CROS_DEVELOPER_MODE);
		printk(BIOS_DEBUG, "%s: Logged dev mode boot\n", __func__);
	} else if (recovery_mode_enabled()) {
		u8 reason = 0;
#if CONFIG_VBOOT_VERIFY_FIRMWARE
		struct vboot_handoff *vbho = cbmem_find(CBMEM_ID_VBOOT_HANDOFF);

		/* Prefer the reason recorded in non-volatile storage. */
		reason = get_recovery_mode_from_vbnv();

		/* Otherwise fall back to vboot shared data, if present. */
		if (vbho && !reason) {
			VbSharedDataHeader *sd = (VbSharedDataHeader *)
				vbho->shared_data;
			reason = sd->recovery_reason;
		}
#endif
		/* With no recorded reason, assume the recovery button. */
		elog_add_event_byte(ELOG_TYPE_CROS_RECOVERY_MODE,
				    reason ? reason :
				    ELOG_CROS_RECOVERY_MODE_BUTTON);
		printk(BIOS_DEBUG, "%s: Logged recovery mode boot, "
		       "reason: 0x%02x\n", __func__, reason);
	} else {
		printk(BIOS_DEBUG, "%s: Normal mode boot, nothing "
		       "interesting to log\n", __func__);
	}
}
/*
 * Build the SerialIO SSDT at 'ssdt': fill in the ACPI table header, then
 * append one entry per SerialIO device and finalize length and checksum.
 * Does nothing when GNVS is not available.
 */
void acpi_create_serialio_ssdt(acpi_header_t *ssdt)
{
	global_nvs_t *gnvs = cbmem_find(CBMEM_ID_ACPI_GNVS);
	unsigned long current = (unsigned long)ssdt + sizeof(acpi_header_t);
	int len = 0;
	int id;

	if (!gnvs)
		return;

	/* Fill the SSDT header */
	memset((void *)ssdt, 0, sizeof(acpi_header_t));
	memcpy(&ssdt->signature, "SSDT", 4);
	memcpy(&ssdt->oem_id, OEM_ID, 6);
	memcpy(&ssdt->oem_table_id, ACPI_TABLE_CREATOR, 8);
	memcpy(&ssdt->asl_compiler_id, ASLC, 4);
	ssdt->revision = 2;
	ssdt->oem_revision = 42;
	ssdt->asl_compiler_revision = 42;
	ssdt->length = sizeof(acpi_header_t);

	acpigen_set_current((char *)current);

	/* Fill the SSDT with an entry for each SerialIO device */
	for (id = 0; id < 8; id++)
		len += acpi_create_serialio_ssdt_entry(id, gnvs);

	/* (Re)calculate length and checksum. */
	current = (unsigned long)acpigen_get_current();
	ssdt->length = current - (unsigned long)ssdt;
	ssdt->checksum = acpi_checksum((void *)ssdt, ssdt->length);
}
/*
 * Write an SMBIOS type 19 (Memory Array Mapped Address) entry at *current.
 * Returns the number of bytes written (and advances *current), or 0 when no
 * memory information is available in CBMEM.
 */
static int smbios_write_type19(unsigned long *current, int handle)
{
	struct smbios_type19 *t = (struct smbios_type19 *)*current;

	/* Note: the target area is zeroed even on the early-return path
	 * below; *current is not advanced in that case. */
	memset(t, 0, sizeof(struct smbios_type19));

	int len = sizeof(struct smbios_type19);
	int i;
	struct memory_info *meminfo;
	meminfo = cbmem_find(CBMEM_ID_MEMINFO);
	if (meminfo == NULL)
		return 0;	/* can't find mem info in cbmem */

	printk(BIOS_INFO, "Create SMBIOS type 19\n");

	t->type = SMBIOS_MEMORY_ARRAY_MAPPED_ADDRESS;
	t->memory_array_handle = type16_table_handle;

	t->phys_addr_start = 0;
	/* Accumulate total size over all populated DIMMs (phys_addr_end was
	 * zeroed by the memset above). dimm_size is presumably in MiB, since
	 * the <<10 below converts to the KiB units type 19 uses — TODO
	 * confirm against struct dimm_info. */
	for (i = 0; i < meminfo->dimm_cnt && i < ARRAY_SIZE(meminfo->dimm); i++) {
		struct dimm_info *dimm;
		dimm = &meminfo->dimm[i];
		t->phys_addr_end += dimm->dimm_size;
	}
	/* Convert to KiB and make the end address inclusive. */
	t->phys_addr_end = (t->phys_addr_end << 10) - 1;

	t->partition_width = meminfo->dimm_cnt;

	t->handle = handle;
	type19_table_handle = handle;	/* save handle for use/reference in type20 table */

	t->length = len - 2;
	*current += len;
	return len;
}
static void acpi_create_gnvs(struct global_nvs_t *gnvs) { struct soc_intel_apollolake_config *cfg; struct device *dev = NB_DEV_ROOT; /* Clear out GNVS. */ memset(gnvs, 0, sizeof(*gnvs)); if (IS_ENABLED(CONFIG_CONSOLE_CBMEM)) gnvs->cbmc = (uintptr_t)cbmem_find(CBMEM_ID_CONSOLE); if (IS_ENABLED(CONFIG_CHROMEOS)) { /* Initialize Verified Boot data */ chromeos_init_vboot(&gnvs->chromeos); gnvs->chromeos.vbt2 = ACTIVE_ECFW_RO; } /* Set unknown wake source */ gnvs->pm1i = ~0ULL; if (!dev || !dev->chip_info) { printk(BIOS_ERR, "BUG! Could not find SOC devicetree config\n"); return; } cfg = dev->chip_info; /* Enable DPTF based on mainboard configuration */ gnvs->dpte = cfg->dptf_enable; /* Assign address of PERST_0 if GPIO is defined in devicetree */ if (cfg->prt0_gpio != GPIO_PRT0_UDEF) gnvs->prt0 = (uintptr_t)gpio_dwx_address(cfg->prt0_gpio); }
/** @brief returns pointer to a CAR variable, before or after migration.
 *
 * Before cache-as-ram is torn down the variable lives at its link-time
 * address and the input pointer is returned unchanged.  After migration the
 * pointer is translated to the copy of the CAR region in its new home.
 *
 * @param var pointer to the CAR variable
 */
void *car_get_var_ptr(void *var)
{
	char *migrated_base = NULL;
	int offset;
	void * _car_start = &_car_data_start;
	void * _car_end = &_car_data_end;

	/* If the cache-as-ram has not been migrated return the pointer
	 * passed in. */
	if (!car_migrated)
		return var;

	/* Pointers outside the CAR data region are passed through as-is. */
	if (var < _car_start || var >= _car_end) {
		printk(BIOS_ERR,
		       "Requesting CAR variable outside of CAR region: %p\n",
		       var);
		return var;
	}

#if IS_ENABLED(CONFIG_PLATFORM_USES_FSP)
	/* FSP platforms: presumably the migrated globals are reached via the
	 * saved HOB pointer rather than a CBMEM entry — TODO confirm against
	 * find_saved_temp_mem(). */
	migrated_base = (char *)find_saved_temp_mem(*(void **)CBMEM_FSP_HOB_PTR);
#else
	migrated_base = cbmem_find(CBMEM_ID_CAR_GLOBALS);
#endif

	if (migrated_base == NULL)
		die("CAR: Could not find migration base!\n");

	/* Same offset within the region, new base. */
	offset = (char *)var - (char *)_car_start;

	return &migrated_base[offset];
}
static void add_cbmem_pointers(struct lb_header *header) { /* * These CBMEM sections' addresses are included in the coreboot table * with the appropriate tags. */ const struct section_id { int cbmem_id; int table_tag; } section_ids[] = { {CBMEM_ID_TIMESTAMP, LB_TAG_TIMESTAMPS}, {CBMEM_ID_CONSOLE, LB_TAG_CBMEM_CONSOLE} }; int i; for (i = 0; i < ARRAY_SIZE(section_ids); i++) { const struct section_id *sid = section_ids + i; struct lb_cbmem_ref *cbmem_ref; void *cbmem_addr = cbmem_find(sid->cbmem_id); if (!cbmem_addr) continue; /* This section is not present */ cbmem_ref = (struct lb_cbmem_ref *)lb_new_record(header); if (!cbmem_ref) { printk(BIOS_ERR, "No more room in coreboot table!\n"); break; } cbmem_ref->tag = sid->table_tag; cbmem_ref->size = sizeof(*cbmem_ref); cbmem_ref->cbmem_addr = (unsigned long)cbmem_addr; } }
/*
 * Hand control to the OS wakeup vector for S3 resume.  When high-memory
 * save is in effect, the OS memory backup must exist in CBMEM or resume is
 * aborted.  Does not return on success.
 */
void acpi_jump_to_wakeup(void *vector)
{
	u32 acpi_backup_memory = 0;

	if (HIGH_MEMORY_SAVE && acpi_s3_resume_allowed()) {
		acpi_backup_memory = (u32)cbmem_find(CBMEM_ID_RESUME);

		if (!acpi_backup_memory) {
			printk(BIOS_WARNING, "ACPI: Backup memory missing. "
			       "No S3 resume.\n");
			return;
		}
	}

#if CONFIG_SMP
	// FIXME: This should go into the ACPI backup memory, too. No pork sausages.
	/*
	 * Just restore the SMP trampoline and continue with wakeup on
	 * assembly level.
	 */
	memcpy(lowmem_backup_ptr, lowmem_backup, lowmem_backup_size);
#endif

	/* Copy wakeup trampoline in place. */
	memcpy((void *)WAKEUP_BASE, &__wakeup, __wakeup_size);

#if CONFIG_COLLECT_TIMESTAMPS
	timestamp_add_now(TS_ACPI_WAKE_JUMP);
#endif

	/* Restores backed-up memory and jumps to 'vector'. */
	acpi_do_wakeup((u32)vector, acpi_backup_memory, CONFIG_RAMBASE,
		       HIGH_MEMORY_SAVE);
}
/*
 * Back up the low-memory region that ramstage will be loaded over, so it
 * can be restored for the OS on ACPI S3 resume.  No-op on first boot (no
 * CBMEM_ID_RESUME yet) and when a valid backup already exists.
 */
void backup_ramstage_section(uintptr_t base, size_t size)
{
	struct resume_backup *backup_mem = cbmem_find(CBMEM_ID_RESUME);

	/* For first boot we exit here as CBMEM_ID_RESUME is only
	 * created late in ramstage with acpi_prepare_resume_backup(). */
	if (!backup_mem)
		return;

	/* Check that the backup is not done twice. */
	if (backup_mem->valid)
		return;

	/* When we are called from ramstage loader, update header with
	 * properties of the ramstage we will load. */
	if (backup_create_or_update(backup_mem, base, size) < 0)
		return;

	/* Back up the OS-controlled memory where ramstage will be loaded:
	 * copy from the lowmem address into the CBMEM backup area. */
	memcpy((void *)(uintptr_t)backup_mem->cbmem,
	       (void *)(uintptr_t)backup_mem->lowmem,
	       (size_t)backup_mem->size);
	backup_mem->valid = 1;
}
/*
 * Switch the LPE audio device from PCI enumeration to ACPI mode: publish
 * its BARs and firmware base in global NVS, then run the register script
 * that hides the PCI config space and enables the ACPI interrupt.
 */
static void lpe_enable_acpi_mode(device_t dev)
{
	static const struct reg_script ops[] = {
		/* Disable PCI interrupt, enable Memory and Bus Master */
		/* (1<<10) is the INTx disable bit of the PCI command reg. */
		REG_PCI_OR32(PCI_COMMAND,
			     PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | (1<<10)),
		/* Enable ACPI mode */
		REG_IOSF_OR(IOSF_PORT_0x58, LPE_PCICFGCTR1,
			    LPE_PCICFGCTR1_PCI_CFG_DIS |
			    LPE_PCICFGCTR1_ACPI_INT_EN),
		REG_SCRIPT_END
	};
	global_nvs_t *gnvs;

	/* Find ACPI NVS to update BARs */
	gnvs = (global_nvs_t *)cbmem_find(CBMEM_ID_ACPI_GNVS);
	if (!gnvs) {
		printk(BIOS_ERR, "Unable to locate Global NVS\n");
		return;
	}

	/* Save BAR0, BAR1, and firmware base to ACPI NVS */
	assign_device_nvs(dev, &gnvs->dev.lpe_bar0, PCI_BASE_ADDRESS_0);
	assign_device_nvs(dev, &gnvs->dev.lpe_bar1, PCI_BASE_ADDRESS_1);
	assign_device_nvs(dev, &gnvs->dev.lpe_fw, FIRMWARE_PCI_REG_BASE);

	/* Device is enabled in ACPI mode */
	gnvs->dev.lpe_en = 1;

	/* Put device in ACPI mode */
	reg_script_run_on_dev(dev, ops);
}
void stage_cache_load_stage(int stage_id, struct prog *stage) { struct stage_cache *meta; const struct cbmem_entry *e; void *c; size_t size; void *load_addr; prog_set_entry(stage, NULL, NULL); meta = cbmem_find(CBMEM_ID_STAGEx_META + stage_id); if (meta == NULL) return; e = cbmem_entry_find(CBMEM_ID_STAGEx_CACHE + stage_id); if (e == NULL) return; c = cbmem_entry_start(e); size = cbmem_entry_size(e); load_addr = (void *)(uintptr_t)meta->load_addr; memcpy(load_addr, c, size); prog_set_area(stage, load_addr, size); prog_set_entry(stage, (void *)(uintptr_t)meta->entry_addr, NULL); }
/* Populate the ACPI global NVS area, including DPTF thermal settings. */
static void acpi_create_gnvs(global_nvs_t *gnvs)
{
	/* Processor count */
	gnvs->pcnt = dev_count_cpu();

	/* USB ports: powered in S3, powered off in S5. */
	gnvs->s3u0 = 1;
	gnvs->s3u1 = 1;
	gnvs->s5u0 = 0;
	gnvs->s5u1 = 0;

	/* Top of Low Memory (start of resource allocation) */
	gnvs->tolm = nc_read_top_of_low_memory();

	/* TPM Present */
	gnvs->tpmp = 1;

	/* Enable DPTF with its thermal trip points. */
	gnvs->tcrt = CRITICAL_TEMPERATURE;
	gnvs->tpsv = PASSIVE_TEMPERATURE;
	gnvs->tact = ACTIVE_TEMPERATURE;
	gnvs->dpte = 1;

#if CONFIG_CHROMEOS
	chromeos_init_vboot(&gnvs->chromeos);
	gnvs->chromeos.vbt2 = google_ec_running_ro() ?
		ACTIVE_ECFW_RO : ACTIVE_ECFW_RW;
#endif

	/* Update the mem console pointer. */
	gnvs->cbmc = (u32)cbmem_find(CBMEM_ID_CONSOLE);
}
/* Save wake source information for calculating ACPI _SWS values.
 * Returns the number of GPE0 registers reported via *gpe0, or -1 when no
 * power state was saved in CBMEM. */
int soc_fill_acpi_wake(uint32_t *pm1, uint32_t **gpe0)
{
	struct chipset_power_state *ps;
	/* static: the caller keeps a pointer into this array, so it must
	 * outlive the call. */
	static uint32_t gpe0_sts[GPE0_REG_MAX];
	uint32_t pm1_en;
	int i;

	ps = cbmem_find(CBMEM_ID_POWER_STATE);
	if (ps == NULL)
		return -1;

	/*
	 * PM1_EN to check the basic wake events which can happen through
	 * powerbtn or any other wake source like lidopen, key board press etc.
	 * WAK_STS bit is set when the system is in one of the sleep states
	 * (via the SLP_EN bit) and an enabled wake event occurs. Upon setting
	 * this bit, the PMC will transition the system to the ON state and
	 * can only be set by hardware and can only be cleared by writing a one
	 * to this bit position.
	 */
	pm1_en = ps->pm1_en | WAK_STS | RTC_EN | PWRBTN_EN;
	*pm1 = ps->pm1_sts & pm1_en;

	/* Mask off GPE0 status bits that are not enabled */
	*gpe0 = &gpe0_sts[0];
	for (i = 0; i < GPE0_REG_MAX; i++)
		gpe0_sts[i] = ps->gpe0_sts[i] & ps->gpe0_en[i];

	return GPE0_REG_MAX;
}
void *car_get_var_ptr(void *var) { char *migrated_base; int offset; void * _car_start = &_car_data_start; void * _car_end = &_car_data_end; /* If the cache-as-ram has not been migrated return the pointer * passed in. */ if (!car_migrated) return var; if (var < _car_start || var >= _car_end) { printk(BIOS_ERR, "Requesting CAR variable outside of CAR region: %p\n", var); return var; } migrated_base = cbmem_find(CBMEM_ID_CAR_GLOBALS); if (migrated_base == NULL) { printk(BIOS_ERR, "CAR: Could not find migration base!\n"); return var; } offset = (char *)var - (char *)_car_start; return &migrated_base[offset]; }
/*
 * Continue romstage on the new (RAM-backed) stack: tear down cache-as-ram,
 * enable caching of low RAM, locate the S3 resume backup if resuming, and
 * load/run ramstage.  Never returns.
 */
void cache_as_ram_new_stack (void)
{
	void *resume_backup_memory = NULL;

	/* Address of a stack local approximates the top of the new stack. */
	print_car_debug("Top about %08x ... Done\n",
			(uint32_t) &resume_backup_memory);
	print_car_debug("Disabling cache as ram now\n");

	disable_cache_as_ram_bsp();

	disable_cache();
	/* Enable cached access to RAM in the range 1M to CONFIG_RAMTOP */
	/* NOTE(review): the MTRR base below is 0, so the cached range is
	 * actually 0..CONFIG_RAMTOP, not 1M..CONFIG_RAMTOP — confirm which
	 * the comment above intends. */
	set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
	enable_cache();

	if (acpi_is_wakeup_s3()) {
		resume_backup_memory = cbmem_find(CBMEM_ID_RESUME);
		print_car_debug("Resume backup memory location: %p\n",
				resume_backup_memory);
	}
	prepare_ramstage_region(resume_backup_memory);

	set_sysinfo_in_ram(1);	// So other core0 could start to train mem

	/*copy and execute ramstage */
	copy_and_run();
	/* We will not return */
	print_car_debug("should not be here -\n");
}
/*
 * Look up a VPD value by key.  Searches the read-only region first (for
 * VPD_ANY/VPD_RO) and falls back to the read-write region (for
 * VPD_ANY/VPD_RW).  On success returns the value and stores its length in
 * *size; returns NULL when the key is absent or no VPD blob is in CBMEM.
 */
const void *vpd_find(const char *key, int *size, enum vpd_region region)
{
	struct vpd_gets_arg arg = {0};
	int consumed = 0;
	const struct vpd_cbmem *vpd;

	vpd = cbmem_find(CBMEM_ID_VPD);
	if (!vpd || !vpd->ro_size)
		return NULL;

	arg.key = (const uint8_t *)key;
	arg.key_len = strlen(key);

	/* Scan the RO blob; the callback sets arg.matched on a key hit. */
	if (region == VPD_ANY || region == VPD_RO)
		while (VPD_OK == decodeVpdString(vpd->ro_size, vpd->blob,
		       &consumed, vpd_gets_callback, &arg)) {
			/* Iterate until found or no more entries. */
		}

	/* NOTE(review): 'consumed' is not reset before the RW scan — verify
	 * decodeVpdString treats it purely as an offset into the buffer that
	 * is passed in on each call. */
	if (!arg.matched && region != VPD_RO)
		while (VPD_OK == decodeVpdString(vpd->rw_size,
		       vpd->blob + vpd->ro_size, &consumed,
		       vpd_gets_callback, &arg)) {
			/* Iterate until found or no more entries. */
		}

	if (!arg.matched)
		return NULL;

	*size = arg.value_len;
	return arg.value;
}
/*
 * Finish ACPI S3 resume: optionally arrange restoration of the low memory
 * that ramstage overwrote, install the wakeup trampoline, and jump to the
 * OS wake vector.  Does not return on success.
 */
static void acpi_jump_to_wakeup(void *vector)
{
	uintptr_t source = 0, target = 0;
	size_t size = 0;

	if (!acpi_s3_resume_allowed()) {
		printk(BIOS_WARNING, "ACPI: S3 resume not allowed.\n");
		return;
	}

	/* Without a relocatable ramstage, the OS memory that ramstage was
	 * loaded over must be copied back from the CBMEM backup. */
	if (!CONFIG(RELOCATABLE_RAMSTAGE)) {
		struct resume_backup *backup_mem =
			cbmem_find(CBMEM_ID_RESUME);

		if (backup_mem && backup_mem->valid) {
			/* Invalidate so the backup is not replayed twice. */
			backup_mem->valid = 0;
			target = backup_mem->lowmem;
			source = backup_mem->cbmem;
			size = backup_mem->size;
		} else {
			printk(BIOS_WARNING, "ACPI: Backup memory missing. "
			       "No S3 resume.\n");
			return;
		}
	}

	/* Copy wakeup trampoline in place. */
	memcpy((void *)WAKEUP_BASE, &__wakeup, __wakeup_size);

	set_boot_successful();

	timestamp_add_now(TS_ACPI_WAKE_JUMP);

	acpi_do_wakeup((uintptr_t)vector, source, target, size);
}
/*
 * Return the payload selected by vboot, or NULL when no vboot handoff is
 * present or the configured boot loader index is out of range.  The payload
 * size is stored through 'len' when it is non-NULL.
 */
void *vboot_get_payload(size_t *len)
{
	struct firmware_component *component;
	struct vboot_handoff *handoff;

	handoff = cbmem_find(CBMEM_ID_VBOOT_HANDOFF);
	if (handoff == NULL)
		return NULL;

	if (CONFIG_VBOOT_BOOT_LOADER_INDEX >= MAX_PARSED_FW_COMPONENTS) {
		printk(BIOS_ERR, "Invalid boot loader index: %d\n",
		       CONFIG_VBOOT_BOOT_LOADER_INDEX);
		return NULL;
	}

	component = &handoff->components[CONFIG_VBOOT_BOOT_LOADER_INDEX];

	if (len != NULL)
		*len = component->size;
	printk(BIOS_DEBUG, "Booting 0x%x byte payload at 0x%08x.\n",
	       component->size, component->address);

	return (void *)component->address;
}
void payload_load(void) { int i; const struct prog_loader_ops *ops; struct prog *payload = &global_payload; for (i = 0; i < ARRAY_SIZE(payload_ops); i++) { ops = payload_ops[i]; if (ops->prepare(payload) < 0) { printk(BIOS_DEBUG, "%s: could not locate payload.\n", ops->name); continue; } printk(BIOS_DEBUG, "%s: located payload @ %p, %zu bytes.\n", ops->name, prog_start(payload), prog_size(payload)); break; } if (i == ARRAY_SIZE(payload_ops)) goto out; mirror_payload(payload); /* Pass cbtables to payload if architecture desires it. */ prog_set_entry(payload, selfload(payload), cbmem_find(CBMEM_ID_CBTABLE)); out: if (prog_entry(payload) == NULL) die("Payload not loaded.\n"); }
/*
 * Return the heap base address: the CBMEM scratch area on S3 resume,
 * otherwise the fixed BIOS heap start address.
 */
void *GetHeapBase(void)
{
	if (acpi_is_wakeup_s3())
		return cbmem_find(CBMEM_ID_RESUME_SCRATCH);

	return (void *)BIOS_HEAP_START_ADDRESS;
}
/*
 * Sync this boot's memory-training (MRC) data from CBMEM into the
 * flash-backed MRC cache so subsequent boots can skip full training.
 * Skips the write (but still re-applies protection) when the flash copy is
 * already identical.
 */
static void update_mrc_cache(void *unused)
{
	const struct mrc_saved_data *current_boot;
	const struct mrc_saved_data *current_saved;
	const struct mrc_saved_data *next_slot;
	struct mrc_data_region region;

	printk(BIOS_DEBUG, "Updating MRC cache data.\n");

	/* Training data produced during this boot. */
	current_boot = cbmem_find(CBMEM_ID_MRCDATA);
	if (!current_boot) {
		printk(BIOS_ERR, "No MRC cache in cbmem.\n");
		return;
	}

	if (mrc_cache_get_region(&region)) {
		printk(BIOS_ERR, "Could not obtain MRC cache region.\n");
		return;
	}

	if (!mrc_cache_valid(&region, current_boot)) {
		printk(BIOS_ERR, "MRC cache data in cbmem invalid.\n");
		return;
	}

	current_saved = NULL;

	/* When flash already holds identical data, just re-protect it. */
	if (!__mrc_cache_get_current(&region, &current_saved)) {
		if (current_saved->size == current_boot->size &&
		    !memcmp(&current_saved->data[0], &current_boot->data[0],
			    current_saved->size)) {
			printk(BIOS_DEBUG, "MRC cache up to date.\n");
			protect_mrc_cache(&region);
			return;
		}
	}

	next_slot = mrc_cache_next_slot(&region, current_saved);

	/* No usable slot: erase the whole region (only if it isn't already
	 * erased) and restart at its base. */
	if (!mrc_slot_valid(&region, next_slot, current_boot)) {
		printk(BIOS_DEBUG, "Slot @ %p is invalid.\n", next_slot);
		if (!nvm_is_erased(region.base, region.size)) {
			if (nvm_erase(region.base, region.size) < 0) {
				printk(BIOS_DEBUG, "Failure erasing region.\n");
				return;
			}
		}
		next_slot = region.base;
	}

	/* Write header plus payload in one go. */
	if (nvm_write((void *)next_slot, current_boot,
		      current_boot->size + sizeof(*current_boot))) {
		printk(BIOS_DEBUG, "Failure writing MRC cache to %p.\n",
		       next_slot);
	}
	protect_mrc_cache(&region);
}
/**
 * Add coreboot tables, CBMEM information and optional board specific strapping
 * IDs to the device tree loaded via FIT.
 *
 * Creates /firmware/coreboot with a 'reg' property covering the coreboot
 * table and the CBMEM area.  Bails out early (with a warning) when either
 * cannot be located.
 */
static void add_cb_fdt_data(struct device_tree *tree)
{
	u32 addr_cells = 1, size_cells = 1;
	u64 reg_addrs[2], reg_sizes[2];
	void *baseptr = NULL;
	size_t size = 0;

	static const char *firmware_path[] = {"firmware", NULL};
	struct device_tree_node *firmware_node = dt_find_node(tree->root,
		firmware_path, &addr_cells, &size_cells, 1);

	/* Need to add 'ranges' to the intermediate node to make 'reg' work. */
	dt_add_bin_prop(firmware_node, "ranges", NULL, 0);

	static const char *coreboot_path[] = {"coreboot", NULL};
	struct device_tree_node *coreboot_node = dt_find_node(firmware_node,
		coreboot_path, &addr_cells, &size_cells, 1);

	dt_add_string_prop(coreboot_node, "compatible", "coreboot");

	/* Fetch CB tables from cbmem */
	void *cbtable = cbmem_find(CBMEM_ID_CBTABLE);
	if (!cbtable) {
		printk(BIOS_WARNING, "FIT: No coreboot table found!\n");
		return;
	}

	/* First 'reg' address range is the coreboot table. */
	const struct lb_header *header = cbtable;
	reg_addrs[0] = (uintptr_t)header;
	reg_sizes[0] = header->header_bytes + header->table_bytes;

	/* Second is the CBMEM area (which usually includes the coreboot
	 * table). */
	cbmem_get_region(&baseptr, &size);
	if (!baseptr || size == 0) {
		printk(BIOS_WARNING, "FIT: CBMEM pointer/size not found!\n");
		return;
	}
	reg_addrs[1] = (uintptr_t)baseptr;
	reg_sizes[1] = size;

	dt_add_reg_prop(coreboot_node, reg_addrs, reg_sizes, 2, addr_cells,
			size_cells);

	/* Expose board ID, SKU ID, and RAM code to payload; skip any that
	 * are undefined for this board. */
	if (board_id() != UNDEFINED_STRAPPING_ID)
		dt_add_u32_prop(coreboot_node, "board-id", board_id());
	if (sku_id() != UNDEFINED_STRAPPING_ID)
		dt_add_u32_prop(coreboot_node, "sku-id", sku_id());
	if (ram_code() != UNDEFINED_STRAPPING_ID)
		dt_add_u32_prop(coreboot_node, "ram-code", ram_code());
}
void soc_add_mtc(struct lb_header *header) { struct lb_range *mtc; mtc = (struct lb_range *)lb_new_record(header); mtc->tag = LB_TAG_MTC; mtc->size = sizeof(*mtc); mtc->range_start = (uintptr_t)cbmem_find(CBMEM_ID_MTC); mtc->range_size = mtc_table_size; }
/*
 * Record the (possibly relocated) FSP information header address in the
 * CBMEM-resident FSP runtime structure.  Dies when that structure is
 * missing, since later FSP calls depend on it.
 */
void fsp_update_fih(FSP_INFO_HEADER *fih)
{
	struct fsp_runtime *fspr = cbmem_find(CBMEM_ID_FSP_RUNTIME);

	if (fspr == NULL)
		die("Can't update FSP runtime information.\n");

	fspr->fih = (uintptr_t)fih;
}
/*
 * Return non-zero when the vboot handoff requests recovery mode; zero when
 * recovery is not requested or no handoff structure exists in CBMEM.
 */
static int vboot_enable_recovery(void)
{
	struct vboot_handoff *handoff = cbmem_find(CBMEM_ID_VBOOT_HANDOFF);

	if (handoff == NULL)
		return 0;

	return !!(handoff->init_params.out_flags & VB_INIT_OUT_ENABLE_RECOVERY);
}