/*
 * Back up AGESA S3 data for use on resume.
 *
 * NvStorage goes to SPI flash (consumed by AmdInitResume), VolatileStorage
 * to the CBMEM scratch area (consumed by AmdS3LateRestore), and an MTRR
 * snapshot to SPI flash (consumed by restore_mtrr during CPU enumeration
 * in ramstage).  Each save step is best-effort; always returns
 * AGESA_SUCCESS.
 */
AGESA_STATUS OemS3Save(AMD_S3SAVE_PARAMS *S3SaveParams)
{
	AMD_S3_PARAMS *dataBlock = &S3SaveParams->S3DataBlock;
	/* Fix: MTRRStorage was used below but never declared in this
	 * variant (the void * variant of OemS3Save declares it locally). */
	u8 MTRRStorage[S3_DATA_MTRR_SIZE];
	u32 MTRRStorageSize = 0;
	uintptr_t pos, size;

	/* Reserve the high romstage stack region so it survives into resume. */
	if (HIGH_ROMSTAGE_STACK_SIZE)
		cbmem_add(CBMEM_ID_ROMSTAGE_RAM_STACK, HIGH_ROMSTAGE_STACK_SIZE);

	/* To be consumed in AmdInitResume. */
	get_s3nv_data(S3DataTypeNonVolatile, &pos, &size);
	if (size && dataBlock->NvStorageSize)
		spi_SaveS3info(pos, size, dataBlock->NvStorage,
			dataBlock->NvStorageSize);

	/* To be consumed in AmdS3LateRestore. */
	char *heap = cbmem_add(CBMEM_ID_RESUME_SCRATCH, HIGH_MEMORY_SCRATCH);
	if (heap) {
		memset(heap, 0, HIGH_MEMORY_SCRATCH);
		memcpy(heap, dataBlock->VolatileStorage,
			dataBlock->VolatileStorageSize);
	}

	/* Collect MTRR setup. */
	backup_mtrr(MTRRStorage, &MTRRStorageSize);

	/* To be consumed in restore_mtrr, CPU enumeration in ramstage. */
	get_s3nv_data(S3DataTypeMTRR, &pos, &size);
	if (size && MTRRStorageSize)
		spi_SaveS3info(pos, size, MTRRStorage, MTRRStorageSize);

	return AGESA_SUCCESS;
}
/* Pre-allocate the CBMEM regions needed by ACPI S3 resume. */
void acpi_prepare_resume_backup(void)
{
	if (!acpi_s3_resume_allowed())
		return;

	/*
	 * Reserve the resume backup and scratch areas up front so they are
	 * guaranteed to exist at reboot time.  The returned pointers are
	 * not needed here; if an allocation fails, ACPI resume simply ends
	 * up disabled later.
	 */
	if (HIGH_MEMORY_SAVE)
		cbmem_add(CBMEM_ID_RESUME, HIGH_MEMORY_SAVE);

	if (HIGH_MEMORY_SCRATCH)
		cbmem_add(CBMEM_ID_RESUME_SCRATCH, HIGH_MEMORY_SCRATCH);
}
void *write_tables(void) { uintptr_t cbtable_start; uintptr_t cbtable_end; size_t cbtable_size; const size_t max_table_size = COREBOOT_TABLE_SIZE; cbtable_start = (uintptr_t)cbmem_add(CBMEM_ID_CBTABLE, max_table_size); if (!cbtable_start) { printk(BIOS_ERR, "Could not add CBMEM for coreboot table.\n"); return NULL; } /* Add architecture specific tables. */ arch_write_tables(cbtable_start); /* Write the coreboot table. */ cbtable_end = write_coreboot_table(cbtable_start); cbtable_size = cbtable_end - cbtable_start; if (cbtable_size > max_table_size) { printk(BIOS_ERR, "%s: coreboot table didn't fit (%zx/%zx)\n", __func__, cbtable_size, max_table_size); } printk(BIOS_DEBUG, "coreboot table: %zd bytes.\n", cbtable_size); /* Print CBMEM sections */ cbmem_list(); return (void *)cbtable_start; }
/*
 * Create and populate the vboot_handoff structure in CBMEM so later
 * stages and the payload can read the verified-boot results.  Dies if
 * the CBMEM allocation fails.
 */
void vboot_fill_handoff(void)
{
	struct vboot_handoff *vh;
	struct vb2_shared_data *sd;

	sd = vb2_get_shared_data();
	/* The workbuf hash fields are not carried forward; clear them. */
	sd->workbuf_hash_offset = 0;
	sd->workbuf_hash_size = 0;

	printk(BIOS_INFO, "creating vboot_handoff structure\n");
	vh = cbmem_add(CBMEM_ID_VBOOT_HANDOFF, sizeof(*vh));
	if (vh == NULL)
		/* we don't need to failover gracefully here because this
		 * shouldn't happen with the image that has passed QA. */
		die("failed to allocate vboot_handoff structure\n");

	memset(vh, 0, sizeof(*vh));

	/* needed until we finish transition to vboot2 for kernel verification */
	fill_vboot_handoff(vh, sd);

	/*
	 * The recovery mode switch is cleared (typically backed by EC) here
	 * to allow multiple queries to get_recovery_mode_switch() and have
	 * them return consistent results during the verified boot path as well
	 * as dram initialization. x86 systems ignore the saved dram settings
	 * in the recovery path in order to start from a clean slate. Therefore
	 * clear the state here since this function is called when memory
	 * is known to be up.
	 */
	clear_recovery_mode_switch();
}
/*
 * Attempt to recover an already-existing CBMEM area (e.g. across S3).
 * If recovery succeeds, optionally pre-add an entry of @id/@size and run
 * the CBMEM init hooks in recovery mode.
 * Returns 0 on successful recovery, 1 if no area could be recovered.
 */
int cbmem_initialize_id_size(u32 id, u64 size)
{
	struct imd *imd;
	struct imd imd_backing;
	const int recovery = 1;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	/* Non-zero from imd_recover() means nothing could be recovered. */
	if (imd_recover(imd))
		return 1;

#if defined(__PRE_RAM__)
	/*
	 * Lock the imd in romstage on a recovery. The assumption is that
	 * if the imd area was recovered in romstage then S3 resume path
	 * is being taken.
	 */
	imd_lockdown(imd);
#endif

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(recovery);

	/* Recovery successful. */
	return 0;
}
/* Boot-state hook: carve out the vboot firmware work buffer in CBMEM. */
static void vboot_setup_cbmem(int unused)
{
	void *workbuf = cbmem_add(CBMEM_ID_VBOOT_WORKBUF,
				  VB2_FIRMWARE_WORKBUF_RECOMMENDED_SIZE);
	assert(workbuf != NULL);
}
static FILE *fopen(const char *path, const char *mode) { #if CONFIG_DEBUG_COVERAGE printk(BIOS_DEBUG, "fopen %s with mode %s\n", path, mode); #endif if (!current_file) { current_file = cbmem_add(CBMEM_ID_COVERAGE, 32*1024); } else { previous_file = current_file; current_file = (FILE *)(ALIGN(((unsigned long)previous_file->data + previous_file->len), 16)); } // TODO check if we're at the end of the CBMEM region (ENOMEM) if (current_file) { current_file->magic = 0x584d4153; current_file->next = NULL; if (previous_file) previous_file->next = current_file; current_file->filename = (char *)¤t_file[1]; strcpy(current_file->filename, path); current_file->data = (char *)ALIGN(((unsigned long)current_file->filename + strlen(path) + 1), 16); current_file->offset = 0; current_file->len = 0; } return current_file; }
int fsp_relocate(struct prog *fsp_relocd, const struct region_device *fsp_src) { void *new_loc; void *fih; ssize_t fih_offset; size_t size = region_device_sz(fsp_src); new_loc = cbmem_add(CBMEM_ID_REFCODE, size); if (new_loc == NULL) { printk(BIOS_ERR, "ERROR: Unable to load FSP into memory.\n"); return -1; } if (rdev_readat(fsp_src, new_loc, 0, size) != size) { printk(BIOS_ERR, "ERROR: Can't read FSP's region device.\n"); return -1; } fih_offset = fsp1_1_relocate((uintptr_t)new_loc, new_loc, size); if (fih_offset <= 0) { printk(BIOS_ERR, "ERROR: FSP relocation faiulre.\n"); return -1; } fih = (void *)((uint8_t *)new_loc + fih_offset); prog_set_area(fsp_relocd, new_loc, size); prog_set_entry(fsp_relocd, fih, NULL); return 0; }
/* Write the coreboot table into a fixed-size CBMEM entry. */
void write_tables(void)
{
	unsigned long base, end;

	post_code(0x9d);

	base = (unsigned long)cbmem_add(CBMEM_ID_CBTABLE,
					MAX_COREBOOT_TABLE_SIZE);
	if (!base) {
		printk(BIOS_ERR, "Could not add CBMEM for coreboot table.\n");
		return;
	}

	end = write_coreboot_table(0UL, 0UL, base, base);

	/* Warn when the table spilled past the reserved region. */
	if (end > (base + MAX_COREBOOT_TABLE_SIZE))
		printk(BIOS_ERR, "coreboot table didn't fit (%lx/%x bytes)\n",
		       end - base, MAX_COREBOOT_TABLE_SIZE);

	printk(BIOS_DEBUG, "coreboot table: %ld bytes.\n", end - base);

	post_code(0x9e);

	/* Print CBMEM sections */
	cbmem_list();
}
void save_mrc_data(struct pei_data *pei_data) { struct mrc_data_container *mrcdata; int output_len = ALIGN(pei_data->mrc_output_len, 16); /* Save the MRC S3 restore data to cbmem */ mrcdata = cbmem_add (CBMEM_ID_MRCDATA, output_len + sizeof(struct mrc_data_container)); printk(BIOS_DEBUG, "Relocate MRC DATA from %p to %p (%u bytes)\n", pei_data->mrc_output, mrcdata, output_len); mrcdata->mrc_signature = MRC_DATA_SIGNATURE; mrcdata->mrc_data_size = output_len; mrcdata->reserved = 0; memcpy(mrcdata->mrc_data, pei_data->mrc_output, pei_data->mrc_output_len); /* Zero the unused space in aligned buffer. */ if (output_len > pei_data->mrc_output_len) memset(mrcdata->mrc_data+pei_data->mrc_output_len, 0, output_len - pei_data->mrc_output_len); mrcdata->mrc_checksum = compute_ip_checksum(mrcdata->mrc_data, mrcdata->mrc_data_size); }
/*
 * SoC init: run FSP silicon init with GPIO IRQ polarities preserved
 * around it, then perform post-FSP fixups and allocate ACPI GNVS.
 */
static void soc_init(void *data)
{
	struct global_nvs_t *gnvs;

	/* Save VBT info and mapping */
	if (locate_vbt(&vbt_rdev) != CB_ERR)
		vbt = rdev_mmap_full(&vbt_rdev);

	/* Snapshot the current GPIO IRQ polarities. FSP is setting a
	 * default policy that doesn't honor boards' requirements. */
	itss_snapshot_irq_polarities(GPIO_IRQ_START, GPIO_IRQ_END);

	fsp_silicon_init();

	/* Restore GPIO IRQ polarities back to previous settings. */
	itss_restore_irq_polarities(GPIO_IRQ_START, GPIO_IRQ_END);

	/* override 'enabled' setting in device tree if needed */
	pcie_override_devicetree_after_silicon_init();

	/*
	 * Keep the P2SB device visible so it and the other devices are
	 * visible in coreboot for driver support and PCI resource allocation.
	 * There is a UPD setting for this, but it's more consistent to use
	 * hide and unhide symmetrically.
	 */
	p2sb_unhide();

	/* Allocate ACPI NVS in CBMEM */
	/* NOTE(review): the result is neither checked nor initialized here —
	 * presumably consumers locate it later via cbmem_find(); confirm. */
	gnvs = cbmem_add(CBMEM_ID_ACPI_GNVS, sizeof(*gnvs));
}
void *backup_default_smm_area(void) { void *save_area; const void *default_smm = (void *)SMM_DEFAULT_BASE; if (!IS_ENABLED(CONFIG_HAVE_ACPI_RESUME)) return NULL; /* * The buffer needs to be preallocated regardless. In the non-resume * path it will be allocated for handling resume. Note that cbmem_add() * does a find before the addition. */ save_area = cbmem_add(CBMEM_ID_SMM_SAVE_SPACE, SMM_DEFAULT_SIZE); if (save_area == NULL) { printk(BIOS_DEBUG, "SMM save area not added.\n"); return NULL; } /* Only back up the area on S3 resume. */ if (acpi_slp_type == 3) { memcpy(save_area, default_smm, SMM_DEFAULT_SIZE); return save_area; } /* * Not the S3 resume path. No need to restore memory contents after * SMM relocation. */ return NULL; }
/*
 * Install the spin-table wait code in CBMEM and point it at
 * @monitor_address, which is zeroed before use.
 */
void spintable_init(void *monitor_address)
{
	extern void __wait_for_spin_table_request(void);
	const size_t shim_size = 4096;

	if (monitor_address == NULL) {
		printk(BIOS_ERR, "spintable: NULL address to monitor.\n");
		return;
	}

	spin_attrs.entry = cbmem_add(CBMEM_ID_SPINTABLE, shim_size);
	if (spin_attrs.entry == NULL)
		return;

	spin_attrs.addr = monitor_address;

	printk(BIOS_INFO, "spintable @ %p will monitor %p\n",
	       spin_attrs.entry, spin_attrs.addr);

	/* Ensure the memory location is zero'd out. */
	*(uint64_t *)monitor_address = 0;

	memcpy(spin_attrs.entry, __wait_for_spin_table_request, shim_size);

	/* Push the zeroed word and the copied code out of the data cache. */
	dcache_clean_invalidate_by_mva(monitor_address, sizeof(uint64_t));
	dcache_clean_invalidate_by_mva(spin_attrs.entry, shim_size);
}
void car_migrate_variables(void) { void *migrated_base; car_migration_func_t *migrate_func; size_t car_data_size = &_car_data_end[0] - &_car_data_start[0]; /* Check if already migrated. */ if (car_migrated) return; migrated_base = cbmem_add(CBMEM_ID_CAR_GLOBALS, car_data_size); if (migrated_base == NULL) { printk(BIOS_ERR, "Could not migrate CAR data!\n"); return; } memcpy(migrated_base, &_car_data_start[0], car_data_size); /* Mark that the data has been moved. */ car_migrated = ~0; /* Call all the migration functions. */ migrate_func = &_car_migrate_start; while (*migrate_func != NULL) { (*migrate_func)(); migrate_func++; } }
/*
 * Create a brand-new (empty) CBMEM area, optionally pre-adding a single
 * entry of @id/@size, then run the CBMEM init hooks (non-recovery).
 */
void cbmem_initialize_empty_id_size(u32 id, u64 size)
{
	struct imd imd_backing;
	struct imd *imd;
	const int no_recovery = 0;

	cbmem_top_init_once();

	imd = imd_init_backing(&imd_backing);
	imd_handle_init(imd, cbmem_top());

	printk(BIOS_DEBUG, "CBMEM:\n");

	if (imd_create_tiered_empty(imd, CBMEM_ROOT_MIN_SIZE, CBMEM_LG_ALIGN,
				    CBMEM_SM_ROOT_SIZE, CBMEM_SM_ALIGN)) {
		printk(BIOS_DEBUG, "failed.\n");
		return;
	}

	/* Add the specified range first */
	if (size)
		cbmem_add(id, size);

	/* Complete migration to CBMEM. */
	cbmem_run_init_hooks(no_recovery);
}
void mainboard_save_dimm_info(struct romstage_params *params) { struct dimm_info *dimm; struct memory_info *mem_info; /* * Allocate CBMEM area for DIMM information used to populate SMBIOS * table 17 */ mem_info = cbmem_add(CBMEM_ID_MEMINFO, sizeof(*mem_info)); printk(BIOS_DEBUG, "CBMEM entry for DIMM info: 0x%p\n", mem_info); if (mem_info == NULL) return; memset(mem_info, 0, sizeof(*mem_info)); /* Describe the first channel memory */ dimm = &mem_info->dimm[0]; set_dimm_info(params->pei_data->spd_data_ch0, dimm); mem_info->dimm_cnt = 1; /* Describe the second channel memory */ if (params->pei_data->spd_ch1_config == 1) { dimm = &mem_info->dimm[1]; set_dimm_info(params->pei_data->spd_data_ch1, dimm); dimm->channel_num = 1; mem_info->dimm_cnt = 2; } }
/*
 * Return the vboot2 working data buffer: a CBMEM entry when vboot starts
 * in romstage, otherwise the statically allocated work area.
 */
static struct vb2_working_data * const vboot_get_working_data(void)
{
	if (!IS_ENABLED(CONFIG_VBOOT_STARTS_IN_ROMSTAGE))
		return (struct vb2_working_data *)_vboot2_work;

	/* cbmem_add() does a cbmem_find() first. */
	return cbmem_add(CBMEM_ID_VBOOT_WORKBUF, vb_work_buf_size);
}
/* Stage cache uses cbmem. */
void stage_cache_add(int stage_id, const struct prog *stage)
{
	struct stage_cache *meta;
	void *copy;

	/* Record the stage's load and entry addresses first... */
	meta = cbmem_add(CBMEM_ID_STAGEx_META + stage_id, sizeof(*meta));
	if (meta == NULL)
		return;
	meta->load_addr = (uintptr_t)prog_start(stage);
	meta->entry_addr = (uintptr_t)prog_entry(stage);

	/* ...then snapshot the stage image itself. */
	copy = cbmem_add(CBMEM_ID_STAGEx_CACHE + stage_id, prog_size(stage));
	if (copy == NULL)
		return;
	memcpy(copy, prog_start(stage), prog_size(stage));
}
/*
 * Collect WiFi calibration blobs from the VPD and pack them into a
 * single CBMEM entry.  The entry holds a calibration_entry header
 * followed by back-to-back calibration_blob records (header, key,
 * value); each blob's blob_size is the stride to the next record.
 * Returns silently if no calibration data exists or CBMEM is full.
 */
void cbmem_add_vpd_calibration_data(void)
{
	size_t cbmem_entry_size, filled_entries;
	struct calibration_entry *cbmem_entry;
	struct calibration_blob *cal_blob;
	int i;

	/*
	 * Allocate one more cache entry than max required, to make sure that
	 * the last entry can be identified by the key size of zero.
	 */
	struct vpd_blob_cache_t vpd_blob_cache[ARRAY_SIZE(templates) *
					      MAX_WIFI_INTERFACE_COUNT];

	cbmem_entry_size = fill_up_entries_cache(vpd_blob_cache,
						 ARRAY_SIZE(vpd_blob_cache),
						 &filled_entries);
	if (!cbmem_entry_size)
		return; /* No calibration data found in the VPD. */

	/* Account for the leading calibration_entry header. */
	cbmem_entry_size += sizeof(struct calibration_entry);
	cbmem_entry = cbmem_add(CBMEM_ID_WIFI_CALIBRATION, cbmem_entry_size);
	if (!cbmem_entry) {
		printk(BIOS_ERR, "%s: no room in cbmem to add %zd bytes\n",
		       __func__, cbmem_entry_size);
		return;
	}

	cbmem_entry->size = cbmem_entry_size;

	/* Copy cached data into the CBMEM entry. */
	cal_blob = cbmem_entry->entries;

	for (i = 0; i < filled_entries; i++) {
		/* Use this as a pointer to the current cache entry. */
		struct vpd_blob_cache_t *cache = vpd_blob_cache + i;
		char *pointer;

		cal_blob->blob_size = cache->blob_size;
		cal_blob->key_size = cache->key_size;
		cal_blob->value_size = cache->value_size;

		/* copy the key */
		pointer = (char *)(cal_blob + 1);
		memcpy(pointer, cache->key_name, cache->key_size);

		/* and the value */
		pointer += cache->key_size;
		memcpy(pointer, cache->value_pointer, cache->value_size);
		/* The cached value was heap-allocated; release it now. */
		free(cache->value_pointer);

		printk(BIOS_INFO, "%s: added %s to CBMEM\n",
		       __func__, cache->key_name);

		/* Advance to the next blob record. */
		cal_blob = (struct calibration_blob *)
			((char *)cal_blob + cal_blob->blob_size);
	}
}
/*
 * Save the FSP memory HOB (mrc data) to the MRC area in CBMEM
 *
 * Locates the non-volatile-storage GUID HOB in @hob_start, copies its
 * payload (16-byte aligned, zero padded) into a CBMEM
 * mrc_data_container, and checksums it.
 * Returns 1 when the data was saved, 0 when the HOB is absent or CBMEM
 * could not provide space.
 */
int save_mrc_data(void *hob_start)
{
	u32 *mrc_hob;
	u32 *mrc_hob_data;
	u32 mrc_hob_size;
	struct mrc_data_container *mrc_data;
	int output_len;
	const EFI_GUID mrc_guid = FSP_NON_VOLATILE_STORAGE_HOB_GUID;

	mrc_hob = get_next_guid_hob(&mrc_guid, hob_start);
	if (mrc_hob == NULL) {
		printk(BIOS_DEBUG, "Memory Configure Data Hob is not present\n");
		return 0;
	}

	mrc_hob_data = GET_GUID_HOB_DATA(mrc_hob);
	mrc_hob_size = (u32) GET_HOB_LENGTH(mrc_hob);

	printk(BIOS_DEBUG, "Memory Configure Data Hob at %p (size = 0x%x).\n",
	       (void *)mrc_hob_data, mrc_hob_size);

	/* Pad to a 16-byte multiple so the stored size is stable. */
	output_len = ALIGN(mrc_hob_size, 16);

	/* Save the MRC S3/fast boot/ADR restore data to cbmem */
	mrc_data = cbmem_add(CBMEM_ID_MRCDATA,
			     output_len + sizeof(struct mrc_data_container));

	/* Just return if there was a problem with getting CBMEM */
	if (mrc_data == NULL) {
		printk(BIOS_WARNING,
		       "CBMEM was not available to save the fast boot cache data.\n");
		return 0;
	}

	printk(BIOS_DEBUG,
	       "Copy FSP MRC DATA to HOB (source addr %p, dest addr %p, %u bytes)\n",
	       (void *)mrc_hob_data, mrc_data, output_len);

	mrc_data->mrc_signature = MRC_DATA_SIGNATURE;
	mrc_data->mrc_data_size = output_len;
	mrc_data->reserved = 0;
	memcpy(mrc_data->mrc_data, (const void *)mrc_hob_data, mrc_hob_size);

	/* Zero the unused space in aligned buffer. */
	if (output_len > mrc_hob_size)
		memset((mrc_data->mrc_data + mrc_hob_size), 0,
		       output_len - mrc_hob_size);

	/* Checksum lets later boots detect a corrupted cache. */
	mrc_data->mrc_checksum = compute_ip_checksum(mrc_data->mrc_data,
						     mrc_data->mrc_data_size);

#if IS_ENABLED(CONFIG_DISPLAY_FAST_BOOT_DATA)
	printk(BIOS_SPEW, "Fast boot data (includes align and checksum):\n");
	hexdump32(BIOS_SPEW, (void *)mrc_data->mrc_data, output_len);
#endif
	return 1;
}
/*
 * Load the FSP-S (silicon init) component into CBMEM and relocate it.
 * On S3 resume the previously relocated image is restored from the
 * stage cache instead of CBFS.  Dies on any failure, since FSP-S is
 * required to boot.  Work is done at most once (guarded by load_done).
 */
void fsps_load(bool s3wake)
{
	struct fsp_header *hdr = &fsps_hdr;
	struct cbfsf file_desc;
	struct region_device rdev;
	const char *name = CONFIG_FSP_S_CBFS;
	void *dest;
	size_t size;
	struct prog fsps = PROG_INIT(PROG_REFCODE, name);
	static int load_done;

	if (load_done)
		return;

	/* S3 path: reuse the already-relocated image from the stage cache. */
	if (s3wake && !IS_ENABLED(CONFIG_NO_STAGE_CACHE)) {
		printk(BIOS_DEBUG, "Loading FSPS from stage_cache\n");
		stage_cache_load_stage(STAGE_REFCODE, &fsps);
		if (fsp_validate_component(hdr, prog_rdev(&fsps)) != CB_SUCCESS)
			die("On resume fsps header is invalid\n");
		load_done = 1;
		return;
	}

	if (cbfs_boot_locate(&file_desc, name, NULL)) {
		printk(BIOS_ERR, "Could not locate %s in CBFS\n", name);
		die("FSPS not available!\n");
	}

	cbfs_file_data(&rdev, &file_desc);

	/* Load and relocate into CBMEM. */
	size = region_device_sz(&rdev);
	dest = cbmem_add(CBMEM_ID_REFCODE, size);

	if (dest == NULL)
		die("Could not add FSPS to CBMEM!\n");

	if (rdev_readat(&rdev, dest, 0, size) < 0)
		die("Failed to read FSPS!\n");

	if (fsp_component_relocate((uintptr_t)dest, dest, size) < 0)
		die("Unable to relocate FSPS!\n");

	/* Create new region device in memory after relocation. */
	rdev_chain(&rdev, &addrspace_32bit.rdev, (uintptr_t)dest, size);

	if (fsp_validate_component(hdr, &rdev) != CB_SUCCESS)
		die("Invalid FSPS header!\n");

	prog_set_area(&fsps, dest, size);

	/* Cache the relocated image for the next S3 resume. */
	stage_cache_add(STAGE_REFCODE, &fsps);

	/* Signal that FSP component has been loaded. */
	prog_segment_loaded(hdr->image_base, hdr->image_size, SEG_FINAL);

	load_done = 1;
}
void *igd_make_opregion(void) { igd_opregion_t *opregion; printk(BIOS_DEBUG, "ACPI: * IGD OpRegion\n"); opregion = cbmem_add(CBMEM_ID_IGD_OPREGION, sizeof (*opregion)); if (opregion) init_igd_opregion(opregion); return opregion; }
/* Boot-state hook: reserve the ramoops buffer in CBMEM, if configured. */
static void ramoops_alloc(void *arg)
{
	const size_t ramoops_size = CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE;

	if (ramoops_size == 0)
		return;

	if (cbmem_add(CBMEM_ID_RAM_OOPS, ramoops_size) == NULL)
		printk(BIOS_ERR, "Could not allocate RAMOOPS buffer\n");
}
/******************************************************************************* * The FSP early_init function returns to this function. * Memory is setup and the stack is set by the FSP. */ void romstage_main_continue(EFI_STATUS status, void *hob_list_ptr) { int cbmem_was_initted; void *cbmem_hob_ptr; uint32_t prev_sleep_state; struct romstage_handoff *handoff; timestamp_add_now(TS_AFTER_INITRAM); post_code(0x4a); printk(BIOS_DEBUG, "%s status: %x hob_list_ptr: %x\n", __func__, (u32) status, (u32) hob_list_ptr); #if IS_ENABLED(CONFIG_USBDEBUG_IN_ROMSTAGE) /* FSP reconfigures USB, so reinit it to have debug */ usbdebug_init(); #endif /* IS_ENABLED(CONFIG_USBDEBUG_IN_ROMSTAGE) */ printk(BIOS_DEBUG, "FSP Status: 0x%0x\n", (u32)status); /* Get previous sleep state again and clear */ prev_sleep_state = chipset_prev_sleep_state(1); printk(BIOS_DEBUG, "%s: prev_sleep_state = S%d\n", __func__, prev_sleep_state); report_platform_info(); post_code(0x4b); late_mainboard_romstage_entry(); post_code(0x4c); /* if S3 resume skip ram check */ if (prev_sleep_state != 3) { quick_ram_check(); post_code(0x4d); } cbmem_was_initted = !cbmem_recovery(prev_sleep_state == 3); /* Save the HOB pointer in CBMEM to be used in ramstage*/ cbmem_hob_ptr = cbmem_add (CBMEM_ID_HOB_POINTER, sizeof(*hob_list_ptr)); *(u32*)cbmem_hob_ptr = (u32)hob_list_ptr; post_code(0x4e); handoff = romstage_handoff_find_or_add(); if (handoff != NULL) handoff->s3_resume = (prev_sleep_state == 3); else printk(BIOS_DEBUG, "Romstage handoff structure not added!\n"); post_code(0x4f); /* Load the ramstage. */ copy_and_run(); while (1); }
/* Romstage needs quite a bit of stack for decompressing images since the lzma
 * lib keeps its state on the stack during romstage.
 */
static unsigned long choose_top_of_stack(void)
{
	const unsigned long stack_size = 0x5000;
	unsigned long base;

	/* cbmem_add() does a find() before add(). */
	base = (unsigned long)cbmem_add(CBMEM_ID_ROMSTAGE_RAM_STACK,
					stack_size);
	return base + stack_size;
}
/*
 * Make sure the ACPI GNVS area exists in CBMEM; zero it on any boot
 * that is not an S3 wake (resume keeps the saved contents).
 */
static void s3_resume_prepare(void)
{
	global_nvs_t *gnvs = cbmem_add(CBMEM_ID_ACPI_GNVS,
				       sizeof(global_nvs_t));

	if (gnvs == NULL)
		return;

	if (!acpi_is_wakeup_s3())
		memset(gnvs, 0, sizeof(global_nvs_t));
}
/* Dynamically reserve the ramoops region in CBMEM and record it in ACPI. */
static void reserve_ram_oops_dynamic(chromeos_acpi_t *chromeos)
{
	const size_t ramoops_size = CONFIG_CHROMEOS_RAMOOPS_RAM_SIZE;
	void *buf;

	if (!IS_ENABLED(CONFIG_CHROMEOS_RAMOOPS_DYNAMIC))
		return;

	buf = cbmem_add(CBMEM_ID_RAM_OOPS, ramoops_size);
	set_ramoops(chromeos, buf, ramoops_size);
}
/* Stash the FSP header and HOB list pointers in CBMEM for later stages. */
void fsp_set_runtime(FSP_INFO_HEADER *fih, void *hob_list)
{
	struct fsp_runtime *runtime;

	runtime = cbmem_add(CBMEM_ID_FSP_RUNTIME, sizeof(*runtime));
	if (runtime == NULL)
		die("Can't save FSP runtime information.\n");

	runtime->fih = (uintptr_t)fih;
	runtime->hob_list = (uintptr_t)hob_list;
}
int rmodule_stage_load(struct rmod_stage_load *rsl) { struct rmodule rmod_stage; size_t region_size; char *stage_region; int rmodule_offset; int load_offset; struct cbfs_stage stage; void *rmod_loc; struct region_device *fh; if (rsl->prog == NULL || prog_name(rsl->prog) == NULL) return -1; fh = prog_rdev(rsl->prog); if (rdev_readat(fh, &stage, 0, sizeof(stage)) != sizeof(stage)) return -1; rmodule_offset = rmodule_calc_region(DYN_CBMEM_ALIGN_SIZE, stage.memlen, ®ion_size, &load_offset); stage_region = cbmem_add(rsl->cbmem_id, region_size); if (stage_region == NULL) return -1; rmod_loc = &stage_region[rmodule_offset]; printk(BIOS_INFO, "Decompressing stage %s @ 0x%p (%d bytes)\n", prog_name(rsl->prog), rmod_loc, stage.memlen); if (!cbfs_load_and_decompress(fh, sizeof(stage), stage.len, rmod_loc, stage.memlen, stage.compression)) return -1; if (rmodule_parse(rmod_loc, &rmod_stage)) return -1; if (rmodule_load(&stage_region[load_offset], &rmod_stage)) return -1; prog_set_area(rsl->prog, rmod_stage.location, rmodule_memory_size(&rmod_stage)); prog_set_entry(rsl->prog, rmodule_entry(&rmod_stage), NULL); /* Allow caller to pick up parameters, if available. */ rsl->params = rmodule_parameters(&rmod_stage); return 0; }
/*
 * AGESA callout: back up S3 data during a normal boot so the resume
 * path can restore it.  NvStorage and an MTRR snapshot go to SPI flash;
 * VolatileStorage goes to the CBMEM scratch area.  The parameter type
 * differs per AGESA generation, hence the void pointer and the
 * compile-time cast below.  Always returns AGESA_SUCCESS.
 */
AGESA_STATUS OemS3Save(void *vS3SaveParams)
{
#if IS_ENABLED(CONFIG_CPU_AMD_PI_00660F01)
	AMD_RTB_PARAMS *S3SaveParams = (AMD_RTB_PARAMS *)vS3SaveParams;
	S3_DATA_BLOCK *dataBlock = &S3SaveParams->S3DataBlock;
#else
	AMD_S3SAVE_PARAMS *S3SaveParams = (AMD_S3SAVE_PARAMS *)vS3SaveParams;
	AMD_S3_PARAMS *dataBlock = &S3SaveParams->S3DataBlock;
#endif
	u8 MTRRStorage[S3_DATA_MTRR_SIZE];
	u32 MTRRStorageSize = 0;
	u32 pos, size;

	/* Reserve the romstage RAM stack region for the resume path. */
	if (HIGH_ROMSTAGE_STACK_SIZE)
		cbmem_add(CBMEM_ID_ROMSTAGE_RAM_STACK, HIGH_ROMSTAGE_STACK_SIZE);

	/* To be consumed in AmdInitResume. */
	get_s3nv_data(S3DataTypeNonVolatile, &pos, &size);
	if (size && dataBlock->NvStorageSize)
		spi_SaveS3info(pos, size, dataBlock->NvStorage,
			dataBlock->NvStorageSize);

	/* To be consumed in AmdS3LateRestore. */
	char *heap = cbmem_add(CBMEM_ID_RESUME_SCRATCH, HIGH_MEMORY_SCRATCH);
	if (heap) {
		memset(heap, 0, HIGH_MEMORY_SCRATCH);
		memcpy(heap, dataBlock->VolatileStorage,
			dataBlock->VolatileStorageSize);
	}

	/* Collect MTRR setup. */
	backup_mtrr(MTRRStorage, &MTRRStorageSize);

	/* To be consumed in restore_mtrr, CPU enumeration in ramstage. */
	get_s3nv_data(S3DataTypeMTRR, &pos, &size);
	if (size && MTRRStorageSize)
		spi_SaveS3info(pos, size, MTRRStorage, MTRRStorageSize);

	return AGESA_SUCCESS;
}