/*
 * Load ARM Trusted Firmware BL31 from CBFS and hand off control to it,
 * passing the payload (BL33) entry point and, optionally, a secure OS
 * (BL32) entry point.
 *
 * payload_entry - BL33 (normal-world payload) entry address.
 * payload_arg0  - first argument handed to the payload in arg0.
 * payload_spsr  - SPSR value BL31 programs before ERET-ing to the payload.
 *
 * Never returns: BL31 takes over; if it comes back we die().
 */
void arm_tf_run_bl31(u64 payload_entry, u64 payload_arg0, u64 payload_spsr)
{
	struct prog bl31 = PROG_INIT(PROG_BL31, CONFIG_CBFS_PREFIX"/bl31");
	void (*bl31_entry)(bl31_params_t *params, void *plat_params) = NULL;

	if (prog_locate(&bl31))
		die("BL31 not found");

	bl31_entry = selfload(&bl31, false);
	if (!bl31_entry)
		die("BL31 load failed");

	SET_PARAM_HEAD(&bl31_params, PARAM_BL31, VERSION_1, 0);

	if (IS_ENABLED(CONFIG_ARM64_USE_SECURE_OS)) {
		struct prog bl32 = PROG_INIT(PROG_BL32,
					     CONFIG_CBFS_PREFIX"/secure_os");

		if (prog_locate(&bl32))
			die("BL32 not found");

		if (cbfs_prog_stage_load(&bl32))
			die("BL32 load failed");

		SET_PARAM_HEAD(&bl32_ep_info, PARAM_EP, VERSION_1,
			       PARAM_EP_SECURE);
		bl32_ep_info.pc = (uintptr_t)prog_entry(&bl32);
		/* BL32 is entered at EL1, all exceptions masked. */
		bl32_ep_info.spsr = SPSR_EXCEPTION_MASK |
				    get_eret_el(EL1, SPSR_USE_L);
		bl31_params.bl32_ep_info = &bl32_ep_info;
	}

	bl31_params.bl33_ep_info = &bl33_ep_info;

	SET_PARAM_HEAD(&bl33_ep_info, PARAM_EP, VERSION_1, PARAM_EP_NON_SECURE);
	bl33_ep_info.pc = payload_entry;
	bl33_ep_info.spsr = payload_spsr;
	bl33_ep_info.args.arg0 = payload_arg0;

	/* May update bl31_params if necessary. Must flush all added structs. */
	void *bl31_plat_params = soc_get_bl31_plat_params(&bl31_params);

	/*
	 * BL31 starts with the MMU (and thus caches) disabled, so every
	 * parameter structure it will read must be cleaned to the point of
	 * coherency. Previously bl32_ep_info was linked into bl31_params but
	 * never flushed — BL31 could observe stale data for the secure OS
	 * entry point. Flush it along with the others.
	 */
	dcache_clean_by_mva(&bl31_params, sizeof(bl31_params));
	dcache_clean_by_mva(&bl33_ep_info, sizeof(bl33_ep_info));
	if (IS_ENABLED(CONFIG_ARM64_USE_SECURE_OS))
		dcache_clean_by_mva(&bl32_ep_info, sizeof(bl32_ep_info));

	/* Mask all exceptions and turn off the MMU before jumping in. */
	raw_write_daif(SPSR_EXCEPTION_MASK);
	mmu_disable();

	bl31_entry(&bl31_params, bl31_plat_params);
	die("BL31 returned!");
}
void run_ramstage(void) { struct prog ramstage = PROG_INIT(ASSET_RAMSTAGE, CONFIG_CBFS_PREFIX "/ramstage"); /* Only x86 systems currently take the same firmware path on resume. */ if (IS_ENABLED(CONFIG_ARCH_X86) && IS_ENABLED(CONFIG_EARLY_CBMEM_INIT)) run_ramstage_from_resume(romstage_handoff_find_or_add(), &ramstage); if (prog_locate(&ramstage)) goto fail; timestamp_add_now(TS_START_COPYRAM); if (IS_ENABLED(CONFIG_RELOCATABLE_RAMSTAGE)) { if (load_relocatable_ramstage(&ramstage)) goto fail; } else if (cbfs_prog_stage_load(&ramstage)) goto fail; stage_cache_add(STAGE_RAMSTAGE, &ramstage); timestamp_add_now(TS_END_COPYRAM); prog_run(&ramstage); fail: die("Ramstage was not loaded!\n"); }
static pei_wrapper_entry_t load_reference_code(void) { struct prog prog = PROG_INIT(ASSET_REFCODE, CONFIG_CBFS_PREFIX "/refcode"); struct rmod_stage_load refcode = { .cbmem_id = CBMEM_ID_REFCODE, .prog = &prog, }; if (acpi_is_wakeup_s3()) { return load_refcode_from_cache(); } if (prog_locate(&prog)) { printk(BIOS_DEBUG, "Couldn't locate reference code.\n"); return NULL; } if (rmodule_stage_load(&refcode)) { printk(BIOS_DEBUG, "Error loading reference code.\n"); return NULL; } /* Cache loaded reference code. */ stage_cache_add(STAGE_REFCODE, &prog); return (pei_wrapper_entry_t)prog_entry(&prog); }
/*
 * Load the FSP-S component into CBMEM, relocate it, validate its header,
 * and add it to the stage cache. On S3 resume (with stage cache enabled)
 * the cached copy is restored instead. Idempotent: subsequent calls are
 * no-ops. Dies on any failure.
 */
void fsps_load(bool s3wake)
{
	static int load_done;
	struct fsp_header *hdr = &fsps_hdr;
	const char *name = CONFIG_FSP_S_CBFS;
	struct prog fsps = PROG_INIT(PROG_REFCODE, name);
	struct cbfsf file_desc;
	struct region_device rdev;
	void *load_addr;
	size_t blob_size;

	if (load_done)
		return;

	/* Resume path: restore the relocated image from the stage cache. */
	if (s3wake && !IS_ENABLED(CONFIG_NO_STAGE_CACHE)) {
		printk(BIOS_DEBUG, "Loading FSPS from stage_cache\n");
		stage_cache_load_stage(STAGE_REFCODE, &fsps);
		if (fsp_validate_component(hdr, prog_rdev(&fsps)) != CB_SUCCESS)
			die("On resume fsps header is invalid\n");
		load_done = 1;
		return;
	}

	if (cbfs_boot_locate(&file_desc, name, NULL)) {
		printk(BIOS_ERR, "Could not locate %s in CBFS\n", name);
		die("FSPS not available!\n");
	}

	cbfs_file_data(&rdev, &file_desc);

	/* Load and relocate into CBMEM. */
	blob_size = region_device_sz(&rdev);
	load_addr = cbmem_add(CBMEM_ID_REFCODE, blob_size);
	if (load_addr == NULL)
		die("Could not add FSPS to CBMEM!\n");
	if (rdev_readat(&rdev, load_addr, 0, blob_size) < 0)
		die("Failed to read FSPS!\n");
	if (fsp_component_relocate((uintptr_t)load_addr, load_addr,
				   blob_size) < 0)
		die("Unable to relocate FSPS!\n");

	/* Create new region device in memory after relocation. */
	rdev_chain(&rdev, &addrspace_32bit.rdev, (uintptr_t)load_addr,
		   blob_size);

	if (fsp_validate_component(hdr, &rdev) != CB_SUCCESS)
		die("Invalid FSPS header!\n");

	prog_set_area(&fsps, load_addr, blob_size);
	stage_cache_add(STAGE_REFCODE, &fsps);

	/* Signal that FSP component has been loaded. */
	prog_segment_loaded(hdr->image_base, hdr->image_size, SEG_FINAL);

	load_done = 1;
}
static void vboot_prepare(void) { int run_verification; run_verification = verification_should_run(); if (run_verification) { verstage_main(); car_set_var(vboot_executed, 1); } else if (verstage_should_load()) { struct cbfsf file; struct prog verstage = PROG_INIT(PROG_VERSTAGE, CONFIG_CBFS_PREFIX "/verstage"); printk(BIOS_DEBUG, "VBOOT: Loading verstage.\n"); /* load verstage from RO */ if (cbfs_boot_locate(&file, prog_name(&verstage), NULL)) die("failed to load verstage"); cbfs_file_data(prog_rdev(&verstage), &file); if (cbfs_prog_stage_load(&verstage)) die("failed to load verstage"); /* verify and select a slot */ prog_run(&verstage); /* This is not actually possible to hit this condition at * runtime, but this provides a hint to the compiler for dead * code elimination below. */ if (!IS_ENABLED(CONFIG_RETURN_FROM_VERSTAGE)) return; car_set_var(vboot_executed, 1); } /* * Fill in vboot cbmem objects before moving to ramstage so all * downstream users have access to vboot results. This path only * applies to platforms employing VBOOT_DYNAMIC_WORK_BUFFER because * cbmem comes online prior to vboot verification taking place. For * other platforms the vboot cbmem objects are initialized when * cbmem comes online. */ if (ENV_ROMSTAGE && IS_ENABLED(CONFIG_VBOOT_DYNAMIC_WORK_BUFFER)) { vb2_store_selected_region(); vboot_fill_handoff(); } }
/*
 * Obtain the FSP binary (from the stage cache on S3 resume, otherwise by
 * relocating it fresh and caching it), publish its FSP_INFO_HEADER, and
 * run FSP silicon init.
 */
void intel_silicon_init(void)
{
	struct prog fsp = PROG_INIT(ASSET_REFCODE, "fsp.bin");
	const int s3wake = acpi_is_wakeup_s3();

	if (s3wake) {
		printk(BIOS_DEBUG, "FSP: Loading binary from cache\n");
		stage_cache_load_stage(STAGE_REFCODE, &fsp);
	} else {
		fsp_find_and_relocate(&fsp);
		printk(BIOS_DEBUG, "FSP: Saving binary in cache\n");
		fsp_cache_save(&fsp);
	}

	/* FSP_INFO_HEADER is set as the program entry. */
	fsp_update_fih(prog_entry(&fsp));

	fsp_run_silicon_init(s3wake);
}
void run_romstage(void) { struct prog romstage = PROG_INIT(ASSET_ROMSTAGE, CONFIG_CBFS_PREFIX "/romstage"); if (prog_locate(&romstage)) goto fail; timestamp_add_now(TS_START_COPYROM); if (cbfs_prog_stage_load(&romstage)) goto fail; timestamp_add_now(TS_END_COPYROM); prog_run(&romstage); fail: if (IS_ENABLED(CONFIG_BOOTBLOCK_CONSOLE)) die("Couldn't load romstage.\n"); halt(); }
/* Entry point taken when romstage is called after a separate verstage. */ asmlinkage void *romstage_after_verstage(void) { /* Need to locate the current FSP_INFO_HEADER. The cache-as-ram * is still enabled. We can directly access work buffer here. */ FSP_INFO_HEADER *fih; struct prog fsp = PROG_INIT(PROG_REFCODE, "fsp.bin"); console_init(); if (prog_locate(&fsp)) { fih = NULL; printk(BIOS_ERR, "Unable to locate %s\n", prog_name(&fsp)); } else /* This leaks a mapping which this code assumes is benign as * the flash is memory mapped CPU's address space. */ fih = find_fsp((uintptr_t)rdev_mmap_full(prog_rdev(&fsp))); set_fih_car(fih); /* Return new stack value in ram back to assembly stub. */ return cache_as_ram_stage_main(fih); }
/* NOTE(review): fragment — the statements below are the tail of a
 * run_ramstage() variant whose beginning is outside this chunk. */
	goto fail;
	stage_cache_add(STAGE_RAMSTAGE, &ramstage);
	timestamp_add_now(TS_END_COPYRAM);
	prog_run(&ramstage);
fail:
	die("Ramstage was not loaded!\n");
}

#ifdef __RAMSTAGE__ // gc-sections should take care of this

/* Payload program handle shared by the payload_* functions below. */
static struct prog global_payload =
	PROG_INIT(ASSET_PAYLOAD, CONFIG_CBFS_PREFIX "/payload");

/* Default no-op; boards may override to copy the payload elsewhere
 * (e.g. into faster memory) before it is loaded. */
void __attribute__((weak)) mirror_payload(struct prog *payload)
{
	return;
}

/* NOTE(review): payload_load() continues past the end of this chunk —
 * only its opening statements are visible here. */
void payload_load(void)
{
	struct prog *payload = &global_payload;

	timestamp_add_now(TS_LOAD_PAYLOAD);

	if (prog_locate(payload))
		goto out;
/*
 * Initialize the Core Root of Trust for Measurement (CRTM): measure the
 * bootblock (and, depending on configuration, romstage and verstage) from
 * the read-only CBFS/FMAP region into TPM PCR TPM_CRTM_PCR, recording each
 * measurement in the pre-RAM TCPA log.
 *
 * Returns VB2_SUCCESS, or VB2_ERROR_UNKNOWN on any locate/metadata/measure
 * failure.
 */
uint32_t vboot_init_crtm(void)
{
	struct prog bootblock = PROG_INIT(PROG_BOOTBLOCK, "bootblock");
	struct prog verstage = PROG_INIT(PROG_VERSTAGE,
					 CONFIG_CBFS_PREFIX "/verstage");
	struct prog romstage = PROG_INIT(PROG_ROMSTAGE,
					 CONFIG_CBFS_PREFIX "/romstage");
	char tcpa_metadata[TCPA_PCR_HASH_NAME];

	/* Initialize TCPA PRERAM log. */
	tcpa_preram_log_clear();

	/* measure bootblock from RO */
	struct cbfsf bootblock_data;
	struct region_device bootblock_fmap;
	if (fmap_locate_area_as_rdev("BOOTBLOCK", &bootblock_fmap) == 0) {
		/* Preferred: measure the whole BOOTBLOCK FMAP region. */
		if (tpm_measure_region(&bootblock_fmap, TPM_CRTM_PCR,
				       "FMAP: BOOTBLOCK"))
			return VB2_ERROR_UNKNOWN;
	} else {
		/* Fallback: measure the bootblock file out of CBFS. */
		if (cbfs_boot_locate(&bootblock_data, prog_name(&bootblock),
				     NULL) == 0) {
			cbfs_file_data(prog_rdev(&bootblock), &bootblock_data);
			if (create_tcpa_metadata(prog_rdev(&bootblock),
						 prog_name(&bootblock),
						 tcpa_metadata) < 0)
				return VB2_ERROR_UNKNOWN;
			if (tpm_measure_region(prog_rdev(&bootblock),
					       TPM_CRTM_PCR, tcpa_metadata))
				return VB2_ERROR_UNKNOWN;
		} else {
			printk(BIOS_INFO,
			       "VBOOT: Couldn't measure bootblock into CRTM!\n");
			return VB2_ERROR_UNKNOWN;
		}
	}

	if (CONFIG(VBOOT_STARTS_IN_ROMSTAGE)) {
		struct cbfsf romstage_data;
		/* measure romstage from RO */
		if (cbfs_boot_locate(&romstage_data, prog_name(&romstage),
				     NULL) == 0) {
			cbfs_file_data(prog_rdev(&romstage), &romstage_data);
			if (create_tcpa_metadata(prog_rdev(&romstage),
						 prog_name(&romstage),
						 tcpa_metadata) < 0)
				return VB2_ERROR_UNKNOWN;
			if (tpm_measure_region(prog_rdev(&romstage),
					       TPM_CRTM_PCR, tcpa_metadata))
				return VB2_ERROR_UNKNOWN;
		} else {
			printk(BIOS_INFO,
			       "VBOOT: Couldn't measure %s into CRTM!\n",
			       CONFIG_CBFS_PREFIX "/romstage");
			return VB2_ERROR_UNKNOWN;
		}
	}

	if (CONFIG(VBOOT_SEPARATE_VERSTAGE)) {
		struct cbfsf verstage_data;
		/* measure verstage from RO */
		if (cbfs_boot_locate(&verstage_data, prog_name(&verstage),
				     NULL) == 0) {
			cbfs_file_data(prog_rdev(&verstage), &verstage_data);
			if (create_tcpa_metadata(prog_rdev(&verstage),
						 prog_name(&verstage),
						 tcpa_metadata) < 0)
				return VB2_ERROR_UNKNOWN;
			if (tpm_measure_region(prog_rdev(&verstage),
					       TPM_CRTM_PCR, tcpa_metadata))
				return VB2_ERROR_UNKNOWN;
		} else {
			printk(BIOS_INFO,
			       "VBOOT: Couldn't measure %s into CRTM!\n",
			       CONFIG_CBFS_PREFIX "/verstage");
			return VB2_ERROR_UNKNOWN;
		}
	}

	return VB2_SUCCESS;
}
/*
 * Drive vboot verification for this stage: either run verification
 * inline (saving the recovery reason and, on x86 Chrome EC boards,
 * rebooting the EC early to avoid a double memory retrain), or load and
 * run a separate verstage; finally publish vboot results into cbmem on
 * platforms where cbmem is already available.
 */
static void vboot_prepare(void)
{
	if (verification_should_run()) {
		/* Note: this path is not used for VBOOT_RETURN_FROM_VERSTAGE */
		verstage_main();
		car_set_var(vboot_executed, 1);
		vb2_save_recovery_reason_vbnv();

		/*
		 * Avoid double memory retrain when the EC is running RW code
		 * and a recovery request came in through an EC host event. The
		 * double retrain happens because the EC won't be rebooted
		 * until kernel verification notices the EC isn't running RO
		 * code which is after memory training. Therefore, reboot the
		 * EC after we've saved the potential recovery request so it's
		 * not lost. Lastly, only perform this sequence on x86
		 * platforms since those are the ones that currently do a
		 * costly memory training in recovery mode.
		 */
		if (IS_ENABLED(CONFIG_EC_GOOGLE_CHROMEEC) &&
		    IS_ENABLED(CONFIG_ARCH_X86))
			google_chromeec_early_init();

	} else if (verstage_should_load()) {
		struct cbfsf file;
		struct prog verstage =
			PROG_INIT(PROG_VERSTAGE,
				  CONFIG_CBFS_PREFIX "/verstage");

		printk(BIOS_DEBUG, "VBOOT: Loading verstage.\n");

		/* load verstage from RO */
		if (cbfs_boot_locate(&file, prog_name(&verstage), NULL))
			die("failed to load verstage");
		cbfs_file_data(prog_rdev(&verstage), &file);
		if (cbfs_prog_stage_load(&verstage))
			die("failed to load verstage");

		/* verify and select a slot */
		prog_run(&verstage);

		/* This is not actually possible to hit this condition at
		 * runtime, but this provides a hint to the compiler for dead
		 * code elimination below. */
		if (!IS_ENABLED(CONFIG_VBOOT_RETURN_FROM_VERSTAGE))
			return;

		car_set_var(vboot_executed, 1);
	}

	/*
	 * Fill in vboot cbmem objects before moving to ramstage so all
	 * downstream users have access to vboot results. This path only
	 * applies to platforms employing VBOOT_STARTS_IN_ROMSTAGE because
	 * cbmem comes online prior to vboot verification taking place. For
	 * other platforms the vboot cbmem objects are initialized when
	 * cbmem comes online.
	 */
	if (ENV_ROMSTAGE && IS_ENABLED(CONFIG_VBOOT_STARTS_IN_ROMSTAGE)) {
		vb2_store_selected_region();
		vboot_fill_handoff();
	}
}