static int cbfs_master_header_props(struct cbfs_props *props) { struct cbfs_header header; int32_t offset; const struct region_device *bdev; bdev = boot_device_ro(); rdev_readat(bdev, &offset, CONFIG_ROM_SIZE - sizeof(offset), sizeof(offset)); /* The offset is relative to the end of the media. */ offset += CONFIG_ROM_SIZE; rdev_readat(bdev, &header , offset, sizeof(header)); header.magic = ntohl(header.magic); header.romsize = ntohl(header.romsize); header.bootblocksize = ntohl(header.bootblocksize); header.offset = ntohl(header.offset); if (header.magic != CBFS_HEADER_MAGIC) return -1; props->offset = header.offset; if (CONFIG_ROM_SIZE != header.romsize) props->size = CONFIG_ROM_SIZE; else props->size = header.romsize; props->size -= props->offset; props->size -= header.bootblocksize; props->size = ALIGN_DOWN(props->size, 64); return 0; }
size_t cbfs_load_and_decompress(const struct region_device *rdev, size_t offset, size_t in_size, void *buffer, size_t buffer_size, uint32_t compression) { size_t out_size; switch (compression) { case CBFS_COMPRESS_NONE: if (buffer_size < in_size) return 0; if (rdev_readat(rdev, buffer, offset, in_size) != in_size) return 0; return in_size; case CBFS_COMPRESS_LZ4: if ((ENV_BOOTBLOCK || ENV_VERSTAGE) && !IS_ENABLED(CONFIG_COMPRESS_PRERAM_STAGES)) return 0; /* Load the compressed image to the end of the available memory * area for in-place decompression. It is the responsibility of * the caller to ensure that buffer_size is large enough * (see compression.h, guaranteed by cbfstool for stages). */ void *compr_start = buffer + buffer_size - in_size; if (rdev_readat(rdev, compr_start, offset, in_size) != in_size) return 0; timestamp_add_now(TS_START_ULZ4F); out_size = ulz4fn(compr_start, in_size, buffer, buffer_size); timestamp_add_now(TS_END_ULZ4F); return out_size; case CBFS_COMPRESS_LZMA: if (ENV_BOOTBLOCK || ENV_VERSTAGE) return 0; if ((ENV_ROMSTAGE || ENV_POSTCAR) && !IS_ENABLED(CONFIG_COMPRESS_RAMSTAGE)) return 0; void *map = rdev_mmap(rdev, offset, in_size); if (map == NULL) return 0; /* Note: timestamp not useful for memory-mapped media (x86) */ timestamp_add_now(TS_START_ULZMA); out_size = ulzman(map, in_size, buffer, buffer_size); timestamp_add_now(TS_END_ULZMA); rdev_munmap(rdev, map); return out_size; default: return 0; } }
/* returns the size of data in a VPD 2.0 formatted fmap region, or 0 */
static int32_t get_vpd_size(const char *fmap_name, int32_t *base)
{
	struct google_vpd_info info;
	struct region_device vpd;
	int32_t size;

	if (fmap_locate_area_as_rdev(fmap_name, &vpd)) {
		printk(BIOS_ERR, "%s: No %s FMAP section.\n", __func__,
			fmap_name);
		return 0;
	}

	size = region_device_sz(&vpd);

	/* Region must at least hold the VPD 2.0 header; re-chain so reads
	   below are relative to the VPD 2.0 payload. */
	if ((size < GOOGLE_VPD_2_0_OFFSET + sizeof(info)) ||
	    rdev_chain(&vpd, &vpd, GOOGLE_VPD_2_0_OFFSET,
			size - GOOGLE_VPD_2_0_OFFSET)) {
		printk(BIOS_ERR, "%s: Too small (%d) for Google VPD 2.0.\n",
		       __func__, size);
		return 0;
	}

	/* Try if we can find a google_vpd_info, otherwise read whole VPD. */
	if (rdev_readat(&vpd, &info, *base, sizeof(info)) != sizeof(info)) {
		/* Previously a failed read fell through and reported the
		   whole unreadable region as VPD data. */
		printk(BIOS_ERR, "ERROR: Failed to read %s header.\n",
		       fmap_name);
		return 0;
	}

	if (memcmp(info.header.magic, VPD_INFO_MAGIC,
		   sizeof(info.header.magic)) == 0 &&
	    size >= info.size + sizeof(info)) {
		/* Valid info header: data follows it; move caller's base
		   past the header. */
		*base += sizeof(info);
		size = info.size;
	} else {
		/* No info header: treat the whole 2.0 payload as data. */
		size -= GOOGLE_VPD_2_0_OFFSET;
	}

	return size;
}
int fsp_relocate(struct prog *fsp_relocd, const struct region_device *fsp_src) { void *new_loc; void *fih; ssize_t fih_offset; size_t size = region_device_sz(fsp_src); new_loc = cbmem_add(CBMEM_ID_REFCODE, size); if (new_loc == NULL) { printk(BIOS_ERR, "ERROR: Unable to load FSP into memory.\n"); return -1; } if (rdev_readat(fsp_src, new_loc, 0, size) != size) { printk(BIOS_ERR, "ERROR: Can't read FSP's region device.\n"); return -1; } fih_offset = fsp1_1_relocate((uintptr_t)new_loc, new_loc, size); if (fih_offset <= 0) { printk(BIOS_ERR, "ERROR: FSP relocation faiulre.\n"); return -1; } fih = (void *)((uint8_t *)new_loc + fih_offset); prog_set_area(fsp_relocd, new_loc, size); prog_set_entry(fsp_relocd, fih, NULL); return 0; }
/*
 * vboot callback: read `size` bytes at `offset` from the named firmware
 * resource into `buf`. Maps the resource index onto an FMAP region name,
 * then reads through the region device layer.
 */
int vb2ex_read_resource(struct vb2_context *ctx,
			enum vb2_resource_index index,
			uint32_t offset, void *buf, uint32_t size)
{
	struct region_device rdev;
	const char *region_name;

	if (index == VB2_RES_GBB) {
		region_name = "GBB";
	} else if (index == VB2_RES_FW_VBLOCK) {
		/* Pick the vblock matching the active slot. */
		region_name = is_slot_a(ctx) ? "VBLOCK_A" : "VBLOCK_B";
	} else {
		return VB2_ERROR_EX_READ_RESOURCE_INDEX;
	}

	if (vboot_named_region_device(region_name, &rdev))
		return VB2_ERROR_EX_READ_RESOURCE_SIZE;

	if (rdev_readat(&rdev, buf, offset, size) != size)
		return VB2_ERROR_EX_READ_RESOURCE_SIZE;

	return VB2_SUCCESS;
}
/*
 * Validate the event header and data at the given offset in the mirror
 * buffer. Returns the event's length when it is valid, 0 otherwise.
 */
static size_t elog_is_event_valid(size_t offset)
{
	struct event_header *event;
	uint8_t event_len;
	uint8_t csum;

	/* Pull just the length byte out of the header. */
	if (rdev_readat(mirror_dev_get(), &event_len,
			offset + offsetof(struct event_header, length),
			sizeof(event_len)) < 0)
		return 0;

	/* Event length must be at least header size + checksum, and must
	   not exceed the maximum event size. */
	if (event_len < sizeof(*event) + sizeof(csum) ||
	    event_len > MAX_EVENT_SIZE)
		return 0;

	event = elog_get_event_buffer(offset, event_len);
	if (event == NULL)
		return 0;

	/* A non-zero checksum over the whole event marks corruption. */
	csum = elog_checksum_event(event);
	elog_put_event_buffer(event);

	return (csum == 0) ? event_len : 0;
}
/*
 * Load the FSP-S component into CBMEM, relocate it there and validate its
 * header. On S3 resume (with the stage cache enabled) the previously
 * relocated copy is restored from the stage cache instead of re-reading
 * CBFS. Any failure is fatal via die(); a static flag makes repeated calls
 * a no-op after the first success.
 */
void fsps_load(bool s3wake)
{
	struct fsp_header *hdr = &fsps_hdr;
	struct cbfsf file_desc;
	struct region_device rdev;
	const char *name = CONFIG_FSP_S_CBFS;
	void *dest;
	size_t size;
	struct prog fsps = PROG_INIT(PROG_REFCODE, name);
	static int load_done;	/* set once loading and validation succeed */

	if (load_done)
		return;

	/* On resume, restore the already-relocated component from the
	   stage cache (unless the cache is configured out). */
	if (s3wake && !IS_ENABLED(CONFIG_NO_STAGE_CACHE)) {
		printk(BIOS_DEBUG, "Loading FSPS from stage_cache\n");
		stage_cache_load_stage(STAGE_REFCODE, &fsps);
		if (fsp_validate_component(hdr, prog_rdev(&fsps)) != CB_SUCCESS)
			die("On resume fsps header is invalid\n");
		load_done = 1;
		return;
	}

	if (cbfs_boot_locate(&file_desc, name, NULL)) {
		printk(BIOS_ERR, "Could not locate %s in CBFS\n", name);
		die("FSPS not available!\n");
	}

	cbfs_file_data(&rdev, &file_desc);

	/* Load and relocate into CBMEM. */
	size = region_device_sz(&rdev);
	dest = cbmem_add(CBMEM_ID_REFCODE, size);

	if (dest == NULL)
		die("Could not add FSPS to CBMEM!\n");

	/* NOTE(review): only a negative return is treated as failure here;
	   a short read would be accepted — confirm rdev_readat cannot
	   return short for this device. */
	if (rdev_readat(&rdev, dest, 0, size) < 0)
		die("Failed to read FSPS!\n");

	if (fsp_component_relocate((uintptr_t)dest, dest, size) < 0)
		die("Unable to relocate FSPS!\n");

	/* Create new region device in memory after relocation. */
	rdev_chain(&rdev, &addrspace_32bit.rdev, (uintptr_t)dest, size);

	if (fsp_validate_component(hdr, &rdev) != CB_SUCCESS)
		die("Invalid FSPS header!\n");

	prog_set_area(&fsps, dest, size);

	/* Cache the relocated image so a future S3 resume skips this work. */
	stage_cache_add(STAGE_REFCODE, &fsps);

	/* Signal that FSP component has been loaded. */
	prog_segment_loaded(hdr->image_base, hdr->image_size, SEG_FINAL);

	load_done = 1;
}
/* here is a simple SPI debug test, known to fid trouble */ static void simple_spi_test(void) { const struct region_device *boot_dev; int i, amt = 4 * MiB, errors = 0; //u32 *data = (void *)0x40000000; u32 data[1024]; u32 in; boot_device_init(); boot_dev = boot_device_ro(); amt = sizeof(data); if (boot_dev == NULL) { printk(BIOS_SPEW, "Failed to initialize default media.\n"); return; } if (rdev_readat(boot_dev, data, 0, amt) < amt) { printk(BIOS_SPEW, "simple_spi_test fails\n"); return; } for(i = 0; i < amt; i += 4){ if (rdev_readat(boot_dev, &in, i, 4) < 4) { printk(BIOS_SPEW, "simple_spi_test fails at %d\n", i); return; } if (data[i/4] != in){ errors++; printk(BIOS_SPEW, "BAD at %d(%p):\nRAM %08lx\nSPI %08lx\n", i, &data[i/4], (unsigned long)data[i/4], (unsigned long)in); /* reread it to see which is wrong. */ if (rdev_readat(boot_dev, &in, i, 4) < 4) { printk(BIOS_SPEW, "simple_spi_test fails at %d\n", i); return; } printk(BIOS_SPEW, "RTRY at %d(%p):\nRAM %08lx\nSPI %08lx\n", i, &data[i/4], (unsigned long)data[i/4], (unsigned long)in); } } printk(BIOS_SPEW, "%d errors\n", errors); }
/* CBFS media read callback backed by a region device (stored in the
 * media context). Returns the number of bytes actually read, or 0 on
 * failure. */
static size_t emu_rom_read(struct cbfs_media *media, void *dest, size_t offset,
			   size_t count)
{
	const struct region_device *boot_dev;
	ssize_t nread;

	boot_dev = media->context;

	nread = rdev_readat(boot_dev, dest, offset, count);
	if (nread < 0)
		return 0;

	/* Report how much was actually read; the original returned the
	   full requested count even on a short read. */
	return nread;
}
int rmodule_stage_load(struct rmod_stage_load *rsl) { struct rmodule rmod_stage; size_t region_size; char *stage_region; int rmodule_offset; int load_offset; struct cbfs_stage stage; void *rmod_loc; struct region_device *fh; if (rsl->prog == NULL || prog_name(rsl->prog) == NULL) return -1; fh = prog_rdev(rsl->prog); if (rdev_readat(fh, &stage, 0, sizeof(stage)) != sizeof(stage)) return -1; rmodule_offset = rmodule_calc_region(DYN_CBMEM_ALIGN_SIZE, stage.memlen, ®ion_size, &load_offset); stage_region = cbmem_add(rsl->cbmem_id, region_size); if (stage_region == NULL) return -1; rmod_loc = &stage_region[rmodule_offset]; printk(BIOS_INFO, "Decompressing stage %s @ 0x%p (%d bytes)\n", prog_name(rsl->prog), rmod_loc, stage.memlen); if (!cbfs_load_and_decompress(fh, sizeof(stage), stage.len, rmod_loc, stage.memlen, stage.compression)) return -1; if (rmodule_parse(rmod_loc, &rmod_stage)) return -1; if (rmodule_load(&stage_region[load_offset], &rmod_stage)) return -1; prog_set_area(rsl->prog, rmod_stage.location, rmodule_memory_size(&rmod_stage)); prog_set_entry(rsl->prog, rmodule_entry(&rmod_stage), NULL); /* Allow caller to pick up parameters, if available. */ rsl->params = rmodule_parameters(&rmod_stage); return 0; }
int tegra210_run_mtc(void) { ssize_t nread; struct region_device fh; struct cbfsf mtc_file; void *const mtc = (void *)(uintptr_t)CONFIG_MTC_ADDRESS; void *dvfs_table; size_t (*mtc_fw)(void **dvfs_table) = (void *)mtc; if (cbfs_boot_locate(&mtc_file, "tegra_mtc.bin", NULL)) { printk(BIOS_ERR, "MTC file not found: tegra_mtc.bin\n"); return -1; } cbfs_file_data(&fh, &mtc_file); /* Read MTC file into predefined region. */ nread = rdev_readat(&fh, mtc, 0, region_device_sz(&fh)); if (nread != region_device_sz(&fh)) { printk(BIOS_ERR, "MTC bytes read (%zu) != file length(%zu)!\n", nread, region_device_sz(&fh)); return -1; } printk(BIOS_INFO, "MTC: %zu bytes loaded @ %p\n", nread, mtc); mtc_table_size = (*mtc_fw)(&dvfs_table); if ((mtc_table_size == 0) || (mtc_table_size > MTC_TABLE_MAX_SIZE)) { printk(BIOS_ERR, "MTC Training table size is invalid.!\n"); return -1; } printk(BIOS_INFO, "MTC: Done. Entries size 0x%zx located at %p\n", mtc_table_size, dvfs_table); void *cbmem_tab = cbmem_add(CBMEM_ID_MTC, mtc_table_size); if (cbmem_tab == NULL) { printk(BIOS_ERR, "MTC table allocation in cbmem failed!\n"); return -1; } memcpy(cbmem_tab, dvfs_table, mtc_table_size); printk(BIOS_INFO, "MTC: Copied 0x%zx bytes from %p to %p\n", mtc_table_size, dvfs_table, cbmem_tab); return 0; }
/* * Scan the event area and validate each entry and update the ELOG state. */ static int elog_update_event_buffer_state(void) { size_t offset = elog_events_start(); elog_debug("elog_update_event_buffer_state()\n"); /* Go through each event and validate it */ while (1) { uint8_t type; const size_t type_offset = offsetof(struct event_header, type); size_t len; const size_t size = sizeof(type); if (rdev_readat(mirror_dev_get(), &type, offset + type_offset, size) < 0) { return -1; } /* The end of the event marker has been found */ if (type == ELOG_TYPE_EOL) break; /* Validate the event */ len = elog_is_event_valid(offset); if (!len) { printk(BIOS_ERR, "ELOG: Invalid event @ offset 0x%zx\n", offset); return -1; } /* Move to the next event */ elog_tandem_increment_last_write(len); offset += len; } /* Ensure the remaining buffer is empty */ if (!elog_is_buffer_clear(offset)) { printk(BIOS_ERR, "ELOG: buffer not cleared from 0x%zx\n", offset); return -1; } return 0; }
/* returns the size of data in a VPD 2.0 formatted fmap region, or 0 */
static int32_t get_vpd_size(const char *fmap_name, int32_t *base)
{
	struct google_vpd_info info;
	struct region_device vpd;
	int32_t size;

	if (fmap_locate_area_as_rdev(fmap_name, &vpd)) {
		printk(BIOS_ERR, "%s: No %s FMAP section.\n", __func__,
			fmap_name);
		return 0;
	}

	size = region_device_sz(&vpd);

	/* The region must at least hold the VPD 2.0 header; re-chain the
	   device so reads below are relative to the 2.0 payload. Note that
	   'size' still holds the size of the whole FMAP region. */
	if ((size < GOOGLE_VPD_2_0_OFFSET + sizeof(info)) ||
	    rdev_chain(&vpd, &vpd, GOOGLE_VPD_2_0_OFFSET,
			size - GOOGLE_VPD_2_0_OFFSET)) {
		printk(BIOS_ERR, "%s: Too small (%d) for Google VPD 2.0.\n",
		       __func__, size);
		return 0;
	}

	/* Try if we can find a google_vpd_info, otherwise read whole VPD. */
	if (rdev_readat(&vpd, &info, *base, sizeof(info)) != sizeof(info)) {
		printk(BIOS_ERR, "ERROR: Failed to read %s header.\n",
		       fmap_name);
		return 0;
	}

	if (memcmp(info.header.magic, VPD_INFO_MAGIC,
		   sizeof(info.header.magic)) == 0 &&
	    size >= info.size + sizeof(info)) {
		/* Valid info header: the data follows it, so advance the
		   caller's base offset past the header and report the size
		   recorded in it. */
		*base += sizeof(info);
		size = info.size;
	} else if (info.header.tlv.type == VPD_TYPE_TERMINATOR ||
		   info.header.tlv.type == VPD_TYPE_IMPLICIT_TERMINATOR) {
		/* The very first entry is already a terminator: the region
		   is present but holds no data. */
		printk(BIOS_WARNING,
		       "WARNING: %s is uninitialized or empty.\n", fmap_name);
		size = 0;
	} else {
		/* No info header: treat the whole 2.0 payload as raw
		   VPD data. */
		size -= GOOGLE_VPD_2_0_OFFSET;
	}

	return size;
}
enum fsp_status fsp_silicon_init(void) { struct fsp_header *hdr = &fsps_hdr; struct cbfsf file_desc; struct region_device rdev; const char *name = CONFIG_FSP_S_CBFS; void *dest; size_t size; if (cbfs_boot_locate(&file_desc, name, NULL)) { printk(BIOS_ERR, "Could not locate %s in CBFS\n", name); return FSP_NOT_FOUND; } cbfs_file_data(&rdev, &file_desc); /* Load and relocate into CBMEM. */ size = region_device_sz(&rdev); dest = cbmem_add(CBMEM_ID_REFCODE, size); if (dest == NULL) { printk(BIOS_ERR, "Could not add FSPS to CBMEM.\n"); return FSP_NOT_FOUND; } if (rdev_readat(&rdev, dest, 0, size) < 0) return FSP_NOT_FOUND; if (fsp_component_relocate((uintptr_t)dest, dest, size) < 0) { printk(BIOS_ERR, "Unable to relocate FSPS.\n"); return FSP_NOT_FOUND; } /* Create new region device in memory after relocation. */ rdev_chain(&rdev, &addrspace_32bit.rdev, (uintptr_t)dest, size); if (fsp_validate_component(hdr, &rdev) != CB_SUCCESS) return FSP_NOT_FOUND; /* Signal that FSP component has been loaded. */ prog_segment_loaded(hdr->image_base, hdr->image_size, SEG_FINAL); return do_silicon_init(hdr); }
/* Load the binary into the memory specified by the info header. */ static enum cb_err load_fspm_mem(struct fsp_header *hdr, const struct region_device *rdev, const struct memranges *memmap) { uintptr_t fspm_begin; uintptr_t fspm_end; if (fsp_validate_component(hdr, rdev) != CB_SUCCESS) return CB_ERR; fspm_begin = hdr->image_base; fspm_end = fspm_begin + hdr->image_size; if (check_region_overlap(memmap, "FSPM", fspm_begin, fspm_end) != CB_SUCCESS) return CB_ERR; /* Load binary into memory at provided address. */ if (rdev_readat(rdev, (void *)fspm_begin, 0, fspm_end - fspm_begin) < 0) return CB_ERR; return CB_SUCCESS; }
static int elog_scan_flash(void) { elog_debug("elog_scan_flash()\n"); void *mirror_buffer; const struct region_device *rdev = mirror_dev_get(); size_t size = region_device_sz(&nv_dev); /* Fill memory buffer by reading from SPI */ mirror_buffer = rdev_mmap_full(rdev); if (rdev_readat(&nv_dev, mirror_buffer, 0, size) != size) { rdev_munmap(rdev, mirror_buffer); printk(BIOS_ERR, "ELOG: NV read failure.\n"); return -1; } rdev_munmap(rdev, mirror_buffer); /* No writes have been done yet. */ elog_tandem_reset_last_write(); /* Check if the area is empty or not */ if (elog_is_buffer_clear(0)) { printk(BIOS_ERR, "ELOG: NV Buffer Cleared.\n"); return -1; } /* Indicate that header possibly written. */ elog_tandem_increment_last_write(elog_events_start()); /* Validate the header */ if (!elog_is_header_valid()) { printk(BIOS_ERR, "ELOG: NV Buffer Invalid.\n"); return -1; } return elog_update_event_buffer_state(); }
int ccplex_load_mts(void) { ssize_t nread; struct stopwatch sw; struct cbfsf mts_file; struct region_device fh; /* * MTS location is hard coded to this magic address. The hardware will * take the MTS from this location and place it in the final resting * place in the carveout region. */ void * const mts = (void *)(uintptr_t)MTS_LOAD_ADDRESS; stopwatch_init(&sw); if (cbfs_boot_locate(&mts_file, MTS_FILE_NAME, NULL)) { printk(BIOS_DEBUG, "MTS file not found: %s\n", MTS_FILE_NAME); return -1; } cbfs_file_data(&fh, &mts_file); /* Read MTS file into the carveout region. */ nread = rdev_readat(&fh, mts, 0, region_device_sz(&fh)); if (nread != region_device_sz(&fh)) { printk(BIOS_DEBUG, "MTS bytes read (%zu) != file length(%u)!\n", nread, region_device_sz(&fh)); return -1; } printk(BIOS_DEBUG, "MTS: %zu bytes loaded @ %p in %ld usecs.\n", nread, mts, stopwatch_duration_usecs(&sw)); return ccplex_start(); }
/*
 * Copy the RO and RW VPD data out of their FMAP regions into a single
 * CBMEM blob (header followed by RO data, then RW data). Sizes of zero in
 * the resulting header mean the corresponding copy was absent or failed.
 */
static void cbmem_add_cros_vpd(int is_recovery)
{
	struct region_device vpd;
	struct vpd_cbmem *cbmem;
	int32_t ro_vpd_base = 0, rw_vpd_base = 0;
	int32_t ro_vpd_size, rw_vpd_size;

	timestamp_add_now(TS_START_COPYVPD);

	/* get_vpd_size() also adjusts each base offset to point at the
	   start of the data (past a google_vpd_info header, if present). */
	ro_vpd_size = get_vpd_size("RO_VPD", &ro_vpd_base);
	rw_vpd_size = get_vpd_size("RW_VPD", &rw_vpd_base);

	/* no VPD at all? nothing to do then */
	if ((ro_vpd_size == 0) && (rw_vpd_size == 0))
		return;

	/* Single allocation sized for both copies; RO data is placed
	   first in cbmem->blob, RW data immediately after it. */
	cbmem = cbmem_add(CBMEM_ID_VPD, sizeof(*cbmem) + ro_vpd_size +
		rw_vpd_size);
	if (!cbmem) {
		printk(BIOS_ERR, "%s: Failed to allocate CBMEM (%u+%u).\n",
			__func__, ro_vpd_size, rw_vpd_size);
		return;
	}

	/* Sizes start at zero and are only set after a successful copy. */
	cbmem->magic = CROSVPD_CBMEM_MAGIC;
	cbmem->version = CROSVPD_CBMEM_VERSION;
	cbmem->ro_size = 0;
	cbmem->rw_size = 0;

	if (ro_vpd_size) {
		if (fmap_locate_area_as_rdev("RO_VPD", &vpd)) {
			/* shouldn't happen, but let's be extra defensive */
			printk(BIOS_ERR, "%s: No RO_VPD FMAP section.\n",
				__func__);
			return;
		}
		/* Skip to the VPD 2.0 payload; ro_vpd_base is relative
		   to it. */
		rdev_chain(&vpd, &vpd, GOOGLE_VPD_2_0_OFFSET,
			region_device_sz(&vpd) - GOOGLE_VPD_2_0_OFFSET);
		if (rdev_readat(&vpd, cbmem->blob, ro_vpd_base,
			ro_vpd_size) == ro_vpd_size) {
			cbmem->ro_size = ro_vpd_size;
		} else {
			/* Zero the local size too so the RW copy below
			   lands at the start of the blob. */
			printk(BIOS_ERR,
				"%s: Reading RO_VPD FMAP section failed.\n",
				__func__);
			ro_vpd_size = 0;
		}
		timestamp_add_now(TS_END_COPYVPD_RO);
	}

	if (rw_vpd_size) {
		if (fmap_locate_area_as_rdev("RW_VPD", &vpd)) {
			/* shouldn't happen, but let's be extra defensive */
			printk(BIOS_ERR, "%s: No RW_VPD FMAP section.\n",
				__func__);
			return;
		}
		rdev_chain(&vpd, &vpd, GOOGLE_VPD_2_0_OFFSET,
			region_device_sz(&vpd) - GOOGLE_VPD_2_0_OFFSET);
		/* RW data goes directly after whatever RO data was
		   actually copied. */
		if (rdev_readat(&vpd, cbmem->blob + ro_vpd_size, rw_vpd_base,
			rw_vpd_size) == rw_vpd_size) {
			cbmem->rw_size = rw_vpd_size;
		} else {
			printk(BIOS_ERR,
				"%s: Reading RW_VPD FMAP section failed.\n",
				__func__);
		}
		timestamp_add_now(TS_END_COPYVPD_RW);
	}
}
/*
 * Read the RW firmware body from fw_main while feeding it block by block
 * into vboot's running hash, then verify the final digest (including RSA
 * signature verification). Returns VB2_SUCCESS, a vb2 error code, or
 * VB2_ERROR_UNKNOWN on read/chain/digest-handling failures.
 */
static int hash_body(struct vb2_context *ctx, struct region_device *fw_main)
{
	uint64_t load_ts;
	uint32_t expected_size;
	uint8_t block[TODO_BLOCK_SIZE];
	uint8_t hash_digest[VBOOT_MAX_HASH_SIZE];
	const size_t hash_digest_sz = sizeof(hash_digest);
	size_t block_size = sizeof(block);
	size_t offset;
	int rv;

	/* Clear the full digest so that any hash digests less than the
	 * max have trailing zeros. */
	memset(hash_digest, 0, hash_digest_sz);

	/*
	 * Since loading the firmware and calculating its hash is intertwined,
	 * we use this little trick to measure them separately and pretend it
	 * was first loaded and then hashed in one piece with the timestamps.
	 * (This split won't make sense with memory-mapped media like on x86.)
	 */
	load_ts = timestamp_get();
	timestamp_add(TS_START_HASH_BODY, load_ts);

	expected_size = region_device_sz(fw_main);
	offset = 0;

	/* Start the body hash */
	/* vb2api_init_hash() may shrink expected_size to the body size
	   recorded in the preamble. */
	rv = vb2api_init_hash(ctx, VB2_HASH_TAG_FW_BODY, &expected_size);
	if (rv)
		return rv;

	/*
	 * Honor vboot's RW slot size. The expected size is pulled out of
	 * the preamble and obtained through vb2api_init_hash() above. By
	 * creating sub region the RW slot portion of the boot media is
	 * limited.
	 */
	if (rdev_chain(fw_main, fw_main, 0, expected_size)) {
		printk(BIOS_ERR, "Unable to restrict CBFS size.\n");
		return VB2_ERROR_UNKNOWN;
	}

	/* Extend over the body */
	while (expected_size) {
		uint64_t temp_ts;

		/* Final block may be smaller than the buffer. */
		if (block_size > expected_size)
			block_size = expected_size;

		/* Only the media read time is attributed to "loading";
		   hashing time is accounted separately below. */
		temp_ts = timestamp_get();
		if (rdev_readat(fw_main, block, offset, block_size) < 0)
			return VB2_ERROR_UNKNOWN;
		load_ts += timestamp_get() - temp_ts;

		rv = vb2api_extend_hash(ctx, block, block_size);
		if (rv)
			return rv;

		expected_size -= block_size;
		offset += block_size;
	}

	timestamp_add(TS_DONE_LOADING, load_ts);
	timestamp_add_now(TS_DONE_HASHING);

	/* Check the result (with RSA signature verification) */
	rv = vb2api_check_hash_get_digest(ctx, hash_digest, hash_digest_sz);
	if (rv)
		return rv;

	timestamp_add_now(TS_END_HASH_BODY);

	if (handle_digest_result(hash_digest, hash_digest_sz))
		return VB2_ERROR_UNKNOWN;

	return VB2_SUCCESS;
}