void misc_tests(void) { uint8_t digest[VB2_SHA512_DIGEST_SIZE]; struct vb2_digest_context dc; /* Crypto algorithm to hash algorithm mapping */ TEST_EQ(vb2_crypto_to_hash(VB2_ALG_RSA1024_SHA1), VB2_HASH_SHA1, "Crypto map to SHA1"); TEST_EQ(vb2_crypto_to_hash(VB2_ALG_RSA2048_SHA256), VB2_HASH_SHA256, "Crypto map to SHA256"); TEST_EQ(vb2_crypto_to_hash(VB2_ALG_RSA4096_SHA256), VB2_HASH_SHA256, "Crypto map to SHA256 2"); TEST_EQ(vb2_crypto_to_hash(VB2_ALG_RSA8192_SHA512), VB2_HASH_SHA512, "Crypto map to SHA512"); TEST_EQ(vb2_crypto_to_hash(VB2_ALG_COUNT), VB2_HASH_INVALID, "Crypto map to invalid"); TEST_EQ(vb2_digest_size(VB2_HASH_INVALID), 0, "digest size invalid alg"); TEST_EQ(vb2_digest((uint8_t *)oneblock_msg, strlen(oneblock_msg), VB2_HASH_INVALID, digest, sizeof(digest)), VB2_ERROR_SHA_INIT_ALGORITHM, "vb2_digest() invalid alg"); /* Test bad algorithm inside extend and finalize */ vb2_digest_init(&dc, VB2_HASH_SHA256); dc.hash_alg = VB2_HASH_INVALID; TEST_EQ(vb2_digest_extend(&dc, digest, sizeof(digest)), VB2_ERROR_SHA_EXTEND_ALGORITHM, "vb2_digest_extend() invalid alg"); TEST_EQ(vb2_digest_finalize(&dc, digest, sizeof(digest)), VB2_ERROR_SHA_FINALIZE_ALGORITHM, "vb2_digest_finalize() invalid alg"); }
int vb2_verify_data(const uint8_t *data, uint32_t size, struct vb2_signature *sig, const struct vb2_public_key *key, const struct vb2_workbuf *wb) { struct vb2_workbuf wblocal = *wb; struct vb2_digest_context *dc; uint8_t *digest; uint32_t digest_size; int rv; if (sig->data_size > size) { VB2_DEBUG("Data buffer smaller than length of signed data.\n"); return VB2_ERROR_VDATA_NOT_ENOUGH_DATA; } /* Digest goes at start of work buffer */ digest_size = vb2_digest_size(key->hash_alg); if (!digest_size) return VB2_ERROR_VDATA_DIGEST_SIZE; digest = vb2_workbuf_alloc(&wblocal, digest_size); if (!digest) return VB2_ERROR_VDATA_WORKBUF_DIGEST; /* Hashing requires temp space for the context */ dc = vb2_workbuf_alloc(&wblocal, sizeof(*dc)); if (!dc) return VB2_ERROR_VDATA_WORKBUF_HASHING; rv = vb2_digest_init(dc, key->hash_alg); if (rv) return rv; rv = vb2_digest_extend(dc, data, sig->data_size); if (rv) return rv; rv = vb2_digest_finalize(dc, digest, digest_size); if (rv) return rv; vb2_workbuf_free(&wblocal, sizeof(*dc)); return vb2_verify_digest(key, sig, digest, &wblocal); }
static int vb2_digest(const uint8_t *buf, uint32_t size, enum vb2_hash_algorithm hash_alg, uint8_t *digest, uint32_t digest_size) { struct vb2_digest_context dc; int rv; rv = vb2_digest_init(&dc, hash_alg); if (rv) return rv; rv = vb2_digest_extend(&dc, buf, size); if (rv) return rv; return vb2_digest_finalize(&dc, digest, digest_size); }
static void reset_common_data(enum reset_type t) { struct vb2_packed_key *k; memset(workbuf, 0xaa, sizeof(workbuf)); memset(&cc, 0, sizeof(cc)); cc.workbuf = workbuf; cc.workbuf_size = sizeof(workbuf); vb2_workbuf_from_ctx(&cc, &wb); vb2_init_context(&cc); sd = vb2_get_sd(&cc); vb2_nv_init(&cc); vb2_secdatak_create(&cc); vb2_secdatak_init(&cc); vb2_secdatak_set(&cc, VB2_SECDATAK_VERSIONS, 0x20002); mock_read_res_fail_on_call = 0; mock_unpack_key_retval = VB2_SUCCESS; mock_read_gbb_header_retval = VB2_SUCCESS; mock_load_kernel_keyblock_retval = VB2_SUCCESS; mock_load_kernel_preamble_retval = VB2_SUCCESS; /* Recovery key in mock GBB */ mock_gbb.recovery_key.algorithm = 11; mock_gbb.recovery_key.key_offset = vb2_offset_of(&mock_gbb.recovery_key, &mock_gbb.recovery_key_data); mock_gbb.recovery_key.key_size = sizeof(mock_gbb.recovery_key_data); strcpy(mock_gbb.recovery_key_data, "The recovery key"); mock_gbb.h.recovery_key_offset = vb2_offset_of(&mock_gbb, &mock_gbb.recovery_key); mock_gbb.h.recovery_key_size = mock_gbb.recovery_key.key_offset + mock_gbb.recovery_key.key_size; if (t == FOR_PHASE1) { uint8_t *kdata; /* Create mock firmware preamble in the context */ sd->workbuf_preamble_offset = cc.workbuf_used; fwpre = (struct vb2_fw_preamble *) (cc.workbuf + sd->workbuf_preamble_offset); k = &fwpre->kernel_subkey; kdata = (uint8_t *)fwpre + sizeof(*fwpre); memcpy(kdata, fw_kernel_key_data, sizeof(fw_kernel_key_data)); k->algorithm = 7; k->key_offset = vb2_offset_of(k, kdata); k->key_size = sizeof(fw_kernel_key_data); sd->workbuf_preamble_size = sizeof(*fwpre) + k->key_size; cc.workbuf_used += sd->workbuf_preamble_size; } else if (t == FOR_PHASE2) { struct vb2_signature *sig; struct vb2_digest_context dc; uint8_t *sdata; /* Create mock kernel data key */ sd->workbuf_data_key_offset = cc.workbuf_used; kdkey = (struct vb2_packed_key *) (cc.workbuf + sd->workbuf_data_key_offset); kdkey->algorithm = VB2_ALG_RSA2048_SHA256; sd->workbuf_data_key_size = sizeof(*kdkey); 
cc.workbuf_used += sd->workbuf_data_key_size; /* Create mock kernel preamble in the context */ sd->workbuf_preamble_offset = cc.workbuf_used; kpre = (struct vb2_kernel_preamble *) (cc.workbuf + sd->workbuf_preamble_offset); sdata = (uint8_t *)kpre + sizeof(*kpre); sig = &kpre->body_signature; sig->data_size = sizeof(kernel_data); sig->sig_offset = vb2_offset_of(sig, sdata); sig->sig_size = VB2_SHA512_DIGEST_SIZE; vb2_digest_init(&dc, VB2_HASH_SHA256); vb2_digest_extend(&dc, (const uint8_t *)kernel_data, sizeof(kernel_data)); vb2_digest_finalize(&dc, sdata, sig->sig_size); sd->workbuf_preamble_size = sizeof(*kpre) + sig->sig_size; sd->vblock_preamble_offset = 0x10000 - sd->workbuf_preamble_size; cc.workbuf_used += sd->workbuf_preamble_size; } else { /* Set flags and versions for roll-forward */ sd->kernel_version = 0x20004; sd->kernel_version_secdatak = 0x20002; sd->flags |= VB2_SD_FLAG_KERNEL_SIGNED; cc.flags |= VB2_CONTEXT_ALLOW_KERNEL_ROLL_FORWARD; } };
/*
 * Start hashing a firmware body section identified by guid.
 *
 * Looks up the signature entry matching guid in the firmware preamble,
 * records it in shared data (hash_tag / hash_remaining_size), allocates
 * (or reuses) a digest context in the work buffer, and initializes the
 * hash — via the HW crypto engine if the preamble allows it and the
 * platform supports the algorithm, otherwise in software.
 *
 * If size is non-NULL, *size receives the number of data bytes the
 * caller is expected to feed in.
 *
 * Returns VB2_SUCCESS or a VB2_ERROR_API_INIT_HASH_* / hwcrypto error.
 */
int vb2api_init_hash2(struct vb2_context *ctx,
		      const struct vb2_guid *guid,
		      uint32_t *size)
{
	struct vb2_shared_data *sd = vb2_get_sd(ctx);
	const struct vb2_fw_preamble *pre;
	const struct vb2_signature *sig = NULL;
	struct vb2_digest_context *dc;
	struct vb2_workbuf wb;
	uint32_t hash_offset;
	int i, rv;

	vb2_workbuf_from_ctx(ctx, &wb);

	/* Get preamble pointer */
	if (!sd->workbuf_preamble_size)
		return VB2_ERROR_API_INIT_HASH_PREAMBLE;
	pre = (const struct vb2_fw_preamble *)
		(ctx->workbuf + sd->workbuf_preamble_offset);

	/*
	 * Find the matching signature.  Entries are packed sequentially
	 * starting at hash_offset; each entry's total_size advances to
	 * the next one.
	 */
	hash_offset = pre->hash_offset;
	for (i = 0; i < pre->hash_count; i++) {
		sig = (const struct vb2_signature *)
			((uint8_t *)pre + hash_offset);
		if (!memcmp(guid, &sig->guid, sizeof(*guid)))
			break;
		hash_offset += sig->c.total_size;
	}
	if (i >= pre->hash_count)
		return VB2_ERROR_API_INIT_HASH_GUID;  /* No match */

	/*
	 * Allocate workbuf space for the hash, or reuse the context a
	 * previous call already placed there (offset/size persist in
	 * shared data).
	 */
	if (sd->workbuf_hash_size) {
		dc = (struct vb2_digest_context *)
			(ctx->workbuf + sd->workbuf_hash_offset);
	} else {
		uint32_t dig_size = sizeof(*dc);

		dc = vb2_workbuf_alloc(&wb, dig_size);
		if (!dc)
			return VB2_ERROR_API_INIT_HASH_WORKBUF;

		sd->workbuf_hash_offset = vb2_offset_of(ctx->workbuf, dc);
		sd->workbuf_hash_size = dig_size;
		/* Persist the allocation past this call */
		ctx->workbuf_used = sd->workbuf_hash_offset + dig_size;
	}

	/* Remember which signature we're hashing and how much remains */
	sd->hash_tag = vb2_offset_of(ctx->workbuf, sig);
	sd->hash_remaining_size = sig->data_size;

	if (size)
		*size = sig->data_size;

	if (!(pre->flags & VB2_FIRMWARE_PREAMBLE_DISALLOW_HWCRYPTO)) {
		/* Try the HW engine first; fall back to SW only if the
		   platform reports the algorithm as unsupported. */
		rv = vb2ex_hwcrypto_digest_init(sig->hash_alg,
						sig->data_size);
		if (!rv) {
			VB2_DEBUG("Using HW crypto engine for hash_alg %d\n",
				  sig->hash_alg);
			dc->hash_alg = sig->hash_alg;
			dc->using_hwcrypto = 1;
			return VB2_SUCCESS;
		}
		if (rv != VB2_ERROR_EX_HWCRYPTO_UNSUPPORTED)
			return rv;  /* Hard hwcrypto failure: propagate */
		VB2_DEBUG("HW crypto for hash_alg %d not supported, using SW\n",
			  sig->hash_alg);
	} else {
		VB2_DEBUG("HW crypto forbidden by preamble, using SW\n");
	}

	return vb2_digest_init(dc, sig->hash_alg);
}