inline void cm_init(struct count_min_t *cm) { memset(cm, 0, sizeof(struct count_min_t)); for(size_t i = 0; i < CM_DEPTH; i++) { sgx_read_rand((unsigned char *)&cm->a[i], sizeof(cm->a[i])); sgx_read_rand((unsigned char *)&cm->b[i], sizeof(cm->b[i])); cm->a[i] = cm->a[i] % CM_PRIME; cm->b[i] = cm->b[i] % CM_PRIME; } }
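// Usage sketch (added for illustration, not from the original source): cm_init() above seeds
// per-row coefficients for a pairwise-independent hash h_i(x) = ((a_i*x + b_i) mod CM_PRIME) mod width,
// the standard count-min construction. The counter table and width names below (cm->counts, CM_WIDTH)
// are assumptions; only a[], b[], CM_DEPTH and CM_PRIME appear in cm_init() itself.
static inline void cm_update_example(struct count_min_t *cm, uint64_t item, uint32_t inc)
{
    for (size_t i = 0; i < CM_DEPTH; i++) {
        // row hash from the randomly seeded (a, b) pair (overflow ignored for brevity)
        uint64_t h = (cm->a[i] * item + cm->b[i]) % CM_PRIME;
        cm->counts[i][h % CM_WIDTH] += inc;   // hypothetical per-row counter array
    }
}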
uint32_t aes128gcm_encrypt(const sgx_aes_gcm_128bit_key_t *key, const uint8_t *bufin, const size_t bufinlen, uint8_t *bufout, size_t bufoutlen) { // check buffer bounds if(bufoutlen < aes128gcm_ciphertext_size(bufinlen)) { return 0xffffffff; } // source random IV from rdrand if(sgx_read_rand(bufout, SGX_AESGCM_IV_SIZE) != SGX_SUCCESS) { return 0xffffffff; } // encrypt if(SGX_SUCCESS != sgx_rijndael128GCM_encrypt(key, bufin, bufinlen, // plaintext bufout + SGX_AESGCM_IV_SIZE + SGX_AESGCM_MAC_SIZE, // ciphertext bufout, SGX_AESGCM_IV_SIZE, // iv NULL, 0, // aad (sgx_aes_gcm_128bit_tag_t*) (bufout + SGX_AESGCM_IV_SIZE) // mac )) { return 0xffffffff; } return 0; }
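// Caller sketch (illustrative, not part of the original file): the output buffer must hold
// IV || MAC || ciphertext, i.e. at least aes128gcm_ciphertext_size(msg_len) bytes. The key is
// generated ad hoc with sgx_read_rand purely for the example; a real caller would use a
// provisioned or derived key.
static sgx_status_t example_encrypt_blob(const uint8_t *msg, size_t msg_len,
                                         uint8_t *out, size_t out_len)
{
    sgx_aes_gcm_128bit_key_t key;
    if (sgx_read_rand((uint8_t *)&key, sizeof(key)) != SGX_SUCCESS)
        return SGX_ERROR_UNEXPECTED;
    uint32_t rc = aes128gcm_encrypt(&key, msg, msg_len, out, out_len);
    memset_s(&key, sizeof(key), 0, sizeof(key));   // wipe the ephemeral key
    return (rc == 0) ? SGX_SUCCESS : SGX_ERROR_UNEXPECTED;
}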
//Function to generate End Point Selection id and XID for end point selection msg1 //@es_selector, output XID and SelectorID //@return PVEC_SUCCESS on success or error code otherwise pve_status_t gen_es_msg1_data(gen_endpoint_selection_output_t *es_selector) { //randomly generate xid pve_status_t ret = se_read_rand_error_to_pve_error(sgx_read_rand(es_selector->xid, XID_SIZE)); if(ret != PVEC_SUCCESS) return ret; //generate selector id which is hash value of Provisioning Base Key return gen_es_selector_id(&es_selector->selector_id); }
// sgx_register_exception_handler() // register a custom exception handler // Parameter // is_first_handler - the order in which the handler should be called. // if the parameter is nonzero, the handler is the first handler to be called. // if the parameter is zero, the handler is the last handler to be called. // exception_handler - a pointer to the handler to be called. // Return Value // handler - success // NULL - fail void *sgx_register_exception_handler(int is_first_handler, sgx_exception_handler_t exception_handler) { // initialize g_veh_cookie for the first time sgx_register_exception_handler is called. if(unlikely(g_veh_cookie == 0)) { uintptr_t rand = 0; do { if(SGX_SUCCESS != sgx_read_rand((unsigned char *)&rand, sizeof(rand))) { return NULL; } } while(rand == 0); sgx_spin_lock(&g_handler_lock); if(g_veh_cookie == 0) { g_veh_cookie = rand; } sgx_spin_unlock(&g_handler_lock); } if(!sgx_is_within_enclave((const void*)exception_handler, 0)) { return NULL; } handler_node_t *node = (handler_node_t *)malloc(sizeof(handler_node_t)); if(!node) { return NULL; } node->callback = ENC_VEH_POINTER(exception_handler); // write lock sgx_spin_lock(&g_handler_lock); if((g_first_node == NULL) || is_first_handler) { node->next = g_first_node; g_first_node = node; } else { handler_node_t *tmp = g_first_node; while(tmp->next != NULL) { tmp = tmp->next; } node->next = NULL; tmp->next = node; } // write unlock sgx_spin_unlock(&g_handler_lock); return node; }
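// Registration sketch (illustrative): a minimal first-chance handler that claims divide-by-zero
// faults and defers everything else. The vector constant and return values follow
// sgx_trts_exception.h as commonly documented; treat them as assumptions rather than verified here.
static int example_de_handler(sgx_exception_info_t *info)
{
    if (info->exception_vector == SGX_EXCEPTION_VECTOR_DE)
        return EXCEPTION_CONTINUE_EXECUTION;   // handled, resume the faulting thread
    return EXCEPTION_CONTINUE_SEARCH;          // let later handlers inspect the exception
}

// during enclave initialization:
//   void *h = sgx_register_exception_handler(1 /* call first */, example_de_handler);
//   if (h == NULL) { /* registration failed */ }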
//random number generation function inside pve pve_status_t pve_rng_generate( int nBits, unsigned char* pRandData) { pve_status_t ret = PVEC_SUCCESS; sgx_status_t se_ret = SGX_SUCCESS; //inside enclave, using random function from selib. It will initialize a few extra bits if nBits is not a multiple of 8 if((se_ret = sgx_read_rand(reinterpret_cast<uint8_t*>(pRandData), (nBits+7)/8))!=SGX_SUCCESS) ret = se_read_rand_error_to_pve_error(se_ret); return ret; }
// entropy collection callback for mbedtls, backed by the in-enclave sgx_read_rand int mbedtls_hardware_poll(void *data, unsigned char *output, size_t len, size_t *olen ) { (void)data; sgx_status_t st = sgx_read_rand(output, len); if (st != SGX_SUCCESS) { printf_sgx("hardware_poll fails with %d\n", st); *olen = 0; // report that no bytes were gathered on failure return -1; } else { *olen = len; return 0; } }
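// Wiring sketch (illustrative): mbedtls_hardware_poll() has the shape of an mbedtls entropy
// source, so it is either picked up automatically when the build defines
// MBEDTLS_ENTROPY_HARDWARE_ALT, or can be registered explicitly as below. The mbedtls calls are
// assumptions based on the standard mbedtls 2.x API, not on this project's glue code.
#include "mbedtls/entropy.h"

static int example_register_entropy(mbedtls_entropy_context *entropy)
{
    mbedtls_entropy_init(entropy);
    return mbedtls_entropy_add_source(entropy, mbedtls_hardware_poll, NULL,
                                      32 /* threshold bytes */,
                                      MBEDTLS_ENTROPY_SOURCE_STRONG);
}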
// hardware-backed RNG callback for mbedtls (f_rng signature), backed by the in-enclave sgx_read_rand int mbedtls_sgx_drbg_random( void *p_rng, unsigned char *output, size_t out_len ) { if (!output) { printf_sgx("mbedtls_sgx_drbg receives NULL\n"); return -1; } (void)p_rng; sgx_status_t st = sgx_read_rand(output, out_len); if (st != SGX_SUCCESS) { printf_sgx("mbedtls_sgx_drbg fails with %d\n", st); return -1; } return 0; }
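// Usage sketch (illustrative): the function above matches mbedtls' f_rng callback type
// int (*)(void *, unsigned char *, size_t), so it can be passed wherever mbedtls expects an RNG.
// The RSA calls below assume the mbedtls 2.x API; the surrounding project may differ.
#include "mbedtls/rsa.h"

static int example_gen_rsa_key(mbedtls_rsa_context *rsa)
{
    mbedtls_rsa_init(rsa, MBEDTLS_RSA_PKCS_V15, 0);
    // key generation draws its randomness from sgx_read_rand via the callback above
    return mbedtls_rsa_gen_key(rsa, mbedtls_sgx_drbg_random, NULL, 2048, 65537);
}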
//The function will try to do some preparation for piece-meal encryption of field1 in ProvMsg3 // It prepares the encryption state in msg3 //@param: structure to provide some input data to generate ProvMsg3 and also some states for piece-meal processing //@return PVEC_SUCCESS on success and error code if failed static pve_status_t proc_msg3_state_init(prov_msg3_parm_t *parm, const sgx_key_128bit_t *pwk2) { pve_status_t ret = PVEC_SUCCESS; sgx_status_t se_ret = SGX_SUCCESS; if((se_ret=sgx_read_rand(parm->iv, IV_SIZE))!=SGX_SUCCESS){//randomly generate the IV ret = se_read_rand_error_to_pve_error(se_ret); goto ret_point; } se_static_assert(SK_SIZE==sizeof(sgx_cmac_128bit_tag_t)); /*size of sgx_cmac_128bit_tag_t should be the same as the value of SK_SIZE*/ //initialize state for piece-meal encryption of field1 of ProvMsg3 ret = sgx_error_to_pve_error(sgx_aes_gcm128_enc_init((const uint8_t *)pwk2, parm->iv, IV_SIZE,//pwk2 as the key NULL, 0,//no AAD used for the encryption of EpidSignature (sgx_aes_state_handle_t*)&parm->p_msg3_state)); ret_point: return ret; }
IppStatus IPP_STDCALL sgx_ipp_DRNGen(Ipp32u* pRandBNU, int nBits, void* pCtx) { sgx_status_t sgx_ret; UNUSED(pCtx); if (0 != nBits % 8) { // Must be byte aligned return ippStsSizeErr; } if (!pRandBNU) { return ippStsNullPtrErr; } sgx_ret = sgx_read_rand((uint8_t*)pRandBNU, (uint32_t)nBits / 8); if (SGX_SUCCESS != sgx_ret) { return ippStsErr; } return ippStsNoErr; }
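// Call sketch (illustrative): nBits must be byte aligned or ippStsSizeErr is returned.
// Filling a 256-bit buffer of Ipp32u words:
static IppStatus example_fill_random_words(void)
{
    Ipp32u rnd[8];
    return sgx_ipp_DRNGen(rnd, 256, NULL);   // 256 bits == 32 bytes, byte aligned
}
// Inside the SDK this callback is handed to IPP crypto primitives that need entropy
// (e.g. prime/key generation); that wiring is project specific and not shown here.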
// TKE interface for isv enclaves sgx_status_t sgx_ra_init_ex( const sgx_ec256_public_t *p_pub_key, int b_pse, sgx_ra_derive_secret_keys_t derive_key_cb, sgx_ra_context_t *p_context) { int valid = 0; sgx_status_t ret = SGX_SUCCESS; sgx_ecc_state_handle_t ecc_state = NULL; // initialize g_kdf_cookie for the first time sgx_ra_init_ex is called. if (unlikely(g_kdf_cookie == 0)) { uintptr_t rand = 0; do { if (SGX_SUCCESS != sgx_read_rand((unsigned char *)&rand, sizeof(rand))) { return SGX_ERROR_UNEXPECTED; } } while (rand == 0); sgx_spin_lock(&g_ra_db_lock); if (g_kdf_cookie == 0) { g_kdf_cookie = rand; memset_s(&rand, sizeof(rand), 0, sizeof(rand)); } sgx_spin_unlock(&g_ra_db_lock); } if(!p_pub_key || !p_context) return SGX_ERROR_INVALID_PARAMETER; if(!sgx_is_within_enclave(p_pub_key, sizeof(sgx_ec256_public_t))) return SGX_ERROR_INVALID_PARAMETER; //derive_key_cb can be NULL if (NULL != derive_key_cb && !sgx_is_within_enclave((const void*)derive_key_cb, 0)) { return SGX_ERROR_INVALID_PARAMETER; } ret = sgx_ecc256_open_context(&ecc_state); if(SGX_SUCCESS != ret) { if(SGX_ERROR_OUT_OF_MEMORY != ret) ret = SGX_ERROR_UNEXPECTED; return ret; } ret = sgx_ecc256_check_point((const sgx_ec256_public_t *)p_pub_key, ecc_state, &valid); if(SGX_SUCCESS != ret) { if(SGX_ERROR_OUT_OF_MEMORY != ret) ret = SGX_ERROR_UNEXPECTED; sgx_ecc256_close_context(ecc_state); return ret; } if(!valid) { sgx_ecc256_close_context(ecc_state); return SGX_ERROR_INVALID_PARAMETER; } sgx_ecc256_close_context(ecc_state); //add new item to g_ra_db ra_db_item_t* new_item = (ra_db_item_t*)malloc(sizeof(ra_db_item_t)); if (!new_item) { return SGX_ERROR_OUT_OF_MEMORY; } memset(new_item,0, sizeof(ra_db_item_t)); memcpy(&new_item->sp_pubkey, p_pub_key, sizeof(new_item->sp_pubkey)); if(b_pse) { //sgx_create_pse_session() must have been called ret = sgx_get_ps_sec_prop(&new_item->ps_sec_prop); if (ret!=SGX_SUCCESS) { SAFE_FREE(new_item); return ret; } } new_item->derive_key_cb = ENC_KDF_POINTER(derive_key_cb); new_item->state = ra_inited; //find first empty slot in g_ra_db int first_empty = -1; ra_db_item_t* item = NULL; sgx_spin_lock(&g_ra_db_lock); uint32_t size = vector_size(&g_ra_db); for (uint32_t i = 0; i < size; i++) { if(0 != vector_get(&g_ra_db, i, reinterpret_cast<void**>(&item))) { sgx_spin_unlock(&g_ra_db_lock); SAFE_FREE(new_item); return SGX_ERROR_UNEXPECTED; } if(item == NULL) { first_empty = i; break; } } //if there is a empty slot, use it if (first_empty >= 0) { errno_t vret = vector_set(&g_ra_db, first_empty, new_item); UNUSED(vret); assert(vret == 0); *p_context = first_empty; } //if there are no empty slots, add a new item to g_ra_db else { if(size >= INT32_MAX) { //overflow sgx_spin_unlock(&g_ra_db_lock); SAFE_FREE(new_item); return SGX_ERROR_OUT_OF_MEMORY; } if(0 != vector_push_back(&g_ra_db, new_item)) { sgx_spin_unlock(&g_ra_db_lock); SAFE_FREE(new_item); return SGX_ERROR_OUT_OF_MEMORY; } *p_context = size; } sgx_spin_unlock(&g_ra_db_lock); return SGX_SUCCESS; }
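// ISV-enclave usage sketch (illustrative): start a remote-attestation session against the
// service provider's EC256 public key. g_sp_pub_key is a placeholder name; real enclaves embed
// the provider key at build time. Passing NULL for derive_key_cb selects the default KDF
// (kdf_id 0x0001) handled in sgx_ra_proc_msg2_trusted below.
extern const sgx_ec256_public_t g_sp_pub_key;   // placeholder provider key

sgx_status_t ecall_start_ra_session(sgx_ra_context_t *p_ctx)
{
    return sgx_ra_init_ex(&g_sp_pub_key, 0 /* no PSE session */, NULL, p_ctx);
}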
//Function to generate Field1_0 of ProvMsg3 //@msg2_blob_input, input decoded ProvMsg2 info //@join_proof, output the join proof and the escrow data which is the encrypted f of the private key //@return PVEC_SUCCESS on success and error code on failure //The function assumes all required inputs have been prepared in msg2_blob_input static pve_status_t gen_msg3_join_proof_escrow_data(const proc_prov_msg2_blob_input_t *msg2_blob_input, join_proof_with_escrow_t& join_proof) { pve_status_t ret = PVEC_SUCCESS; BitSupplier epid_prng = (BitSupplier) epid_random_func; FpElemStr temp_f; //first generate private key f randomly before sealing it by PSK FpElemStr *f = &temp_f; sgx_status_t sgx_status = SGX_SUCCESS; JoinRequest *join_r = &join_proof.jr; EpidStatus epid_ret = kEpidNoErr; psvn_t psvn; MemberCtx* ctx = NULL; memset(&temp_f, 0, sizeof(temp_f)); //randomly generate the private EPID key f, host to network transformation not required since server will not decode it ret=sgx_error_to_pve_error(sgx_gen_epid_priv_f((void*)f)); if(PVEC_SUCCESS != ret){ goto ret_point; } //generate JoinP using f before encryption by calling EPID library memset(join_r, 0, sizeof(JoinRequest));//first clear to 0 //generate JoinP to fill it in field1_0_0 by EPID library epid_ret = epid_member_create(epid_prng, NULL, f, &ctx); if(kEpidNoErr!=epid_ret){ ret = epid_error_to_pve_error(epid_ret); goto ret_point; } epid_ret = EpidCreateJoinRequest(ctx, &msg2_blob_input->group_cert.key, //EPID Group Cert from ProvMsg2 used reinterpret_cast<const IssuerNonce *>(msg2_blob_input->challenge_nonce), join_r); if(kEpidNoErr != epid_ret){ ret = epid_error_to_pve_error(epid_ret); goto ret_point; } //get PSK sgx_key_128bit_t psk; memcpy(&psvn.cpu_svn, &msg2_blob_input->equiv_pi.cpu_svn, sizeof(psvn.cpu_svn)); memcpy(&psvn.isv_svn, &msg2_blob_input->equiv_pi.pve_svn, sizeof(psvn.isv_svn)); ret = get_pve_psk(&psvn, &psk); if(PVEC_SUCCESS != ret){ goto ret_point; } join_proof.escrow.version = 0;//version 0 used for escrow data //now we could seal f by PSK ret = se_read_rand_error_to_pve_error(sgx_read_rand(join_proof.escrow.iv, IV_SIZE)); if(PVEC_SUCCESS != ret){ goto ret_point; } se_static_assert(sizeof(psk)==sizeof(sgx_aes_gcm_128bit_key_t)); /*sizeof(sgx_aes_gcm_128bit_key_t) should be the same as the size of psk*/ se_static_assert(sizeof(sgx_aes_gcm_128bit_tag_t)==sizeof(join_proof.escrow.mac)); /*sizeof(sgx_aes_gcm_128bit_tag_t) should be the same as MAC_SIZE*/ sgx_status = sgx_rijndael128GCM_encrypt(reinterpret_cast<const sgx_aes_gcm_128bit_key_t *>(&psk), reinterpret_cast<uint8_t *>(f), sizeof(*f), reinterpret_cast<uint8_t *>(&join_proof.escrow.f), join_proof.escrow.iv, IV_SIZE, NULL, 0, reinterpret_cast<sgx_aes_gcm_128bit_tag_t *>(join_proof.escrow.mac)); if(SGX_SUCCESS != sgx_status){ ret = sgx_error_to_pve_error(sgx_status); } ret_point: (void)memset_s(&psk, sizeof(psk), 0, sizeof(psk));//clear the key (void)memset_s(&temp_f, sizeof(temp_f), 0, sizeof(temp_f));//clear temp f on the stack if(PVEC_SUCCESS != ret){ (void)memset_s(&join_proof, sizeof(join_proof), 0, sizeof(join_proof)); } epid_member_delete(&ctx); return ret; }
extern "C" sgx_status_t sgx_ra_proc_msg2_trusted( sgx_ra_context_t context, const sgx_ra_msg2_t *p_msg2, //(g_b||spid||quote_type|| KDF_ID ||sign_gb_ga||cmac||sig_rl_size||sig_rl) const sgx_target_info_t *p_qe_target, sgx_report_t *p_report, sgx_quote_nonce_t* p_nonce) { sgx_status_t se_ret = SGX_ERROR_UNEXPECTED; //p_msg2[in] p_qe_target[in] p_report[out] p_nonce[out] in EDL file if(vector_size(&g_ra_db) <= context || !p_msg2 || !p_qe_target || !p_report || !p_nonce) return SGX_ERROR_INVALID_PARAMETER; ra_db_item_t* item = NULL; if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL ) return SGX_ERROR_INVALID_PARAMETER; sgx_ec256_private_t a; memset(&a, 0, sizeof(a)); // Create gb_ga sgx_ec256_public_t gb_ga[2]; sgx_ec256_public_t sp_pubkey; sgx_ec_key_128bit_t smkey = {0}; sgx_ec_key_128bit_t skey = {0}; sgx_ec_key_128bit_t mkey = {0}; sgx_ec_key_128bit_t vkey = {0}; sgx_ra_derive_secret_keys_t ra_key_cb = NULL; memset(&gb_ga[0], 0, sizeof(gb_ga)); sgx_spin_lock(&item->item_lock); //sgx_ra_get_ga must have been called if (item->state != ra_get_gaed) { sgx_spin_unlock(&item->item_lock); return SGX_ERROR_INVALID_STATE; } memcpy(&a, &item->a, sizeof(a)); memcpy(&gb_ga[1], &item->g_a, sizeof(gb_ga[1])); memcpy(&sp_pubkey, &item->sp_pubkey, sizeof(sp_pubkey)); ra_key_cb = DEC_KDF_POINTER(item->derive_key_cb); sgx_spin_unlock(&item->item_lock); memcpy(&gb_ga[0], &p_msg2->g_b, sizeof(gb_ga[0])); sgx_ecc_state_handle_t ecc_state = NULL; // ecc_state need to be freed when exit. se_ret = sgx_ecc256_open_context(&ecc_state); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; return se_ret; } sgx_ec256_dh_shared_t dh_key; memset(&dh_key, 0, sizeof(dh_key)); sgx_ec256_public_t* p_msg2_g_b = const_cast<sgx_ec256_public_t*>(&p_msg2->g_b); se_ret = sgx_ecc256_compute_shared_dhkey(&a, (sgx_ec256_public_t*)p_msg2_g_b, &dh_key, ecc_state); if(SGX_SUCCESS != se_ret) { if (SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; sgx_ecc256_close_context(ecc_state); return se_ret; } // Verify signature of gb_ga uint8_t result; sgx_ec256_signature_t* p_msg2_sign_gb_ga = const_cast<sgx_ec256_signature_t*>(&p_msg2->sign_gb_ga); se_ret = sgx_ecdsa_verify((uint8_t *)&gb_ga, sizeof(gb_ga), &sp_pubkey, p_msg2_sign_gb_ga, &result, ecc_state); if(SGX_SUCCESS != se_ret) { if (SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; sgx_ecc256_close_context(ecc_state); return se_ret; } if(SGX_EC_VALID != result) { sgx_ecc256_close_context(ecc_state); return SGX_ERROR_INVALID_SIGNATURE; } do { if(NULL != ra_key_cb) { se_ret = ra_key_cb(&dh_key, p_msg2->kdf_id, &smkey, &skey, &mkey, &vkey); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret && SGX_ERROR_INVALID_PARAMETER != se_ret && SGX_ERROR_KDF_MISMATCH != se_ret) se_ret = SGX_ERROR_UNEXPECTED; break; } } else if (p_msg2->kdf_id == 0x0001) { se_ret = derive_key(&dh_key, "SMK", (uint32_t)(sizeof("SMK") -1), &smkey); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; break; } se_ret = derive_key(&dh_key, "SK", (uint32_t)(sizeof("SK") -1), &skey); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; break; } se_ret = derive_key(&dh_key, "MK", (uint32_t)(sizeof("MK") -1), &mkey); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; break; } se_ret = derive_key(&dh_key, "VK", (uint32_t)(sizeof("VK") -1), &vkey); if (SGX_SUCCESS 
!= se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; break; } } else { se_ret = SGX_ERROR_KDF_MISMATCH; break; } sgx_cmac_128bit_tag_t mac; uint32_t maced_size = offsetof(sgx_ra_msg2_t, mac); se_ret = sgx_rijndael128_cmac_msg(&smkey, (const uint8_t *)p_msg2, maced_size, &mac); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; break; } //Check mac if(0 == consttime_memequal(mac, p_msg2->mac, sizeof(mac))) { se_ret = SGX_ERROR_MAC_MISMATCH; break; } //create a nonce se_ret =sgx_read_rand((uint8_t*)p_nonce, sizeof(sgx_quote_nonce_t)); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; break; } sgx_spin_lock(&item->item_lock); //sgx_ra_get_ga must have been called if (item->state != ra_get_gaed) { se_ret = SGX_ERROR_INVALID_STATE; sgx_spin_unlock(&item->item_lock); break; } memcpy(&item->g_b, &p_msg2->g_b, sizeof(item->g_b)); memcpy(&item->smk_key, smkey, sizeof(item->smk_key)); memcpy(&item->sk_key, skey, sizeof(item->sk_key)); memcpy(&item->mk_key, mkey, sizeof(item->mk_key)); memcpy(&item->vk_key, vkey, sizeof(item->vk_key)); memcpy(&item->qe_target, p_qe_target, sizeof(sgx_target_info_t)); memcpy(&item->quote_nonce, p_nonce, sizeof(sgx_quote_nonce_t)); sgx_report_data_t report_data = {{0}}; se_static_assert(sizeof(sgx_report_data_t)>=sizeof(sgx_sha256_hash_t)); // H = SHA256(ga || gb || VK_CMAC) uint32_t sha256ed_size = offsetof(ra_db_item_t, sp_pubkey); //report_data is 512bits, H is 256bits. The H is in the lower 256 bits of report data while the higher 256 bits are all zeros. se_ret = sgx_sha256_msg((uint8_t *)&item->g_a, sha256ed_size, (sgx_sha256_hash_t *)&report_data); if(SGX_SUCCESS != se_ret) { if (SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; sgx_spin_unlock(&item->item_lock); break; } //REPORTDATA = H se_ret = sgx_create_report(p_qe_target, &report_data, p_report); if (SGX_SUCCESS != se_ret) { if(SGX_ERROR_OUT_OF_MEMORY != se_ret) se_ret = SGX_ERROR_UNEXPECTED; sgx_spin_unlock(&item->item_lock); break; } item->state = ra_proc_msg2ed; sgx_spin_unlock(&item->item_lock); }while(0); memset_s(&dh_key, sizeof(dh_key), 0, sizeof(dh_key)); sgx_ecc256_close_context(ecc_state); memset_s(&a, sizeof(sgx_ec256_private_t),0, sizeof(sgx_ec256_private_t)); memset_s(smkey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t)); memset_s(skey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t)); memset_s(mkey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t)); memset_s(vkey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t)); return se_ret; }
/* * External function used to get quote. Prefix "emp_" means it is a pointer * pointing to memory outside the enclave. * * @param p_blob[in, out] Pointer to the EPID Blob. * @param blob_size[in] The size of EPID Blob, in bytes. * @param p_enclave_report[in] The application enclave's report. * @param quote_type[in] The type of quote, random based or name based. * @param p_spid[in] Pointer to SPID. * @param p_nonce[in] Pointer to nonce. * @param emp_sig_rl[in] Pointer to SIG-RL. * @param sig_rl_size[in] The size of SIG-RL, in bytes. * @param p_qe_report[out] Pointer to QE report, whose report data is * sha256(nonce || quote) * @param emp_quote[out] Pointer to the output buffer for quote. * @param quote_size[in] The size of emp_quote, in bytes. * @param pce_isvsvn[in] The ISVSVN of PCE. * @return ae_error_t AE_SUCCESS for success, otherwise for errors. */ uint32_t get_quote( uint8_t *p_blob, uint32_t blob_size, const sgx_report_t *p_enclave_report, sgx_quote_sign_type_t quote_type, const sgx_spid_t *p_spid, const sgx_quote_nonce_t *p_nonce, const uint8_t *emp_sig_rl, uint32_t sig_rl_size, sgx_report_t *p_qe_report, uint8_t *emp_quote, uint32_t quote_size, sgx_isv_svn_t pce_isvsvn) { ae_error_t ret = AE_SUCCESS; EpidStatus epid_ret = kEpidNoErr; MemberCtx *p_epid_context = NULL; sgx_quote_t quote_body; uint8_t is_resealed = 0; sgx_basename_t basename = {{0}}; uint64_t sign_size = 0; sgx_status_t se_ret = SGX_SUCCESS; sgx_report_t qe_report; uint64_t required_buffer_size = 0; se_sig_rl_t sig_rl_header; se_plaintext_epid_data_sdk_t plaintext; sgx_ec256_signature_t ec_signature; sgx_cpu_svn_t cpusvn; memset(&quote_body, 0, sizeof(quote_body)); memset(&sig_rl_header, 0, sizeof(sig_rl_header)); memset(&plaintext, 0, sizeof(plaintext)); memset(&ec_signature, 0, sizeof(ec_signature)); memset(&cpusvn, 0, sizeof(cpusvn)); /* Actually, some cases here will be checked with code generated by edger8r. Here we just want to defend in depth. */ if((NULL == p_blob) || (NULL == p_enclave_report) || (NULL == p_spid) || (NULL == emp_quote) || (!quote_size) || ((NULL != emp_sig_rl) && (sig_rl_size < sizeof(se_sig_rl_t) + 2 * SE_ECDSA_SIGN_SIZE)) // // this size check could mispredict and cause us to // overflow, but we have an lfence below // that's safe to use for this case // || ((NULL == emp_sig_rl) && (sig_rl_size != 0))) return QE_PARAMETER_ERROR; if(SGX_TRUSTED_EPID_BLOB_SIZE_SDK != blob_size) return QE_PARAMETER_ERROR; // // this could mispredict and cause us to // overflow, but we have an lfence below // that's safe to use for this case // if(SGX_LINKABLE_SIGNATURE != quote_type && SGX_UNLINKABLE_SIGNATURE != quote_type) return QE_PARAMETER_ERROR; if(!p_nonce && p_qe_report) return QE_PARAMETER_ERROR; if(p_nonce && !p_qe_report) return QE_PARAMETER_ERROR; /* To reduce the memory footprint of QE, we should leave sig_rl and quote buffer outside enclave. */ if(!sgx_is_outside_enclave(emp_sig_rl, sig_rl_size)) return QE_PARAMETER_ERROR; // // for user_check SigRL input // based on quote_size input parameter // sgx_lfence(); if(!sgx_is_outside_enclave(emp_quote, quote_size)) return QE_PARAMETER_ERROR; /* Check whether p_blob is copied into EPC. If we want to reduce the memory usage, maybe we can leave the p_blob outside EPC.
*/ if(!sgx_is_within_enclave(p_blob, blob_size)) return QE_PARAMETER_ERROR; if(!sgx_is_within_enclave(p_enclave_report, sizeof(*p_enclave_report))) return QE_PARAMETER_ERROR; if(!sgx_is_within_enclave(p_spid, sizeof(*p_spid))) return QE_PARAMETER_ERROR; /* If the code reach here, if p_nonce is NULL, then p_qe_report will be NULL also. So we only check p_nonce here.*/ if(p_nonce) { /* Actually Edger8r will alloc the buffer within EPC, this is just kind of defense in depth. */ if(!sgx_is_within_enclave(p_nonce, sizeof(*p_nonce))) return QE_PARAMETER_ERROR; if(!sgx_is_within_enclave(p_qe_report, sizeof(*p_qe_report))) return QE_PARAMETER_ERROR; } /* Verify the input report. */ if(SGX_SUCCESS != sgx_verify_report(p_enclave_report)) return QE_PARAMETER_ERROR; /* Verify EPID p_blob and create the context */ ret = random_stack_advance(verify_blob_internal, p_blob, blob_size, &is_resealed, TRUE, plaintext, &p_epid_context, &cpusvn); if(AE_SUCCESS != ret) goto CLEANUP; /* If SIG-RL is provided, we should check its size. */ if(emp_sig_rl) { uint64_t temp_size = 0; uint64_t n2 = 0; memcpy(&sig_rl_header, emp_sig_rl, sizeof(sig_rl_header)); if(sig_rl_header.protocol_version != SE_EPID_SIG_RL_VERSION) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } if(sig_rl_header.epid_identifier != SE_EPID_SIG_RL_ID) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } if(memcmp(&sig_rl_header.sig_rl.gid, &plaintext.epid_group_cert.gid, sizeof(sig_rl_header.sig_rl.gid))) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } temp_size = se_get_sig_rl_size(&sig_rl_header); if(temp_size != sig_rl_size) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } se_static_assert(sizeof(ec_signature.x) == SE_ECDSA_SIGN_SIZE); se_static_assert(sizeof(ec_signature.y) == SE_ECDSA_SIGN_SIZE); memcpy(ec_signature.x, emp_sig_rl + sig_rl_size - (SE_ECDSA_SIGN_SIZE * 2), sizeof(ec_signature.x)); SWAP_ENDIAN_32B(ec_signature.x); memcpy(ec_signature.y, emp_sig_rl + sig_rl_size - (SE_ECDSA_SIGN_SIZE * 1), sizeof(ec_signature.y)); SWAP_ENDIAN_32B(ec_signature.y); n2 = SWAP_4BYTES(sig_rl_header.sig_rl.n2); temp_size = sizeof(EpidSignature) - sizeof(NrProof) + n2 * sizeof(NrProof); if(temp_size > UINT32_MAX) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } sign_size = temp_size; } else { sign_size = sizeof(BasicSignature) + sizeof(uint32_t) // rl_ver + sizeof(uint32_t); // rl_num } /* Verify sizeof basename is large enough and it should always be true*/ se_static_assert(sizeof(basename) > sizeof(*p_spid)); /* Because basename has already been zeroed, so we don't need to concatenating with 0s.*/ memcpy(&basename, p_spid, sizeof(*p_spid)); if(SGX_UNLINKABLE_SIGNATURE == quote_type) { uint8_t *p = (uint8_t *)&basename + sizeof(*p_spid); se_ret = sgx_read_rand(p, sizeof(basename) - sizeof(*p_spid)); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } epid_ret = EpidRegisterBasename(p_epid_context, (uint8_t *)&basename, sizeof(basename)); if(kEpidNoErr != epid_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } required_buffer_size = SE_QUOTE_LENGTH_WITHOUT_SIG + sign_size; /* We should make sure the buffer size is big enough. */ if(quote_size < required_buffer_size) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } // // for user_check SigRL input // based on n2 field in SigRL // sgx_lfence(); /* Copy the data in the report into quote body. 
*/ memset(emp_quote, 0, quote_size); quote_body.version = QE_QUOTE_VERSION; quote_body.sign_type = (uint16_t)quote_type; quote_body.pce_svn = pce_isvsvn; // Both are little endian quote_body.xeid = plaintext.xeid; // Both are little endian se_static_assert(sizeof(plaintext.epid_group_cert.gid) == sizeof(OctStr32)); se_static_assert(sizeof(quote_body.epid_group_id) == sizeof(uint32_t)); ((uint8_t *)(&quote_body.epid_group_id))[0] = plaintext.epid_group_cert.gid.data[3]; ((uint8_t *)(&quote_body.epid_group_id))[1] = plaintext.epid_group_cert.gid.data[2]; ((uint8_t *)(&quote_body.epid_group_id))[2] = plaintext.epid_group_cert.gid.data[1]; ((uint8_t *)(&quote_body.epid_group_id))[3] = plaintext.epid_group_cert.gid.data[0]; memcpy(&quote_body.basename, &basename, sizeof(quote_body.basename)); // Get the QE's report. se_ret = sgx_create_report(NULL, NULL, &qe_report); if(SGX_SUCCESS != se_ret) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } // Copy QE's security version into the Quote body. quote_body.qe_svn = qe_report.body.isv_svn; // Copy the incoming report into the Quote body. memcpy(&quote_body.report_body, &(p_enclave_report->body), sizeof(quote_body.report_body)); /* Because required_buffer_size is larger than signature_len, no integer overflow will occur if we get here. */ quote_body.signature_len = (uint32_t)(sizeof(se_wrap_key_t) + QUOTE_IV_SIZE + sizeof(uint32_t) + sign_size + sizeof(sgx_mac_t)); /* Make the signature. */ ret = qe_epid_sign(p_epid_context, plaintext, &basename, emp_sig_rl ? ((const se_sig_rl_t *)emp_sig_rl)->sig_rl.bk : NULL, &sig_rl_header, &ec_signature, p_enclave_report, p_nonce, p_qe_report, emp_quote, &quote_body, (uint32_t)sign_size); if(AE_SUCCESS != ret) { // Only need to clean the buffer after the fixed length part. memset_s(emp_quote + sizeof(sgx_quote_t), quote_size - sizeof(sgx_quote_t), 0, quote_size - sizeof(sgx_quote_t)); goto CLEANUP; } memcpy(emp_quote, &quote_body, sizeof(sgx_quote_t)); CLEANUP: if(p_epid_context) epid_member_delete(&p_epid_context); return ret; }
//Function to create data for ProvMsg3 generation // The sigrl of ProvMsg2 will be processed in this function in a piece-meal manner //@msg2_blob_input: structure to hold decoded data of ProvMsg2 //@performance_rekey_used[in]: 1 if performance rekey used or 0 if not //@msg3_parm: structure to hold most information to generate ProvMsg3 //@msg3_output: structure to hold output data to create ProvMsg3 //@emp_epid_sig: output buffer to external memory for variable length EpidSignature //@epid_sig_buffer_size: size in bytes of buffer emp_epid_sig //@return PVEC_SUCCESS on success and error code if failed pve_status_t gen_prov_msg3_data(const proc_prov_msg2_blob_input_t *msg2_blob_input, prov_msg3_parm_t& msg3_parm, uint8_t performance_rekey_used, gen_prov_msg3_output_t *msg3_output, external_memory_byte_t *emp_epid_sig, uint32_t epid_sig_buffer_size) { pve_status_t ret = PVEC_SUCCESS; sgx_status_t sgx_status = SGX_ERROR_UNEXPECTED; uint8_t temp_buf[JOIN_PROOF_TLV_TOTAL_SIZE]; uint8_t *data_to_encrypt = NULL; uint8_t size_to_encrypt = 0; uint8_t pwk2_tlv_buffer[PWK2_TLV_TOTAL_SIZE]; sgx_key_128bit_t *pwk2=reinterpret_cast<sgx_key_128bit_t *>(pwk2_tlv_buffer+PWK2_TLV_HEADER_SIZE); uint8_t report_data_payload[MAC_SIZE + HARD_CODED_JOIN_PROOF_WITH_ESCROW_TLV_SIZE + NONCE_2_SIZE + PEK_MOD_SIZE]; uint8_t* pdata = &report_data_payload[0]; sgx_report_data_t report_data = { 0 }; uint8_t aad[sizeof(GroupId)+sizeof(device_id_t)+CHALLENGE_NONCE_SIZE]; void *pub_key = NULL; const signed_pek_t& pek = msg2_blob_input->pek; uint32_t le_e; int i; size_t output_len = 0; uint8_t le_n[sizeof(pek.n)]; static_assert(sizeof(pek.n)==384, "pek.n should be 384 bytes"); device_id_t *device_id_in_aad= (device_id_t *)(aad+sizeof(GroupId)); join_proof_with_escrow_t* join_proof_with_escrow=reinterpret_cast<join_proof_with_escrow_t *>(temp_buf+JOIN_PROOF_TLV_HEADER_SIZE); se_static_assert(sizeof(join_proof_with_escrow_t)+JOIN_PROOF_TLV_HEADER_SIZE==JOIN_PROOF_TLV_TOTAL_SIZE); /*unmatched hardcoded size*/ se_static_assert(sizeof(sgx_key_128bit_t)==PWK2_TLV_TOTAL_SIZE-PWK2_TLV_HEADER_SIZE); /*unmatched PWK2 size*/ memset(temp_buf, 0, sizeof(temp_buf)); memset(aad, 0, sizeof(aad)); memset(pwk2, 0, sizeof(sgx_key_128bit_t)); memcpy(pwk2_tlv_buffer, PWK2_TLV_HEADER, PWK2_TLV_HEADER_SIZE); msg3_output->is_join_proof_generated=false; msg3_output->is_epid_sig_generated=false; if ((msg2_blob_input->pce_target_info.attributes.flags & SGX_FLAGS_PROVISION_KEY) != SGX_FLAGS_PROVISION_KEY || (msg2_blob_input->pce_target_info.attributes.flags & SGX_FLAGS_DEBUG) != 0){ //PCE must have access to provisioning key //Can't be debug PCE ret = PVEC_PARAMETER_ERROR; goto ret_point; } if(!performance_rekey_used){ //the temp_buf used for join_proof_with_escrow tlv memcpy(temp_buf, JOIN_PROOF_TLV_HEADER, JOIN_PROOF_TLV_HEADER_SIZE);//first copy in tlv header ret = random_stack_advance(gen_msg3_join_proof_escrow_data, msg2_blob_input, *join_proof_with_escrow);//generate the tlv payload if( PVEC_SUCCESS != ret ) goto ret_point; msg3_output->is_join_proof_generated = true; data_to_encrypt = temp_buf; size_to_encrypt = JOIN_PROOF_TLV_TOTAL_SIZE; } //now encrypt field1 ret = se_read_rand_error_to_pve_error(sgx_read_rand(msg3_output->field1_iv, IV_SIZE));//randomly generate IV if( PVEC_SUCCESS != ret) goto ret_point; memcpy(aad, &msg2_blob_input->group_cert.key.gid,sizeof(GroupId));//start to prepare AAD memcpy(&device_id_in_aad->fmsp, &msg2_blob_input->equiv_pi.fmsp, sizeof(fmsp_t)); memcpy(&device_id_in_aad->psvn.cpu_svn, &msg2_blob_input->equiv_pi.cpu_svn,
sizeof(sgx_cpu_svn_t)); memcpy(&device_id_in_aad->psvn.isv_svn, &msg2_blob_input->equiv_pi.pve_svn, sizeof(sgx_isv_svn_t)); memset(&device_id_in_aad->ppid, 0, sizeof(device_id_in_aad->ppid)); ret = pve_rng_generate(NONCE_2_SIZE*8, msg3_output->n2); if(PVEC_SUCCESS !=ret){ goto ret_point; } ret = random_stack_advance(get_pwk2, &device_id_in_aad->psvn, msg3_output->n2, pwk2); if( PVEC_SUCCESS != ret ) goto ret_point; memcpy(aad+sizeof(GroupId)+sizeof(device_id_t), msg2_blob_input->challenge_nonce, CHALLENGE_NONCE_SIZE); se_static_assert(sizeof(sgx_aes_gcm_128bit_key_t)==SK_SIZE); /*sizeof sgx_aes_gcm_128bit_key_t should be same as TCB size*/ se_static_assert(sizeof(sgx_aes_gcm_128bit_tag_t)==MAC_SIZE); /*sizeof sgx_aes_gcm_128bit_tag_t should be same as MAC_SIZE*/ sgx_status = sgx_rijndael128GCM_encrypt(reinterpret_cast<const sgx_aes_gcm_128bit_key_t *>(pwk2), data_to_encrypt, size_to_encrypt, msg3_output->field1_data, msg3_output->field1_iv, IV_SIZE, aad, static_cast<uint32_t>(sizeof(GroupId)+sizeof(device_id_t)+CHALLENGE_NONCE_SIZE), reinterpret_cast<sgx_aes_gcm_128bit_tag_t *>(msg3_output->field1_mac));//encrypt field1 if(SGX_SUCCESS != sgx_status){ ret = sgx_error_to_pve_error(sgx_status); goto ret_point; } if( msg2_blob_input->is_previous_pi_provided ){ //preparing the encryption state of ProvMsg3 and encrypt inplace of msg3_inside enclave (field1_0 and field1_1) //The function will randomly set the iv value too ret = proc_msg3_state_init(&msg3_parm, pwk2); if( PVEC_SUCCESS!=ret ) goto ret_point; //Now start piece-meal generation of EPIDsign ret = gen_msg3_signature(msg2_blob_input, &msg3_parm, emp_epid_sig, epid_sig_buffer_size); if( PVEC_SUCCESS!=ret ) goto ret_point; msg3_output->is_epid_sig_generated = true; msg3_output->epid_sig_output_size = epid_sig_buffer_size; memcpy(msg3_output->epid_sig_iv, msg3_parm.iv, IV_SIZE); //generate MAC in EPC ret = sgx_error_to_pve_error(sgx_aes_gcm128_enc_get_mac(msg3_output->epid_sig_mac, (sgx_aes_state_handle_t*)msg3_parm.p_msg3_state)); if (PVEC_SUCCESS != ret) goto ret_point; } le_e = lv_ntohl(pek.e); se_static_assert(sizeof(pek.n)==sizeof(le_n)); /*unmatched size of pek.n*/ //endian swap for(i=0;i<(int)(sizeof(pek.n)/sizeof(pek.n[0]));i++){ le_n[i]=pek.n[sizeof(pek.n)/sizeof(pek.n[0])-i-1]; } sgx_status = sgx_create_rsa_pub_key(sizeof(pek.n), sizeof(pek.e), reinterpret_cast<const unsigned char *>(le_n), reinterpret_cast<const unsigned char *>(&le_e), &pub_key); if (SGX_SUCCESS != sgx_status) { ret = sgx_error_to_pve_error(sgx_status); goto ret_point; } sgx_status = sgx_rsa_pub_encrypt_sha256(pub_key, NULL, &output_len, reinterpret_cast<const unsigned char*>(pwk2_tlv_buffer), PWK2_TLV_TOTAL_SIZE); if (SGX_SUCCESS != sgx_status) { ret = sgx_error_to_pve_error(sgx_status); goto ret_point; } sgx_status = sgx_rsa_pub_encrypt_sha256(pub_key, msg3_output->encrypted_pwk2, &output_len, reinterpret_cast<const unsigned char*>(pwk2_tlv_buffer), PWK2_TLV_TOTAL_SIZE); if (SGX_SUCCESS != sgx_status) { ret = sgx_error_to_pve_error(sgx_status); goto ret_point; } // X = (NT)MAC_PWK2(... (NT)E_PWK2((T)(JoinP, f)) ...) 
| (NT)E_PWK2((T)(JoinP, f)) | (NT)PWK2N | (NT)E_PEK((T)PWK2) // REPORT.ReportData == SHA256[X] memcpy(pdata, msg3_output->field1_mac, MAC_SIZE); pdata += MAC_SIZE; if (!performance_rekey_used){ memcpy(pdata, msg3_output->field1_data, HARD_CODED_JOIN_PROOF_WITH_ESCROW_TLV_SIZE); pdata += HARD_CODED_JOIN_PROOF_WITH_ESCROW_TLV_SIZE; } memcpy(pdata, msg3_output->n2, NONCE_2_SIZE); pdata += NONCE_2_SIZE; memcpy(pdata, msg3_output->encrypted_pwk2, PEK_MOD_SIZE); pdata += PEK_MOD_SIZE; se_static_assert(sizeof(report_data) >= sizeof(sgx_sha256_hash_t)); /*report data is not large enough*/ sgx_status = sgx_sha256_msg(report_data_payload, (uint32_t)(pdata - &report_data_payload[0]), reinterpret_cast<sgx_sha256_hash_t *>(&report_data)); if (SGX_SUCCESS != sgx_status){ ret = sgx_error_to_pve_error(sgx_status); goto ret_point; } sgx_status = sgx_create_report(&msg2_blob_input->pce_target_info, &report_data, &msg3_output->pwk2_report); if (SGX_SUCCESS != sgx_status){ ret = sgx_error_to_pve_error(sgx_status); goto ret_point; } ret_point: (void)memset_s(aad, sizeof(aad), 0, sizeof(aad)); (void)memset_s(temp_buf, sizeof(temp_buf), 0, sizeof(temp_buf)); (void)memset_s(pwk2_tlv_buffer, sizeof(pwk2_tlv_buffer),0,sizeof(pwk2_tlv_buffer)); if(pub_key){ sgx_free_rsa_key(pub_key, SGX_RSA_PUBLIC_KEY, sizeof(pek.n), sizeof(pek.e)); } return ret; }
/* * An internal function used to sign the EPID signature on the quote body. * Prefix "emp_" means it is a pointer points memory outside enclave. * * For quote with SIG-RL * |--------------------------------------------------------------------| * |sgx_quote_t|wrap_key_t|iv|payload_size|basic_sig|rl_ver|n2|nrp..|mac| * |--------------------------------------------------------------------| * For quote without SIG-RL * |--------------------------------------------------------------| * |sgx_quote_t|wrap_key_t|iv|payload_size|basic_sig|rl_ver|n2|mac| * |--------------------------------------------------------------| * * @param p_epid_context[in] Pointer to the EPID context. * @param plaintext[in] Reference to the plain text part of EPID blob. * @param p_basename[in] The pointer to basename. * @param emp_sig_rl_entries[in] The pointer to SIG-RL entries. * @param p_sig_rl_header[in] The header of SIG-RL, within EPC. * @param p_sig_rl_signature[in] The ecdsa signature of SIG-RL, within EPC. * @param p_enclave_report[in] The input isv report. * @param p_nonce[in] The input nonce. * @param p_qe_report[out] The output buffer for qe_report. * @param emp_quote[out] The output buffer for quote. * @param p_quote_body[in] The quote body in EPC. * @param sign_size[in] size of the signature. * @return ae_error_t AE_SUCCESS for success, otherwise for errors. */ static ae_error_t qe_epid_sign( MemberCtx *p_epid_context, const se_plaintext_epid_data_sdk_t& plaintext, const sgx_basename_t *p_basename, const SigRlEntry *emp_sig_rl_entries, se_sig_rl_t *p_sig_rl_header, sgx_ec256_signature_t *p_sig_rl_signature, const sgx_report_t *p_enclave_report, const sgx_quote_nonce_t *p_nonce, sgx_report_t *p_qe_report, uint8_t *emp_quote, const sgx_quote_t *p_quote_body, uint32_t sign_size) { ae_error_t ret = AE_SUCCESS; sgx_status_t se_ret = SGX_SUCCESS; EpidStatus epid_ret = kEpidNoErr; se_wrap_key_t wrap_key; BasicSignature basic_sig; BasicSignature encrypted_basic_sig; uint8_t aes_iv[QUOTE_IV_SIZE] = {0}; uint8_t aes_key[QE_AES_KEY_SIZE] = {0}; uint8_t aes_tag[SGX_SEAL_TAG_SIZE] = {0}; sgx_report_data_t qe_report_data = {{0}}; sgx_target_info_t report_target; sgx_ec256_public_t ec_pub_key; // little endian se_ae_ecdsa_hash_t sig_rl_hash = {{0}}; uint8_t ecc_result = SGX_EC_INVALID_SIGNATURE; sgx_sha_state_handle_t sha_context = NULL; sgx_sha_state_handle_t sha_quote_context = NULL; sgx_aes_state_handle_t aes_gcm_state = NULL; void *pub_key = NULL; size_t pub_key_size = 0; uint8_t* pub_key_buffer = NULL; sgx_ecc_state_handle_t ecc_handle = NULL; memset(&wrap_key, 0, sizeof(wrap_key)); memset(&basic_sig, 0, sizeof(basic_sig)); memset(&encrypted_basic_sig, 0, sizeof(encrypted_basic_sig)); memset(&report_target, 0, sizeof(report_target)); memset(&ec_pub_key, 0, sizeof(ec_pub_key)); se_encrypted_sign_t *emp_p = (se_encrypted_sign_t *) (((sgx_quote_t *)emp_quote)->signature); uint8_t* emp_nr = NULL; uint32_t match = FALSE; /* Sign the quote body and get the basic signature*/ epid_ret = EpidSignBasic(p_epid_context, (uint8_t *)const_cast<sgx_quote_t *>(p_quote_body), (uint32_t)QE_QUOTE_BODY_SIZE, (uint8_t *)const_cast<sgx_basename_t *>(p_basename), sizeof(*p_basename), &basic_sig, NULL); //Random basename, can be NULL if basename is provided if(kEpidNoErr != epid_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Prepare the context for SHA256 of quote. */ if(p_qe_report) { se_ret = sgx_sha256_init(&sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } // Update hash for nonce. 
se_ret = sgx_sha256_update((uint8_t *)const_cast<sgx_quote_nonce_t *>(p_nonce), sizeof(*p_nonce), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } // Update hash for the first part of quote. se_ret = sgx_sha256_update((uint8_t *)const_cast<sgx_quote_t *>(p_quote_body), sizeof(*p_quote_body), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } /* Prepare the context for SHA256 and start calculate the hash of header * of SIG-RL. */ if(emp_sig_rl_entries) { se_ret = sgx_sha256_init(&sha_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Calculate the hash of SIG-RL header. */ se_ret = sgx_sha256_update((uint8_t *)p_sig_rl_header, (uint32_t)(sizeof(se_sig_rl_t) - sizeof(SigRlEntry)), sha_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } // Start encrypt the signature. /* Get the random wrap key */ se_ret = sgx_read_rand(aes_key, sizeof(aes_key)); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Copy the hash of wrap key into output buffer. */ se_static_assert(sizeof(wrap_key.key_hash) == sizeof(sgx_sha256_hash_t)); se_ret = sgx_sha256_msg(aes_key, sizeof(aes_key), (sgx_sha256_hash_t *)wrap_key.key_hash); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Start encrypt the wrap key by RSA algorithm. */ se_ret = sgx_create_rsa_pub1_key(sizeof(g_qsdk_pub_key_n), sizeof(g_qsdk_pub_key_e), (const unsigned char *)g_qsdk_pub_key_n, (const unsigned char *)g_qsdk_pub_key_e, &pub_key); if(se_ret != SGX_SUCCESS) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Get output buffer size */ se_ret = sgx_rsa_pub_encrypt_sha256(pub_key, NULL, &pub_key_size, aes_key, sizeof(aes_key)); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_rsa_pub_encrypt_sha256(pub_key, wrap_key.encrypted_key, &pub_key_size, aes_key, sizeof(aes_key)); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Create the random AES IV. */ se_ret = sgx_read_rand(aes_iv, sizeof(aes_iv)); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Copy the wrap_key_t into output buffer. */ memcpy(&emp_p->wrap_key, &wrap_key, sizeof(wrap_key)); /* Copy the AES IV into output buffer. */ memcpy(&emp_p->iv, aes_iv, sizeof(aes_iv)); /* Copy the AES Blob payload size into output buffer. */ memcpy(&emp_p->payload_size, &sign_size, sizeof(sign_size)); se_ret = sgx_aes_gcm128_enc_init( aes_key, aes_iv, //input initial vector. randomly generated value and encryption of different msg should use different iv sizeof(aes_iv), //length of initial vector, usually IV_SIZE NULL,//AAD of AES-GCM, it could be NULL 0, //length of bytes of AAD &aes_gcm_state); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } memset_s(aes_key, sizeof(aes_key), 0, sizeof(aes_key)); /* Encrypt the basic signature. */ se_ret = sgx_aes_gcm128_enc_update( (uint8_t *)&basic_sig, //start address to data before/after encryption sizeof(basic_sig), (uint8_t *)&encrypted_basic_sig, //length of data aes_gcm_state); //pointer to a state if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Copy the encrypted basic signature into output buffer. 
*/ memcpy(&emp_p->basic_sign, &encrypted_basic_sig, sizeof(encrypted_basic_sig)); if(p_qe_report) { se_ret = sgx_sha256_update((uint8_t *)&wrap_key, sizeof(wrap_key), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_sha256_update(aes_iv, sizeof(aes_iv), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_sha256_update((uint8_t *)&sign_size, sizeof(sign_size), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_sha256_update((uint8_t *)&encrypted_basic_sig, sizeof(encrypted_basic_sig), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } /* Start process the SIG-RL. */ if(emp_sig_rl_entries) { unsigned int entry_count = 0; unsigned int i = 0; RLver_t encrypted_rl_ver = {{0}}; RLCount encrypted_n2 = {{0}}; entry_count = lv_ntohl(p_sig_rl_header->sig_rl.n2);//entry count for big endian to little endian // Continue encrypt the output se_ret = sgx_aes_gcm128_enc_update( (uint8_t *)&(p_sig_rl_header->sig_rl.version), //start address to data before/after encryption sizeof(p_sig_rl_header->sig_rl.version), (uint8_t *)&encrypted_rl_ver, //length of data aes_gcm_state); //pointer to a state if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_aes_gcm128_enc_update( (uint8_t *)&(p_sig_rl_header->sig_rl.n2), //start address to data before/after encryption sizeof(p_sig_rl_header->sig_rl.n2), (uint8_t *)&encrypted_n2, //length of data aes_gcm_state); //pointer to a state if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } memcpy(&(emp_p->rl_ver), &encrypted_rl_ver, sizeof(encrypted_rl_ver)); memcpy(&(emp_p->rl_num), &encrypted_n2, sizeof(encrypted_n2)); if(p_qe_report) { se_ret = sgx_sha256_update((uint8_t *)&encrypted_rl_ver, sizeof(encrypted_rl_ver), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_sha256_update((uint8_t *)&encrypted_n2, sizeof(encrypted_n2), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } /* Start process the SIG-RL entries one by one. */ emp_nr = emp_p->nrp_mac; for (i = 0; i < entry_count; i++, emp_nr += sizeof(NrProof)) { /* Generate non-revoke prove one by one. 
*/ SigRlEntry entry; NrProof temp_nr; NrProof encrypted_temp_nr; memcpy(&entry, emp_sig_rl_entries + i, sizeof(entry)); memset_s(&temp_nr, sizeof(temp_nr), 0, sizeof(temp_nr)); memset_s(&encrypted_temp_nr, sizeof(encrypted_temp_nr), 0, sizeof(encrypted_temp_nr)); epid_ret = EpidNrProve(p_epid_context, (uint8_t *)const_cast<sgx_quote_t *>(p_quote_body), (uint32_t)QE_QUOTE_BODY_SIZE, (uint8_t *)const_cast<sgx_basename_t *>(p_basename), // basename is required, otherwise it will return kEpidBadArgErr sizeof(*p_basename), &basic_sig, // Basic signature with 'b' and 'k' in it &entry, //Single entry in SigRl composed of 'b' and 'k' &temp_nr); // The generated non-revoked proof if(kEpidNoErr != epid_ret) { if(kEpidSigRevokedInSigRl == epid_ret) match = TRUE; else { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } /* Update the hash of SIG-RL */ se_ret = sgx_sha256_update((uint8_t *)&entry, sizeof(entry), sha_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_aes_gcm128_enc_update( (uint8_t *)&temp_nr, //start address to data before/after encryption sizeof(encrypted_temp_nr), (uint8_t *)&encrypted_temp_nr, //length of data aes_gcm_state); //pointer to a state if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } memcpy(emp_nr, &encrypted_temp_nr, sizeof(encrypted_temp_nr)); if(p_qe_report) { se_ret = sgx_sha256_update((uint8_t *)&encrypted_temp_nr, sizeof(encrypted_temp_nr), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } } /* Get the final hash of the whole SIG-RL. */ se_ret = sgx_sha256_get_hash(sha_context, (sgx_sha256_hash_t *)&sig_rl_hash.hash); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* Verify the integrity of SIG-RL by check ECDSA signature. */ se_static_assert(sizeof(ec_pub_key) == sizeof(plaintext.epid_sk)); // Both plaintext.epid_sk and ec_pub_key are little endian memcpy(&ec_pub_key, plaintext.epid_sk, sizeof(ec_pub_key)); se_ret = sgx_ecc256_open_context(&ecc_handle); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } // sgx_ecdsa_verify_hash will take ec_pub_key as little endian se_ret = sgx_ecdsa_verify_hash((uint8_t*)&(sig_rl_hash.hash), (const sgx_ec256_public_t *)&ec_pub_key, p_sig_rl_signature, &ecc_result, ecc_handle); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } else if(SGX_EC_VALID != ecc_result) { ret = QE_SIGRL_ERROR; goto CLEANUP; } else if(match) { ret = QE_REVOKED_ERROR; goto CLEANUP; } } else { se_static_assert(sizeof(emp_p->rl_ver) == sizeof(RLver_t)); se_static_assert(sizeof(emp_p->rl_num) == sizeof(RLCount)); uint8_t temp_buf[sizeof(RLver_t) + sizeof(RLCount)] = {0}; uint8_t encrypted_temp_buf[sizeof(temp_buf)] = {0}; se_ret = sgx_aes_gcm128_enc_update( (uint8_t *)&temp_buf, //start address to data before/after encryption sizeof(encrypted_temp_buf), (uint8_t *)&encrypted_temp_buf, //length of data aes_gcm_state); //pointer to a state if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } /* This will copy both encrypted rl_ver and encrypted rl_num into Output buffer. 
*/ memcpy(&emp_p->rl_ver, &encrypted_temp_buf, sizeof(encrypted_temp_buf)); if(p_qe_report) { se_ret = sgx_sha256_update((uint8_t *)&encrypted_temp_buf, sizeof(encrypted_temp_buf), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } } } se_ret = sgx_aes_gcm128_enc_get_mac(aes_tag, aes_gcm_state); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } memcpy((uint8_t *)&(emp_p->basic_sign) + sign_size, &aes_tag, sizeof(aes_tag)); if(p_qe_report) { se_ret = sgx_sha256_update(aes_tag, sizeof(aes_tag), sha_quote_context); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } se_ret = sgx_sha256_get_hash(sha_quote_context, (sgx_sha256_hash_t *)&qe_report_data); if(SGX_SUCCESS != se_ret) { ret = QE_UNEXPECTED_ERROR; goto CLEANUP; } memcpy(&(report_target.attributes), &(((const sgx_report_t *)p_enclave_report)->body.attributes), sizeof(report_target.attributes)); memcpy(&(report_target.mr_enclave), &(((const sgx_report_t *)p_enclave_report)->body.mr_enclave), sizeof(report_target.mr_enclave)); memcpy(&(report_target.misc_select), &(((const sgx_report_t *)p_enclave_report)->body.misc_select), sizeof(report_target.misc_select)); se_ret = sgx_create_report(&report_target, &qe_report_data, p_qe_report); if(SGX_SUCCESS != se_ret) { ret = QE_PARAMETER_ERROR; goto CLEANUP; } } CLEANUP: memset_s(aes_key, sizeof(aes_key), 0, sizeof(aes_key)); sgx_sha256_close(sha_context); sgx_sha256_close(sha_quote_context); if (aes_gcm_state) sgx_aes_gcm_close(aes_gcm_state); if (pub_key) sgx_free_rsa_key(pub_key, SGX_RSA_PUBLIC_KEY, sizeof(plaintext.qsdk_mod), sizeof(plaintext.qsdk_exp)); if (pub_key_buffer) free(pub_key_buffer); if (ecc_handle) sgx_ecc256_close_context(ecc_handle); return ret; }
sgx_status_t sgx_get_key(const sgx_key_request_t *key_request, sgx_key_128bit_t *key) { sgx_status_t err = SGX_ERROR_UNEXPECTED; void *buffer = NULL; size_t size = 0, buf_ptr = 0; sgx_key_request_t *tmp_key_request = NULL; sgx_key_128bit_t *tmp_key = NULL; egetkey_status_t egetkey_status = EGETKEY_SUCCESS; int i = 0; const sgx_report_t *report = NULL; // check parameters // // key_request must be within the enclave if(!key_request || !sgx_is_within_enclave(key_request, sizeof(*key_request))) { err = SGX_ERROR_INVALID_PARAMETER; goto CLEANUP; } if (key_request->reserved1 != 0) { err = SGX_ERROR_INVALID_PARAMETER; goto CLEANUP; } for (i=0; i<SGX_KEY_REQUEST_RESERVED2_BYTES; ++i) { if (key_request->reserved2[i] != 0) { err = SGX_ERROR_INVALID_PARAMETER; goto CLEANUP; } } // key must be within the enclave if(!key || !sgx_is_within_enclave(key, sizeof(*key))) { err = SGX_ERROR_INVALID_PARAMETER; goto CLEANUP; } // check key_request->key_policy reserved bits if(key_request->key_policy & ~(SGX_KEYPOLICY_MRENCLAVE | SGX_KEYPOLICY_MRSIGNER | (KEY_POLICY_KSS))) { err = SGX_ERROR_INVALID_PARAMETER; goto CLEANUP; } // check if KSS flag is disabled but KSS related policy or config_svn is set report = sgx_self_report(); if (!(report->body.attributes.flags & SGX_FLAGS_KSS) && ((key_request->key_policy & KEY_POLICY_KSS) || key_request->config_svn > 0)) { err = SGX_ERROR_INVALID_PARAMETER; goto CLEANUP; } // allocate memory // // To minimize the effort of memory management, the two elements allocation // are combined in a single malloc. The calculation for the required size has // an assumption, that // the elements should be allocated in descending order of the alignment size. // // If the alignment requirements are changed, the allocation order needs to // change accordingly. // // Current allocation order is: // key_request -> key // // key_request: 512-byte aligned, 512-byte length // key: 16-byte aligned, 16-byte length size = ROUND_TO(sizeof(*key_request), KEY_REQUEST_ALIGN_SIZE) + ROUND_TO(sizeof(*key), KEY_ALIGN_SIZE); size += MAX(KEY_REQUEST_ALIGN_SIZE, KEY_ALIGN_SIZE) - 1; buffer = malloc(size); if(buffer == NULL) { err = SGX_ERROR_OUT_OF_MEMORY; goto CLEANUP; } memset(buffer, 0, size); buf_ptr = reinterpret_cast<size_t>(buffer); buf_ptr = ROUND_TO(buf_ptr, KEY_REQUEST_ALIGN_SIZE); tmp_key_request = reinterpret_cast<sgx_key_request_t *>(buf_ptr); buf_ptr += sizeof(*tmp_key_request); buf_ptr = ROUND_TO(buf_ptr, KEY_ALIGN_SIZE); tmp_key = reinterpret_cast<sgx_key_128bit_t *>(buf_ptr); // Copy data from user buffer to the aligned memory memcpy_s(tmp_key_request, sizeof(*tmp_key_request), key_request, sizeof(*key_request)); // Do EGETKEY egetkey_status = (egetkey_status_t) do_egetkey(tmp_key_request, tmp_key); switch(egetkey_status) { case EGETKEY_SUCCESS: err = SGX_SUCCESS; break; case EGETKEY_INVALID_ATTRIBUTE: err = SGX_ERROR_INVALID_ATTRIBUTE; break; case EGETKEY_INVALID_CPUSVN: err = SGX_ERROR_INVALID_CPUSVN; break; case EGETKEY_INVALID_ISVSVN: err = SGX_ERROR_INVALID_ISVSVN; break; case EGETKEY_INVALID_KEYNAME: err = SGX_ERROR_INVALID_KEYNAME; break; default: err = SGX_ERROR_UNEXPECTED; break; } CLEANUP: if((SGX_SUCCESS != err) && (NULL != key)) { // The key buffer should be filled with random number. 
// If sgx_read_rand returns failure, leave the key buffer untouched sgx_read_rand(reinterpret_cast<uint8_t *>(key), sizeof(*key)); } else if(NULL != key) { // Copy data to the user buffer memcpy_s(key, sizeof(*key), tmp_key, sizeof(*tmp_key)); } // cleanup if(buffer) { memset_s(buffer, size, 0, size); free(buffer); } return err; }
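// Caller sketch (illustrative): derive an MRSIGNER-bound seal key with a fresh random key_id.
// The attribute mask below is a simplified example, not the SDK's default sealing mask.
static sgx_status_t example_derive_seal_key(sgx_key_128bit_t *seal_key)
{
    sgx_key_request_t req;
    memset(&req, 0, sizeof(req));
    const sgx_report_t *rep = sgx_self_report();          // current CPUSVN/ISVSVN
    req.key_name   = SGX_KEYSELECT_SEAL;
    req.key_policy = SGX_KEYPOLICY_MRSIGNER;
    req.cpu_svn    = rep->body.cpu_svn;
    req.isv_svn    = rep->body.isv_svn;
    req.attribute_mask.flags = SGX_FLAGS_INITTED | SGX_FLAGS_DEBUG;   // simplified mask
    req.attribute_mask.xfrm  = 0;
    // a random key_id makes each derived key unique
    sgx_status_t ret = sgx_read_rand(reinterpret_cast<uint8_t *>(&req.key_id), sizeof(req.key_id));
    if (ret != SGX_SUCCESS)
        return ret;
    return sgx_get_key(&req, seal_key);
}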
/******************************************************************* ** Function name: create_vmc ** Description: create a VMC in SQLite Database and return UUID of the VMC to the caller. ** *******************************************************************/ pse_op_error_t create_vmc(const isv_attributes_t &owner_attributes, // [IN] ISV's attributes vmc_data_blob_t &data, // [IN,OUT] VMC blob data mc_rpdb_uuid_t &mc_rpdb_uuid) // [OUT] UUID of VMC { pse_op_error_t rc = OP_SUCCESS; sgx_status_t stat = SGX_SUCCESS; uint32_t tmp_rpdb_id = {0}; int leaf_node_id = 0; op_leafnode_flag_t op_leafnode_flag_info; int retry_times = 1; // Check MC Service Availability Status if((rc = get_mc_service_status()) != OP_SUCCESS) { if ((rc = initialize_sqlite_database_file(false)) != OP_SUCCESS) return rc; } // Calculate Owner ID and copy to "data->owner_id" if(SGX_SUCCESS != calculate_owner_id(owner_attributes, data.owner_policy, data.owner_attr_mask, data.owner_id)) { rc = OP_ERROR_INTERNAL; goto end; } // Copy Owner's SVN data.owner_svn = owner_attributes.isv_svn; // read sgx random number for uuid->nonce. stat = sgx_read_rand(mc_rpdb_uuid.nonce, UUID_NONCE_SIZE); if( SGX_SUCCESS != stat ) { rc = OP_ERROR_INTERNAL; goto end; } do{ // get an empty database leaf node and return corresponding ID as rpdb ID to caller stat = sqlite_get_empty_leafnode(&rc, &leaf_node_id, const_cast<sgx_measurement_t*>(&owner_attributes.mr_signer)); if(stat == SGX_SUCCESS) // OCALL success { if (rc == OP_SUCCESS) { // check leaf_node_id if (leaf_node_id < INIT_LEAF_NODE_ID_BASE || leaf_node_id > INIT_MAX_LEAF_NODE_ID) { // Invalid leaf node id, valid range must be [INIT_LEAF_NODE_ID_BASE, INIT_LEAF_NODE_ID_BASE*2-1] rc = OP_ERROR_INTERNAL; break; } } else if( (rc == OP_ERROR_DATABASE_FATAL || rc == OP_ERROR_INVALID_VMC_DB) && retry_times > 0) { // try to re-initialize vmc db if (OP_SUCCESS != (rc = initialize_sqlite_database_file(true))) { break; } else { // if successful, try to create again continue; } } else { // other errors break; } } else // OCALL failure { rc = OP_ERROR_INTERNAL; break; } // RPDB ID = LEAF ID - OFFSET(which is INIT_LEAF_NODE_ID_BASE) // Valid range of RPDB ID is from 0 to (INIT_LEAF_NODE_ID_BASE-1). tmp_rpdb_id = leaf_node_id - INIT_LEAF_NODE_ID_BASE; memcpy(mc_rpdb_uuid.entry_index, &tmp_rpdb_id, UUID_ENTRY_INDEX_SIZE); memcpy(data.nonce, mc_rpdb_uuid.nonce, UUID_NONCE_SIZE); // mark the flag as USED data.is_used = 1; // copy creator's mrsigner memcpy(&op_leafnode_flag_info.mr_signer, &owner_attributes.mr_signer, sizeof(sgx_measurement_t)); // will set the USED flag in SQLite Database op_leafnode_flag_info.op_type = SET_LEAFNODE_FLAG; // call operate_vmc to store VMC creation info into SQLite Database. rc = operate_vmc(owner_attributes, mc_rpdb_uuid, data, RPDB_OP_CREATE, &op_leafnode_flag_info); if((OP_ERROR_INVALID_VMC_DB == rc || OP_ERROR_DATABASE_FATAL == rc) && retry_times > 0) { // try to re-initialize vmc db if (OP_SUCCESS != (rc = initialize_sqlite_database_file(true)) ) { break; } else { // if successful, try to create vmc again continue; } } else { break; } }while(retry_times--); end: if(OP_SUCCESS != rc) { memset(mc_rpdb_uuid.entry_index, 0xFF, UUID_ENTRY_INDEX_SIZE); memset(mc_rpdb_uuid.nonce, 0x0, UUID_NONCE_SIZE); } return rc; }
extern "C" sgx_status_t sgx_mac_aadata_ex(const uint16_t key_policy, const sgx_attributes_t attribute_mask, const sgx_misc_select_t misc_mask, const uint32_t additional_MACtext_length, const uint8_t *p_additional_MACtext, const uint32_t sealed_data_size, sgx_sealed_data_t *p_sealed_data) { sgx_status_t err = SGX_ERROR_UNEXPECTED; sgx_report_t report; sgx_key_id_t keyID; sgx_key_request_t tmp_key_request; uint8_t payload_iv[SGX_SEAL_IV_SIZE]; memset(&payload_iv, 0, sizeof(payload_iv)); uint32_t sealedDataSize = sgx_calc_sealed_data_size(additional_MACtext_length, 0); // Check for overflow if (sealedDataSize == UINT32_MAX) { return SGX_ERROR_INVALID_PARAMETER; } // // Check parameters // // check key_request->key_policy reserved bits are not set and one of policy bits are set if ((key_policy & ~(SGX_KEYPOLICY_MRENCLAVE | SGX_KEYPOLICY_MRSIGNER)) || ((key_policy & (SGX_KEYPOLICY_MRENCLAVE | SGX_KEYPOLICY_MRSIGNER)) == 0)) { return SGX_ERROR_INVALID_PARAMETER; } if ((attribute_mask.flags & 0x3) != 0x3) { return SGX_ERROR_INVALID_PARAMETER; } // The AAD must be provided if ((additional_MACtext_length == 0) || (p_additional_MACtext == NULL)) { return SGX_ERROR_INVALID_PARAMETER; } // Ensure AAD does not cross enclave boundary if (!(sgx_is_within_enclave(p_additional_MACtext, additional_MACtext_length) || sgx_is_outside_enclave(p_additional_MACtext, additional_MACtext_length))) { return SGX_ERROR_INVALID_PARAMETER; } // Ensure sealed data blob is within an enclave during the sealing process if ((p_sealed_data == NULL) || (!sgx_is_within_enclave(p_sealed_data, sealed_data_size))) { return SGX_ERROR_INVALID_PARAMETER; } if (sealedDataSize != sealed_data_size) { return SGX_ERROR_INVALID_PARAMETER; } memset(&report, 0, sizeof(sgx_report_t)); memset(p_sealed_data, 0, sealedDataSize); memset(&keyID, 0, sizeof(sgx_key_id_t)); memset(&tmp_key_request, 0, sizeof(sgx_key_request_t)); // Get the report to obtain isv_svn and cpu_svn err = sgx_create_report(NULL, NULL, &report); if (err != SGX_SUCCESS) { goto clear_return; } // Get a random number to populate the key_id of the key_request err = sgx_read_rand(reinterpret_cast<uint8_t *>(&keyID), sizeof(sgx_key_id_t)); if (err != SGX_SUCCESS) { goto clear_return; } memcpy(&(tmp_key_request.cpu_svn), &(report.body.cpu_svn), sizeof(sgx_cpu_svn_t)); memcpy(&(tmp_key_request.isv_svn), &(report.body.isv_svn), sizeof(sgx_isv_svn_t)); tmp_key_request.key_name = SGX_KEYSELECT_SEAL; tmp_key_request.key_policy = key_policy; tmp_key_request.attribute_mask.flags = attribute_mask.flags; tmp_key_request.attribute_mask.xfrm = attribute_mask.xfrm; memcpy(&(tmp_key_request.key_id), &keyID, sizeof(sgx_key_id_t)); tmp_key_request.misc_mask = misc_mask; err = sgx_seal_data_iv(additional_MACtext_length, p_additional_MACtext, 0, NULL, payload_iv, &tmp_key_request, p_sealed_data); if (err == SGX_SUCCESS) { // Copy data from the temporary key request buffer to the sealed data blob memcpy(&(p_sealed_data->key_request), &tmp_key_request, sizeof(sgx_key_request_t)); } clear_return: // Clear temp state memset_s(&report, sizeof(sgx_report_t), 0, sizeof(sgx_report_t)); memset_s(&keyID, sizeof(sgx_key_id_t), 0, sizeof(sgx_key_id_t)); return err; }
//calculate launch token. key_id, attributes_le and then mac is updated. //return AE_SUCCESS on success static ae_error_t le_calc_lic_token(token_t* lictoken) { //calculate launch token sgx_key_request_t key_request; sgx_key_128bit_t launch_key; if(SGX_SUCCESS != sgx_read_rand((uint8_t*)&lictoken->key_id, sizeof(sgx_key_id_t))) { return LE_UNEXPECTED_ERROR; } // Create Key Request memset(&key_request, 0, sizeof(key_request)); //setup key_request parameters to derive launch key key_request.key_name = SGX_KEYSELECT_LICENSE; memcpy(&key_request.key_id, &lictoken->key_id, sizeof(key_request.key_id)); memcpy(&key_request.cpu_svn, &(lictoken->cpu_svn_le), sizeof(key_request.cpu_svn)); memcpy(&key_request.isv_svn, &(lictoken->isv_svn_le), sizeof(key_request.isv_svn)); key_request.attribute_mask.xfrm = 0; //0xFFFFFFFFFFFFFFFB: ~SGX_FLAGS_MODE64BIT key_request.attribute_mask.flags = ~SGX_FLAGS_MODE64BIT; key_request.misc_mask = 0xFFFFFFFF; lictoken->masked_misc_select_le &= key_request.misc_mask; lictoken->attributes_le.flags = (lictoken->attributes_le.flags) & (key_request.attribute_mask.flags); lictoken->attributes_le.xfrm = (lictoken->attributes_le.xfrm) & (key_request.attribute_mask.xfrm); // EGETKEY sgx_status_t sgx_ret = sgx_get_key(&key_request,&launch_key); if(SGX_SUCCESS != sgx_ret) { return LE_GET_LICENSE_KEY_ERROR; } sgx_cmac_state_handle_t p_cmac_handle = NULL; do{ sgx_ret = sgx_cmac128_init(&launch_key, &p_cmac_handle); if(SGX_SUCCESS != sgx_ret) { break; } sgx_ret = sgx_cmac128_update((uint8_t*)&lictoken->body, sizeof(lictoken->body), p_cmac_handle); if(SGX_SUCCESS != sgx_ret) { break; } sgx_ret = sgx_cmac128_final(p_cmac_handle, (sgx_cmac_128bit_tag_t*)&lictoken->mac); }while(0); if (p_cmac_handle != NULL) { sgx_cmac128_close(p_cmac_handle); } //clear launch_key after being used memset_s(launch_key,sizeof(launch_key), 0, sizeof(launch_key)); if (SGX_SUCCESS != sgx_ret) { return AE_FAILURE; } return AE_SUCCESS; }
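// Side note (illustrative): the init/update/final sequence above CMACs one contiguous buffer,
// so the one-shot SDK helper would produce the same tag. A minimal sketch, assuming the same
// token layout and a caller-supplied launch key:
static ae_error_t example_token_mac_oneshot(token_t *lictoken, const sgx_cmac_128bit_key_t *launch_key)
{
    sgx_cmac_128bit_tag_t mac;
    if (SGX_SUCCESS != sgx_rijndael128_cmac_msg(launch_key,
                                                (const uint8_t *)&lictoken->body,
                                                sizeof(lictoken->body), &mac))
        return AE_FAILURE;
    memcpy(&lictoken->mac, &mac, sizeof(mac));
    return AE_SUCCESS;
}
// The streaming form in le_calc_lic_token() is preferable when the MACed data is produced piece-meal.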