//===========================================================================
//	AES_GCM_Encrypt
//
//	One-shot encryption helper: encrypts inLen bytes from inSrc into inDst
//	using the GCM state held in inContext. Returns the status reported by
//	the underlying gcm_encrypt call.
//===========================================================================
OSStatus AES_GCM_Encrypt( AES_GCM_Context *inContext, const void *inSrc, size_t inLen, void *inDst )
{
	// Delegate directly to the underlying GCM implementation.
	// require_noerr() preserves the project's standard debug logging on failure.
	OSStatus err = gcm_encrypt( inDst, inSrc, inLen, &inContext->ctx );
	require_noerr( err, exit );

exit:
	return( err );
}
void Document::encrypt(const lj::Uuid& server, const uint8_t* key, int key_size, const std::string& key_name, const std::vector<std::string>& paths) { // Only accept 256bit keys. if (k_key_size != key_size) { throw LJ__Exception("Encrypt key must be 256bits."); } // Create the source data for the application. size_t source_size; std::unique_ptr < uint8_t[] > source; if (paths.empty()) { // An empty paths list means we should encrypt everything. source.reset(doc_->nav(".").to_binary(&source_size)); } else { // In order to encrypt specific paths, we have to // copy the paths out of the current document. // The paths will be removed from the document after we successfully // encrypt them. lj::bson::Node tmp; for (auto iter = paths.begin(); paths.end() != iter; ++iter) { lj::bson::Node* ptr = new lj::bson::Node(doc_->nav(".").nav(*iter)); tmp.nav(".").set_child(*iter, ptr); } source.reset(tmp.to_binary(&source_size)); } // Generate an initialization vector for the crypto. uint8_t iv[GCM_IV_SIZE]; std::fstream rnd("/dev/random", std::ios_base::in); rnd.read(reinterpret_cast<char*>(iv), GCM_IV_SIZE); // Prepare all the data structures for the AES crypto in GCM mode. struct aes_ctx cipher_ctx; aes_set_encrypt_key(&cipher_ctx, key_size, key); struct gcm_key auth_key; gcm_set_key(&auth_key, &cipher_ctx, (nettle_crypt_func*) & aes_encrypt); struct gcm_ctx auth_ctx; gcm_set_iv(&auth_ctx, &auth_key, GCM_IV_SIZE, iv); // Perform the actual encryption. std::unique_ptr < uint8_t[] > destination(new uint8_t[source_size]); gcm_encrypt(&auth_ctx, &auth_key, &cipher_ctx, (nettle_crypt_func*) & aes_encrypt, source_size, destination.get(), source.get()); lj::Wiper < uint8_t[]>::wipe(source, source_size); // Extract the authentication information. uint8_t auth_tag[GCM_BLOCK_SIZE]; gcm_digest(&auth_ctx, &auth_key, &cipher_ctx, (nettle_crypt_func*) & aes_encrypt, GCM_BLOCK_SIZE, auth_tag); // Create bson Nodes for data necessary for decryption. 
lj::bson::Node* encrypted_node = lj::bson::new_binary( destination.get(), source_size, lj::bson::Binary_type::k_bin_user_defined); lj::bson::Node* authentication_node = lj::bson::new_binary( auth_tag, GCM_BLOCK_SIZE, lj::bson::Binary_type::k_bin_user_defined); lj::bson::Node* ivector_node = lj::bson::new_binary( iv, GCM_IV_SIZE, lj::bson::Binary_type::k_bin_user_defined); // Wipe the temporary memory areas clean lj::Wiper<uint8_t[]>::wipe(destination, source_size); lj::Wiper<uint8_t[]>::wipe(auth_tag, GCM_BLOCK_SIZE); lj::Wiper<uint8_t[]>::wipe(iv, GCM_IV_SIZE); // Document is unmodified up to this point. Now we add the // encrypted data. This is added before removing any data // incase an exception is thrown. taint(server); doc_->nav(k_crypt_data).set_child(key_name, encrypted_node); doc_->nav(k_crypt_auth).set_child(key_name, authentication_node); doc_->nav(k_crypt_vector).set_child(key_name, ivector_node); // Remove the paths that were just encrypted. if (paths.empty()) { doc_->set_child(".", nullptr); } else { for (auto iter = paths.begin(); paths.end() != iter; ++iter) { doc_->nav(".").set_child(*iter, nullptr); } } }
/*
 * @func encrypt_or_clear_ip_sections modifies the content of some sections.
 * 1. If section content cannot be modified without disrupting enclave signing or loading flows
 *    then section content is not modified
 * 2. Allocable sections (copied to application address space at shared object's load time)
 *    are encrypted.
 * 3. The content of sections that are not allocable is zeroed
 * @param IN pcl_data_t* dat, ELF data
 * @param IN uint8_t* key, the AES key for GCM encrypt
 * @param INOUT uint8_t* elf_buf, base address of ELF binary buffer
 * @param IN size_t elf_size, size in bytes of the ELF binary buffer
 * @param OUT pcl_table_t* tbl, pointer to PCL table
 * @param OUT uint32_t* num_rvas_out, total number of sections that are encrypted
 * @param bool debug, true iff enclave is required to support debug
 * @return encip_ret_e:
 *         ENCIP_ERROR_ENCSECS_INVALID_PARAM if any input parameter is NULL
 *         ENCIP_ERROR_ENCSECS_RVAS_OVERFLOW if out of entries in PCL table
 *         ENCIP_ERROR_ENCSECS_COUNTER_OVERFLOW if a section is too large for the GCM counter
 *         ENCIP_ERROR_PARSE_ELF_INVALID_IMAGE if section data falls outside the image
 *         Respective error results in case any of the functions encrypt or update_flags fail.
 *         ENCIP_SUCCESS if success
 */
static encip_ret_e encrypt_or_clear_ip_sections(
                IN pcl_data_t* dat,
                IN uint8_t* key,
                INOUT uint8_t* elf_buf,
                size_t elf_size,
                OUT pcl_table_t* tbl,
                OUT uint32_t* num_rvas_out,
                bool debug)
{
    if( NULL == dat || NULL == key || NULL == elf_buf || NULL == tbl || NULL == num_rvas_out)
        return ENCIP_ERROR_ENCSECS_INVALID_PARAM;
    uint32_t num_rvas = 0;

    // Go over sections headers to find sections to encrypt or clear:
    char* sec_name = NULL;
    for(uint16_t secidx = 1; secidx < dat->nsections; secidx++)
    {
        if(dat->elf_sec[secidx].sh_name >= dat->elf_sec[dat->shstrndx].sh_size)
            return ENCIP_ERROR_PARSE_ELF_INVALID_IMAGE;
        sec_name = dat->sections_names + dat->elf_sec[secidx].sh_name;
        /*
         * Verifying string starts before end of image. Assuming (but not checking)
         * that string ends before end of section. Additional check will complicate code.
         * Assuming the platform this application is running on is not compromised.
         * Fix: use >= -- a name pointing exactly at elf_buf + elf_size already
         * starts one byte past the end of the image.
         */
        if((uint8_t*)sec_name >= elf_buf + elf_size)
            return ENCIP_ERROR_PARSE_ELF_INVALID_IMAGE;
        if(can_modify(sec_name, debug))
        {
            uint8_t* va = (uint8_t *)(elf_buf + dat->elf_sec[secidx].sh_offset);
            size_t size = dat->elf_sec[secidx].sh_size;
            // Reject sections that start past, wrap around, or extend beyond the image:
            if((va >= elf_buf + elf_size) ||
               (va + size < va)           ||
               (va + size > elf_buf + elf_size))
                return ENCIP_ERROR_PARSE_ELF_INVALID_IMAGE;
            // If section is allocable (mapped into process's virtual memory), encrypt it:
            if(SHF_ALLOC & dat->elf_sec[secidx].sh_flags)
            {
                if(PCL_MAX_NUM_ENCRYPTED_SECTIONS <= num_rvas)
                {
                    /*
                     * No more empty entries in PCL table.
                     * To fix - redefine PCL_MAX_NUM_ENCRYPTED_SECTIONS in pcl_common.h
                     */
                    printf("Error: No more empty entries in Intel(R) SGX PCL table\n");
                    printf("To fix - redefine PCL_MAX_NUM_ENCRYPTED_SECTIONS in pcl_common.h\n");
                    return ENCIP_ERROR_ENCSECS_RVAS_OVERFLOW;
                }
                if(PCL_GCM_NUM_BLOCKS(size) > PCL_GCM_MAX_NUM_BLOCKS)
                {
                    /*
                     * Size in 16-bytes-blocks exceeds (2^32 - 2).
                     * Only happens if cipher-text size is ~64GB.
                     */
                    return ENCIP_ERROR_ENCSECS_COUNTER_OVERFLOW;
                }
                // Fresh random IV per section (IV reuse with the same key breaks GCM):
                uint8_t* iv = (uint8_t*)&(tbl->rvas_sizes_tags_ivs[num_rvas].iv.val);
                encip_ret_e ret = init_random_iv(iv);
                if(ENCIP_ERROR(ret))
                    return ret;
                // Encrypt in place (va is both plaintext source and ciphertext destination):
                uint8_t* tag = (uint8_t*)&(tbl->rvas_sizes_tags_ivs[num_rvas].tag);
                ret = gcm_encrypt(va, size, NULL, 0, (uint8_t *)key, iv, va, tag);
                if(ENCIP_ERROR(ret))
                {
                    printf("Failed to gcm-encrypt section %s\n", sec_name);
                    return ret;
                }
                // Insert entry to table:
                tbl->rvas_sizes_tags_ivs[num_rvas].rva  = dat->elf_sec[secidx].sh_addr;
                tbl->rvas_sizes_tags_ivs[num_rvas].size = size;
                // Update flags to writable:
                ret = update_flags(secidx, dat);
                if(ENCIP_ERROR(ret))
                    return ret;
                // Increment num_rvas:
                num_rvas++;
            }
            // Else (section is not allocable), zero it:
            else
            {
                memset(va, 0, size);
            }
        }
    }
    *num_rvas_out = num_rvas;
    return ENCIP_SUCCESS;
}