// Allocate `size` bytes on the *untrusted* (outside) stack for use by an
// OCALL. The untrusted stack pointer is read from the current thread's SSA
// frame and written back after the allocation. Any sign of tampering
// (in-enclave stack pointer, underflow, overlap with the enclave, faked
// near-zero stack) aborts the enclave immediately.
// Returns: pointer to the allocated outside-stack region (never NULL —
// all failure paths call abort()).
void * sgx_ocalloc(size_t size)
{
    // read the outside stack address from current SSA
    thread_data_t *thread_data = get_thread_data();
    ssa_gpr_t *ssa_gpr = reinterpret_cast<ssa_gpr_t *>(thread_data->first_ssa_gpr);
    size_t addr = ssa_gpr->REG(sp_u);

    // check u_rsp points to the untrusted address.
    // if the check fails, it should be hacked. call abort directly
    if(!sgx_is_outside_enclave(reinterpret_cast<void *>(addr), sizeof(size_t)))
    {
        abort();
    }

    // size is too large to allocate (subtracting it would underflow the
    // stack pointer). call abort() directly.
    if(addr < size)
    {
        abort();
    }

    // calculate the start address for the allocated memory
    addr -= size;
    addr &= ~(static_cast<size_t>(OC_ROUND - 1)); // round down for stack alignment

    // the allocated memory has overlap with enclave, abort the enclave
    if(!sgx_is_outside_enclave(reinterpret_cast<void *>(addr), size))
    {
        abort();
    }

    // probe the outside stack to ensure that we do not skip over the stack guard page.
    // we need to probe all the pages including the first page and the last page:
    //  - the first page needs to be probed in case uRTS did not touch that page before EENTER
    //  - the last page needs to be probed in case the enclave did not touch that page before another ocalloc
    size_t first_page = TRIM_TO_PAGE(ssa_gpr->REG(sp_u) - 1);
    size_t last_page = TRIM_TO_PAGE(addr);

    // To avoid the dead-loop in the following for(...) loop (`page` is
    // unsigned, so `page >= 0` is always true and `page -= SE_PAGE_SIZE`
    // would wrap around): an attacker might fake a stack address that lies
    // on page 0 (i.e. below SE_PAGE_SIZE).
    if (last_page == 0)
    {
        abort();
    }

    // the compiler may optimize the following code to probe the pages in any order,
    // while we only expect the probe order to be from higher addr to lower addr,
    // so use volatile to avoid optimization by the compiler
    for(volatile size_t page = first_page; page >= last_page; page -= SE_PAGE_SIZE)
    {
        *reinterpret_cast<uint8_t *>(page) = 0;
    }

    // update the outside stack address in the SSA so nested/subsequent
    // allocations start below this one
    ssa_gpr->REG(sp_u) = addr;

    return reinterpret_cast<void *>(addr);
}
// Add a section's raw data to the enclave, one page at a time, via the
// enclave-creator driver interface.
// NOTE(review): this chunk ends inside the function — the code after the
// while loop (presumably handling the trailing partial page via
// `added_page`, zero-fill of the remaining virtual size, and the final
// return) is not visible here; claims below cover only the visible part.
// Returns SGX_SUCCESS or the first error from add_enclave_page().
int CLoader::build_mem_region(const section_info_t * const sec_info)
{
    int ret = SGX_SUCCESS;
    // staging buffer for a partial page — not used in the visible portion;
    // presumably filled/added after the loop (TODO confirm in full source)
    uint8_t added_page[SE_PAGE_SIZE];
    uint64_t offset = 0;
    uint8_t *raw_ptr = NULL;
    uint64_t rva = 0;
    sec_info_t sinfo;
    memset(&sinfo, 0, sizeof(sinfo));

    rva = sec_info->rva + offset;

    // add one page per iteration, covering only the whole-page portion of
    // the section's raw data (TRIM_TO_PAGE rounds the size down)
    while(offset < TRIM_TO_PAGE(sec_info->raw_data_size))
    {
        raw_ptr = sec_info->raw_data + offset;
        // default page flags come from the section; may gain SI_FLAG_W below
        sinfo.flags = sec_info->flag;

        // check the per-page writable bitmap (if present) to see whether
        // this page must be mapped writable
        if(sec_info->bitmap && sec_info->bitmap->size())
        {
            uint64_t page_frame = rva >> SE_PAGE_SHIFT;
            // NOTE:
            //   Current enclave size is not beyond 64G, so the type-casting from (uint64>>15) to (size_t) is OK.
            //   In the future, if the max enclave size is extended to beyond (1<<49), this type-casting will not work.
            //   It only impacts the enclave signing process. (32bit signing tool to sign 64 bit enclaves)
            if((*sec_info->bitmap)[(size_t)(page_frame / 8)] & (1 << (page_frame % 8)))
                sinfo.flags = sec_info->flag | SI_FLAG_W;
        }

        // call driver API to add page; raw_ptr needn't be page-aligned, the driver will handle page alignment
        if(SGX_SUCCESS != (ret = get_enclave_creator()->add_enclave_page(ENCLAVE_ID_IOCTL, raw_ptr, rva, sinfo, ADD_EXTEND_PAGE)))
        {
            // if add page failed, the enclave should be removed somewhere
            // by the caller/cleanup path (not visible in this chunk)
            return ret;
        }
        offset += SE_PAGE_SIZE;
        rva = sec_info->rva + offset;
    }