Example no. 1
0
int sl_fcall_mngr_clone(struct sl_fcall_mngr* mngr, struct sl_fcall_mngr* untrusted)
{
    PANIC_ON(!sgx_is_outside_enclave(untrusted, sizeof(*untrusted)));
    sgx_lfence();

    BUG_ON(mngr == NULL);
    BUG_ON(untrusted == NULL);

    sl_fcall_type_t type_u = untrusted->fmn_type;
    PANIC_ON((type_u != SL_FCALL_TYPE_ECALL) && (type_u != SL_FCALL_TYPE_OCALL));
    
    mngr->fmn_type = type_u;

    int ret = sl_siglines_clone(&mngr->fmn_siglns,
                                &untrusted->fmn_siglns,
                                can_type_process(type_u) ? process_fcall : NULL);
    if (ret) return ret;

    // Check that we have the right call manager,
    // i.e. an ECALL manager on the untrusted side or an OCALL manager on the trusted side.
    PANIC_ON(fcall_type2direction(type_u) != sl_siglines_get_direction(&mngr->fmn_siglns));

    uint32_t num_lines = sl_siglines_size(&mngr->fmn_siglns);
    
    BUG_ON(untrusted->fmn_bufs == NULL);

    struct sl_fcall_buf** bufs_u = untrusted->fmn_bufs;
    PANIC_ON(!sgx_is_outside_enclave(bufs_u, sizeof(bufs_u[0]) * num_lines));
    sgx_lfence();
    
    mngr->fmn_bufs = bufs_u;
    mngr->fmn_call_table = NULL;

    return 0;
}
Example no. 2
0
void * sgx_ocalloc(size_t size)
{
    // read the outside stack address from current SSA
    thread_data_t *thread_data = get_thread_data();
    ssa_gpr_t *ssa_gpr = reinterpret_cast<ssa_gpr_t *>(thread_data->first_ssa_gpr);
    size_t addr = ssa_gpr->REG(sp_u);

    // check that u_rsp points to an untrusted address.
    // if the check fails, the value has likely been tampered with; call abort() directly
    if(!sgx_is_outside_enclave(reinterpret_cast<void *>(addr), sizeof(size_t)))
    {
        abort();
    }

    // the requested size is too large to allocate (it would underflow the stack pointer); call abort() directly.
    if(addr < size)
    {
        abort();
    }

    // calculate the start address for the allocated memory
    addr -= size;
    addr &= ~(static_cast<size_t>(OC_ROUND - 1));  // for stack alignment

    // the allocated memory has overlap with enclave, abort the enclave
    if(!sgx_is_outside_enclave(reinterpret_cast<void *>(addr), size))
    {
        abort();
    }

    // probe the outside stack to ensure that we do not skip over the stack guard page
    // we need to probe all the pages, including the first page and the last page
    // the first page needs to be probed in case the uRTS did not touch that page before EENTER-ing the enclave
    // the last page needs to be probed in case the enclave did not touch that page before another OCALLOC
    size_t first_page = TRIM_TO_PAGE(ssa_gpr->REG(sp_u) - 1);
    size_t last_page = TRIM_TO_PAGE(addr);

    // To avoid a dead loop in the following for(...) loop:
    // an attacker might fake a stack address within the first page (addresses 0-4095),
    // which would make last_page zero.
    if (last_page == 0)
    {
        abort();
    }

    // the compiler may reorder or optimize away the following probes,
    // while we require the probe order to be from higher addresses to lower addresses,
    // so use volatile to prevent such optimization
    for(volatile size_t page = first_page; page >= last_page; page -= SE_PAGE_SIZE)
    {
        *reinterpret_cast<uint8_t *>(page) = 0;
    }

    // update the outside stack address in the SSA
    ssa_gpr->REG(sp_u) = addr;

    return reinterpret_cast<void *>(addr);
}
Example no. 3
0
sgx_status_t sgx_read_rand(unsigned char *rand, size_t length_in_bytes)
{
    // check parameters
    //
    // rand can be within or outside the enclave
    if(!rand || !length_in_bytes)
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if(!sgx_is_within_enclave(rand, length_in_bytes) && !sgx_is_outside_enclave(rand, length_in_bytes))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    // loop to rdrand
    uint32_t rand_num = 0;
    while(length_in_bytes > 0)
    {
        sgx_status_t status = __do_get_rand32(&rand_num);
        if(status != SGX_SUCCESS)
        {
            return status;
        }

        size_t size = (length_in_bytes < sizeof(rand_num)) ? length_in_bytes : sizeof(rand_num);
        memcpy(rand, &rand_num, size);

        rand += size;
        length_in_bytes -= size;
    }
    memset_s(&rand_num, sizeof(rand_num), 0, sizeof(rand_num));
    return SGX_SUCCESS;
}
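
A minimal usage sketch for sgx_read_rand (declared in sgx_trts.h), assuming a hypothetical in-enclave helper named fill_nonce; it only illustrates checking the return status before the buffer is used.

#include <stdint.h>
#include "sgx_trts.h"   /* sgx_read_rand */

/* Hypothetical helper: fill a 128-bit nonce with hardware randomness. */
static sgx_status_t fill_nonce(uint8_t nonce[16])
{
    sgx_status_t st = sgx_read_rand(nonce, 16);
    if (st != SGX_SUCCESS)
        return st;   /* invalid parameters or rdrand failure */
    return SGX_SUCCESS;
}
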
Example no. 4
0
// init_enclave()
//      Initialize enclave.
// Parameters:
//      [IN] enclave_base - the enclave base address
//      [IN] ms - the marshalling structure passed by uRTS
// Return Value:
//       0 - success
//      -1 - fail
//
extern "C" int init_enclave(void *enclave_base, void *ms)
{
    if(enclave_base == NULL || ms == NULL)
    {
        return -1;
    }

    // relocation
    if(0 != relocate_enclave(enclave_base))
    {
        return -1;
    }

    // Check if the ms is outside the enclave.
    // sgx_is_outside_enclave() should be placed after relocate_enclave()
    cpu_sdk_info_t *info = (cpu_sdk_info_t *)ms;
    if(!sgx_is_outside_enclave(info, sizeof(cpu_sdk_info_t)))
    {
        return -1;
    }
    const sdk_version_t sdk_version = info->version;
    const uint64_t cpu_features = info->cpu_features;
    
    if (sdk_version != SDK_VERSION_1_5)
        return -1;

    // xsave
    uint64_t xfrm = get_xfeature_state();

    // optimized libs
    if(0 != init_optimized_libs(cpu_features, xfrm))
    {
        CLEAN_XFEATURE_REGS
        return -1;
    }
Example no. 5
0
int sl_siglines_clone(struct sl_siglines* sglns,
                      struct sl_siglines* untrusted,
                      sl_sighandler_t handler)
{
    PANIC_ON(!sgx_is_outside_enclave(untrusted, sizeof(*untrusted)));
    sgx_lfence();

    sl_siglines_dir_t dir = untrusted->dir;
    PANIC_ON((dir != SL_SIGLINES_DIR_T2U) && (dir != SL_SIGLINES_DIR_U2T));

    BUG_ON(sglns == NULL);

    sglns->dir = dir;

    BUG_ON(is_direction_sender(dir) && (handler != NULL));

    uint32_t num_lines = untrusted->num_lines;
    if ((num_lines == 0) || ((num_lines % NBITS_PER_UINT64) != 0))
        return -EINVAL;
    sglns->num_lines = num_lines;
    uint32_t nlong = num_lines / NBITS_PER_UINT64;


    BUG_ON(untrusted->event_lines == NULL);

    uint64_t* event_lines_u = untrusted->event_lines;
    PANIC_ON(!sgx_is_outside_enclave(event_lines_u, sizeof(uint64_t) * nlong));
    sgx_lfence();
    sglns->event_lines = event_lines_u;

    uint64_t* free_lines = NULL;
    if (is_direction_sender(dir)) 
    {
        free_lines = malloc(sizeof(uint64_t) * nlong);
        if (free_lines == NULL) 
            return -ENOMEM;
		
        for (uint32_t i = 0; i < nlong; i++)
            free_lines[i] = (uint64_t)(-1); // all 1s -> free
    }
    sglns->free_lines = free_lines;

    sglns->handler = handler;

    return 0;
}
Example no. 6
0
/* Override the weak symbol defined in tRTS */
sgx_status_t do_init_switchless(struct sl_uswitchless* handle) 
{
    PANIC_ON(!sgx_is_outside_enclave(handle, sizeof(*handle)));
    sgx_lfence();

	if (lock_cmpxchg64((uint64_t*)&sl_uswitchless_handle, (uint64_t)NULL, (uint64_t)handle) != (uint64_t)NULL)
	{
		return SGX_ERROR_UNEXPECTED;
	}
    return SGX_SUCCESS;
}
Example no. 7
0
/*
 * ecall_type_enum_union:
 *   enum_foo_t/union_foo_t is defined in EDL 
 *   and can be used in ECALL.
 */
void ecall_type_enum_union(enum enum_foo_t val1, union union_foo_t *val2)
{
    if (sgx_is_outside_enclave(val2, sizeof(union union_foo_t)) != 1)
        abort();
    val2->union_foo_0 = 1;
    val2->union_foo_1 = 2; /* overwrite union_foo_0 */
    assert(val1 == ENUM_FOO_0);
#ifndef DEBUG
    UNUSED(val1);
#endif
}
Example no. 8
0
// sgx_ocfree()
// Parameters:
//      N/A
// Return Value:
//      N/A
// sgx_ocfree restores the original outside stack pointer in the SSA.
// Do not call this function if you still need the buffer allocated by sgx_ocalloc within the ECALL.
void sgx_ocfree()
{
    // ECALL stack frame
    //           last_sp -> |             |
    //                       -------------
    //                      | ret_addr    |
    //                      | xbp_u       |
    //                      | xsp_u       |

    thread_data_t *thread_data = get_thread_data();
    ssa_gpr_t *ssa_gpr = reinterpret_cast<ssa_gpr_t *>(thread_data->first_ssa_gpr);
    uintptr_t *addr = reinterpret_cast<uintptr_t *>(thread_data->last_sp);
    uintptr_t usp = *(addr - 3);
    if(!sgx_is_outside_enclave(reinterpret_cast<void *>(usp), sizeof(uintptr_t)))
    {
        abort();
    }
    ssa_gpr->REG(sp_u) = usp;
}
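
A hedged sketch of how sgx_ocalloc and sgx_ocfree (shown in Examples no. 2 and no. 8) are typically paired inside an OCALL bridge, which is normally generated by edger8r: the marshalling block is allocated on the untrusted stack, filled by copy, and released before returning. The structure name, OCALL table index and function name below (ms_print_t, 0, do_ocall_print) are illustrative, not taken from the SDK.

#include <string.h>
#include "sgx_edger8r.h"   /* sgx_ocalloc, sgx_ocfree, sgx_ocall */

/* Illustrative marshalling structure for a hypothetical "print" OCALL. */
typedef struct ms_print_t {
    const char *ms_str;
} ms_print_t;

static sgx_status_t do_ocall_print(const char *str)
{
    size_t len = strlen(str) + 1;

    /* allocate the struct plus the string payload on the untrusted stack */
    ms_print_t *ms = (ms_print_t *)sgx_ocalloc(sizeof(*ms) + len);
    if (ms == NULL) {
        sgx_ocfree();
        return SGX_ERROR_UNEXPECTED;
    }

    char *str_u = (char *)ms + sizeof(*ms);
    memcpy(str_u, str, len);                 /* copy the data out of the enclave */
    ms->ms_str = str_u;

    sgx_status_t status = sgx_ocall(0, ms);  /* 0 = hypothetical OCALL table index */
    sgx_ocfree();                            /* restore the outside stack pointer */
    return status;
}
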
Example no. 9
0
/* ecall_pointer_user_check, ecall_pointer_in, ecall_pointer_out, ecall_pointer_in_out:
 *   The root ECALLs to test [in], [out], [user_check] attributes.
 */
size_t ecall_pointer_user_check(void *val, size_t sz)
{
    /* check if the buffer is allocated outside */
    if (sgx_is_outside_enclave(val, sz) != 1)
        abort();

    char tmp[100] = {0};
    size_t len = sz>100?100:sz;
    
    /* copy the memory into the enclave to make sure 'val' 
     * is not being changed in checksum_internal() */
    memcpy(tmp, val, len);
    
    int32_t sum = checksum_internal((char *)tmp, len);
    printf("Checksum(0x%p, %zu) = 0x%x\n", 
            val, len, sum);
    
    /* modify outside memory directly */
    memcpy(val, "SGX_SUCCESS", len>12?12:len);

    return len;
}
Example no. 10
0
/* the caller is supposed to fill the quote field in emp_msg3 before calling
 * this function.*/
extern "C" sgx_status_t sgx_ra_get_msg3_trusted(
    sgx_ra_context_t context,
    uint32_t quote_size,
    sgx_report_t* qe_report,
    sgx_ra_msg3_t *emp_msg3,    //(mac||g_a||ps_sec_prop||quote)
    uint32_t msg3_size)
{
    if(vector_size(&g_ra_db) <= context ||!quote_size || !qe_report || !emp_msg3)
        return SGX_ERROR_INVALID_PARAMETER;

    ra_db_item_t* item = NULL;
    if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL )
        return SGX_ERROR_INVALID_PARAMETER;

    //check integer overflow of msg3_size and quote_size
    if (UINTPTR_MAX - reinterpret_cast<uintptr_t>(emp_msg3) < msg3_size ||
        UINT32_MAX - quote_size < sizeof(sgx_ra_msg3_t) ||
        sizeof(sgx_ra_msg3_t) + quote_size != msg3_size)
        return SGX_ERROR_INVALID_PARAMETER;

    if (!sgx_is_outside_enclave(emp_msg3, msg3_size))
        return SGX_ERROR_INVALID_PARAMETER;
    //
    // fence after the boundary check; this also stops speculation in case
    // the branch associated with sizeof(sgx_ra_msg3_t) + quote_size != msg3_size
    // mispredicts
    //
    sgx_lfence();

    sgx_status_t se_ret = SGX_ERROR_UNEXPECTED;

    //verify qe report
    se_ret = sgx_verify_report(qe_report);
    if(se_ret != SGX_SUCCESS)
    {
        if (SGX_ERROR_MAC_MISMATCH != se_ret &&
            SGX_ERROR_OUT_OF_MEMORY != se_ret)
            se_ret = SGX_ERROR_UNEXPECTED;
        return se_ret;
    }

    sgx_spin_lock(&item->item_lock);
    //sgx_ra_proc_msg2_trusted must have been called
    if (item->state != ra_proc_msg2ed)
    {
        sgx_spin_unlock(&item->item_lock);
        return SGX_ERROR_INVALID_STATE;
    }
    //verify qe_report attributes and mr_enclave same as quoting enclave
    if( memcmp( &qe_report->body.attributes, &item->qe_target.attributes, sizeof(sgx_attributes_t)) ||
        memcmp( &qe_report->body.mr_enclave, &item->qe_target.mr_enclave, sizeof(sgx_measurement_t)) )
    {
        sgx_spin_unlock(&item->item_lock);
        return SGX_ERROR_INVALID_PARAMETER;
    }

    sgx_ra_msg3_t msg3_except_quote_in;
    sgx_cmac_128bit_key_t smk_key;
    memcpy(&msg3_except_quote_in.g_a, &item->g_a, sizeof(msg3_except_quote_in.g_a));
    memcpy(&msg3_except_quote_in.ps_sec_prop, &item->ps_sec_prop,
        sizeof(msg3_except_quote_in.ps_sec_prop));
    memcpy(&smk_key, &item->smk_key, sizeof(smk_key));
    sgx_spin_unlock(&item->item_lock);

    sgx_sha_state_handle_t sha_handle = NULL;
    sgx_cmac_state_handle_t cmac_handle = NULL;


    //SHA256(NONCE || emp_quote)
    sgx_sha256_hash_t hash = {0};
    se_ret = sgx_sha256_init(&sha_handle);
    if (SGX_SUCCESS != se_ret)
    {
        if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
            se_ret = SGX_ERROR_UNEXPECTED;
        return se_ret;
    }
    if (NULL == sha_handle)
    {
        return SGX_ERROR_UNEXPECTED;
    }
    do
    {
        se_ret = sgx_sha256_update((uint8_t *)&item->quote_nonce,
            sizeof(item->quote_nonce),
            sha_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

         //cmac   M := ga || PS_SEC_PROP_DESC(all zero if unused) ||emp_quote
        sgx_cmac_128bit_tag_t mac;
        se_ret = sgx_cmac128_init(&smk_key, &cmac_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        if (NULL == cmac_handle)
        {
            se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        se_ret = sgx_cmac128_update((uint8_t*)&msg3_except_quote_in.g_a,
            sizeof(msg3_except_quote_in.g_a), cmac_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        se_ret = sgx_cmac128_update((uint8_t*)&msg3_except_quote_in.ps_sec_prop,
            sizeof(msg3_except_quote_in.ps_sec_prop), cmac_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        // sha256 and cmac quote
        uint8_t quote_piece[32];
        const uint8_t* emp_quote_piecemeal = emp_msg3->quote;
        uint32_t quote_piece_size = static_cast<uint32_t>(sizeof(quote_piece));

        while (emp_quote_piecemeal < emp_msg3->quote + quote_size)
        {
            //calculate the size of one piece; each piece is sizeof(quote_piece) bytes except possibly the last one.
            if (static_cast<uint32_t>(emp_msg3->quote + quote_size - emp_quote_piecemeal) < quote_piece_size)
                quote_piece_size = static_cast<uint32_t>(emp_msg3->quote - emp_quote_piecemeal) + quote_size;
            memcpy(quote_piece, emp_quote_piecemeal, quote_piece_size);
            se_ret = sgx_sha256_update(quote_piece,
                                    quote_piece_size,
                                    sha_handle);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }
            se_ret = sgx_cmac128_update(quote_piece,
                                    quote_piece_size,
                                    cmac_handle);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }
            emp_quote_piecemeal += sizeof(quote_piece);
        }
        ERROR_BREAK(se_ret);

        //get sha256 hash value
        se_ret = sgx_sha256_get_hash(sha_handle, &hash);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        //get cmac value
        se_ret = sgx_cmac128_final(cmac_handle, &mac);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        //verify qe_report->body.report_data == SHA256(NONCE || emp_quote)
        if(0 != memcmp(&qe_report->body.report_data, &hash, sizeof(hash)))
        {
            se_ret = SGX_ERROR_MAC_MISMATCH;
            break;
        }

        memcpy(&msg3_except_quote_in.mac, mac, sizeof(mac));
        memcpy(emp_msg3, &msg3_except_quote_in, offsetof(sgx_ra_msg3_t, quote));
        se_ret = SGX_SUCCESS;
    }while(0);
    memset_s(&smk_key, sizeof(smk_key), 0, sizeof(smk_key));
    (void)sgx_sha256_close(sha_handle);
    if(cmac_handle != NULL)
        sgx_cmac128_close(cmac_handle);
    return se_ret;
}
Example no. 11
0
/*
 * External function used to get a quote. The prefix "emp_" means the pointer
 * points to memory outside the enclave.
 *
 * @param p_blob[in, out] Pointer to the EPID Blob.
 * @param blob_size[in] The size of EPID Blob, in bytes.
 * @param p_enclave_report[in] The application enclave's report.
 * @param quote_type[in] The type of quote, random based or name based.
 * @param p_spid[in] Pointer to SPID.
 * @param p_nonce[in] Pointer to nonce.
 * @param emp_sig_rl[in] Pointer to SIG-RL.
 * @param sig_rl_size[in] The size of SIG-RL, in bytes.
 * @param p_qe_report[out] Pointer to the QE report, whose report_data is
 *                         sha256(nonce || quote)
 * @param emp_quote[out] Pointer to the output buffer for quote.
 * @param quote_size[in] The size of emp_quote, in bytes.
 * @param pce_isvsvn[in] The ISVSVN of PCE.
 * @return ae_error_t AE_SUCCESS for success, otherwise for errors.
 */
uint32_t get_quote(
    uint8_t *p_blob,
    uint32_t blob_size,
    const sgx_report_t *p_enclave_report,
    sgx_quote_sign_type_t quote_type,
    const sgx_spid_t *p_spid,
    const sgx_quote_nonce_t *p_nonce,
    const uint8_t *emp_sig_rl,
    uint32_t sig_rl_size,
    sgx_report_t *p_qe_report,
    uint8_t *emp_quote,
    uint32_t quote_size,
    sgx_isv_svn_t pce_isvsvn)
{
    ae_error_t ret = AE_SUCCESS;
    EpidStatus epid_ret = kEpidNoErr;
    MemberCtx *p_epid_context = NULL;
    sgx_quote_t quote_body;
    uint8_t is_resealed = 0;
    sgx_basename_t basename = {{0}};
    uint64_t sign_size = 0;
    sgx_status_t se_ret = SGX_SUCCESS;
    sgx_report_t qe_report;
    uint64_t required_buffer_size = 0;
    se_sig_rl_t sig_rl_header;
    se_plaintext_epid_data_sdk_t plaintext;
    sgx_ec256_signature_t ec_signature;
    sgx_cpu_svn_t cpusvn;

    memset(&quote_body, 0, sizeof(quote_body));
    memset(&sig_rl_header, 0, sizeof(sig_rl_header));
    memset(&plaintext, 0, sizeof(plaintext));
    memset(&ec_signature, 0, sizeof(ec_signature));
    memset(&cpusvn, 0, sizeof(cpusvn));


    /* Actually, some of these cases are already checked by code generated by
       edger8r. Here we just want to provide defense in depth. */
    if((NULL == p_blob)
       || (NULL == p_enclave_report)
       || (NULL == p_spid)
       || (NULL == emp_quote)
       || (!quote_size)
       || ((NULL != emp_sig_rl) && (sig_rl_size < sizeof(se_sig_rl_t)
                                                  + 2 * SE_ECDSA_SIGN_SIZE))

		//
		// this size check could mispredict and cause us to
		// overflow, but we have an lfence below
		// that's safe to use for this case
		//

       || ((NULL == emp_sig_rl) && (sig_rl_size != 0)))
        return QE_PARAMETER_ERROR;
    if(SGX_TRUSTED_EPID_BLOB_SIZE_SDK != blob_size)
        return QE_PARAMETER_ERROR;

	//
	// this could mispredict and cause us to
	// overflow, but we have an lfence below
	// that's safe to use for this case
	//

    if(SGX_LINKABLE_SIGNATURE != quote_type
       && SGX_UNLINKABLE_SIGNATURE != quote_type)
        return QE_PARAMETER_ERROR;
    if(!p_nonce && p_qe_report)
        return QE_PARAMETER_ERROR;
    if(p_nonce && !p_qe_report)
        return QE_PARAMETER_ERROR;

    /* To reduce the memory footprint of QE, we should leave sig_rl and
       quote buffer outside enclave. */
    if(!sgx_is_outside_enclave(emp_sig_rl, sig_rl_size))
        return QE_PARAMETER_ERROR;

    //
    // for user_check SigRL input
    // based on quote_size input parameter
    //
    sgx_lfence();

    if(!sgx_is_outside_enclave(emp_quote, quote_size))
        return QE_PARAMETER_ERROR;

    /* Check whether p_blob is copied into EPC. If we want to reduce the
       memory usage, maybe we can leave the p_blob outside EPC. */
    if(!sgx_is_within_enclave(p_blob, blob_size))
        return QE_PARAMETER_ERROR;
    if(!sgx_is_within_enclave(p_enclave_report, sizeof(*p_enclave_report)))
        return QE_PARAMETER_ERROR;
    if(!sgx_is_within_enclave(p_spid, sizeof(*p_spid)))
        return QE_PARAMETER_ERROR;
    /* If the code reaches here and p_nonce is NULL, then p_qe_report will be
       NULL as well. So we only check p_nonce here. */
    if(p_nonce)
    {
        /* Actually Edger8r will alloc the buffer within EPC, this is just kind
           of defense in depth. */
        if(!sgx_is_within_enclave(p_nonce, sizeof(*p_nonce)))
            return QE_PARAMETER_ERROR;
        if(!sgx_is_within_enclave(p_qe_report, sizeof(*p_qe_report)))
            return QE_PARAMETER_ERROR;
    }

    /* Verify the input report. */
    if(SGX_SUCCESS != sgx_verify_report(p_enclave_report))
        return QE_PARAMETER_ERROR;

    /* Verify EPID p_blob and create the context */
    ret = random_stack_advance(verify_blob_internal, p_blob,
        blob_size,
        &is_resealed,
        TRUE,
        plaintext,
        &p_epid_context,
        &cpusvn);
    if(AE_SUCCESS != ret)
        goto CLEANUP;

    /* If SIG-RL is provided, we should check its size. */
    if(emp_sig_rl)
    {
        uint64_t temp_size = 0;
        uint64_t n2 = 0;

        memcpy(&sig_rl_header, emp_sig_rl, sizeof(sig_rl_header));
        if(sig_rl_header.protocol_version != SE_EPID_SIG_RL_VERSION)
        {
            ret = QE_PARAMETER_ERROR;
            goto CLEANUP;
        }

        if(sig_rl_header.epid_identifier != SE_EPID_SIG_RL_ID)
        {
            ret = QE_PARAMETER_ERROR;
            goto CLEANUP;
        }

        if(memcmp(&sig_rl_header.sig_rl.gid, &plaintext.epid_group_cert.gid,
                   sizeof(sig_rl_header.sig_rl.gid)))
        {
            ret = QE_PARAMETER_ERROR;
            goto CLEANUP;
        }
        temp_size = se_get_sig_rl_size(&sig_rl_header);
        if(temp_size != sig_rl_size)
        {
            ret = QE_PARAMETER_ERROR;
            goto CLEANUP;
        }

        se_static_assert(sizeof(ec_signature.x) == SE_ECDSA_SIGN_SIZE);
        se_static_assert(sizeof(ec_signature.y) == SE_ECDSA_SIGN_SIZE);
        memcpy(ec_signature.x,
               emp_sig_rl + sig_rl_size - (SE_ECDSA_SIGN_SIZE * 2),
               sizeof(ec_signature.x));
        SWAP_ENDIAN_32B(ec_signature.x);
        memcpy(ec_signature.y,
               emp_sig_rl + sig_rl_size - (SE_ECDSA_SIGN_SIZE * 1),
               sizeof(ec_signature.y));
        SWAP_ENDIAN_32B(ec_signature.y);

        n2 = SWAP_4BYTES(sig_rl_header.sig_rl.n2);
        temp_size = sizeof(EpidSignature) - sizeof(NrProof)
                    + n2 * sizeof(NrProof);
        if(temp_size > UINT32_MAX)
        {
            ret = QE_PARAMETER_ERROR;
            goto CLEANUP;
        }
        sign_size = temp_size;
    }
    else
    {
        sign_size = sizeof(BasicSignature)
                    + sizeof(uint32_t) // rl_ver
                    + sizeof(uint32_t); // rl_num
    }

    /* Verify that sizeof(basename) is large enough; this should always be true. */
    se_static_assert(sizeof(basename) > sizeof(*p_spid));
    /* Because basename has already been zeroed,
       we don't need to concatenate it with 0s. */
    memcpy(&basename, p_spid, sizeof(*p_spid));
    if(SGX_UNLINKABLE_SIGNATURE == quote_type)
    {
        uint8_t *p = (uint8_t *)&basename + sizeof(*p_spid);
        se_ret = sgx_read_rand(p, sizeof(basename) - sizeof(*p_spid));
        if(SGX_SUCCESS != se_ret)
        {
            ret = QE_UNEXPECTED_ERROR;
            goto CLEANUP;
        }
    }

    epid_ret = EpidRegisterBasename(p_epid_context, (uint8_t *)&basename,
        sizeof(basename));
    if(kEpidNoErr != epid_ret)
    {
        ret = QE_UNEXPECTED_ERROR;
        goto CLEANUP;
    }

    required_buffer_size = SE_QUOTE_LENGTH_WITHOUT_SIG + sign_size;

    /* We should make sure the buffer size is big enough. */
    if(quote_size < required_buffer_size)
    {
        ret = QE_PARAMETER_ERROR;
        goto CLEANUP;
    }

    //
    // for user_check SigRL input
    // based on n2 field in SigRL
    //
    sgx_lfence();

    /* Copy the data in the report into quote body. */
    memset(emp_quote, 0, quote_size);
    quote_body.version = QE_QUOTE_VERSION;
    quote_body.sign_type = (uint16_t)quote_type;
    quote_body.pce_svn = pce_isvsvn; // Both are little endian
    quote_body.xeid = plaintext.xeid; // Both are little endian
    se_static_assert(sizeof(plaintext.epid_group_cert.gid) == sizeof(OctStr32));
    se_static_assert(sizeof(quote_body.epid_group_id) == sizeof(uint32_t));
    ((uint8_t *)(&quote_body.epid_group_id))[0] = plaintext.epid_group_cert.gid.data[3];
    ((uint8_t *)(&quote_body.epid_group_id))[1] = plaintext.epid_group_cert.gid.data[2];
    ((uint8_t *)(&quote_body.epid_group_id))[2] = plaintext.epid_group_cert.gid.data[1];
    ((uint8_t *)(&quote_body.epid_group_id))[3] = plaintext.epid_group_cert.gid.data[0];
    memcpy(&quote_body.basename, &basename, sizeof(quote_body.basename));

    // Get the QE's report.
    se_ret = sgx_create_report(NULL, NULL, &qe_report);
    if(SGX_SUCCESS != se_ret)
    {
        ret = QE_PARAMETER_ERROR;
        goto CLEANUP;
    }

    // Copy QE's security version into the Quote body.
    quote_body.qe_svn = qe_report.body.isv_svn;

    // Copy the incoming report into Quote body.
    memcpy(&quote_body.report_body, &(p_enclave_report->body),
           sizeof(quote_body.report_body));
    /* Because required_buffer_size is larger than signature_len, if we
       get here, no integer overflow will occur. */
    quote_body.signature_len = (uint32_t)(sizeof(se_wrap_key_t)
                               + QUOTE_IV_SIZE
                               + sizeof(uint32_t)
                               + sign_size
                               + sizeof(sgx_mac_t));

    /* Make the signature. */
    ret = qe_epid_sign(p_epid_context,
                       plaintext,
                       &basename,
                       emp_sig_rl ? ((const se_sig_rl_t *)emp_sig_rl)->sig_rl.bk
                                    : NULL,
                       &sig_rl_header,
                       &ec_signature,
                       p_enclave_report,
                       p_nonce,
                       p_qe_report,
                       emp_quote,
                       &quote_body,
                       (uint32_t)sign_size);
    if(AE_SUCCESS != ret)
    {
        // Only need to clean the buffer after the fixed length part.
        memset_s(emp_quote + sizeof(sgx_quote_t), quote_size - sizeof(sgx_quote_t),
                 0, quote_size - sizeof(sgx_quote_t));
        goto CLEANUP;
    }

    memcpy(emp_quote, &quote_body, sizeof(sgx_quote_t));

CLEANUP:
    if(p_epid_context)
        epid_member_delete(&p_epid_context);
    return ret;
}
Example no. 12
0
extern "C" sgx_status_t sgx_mac_aadata_ex(const uint16_t key_policy,
                                            const sgx_attributes_t attribute_mask,
                                            const sgx_misc_select_t misc_mask,
                                            const uint32_t additional_MACtext_length,
                                            const uint8_t *p_additional_MACtext,
                                            const uint32_t sealed_data_size,
                                            sgx_sealed_data_t *p_sealed_data)
{
    sgx_status_t err = SGX_ERROR_UNEXPECTED;
    sgx_report_t report;
    sgx_key_id_t keyID;
    sgx_key_request_t tmp_key_request;
    uint8_t payload_iv[SGX_SEAL_IV_SIZE];
    memset(&payload_iv, 0, sizeof(payload_iv));

    uint32_t sealedDataSize = sgx_calc_sealed_data_size(additional_MACtext_length, 0);
    // Check for overflow
    if (sealedDataSize == UINT32_MAX)
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    //
    // Check parameters
    //
    // check that no reserved bits of key_request->key_policy are set and that at least one policy bit is set
    if ((key_policy & ~(SGX_KEYPOLICY_MRENCLAVE | SGX_KEYPOLICY_MRSIGNER)) ||
        ((key_policy & (SGX_KEYPOLICY_MRENCLAVE | SGX_KEYPOLICY_MRSIGNER)) == 0))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if ((attribute_mask.flags & 0x3) != 0x3)
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    // The AAD must be provided
    if ((additional_MACtext_length == 0) || (p_additional_MACtext == NULL))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    // Ensure AAD does not cross enclave boundary
    if (!(sgx_is_within_enclave(p_additional_MACtext, additional_MACtext_length) ||
        sgx_is_outside_enclave(p_additional_MACtext, additional_MACtext_length)))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    // Ensure sealed data blob is within an enclave during the sealing process
    if ((p_sealed_data == NULL) || (!sgx_is_within_enclave(p_sealed_data, sealed_data_size)))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if (sealedDataSize != sealed_data_size)
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    memset(&report, 0, sizeof(sgx_report_t));
    memset(p_sealed_data, 0, sealedDataSize);
    memset(&keyID, 0, sizeof(sgx_key_id_t));
    memset(&tmp_key_request, 0, sizeof(sgx_key_request_t));

    // Get the report to obtain isv_svn and cpu_svn
    err = sgx_create_report(NULL, NULL, &report);
    if (err != SGX_SUCCESS)
    {
        goto clear_return;
    }

    // Get a random number to populate the key_id of the key_request
    err = sgx_read_rand(reinterpret_cast<uint8_t *>(&keyID), sizeof(sgx_key_id_t));
    if (err != SGX_SUCCESS)
    {
        goto clear_return;
    }

    memcpy(&(tmp_key_request.cpu_svn), &(report.body.cpu_svn), sizeof(sgx_cpu_svn_t));
    memcpy(&(tmp_key_request.isv_svn), &(report.body.isv_svn), sizeof(sgx_isv_svn_t));
    tmp_key_request.key_name = SGX_KEYSELECT_SEAL;
    tmp_key_request.key_policy = key_policy;
    tmp_key_request.attribute_mask.flags = attribute_mask.flags;
    tmp_key_request.attribute_mask.xfrm = attribute_mask.xfrm;
    memcpy(&(tmp_key_request.key_id), &keyID, sizeof(sgx_key_id_t));
    tmp_key_request.misc_mask = misc_mask;

    err = sgx_seal_data_iv(additional_MACtext_length, p_additional_MACtext,
        0, NULL, payload_iv, &tmp_key_request, p_sealed_data);

    if (err == SGX_SUCCESS)
    {
        // Copy data from the temporary key request buffer to the sealed data blob
        memcpy(&(p_sealed_data->key_request), &tmp_key_request, sizeof(sgx_key_request_t));
    }

clear_return:
    // Clear temp state
    memset_s(&report, sizeof(sgx_report_t), 0, sizeof(sgx_report_t));
    memset_s(&keyID, sizeof(sgx_key_id_t), 0, sizeof(sgx_key_id_t));
    return err;
}
Example no. 13
0
extern "C" sgx_status_t sgx_unmac_aadata(const sgx_sealed_data_t *p_sealed_data,
                                        uint8_t *p_additional_MACtext,
                                        uint32_t *p_additional_MACtext_length)
{
    sgx_status_t err = SGX_ERROR_UNEXPECTED;
    // Ensure the sgx_sealed_data_t members are all inside the enclave before using them.
    if ((p_sealed_data == NULL) || (!sgx_is_within_enclave(p_sealed_data, sizeof(sgx_sealed_data_t))))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    // If using this API, the sealed blob must have no encrypted data,
    // so the encrypt_text_length must be 0.
    uint32_t encrypt_text_length = sgx_get_encrypt_txt_len(p_sealed_data);
    if (encrypt_text_length != 0)
    {
        return SGX_ERROR_MAC_MISMATCH; // Return error indicating the blob is corrupted
    }

    // The sealed blob must have AAD. So the add_text_length must not be 0.
    uint32_t add_text_length = sgx_get_add_mac_txt_len(p_sealed_data);
    if (add_text_length == UINT32_MAX || add_text_length == 0)
    {
        return SGX_ERROR_MAC_MISMATCH; // Return error indicating the blob is corrupted
    }
    uint32_t sealedDataSize = sgx_calc_sealed_data_size(add_text_length, encrypt_text_length);
    if (sealedDataSize == UINT32_MAX)
    {
        return SGX_ERROR_MAC_MISMATCH; // Return error indicating the blob is corrupted
    }

    //
    // Check parameters
    //
    // Ensure the sealed data blob is within the enclave while it is being unsealed
    if (!sgx_is_within_enclave(p_sealed_data, sealedDataSize))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if (p_additional_MACtext == NULL || p_additional_MACtext_length == NULL)
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    // Ensure AAD does not cross enclave boundary
    if (!(sgx_is_within_enclave(p_additional_MACtext, add_text_length) || 
        sgx_is_outside_enclave(p_additional_MACtext, add_text_length)))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    uint32_t additional_MACtext_length = *p_additional_MACtext_length;
    if (additional_MACtext_length < add_text_length) {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    err = sgx_unseal_data_helper(p_sealed_data, p_additional_MACtext, add_text_length,
        NULL, encrypt_text_length);
    if (err == SGX_SUCCESS)
    {
        *p_additional_MACtext_length = add_text_length;
    }
    return err;
}
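
A hedged round-trip sketch using the sgx_tseal.h convenience wrappers (sgx_calc_sealed_data_size, sgx_mac_aadata, sgx_unmac_aadata); error handling is reduced to returning the status, and the AAD content is arbitrary.

#include <stdint.h>
#include <stdlib.h>
#include "sgx_tseal.h"

/* MAC a small piece of additional authenticated data, then verify it again. */
static sgx_status_t aad_roundtrip(void)
{
    uint8_t aad[] = "enclave-config-v1";
    uint32_t aad_len = (uint32_t)sizeof(aad);

    uint32_t blob_size = sgx_calc_sealed_data_size(aad_len, 0);
    if (blob_size == UINT32_MAX)
        return SGX_ERROR_UNEXPECTED;

    /* the sealed blob must live inside the enclave (see the checks above) */
    sgx_sealed_data_t *blob = (sgx_sealed_data_t *)malloc(blob_size);
    if (blob == NULL)
        return SGX_ERROR_OUT_OF_MEMORY;

    sgx_status_t st = sgx_mac_aadata(aad_len, aad, blob_size, blob);
    if (st == SGX_SUCCESS) {
        uint8_t out[sizeof(aad)] = {0};
        uint32_t out_len = aad_len;
        st = sgx_unmac_aadata(blob, out, &out_len);  /* recomputes and checks the MAC */
    }
    free(blob);
    return st;
}
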
Example no. 14
0
size_t protected_fs_file::write(const void* ptr, size_t size, size_t count)
{
	if (ptr == NULL || size == 0 || count == 0)
		return 0;

	int32_t result32 = sgx_thread_mutex_lock(&mutex);
	if (result32 != 0)
	{
		last_error = result32;
		file_status = SGX_FILE_STATUS_MEMORY_CORRUPTED;
		return 0;
	}

	size_t data_left_to_write = size * count;

	// prevent integer overflow of size * count...
#if defined(_WIN64) || defined(__x86_64__)
	if (size > UINT32_MAX || count > UINT32_MAX)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#else
	if (((uint64_t)((uint64_t)size * (uint64_t)count)) != (uint64_t)data_left_to_write)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#endif

	if (sgx_is_outside_enclave(ptr, data_left_to_write))
	{
		last_error = SGX_ERROR_INVALID_PARAMETER;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (file_status != SGX_FILE_STATUS_OK)
	{
		last_error = SGX_ERROR_FILE_BAD_STATUS;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (open_mode.append == 0 && open_mode.update == 0 && open_mode.write == 0)
	{
		last_error = EACCES;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (open_mode.append == 1)
		offset = encrypted_part_plain.size; // add at the end of the file

	const unsigned char* data_to_write = (const unsigned char*)ptr;

	// the first block of user data is written in the meta-data encrypted part
	if (offset < MD_USER_DATA_SIZE)
	{
		size_t empty_place_left_in_md = MD_USER_DATA_SIZE - (size_t)offset; // offset is smaller than MD_USER_DATA_SIZE
		if (data_left_to_write <= empty_place_left_in_md)
		{
			memcpy(&encrypted_part_plain.data[offset], data_to_write, data_left_to_write);
			offset += data_left_to_write;
			data_to_write += data_left_to_write; // not needed, to prevent future errors
			data_left_to_write = 0;
		}
		else
		{
			memcpy(&encrypted_part_plain.data[offset], data_to_write, empty_place_left_in_md);
			offset += empty_place_left_in_md;
			data_to_write += empty_place_left_in_md;
			data_left_to_write -= empty_place_left_in_md;
		}
		
		if (offset > encrypted_part_plain.size)
			encrypted_part_plain.size = offset; // file grew, update the new file size

		need_writing = true;
	}

	while (data_left_to_write > 0)
	{
		file_data_node_t* file_data_node = NULL;
		file_data_node = get_data_node(); // return the data node of the current offset, will read it from disk or create new one if needed (and also the mht node if needed)
		if (file_data_node == NULL)
			break;

		size_t offset_in_node = (size_t)((offset - MD_USER_DATA_SIZE) % NODE_SIZE);
		size_t empty_place_left_in_node = NODE_SIZE - offset_in_node;
		
		if (data_left_to_write <= empty_place_left_in_node)
		{ // this will be the last write
			memcpy(&file_data_node->plain.data[offset_in_node], data_to_write, data_left_to_write);
			offset += data_left_to_write;
			data_to_write += data_left_to_write; // not needed, to prevent future errors
			data_left_to_write = 0;
		}
		else
		{
			memcpy(&file_data_node->plain.data[offset_in_node], data_to_write, empty_place_left_in_node);
			offset += empty_place_left_in_node;
			data_to_write += empty_place_left_in_node;
			data_left_to_write -= empty_place_left_in_node;

		}

		if (offset > encrypted_part_plain.size)
			encrypted_part_plain.size = offset; // file grew, update the new file size

		if (file_data_node->need_writing == false)
		{
			file_data_node->need_writing = true;
			file_mht_node_t* file_mht_node = file_data_node->parent;
			while (file_mht_node->mht_node_number != 0) // set all the mht parent nodes as 'need writing'
			{
				file_mht_node->need_writing = true;
				file_mht_node = file_mht_node->parent;
			}
			root_mht.need_writing = true;
			need_writing = true;
		}
	}

	sgx_thread_mutex_unlock(&mutex);

	size_t ret_count = ((size * count) - data_left_to_write) / size;
	return ret_count;
}
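
For context, a hedged sketch of how protected_fs_file::write (above) and protected_fs_file::read (Example no. 15 below) are normally reached from enclave code through the public wrappers in sgx_tprotected_fs.h; the file name and payload are illustrative.

#include <stdio.h>    /* SEEK_SET */
#include <string.h>
#include "sgx_tprotected_fs.h"

/* Write a small record to an encrypted file and read it back (automatic key derivation). */
static int protected_fs_roundtrip(void)
{
    const char msg[] = "hello protected fs";
    char buf[sizeof(msg)] = {0};   /* enclave buffers: write()/read() reject pointers outside the enclave */

    SGX_FILE *fp = sgx_fopen_auto_key("record.dat", "w+b");
    if (fp == NULL)
        return -1;

    size_t written = sgx_fwrite(msg, 1, sizeof(msg), fp);   /* ends up in protected_fs_file::write */
    sgx_fseek(fp, 0, SEEK_SET);
    size_t nread   = sgx_fread(buf, 1, sizeof(buf), fp);    /* ends up in protected_fs_file::read */
    sgx_fclose(fp);

    return (written == sizeof(msg) && nread == sizeof(msg) &&
            memcmp(buf, msg, sizeof(msg)) == 0) ? 0 : -1;
}
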
Example no. 15
0
size_t protected_fs_file::read(void* ptr, size_t size, size_t count)
{
	if (ptr == NULL || size == 0 || count == 0)
		return 0;

	int32_t result32 = sgx_thread_mutex_lock(&mutex);
	if (result32 != 0)
	{
		last_error = result32;
		file_status = SGX_FILE_STATUS_MEMORY_CORRUPTED;
		return 0;
	}

	size_t data_left_to_read = size * count;

	// prevent integer overflow of size * count...
#if defined(_WIN64) || defined(__x86_64__)
	if (size > UINT32_MAX || count > UINT32_MAX)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#else
	if (((uint64_t)((uint64_t)size * (uint64_t)count)) != (uint64_t)data_left_to_read)
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}
#endif


	if (sgx_is_outside_enclave(ptr, data_left_to_read))
	{
		last_error = EINVAL;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (file_status != SGX_FILE_STATUS_OK)
	{
		last_error = SGX_ERROR_FILE_BAD_STATUS;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (open_mode.read == 0 && open_mode.update == 0)
	{
		last_error = EACCES;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (end_of_file == true)
	{// not an error
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	// this check is not strictly needed; the code below would simply do nothing without it, but it is cleaner to check it here
	if (offset == encrypted_part_plain.size)
	{
		end_of_file = true;
		sgx_thread_mutex_unlock(&mutex);
		return 0;
	}

	if (((uint64_t)data_left_to_read) > (uint64_t)(encrypted_part_plain.size - offset)) // the request is bigger than what's left in the file
	{
		data_left_to_read = (size_t)(encrypted_part_plain.size - offset);
	}
	size_t data_attempted_to_read = data_left_to_read; // used at the end to return how much we actually read

	unsigned char* out_buffer = (unsigned char*)ptr;

	// the first block of user data is read from the meta-data encrypted part
	if (offset < MD_USER_DATA_SIZE)
	{
		size_t data_left_in_md = MD_USER_DATA_SIZE - (size_t)offset; // offset is smaller than MD_USER_DATA_SIZE
		if (data_left_to_read <= data_left_in_md)
		{
			memcpy(out_buffer, &encrypted_part_plain.data[offset], data_left_to_read);
			offset += data_left_to_read;
			out_buffer += data_left_to_read; // not needed, to prevent future errors
			data_left_to_read = 0;
		}
		else
		{
			memcpy(out_buffer, &encrypted_part_plain.data[offset], data_left_in_md);
			offset += data_left_in_md;
			out_buffer += data_left_in_md;
			data_left_to_read -= data_left_in_md;
		}
	}

	while (data_left_to_read > 0)
	{
		file_data_node_t* file_data_node = NULL;
		file_data_node = get_data_node(); // return the data node of the current offset, will read it from disk if needed (and also the mht node if needed)
		if (file_data_node == NULL)
			break;

		size_t offset_in_node = (offset - MD_USER_DATA_SIZE) % NODE_SIZE;
		size_t data_left_in_node = NODE_SIZE - offset_in_node;
		
		if (data_left_to_read <= data_left_in_node)
		{
			memcpy(out_buffer, &file_data_node->plain.data[offset_in_node], data_left_to_read);
			offset += data_left_to_read;
			out_buffer += data_left_to_read; // not needed, to prevent future errors
			data_left_to_read = 0;
		}
		else
		{
			memcpy(out_buffer, &file_data_node->plain.data[offset_in_node], data_left_in_node);
			offset += data_left_in_node;
			out_buffer += data_left_in_node;
			data_left_to_read -= data_left_in_node;

		}
	}

	sgx_thread_mutex_unlock(&mutex);

	if (data_left_to_read == 0 &&
		data_attempted_to_read != (size * count)) // user wanted to read more and we had to shrink the request
	{
		assert(offset == encrypted_part_plain.size);
		end_of_file = true;
	}

	size_t ret_count = (data_attempted_to_read - data_left_to_read) / size;
	return ret_count;
}