Example 1
// sgx_register_exception_handler()
//      register a custom exception handler
// Parameters
//      is_first_handler - the order in which the handler should be called:
//          if nonzero, the handler is the first handler to be called;
//          if zero, it is the last handler to be called.
//      exception_handler - a pointer to the handler to be called.
// Return Value
//      handler - success
//         NULL - fail
void *sgx_register_exception_handler(int is_first_handler, sgx_exception_handler_t exception_handler)
{
    // initialize g_veh_cookie the first time sgx_register_exception_handler is called.
    if(unlikely(g_veh_cookie == 0))
    {
        uintptr_t rand = 0;
        do
        {
            if(SGX_SUCCESS != sgx_read_rand((unsigned char *)&rand, sizeof(rand)))
            {
                return NULL;
            }
        } while(rand == 0);

        sgx_spin_lock(&g_handler_lock);
        if(g_veh_cookie == 0)
        {
            g_veh_cookie = rand;
        }
        sgx_spin_unlock(&g_handler_lock);
    }
    if(!sgx_is_within_enclave((const void*)exception_handler, 0))
    {
        return NULL;
    }
    handler_node_t *node = (handler_node_t *)malloc(sizeof(handler_node_t));
    if(!node)
    {
        return NULL;
    }
    node->callback = ENC_VEH_POINTER(exception_handler);

    // write lock
    sgx_spin_lock(&g_handler_lock);

    if((g_first_node == NULL) || is_first_handler)
    {
        node->next = g_first_node;
        g_first_node = node;
    }
    else
    {
        handler_node_t *tmp = g_first_node;
        while(tmp->next != NULL)
        {
            tmp = tmp->next;
        }
        node->next = NULL;
        tmp->next = node;
    }
    // write unlock
    sgx_spin_unlock(&g_handler_lock);

    return node;
}
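
A minimal usage sketch for the API above, assuming the usual declarations from sgx_trts_exception.h (sgx_exception_info_t, SGX_EXCEPTION_VECTOR_DE, EXCEPTION_CONTINUE_EXECUTION/EXCEPTION_CONTINUE_SEARCH); the handler name and the handle variable are hypothetical.

#include "sgx_trts_exception.h"

// Hypothetical handler: resumes execution only for divide errors (#DE).
static int my_de_handler(sgx_exception_info_t *info)
{
    if (info->exception_vector == SGX_EXCEPTION_VECTOR_DE)
    {
        // ... patch the CPU context in *info here as needed ...
        return EXCEPTION_CONTINUE_EXECUTION;   // retry the faulting instruction
    }
    return EXCEPTION_CONTINUE_SEARCH;          // let the next handler try
}

static void *g_my_handler_handle = NULL;

void install_my_handler(void)
{
    // register as the first handler in the chain and keep the returned
    // handle for a later sgx_unregister_exception_handler() call
    g_my_handler_handle = sgx_register_exception_handler(1, my_de_handler);
}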
Example 2
// TKE interface for isv enclaves
sgx_status_t sgx_ra_get_keys(
    sgx_ra_context_t context,
    sgx_ra_key_type_t type,
    sgx_ra_key_128_t *p_key)
{
    if(vector_size(&g_ra_db) <= context || !p_key)
        return SGX_ERROR_INVALID_PARAMETER;
    ra_db_item_t* item = NULL;
    if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL )
        return SGX_ERROR_INVALID_PARAMETER;

    if(!sgx_is_within_enclave(p_key, sizeof(sgx_ra_key_128_t)))
        return SGX_ERROR_INVALID_PARAMETER;

    sgx_status_t ret = SGX_SUCCESS;
    sgx_spin_lock(&item->item_lock);
    //sgx_ra_proc_msg2_trusted fills the keys, so they are available after it has been called.
    if (item->state != ra_proc_msg2ed)
        ret = SGX_ERROR_INVALID_STATE;
    else if(SGX_RA_KEY_MK == type)
        memcpy(p_key, item->mk_key, sizeof(sgx_ra_key_128_t));
    else if(SGX_RA_KEY_SK == type)
        memcpy(p_key, item->sk_key, sizeof(sgx_ra_key_128_t));
    else
        ret = SGX_ERROR_INVALID_PARAMETER;
    sgx_spin_unlock(&item->item_lock);
    return ret;
}
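
A sketch of how an ISV enclave might pull the negotiated keys once the context has reached ra_proc_msg2ed; fetch_ra_keys is a hypothetical helper, and the local copies are wiped with memset_s as elsewhere in these examples.

sgx_status_t fetch_ra_keys(sgx_ra_context_t context)
{
    sgx_ra_key_128_t sk_key, mk_key;

    sgx_status_t ret = sgx_ra_get_keys(context, SGX_RA_KEY_SK, &sk_key);
    if (ret != SGX_SUCCESS)
        return ret;
    ret = sgx_ra_get_keys(context, SGX_RA_KEY_MK, &mk_key);
    if (ret != SGX_SUCCESS)
    {
        memset_s(&sk_key, sizeof(sk_key), 0, sizeof(sk_key));
        return ret;
    }

    // ... use sk_key / mk_key inside the enclave ...

    memset_s(&sk_key, sizeof(sk_key), 0, sizeof(sk_key));
    memset_s(&mk_key, sizeof(mk_key), 0, sizeof(mk_key));
    return SGX_SUCCESS;
}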
Example 3
sgx_status_t sgx_close_pse_session()
{
    sgx_spin_lock(&g_spin_lock);
    if(g_b_session_established)
        g_b_session_established = false;
    sgx_spin_unlock(&g_spin_lock);
    return SGX_SUCCESS;
}
Example 4
sgx_status_t sgx_create_pse_session()
{
    sgx_spin_lock(&g_spin_lock);
    if(!g_b_session_established)
        g_b_session_established = true;
    sgx_spin_unlock(&g_spin_lock);
    return SGX_SUCCESS;
}
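
Examples 3 and 4 only toggle a session flag, but the intended calling pattern is still a create/close bracket around platform-service work. A rough sketch (with_pse_session is a hypothetical helper):

sgx_status_t with_pse_session(void)
{
    sgx_status_t ret = sgx_create_pse_session();
    if (ret != SGX_SUCCESS)
        return ret;

    // ... platform-service calls, e.g. the monotonic-counter
    //     functions shown in Examples 12 and 13 ...

    return sgx_close_pse_session();
}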
Example 5
extern "C" sgx_status_t sgx_ra_get_ga(
    sgx_ra_context_t context,
    sgx_ec256_public_t *g_a)
{
    sgx_status_t se_ret;
    if(vector_size(&g_ra_db) <= context||!g_a)
        return SGX_ERROR_INVALID_PARAMETER;
    ra_db_item_t* item = NULL;
    if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL )
        return SGX_ERROR_INVALID_PARAMETER;


    sgx_ecc_state_handle_t ecc_state = NULL;
    sgx_ec256_public_t pub_key;
    sgx_ec256_private_t priv_key;

    memset(&pub_key, 0, sizeof(pub_key));
    memset(&priv_key, 0, sizeof(priv_key));


    sgx_spin_lock(&item->item_lock);
    do
    {
        //sgx_ra_init must have been called
        if (item->state != ra_inited)
        {
            se_ret = SGX_ERROR_INVALID_STATE;
            break;
        }
        // ecc_state must be closed before exit.
        se_ret = sgx_ecc256_open_context(&ecc_state);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        se_ret = sgx_ecc256_create_key_pair(&priv_key, &pub_key, ecc_state);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        memcpy(&item->a, &priv_key, sizeof(item->a));
        memcpy(&item->g_a, &pub_key, sizeof(item->g_a));
        memcpy(g_a, &pub_key, sizeof(sgx_ec256_public_t));
        item->state = ra_get_gaed;
        //clear the local private key for defense in depth
        memset_s(&priv_key,sizeof(priv_key),0,sizeof(sgx_ec256_private_t));
    }while(0);
    sgx_spin_unlock(&item->item_lock);
    if(ecc_state!=NULL)
        sgx_ecc256_close_context(ecc_state);
    return se_ret;
}
Example 6
static uint32_t get_rand_lcg()
{
    sgx_spin_lock(&g_seed_lock);

    uint64_t& seed = g_global_data_sim.seed;
    seed = (uint64_t)(6364136223846793005ULL * seed + 1);
    uint32_t n = (uint32_t)(seed >> 32);

    sgx_spin_unlock(&g_seed_lock);

    return n;
}
Example 7
// TKE interface for isv enclaves
sgx_status_t SGXAPI sgx_ra_close(
    sgx_ra_context_t context)
{
    if(vector_size(&g_ra_db) <= context)
        return SGX_ERROR_INVALID_PARAMETER;
    ra_db_item_t* item = NULL;
    if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL )
        return SGX_ERROR_INVALID_PARAMETER;
    sgx_spin_lock(&g_ra_db_lock);
    //safely clear the private key and RA keys before freeing memory, for defense in depth
    memset_s(&item->a,sizeof(item->a),0,sizeof(sgx_ec256_private_t));
    memset_s(&item->vk_key,sizeof(item->vk_key),0,sizeof(sgx_ec_key_128bit_t));
    memset_s(&item->mk_key,sizeof(item->mk_key),0,sizeof(sgx_ec_key_128bit_t));
    memset_s(&item->sk_key,sizeof(item->sk_key),0,sizeof(sgx_ec_key_128bit_t));
    memset_s(&item->smk_key,sizeof(item->smk_key),0,sizeof(sgx_ec_key_128bit_t));
    SAFE_FREE(item);
    vector_set(&g_ra_db, context, NULL);
    sgx_spin_unlock(&g_ra_db_lock);
    return SGX_SUCCESS;
}
Example 8
// sgx_unregister_exception_handler()
//      unregister a custom exception handler.
// Parameter
//      handler - the handle returned by a previous call to
//                the sgx_register_exception_handler function.
// Return Value
//      nonzero - success
//            0 - fail
int sgx_unregister_exception_handler(void *handler)
{
    if(!handler)
    {
        return 0;
    }

    int status = 0;

    // write lock
    sgx_spin_lock(&g_handler_lock);

    if(g_first_node)
    {
        handler_node_t *node = g_first_node;
        if(node == handler)
        {
            g_first_node = node->next;
            status = 1;
        }
        else
        {
            while(node->next != NULL)
            {
                if(node->next == handler)
                {
                    node->next = node->next->next;
                    status = 1;
                    break;
                }
                node = node->next;
            }
        }
    }
    // write unlock
    sgx_spin_unlock(&g_handler_lock);

    if(status) free(handler);
    return status;
}
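
Deregistration takes the opaque handle returned by sgx_register_exception_handler(); a short sketch continuing the hypothetical g_my_handler_handle from the sketch after Example 1:

void remove_my_handler(void)
{
    if (g_my_handler_handle != NULL &&
        sgx_unregister_exception_handler(g_my_handler_handle) != 0)
    {
        // the node was unlinked and freed inside the call
        g_my_handler_handle = NULL;
    }
}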
Example 9
// TKE interface for isv enclaves
sgx_status_t sgx_ra_init_ex(
    const sgx_ec256_public_t *p_pub_key,
    int b_pse,
    sgx_ra_derive_secret_keys_t derive_key_cb,
    sgx_ra_context_t *p_context)
{
    int valid = 0;
    sgx_status_t ret = SGX_SUCCESS;
    sgx_ecc_state_handle_t ecc_state = NULL;

    // initialize g_kdf_cookie the first time sgx_ra_init_ex is called.
    if (unlikely(g_kdf_cookie == 0))
    {
        uintptr_t rand = 0;
        do
        {
            if (SGX_SUCCESS != sgx_read_rand((unsigned char *)&rand, sizeof(rand)))
            {
                return SGX_ERROR_UNEXPECTED;
            }
        } while (rand == 0);

        sgx_spin_lock(&g_ra_db_lock);
        if (g_kdf_cookie == 0)
        {
            g_kdf_cookie = rand;
            memset_s(&rand, sizeof(rand), 0, sizeof(rand));
        }
        sgx_spin_unlock(&g_ra_db_lock);
    }

    if(!p_pub_key || !p_context)
        return SGX_ERROR_INVALID_PARAMETER;

    if(!sgx_is_within_enclave(p_pub_key, sizeof(sgx_ec256_public_t)))
        return SGX_ERROR_INVALID_PARAMETER;

    //derive_key_cb can be NULL
    if (NULL != derive_key_cb &&
        !sgx_is_within_enclave((const void*)derive_key_cb, 0))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }

    ret = sgx_ecc256_open_context(&ecc_state);
    if(SGX_SUCCESS != ret)
    {
        if(SGX_ERROR_OUT_OF_MEMORY != ret)
            ret = SGX_ERROR_UNEXPECTED;
        return ret;
    }

    ret = sgx_ecc256_check_point((const sgx_ec256_public_t *)p_pub_key,
                                 ecc_state, &valid);
    if(SGX_SUCCESS != ret)
    {
        if(SGX_ERROR_OUT_OF_MEMORY != ret)
            ret = SGX_ERROR_UNEXPECTED;
        sgx_ecc256_close_context(ecc_state);
        return ret;
    }
    if(!valid)
    {
        sgx_ecc256_close_context(ecc_state);
        return SGX_ERROR_INVALID_PARAMETER;
    }
    sgx_ecc256_close_context(ecc_state);

    //add new item to g_ra_db
    ra_db_item_t* new_item = (ra_db_item_t*)malloc(sizeof(ra_db_item_t));
    if (!new_item)
    {
        return SGX_ERROR_OUT_OF_MEMORY;
    }
    memset(new_item,0, sizeof(ra_db_item_t));
    memcpy(&new_item->sp_pubkey, p_pub_key, sizeof(new_item->sp_pubkey));
    if(b_pse)
    {
        //sgx_create_pse_session() must have been called
        ret = sgx_get_ps_sec_prop(&new_item->ps_sec_prop);
        if (ret!=SGX_SUCCESS)
        {
            SAFE_FREE(new_item);
            return ret;
        }
    }

    new_item->derive_key_cb = ENC_KDF_POINTER(derive_key_cb);
    new_item->state = ra_inited;

    //find first empty slot in g_ra_db
    int first_empty = -1;
    ra_db_item_t* item = NULL;
    sgx_spin_lock(&g_ra_db_lock);
    uint32_t size = vector_size(&g_ra_db);
    for (uint32_t i = 0; i < size; i++)
    {
        if(0 != vector_get(&g_ra_db, i, reinterpret_cast<void**>(&item)))
        {
            sgx_spin_unlock(&g_ra_db_lock);
            SAFE_FREE(new_item);
            return SGX_ERROR_UNEXPECTED;
        }
        if(item == NULL)
        {
            first_empty = i;
            break;
        }
    }
    //if there is an empty slot, use it
    if (first_empty >= 0)
    {
        errno_t vret = vector_set(&g_ra_db, first_empty, new_item);
        UNUSED(vret);
        assert(vret == 0);
        *p_context = first_empty;
    }
    //if there are no empty slots, add a new item to g_ra_db
    else
    {
        if(size >= INT32_MAX)
        {
            //overflow
            sgx_spin_unlock(&g_ra_db_lock);
            SAFE_FREE(new_item);
            return SGX_ERROR_OUT_OF_MEMORY;
        }
        if(0 != vector_push_back(&g_ra_db, new_item))
        {
            sgx_spin_unlock(&g_ra_db_lock);
            SAFE_FREE(new_item);
            return SGX_ERROR_OUT_OF_MEMORY;
        }
        *p_context = size;
    }
    sgx_spin_unlock(&g_ra_db_lock);
    return SGX_SUCCESS;
}
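
A sketch of the enclave-side lifetime of the context created above. g_sp_pub_key stands for a service-provider public key assumed to be hard-coded in the enclave, and the actual msg1/msg2/msg3 exchange is driven from the untrusted side, so it is not shown.

extern const sgx_ec256_public_t g_sp_pub_key;   // assumed ISV-provided constant

sgx_status_t start_ra(int b_pse, sgx_ra_context_t *p_context)
{
    // a NULL derive_key_cb selects the built-in derivation for kdf_id 0x0001
    // (see Example 11)
    return sgx_ra_init_ex(&g_sp_pub_key, b_pse, NULL, p_context);
}

void end_ra(sgx_ra_context_t context)
{
    // releases the g_ra_db slot and scrubs the private and derived keys
    // (see Example 7)
    (void)sgx_ra_close(context);
}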
Example 10
/* the caller is supposed to fill the quote field in emp_msg3 before calling
 * this function.*/
extern "C" sgx_status_t sgx_ra_get_msg3_trusted(
    sgx_ra_context_t context,
    uint32_t quote_size,
    sgx_report_t* qe_report,
    sgx_ra_msg3_t *emp_msg3,    //(mac||g_a||ps_sec_prop||quote)
    uint32_t msg3_size)
{
    if(vector_size(&g_ra_db) <= context ||!quote_size || !qe_report || !emp_msg3)
        return SGX_ERROR_INVALID_PARAMETER;

    ra_db_item_t* item = NULL;
    if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL )
        return SGX_ERROR_INVALID_PARAMETER;

    //check integer overflow of msg3_size and quote_size
    if (UINTPTR_MAX - reinterpret_cast<uintptr_t>(emp_msg3) < msg3_size ||
        UINT32_MAX - quote_size < sizeof(sgx_ra_msg3_t) ||
        sizeof(sgx_ra_msg3_t) + quote_size != msg3_size)
        return SGX_ERROR_INVALID_PARAMETER;

    if (!sgx_is_outside_enclave(emp_msg3, msg3_size))
        return SGX_ERROR_INVALID_PARAMETER;
    //
    // fence after the boundary check; this also stops speculation in case the
    // branch on (sizeof(sgx_ra_msg3_t) + quote_size != msg3_size) mispredicts
    //
    sgx_lfence();

    sgx_status_t se_ret = SGX_ERROR_UNEXPECTED;

    //verify qe report
    se_ret = sgx_verify_report(qe_report);
    if(se_ret != SGX_SUCCESS)
    {
        if (SGX_ERROR_MAC_MISMATCH != se_ret &&
            SGX_ERROR_OUT_OF_MEMORY != se_ret)
            se_ret = SGX_ERROR_UNEXPECTED;
        return se_ret;
    }

    sgx_spin_lock(&item->item_lock);
    //sgx_ra_proc_msg2_trusted must have been called
    if (item->state != ra_proc_msg2ed)
    {
        sgx_spin_unlock(&item->item_lock);
        return SGX_ERROR_INVALID_STATE;
    }
    //verify that qe_report's attributes and mr_enclave match those of the quoting enclave
    if( memcmp( &qe_report->body.attributes, &item->qe_target.attributes, sizeof(sgx_attributes_t)) ||
        memcmp( &qe_report->body.mr_enclave, &item->qe_target.mr_enclave, sizeof(sgx_measurement_t)) )
    {
        sgx_spin_unlock(&item->item_lock);
        return SGX_ERROR_INVALID_PARAMETER;
    }

    sgx_ra_msg3_t msg3_except_quote_in;
    sgx_cmac_128bit_key_t smk_key;
    memcpy(&msg3_except_quote_in.g_a, &item->g_a, sizeof(msg3_except_quote_in.g_a));
    memcpy(&msg3_except_quote_in.ps_sec_prop, &item->ps_sec_prop,
        sizeof(msg3_except_quote_in.ps_sec_prop));
    memcpy(&smk_key, &item->smk_key, sizeof(smk_key));
    sgx_spin_unlock(&item->item_lock);

    sgx_sha_state_handle_t sha_handle = NULL;
    sgx_cmac_state_handle_t cmac_handle = NULL;


    //SHA256(NONCE || emp_quote)
    sgx_sha256_hash_t hash = {0};
    se_ret = sgx_sha256_init(&sha_handle);
    if (SGX_SUCCESS != se_ret)
    {
        if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
            se_ret = SGX_ERROR_UNEXPECTED;
        return se_ret;
    }
    if (NULL == sha_handle)
    {
        return SGX_ERROR_UNEXPECTED;
    }
    do
    {
        se_ret = sgx_sha256_update((uint8_t *)&item->quote_nonce,
            sizeof(item->quote_nonce),
            sha_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        //cmac M := ga || PS_SEC_PROP_DESC (all zero if unused) || emp_quote
        sgx_cmac_128bit_tag_t mac;
        se_ret = sgx_cmac128_init(&smk_key, &cmac_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        if (NULL == cmac_handle)
        {
            se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        se_ret = sgx_cmac128_update((uint8_t*)&msg3_except_quote_in.g_a,
            sizeof(msg3_except_quote_in.g_a), cmac_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        se_ret = sgx_cmac128_update((uint8_t*)&msg3_except_quote_in.ps_sec_prop,
            sizeof(msg3_except_quote_in.ps_sec_prop), cmac_handle);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        // sha256 and cmac quote
        uint8_t quote_piece[32];
        const uint8_t* emp_quote_piecemeal = emp_msg3->quote;
        uint32_t quote_piece_size = static_cast<uint32_t>(sizeof(quote_piece));

        while (emp_quote_piecemeal < emp_msg3->quote + quote_size)
        {
            //calculate the size of this piece; each piece is sizeof(quote_piece) bytes except possibly the last one.
            if (static_cast<uint32_t>(emp_msg3->quote + quote_size - emp_quote_piecemeal) < quote_piece_size)
                quote_piece_size = static_cast<uint32_t>(emp_msg3->quote - emp_quote_piecemeal) + quote_size ;
            memcpy(quote_piece, emp_quote_piecemeal, quote_piece_size);
            se_ret = sgx_sha256_update(quote_piece,
                                    quote_piece_size,
                                    sha_handle);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }
            se_ret = sgx_cmac128_update(quote_piece,
                                        quote_piece_size,
                                        cmac_handle);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }
            emp_quote_piecemeal += sizeof(quote_piece);
        }
        ERROR_BREAK(se_ret);

        //get sha256 hash value
        se_ret = sgx_sha256_get_hash(sha_handle, &hash);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        //get cmac value
        se_ret = sgx_cmac128_final(cmac_handle, &mac);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        //verify qe_report->body.report_data == SHA256(NONCE || emp_quote)
        if(0 != memcmp(&qe_report->body.report_data, &hash, sizeof(hash)))
        {
            se_ret = SGX_ERROR_MAC_MISMATCH;
            break;
        }

        memcpy(&msg3_except_quote_in.mac, mac, sizeof(mac));
        memcpy(emp_msg3, &msg3_except_quote_in, offsetof(sgx_ra_msg3_t, quote));
        se_ret = SGX_SUCCESS;
    }while(0);
    memset_s(&smk_key, sizeof(smk_key), 0, sizeof(smk_key));
    (void)sgx_sha256_close(sha_handle);
    if(cmac_handle != NULL)
        sgx_cmac128_close(cmac_handle);
    return se_ret;
}
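
The quote loop above never hands untrusted memory straight to a crypto primitive: the quote lives outside the enclave, so it is copied into a small in-enclave buffer in fixed-size pieces and fed to the incremental hash/CMAC. A generic sketch of the same pattern for SHA-256 only, with hypothetical names; sha_handle is assumed to be an already-initialized sgx_sha_state_handle_t.

static sgx_status_t sha256_update_untrusted(const uint8_t *emp_data,
                                            uint32_t data_size,
                                            sgx_sha_state_handle_t sha_handle)
{
    uint8_t piece[32];
    uint32_t offset = 0;
    while (offset < data_size)
    {
        uint32_t piece_size = data_size - offset;
        if (piece_size > sizeof(piece))
            piece_size = (uint32_t)sizeof(piece);
        // copy the next chunk into enclave memory before hashing it
        memcpy(piece, emp_data + offset, piece_size);
        sgx_status_t ret = sgx_sha256_update(piece, piece_size, sha_handle);
        if (SGX_SUCCESS != ret)
            return ret;
        offset += piece_size;
    }
    return SGX_SUCCESS;
}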
Example 11
extern "C" sgx_status_t sgx_ra_proc_msg2_trusted(
    sgx_ra_context_t context,
    const sgx_ra_msg2_t *p_msg2,            //(g_b||spid||quote_type|| KDF_ID ||sign_gb_ga||cmac||sig_rl_size||sig_rl)
    const sgx_target_info_t *p_qe_target,
    sgx_report_t *p_report,
    sgx_quote_nonce_t* p_nonce)
{
    sgx_status_t se_ret = SGX_ERROR_UNEXPECTED;
    //p_msg2[in] p_qe_target[in] p_report[out] p_nonce[out] in EDL file
    if(vector_size(&g_ra_db) <= context
       || !p_msg2
       || !p_qe_target
       || !p_report
       || !p_nonce)
        return SGX_ERROR_INVALID_PARAMETER;

    ra_db_item_t* item = NULL;
    if(0 != vector_get(&g_ra_db, context, reinterpret_cast<void**>(&item)) || item == NULL )
        return SGX_ERROR_INVALID_PARAMETER;

    sgx_ec256_private_t a;
    memset(&a, 0, sizeof(a));
    // Create gb_ga
    sgx_ec256_public_t gb_ga[2];
    sgx_ec256_public_t sp_pubkey;
    sgx_ec_key_128bit_t smkey = {0};
    sgx_ec_key_128bit_t skey = {0};
    sgx_ec_key_128bit_t mkey = {0};
    sgx_ec_key_128bit_t vkey = {0};
    sgx_ra_derive_secret_keys_t ra_key_cb = NULL;

    memset(&gb_ga[0], 0, sizeof(gb_ga));
    sgx_spin_lock(&item->item_lock);
    //sgx_ra_get_ga must have been called
    if (item->state != ra_get_gaed)
    {
        sgx_spin_unlock(&item->item_lock);
        return SGX_ERROR_INVALID_STATE;
    }
    memcpy(&a, &item->a, sizeof(a));
    memcpy(&gb_ga[1], &item->g_a, sizeof(gb_ga[1]));
    memcpy(&sp_pubkey, &item->sp_pubkey, sizeof(sp_pubkey));
    ra_key_cb = DEC_KDF_POINTER(item->derive_key_cb);
    sgx_spin_unlock(&item->item_lock);
    memcpy(&gb_ga[0], &p_msg2->g_b, sizeof(gb_ga[0]));

    sgx_ecc_state_handle_t ecc_state = NULL;

    // ecc_state needs to be freed before exit.
    se_ret = sgx_ecc256_open_context(&ecc_state);
    if (SGX_SUCCESS != se_ret)
    {
        if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
            se_ret = SGX_ERROR_UNEXPECTED;
        return se_ret;
    }

    sgx_ec256_dh_shared_t dh_key;
    memset(&dh_key, 0, sizeof(dh_key));
    sgx_ec256_public_t* p_msg2_g_b = const_cast<sgx_ec256_public_t*>(&p_msg2->g_b);
    se_ret = sgx_ecc256_compute_shared_dhkey(&a,
        (sgx_ec256_public_t*)p_msg2_g_b,
        &dh_key, ecc_state);
    if(SGX_SUCCESS != se_ret)
    {
        if (SGX_ERROR_OUT_OF_MEMORY != se_ret)
            se_ret = SGX_ERROR_UNEXPECTED;
        sgx_ecc256_close_context(ecc_state);
        return se_ret;
    }
    // Verify signature of gb_ga
    uint8_t result;
    sgx_ec256_signature_t* p_msg2_sign_gb_ga = const_cast<sgx_ec256_signature_t*>(&p_msg2->sign_gb_ga);
    se_ret = sgx_ecdsa_verify((uint8_t *)&gb_ga, sizeof(gb_ga),
        &sp_pubkey,
        p_msg2_sign_gb_ga,
        &result, ecc_state);
    if(SGX_SUCCESS != se_ret)
    {
        if (SGX_ERROR_OUT_OF_MEMORY != se_ret)
            se_ret = SGX_ERROR_UNEXPECTED;
        sgx_ecc256_close_context(ecc_state);
        return se_ret;
    }
    if(SGX_EC_VALID != result)
    {
        sgx_ecc256_close_context(ecc_state);
        return SGX_ERROR_INVALID_SIGNATURE;
    }

    do
    {
        if(NULL != ra_key_cb)
        {
            se_ret = ra_key_cb(&dh_key,
                               p_msg2->kdf_id,
                               &smkey,
                               &skey,
                               &mkey,
                               &vkey);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret &&
                    SGX_ERROR_INVALID_PARAMETER != se_ret &&
                    SGX_ERROR_KDF_MISMATCH != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }
        }
        else if (p_msg2->kdf_id == 0x0001)
        {
            se_ret = derive_key(&dh_key, "SMK", (uint32_t)(sizeof("SMK") -1), &smkey);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }
            se_ret = derive_key(&dh_key, "SK", (uint32_t)(sizeof("SK") -1), &skey);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }

            se_ret = derive_key(&dh_key, "MK", (uint32_t)(sizeof("MK") -1), &mkey);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }

            se_ret = derive_key(&dh_key, "VK", (uint32_t)(sizeof("VK") -1), &vkey);
            if (SGX_SUCCESS != se_ret)
            {
                if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                    se_ret = SGX_ERROR_UNEXPECTED;
                break;
            }
        }
        else
        {
            se_ret = SGX_ERROR_KDF_MISMATCH;
            break;
        }

        sgx_cmac_128bit_tag_t mac;
        uint32_t maced_size = offsetof(sgx_ra_msg2_t, mac);

        se_ret = sgx_rijndael128_cmac_msg(&smkey, (const uint8_t *)p_msg2, maced_size, &mac);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }
        //Check mac
        if(0 == consttime_memequal(mac, p_msg2->mac, sizeof(mac)))
        {
            se_ret = SGX_ERROR_MAC_MISMATCH;
            break;
        }

        //create a nonce
        se_ret =sgx_read_rand((uint8_t*)p_nonce, sizeof(sgx_quote_nonce_t));
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            break;
        }

        sgx_spin_lock(&item->item_lock);
        //sgx_ra_get_ga must have been called
        if (item->state != ra_get_gaed)
        {
            se_ret = SGX_ERROR_INVALID_STATE;
            sgx_spin_unlock(&item->item_lock);
            break;
        }
        memcpy(&item->g_b, &p_msg2->g_b, sizeof(item->g_b));
        memcpy(&item->smk_key, smkey, sizeof(item->smk_key));
        memcpy(&item->sk_key, skey, sizeof(item->sk_key));
        memcpy(&item->mk_key, mkey, sizeof(item->mk_key));
        memcpy(&item->vk_key, vkey, sizeof(item->vk_key));
        memcpy(&item->qe_target, p_qe_target, sizeof(sgx_target_info_t));
        memcpy(&item->quote_nonce, p_nonce, sizeof(sgx_quote_nonce_t));
        sgx_report_data_t report_data = {{0}};
        se_static_assert(sizeof(sgx_report_data_t)>=sizeof(sgx_sha256_hash_t));
        // H = SHA256(ga || gb || VK_CMAC)
        uint32_t sha256ed_size = offsetof(ra_db_item_t, sp_pubkey);
        //report_data is 512 bits and H is 256 bits; H occupies the lower 256 bits of report_data while the upper 256 bits are all zeros.
        se_ret = sgx_sha256_msg((uint8_t *)&item->g_a, sha256ed_size,
                                (sgx_sha256_hash_t *)&report_data);
        if(SGX_SUCCESS != se_ret)
        {
            if (SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            sgx_spin_unlock(&item->item_lock);
            break;
        }
        //REPORTDATA = H
        se_ret = sgx_create_report(p_qe_target, &report_data, p_report);
        if (SGX_SUCCESS != se_ret)
        {
            if(SGX_ERROR_OUT_OF_MEMORY != se_ret)
                se_ret = SGX_ERROR_UNEXPECTED;
            sgx_spin_unlock(&item->item_lock);
            break;
        }
        item->state = ra_proc_msg2ed;
        sgx_spin_unlock(&item->item_lock);
    }while(0);
    memset_s(&dh_key, sizeof(dh_key), 0, sizeof(dh_key));
    sgx_ecc256_close_context(ecc_state);
    memset_s(&a, sizeof(sgx_ec256_private_t),0, sizeof(sgx_ec256_private_t));
    memset_s(smkey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t));
    memset_s(skey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t));
    memset_s(mkey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t));
    memset_s(vkey, sizeof(sgx_ec_key_128bit_t),0, sizeof(sgx_ec_key_128bit_t));
    return se_ret;
}
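
When sgx_ra_init_ex (Example 9) is given a non-NULL derive_key_cb, that callback replaces the built-in derivation in the function above. A sketch of such a callback, with the parameter order taken from the ra_key_cb call site; the CMAC-based derivation is purely illustrative (not a vetted KDF) and the kdf_id value 0x0002 is a made-up ISV choice.

// toy helper: derive one 128-bit key as AES-CMAC(label-keyed key, shared secret)
static sgx_status_t derive_one(const sgx_ec256_dh_shared_t *p_shared_key,
                               uint8_t label, sgx_ec_key_128bit_t *p_out)
{
    sgx_cmac_128bit_key_t cmac_key = {0};
    cmac_key[0] = label;
    return sgx_rijndael128_cmac_msg(&cmac_key,
                                    (const uint8_t *)p_shared_key,
                                    (uint32_t)sizeof(*p_shared_key),
                                    (sgx_cmac_128bit_tag_t *)p_out);
}

static sgx_status_t isv_derive_keys(const sgx_ec256_dh_shared_t *p_shared_key,
                                    uint16_t kdf_id,
                                    sgx_ec_key_128bit_t *p_smk_key,
                                    sgx_ec_key_128bit_t *p_sk_key,
                                    sgx_ec_key_128bit_t *p_mk_key,
                                    sgx_ec_key_128bit_t *p_vk_key)
{
    if (kdf_id != 0x0002)
        return SGX_ERROR_KDF_MISMATCH;   // passed through by the caller above

    sgx_status_t ret;
    if (SGX_SUCCESS != (ret = derive_one(p_shared_key, 0, p_smk_key))) return ret;
    if (SGX_SUCCESS != (ret = derive_one(p_shared_key, 1, p_sk_key)))  return ret;
    if (SGX_SUCCESS != (ret = derive_one(p_shared_key, 2, p_mk_key)))  return ret;
    if (SGX_SUCCESS != (ret = derive_one(p_shared_key, 3, p_vk_key)))  return ret;
    return SGX_SUCCESS;
}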
Example 12
sgx_status_t sgx_read_monotonic_counter(
    const sgx_mc_uuid_t *p_counter_uuid,
    uint32_t *p_counter_value)
{
    if(!p_counter_value || !p_counter_uuid){
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if (!g_b_session_established){
        return SGX_ERROR_AE_SESSION_INVALID;
    }

    pse_message_t *p_req_msg = (pse_message_t *)malloc(PSE_READ_MC_REQ_SIZE);
    if(!p_req_msg){
        return SGX_ERROR_OUT_OF_MEMORY;
    }
    pse_message_t *p_resp_msg = (pse_message_t *)malloc(PSE_READ_MC_RESP_SIZE);
    if(!p_resp_msg){
        free(p_req_msg);
        return SGX_ERROR_OUT_OF_MEMORY;
    }
    p_req_msg->exp_resp_size = sizeof(pse_mc_read_resp_t);
    p_req_msg->payload_size = sizeof(pse_mc_read_req_t);

    pse_mc_read_req_t *p_mc_req
        = (pse_mc_read_req_t *)p_req_msg->payload;
    memcpy(p_mc_req->counter_id,
           p_counter_uuid->counter_id,
           sizeof(p_mc_req->counter_id));
    memcpy(p_mc_req->nonce,
           p_counter_uuid->nonce,
           sizeof(p_mc_req->nonce));
    p_mc_req->req_hdr.service_id = PSE_MC_SERVICE;
    p_mc_req->req_hdr.service_cmd = PSE_MC_READ;

    pse_mc_read_resp_t *p_mc_resp
        = (pse_mc_read_resp_t *)p_resp_msg->payload;

    sgx_status_t status = SGX_SUCCESS;
    sgx_status_t ret = SGX_SUCCESS;
    int retry = RETRY_TIMES;
    do {
        sgx_spin_lock(&g_spin_lock);
        status = invoke_service_ocall(&ret,
                                      (uint8_t *)p_req_msg,
                                      PSE_READ_MC_REQ_SIZE,
                                      (uint8_t *)p_resp_msg,
                                      PSE_READ_MC_RESP_SIZE,
                                      DEFAULT_AESM_TIMEOUT);
        sgx_spin_unlock(&g_spin_lock);
        if(status != SGX_SUCCESS || ret != SGX_SUCCESS){
            if(SGX_ERROR_MC_NOT_FOUND != ret)
                status = SGX_ERROR_UNEXPECTED;
            else
                status = SGX_ERROR_MC_NOT_FOUND;
            continue;
        }

        if(p_mc_resp->resp_hdr.service_id != PSE_MC_SERVICE
           || p_mc_resp->resp_hdr.service_cmd != PSE_MC_READ
           || p_mc_resp->resp_hdr.status != PSE_SUCCESS){
            if(PSE_ERROR_MC_NOT_FOUND == p_mc_resp->resp_hdr.status)
                status = SGX_ERROR_MC_NOT_FOUND;
            else
                status = SGX_ERROR_UNEXPECTED;
        } else {
            *p_counter_value = p_mc_resp->counter_value;
            status = SGX_SUCCESS;
            break;
        }
    } while(retry--);
    free(p_req_msg);
    free(p_resp_msg);
    return status;
}
Example 13
sgx_status_t sgx_create_monotonic_counter_ex(
    uint16_t owner_policy,
    const sgx_attributes_t* owner_attribute_mask,
    sgx_mc_uuid_t *p_counter_uuid,
    uint32_t *p_counter_value)
{
    if(!p_counter_value || !p_counter_uuid || !owner_attribute_mask){
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if (0!= (~(MC_POLICY_SIGNER | MC_POLICY_ENCLAVE) & owner_policy)
        || 0 == ((MC_POLICY_SIGNER | MC_POLICY_ENCLAVE)& owner_policy))
    {
        return SGX_ERROR_INVALID_PARAMETER;
    }
    if (!g_b_session_established){
        return SGX_ERROR_AE_SESSION_INVALID;
    }

    pse_message_t *p_req_msg = (pse_message_t *)malloc(PSE_CREATE_MC_REQ_SIZE);
    if(!p_req_msg){
        return SGX_ERROR_OUT_OF_MEMORY;
    }
    pse_message_t *p_resp_msg = (pse_message_t *)malloc(PSE_CREATE_MC_RESP_SIZE);
    if(!p_resp_msg){
        free(p_req_msg);
        return SGX_ERROR_OUT_OF_MEMORY;
    }
    p_req_msg->exp_resp_size = sizeof(pse_mc_create_resp_t);
    p_req_msg->payload_size = sizeof(pse_mc_create_req_t);

    pse_mc_create_req_t *p_mc_req
        = (pse_mc_create_req_t *)p_req_msg->payload;
    p_mc_req->req_hdr.service_id = PSE_MC_SERVICE;
    p_mc_req->req_hdr.service_cmd = PSE_MC_CREATE;
    p_mc_req->policy = owner_policy;
    memcpy(p_mc_req->attr_mask , owner_attribute_mask, sizeof(p_mc_req->attr_mask));

    pse_mc_create_resp_t *p_mc_resp
        = (pse_mc_create_resp_t *)p_resp_msg->payload;

    sgx_status_t status = SGX_SUCCESS;
    sgx_status_t ret = SGX_SUCCESS;
    int retry = RETRY_TIMES;
    do {
        sgx_spin_lock(&g_spin_lock);
        status = invoke_service_ocall(&ret,
                                      (uint8_t *)p_req_msg,
                                      PSE_CREATE_MC_REQ_SIZE,
                                      (uint8_t *)p_resp_msg,
                                      PSE_CREATE_MC_RESP_SIZE,
                                      DEFAULT_AESM_TIMEOUT);
        sgx_spin_unlock(&g_spin_lock);
        if(status != SGX_SUCCESS || ret != SGX_SUCCESS){
            status = SGX_ERROR_UNEXPECTED;
            continue;
        }

        if(p_mc_resp->resp_hdr.service_id != PSE_MC_SERVICE
           || p_mc_resp->resp_hdr.service_cmd != PSE_MC_CREATE
           || p_mc_resp->resp_hdr.status != PSE_SUCCESS){
            status = SGX_ERROR_UNEXPECTED;
        } else {
            memcpy(p_counter_uuid->counter_id,
                   &p_mc_resp->counter_id,
                   sizeof(p_counter_uuid->counter_id));
            memcpy(p_counter_uuid->nonce,
                   &p_mc_resp->nonce,
                   sizeof(p_counter_uuid->nonce));
            *p_counter_value = 0;
            status = SGX_SUCCESS;
            break;
        }
    } while(retry--);
    free(p_req_msg);
    free(p_resp_msg);
    return status;
}
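
A sketch of how the two counter functions are meant to be chained with the session calls from Examples 3 and 4. demo_monotonic_counter is a hypothetical helper, the attribute-mask value is illustrative only (an ISV policy decision), and MC_POLICY_SIGNER comes from the same header as the policy check above.

sgx_status_t demo_monotonic_counter(void)
{
    sgx_status_t ret = sgx_create_pse_session();
    if (ret != SGX_SUCCESS)
        return ret;

    sgx_mc_uuid_t mc_uuid;
    uint32_t value = 0;
    sgx_attributes_t attr_mask;
    attr_mask.flags = ~0ULL;     // illustrative: match on all attribute bits
    attr_mask.xfrm  = 0;

    ret = sgx_create_monotonic_counter_ex(MC_POLICY_SIGNER, &attr_mask,
                                          &mc_uuid, &value);
    if (SGX_SUCCESS == ret)
        ret = sgx_read_monotonic_counter(&mc_uuid, &value);

    (void)sgx_close_pse_session();
    return ret;
}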
Example 14
// internal_handle_exception(sgx_exception_info_t *info):
//      the 2nd-phase exception handling, which traverses the registered exception handlers.
//      If the exception can be handled, execution continues;
//      otherwise, abort, go back to the 1st phase, and let the default handler run.
extern "C" __attribute__((regparm(1))) void internal_handle_exception(sgx_exception_info_t *info)
{
    int status = EXCEPTION_CONTINUE_SEARCH;
    handler_node_t *node = NULL;
    thread_data_t *thread_data = get_thread_data();
    size_t size = 0;
    uintptr_t *nhead = NULL;
    uintptr_t *ntmp = NULL;
    uintptr_t xsp = 0;

    if (thread_data->exception_flag < 0)
        goto failed_end;
    thread_data->exception_flag++;

    // read lock
    sgx_spin_lock(&g_handler_lock);

    node = g_first_node;
    while(node != NULL)
    {
        size += sizeof(uintptr_t);
        node = node->next;
    }

    // There's no exception handler registered
    if (size == 0)
    {
        sgx_spin_unlock(&g_handler_lock);

        //exception cannot be handled
        thread_data->exception_flag = -1;

        //instruction triggering the exception will be executed again.
        continue_execution(info);
    }

    if ((nhead = (uintptr_t *)malloc(size)) == NULL)
    {
        sgx_spin_unlock(&g_handler_lock);
        goto failed_end;
    }
    ntmp = nhead;
    node = g_first_node;
    while(node != NULL)
    {
        *ntmp = node->callback;
        ntmp++;
        node = node->next;
    }

    // read unlock
    sgx_spin_unlock(&g_handler_lock);

    // call the exception handlers until one returns EXCEPTION_CONTINUE_EXECUTION
    ntmp = nhead;
    while(size > 0)
    {
        sgx_exception_handler_t handler = DEC_VEH_POINTER(*ntmp);
        status = handler(info);
        if(EXCEPTION_CONTINUE_EXECUTION == status)
        {
            break;
        }
        ntmp++;
        size -= sizeof(sgx_exception_handler_t);
    }
    free(nhead);

    // call the default handler
    // ignore an invalid return value and treat it as EXCEPTION_CONTINUE_SEARCH
    // check that the SP to be written to the SSA points into the trusted stack
    xsp = info->cpu_context.REG(sp);
    if (!is_valid_sp(xsp))
    {
        goto failed_end;
    }

    if(EXCEPTION_CONTINUE_EXECUTION == status)
    {
        //exception is handled, decrease the nested exception count
        thread_data->exception_flag--;
    }
    else
    {
        //exception cannot be handled
        thread_data->exception_flag = -1;
    }

    //instruction triggering the exception will be executed again.
    continue_execution(info);

failed_end:
    thread_data->exception_flag = -1; // mark the current exception cannot be handled
    abort();    // throw abortion
}