Code Example #1
File: tee_svc.c Project: gxliu/optee_os
TEE_Result tee_svc_invoke_ta_command(TEE_TASessionHandle ta_sess,
				     uint32_t cancel_req_to, uint32_t cmd_id,
				     uint32_t param_types, TEE_Param params[4],
				     uint32_t *ret_orig)
{
	TEE_Result res;
	uint32_t ret_o = TEE_ORIGIN_TEE;
	struct tee_ta_param param = { 0 };
	TEE_Identity clnt_id;
	struct tee_ta_session *sess;
	struct tee_ta_session *called_sess = (struct tee_ta_session *)ta_sess;
	tee_mm_entry_t *mm_param = NULL;
	tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS];

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_ta_verify_session_pointer(called_sess,
					    &sess->ctx->open_sessions);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_copy_param(sess, called_sess, param_types, params,
				 &param, tmp_buf_pa, &mm_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	/*
	 * Identify this TA as the client of the called session, mirroring
	 * the client identity setup in tee_svc_open_ta_session() below.
	 */
	clnt_id.login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id.uuid, &sess->ctx->head->uuid, sizeof(TEE_UUID));

	res = tee_ta_invoke_command(&ret_o, called_sess, &clnt_id,
				    cancel_req_to, cmd_id, &param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	res = tee_svc_update_out_param(sess, called_sess, &param, tmp_buf_pa,
				       params);
	if (res != TEE_SUCCESS)
		goto function_exit;

function_exit:
	tee_ta_set_current_session(sess);
	called_sess->calling_sess = NULL; /* clear any borrowed mapping */

	if (mm_param != NULL) {
		TEE_Result res2;
		void *va = 0;

		res2 = tee_mmu_kmap_pa2va((void *)tee_mm_get_smem(mm_param),
					  &va);
		if (res2 == TEE_SUCCESS)
			tee_mmu_kunmap(va, tee_mm_get_bytes(mm_param));
	}
	tee_mm_free(mm_param);
	if (ret_orig)
		tee_svc_copy_to_user(sess, ret_orig, &ret_o, sizeof(ret_o));
	return res;
}
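
The cleanup under function_exit above (look up the kernel VA of the temporary parameter buffer, unmap it, then free the tee_mm entry) is repeated verbatim at the end of Code Example #6. A small helper could factor it out; the sketch below is illustrative only (the helper name is hypothetical, and it assumes the same tee_mmu_kmap_*/tee_mm_* APIs used in these snippets):

/*
 * Hypothetical helper (sketch): release the temporary kernel mapping and the
 * tee_mm allocation set up for the shared parameter buffer, as done at the
 * end of tee_svc_invoke_ta_command() and tee_svc_open_ta_session().
 */
static void free_kmapped_param_buf(tee_mm_entry_t *mm_param)
{
	void *va = NULL;

	if (mm_param == NULL)
		return;

	/* Unmap the kernel alias of the buffer if it is still mapped */
	if (tee_mmu_kmap_pa2va((void *)tee_mm_get_smem(mm_param), &va) ==
	    TEE_SUCCESS)
		tee_mmu_kunmap(va, tee_mm_get_bytes(mm_param));

	tee_mm_free(mm_param);
}

Each cleanup site would then reduce to a single free_kmapped_param_buf(mm_param) call.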
Code Example #2
File: tee_mmu.c Project: enavro/optee_os
bool tee_mmu_kmap_is_mapped(void *va, size_t len)
{
	tee_vaddr_t a = (tee_vaddr_t)va;
	tee_mm_entry_t *mm = tee_mm_find(&tee_mmu_virt_kmap, a);

	if (mm == NULL)
		return false;

	if ((a + len) > (tee_mm_get_smem(mm) + tee_mm_get_bytes(mm)))
		return false;

	return true;
}
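
A caller would typically use this check to validate a kernel-mapped range before touching it. The sketch below is illustrative only (the helper and the choice of error code are not from the project):

/* Sketch: copy out of a kernel-mapped buffer only if the whole range is
 * covered by an existing kmap entry. */
static TEE_Result read_kmapped(void *dst, void *kva, size_t len)
{
	if (!tee_mmu_kmap_is_mapped(kva, len))
		return TEE_ERROR_ACCESS_DENIED;

	memcpy(dst, kva, len);
	return TEE_SUCCESS;
}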
Code Example #3
File: tee_mmu.c Project: enavro/optee_os
void tee_mmu_kunmap(void *va, size_t len)
{
	size_t n;
	tee_mm_entry_t *mm;
	struct core_mmu_table_info tbl_info;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	mm = tee_mm_find(&tee_mmu_virt_kmap, (vaddr_t)va);
	if (mm == NULL || len > tee_mm_get_bytes(mm))
		return;		/* Invalid range, not much to do */

	/* Clear the mmu entries */
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs, 0, 0);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	tee_mm_free(mm);
}
Code Example #4
File: tee_mmu.c Project: enavro/optee_os
/*
 * tee_mmu_map - allocate and fill the MMU mapping table for a user TA (uTA).
 *
 * param - contains the physical addresses of the input buffers on entry and
 *         returns the corresponding logical (user virtual) addresses.
 *
 * Allocate a table to store the first N section entries of the MMU L1 table
 * used to map the target user TA, and clear the table to 0.
 * Load the mappings for the TA stack/heap area, code area and params area
 * (the params are the buffers of the 4 GlobalPlatform TEE invoke parameters).
 */
TEE_Result tee_mmu_map(struct tee_ta_ctx *ctx, struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t pa = 0;
	uintptr_t smem;
	size_t n;

	TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);

	res = tee_mmu_umap_init(ctx->mmu);
	if (res != TEE_SUCCESS)
		goto exit;

	/*
	 * Map stack
	 */
	smem = tee_mm_get_smem(ctx->mm_stack);
	if (core_va2pa((void *)smem, &pa)) {
		res = TEE_ERROR_SECURITY;
		goto exit;
	}
	tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_HEAP_STACK_IDX,
			    CORE_MMU_USER_CODE_SIZE,
			    pa, tee_mm_get_bytes(ctx->mm_stack),
			    TEE_MMU_UDATA_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);

	/*
	 * Map code
	 */
	smem = tee_mm_get_smem(ctx->mm);
	if (core_va2pa((void *)smem, &pa)) {
		res = TEE_ERROR_SECURITY;
		goto exit;
	}
	tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_CODE_IDX,
			    CORE_MMU_USER_CODE_SIZE,
			    pa, tee_mm_get_bytes(ctx->mm),
			    TEE_MMU_UCODE_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);

	for (n = 0; n < 4; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		TEE_Param *p = &param->params[n];
		uint32_t attr = TEE_MMU_UDATA_ATTR;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (p->memref.size == 0)
			continue;

		if (tee_pbuf_is_non_sec(p->memref.buffer, p->memref.size))
			attr &= ~TEE_MATTR_SECURE;

		if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_THR)
			attr |= TEE_MATTR_I_WRITE_THR;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_BACK)
			attr |= TEE_MATTR_I_WRITE_BACK;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_THR)
			attr |= TEE_MATTR_O_WRITE_THR;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_BACK)
			attr |= TEE_MATTR_O_WRITE_BACK;

		res = tee_mmu_umap_add_param(ctx->mmu,
				(paddr_t)p->memref.buffer, p->memref.size,
				attr);
		if (res != TEE_SUCCESS)
			goto exit;
	}

	res = tee_mmu_umap_set_vas(ctx->mmu);
	if (res != TEE_SUCCESS)
		goto exit;

	for (n = 0; n < 4; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		TEE_Param *p = &param->params[n];

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (p->memref.size == 0)
			continue;

		res = tee_mmu_user_pa2va(ctx, (paddr_t)p->memref.buffer,
					 &p->memref.buffer);
		if (res != TEE_SUCCESS)
			goto exit;
	}

	ctx->mmu->ta_private_vmem_start = ctx->mmu->table[0].va;

	n = TEE_MMU_UMAP_MAX_ENTRIES;
	do {
		n--;
	} while (n && !ctx->mmu->table[n].size);
	ctx->mmu->ta_private_vmem_end = ctx->mmu->table[n].va +
					ctx->mmu->table[n].size;

exit:
	if (res != TEE_SUCCESS)
		tee_mmu_umap_clear(ctx->mmu);

	return res;
}
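
As the doc comment above says, the memref parameters carry physical addresses on entry and hold the TA-visible virtual addresses on return. A minimal caller sketch, with a hypothetical helper name and the context setup elided (it assumes the GlobalPlatform TEE_PARAM_TYPES helpers available in this tree):

/*
 * Sketch: map a single MEMREF_INPUT buffer into a user TA context. On
 * success, param.params[0].memref.buffer holds the user virtual address.
 */
static TEE_Result map_one_input_buf(struct tee_ta_ctx *ctx,
				    paddr_t buf_pa, size_t buf_len)
{
	struct tee_ta_param param = { 0 };

	param.types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
				      TEE_PARAM_TYPE_NONE,
				      TEE_PARAM_TYPE_NONE,
				      TEE_PARAM_TYPE_NONE);
	param.params[0].memref.buffer = (void *)buf_pa;
	param.params[0].memref.size = buf_len;

	return tee_mmu_map(ctx, &param);
}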
Code Example #5
File: generic_boot.c Project: jbech-linaro/optee_os
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
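	/*
	 * One SHA-256 hash per small page: assuming the usual 4 KiB
	 * SMALL_PAGE_SIZE and 32-byte TEE_SHA256_HASH_SIZE, a 1 MiB pageable
	 * area needs 256 * 32 bytes = 8 KiB of hashes.
	 */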
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	thread_init_boot_thread();

	init_asan();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	/*
	 * Load the pageable part into the dedicated allocated area:
	 * - Move the pageable non-init part into the pageable area. Note
	 *   that the bootloader may have loaded it anywhere in TA RAM,
	 *   hence the use of memmove().
	 * - Copy the pageable init part from its current location into the
	 *   pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part)),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, page, res);
			panic();
		}
	}

	/*
	 * Assert that the pre-paged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the pre-mapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				TEE_MATTR_PRX, paged_store, hashes);

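	/*
	 * Register the pages of the pageable range with the pager: the
	 * pre-mapped init part is added as currently present, the remaining
	 * pages as unmapped so they are paged in on demand.
	 */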
	tee_pager_add_pages((vaddr_t)__pageable_start,
			init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start + init_size,
			(pageable_size - init_size) / SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZSRAM
	 * and its memory can be reused by OP-TEE once the boot stages
	 * complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			true);
}
Code Example #6
File: tee_svc.c Project: gxliu/optee_os
/* Called when a TA calls OpenSession on another TA */
TEE_Result tee_svc_open_ta_session(const TEE_UUID *dest,
				   uint32_t cancel_req_to, uint32_t param_types,
				   TEE_Param params[4],
				   TEE_TASessionHandle *ta_sess,
				   uint32_t *ret_orig)
{
	TEE_Result res;
	uint32_t ret_o = TEE_ORIGIN_TEE;
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *sess;
	tee_mm_entry_t *mm_param = NULL;

	TEE_UUID *uuid = malloc(sizeof(TEE_UUID));
	struct tee_ta_param *param = malloc(sizeof(struct tee_ta_param));
	TEE_Identity *clnt_id = malloc(sizeof(TEE_Identity));
	tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS];

	if (uuid == NULL || param == NULL || clnt_id == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_free_only;
	}

	memset(param, 0, sizeof(struct tee_ta_param));

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		goto out_free_only;

	res = tee_svc_copy_from_user(sess, uuid, dest, sizeof(TEE_UUID));
	if (res != TEE_SUCCESS)
		goto function_exit;

	clnt_id->login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id->uuid, &sess->ctx->head->uuid, sizeof(TEE_UUID));

	res = tee_svc_copy_param(sess, NULL, param_types, params, param,
				 tmp_buf_pa, &mm_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	/*
	 * Find the session of a multi-session TA or a static TA.
	 * In such a case, there is no need to ask the supplicant for the TA
	 * code.
	 */
	res = tee_ta_open_session(&ret_o, &s, &sess->ctx->open_sessions, uuid,
				  NULL, clnt_id, cancel_req_to, param);

	if (ret_o != TEE_ORIGIN_TEE || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto function_exit;

	if (ret_o == TEE_ORIGIN_TEE && res == TEE_ERROR_ITEM_NOT_FOUND) {
		kta_signed_header_t *ta = NULL;
		struct tee_ta_nwumap lp;

		tee_mmu_set_ctx(NULL);

		/* Load TA */
		res = tee_ta_rpc_load(uuid, &ta, &lp, &ret_o);
		if (res != TEE_SUCCESS) {
			tee_mmu_set_ctx(sess->ctx);
			goto function_exit;
		}

		res = tee_ta_open_session(&ret_o, &s, &sess->ctx->open_sessions,
					  uuid, ta, clnt_id, cancel_req_to,
					  param);
		tee_mmu_set_ctx(sess->ctx);
		if (res != TEE_SUCCESS)
			goto function_exit;

		s->ctx->nwumap = lp;
	}

	res = tee_svc_update_out_param(sess, NULL, param, tmp_buf_pa, params);
	if (res != TEE_SUCCESS)
		goto function_exit;

function_exit:
	tee_ta_set_current_session(sess);

	if (mm_param != NULL) {
		TEE_Result res2;
		void *va = 0;

		res2 = tee_mmu_kmap_pa2va((void *)tee_mm_get_smem(mm_param),
					  &va);
		if (res2 == TEE_SUCCESS)
			tee_mmu_kunmap(va, tee_mm_get_bytes(mm_param));
	}
	tee_mm_free(mm_param);
	tee_svc_copy_to_user(sess, ta_sess, &s, sizeof(s));
	tee_svc_copy_to_user(sess, ret_orig, &ret_o, sizeof(ret_o));

out_free_only:
	free(param);
	free(uuid);
	free(clnt_id);
	return res;
}