Example #1
File: tee_svc.c Project: gxliu/optee_os
TEE_Result tee_svc_invoke_ta_command(TEE_TASessionHandle ta_sess,
				     uint32_t cancel_req_to, uint32_t cmd_id,
				     uint32_t param_types, TEE_Param params[4],
				     uint32_t *ret_orig)
{
	TEE_Result res;
	uint32_t ret_o = TEE_ORIGIN_TEE;
	struct tee_ta_param param = { 0 };
	TEE_Identity clnt_id;
	struct tee_ta_session *sess;
	struct tee_ta_session *called_sess = (struct tee_ta_session *)ta_sess;
	tee_mm_entry_t *mm_param = NULL;
	tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS];

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		return res;

	res =
	    tee_ta_verify_session_pointer(called_sess,
					  &sess->ctx->open_sessions);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_svc_copy_param(sess, called_sess, param_types, params,
				 &param, tmp_buf_pa, &mm_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	/* Identify the calling TA as the client (as in the open-session path) */
	clnt_id.login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id.uuid, &sess->ctx->head->uuid, sizeof(TEE_UUID));

	res = tee_ta_invoke_command(&ret_o, called_sess, &clnt_id,
				    cancel_req_to, cmd_id, &param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	res = tee_svc_update_out_param(sess, called_sess, &param, tmp_buf_pa,
				       params);
	if (res != TEE_SUCCESS)
		goto function_exit;

function_exit:
	tee_ta_set_current_session(sess);
	called_sess->calling_sess = NULL; /* clear any borrowed mapping */

	if (mm_param != NULL) {
		TEE_Result res2;
		void *va = 0;

		res2 =
		    tee_mmu_kmap_pa2va((void *)tee_mm_get_smem(mm_param), &va);
		if (res2 == TEE_SUCCESS)
			tee_mmu_kunmap(va, tee_mm_get_bytes(mm_param));
	}
	tee_mm_free(mm_param);
	if (ret_orig)
		tee_svc_copy_to_user(sess, ret_orig, &ret_o, sizeof(ret_o));
	return res;
}
Example #2
File: ree_fs_ta.c Project: OP-TEE/optee_os
static TEE_Result buf_ta_open(const TEE_UUID *uuid,
			      struct user_ta_store_handle **h)
{
	struct buf_ree_fs_ta_handle *handle = NULL;
	TEE_Result res = TEE_SUCCESS;

	handle = calloc(1, sizeof(*handle));
	if (!handle)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = ree_fs_ta_open(uuid, &handle->h);
	if (res)
		goto err2;
	res = ree_fs_ta_get_size(handle->h, &handle->ta_size);
	if (res)
		goto err;

	res = ree_fs_ta_get_tag(handle->h, NULL, &handle->tag_len);
	if (res != TEE_ERROR_SHORT_BUFFER) {
		res = TEE_ERROR_GENERIC;
		goto err;
	}
	handle->tag = malloc(handle->tag_len);
	if (!handle->tag) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	res = ree_fs_ta_get_tag(handle->h, handle->tag, &handle->tag_len);
	if (res)
		goto err;

	handle->mm = tee_mm_alloc(&tee_mm_sec_ddr, handle->ta_size);
	if (!handle->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	handle->buf = phys_to_virt(tee_mm_get_smem(handle->mm),
				   MEM_AREA_TA_RAM);
	if (!handle->buf) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	res = ree_fs_ta_read(handle->h, handle->buf, handle->ta_size);
	if (res)
		goto err;
	*h = (struct user_ta_store_handle *)handle;
err:
	ree_fs_ta_close(handle->h);
err2:
	if (res) {
		tee_mm_free(handle->mm);
		free(handle->tag);
		free(handle);
	}
	return res;
}
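
The allocation idiom in Example #2 — reserve physical pages from the tee_mm_sec_ddr pool with tee_mm_alloc(), translate the physical start address from tee_mm_get_smem() with phys_to_virt(), and release the reservation with tee_mm_free() on failure — recurs throughout these examples. Below is a minimal sketch of that idiom in isolation; the helper name is hypothetical and header includes are omitted, as in the examples themselves.

/* Hypothetical helper (sketch only, not code from either project above):
 * reserve `size` bytes of secure TA RAM and return both the pool entry
 * (needed later for tee_mm_free()) and a usable virtual address. */
static TEE_Result alloc_ta_ram(size_t size, tee_mm_entry_t **mm_ret,
			       void **va_ret)
{
	tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	void *va = NULL;

	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* tee_mm_get_smem() returns the physical start of the reservation;
	 * translate it to a virtual address before touching the memory. */
	va = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	if (!va) {
		tee_mm_free(mm);
		return TEE_ERROR_OUT_OF_MEMORY;
	}

	*mm_ret = mm;
	*va_ret = va;
	return TEE_SUCCESS;
}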
Example #3
File: tee_mmu.c Project: enavro/optee_os
TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
{
	tee_mm_entry_t *mm;
	uint32_t attr;
	struct core_mmu_table_info tbl_info;
	uint32_t pa_s;
	uint32_t pa_e;
	size_t n;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	pa_s = ROUNDDOWN(pa, 1 << tbl_info.shift);
	pa_e = ROUNDUP(pa + len, 1 << tbl_info.shift);

	mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL;
	if (tee_pbuf_is_sec(pa, len)) {
		attr |= TEE_MATTR_SECURE;
		attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else if (tee_pbuf_is_non_sec(pa, len)) {
		if (core_mmu_is_shm_cached())
			attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else {
		/* Neither fully secure nor fully non-secure: reject and
		 * release the virtual range reserved above. */
		tee_mm_free(mm);
		return TEE_ERROR_GENERIC;
	}

	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs,
				   pa_s + (n << tbl_info.shift), attr);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*va = (void *)(tee_mm_get_smem(mm) +
		       core_mmu_get_block_offset(&tbl_info, pa));
	return TEE_SUCCESS;
}
Example #4
File: tee_mmu.c Project: enavro/optee_os
bool tee_mmu_kmap_is_mapped(void *va, size_t len)
{
	tee_vaddr_t a = (tee_vaddr_t)va;
	tee_mm_entry_t *mm = tee_mm_find(&tee_mmu_virt_kmap, a);

	if (mm == NULL)
		return false;

	if ((a + len) > (tee_mm_get_smem(mm) + tee_mm_get_bytes(mm)))
		return false;

	return true;
}
Example #5
File: tee_mmu.c Project: enavro/optee_os
void tee_mmu_kunmap(void *va, size_t len)
{
	size_t n;
	tee_mm_entry_t *mm;
	struct core_mmu_table_info tbl_info;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	mm = tee_mm_find(&tee_mmu_virt_kmap, (vaddr_t)va);
	if (mm == NULL || len > tee_mm_get_bytes(mm))
		return;		/* Invalid range, not much to do */

	/* Clear the mmu entries */
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs, 0, 0);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	tee_mm_free(mm);
}
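
Examples #3 and #5 are the two halves of the older kernel-map API: tee_mmu_kmap_helper() installs MMU entries for a physical range inside the kmap window and returns the corresponding virtual address, while tee_mmu_kunmap() clears those entries and frees the reservation. The sketch below shows how a caller would pair them, assuming only the two functions shown above; the helper itself is hypothetical and memcpy() comes from <string.h>.

/* Hypothetical helper (sketch only): temporarily map a physical buffer,
 * fill it through the mapping, then tear the mapping down again. */
static TEE_Result fill_phys_buf(tee_paddr_t pa, const void *src, size_t len)
{
	void *va = NULL;
	TEE_Result res;

	/* Map the physical range into the kernel kmap window (Example #3) */
	res = tee_mmu_kmap_helper(pa, len, &va);
	if (res != TEE_SUCCESS)
		return res;

	/* Access the memory only through the returned virtual address */
	memcpy(va, src, len);

	/* Clear the entries and free the reservation again (Example #5) */
	tee_mmu_kunmap(va, len);
	return TEE_SUCCESS;
}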
Example #6
File: tee_mmu.c Project: enavro/optee_os
/*
 * tee_mmu_map - alloc and fill mmu mapping table for a user TA (uTA).
 *
 * param - Contains the physical addr of the input buffers
 *         Returns logical addresses
 *
 * Allocate a table to store the first N section entries of the MMU L1 table
 * used to map the target user TA, and clear the table to 0.
 * Load mappings for the TA stack/heap area, code area and params area (params
 * are the 4 GP TEE TA invoke parameter buffers).
 */
TEE_Result tee_mmu_map(struct tee_ta_ctx *ctx, struct tee_ta_param *param)
{
	TEE_Result res = TEE_SUCCESS;
	paddr_t pa = 0;
	uintptr_t smem;
	size_t n;

	TEE_ASSERT((ctx->flags & TA_FLAG_EXEC_DDR) != 0);

	res = tee_mmu_umap_init(ctx->mmu);
	if (res != TEE_SUCCESS)
		goto exit;

	/*
	 * Map stack
	 */
	smem = tee_mm_get_smem(ctx->mm_stack);
	if (core_va2pa((void *)smem, &pa)) {
		res = TEE_ERROR_SECURITY;
		goto exit;
	}
	tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_HEAP_STACK_IDX,
			    CORE_MMU_USER_CODE_SIZE,
			    pa, tee_mm_get_bytes(ctx->mm_stack),
			    TEE_MMU_UDATA_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);

	/*
	 * Map code
	 */
	smem = tee_mm_get_smem(ctx->mm);
	if (core_va2pa((void *)smem, &pa)) {
		res = TEE_ERROR_SECURITY;
		goto exit;
	}
	tee_mmu_umap_set_pa(ctx->mmu->table + TEE_MMU_UMAP_CODE_IDX,
			    CORE_MMU_USER_CODE_SIZE,
			    pa, tee_mm_get_bytes(ctx->mm),
			    TEE_MMU_UCODE_ATTR | TEE_MMU_UCACHE_DEFAULT_ATTR);


	for (n = 0; n < 4; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		TEE_Param *p = &param->params[n];
		uint32_t attr = TEE_MMU_UDATA_ATTR;

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (p->memref.size == 0)
			continue;

		if (tee_pbuf_is_non_sec(p->memref.buffer, p->memref.size))
			attr &= ~TEE_MATTR_SECURE;

		if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_THR)
			attr |= TEE_MATTR_I_WRITE_THR;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_I_WRITE_BACK)
			attr |= TEE_MATTR_I_WRITE_BACK;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_THR)
			attr |= TEE_MATTR_O_WRITE_THR;
		if (param->param_attr[n] & TEESMC_ATTR_CACHE_O_WRITE_BACK)
			attr |= TEE_MATTR_O_WRITE_BACK;


		res = tee_mmu_umap_add_param(ctx->mmu,
				(paddr_t)p->memref.buffer, p->memref.size,
				attr);
		if (res != TEE_SUCCESS)
			goto exit;
	}

	res = tee_mmu_umap_set_vas(ctx->mmu);
	if (res != TEE_SUCCESS)
		goto exit;

	for (n = 0; n < 4; n++) {
		uint32_t param_type = TEE_PARAM_TYPE_GET(param->types, n);
		TEE_Param *p = &param->params[n];

		if (param_type != TEE_PARAM_TYPE_MEMREF_INPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_OUTPUT &&
		    param_type != TEE_PARAM_TYPE_MEMREF_INOUT)
			continue;
		if (p->memref.size == 0)
			continue;

		res = tee_mmu_user_pa2va(ctx, (paddr_t)p->memref.buffer,
					 &p->memref.buffer);
		if (res != TEE_SUCCESS)
			goto exit;
	}

	ctx->mmu->ta_private_vmem_start = ctx->mmu->table[0].va;

	n = TEE_MMU_UMAP_MAX_ENTRIES;
	do {
		n--;
	} while (n && !ctx->mmu->table[n].size);
	ctx->mmu->ta_private_vmem_end = ctx->mmu->table[n].va +
					ctx->mmu->table[n].size;

exit:
	if (res != TEE_SUCCESS)
		tee_mmu_umap_clear(ctx->mmu);

	return res;
}
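
Both loops in tee_mmu_map() decode the packed parameter-type word with TEE_PARAM_TYPE_GET(). For reference, here is a small illustration of how such a word is built and read using the standard GP TEE macros; the demo function itself is not part of the project.

static void param_types_demo(void)
{
	/* Pack four parameter types, 4 bits each, into one 32-bit word. */
	uint32_t types = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INPUT,
					 TEE_PARAM_TYPE_VALUE_OUTPUT,
					 TEE_PARAM_TYPE_NONE,
					 TEE_PARAM_TYPE_NONE);

	/* TEE_PARAM_TYPE_GET(types, 0) == TEE_PARAM_TYPE_MEMREF_INPUT */
	/* TEE_PARAM_TYPE_GET(types, 1) == TEE_PARAM_TYPE_VALUE_OUTPUT */
	/* Slots 2 and 3 decode to TEE_PARAM_TYPE_NONE, so loops like the
	 * ones above simply skip them. */
	(void)types;
}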
Example #7
static void init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	size_t block_size;

	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);
	TEE_ASSERT(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * Zero BSS area. Note that globals that would normally go into
	 * BSS but are used before this point have to be put into .nozi.*
	 * to avoid getting overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	thread_init_boot_thread();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	EMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
		__pageable_part_end - __pageable_part_start);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
				n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We can't
	 * fault in pages until we've switched to the new vector by calling
	 * thread_init_handlers() below.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
			SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	TEE_ASSERT(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged. Note that there might be
	 * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
	 * claimed to prevent later allocations from getting that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	if (!tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
				paged_store, hashes))
		panic();
	tee_pager_add_pages((vaddr_t)__pageable_start,
		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE, true);

}
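
init_runtime() relies throughout on the ROUNDDOWN()/ROUNDUP() alignment macros (also used in Example #3). Assuming the usual power-of-two definitions from OP-TEE's util.h and SMALL_PAGE_SIZE of 0x1000, their effect is shown by this small demo:

static void alignment_demo(void)
{
	/* Round down/up to the nearest 4 KiB page boundary. */
	assert(ROUNDDOWN(0x12345, SMALL_PAGE_SIZE) == 0x12000);
	assert(ROUNDUP(0x12345, SMALL_PAGE_SIZE) == 0x13000);
	/* Already-aligned values are unchanged. */
	assert(ROUNDDOWN(0x12000, SMALL_PAGE_SIZE) == 0x12000);
	assert(ROUNDUP(0x12000, SMALL_PAGE_SIZE) == 0x12000);
}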
Example #8
int tee_rpmb_fs_write(const char *filename, uint8_t *buf, size_t size)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct file_handle *fh = NULL;
	tee_mm_pool_t p;
	tee_mm_entry_t *mm = NULL;
	size_t length;
	uint32_t mm_flags;

	if (filename == NULL || buf == NULL) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	length = strlen(filename);
	if ((length >= FILENAME_LENGTH - 1) || (length == 0)) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	/* Create a FAT entry for the file to write. */
	fh = alloc_file_handle(filename);
	if (fh == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	/* Upper memory allocation must be used for RPMB_FS. */
	mm_flags = TEE_MM_POOL_HI_ALLOC;
	if (!tee_mm_init
	    (&p, RPMB_STORAGE_START_ADDRESS, RPMB_STORAGE_END_ADDRESS,
	     RPMB_BLOCK_SIZE_SHIFT, mm_flags)) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = read_fat(fh, &p);
	if (res != TEE_SUCCESS)
		goto out;

	mm = tee_mm_alloc(&p, size);
	if (mm == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	if ((fh->fat_entry.flags & FILE_IS_LAST_ENTRY) != 0) {
		res = add_fat_entry(fh);
		if (res != TEE_SUCCESS)
			goto out;
	}

	memset(&fh->fat_entry, 0, sizeof(struct rpmb_fat_entry));
	memcpy(fh->fat_entry.filename, filename, length);
	fh->fat_entry.data_size = size;
	fh->fat_entry.flags = FILE_IS_ACTIVE;
	fh->fat_entry.start_address = tee_mm_get_smem(mm);

	res = tee_rpmb_write(DEV_ID, fh->fat_entry.start_address, buf, size);
	if (res != TEE_SUCCESS)
		goto out;

	res = write_fat_entry(fh, true);

out:
	free(fh);
	if (mm != NULL)
		tee_mm_final(&p);

	if (res == TEE_SUCCESS)
		return size;

	return -1;
}
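
Example #8 also demonstrates the full lifecycle of a private, caller-owned pool: tee_mm_init() describes the managed address range, block granularity and allocation direction, tee_mm_alloc() carves a reservation out of it, and tee_mm_final() releases the pool. Below is a compressed sketch of just that lifecycle; the function name, range, shift and size are placeholders rather than the RPMB constants.

/* Sketch only: addresses and sizes below are arbitrary placeholders. */
static bool pool_lifecycle_demo(void)
{
	tee_mm_pool_t pool;
	tee_mm_entry_t *mm;

	/* Manage [0x100000, 0x200000) in 2^10-byte blocks, allocating from
	 * the top of the range (TEE_MM_POOL_HI_ALLOC) as the RPMB FS does. */
	if (!tee_mm_init(&pool, 0x100000, 0x200000, 10, TEE_MM_POOL_HI_ALLOC))
		return false;

	mm = tee_mm_alloc(&pool, 4096);
	if (mm) {
		/* Physical start of the reservation, near the top of the
		 * managed range because of HI_ALLOC. */
		tee_paddr_t start = tee_mm_get_smem(mm);

		(void)start;
	}

	tee_mm_final(&pool);	/* releases every entry still in the pool */
	return mm != NULL;
}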
Example #9
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	thread_init_boot_thread();

	init_asan();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part)),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed to not need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				TEE_MATTR_PRX, paged_store, hashes);

	tee_pager_add_pages((vaddr_t)__pageable_start,
			init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start + init_size,
			(pageable_size - init_size) / SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			true);
}
Example #10
File: tee_svc.c Project: gxliu/optee_os
/* Called when a TA calls an OpenSession on another TA */
TEE_Result tee_svc_open_ta_session(const TEE_UUID *dest,
				   uint32_t cancel_req_to, uint32_t param_types,
				   TEE_Param params[4],
				   TEE_TASessionHandle *ta_sess,
				   uint32_t *ret_orig)
{
	TEE_Result res;
	uint32_t ret_o = TEE_ORIGIN_TEE;
	struct tee_ta_session *s = NULL;
	struct tee_ta_session *sess;
	tee_mm_entry_t *mm_param = NULL;

	TEE_UUID *uuid = malloc(sizeof(TEE_UUID));
	struct tee_ta_param *param = malloc(sizeof(struct tee_ta_param));
	TEE_Identity *clnt_id = malloc(sizeof(TEE_Identity));
	tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS];

	if (uuid == NULL || param == NULL || clnt_id == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out_free_only;
	}

	memset(param, 0, sizeof(struct tee_ta_param));

	res = tee_ta_get_current_session(&sess);
	if (res != TEE_SUCCESS)
		goto out_free_only;

	res = tee_svc_copy_from_user(sess, uuid, dest, sizeof(TEE_UUID));
	if (res != TEE_SUCCESS)
		goto function_exit;

	clnt_id->login = TEE_LOGIN_TRUSTED_APP;
	memcpy(&clnt_id->uuid, &sess->ctx->head->uuid, sizeof(TEE_UUID));

	res = tee_svc_copy_param(sess, NULL, param_types, params, param,
				 tmp_buf_pa, &mm_param);
	if (res != TEE_SUCCESS)
		goto function_exit;

	/*
	 * Find the session of a multi-session TA or a static TA.
	 * In such a case, there is no need to ask the supplicant for the TA
	 * code.
	 */
	res = tee_ta_open_session(&ret_o, &s, &sess->ctx->open_sessions, uuid,
				  NULL, clnt_id, cancel_req_to, param);

	if (ret_o != TEE_ORIGIN_TEE || res != TEE_ERROR_ITEM_NOT_FOUND)
		goto function_exit;

	if (ret_o == TEE_ORIGIN_TEE && res == TEE_ERROR_ITEM_NOT_FOUND) {
		kta_signed_header_t *ta = NULL;
		struct tee_ta_nwumap lp;

		tee_mmu_set_ctx(NULL);

		/* Load TA */
		res = tee_ta_rpc_load(uuid, &ta, &lp, &ret_o);
		if (res != TEE_SUCCESS) {
			tee_mmu_set_ctx(sess->ctx);
			goto function_exit;
		}

		res = tee_ta_open_session(&ret_o, &s, &sess->ctx->open_sessions,
					  uuid, ta, clnt_id, cancel_req_to,
					  param);
		tee_mmu_set_ctx(sess->ctx);
		if (res != TEE_SUCCESS)
			goto function_exit;

		s->ctx->nwumap = lp;
	}

	res = tee_svc_update_out_param(sess, NULL, param, tmp_buf_pa, params);
	if (res != TEE_SUCCESS)
		goto function_exit;

function_exit:
	tee_ta_set_current_session(sess);

	if (mm_param != NULL) {
		TEE_Result res2;
		void *va = 0;

		res2 =
		    tee_mmu_kmap_pa2va((void *)tee_mm_get_smem(mm_param), &va);
		if (res2 == TEE_SUCCESS)
			tee_mmu_kunmap(va, tee_mm_get_bytes(mm_param));
	}
	tee_mm_free(mm_param);
	tee_svc_copy_to_user(sess, ta_sess, &s, sizeof(s));
	tee_svc_copy_to_user(sess, ret_orig, &ret_o, sizeof(ret_o));

out_free_only:
	free(param);
	free(uuid);
	free(clnt_id);
	return res;
}
Example #11
File: tee_svc.c Project: gxliu/optee_os
/*
 * A TA invokes another TA with parameters.
 * If some parameters are memory references:
 * - either the memref is inside the TA private RAM: the TA is not allowed to
 *   expose its private RAM, so a temporary memory buffer is used and the data
 *   is copied;
 * - or the memref is not in the TA private RAM:
 *   - if the memref was mapped to the TA, the TA is allowed to expose it;
 *   - if so, the memref virtual address is converted into a physical address.
 */
static TEE_Result tee_svc_copy_param(struct tee_ta_session *sess,
				     struct tee_ta_session *called_sess,
				     uint32_t param_types,
				     TEE_Param params[TEE_NUM_PARAMS],
				     struct tee_ta_param *param,
				     tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS],
				     tee_mm_entry_t **mm)
{
	size_t n;
	TEE_Result res;
	size_t req_mem = 0;
	size_t s;
	uint8_t *dst = 0;
	tee_paddr_t dst_pa, src_pa = 0;
	bool ta_private_memref[TEE_NUM_PARAMS];

	param->types = param_types;
	if (params == NULL) {
		if (param->types != 0)
			return TEE_ERROR_BAD_PARAMETERS;
		memset(param->params, 0, sizeof(param->params));
	} else {
		tee_svc_copy_from_user(sess, param->params, params,
				       sizeof(param->params));
	}

	if ((called_sess != NULL) &&
		(called_sess->ctx->static_ta == NULL) &&
		(called_sess->ctx->flags & TA_FLAG_USER_MODE) == 0) {
		/*
		 * kernel TA, borrow the mapping of the calling
		 * during this call.
		 */
		called_sess->calling_sess = sess;
		return TEE_SUCCESS;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {

		ta_private_memref[n] = false;

		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			if (param->params[n].memref.buffer == NULL) {
				if (param->params[n].memref.size != 0)
					return TEE_ERROR_BAD_PARAMETERS;
				break;
			}
			/* uTA cannot expose its private memory */
			if (tee_mmu_is_vbuf_inside_ta_private(sess->ctx,
				    (uintptr_t)param->params[n].memref.buffer,
				    param->params[n].memref.size)) {

				s = TEE_ROUNDUP(param->params[n].memref.size,
						sizeof(uint32_t));
				/* Check overflow */
				if (req_mem + s < req_mem)
					return TEE_ERROR_BAD_PARAMETERS;
				req_mem += s;
				ta_private_memref[n] = true;
				break;
			}
			if (!tee_mmu_is_vbuf_outside_ta_private(sess->ctx,
				    (uintptr_t)param->params[n].memref.buffer,
				    param->params[n].memref.size))
				return TEE_ERROR_BAD_PARAMETERS;

			if (tee_mmu_user_va2pa(sess->ctx,
					(void *)param->params[n].memref.buffer,
					(void **)&src_pa) != TEE_SUCCESS)
				return TEE_ERROR_BAD_PARAMETERS;

			param->param_attr[n] = tee_mmu_user_get_cache_attr(
				sess->ctx,
				(void *)param->params[n].memref.buffer);

			param->params[n].memref.buffer = (void *)src_pa;
			break;

		default:
			break;
		}
	}

	if (req_mem == 0)
		return TEE_SUCCESS;

	/* Allocate section in secure DDR */
	*mm = tee_mm_alloc(&tee_mm_sec_ddr, req_mem);
	if (*mm == NULL) {
		DMSG("tee_mm_alloc TEE_ERROR_GENERIC");
		return TEE_ERROR_GENERIC;
	}

	/* Get the virtual address for the section in secure DDR */
	res = tee_mmu_kmap(tee_mm_get_smem(*mm), req_mem, &dst);
	if (res != TEE_SUCCESS)
		return res;
	dst_pa = tee_mm_get_smem(*mm);

	for (n = 0; n < 4; n++) {

		if (ta_private_memref[n] == false)
			continue;

		s = TEE_ROUNDUP(param->params[n].memref.size, sizeof(uint32_t));

		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			if (param->params[n].memref.buffer != NULL) {
				res = tee_svc_copy_from_user(sess, dst,
							     param->params[n].
							     memref.buffer,
							     param->params[n].
							     memref.size);
				if (res != TEE_SUCCESS)
					return res;

				param->param_attr[n] =
					tee_mmu_kmap_get_cache_attr(dst);
				param->params[n].memref.buffer = (void *)dst_pa;
				tmp_buf_pa[n] = dst_pa;
				dst += s;
				dst_pa += s;
			}
			break;

		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
			if (param->params[n].memref.buffer != NULL) {
				param->param_attr[n] =
					tee_mmu_kmap_get_cache_attr(dst);
				param->params[n].memref.buffer = (void *)dst_pa;
				tmp_buf_pa[n] = dst_pa;
				dst += s;
				dst_pa += s;
			}
			break;

		default:
			continue;
		}
	}

	tee_mmu_kunmap(dst, req_mem);

	return TEE_SUCCESS;
}