Example #1
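/*
 * Open a TA binary through the REE FS TA store and buffer the whole
 * binary in secure DDR: query its size and tag, allocate TA RAM with
 * tee_mm_alloc() and read the TA into that buffer.
 */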
static TEE_Result buf_ta_open(const TEE_UUID *uuid,
			      struct user_ta_store_handle **h)
{
	struct buf_ree_fs_ta_handle *handle = NULL;
	TEE_Result res = TEE_SUCCESS;

	handle = calloc(1, sizeof(*handle));
	if (!handle)
		return TEE_ERROR_OUT_OF_MEMORY;
	res = ree_fs_ta_open(uuid, &handle->h);
	if (res)
		goto err2;
	res = ree_fs_ta_get_size(handle->h, &handle->ta_size);
	if (res)
		goto err;

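	/*
	 * Query the tag length first: with a NULL buffer the call is
	 * expected to return TEE_ERROR_SHORT_BUFFER and update tag_len.
	 */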
	res = ree_fs_ta_get_tag(handle->h, NULL, &handle->tag_len);
	if (res != TEE_ERROR_SHORT_BUFFER) {
		res = TEE_ERROR_GENERIC;
		goto err;
	}
	handle->tag = malloc(handle->tag_len);
	if (!handle->tag) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	res = ree_fs_ta_get_tag(handle->h, handle->tag, &handle->tag_len);
	if (res)
		goto err;

	handle->mm = tee_mm_alloc(&tee_mm_sec_ddr, handle->ta_size);
	if (!handle->mm) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	handle->buf = phys_to_virt(tee_mm_get_smem(handle->mm),
				   MEM_AREA_TA_RAM);
	if (!handle->buf) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto err;
	}
	res = ree_fs_ta_read(handle->h, handle->buf, handle->ta_size);
	if (res)
		goto err;
	*h = (struct user_ta_store_handle *)handle;
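	/*
	 * Fall through on success: the REE FS handle is no longer needed
	 * once the TA has been buffered, so it is closed in both cases;
	 * the remaining resources are released only on error.
	 */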
err:
	ree_fs_ta_close(handle->h);
err2:
	if (res) {
		tee_mm_free(handle->mm);
		free(handle->tag);
		free(handle);
	}
	return res;
}
Example #2
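/*
 * Map a physical address range into the kernel map (kmap) virtual area:
 * round the range to block boundaries, choose cache and security
 * attributes depending on whether the buffer is secure or non-secure,
 * fill in the MMU entries and return the resulting virtual address.
 */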
TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
{
	tee_mm_entry_t *mm;
	uint32_t attr;
	struct core_mmu_table_info tbl_info;
	uint32_t pa_s;
	uint32_t pa_e;
	size_t n;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	pa_s = ROUNDDOWN(pa, 1 << tbl_info.shift);
	pa_e = ROUNDUP(pa + len, 1 << tbl_info.shift);

	mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL;
	if (tee_pbuf_is_sec(pa, len)) {
		attr |= TEE_MATTR_SECURE;
		attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else if (tee_pbuf_is_non_sec(pa, len)) {
		if (core_mmu_is_shm_cached())
			attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else {
		/*
		 * Buffer is neither wholly secure nor wholly non-secure:
		 * release the reserved kmap range and reject the request.
		 */
		tee_mm_free(mm);
		return TEE_ERROR_GENERIC;
	}

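	/* Fill in an MMU entry for each block of the new virtual range */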
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs,
				   pa_s + (n << tbl_info.shift), attr);

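	/* Make the new mapping visible by invalidating the unified TLB */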
	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*va = (void *)(tee_mm_get_smem(mm) +
		       core_mmu_get_block_offset(&tbl_info, pa));
	return TEE_SUCCESS;
}
Example #3
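/*
 * Boot-time setup for the pager: zero BSS, initialize the heaps, copy the
 * init and pageable parts into secure DDR, verify the per-page SHA-256
 * hashes and register the pageable area with the pager.
 */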
static void init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	size_t block_size;

	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);
	TEE_ASSERT(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * Zero the BSS area. Note that globals which would normally go
	 * into BSS but are used before this point have to be placed in
	 * .nozi.* to avoid being overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	thread_init_boot_thread();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	EMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
		__pageable_part_end - __pageable_part_start);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
				n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We
	 * can't fault in pages until we've switched to the new vector by
	 * calling thread_init_handlers() below.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
			SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed not to need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	TEE_ASSERT(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged. Note that there might be
	 * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
	 * claimed so that later allocations cannot get that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	if (!tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
				paged_store, hashes))
		panic();
	tee_pager_add_pages((vaddr_t)__pageable_start,
		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE, true);
}
Example #4
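/*
 * Write a file to the RPMB filesystem: validate the filename, read the
 * FAT, allocate space for the data from a top-down pool spanning the RPMB
 * storage area, write the data and commit the FAT entry. Returns the
 * number of bytes written on success and -1 on failure.
 */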
int tee_rpmb_fs_write(const char *filename, uint8_t *buf, size_t size)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct file_handle *fh = NULL;
	tee_mm_pool_t p;
	tee_mm_entry_t *mm = NULL;
	size_t length;
	uint32_t mm_flags;

	if (filename == NULL || buf == NULL) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	length = strlen(filename);
	if ((length >= FILENAME_LENGTH - 1) || (length == 0)) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	/* Create a FAT entry for the file to write. */
	fh = alloc_file_handle(filename);
	if (fh == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	/* Upper memory allocation must be used for RPMB_FS. */
	mm_flags = TEE_MM_POOL_HI_ALLOC;
	if (!tee_mm_init(&p, RPMB_STORAGE_START_ADDRESS,
			 RPMB_STORAGE_END_ADDRESS, RPMB_BLOCK_SIZE_SHIFT,
			 mm_flags)) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	res = read_fat(fh, &p);
	if (res != TEE_SUCCESS)
		goto out;

	mm = tee_mm_alloc(&p, size);
	if (mm == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	if ((fh->fat_entry.flags & FILE_IS_LAST_ENTRY) != 0) {
		res = add_fat_entry(fh);
		if (res != TEE_SUCCESS)
			goto out;
	}

	memset(&fh->fat_entry, 0, sizeof(struct rpmb_fat_entry));
	memcpy(fh->fat_entry.filename, filename, length);
	fh->fat_entry.data_size = size;
	fh->fat_entry.flags = FILE_IS_ACTIVE;
	fh->fat_entry.start_address = tee_mm_get_smem(mm);

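	/* Write the data to its RPMB address, then commit the FAT entry */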
	res = tee_rpmb_write(DEV_ID, fh->fat_entry.start_address, buf, size);
	if (res != TEE_SUCCESS)
		goto out;

	res = write_fat_entry(fh, true);

out:
	free(fh);
	if (mm != NULL)
		tee_mm_final(&p);

	if (res == TEE_SUCCESS)
		return size;

	return -1;
}
Example #5
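/*
 * Pager-enabled boot-time setup: initialize the heaps, move the pageable
 * part into secure DDR (TA RAM), verify the per-page SHA-256 hashes and
 * hand the pageable area over to the pager.
 */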
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	thread_init_boot_thread();

	init_asan();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	carve_out_asan_mem(&tee_mm_sec_ddr);

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part)),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, page, res);
			panic();
		}
	}

	/*
	 * Assert that the prepaged init sections are page aligned so that
	 * nothing trails uninitialized at the end of the premapped init
	 * area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign the alias area for the pager at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed not to need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				TEE_MATTR_PRX, paged_store, hashes);

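	/*
	 * Hand the already populated init pages and the remaining pageable
	 * pages over to the pager.
	 */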
	tee_pager_add_pages((vaddr_t)__pageable_start,
			init_size / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start + init_size,
			(pageable_size - init_size) / SMALL_PAGE_SIZE, true);

	/*
	 * There may be physical pages in TZSRAM before the core load address.
	 * These pages can be added to the physical pages pool of the pager.
	 * This setup may happen when the secure bootloader runs in TZRAM
	 * and its memory can be reused by OP-TEE once boot stages complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			(VCORE_UNPG_RX_PA - tee_mm_vcore.lo) / SMALL_PAGE_SIZE,
			true);
}
Example #6
/*
 * A TA invokes another TA with parameters.
 * If some parameters are memory references:
 * - either the memref is inside the TA private RAM: the TA is not allowed
 *   to expose its private RAM, so use a temporary memory buffer and copy
 *   the data there;
 * - or the memref is not in the TA private RAM:
 *   - if the memref was mapped to the TA, the TA is allowed to expose it;
 *   - in that case, convert the memref virtual address into a physical
 *     address.
 */
static TEE_Result tee_svc_copy_param(struct tee_ta_session *sess,
				     struct tee_ta_session *called_sess,
				     uint32_t param_types,
				     TEE_Param params[TEE_NUM_PARAMS],
				     struct tee_ta_param *param,
				     tee_paddr_t tmp_buf_pa[TEE_NUM_PARAMS],
				     tee_mm_entry_t **mm)
{
	size_t n;
	TEE_Result res;
	size_t req_mem = 0;
	size_t s;
	uint8_t *dst = 0;
	tee_paddr_t dst_pa, src_pa = 0;
	bool ta_private_memref[TEE_NUM_PARAMS];

	param->types = param_types;
	if (params == NULL) {
		if (param->types != 0)
			return TEE_ERROR_BAD_PARAMETERS;
		memset(param->params, 0, sizeof(param->params));
	} else {
		res = tee_svc_copy_from_user(sess, param->params, params,
					     sizeof(param->params));
		if (res != TEE_SUCCESS)
			return res;
	}

	if ((called_sess != NULL) &&
		(called_sess->ctx->static_ta == NULL) &&
		(called_sess->ctx->flags & TA_FLAG_USER_MODE) == 0) {
		/*
		 * Kernel TA: borrow the mapping of the calling session
		 * during this call.
		 */
		called_sess->calling_sess = sess;
		return TEE_SUCCESS;
	}

	for (n = 0; n < TEE_NUM_PARAMS; n++) {

		ta_private_memref[n] = false;

		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			if (param->params[n].memref.buffer == NULL) {
				if (param->params[n].memref.size != 0)
					return TEE_ERROR_BAD_PARAMETERS;
				break;
			}
			/* uTA cannot expose its private memory */
			if (tee_mmu_is_vbuf_inside_ta_private(sess->ctx,
				    (uintptr_t)param->params[n].memref.buffer,
				    param->params[n].memref.size)) {

				s = TEE_ROUNDUP(param->params[n].memref.size,
						sizeof(uint32_t));
				/* Check overflow */
				if (req_mem + s < req_mem)
					return TEE_ERROR_BAD_PARAMETERS;
				req_mem += s;
				ta_private_memref[n] = true;
				break;
			}
			if (!tee_mmu_is_vbuf_outside_ta_private(sess->ctx,
				    (uintptr_t)param->params[n].memref.buffer,
				    param->params[n].memref.size))
				return TEE_ERROR_BAD_PARAMETERS;

			if (tee_mmu_user_va2pa(sess->ctx,
					(void *)param->params[n].memref.buffer,
					(void **)&src_pa) != TEE_SUCCESS)
				return TEE_ERROR_BAD_PARAMETERS;

			param->param_attr[n] = tee_mmu_user_get_cache_attr(
				sess->ctx,
				(void *)param->params[n].memref.buffer);

			param->params[n].memref.buffer = (void *)src_pa;
			break;

		default:
			break;
		}
	}

	if (req_mem == 0)
		return TEE_SUCCESS;

	/* Allocate section in secure DDR */
	*mm = tee_mm_alloc(&tee_mm_sec_ddr, req_mem);
	if (*mm == NULL) {
		DMSG("tee_mm_alloc TEE_ERROR_GENERIC");
		return TEE_ERROR_GENERIC;
	}

	/* Get the virtual address for the section in secure DDR */
	res = tee_mmu_kmap(tee_mm_get_smem(*mm), req_mem, &dst);
	if (res != TEE_SUCCESS)
		return res;
	dst_pa = tee_mm_get_smem(*mm);

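	/*
	 * Second pass: relocate the TA private memrefs to the temporary
	 * buffer in secure DDR, copying the data for input/inout parameters.
	 */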
	for (n = 0; n < TEE_NUM_PARAMS; n++) {

		if (ta_private_memref[n] == false)
			continue;

		s = TEE_ROUNDUP(param->params[n].memref.size, sizeof(uint32_t));

		switch (TEE_PARAM_TYPE_GET(param->types, n)) {
		case TEE_PARAM_TYPE_MEMREF_INPUT:
		case TEE_PARAM_TYPE_MEMREF_INOUT:
			if (param->params[n].memref.buffer != NULL) {
				res = tee_svc_copy_from_user(sess, dst,
							     param->params[n].
							     memref.buffer,
							     param->params[n].
							     memref.size);
				if (res != TEE_SUCCESS)
					return res;

				param->param_attr[n] =
					tee_mmu_kmap_get_cache_attr(dst);
				param->params[n].memref.buffer = (void *)dst_pa;
				tmp_buf_pa[n] = dst_pa;
				dst += s;
				dst_pa += s;
			}
			break;

		case TEE_PARAM_TYPE_MEMREF_OUTPUT:
			if (param->params[n].memref.buffer != NULL) {
				param->param_attr[n] =
					tee_mmu_kmap_get_cache_attr(dst);
				param->params[n].memref.buffer = (void *)dst_pa;
				tmp_buf_pa[n] = dst_pa;
				dst += s;
				dst_pa += s;
			}
			break;

		default:
			continue;
		}
	}

	tee_mmu_kunmap(dst, req_mem);

	return TEE_SUCCESS;
}