/*
 * tee_uta_cache_operation - dynamic cache clean/inval request from a TA
 * It follows the ARM recommendation:
 *     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
 * Note that this implementation assumes dsb operations are part of
 * cache_maintenance_l1(), and L2 cache sync is part of
 * cache_maintenance_l2().
 */
static TEE_Result cache_operation(struct tee_ta_session *sess,
			enum utee_cache_operation op, void *va, size_t len)
{
	TEE_Result ret;
	paddr_t pa = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);

	if ((sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

	/*
	 * TAs are allowed to perform cache maintenance only on TA memref
	 * parameters, not on TA private memory.
	 */
	if (tee_mmu_is_vbuf_intersect_ta_private(utc, va, len))
		return TEE_ERROR_ACCESS_DENIED;

	ret = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)va, len);
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_ACCESS_DENIED;

	pa = virt_to_phys(va);
	if (!pa)
		return TEE_ERROR_ACCESS_DENIED;

	switch (op) {
	case TEE_CACHEFLUSH:
		/* Clean L1, Flush L2, Flush L1 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		ret = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);

	case TEE_CACHECLEAN:
		/* Clean L1, Clean L2 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);

	case TEE_CACHEINVALIDATE:
		/* Inval L2, Inval L1 */
		ret = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);

	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}
}
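For context, a minimal and purely illustrative TA-side sketch (not part of the original code): the GlobalPlatform cache maintenance calls such as TEE_CacheFlush() are what end up in cache_operation() above, and they only succeed when the TA is built with TA_FLAG_CACHE_MAINTENANCE and the buffer is a memref parameter rather than TA private memory. The command ID and parameter layout below are made up for the example.

#include <tee_internal_api.h>

/* CMD_PROCESS_BUFFER is a made-up command ID for this sketch. */
#define CMD_PROCESS_BUFFER	0

TEE_Result TA_InvokeCommandEntryPoint(void *sess, uint32_t cmd,
				      uint32_t param_types,
				      TEE_Param params[4])
{
	uint32_t exp = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
				       TEE_PARAM_TYPE_NONE,
				       TEE_PARAM_TYPE_NONE,
				       TEE_PARAM_TYPE_NONE);

	(void)sess;

	if (cmd != CMD_PROCESS_BUFFER || param_types != exp)
		return TEE_ERROR_BAD_PARAMETERS;

	/*
	 * Clean + invalidate the memref range through L1 and L2; this is
	 * the request that reaches cache_operation() as TEE_CACHEFLUSH.
	 */
	return TEE_CacheFlush(params[0].memref.buffer,
			      params[0].memref.size);
}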
Example #2
/*
 * tee_uta_cache_operation - dynamic cache clean/inval request from a TA
 * It follows the ARM recommendation:
 *     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
 * Note that this implementation assumes dsb operations are part of
 * cache_maintenance_l1(), and L2 cache sync is part of
 * cache_maintenance_l2().
 */
static TEE_Result cache_operation(struct tee_ta_session *sess,
			enum utee_cache_operation op, void *va, size_t len)
{
	TEE_Result ret;
	paddr_t pa = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);

	if ((sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

	ret = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_WRITE,
					  (tee_uaddr_t)va, len);
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_ACCESS_DENIED;

	pa = virt_to_phys(va);
	if (!pa)
		return TEE_ERROR_ACCESS_DENIED;

	switch (op) {
	case TEE_CACHEFLUSH:
		/* Clean L1, Flush L2, Flush L1 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		ret = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);

	case TEE_CACHECLEAN:
		/* Clean L1, Clean L2 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);

	case TEE_CACHEINVALIDATE:
		/* Inval L2, Inval L1 */
		ret = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);

	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}
}
Example #3
void init_sec_mon(unsigned long nsec_entry)
{
	struct plat_nsec_ctx *plat_ctx;
	struct sm_nsec_ctx *nsec_ctx;

	plat_ctx = phys_to_virt(nsec_entry, MEM_AREA_IO_SEC);
	if (!plat_ctx)
		panic();

	/* Invalidate cache to fetch data from external memory */
	cache_maintenance_l1(DCACHE_AREA_INVALIDATE,
			     plat_ctx, sizeof(*plat_ctx));

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();

	nsec_ctx->mode_regs.usr_sp = plat_ctx->usr_sp;
	nsec_ctx->mode_regs.usr_lr = plat_ctx->usr_lr;
	nsec_ctx->mode_regs.irq_spsr = plat_ctx->irq_spsr;
	nsec_ctx->mode_regs.irq_sp = plat_ctx->irq_sp;
	nsec_ctx->mode_regs.irq_lr = plat_ctx->irq_lr;
	nsec_ctx->mode_regs.svc_spsr = plat_ctx->svc_spsr;
	nsec_ctx->mode_regs.svc_sp = plat_ctx->svc_sp;
	nsec_ctx->mode_regs.svc_lr = plat_ctx->svc_lr;
	nsec_ctx->mode_regs.abt_spsr = plat_ctx->abt_spsr;
	nsec_ctx->mode_regs.abt_sp = plat_ctx->abt_sp;
	nsec_ctx->mode_regs.abt_lr = plat_ctx->abt_lr;
	nsec_ctx->mode_regs.und_spsr = plat_ctx->und_spsr;
	nsec_ctx->mode_regs.und_sp = plat_ctx->und_sp;
	nsec_ctx->mode_regs.und_lr = plat_ctx->und_lr;
	nsec_ctx->mon_lr = plat_ctx->mon_lr;
	nsec_ctx->mon_spsr = plat_ctx->mon_spsr;
}
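The layout of struct plat_nsec_ctx is not shown in this example. The sketch below reconstructs it from the fields the function actually reads; it is illustrative only, and the field order is an assumption (the real definition belongs to the platform's boot code).

/* Illustrative reconstruction, inferred from the loads above. */
struct plat_nsec_ctx {
	uint32_t usr_sp;
	uint32_t usr_lr;
	uint32_t irq_spsr;
	uint32_t irq_sp;
	uint32_t irq_lr;
	uint32_t svc_spsr;
	uint32_t svc_sp;
	uint32_t svc_lr;
	uint32_t abt_spsr;
	uint32_t abt_sp;
	uint32_t abt_lr;
	uint32_t und_spsr;
	uint32_t und_sp;
	uint32_t und_lr;
	uint32_t mon_lr;
	uint32_t mon_spsr;
};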
Example #4
static int platform_smp_boot(size_t core_idx, uint32_t entry)
{
	uint32_t val;
	vaddr_t va = src_base();

	if ((core_idx == 0) || (core_idx >= CFG_TEE_CORE_NB_CORE))
		return OPTEE_SMC_RETURN_EBADCMD;

	/* set secondary cores' NS entry addresses */

	ns_entry_addrs[core_idx] = entry;
	cache_maintenance_l1(DCACHE_AREA_CLEAN,
		&ns_entry_addrs[core_idx],
		sizeof(uint32_t));
	cache_maintenance_l2(L2CACHE_AREA_CLEAN,
		(paddr_t)&ns_entry_addrs[core_idx],
		sizeof(uint32_t));

	/* boot secondary cores from OP-TEE load address */

	write32((uint32_t)CFG_TEE_LOAD_ADDR, va + SRC_GPR1 + core_idx * 8);

	/* release secondary core */

	val = read32(va + SRC_SCR);
	val |=  BIT32(SRC_SCR_CORE1_ENABLE_OFFSET + (core_idx - 1));
	val |=  BIT32(SRC_SCR_CORE1_RST_OFFSET + (core_idx - 1));
	write32(val, va + SRC_SCR);
	return OPTEE_SMC_RETURN_OK;
}
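The pattern above (store a word, clean it out of L1 by virtual address, then clean it out of the outer L2 by physical address) is what lets a secondary core whose caches are still off pick up the entry address from memory. A minimal helper capturing that pattern, assuming the same cache_maintenance_l1()/cache_maintenance_l2() wrappers and virt_to_phys() used throughout these examples, might look like this:

/* Sketch only: publish a 32-bit value so an observer with caches
 * disabled (e.g. a secondary core in its boot stub) reads the
 * up-to-date copy from memory. Error handling omitted. */
static void publish_word(uint32_t *va, uint32_t val)
{
	*va = val;
	cache_maintenance_l1(DCACHE_AREA_CLEAN, va, sizeof(*va));
	cache_maintenance_l2(L2CACHE_AREA_CLEAN, virt_to_phys(va),
			     sizeof(*va));
}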
Example #5
void init_sec_mon(uint32_t nsec_entry)
{
	struct plat_nsec_ctx *plat_ctx = (struct plat_nsec_ctx *)nsec_entry;
	struct sm_nsec_ctx *nsec_ctx;

	/* Invalidate cache to fetch data from external memory */
	cache_maintenance_l1(DCACHE_AREA_INVALIDATE, (void *)nsec_entry,
		sizeof(struct plat_nsec_ctx));

	/* Initialize secure monitor */
	nsec_ctx = sm_get_nsec_ctx();

	nsec_ctx->usr_sp = plat_ctx->usr_sp;
	nsec_ctx->usr_lr = plat_ctx->usr_lr;
	nsec_ctx->irq_spsr = plat_ctx->irq_spsr;
	nsec_ctx->irq_sp = plat_ctx->irq_sp;
	nsec_ctx->irq_lr = plat_ctx->irq_lr;
	nsec_ctx->svc_spsr = plat_ctx->svc_spsr;
	nsec_ctx->svc_sp = plat_ctx->svc_sp;
	nsec_ctx->svc_lr = plat_ctx->svc_lr;
	nsec_ctx->abt_spsr = plat_ctx->abt_spsr;
	nsec_ctx->abt_sp = plat_ctx->abt_sp;
	nsec_ctx->abt_lr = plat_ctx->abt_lr;
	nsec_ctx->und_spsr = plat_ctx->und_spsr;
	nsec_ctx->und_sp = plat_ctx->und_sp;
	nsec_ctx->und_lr = plat_ctx->und_lr;
	nsec_ctx->mon_lr = plat_ctx->mon_lr;
	nsec_ctx->mon_spsr = plat_ctx->mon_spsr;
}
Example #6
/**
 * Handle platform-specific SMC commands.
 */
uint32_t platform_smc_handle(struct thread_smc_args *smc_args)
{
	uint32_t ret = TEE_SUCCESS;

	switch (smc_args->a1) {
	case TEESMC_OPTEE_SIP_SUNXI_SET_SMP_BOOTENTRY:
		sunxi_secondary_ns_entry = smc_args->a2;

		/* Clean so the secondary CPU being brought up sees it */
		cache_maintenance_l1(DCACHE_AREA_CLEAN,
				     (void *)(&sunxi_secondary_ns_entry),
				     sizeof(uint32_t));
		break;
	default:
		ret = TEESMC_RETURN_EBADCMD;
		break;
	}
	smc_args->a0 = ret;
	return ret;
}
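The variable being cleaned is not declared in this snippet. A plausible declaration, shown here only for illustration, is a plain global word that the secondary core's startup path reads while its D-cache is still disabled, which is why the DCACHE_AREA_CLEAN above is needed:

/* Illustrative declaration, not from the original file. */
uint32_t sunxi_secondary_ns_entry;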
Example #7
static void init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	size_t block_size;

	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);
	TEE_ASSERT(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * Zero the BSS area. Note that globals that would normally go
	 * into BSS but are used before this point have to be placed in
	 * .nozi.* to avoid being overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	thread_init_boot_thread();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	EMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
		__pageable_part_end - __pageable_part_start);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
				n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We
	 * can't fault in pages until we've switched to the new vector by
	 * calling thread_init_handlers() below.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
			SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Assign the pager's alias area at the end of the small page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed not to need more than
	 * the physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	TEE_ASSERT(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged. Note that there might
	 * be a gap between tee_mm_vcore.lo and TEE_RAM_START which is
	 * also claimed to prevent later allocations from getting that
	 * memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	if (!tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
				paged_store, hashes))
		panic();
	tee_pager_add_pages((vaddr_t)__pageable_start,
		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE, true);

}
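To make the sizing arithmetic at the top of init_runtime() concrete: one SHA-256 digest is kept per pageable page, so with the usual OP-TEE constants (a 4 KiB SMALL_PAGE_SIZE and a 32-byte TEE_SHA256_HASH_SIZE, both assumed here) a 1 MiB pageable area needs 256 * 32 = 8 KiB of hashes. A self-contained sketch of the calculation:

#include <stddef.h>
#include <stdio.h>

/* Assumed values matching the usual OP-TEE definitions. */
#define SMALL_PAGE_SIZE		4096u
#define TEE_SHA256_HASH_SIZE	32u

/* One SHA-256 digest per pageable page, as in init_runtime(). */
static size_t hash_table_size(size_t pageable_size)
{
	return (pageable_size / SMALL_PAGE_SIZE) * TEE_SHA256_HASH_SIZE;
}

int main(void)
{
	/* e.g. a 1 MiB pageable area -> 256 pages -> 8192 bytes of hashes */
	printf("%zu\n", hash_table_size(1024 * 1024));
	return 0;
}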