Example #1
File: tee_mmu.c Project: Machiry/optee_os
void teecore_init_ta_ram(void)
{
	vaddr_t s;
	vaddr_t e;
	paddr_t ps;
	paddr_t pe;

	/* get virtual addr/size of RAM where TAs are loaded/executed */
	core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
	ps = virt_to_phys((void *)s);
	TEE_ASSERT(ps);
	pe = virt_to_phys((void *)(e - 1)) + 1;
	TEE_ASSERT(pe);

	TEE_ASSERT((ps & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);
	TEE_ASSERT((pe & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);
	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	TEE_ASSERT(tee_pbuf_is_sec(ps, pe - ps) == true);

	TEE_ASSERT(tee_mm_is_empty(&tee_mm_sec_ddr));

	/* remove previous config and init TA ddr memory pool */
	tee_mm_final(&tee_mm_sec_ddr);
	tee_mm_init(&tee_mm_sec_ddr, ps, pe, CORE_MMU_USER_CODE_SHIFT,
		    TEE_MM_POOL_NO_FLAGS);
}
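For context, a minimal sketch of how a pool initialized this way is consumed later. tee_mm_alloc(), tee_mm_get_smem() and tee_mm_free() are the real tee_mm API; the helper name and its size argument are illustrative:

/* Hypothetical helper: carve 'size' bytes out of the secure DDR pool.
 * Requests are rounded up to the pool granularity set above
 * (CORE_MMU_USER_CODE_SIZE). Returns 0 on failure. */
static paddr_t alloc_ta_ram(size_t size)
{
	tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_sec_ddr, size);

	if (!mm)
		return 0;
	return tee_mm_get_smem(mm);	/* start address of the region */
}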
Example #2
File: tee_mmu.c Project: MALATTAR/optee_os
void teecore_init_pub_ram(void)
{
	vaddr_t s;
	vaddr_t e;
	unsigned int nsec_tee_size = 32 * 1024;

	/* get virtual addr/size of NSec shared mem allocated from teecore */
	core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e);

	TEE_ASSERT(s < e);
	TEE_ASSERT((s & SMALL_PAGE_MASK) == 0);
	TEE_ASSERT((e & SMALL_PAGE_MASK) == 0);
	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	TEE_ASSERT(tee_vbuf_is_non_sec(s, e - s) == true);

	/*
	 * 32kByte first bytes are allocated from teecore.
	 * Remaining is under control of the NSec allocator.
	 */
	TEE_ASSERT((e - s) > nsec_tee_size);

	TEE_ASSERT(tee_mm_is_empty(&tee_mm_pub_ddr));
	tee_mm_final(&tee_mm_pub_ddr);
	tee_mm_init(&tee_mm_pub_ddr, s, s + nsec_tee_size, SMALL_PAGE_SHIFT,
		    TEE_MM_POOL_NO_FLAGS);

	s += nsec_tee_size;
	default_nsec_shm_paddr = s;
	default_nsec_shm_size = e - s;
}
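The resulting split: the first 32 KiB of the shared area are managed by the tee_mm_pub_ddr pool (page granular, since the pool was created with SMALL_PAGE_SHIFT), and everything above s + nsec_tee_size is left to the non-secure shared-memory allocator. A hedged sketch of an allocation from that pool; the request size is illustrative:

/* Illustrative allocation: the request is rounded up to the pool
 * granularity, i.e. one 4 KiB small page here. */
tee_mm_entry_t *mm = tee_mm_alloc(&tee_mm_pub_ddr, 100);

if (mm) {
	vaddr_t va = tee_mm_get_smem(mm);	/* page-aligned start */

	/* ... use the page at va ... */
	tee_mm_free(mm);	/* return the region to the pool */
}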
Example #3
File: tee_mmu.c Project: enavro/optee_os
/*
 * tee_mmu_kmap_init - init TA mapping support
 *
 * TAs are mapped in virtual space [0 32MB].
 * The TA MMU L1 table is always located at TEE_MMU_UL1_BASE.
 * The MMU table for a target TA instance will be copied to this address
 * when tee core sets up TA context.
 */
void tee_mmu_kmap_init(void)
{
	vaddr_t s = TEE_MMU_KMAP_START_VA;
	vaddr_t e = TEE_MMU_KMAP_END_VA;
	struct core_mmu_table_info tbl_info;

	if (!core_mmu_find_table(s, UINT_MAX, &tbl_info))
		panic();

	if (!tee_mm_init(&tee_mmu_virt_kmap, s, e, tbl_info.shift,
			 TEE_MM_POOL_NO_FLAGS)) {
		DMSG("Failed to init kmap. Trap CPU!");
		panic();
	}
}
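Note how the pool granularity comes from the translation table found by core_mmu_find_table(): tbl_info.shift is the log2 of that table's entry size, so each kmap allocation covers whole MMU entries. A minimal sketch of reserving kmap space, assuming only the real tee_mm API; the helper name is hypothetical:

/* Hypothetical helper: reserve 'len' bytes of kmap virtual space.
 * With shift = tbl_info.shift, the region is aligned to and sized in
 * multiples of (1 << shift) bytes. Returns 0 on failure. */
static vaddr_t kmap_reserve(size_t len)
{
	tee_mm_entry_t *mm = tee_mm_alloc(&tee_mmu_virt_kmap, len);

	return mm ? tee_mm_get_smem(mm) : 0;
}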
Example #4
static void init_vcore(tee_mm_pool_t *mm_vcore)
{
	const vaddr_t begin = TEE_RAM_VA_START;
	vaddr_t end = TEE_RAM_VA_START + TEE_RAM_VA_SIZE;

#ifdef CFG_CORE_SANITIZE_KADDRESS
	/* Carve out asan memory, flat mapped after core memory */
	if (end > ASAN_SHADOW_PA)
		end = ASAN_MAP_PA;
#endif

	if (!tee_mm_init(mm_vcore, begin, end, SMALL_PAGE_SHIFT,
			 TEE_MM_POOL_NO_FLAGS))
		panic("tee_mm_vcore init failed");
}
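A hedged sketch of a caller, assuming the global pool name used elsewhere in OP-TEE (tee_mm_vcore); the boot function name is illustrative:

static tee_mm_pool_t tee_mm_vcore;	/* core virtual-memory pool */

static void example_boot(void)	/* hypothetical caller */
{
	init_vcore(&tee_mm_vcore);
	/* From here on, tee_mm_alloc2(&tee_mm_vcore, va, len) can claim
	 * fixed core VA ranges, as example #5 below does. */
}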
Example #5
static void init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	size_t block_size;

	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);
	TEE_ASSERT(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * Zero the BSS area. Note that globals which would normally go
	 * into BSS but are used before this point have to be placed in
	 * .nozi.* to avoid being overwritten here.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	thread_init_boot_thread();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	hashes = malloc(hash_size);
	EMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
		__pageable_part_end - __pageable_part_start);

	/* Check that the hashes of what's in the pageable area are OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
				n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We
	 * can't fault in pages until we've switched to the new vector by
	 * calling thread_init_handlers() below.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
			SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Assign the alias area for the pager at the end of the small-page
	 * block that the rest of the binary is loaded into. We're taking
	 * more than needed, but we're guaranteed not to need more than the
	 * physical amount of TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
		(vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE, TZSRAM_SIZE);
	TEE_ASSERT(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged. Note that there might be
	 * a gap between tee_mm_vcore.lo and TEE_RAM_START, which is also
	 * claimed to keep later allocations from getting that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	if (!tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
				paged_store, hashes))
		panic();
	tee_pager_add_pages((vaddr_t)__pageable_start,
		ROUNDUP(init_size, SMALL_PAGE_SIZE) / SMALL_PAGE_SIZE, false);
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			(pageable_size - ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE, true);
}
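This example uses the pool API in both modes: tee_mm_alloc() lets the pool pick a free range, while tee_mm_alloc2() claims a caller-chosen address range and fails if it is not free. A compact sketch of the difference, with sizes illustrative and names taken from the example above:

tee_mm_entry_t *mm;

/* Pool picks the position: any free, suitably aligned range. */
mm = tee_mm_alloc(&tee_mm_sec_ddr, 4 * SMALL_PAGE_SIZE);

/* Caller picks the position: exactly [__pageable_start,
 * __pageable_start + 4 * SMALL_PAGE_SIZE), or NULL if not free. */
mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
		   4 * SMALL_PAGE_SIZE);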
Example #6
int tee_rpmb_fs_write(const char *filename, uint8_t *buf, size_t size)
{
	TEE_Result res = TEE_ERROR_GENERIC;
	struct file_handle *fh = NULL;
	tee_mm_pool_t p;
	tee_mm_entry_t *mm = NULL;
	size_t length;
	uint32_t mm_flags;
	bool pool_result = false;

	if (filename == NULL || buf == NULL) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	length = strlen(filename);
	if ((length >= FILENAME_LENGTH - 1) || (length == 0)) {
		res = TEE_ERROR_BAD_PARAMETERS;
		goto out;
	}

	/* Create a FAT entry for the file to write. */
	fh = alloc_file_handle(filename);
	if (fh == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	/* Upper memory allocation must be used for RPMB_FS. */
	mm_flags = TEE_MM_POOL_HI_ALLOC;
	if (!tee_mm_init(&p, RPMB_STORAGE_START_ADDRESS,
			 RPMB_STORAGE_END_ADDRESS, RPMB_BLOCK_SIZE_SHIFT,
			 mm_flags)) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}
	pool_result = true;

	res = read_fat(fh, &p);
	if (res != TEE_SUCCESS)
		goto out;

	mm = tee_mm_alloc(&p, size);
	if (mm == NULL) {
		res = TEE_ERROR_OUT_OF_MEMORY;
		goto out;
	}

	if ((fh->fat_entry.flags & FILE_IS_LAST_ENTRY) != 0) {
		res = add_fat_entry(fh);
		if (res != TEE_SUCCESS)
			goto out;
	}

	memset(&fh->fat_entry, 0, sizeof(struct rpmb_fat_entry));
	memcpy(fh->fat_entry.filename, filename, length);
	fh->fat_entry.data_size = size;
	fh->fat_entry.flags = FILE_IS_ACTIVE;
	fh->fat_entry.start_address = tee_mm_get_smem(mm);

	res = tee_rpmb_write(DEV_ID, fh->fat_entry.start_address, buf, size);
	if (res != TEE_SUCCESS)
		goto out;

	res = write_fat_entry(fh, true);

out:
	free(fh);
	/* Finalize the pool if it was initialized, even on error paths */
	if (pool_result)
		tee_mm_final(&p);

	if (res == TEE_SUCCESS)
		return size;

	return -1;
}
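A hedged usage sketch: on success the function returns the number of bytes written, and -1 on any error, so callers check the sign. The filename and payload here are illustrative:

static const uint8_t payload[4] = { 0xde, 0xad, 0xbe, 0xef };

if (tee_rpmb_fs_write("counter.bin", (uint8_t *)payload,
		      sizeof(payload)) < 0)
	EMSG("RPMB write failed");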