Example #1
static size_t get_block_size(void)
{
	struct core_mmu_table_info tbl_info;
	unsigned l;

	if (!core_mmu_find_table(CFG_TEE_RAM_START, UINT_MAX, &tbl_info))
		panic();
	l = tbl_info.level - 1;
	if (!core_mmu_find_table(CFG_TEE_RAM_START, l, &tbl_info))
		panic();
	return 1 << tbl_info.shift;
}
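A minimal usage sketch (the caller below is hypothetical, not part of the project): since the returned value is a power of two (1 << tbl_info.shift), it can be combined with the ROUNDDOWN/ROUNDUP macros used in the later examples to align a physical range to mapping-block boundaries.

/* Hypothetical caller: align [pa, pa + len) to the block size
 * reported by get_block_size(). */
static void example_align_to_block(paddr_t pa, size_t len,
				   paddr_t *start, size_t *size)
{
	size_t block_size = get_block_size();

	*start = ROUNDDOWN(pa, block_size);
	*size = ROUNDUP(pa + len, block_size) - *start;
}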
Example #2
File: tee_mmu.c Project: enavro/optee_os
static TEE_Result tee_mmu_kmap_va2pa_attr(void *va, void **pa, uint32_t *attr)
{
	struct core_mmu_table_info tbl_info;
	size_t block_offset;
	size_t n;
	paddr_t npa;
	uint32_t nattr;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	if (!tee_mm_addr_is_within_range(&tee_mmu_virt_kmap, (vaddr_t)va))
		return TEE_ERROR_ACCESS_DENIED;

	n = core_mmu_va2idx(&tbl_info, (vaddr_t)va);
	core_mmu_get_entry(&tbl_info, n, &npa, &nattr);
	if (!(nattr & TEE_MATTR_VALID_BLOCK))
		return TEE_ERROR_ACCESS_DENIED;

	block_offset = core_mmu_get_block_offset(&tbl_info, (vaddr_t)va);
	*pa = (void *)(npa + block_offset);

	if (attr)
		*attr = nattr;

	return TEE_SUCCESS;
}
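A hedged usage sketch (the caller below is hypothetical and would live in the same file, since the function is static): the attr output parameter is optional, and the returned bits can be tested against the TEE_MATTR_* flags seen in these examples.

/* Hypothetical caller: translate a kmap VA to a PA and check whether
 * the backing memory was mapped as secure. */
static TEE_Result example_check_secure(void *va, void **pa, bool *secure)
{
	uint32_t attr;
	TEE_Result res;

	res = tee_mmu_kmap_va2pa_attr(va, pa, &attr);
	if (res != TEE_SUCCESS)
		return res;

	*secure = !!(attr & TEE_MATTR_SECURE);
	return TEE_SUCCESS;
}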
Example #3
File: tee_mmu.c Project: enavro/optee_os
TEE_Result tee_mmu_kmap_pa2va_helper(void *pa, void **va)
{
	size_t n;
	struct core_mmu_table_info tbl_info;
	size_t shift;
	paddr_t match_pa;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	shift = tbl_info.shift;
	match_pa = ROUNDDOWN((paddr_t)pa, 1 << shift);

	for (n = core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_START_VA);
	     n < core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_END_VA); n++) {
		uint32_t attr;
		paddr_t npa;

		core_mmu_get_entry(&tbl_info, n, &npa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;
		assert(!(attr & TEE_MATTR_TABLE));

		if (npa == match_pa) {
			*va = (void *)(core_mmu_idx2va(&tbl_info, n) +
				       ((paddr_t)pa - match_pa));
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ACCESS_DENIED;
}
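To illustrate how examples #2 and #3 relate, here is a hedged round-trip sketch (the caller is hypothetical): a PA inside a valid kmap block should translate to a VA that maps back to the original PA.

/* Hypothetical round trip: pa -> va -> pa should be the identity for
 * addresses covered by a valid kmap block. */
static TEE_Result example_round_trip(void *pa)
{
	void *va;
	void *pa2;
	TEE_Result res;

	res = tee_mmu_kmap_pa2va_helper(pa, &va);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_kmap_va2pa_attr(va, &pa2, NULL);
	if (res != TEE_SUCCESS)
		return res;

	assert(pa2 == pa);
	return TEE_SUCCESS;
}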
Example #4
File: tee_mmu.c Project: enavro/optee_os
/*
 * tee_mmu_kmap_init - init TA mapping support
 *
 * TAs are mapped in virtual space [0 32MB].
 * The TA MMU L1 table is always located at TEE_MMU_UL1_BASE.
 * The MMU table for a target TA instance will be copied to this address
 * when tee core sets up TA context.
 */
void tee_mmu_kmap_init(void)
{
	vaddr_t s = TEE_MMU_KMAP_START_VA;
	vaddr_t e = TEE_MMU_KMAP_END_VA;
	struct core_mmu_table_info tbl_info;

	if (!core_mmu_find_table(s, UINT_MAX, &tbl_info))
		panic();

	if (!tee_mm_init(&tee_mmu_virt_kmap, s, e, tbl_info.shift,
			 TEE_MM_POOL_NO_FLAGS)) {
		DMSG("Failed to init kmap. Trap CPU!");
		panic();
	}
}
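The design point here (a hedged reading, see example #5): tee_mm_init is given tbl_info.shift as the pool granularity, so every allocation from tee_mmu_virt_kmap covers whole MMU blocks and can be programmed directly with core_mmu_set_entry. A hypothetical sanity check illustrating this:

/* Hypothetical check: any allocation from the kmap pool is rounded up
 * to a whole number of MMU blocks (1 << tbl_info.shift bytes each). */
static void example_check_kmap_granularity(void)
{
	struct core_mmu_table_info tbl_info;
	tee_mm_entry_t *mm;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	mm = tee_mm_alloc(&tee_mmu_virt_kmap, 1);
	if (!mm)
		panic();

	assert(!(tee_mm_get_bytes(mm) % (1 << tbl_info.shift)));
	tee_mm_free(mm);
}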
Example #5
File: tee_mmu.c Project: enavro/optee_os
TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
{
	tee_mm_entry_t *mm;
	uint32_t attr;
	struct core_mmu_table_info tbl_info;
	uint32_t pa_s;
	uint32_t pa_e;
	size_t n;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	pa_s = ROUNDDOWN(pa, 1 << tbl_info.shift);
	pa_e = ROUNDUP(pa + len, 1 << tbl_info.shift);

	mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL;
	if (tee_pbuf_is_sec(pa, len)) {
		attr |= TEE_MATTR_SECURE;
		attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else if (tee_pbuf_is_non_sec(pa, len)) {
		if (core_mmu_is_shm_cached())
			attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else {
		/* Neither secure nor non-secure memory: release the VA
		 * range again instead of leaking the allocation. */
		tee_mm_free(mm);
		return TEE_ERROR_GENERIC;
	}

	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs,
				   pa_s + (n << tbl_info.shift), attr);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*va = (void *)(tee_mm_get_smem(mm) +
		       core_mmu_get_block_offset(&tbl_info, pa));
	return TEE_SUCCESS;
}
Example #6
File: tee_mmu.c Project: enavro/optee_os
void tee_mmu_kunmap(void *va, size_t len)
{
	size_t n;
	tee_mm_entry_t *mm;
	struct core_mmu_table_info tbl_info;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	mm = tee_mm_find(&tee_mmu_virt_kmap, (vaddr_t)va);
	if (mm == NULL || len > tee_mm_get_bytes(mm))
		return;		/* Invalid range, not much to do */

	/* Clear the mmu entries */
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs, 0, 0);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	tee_mm_free(mm);
}
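Examples #5 and #6 are normally used as a pair; a hedged sketch of a hypothetical caller:

/* Hypothetical caller: temporarily map a physical buffer into the
 * kmap VA range, use it, then unmap it again. */
static TEE_Result example_with_mapped_buffer(tee_paddr_t pa, size_t len)
{
	void *va;
	TEE_Result res;

	res = tee_mmu_kmap_helper(pa, len, &va);
	if (res != TEE_SUCCESS)
		return res;

	/* ... access the buffer through va ... */

	tee_mmu_kunmap(va, len);
	return TEE_SUCCESS;
}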
Example #7
File: core_mmu.c Project: lackan/optee_os
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	if (!len)
		return true;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr);
	if (map && pbuf_inside_map_area(addr, len, map))
		return true;

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return false;

	if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
		return false;

	granule = 1 << tbl_info.shift;
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);
	/*
	 * Something is wrong: we can't fit the VA range into the selected
	 * table. The reserved VA range is possibly misaligned with the
	 * granule.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return false;

	/* Find end of the memory map */
	n = 0;
	while (static_memory_map[n].type != MEM_AREA_NOTYPE)
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	map->type = type;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);
	map->pa = p;

	set_region(&tbl_info, map);
	return true;
}
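A hedged sketch of a hypothetical caller (the memory type and address range are placeholders, not taken from the project): core_mmu_add_mapping returns false when the reserved VA space cannot hold the request, so the result should be checked.

/* Hypothetical late-mapping caller: map a secure I/O range after boot
 * and treat failure as fatal. Type and addresses are placeholders. */
static void example_map_io_range(paddr_t base, size_t size)
{
	if (!core_mmu_add_mapping(MEM_AREA_IO_SEC, base, size))
		panic();
}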