Example #1
static void set_region(struct core_mmu_table_info *tbl_info,
		struct tee_mmap_region *region)
{
	unsigned end;
	unsigned idx;
	paddr_t pa;

	/* va, len and pa should be block aligned */
	assert(!core_mmu_get_block_offset(tbl_info, region->va));
	assert(!core_mmu_get_block_offset(tbl_info, region->size));
	assert(!core_mmu_get_block_offset(tbl_info, region->pa));

	idx = core_mmu_va2idx(tbl_info, region->va);
	end = core_mmu_va2idx(tbl_info, region->va + region->size);
	pa = region->pa;

	debug_print("set_region va %016" PRIxVA " pa %016" PRIxPA " size %016zu",
		region->va, region->pa, region->size);

	while (idx < end) {
		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
		idx++;
		pa += 1 << tbl_info->shift;
	}
}
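
For orientation, core_mmu_va2idx(), core_mmu_idx2va() and core_mmu_get_block_offset() used above boil down to shift/mask arithmetic on the table's va_base and shift fields. The standalone program below is a minimal sketch of that arithmetic, assuming the usual (va - va_base) >> shift index calculation; the struct and the values in main() are illustrative stand-ins, not the real core_mmu_table_info.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the core_mmu_table_info fields we need */
struct tbl_model {
	uintptr_t va_base;	/* first VA covered by this table */
	unsigned shift;		/* log2 of the block/page size */
	unsigned num_entries;	/* number of entries in the table */
};

static unsigned model_va2idx(const struct tbl_model *t, uintptr_t va)
{
	return (va - t->va_base) >> t->shift;
}

static uintptr_t model_idx2va(const struct tbl_model *t, unsigned idx)
{
	return t->va_base + ((uintptr_t)idx << t->shift);
}

static uintptr_t model_block_offset(const struct tbl_model *t, uintptr_t va)
{
	return va & (((uintptr_t)1 << t->shift) - 1);
}

int main(void)
{
	/* A table of 4 KiB pages at an arbitrary base address */
	struct tbl_model t = { .va_base = 0x40000000, .shift = 12,
			       .num_entries = 512 };
	uintptr_t va = 0x40003000;
	size_t size = 0x2000;
	unsigned idx = model_va2idx(&t, va);
	unsigned end = model_va2idx(&t, va + size);

	/* Walk the region one block at a time, like set_region() above */
	while (idx < end) {
		printf("entry %u -> va 0x%lx\n", idx,
		       (unsigned long)model_idx2va(&t, idx));
		idx++;
	}
	assert(model_block_offset(&t, va) == 0); /* va is block aligned */
	return 0;
}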
Example #2
File: tee_mmu.c Project: enavro/optee_os
TEE_Result tee_mmu_kmap_pa2va_helper(void *pa, void **va)
{
	size_t n;
	struct core_mmu_table_info tbl_info;
	size_t shift;
	paddr_t match_pa;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	shift = tbl_info.shift;
	match_pa = ROUNDDOWN((paddr_t)pa, 1 << shift);

	for (n = core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_START_VA);
	     n < core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_END_VA); n++) {
		uint32_t attr;
		paddr_t npa;

		core_mmu_get_entry(&tbl_info, n, &npa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;
		assert(!(attr & TEE_MATTR_TABLE));

		if (npa == match_pa) {
			*va = (void *)(core_mmu_idx2va(&tbl_info, n) +
				       ((paddr_t)pa - match_pa));
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ACCESS_DENIED;
}
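
A caller of this helper only needs to check the return code; on success *va points into the kmap area at the same offset within the block as the requested physical address. The wrapper below is a hypothetical convenience function (kmap_va_or_null is not part of the project), using only the helper shown above:

static void *kmap_va_or_null(void *pa)
{
	void *va = NULL;

	/* TEE_ERROR_ACCESS_DENIED means pa is not covered by the kmap area */
	if (tee_mmu_kmap_pa2va_helper(pa, &va) != TEE_SUCCESS)
		return NULL;
	return va;
}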
Example #3
File: core_mmu.c Project: lackan/optee_os
static void *phys_to_virt_tee_ram(paddr_t pa)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned idx;
	unsigned end_idx;
	uint32_t a;
	paddr_t p;

	if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
		return (void *)(vaddr_t)pa;

	end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
				      CFG_TEE_RAM_VA_SIZE);
	/* Most addresses are mapped linearly, try that first if possible. */
	idx = core_mmu_va2idx(ti, pa);
	if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
	    idx < end_idx) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
	     idx < end_idx; idx++) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	return NULL;
}
Example #4
File: tee_mmu.c Project: enavro/optee_os
static TEE_Result tee_mmu_kmap_va2pa_attr(void *va, void **pa, uint32_t *attr)
{
	struct core_mmu_table_info tbl_info;
	size_t block_offset;
	size_t n;
	paddr_t npa;
	uint32_t nattr;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	if (!tee_mm_addr_is_within_range(&tee_mmu_virt_kmap, (vaddr_t)va))
		return TEE_ERROR_ACCESS_DENIED;

	n = core_mmu_va2idx(&tbl_info, (vaddr_t)va);
	core_mmu_get_entry(&tbl_info, n, &npa, &nattr);
	if (!(nattr & TEE_MATTR_VALID_BLOCK))
		return TEE_ERROR_ACCESS_DENIED;

	block_offset = core_mmu_get_block_offset(&tbl_info, (vaddr_t)va);
	*pa = (void *)(npa + block_offset);

	if (attr)
		*attr = nattr;

	return TEE_SUCCESS;
}
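
The returned physical address is the block's base plus the offset of va within that block, and the raw attribute bits are passed back optionally. As a hypothetical usage sketch for a caller in the same file (kmap_va2pa_writable is made up here, and TEE_MATTR_PW is assumed to be the privileged-write attribute bit defined elsewhere in OP-TEE), read-only mappings could be rejected like this:

static TEE_Result kmap_va2pa_writable(void *va, paddr_t *pa)
{
	void *p = NULL;
	uint32_t attr = 0;
	TEE_Result res = tee_mmu_kmap_va2pa_attr(va, &p, &attr);

	if (res != TEE_SUCCESS)
		return res;
	if (!(attr & TEE_MATTR_PW))	/* mapping is not writable */
		return TEE_ERROR_ACCESS_DENIED;
	*pa = (paddr_t)(uintptr_t)p;
	return TEE_SUCCESS;
}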
Example #5
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t user_va_base;
	size_t user_va_size;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	core_mmu_get_user_va_range(&user_va_base, &user_va_size);
	if (v >= user_va_base && v < (user_va_base + user_va_size)) {
		if (!core_mmu_user_mapping_is_active()) {
			TEE_ASSERT(pa == 0);
			return;
		}

		res = tee_mmu_user_va2pa_helper(
			to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
		if (res == TEE_SUCCESS)
			TEE_ASSERT(pa == p);
		else
			TEE_ASSERT(pa == 0);
		return;
	}
#ifdef CFG_WITH_PAGER
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw areas while the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			TEE_ASSERT(pa == p);
		} else
			TEE_ASSERT(pa == 0);
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p))
		TEE_ASSERT(pa == p);
	else
		TEE_ASSERT(pa == 0);
}
Example #6
File: core_mmu.c Project: lackan/optee_os
static void set_pg_region(struct core_mmu_table_info *dir_info,
			struct tee_mmap_region *region, struct pgt **pgt,
			struct core_mmu_table_info *pg_info)
{
	struct tee_mmap_region r = *region;
	vaddr_t end = r.va + r.size;
	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;

	while (r.va < end) {
		if (!pg_info->table ||
		     r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
			/*
			 * We're assigning a new translation table.
			 */
			unsigned int idx;

			assert(*pgt); /* We should have allocated enough */

			/* Virtual addresses must grow */
			assert(r.va > pg_info->va_base);

			idx = core_mmu_va2idx(dir_info, r.va);
			pg_info->table = (*pgt)->tbl;
			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
#ifdef CFG_PAGED_USER_TA
			assert((*pgt)->vabase == pg_info->va_base);
#endif
			*pgt = SLIST_NEXT(*pgt, link);

			core_mmu_set_entry(dir_info, idx,
					   virt_to_phys(pg_info->table),
					   pgt_attr);
		}

		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
			     end - r.va);
		if (!(r.attr & TEE_MATTR_PAGED))
			set_region(pg_info, &r);
		r.va += r.size;
		r.pa += r.size;
	}
}
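
The loop above splits the region at page-directory boundaries: whenever r.va crosses into a new CORE_MMU_PGDIR_SIZE window, the next preallocated page table from the pgt list is installed in the directory, and each chunk is then filled in with set_region() from Example #1. The standalone sketch below models only the chunking arithmetic; PGDIR_SIZE and walk_region() are illustrative names, with a 2 MiB directory window standing in for CORE_MMU_PGDIR_SIZE.

#include <stdint.h>
#include <stdio.h>

#define PGDIR_SIZE	0x200000UL	/* 2 MiB, one L2 table per window */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

static void walk_region(uintptr_t va, size_t size)
{
	uintptr_t end = va + size;

	while (va < end) {
		/* Base of the page-directory window covering va */
		uintptr_t pgdir_base = va & ~(PGDIR_SIZE - 1);
		/* Stop the chunk at the window boundary or the region end */
		size_t chunk = MIN(PGDIR_SIZE - (va - pgdir_base), end - va);

		printf("chunk va 0x%lx size 0x%zx (table for 0x%lx)\n",
		       (unsigned long)va, chunk, (unsigned long)pgdir_base);
		va += chunk;
	}
}

int main(void)
{
	/* A region that straddles two page-directory windows */
	walk_region(0x401ff000, 0x3000);
	return 0;
}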
Example #7
File: core_mmu.c Project: lackan/optee_os
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base;
		size_t user_va_size;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = tee_mmu_user_va2pa_helper(
				to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
		if (v != pa)
			panic("issue in linear address space");
		return;
	}
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw-locked areas while the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			if (pa != p)
				panic();
		} else
			if (pa)
				panic();
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p)) {
		if (pa != p)
			panic();
	} else {
		if (pa)
			panic();
	}
}
Example #8
File: core_mmu.c Project: lackan/optee_os
bool core_mmu_add_mapping(enum teecore_memtypes type, paddr_t addr, size_t len)
{
	struct core_mmu_table_info tbl_info;
	struct tee_mmap_region *map;
	size_t n;
	size_t granule;
	paddr_t p;
	size_t l;

	if (!len)
		return true;

	/* Check if the memory is already mapped */
	map = find_map_by_type_and_pa(type, addr);
	if (map && pbuf_inside_map_area(addr, len, map))
		return true;

	/* Find the reserved va space used for late mappings */
	map = find_map_by_type(MEM_AREA_RES_VASPACE);
	if (!map)
		return false;

	if (!core_mmu_find_table(map->va, UINT_MAX, &tbl_info))
		return false;

	granule = 1 << tbl_info.shift;
	p = ROUNDDOWN(addr, granule);
	l = ROUNDUP(len + addr - p, granule);
	/*
	 * If we can't fit the va range into the selected table something
	 * is wrong: the reserved va range is possibly misaligned with the
	 * granule.
	 */
	if (core_mmu_va2idx(&tbl_info, map->va + len) >= tbl_info.num_entries)
		return false;

	/* Find end of the memory map */
	n = 0;
	while (static_memory_map[n].type != MEM_AREA_NOTYPE)
		n++;

	if (n < (ARRAY_SIZE(static_memory_map) - 1)) {
		/* There's room for another entry */
		static_memory_map[n].va = map->va;
		static_memory_map[n].size = l;
		static_memory_map[n + 1].type = MEM_AREA_NOTYPE;
		map->va += l;
		map->size -= l;
		map = static_memory_map + n;
	} else {
		/*
		 * There isn't room for another entry, steal the reserved
		 * entry as it's not useful for anything else any longer.
		 */
		map->size = l;
	}
	map->type = type;
	map->region_size = granule;
	map->attr = core_mmu_type_to_attr(type);
	map->pa = p;

	set_region(&tbl_info, map);
	return true;
}
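
As a hypothetical usage sketch: mapping a secure peripheral after boot only requires the type, physical address and length; the function rounds both to the table granule, carves the VA range out of the MEM_AREA_RES_VASPACE entry and programs the entries via set_region(). The wrapper below is illustrative only (map_secure_device is made up; MEM_AREA_IO_SEC is assumed to be the teecore_memtypes value for secure I/O).

static bool map_secure_device(paddr_t dev_pa, size_t dev_size)
{
	if (!core_mmu_add_mapping(MEM_AREA_IO_SEC, dev_pa, dev_size))
		return false;
	/*
	 * The new region is now part of the static memory map, so generic
	 * pa/va translation that walks the map can resolve this range.
	 */
	return true;
}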