Example #1
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t user_va_base;
	size_t user_va_size;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	core_mmu_get_user_va_range(&user_va_base, &user_va_size);
	if (v >= user_va_base && v < (user_va_base + user_va_size)) {
		if (!core_mmu_user_mapping_is_active()) {
			TEE_ASSERT(pa == 0);
			return;
		}

		res = tee_mmu_user_va2pa_helper(
			to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
		if (res == TEE_SUCCESS)
			TEE_ASSERT(pa == p);
		else
			TEE_ASSERT(pa == 0);
		return;
	}
#ifdef CFG_WITH_PAGER
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those
		 * pages change all the time. But some ranges are safe,
		 * for instance rw areas when the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			TEE_ASSERT(pa == p);
		} else
			TEE_ASSERT(pa == 0);
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p))
		TEE_ASSERT(pa == p);
	else
		TEE_ASSERT(pa == 0);
}
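
A usage sketch, given as an assumption rather than part of the listing above: check_pa_matches_va() reads naturally as a debug cross-check on the regular translation path, so a virt_to_phys() wrapper could translate with core_va2pa_helper() and then run the checker, letting a mismatch trip the assertions instead of a wrong physical address propagating. Only core_va2pa_helper() and check_pa_matches_va() come from Example #1; the wrapper itself is hypothetical.

/*
 * Hypothetical caller (sketch): cross-check the regular lookup
 * against check_pa_matches_va(). Not part of Example #1 itself.
 */
paddr_t virt_to_phys(void *va)
{
	paddr_t pa = 0;

	/* As used above, core_va2pa_helper() returns non-zero on failure */
	if (core_va2pa_helper(va, &pa))
		pa = 0;

	/* Assert that the result agrees with the MMU tables */
	check_pa_matches_va(va, pa);

	return pa;
}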
Example #2
static void *phys_to_virt_ta_vaspace(paddr_t pa)
{
	TEE_Result res;
	void *va = NULL;

	if (!core_mmu_user_mapping_is_active())
		return NULL;

	res = tee_mmu_user_pa2va_helper(to_user_ta_ctx(tee_mmu_get_ctx()),
					pa, &va);
	if (res != TEE_SUCCESS)
		return NULL;
	return va;
}
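
A possible dispatch site, sketched under assumptions: phys_to_virt_ta_vaspace() only makes sense for physical addresses that belong to the currently active user TA mapping, so a memory-type aware phys_to_virt() front end could route that case here and leave other memory types to their own helpers. The enum teecore_memtypes type and the MEM_AREA_TA_VASPACE value are assumed names for this sketch; the dispatcher itself is hypothetical.

/*
 * Hypothetical dispatcher (sketch): enum teecore_memtypes and
 * MEM_AREA_TA_VASPACE are assumed here, not shown in Example #2.
 */
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m)
{
	void *va = NULL;

	switch (m) {
	case MEM_AREA_TA_VASPACE:
		/* Valid only while a user TA mapping is active */
		va = phys_to_virt_ta_vaspace(pa);
		break;
	default:
		/* Other memory types would use their own lookup helpers */
		break;
	}

	return va;
}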
Example #3
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base;
		size_t user_va_size;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = tee_mmu_user_va2pa_helper(
				to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
		if (v != pa)
			panic("issue in linear address space");
		return;
	}
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those
		 * pages change all the time. But some ranges are safe,
		 * for instance rw-locked areas when the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			if (pa != p)
				panic();
		} else if (pa) {
			panic();
		}
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p)) {
		if (pa != p)
			panic();
	} else {
		if (pa)
			panic();
	}
}
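
To make the table-entry step concrete, here is a standalone sketch, with illustrative names not taken from the examples, of the address recombination behind p |= v & mask: the entry supplies the output base of the block or page, and the low shift bits of the virtual address supply the offset inside it.

#include <stdint.h>

/* Illustrative stand-ins for paddr_t/vaddr_t in this sketch */
typedef uint64_t pa_t;
typedef uintptr_t va_t;

/*
 * Combine a translation-table entry's output base with the offset bits
 * of the virtual address. 'shift' is the mapping size in address bits,
 * e.g. 12 for a 4 KiB page, playing the role of ti->shift above.
 */
static pa_t entry_base_plus_offset(pa_t entry_base, va_t va, unsigned int shift)
{
	pa_t mask = ((pa_t)1 << shift) - 1;

	return (entry_base & ~mask) | (va & mask);
}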