Example No. 1
static void transfer_tables(struct pgt_cache *pgt_cache, void *old_ctx,
			    vaddr_t old_va, void *new_ctx, vaddr_t new_va,
			    size_t size)
{
	const size_t pgtsize = CORE_MMU_PGDIR_SIZE;
	const vaddr_t new_base = ROUNDDOWN(new_va, pgtsize);
	const vaddr_t old_base = ROUNDDOWN(old_va, pgtsize);
	const size_t num_new_pgt = (size - 1 + new_va - new_base) / pgtsize + 1;
	const size_t num_old_pgt = (size - 1 + old_va - old_base) / pgtsize + 1;
	struct pgt *new_pgt[num_new_pgt];
	struct pgt *old_pgt[num_old_pgt];
	struct pgt *pgt;
	size_t n;

	/*
	 * Fill in new_pgt based on pgt_cache. Note that the pages should
	 * already have been allocated.
	 */
	SLIST_FOREACH(pgt, pgt_cache, link) {
		if (pgt->vabase < new_base)
			continue;
		n = (pgt->vabase - new_base) / pgtsize;
		if (n < num_new_pgt)
			new_pgt[n] = pgt;
	}
	for (n = 0; n < num_new_pgt; n++) {
		assert(new_pgt[n]);
		assert(new_pgt[n]->ctx == new_ctx);
	}

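	/* pgt_mu serializes access to the shared pgt cache and free lists */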
	mutex_lock(&pgt_mu);

	/* Extract the array of pgts that need their content transferred */
	for (n = 0; n < num_old_pgt; n++) {
		/*
		 * If the pgt isn't in the cache list there's nothing to
		 * transfer, so NULL here is OK.
		 */
		old_pgt[n] = pop_from_cache_list(old_base + n * pgtsize,
						 old_ctx);
	}

	tee_pager_transfer_uta_region(to_user_ta_ctx(old_ctx), old_va,
				      to_user_ta_ctx(new_ctx), new_va, new_pgt,
				      size);

	for (n = 0; n < num_old_pgt; n++) {
		if (!old_pgt[n])
			continue;

		if (old_pgt[n]->num_used_entries)
			push_to_cache_list(old_pgt[n]);
		else
			push_to_free_list(old_pgt[n]);
	}

	mutex_unlock(&pgt_mu);
}
Example No. 2
TEE_Result syscall_se_reader_open_session(unsigned long reader_handle,
			uint32_t *session_handle)
{
	TEE_Result ret;
	struct tee_se_reader_proxy *r = tee_svc_uref_to_kaddr(reader_handle);
	struct tee_ta_session *sess;
	struct tee_se_service *service;
	struct tee_se_session *ksession = NULL;

	if (!tee_se_manager_is_reader_proxy_valid(r))
		return TEE_ERROR_BAD_PARAMETERS;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	ret = tee_se_reader_open_session(r, &ksession);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;
	ret = tee_se_service_add_session(service, ksession);
	if (ret != TEE_SUCCESS)
		return ret;

	ret = tee_svc_copy_kaddr_to_uref(session_handle, ksession);
	if (ret != TEE_SUCCESS)
		return ret;

	return TEE_SUCCESS;
}
Example No. 3
uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
{
	const struct user_ta_ctx *utc = to_user_ta_ctx((void *)ctx);

	TEE_ASSERT(utc->mmu && utc->mmu->table &&
		   utc->mmu->size == TEE_MMU_UMAP_MAX_ENTRIES);

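	/* Entry 1 of the mapping table is the code segment (load address) */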
	return utc->mmu->table[1].va;
}
Example No. 4
uintptr_t tee_mmu_get_load_addr(const struct tee_ta_ctx *const ctx)
{
	const struct user_ta_ctx *utc = to_user_ta_ctx((void *)ctx);

	TEE_ASSERT(utc->mmu && utc->mmu->table &&
		   utc->mmu->size > TEE_MMU_UMAP_CODE_IDX);

	return utc->mmu->table[TEE_MMU_UMAP_CODE_IDX].va;
}
Example No. 5
/*
 * cache_operation() - dynamic cache clean/inval request from a TA.
 * It follows the ARM recommendation:
 *     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
 * Note that this implementation assumes dsb operations are part of
 * cache_maintenance_l1(), and L2 cache sync is part of
 * cache_maintenance_l2().
 */
static TEE_Result cache_operation(struct tee_ta_session *sess,
			enum utee_cache_operation op, void *va, size_t len)
{
	TEE_Result ret;
	paddr_t pa = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);

	if ((sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

	/*
	 * TAs may only perform cache maintenance on memref parameters,
	 * not on TA private memory.
	 */
	if (tee_mmu_is_vbuf_intersect_ta_private(utc, va, len))
		return TEE_ERROR_ACCESS_DENIED;

	ret = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_READ |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  (uaddr_t)va, len);
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_ACCESS_DENIED;

	pa = virt_to_phys(va);
	if (!pa)
		return TEE_ERROR_ACCESS_DENIED;

	switch (op) {
	case TEE_CACHEFLUSH:
		/* Clean L1, Flush L2, Flush L1 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		ret = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);

	case TEE_CACHECLEAN:
		/* Clean L1, Clean L2 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);

	case TEE_CACHEINVALIDATE:
		/* Inval L2, Inval L1 */
		ret = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);

	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}
}
Example No. 6
void tee_mmu_set_ctx(struct tee_ta_ctx *ctx)
{
	if (!ctx || !is_user_ta_ctx(ctx)) {
		core_mmu_set_user_map(NULL);
	} else {
		struct core_mmu_user_map map;
		struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

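		/* Build the user mapping for this TA and activate it */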
		core_mmu_create_user_map(utc->mmu, utc->context, &map);
		core_mmu_set_user_map(&map);
	}
}
Example No. 7
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t user_va_base;
	size_t user_va_size;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	core_mmu_get_user_va_range(&user_va_base, &user_va_size);
	if (v >= user_va_base && v < (user_va_base + user_va_size)) {
		if (!core_mmu_user_mapping_is_active()) {
			TEE_ASSERT(pa == 0);
			return;
		}

		res = tee_mmu_user_va2pa_helper(
			to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
		if (res == TEE_SUCCESS)
			TEE_ASSERT(pa == p);
		else
			TEE_ASSERT(pa == 0);
		return;
	}
#ifdef CFG_WITH_PAGER
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw areas when the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			TEE_ASSERT(pa == p);
		} else
			TEE_ASSERT(pa == 0);
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p))
		TEE_ASSERT(pa == p);
	else
		TEE_ASSERT(pa == 0);
}
Example No. 8
static void *phys_to_virt_ta_vaspace(paddr_t pa)
{
	TEE_Result res;
	void *va = NULL;

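	/* TA virtual addresses can only be resolved while a user mapping is active */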
	if (!core_mmu_user_mapping_is_active())
		return NULL;

	res = tee_mmu_user_pa2va_helper(to_user_ta_ctx(tee_mmu_get_ctx()),
					pa, &va);
	if (res != TEE_SUCCESS)
		return NULL;
	return va;
}
Example No. 9
/*
 * cache_operation() - dynamic cache clean/inval request from a TA.
 * It follows the ARM recommendation:
 *     http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0246d/Beicdhde.html
 * Note that this implementation assumes dsb operations are part of
 * cache_maintenance_l1(), and L2 cache sync is part of
 * cache_maintenance_l2().
 */
static TEE_Result cache_operation(struct tee_ta_session *sess,
			enum utee_cache_operation op, void *va, size_t len)
{
	TEE_Result ret;
	paddr_t pa = 0;
	struct user_ta_ctx *utc = to_user_ta_ctx(sess->ctx);

	if ((sess->ctx->flags & TA_FLAG_CACHE_MAINTENANCE) == 0)
		return TEE_ERROR_NOT_SUPPORTED;

	ret = tee_mmu_check_access_rights(utc, TEE_MEMORY_ACCESS_WRITE,
					  (tee_uaddr_t)va, len);
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_ACCESS_DENIED;

	pa = virt_to_phys(va);
	if (!pa)
		return TEE_ERROR_ACCESS_DENIED;

	switch (op) {
	case TEE_CACHEFLUSH:
		/* Clean L1, Flush L2, Flush L1 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		ret = cache_maintenance_l2(L2CACHE_AREA_CLEAN_INV, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_CLEAN_INV, va, len);

	case TEE_CACHECLEAN:
		/* Clean L1, Clean L2 */
		ret = cache_maintenance_l1(DCACHE_AREA_CLEAN, va, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l2(L2CACHE_AREA_CLEAN, pa, len);

	case TEE_CACHEINVALIDATE:
		/* Inval L2, Inval L1 */
		ret = cache_maintenance_l2(L2CACHE_AREA_INVALIDATE, pa, len);
		if (ret != TEE_SUCCESS)
			return ret;
		return cache_maintenance_l1(DCACHE_AREA_INVALIDATE, va, len);

	default:
		return TEE_ERROR_NOT_SUPPORTED;
	}
}
Example No. 10
TEE_Result syscall_se_session_is_closed(unsigned long session_handle)
{
	TEE_Result ret;
	struct tee_se_session *s = tee_svc_uref_to_kaddr(session_handle);
	struct tee_ta_session *sess;
	struct tee_se_service *service;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;

	if (!tee_se_service_is_session_valid(service, s))
		return TEE_ERROR_BAD_PARAMETERS;

	return tee_se_service_is_session_closed(service, s);
}
Example No. 11
TEE_Result syscall_se_session_open_channel(unsigned long session_handle,
			unsigned long is_logical, const void *aid_buf,
			size_t aid_buf_len, uint32_t *channel_handle)
{
	TEE_Result ret;
	struct tee_se_session *s = tee_svc_uref_to_kaddr(session_handle);
	struct tee_ta_session *sess;
	struct tee_se_service *service;
	struct tee_se_aid *se_aid = NULL;
	struct tee_se_channel *kc = NULL;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;
	if (!tee_se_service_is_session_valid(service, s))
		return TEE_ERROR_BAD_PARAMETERS;

	if (aid_buf) {
		ret = tee_se_aid_create_from_buffer((void *)aid_buf,
						    aid_buf_len, &se_aid);
		if (ret != TEE_SUCCESS)
			return ret;
	}

	if (is_logical)
		ret = tee_se_session_open_logical_channel(s, se_aid, &kc);
	else
		ret = tee_se_session_open_basic_channel(s, se_aid, &kc);
	if (ret != TEE_SUCCESS)
		goto error_free_aid;

	ret = tee_svc_copy_kaddr_to_uref(channel_handle, kc);
	if (ret != TEE_SUCCESS)
		goto error_free_aid;

	return TEE_SUCCESS;

error_free_aid:
	if (se_aid)
		tee_se_aid_release(se_aid);
	return ret;
}
Example No. 12
TEE_Result syscall_se_reader_close_sessions(unsigned long reader_handle)
{
	TEE_Result ret;
	struct tee_se_reader_proxy *r = tee_svc_uref_to_kaddr(reader_handle);
	struct tee_se_service *service;
	struct tee_ta_session *sess;

	if (!tee_se_manager_is_reader_proxy_valid(r))
		return TEE_ERROR_BAD_PARAMETERS;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;
	tee_se_service_close_sessions_by_reader(service, r);

	return TEE_SUCCESS;
}
Example No. 13
static TEE_Result gprof_start_pc_sampling(struct tee_ta_session *s,
					  uint32_t param_types,
					  TEE_Param params[TEE_NUM_PARAMS])
{
	uint32_t exp_pt = TEE_PARAM_TYPES(TEE_PARAM_TYPE_MEMREF_INOUT,
					  TEE_PARAM_TYPE_VALUE_INPUT,
					  TEE_PARAM_TYPE_NONE,
					  TEE_PARAM_TYPE_NONE);
	struct sample_buf *sbuf;
	uint32_t offset;
	uint32_t scale;
	TEE_Result res;
	uint32_t len;
	uaddr_t buf;

	if (exp_pt != param_types)
		return TEE_ERROR_BAD_PARAMETERS;

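	/* params[0]: sample buffer memref, params[1]: offset (a) and scale (b) */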
	buf = (uaddr_t)params[0].memref.buffer;
	len = params[0].memref.size;
	offset = params[1].value.a;
	scale = params[1].value.b;

	res = tee_mmu_check_access_rights(to_user_ta_ctx(s->ctx),
					  TEE_MEMORY_ACCESS_WRITE |
					  TEE_MEMORY_ACCESS_ANY_OWNER,
					  buf, len);
	if (res != TEE_SUCCESS)
		return res;
	sbuf = calloc(1, sizeof(*sbuf));
	if (!sbuf)
		return TEE_ERROR_OUT_OF_MEMORY;

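	/* Record the user buffer and parameters, then enable sampling */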
	sbuf->samples = (uint16_t *)buf;
	sbuf->nsamples = len / sizeof(*sbuf->samples);
	sbuf->offset = offset;
	sbuf->scale = scale;
	sbuf->freq = read_cntfrq();
	sbuf->enabled = true;
	s->sbuf = sbuf;

	return TEE_SUCCESS;
}
Example No. 14
TEE_Result syscall_se_channel_get_select_resp(unsigned long channel_handle,
			void *resp, uint64_t *resp_len)
{
	TEE_Result ret;
	struct tee_se_channel *c = tee_svc_uref_to_kaddr(channel_handle);
	struct tee_ta_session *sess;
	struct tee_se_service *service;
	struct resp_apdu *resp_apdu;
	size_t kresp_len;
	uint64_t uresp_len;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;
	if (!tee_se_service_is_channel_valid(service, c))
		return TEE_ERROR_BAD_PARAMETERS;

	ret = tee_svc_copy_from_user(&uresp_len, resp_len, sizeof(uresp_len));
	if (ret != TEE_SUCCESS)
		return TEE_ERROR_BAD_PARAMETERS;

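	/* Fetch the cached SELECT response and copy it to the user buffer */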
	ret = tee_se_channel_get_select_response(c, &resp_apdu);
	if (ret != TEE_SUCCESS)
		return ret;

	kresp_len = apdu_get_length(to_apdu_base(resp_apdu));
	if (uresp_len < kresp_len)
		return TEE_ERROR_SHORT_BUFFER;

	ret = tee_svc_copy_to_user(resp,
			apdu_get_data(to_apdu_base(resp_apdu)), kresp_len);
	if (ret != TEE_SUCCESS)
		return ret;

	uresp_len = kresp_len;
	ret = tee_svc_copy_to_user(resp_len, &uresp_len, sizeof(*resp_len));
	if (ret != TEE_SUCCESS)
		return ret;

	return TEE_SUCCESS;
}
Example No. 15
TEE_Result syscall_se_channel_select_next(unsigned long channel_handle)
{
	TEE_Result ret;
	struct tee_se_channel *c = tee_svc_uref_to_kaddr(channel_handle);
	struct tee_ta_session *sess;
	struct tee_se_service *service;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;
	if (!tee_se_service_is_channel_valid(service, c))
		return TEE_ERROR_BAD_PARAMETERS;

	tee_se_channel_select_next(c);

	return TEE_SUCCESS;
}
Example No. 16
TEE_Result syscall_se_session_get_atr(unsigned long session_handle,
			void *atr, uint64_t *atr_len)
{
	TEE_Result ret;
	struct tee_se_session *s = tee_svc_uref_to_kaddr(session_handle);
	struct tee_ta_session *sess;
	struct tee_se_service *service;
	size_t katr_len;
	uint64_t uatr_len;
	uint8_t *katr;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;
	if (!tee_se_service_is_session_valid(service, s))
		return TEE_ERROR_BAD_PARAMETERS;

	ret = tee_svc_copy_from_user(&uatr_len, atr_len, sizeof(uatr_len));
	if (ret != TEE_SUCCESS)
		return ret;

	katr_len = uatr_len;
	ret = tee_se_session_get_atr(s, &katr, &katr_len);
	if (ret != TEE_SUCCESS)
		return ret;

	if (uatr_len < katr_len)
		return TEE_ERROR_SHORT_BUFFER;

	ret = tee_svc_copy_to_user(atr, katr, katr_len);
	if (ret != TEE_SUCCESS)
		return ret;

	uatr_len = katr_len;
	ret = tee_svc_copy_to_user(atr_len, &uatr_len, sizeof(*atr_len));
	if (ret != TEE_SUCCESS)
		return ret;

	return TEE_SUCCESS;
}
Example No. 17
void tee_mmu_set_ctx(struct tee_ta_ctx *ctx)
{
	if (!ctx || !is_user_ta_ctx(ctx)) {
		core_mmu_set_user_map(NULL);
#ifdef CFG_SMALL_PAGE_USER_TA
		/*
		 * We're not needing the user page tables for the moment,
		 * release them as some other thread may be waiting for
		 * them.
		 */
		pgt_free(&thread_get_tsd()->pgt_cache);
#endif
	} else {
		struct core_mmu_user_map map;
		struct user_ta_ctx *utc = to_user_ta_ctx(ctx);

		core_mmu_create_user_map(utc->mmu, utc->context, &map);
		core_mmu_set_user_map(&map);
	}
	thread_get_tsd()->ctx = ctx;
}
Example No. 18
static void get_current_ta_exidx_stack(vaddr_t *exidx, size_t *exidx_sz,
				       vaddr_t *stack, size_t *stack_size)
{
	struct tee_ta_session *s;
	struct user_ta_ctx *utc;

	if (tee_ta_get_current_session(&s) != TEE_SUCCESS)
		panic();

	utc = to_user_ta_ctx(s->ctx);

	/* Only 32-bit TAs use .ARM.exidx/.ARM.extab exception handling */
	assert(utc->is_32bit);

	*exidx = utc->exidx_start; /* NULL if TA has no unwind tables */
	if (*exidx)
		*exidx += utc->load_addr;
	*exidx_sz = utc->exidx_size;

	*stack = utc->stack_addr;
	*stack_size = utc->mobj_stack->size;
}
Example No. 19
TEE_Result syscall_se_channel_transmit(unsigned long channel_handle,
			void *cmd, unsigned long cmd_len, void *resp,
			uint64_t *resp_len)
{
	TEE_Result ret;
	struct tee_se_channel *c = tee_svc_uref_to_kaddr(channel_handle);
	struct tee_ta_session *sess;
	struct tee_se_service *service;
	struct cmd_apdu *cmd_apdu;
	struct resp_apdu *resp_apdu;
	void *kcmd_buf;
	uint64_t kresp_len;

	ret = tee_ta_get_current_session(&sess);
	if (ret != TEE_SUCCESS)
		return ret;

	service = to_user_ta_ctx(sess->ctx)->se_service;
	if (!tee_se_service_is_channel_valid(service, c))
		return TEE_ERROR_BAD_PARAMETERS;

	ret = tee_svc_copy_from_user(&kresp_len, resp_len, sizeof(kresp_len));
	if (ret != TEE_SUCCESS)
		return ret;

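	/* Copy the command APDU from the TA into a kernel buffer */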
	kcmd_buf = malloc(cmd_len);
	if (kcmd_buf == NULL)
		return TEE_ERROR_OUT_OF_MEMORY;

	ret = tee_svc_copy_from_user(kcmd_buf, cmd, cmd_len);
	if (ret != TEE_SUCCESS)
		goto err_free_cmd_buf;

	cmd_apdu = alloc_cmd_apdu_from_buf(kcmd_buf, cmd_len);
	if (cmd_apdu == NULL) {
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err_free_cmd_buf;
	}

	kresp_len -= 2; /* reserve space for SW1 and SW2 */
	resp_apdu = alloc_resp_apdu(kresp_len);
	if (resp_apdu == NULL) {
		ret = TEE_ERROR_OUT_OF_MEMORY;
		goto err_free_cmd_apdu;
	}

	ret = tee_se_channel_transmit(c, cmd_apdu, resp_apdu);
	if (ret != TEE_SUCCESS)
		goto err_free_resp_apdu;

	kresp_len = apdu_get_length(to_apdu_base(resp_apdu));
	ret = tee_svc_copy_to_user(resp_len, &kresp_len, sizeof(*resp_len));
	if (ret != TEE_SUCCESS)
		goto err_free_resp_apdu;

	ret = tee_svc_copy_to_user(resp, resp_apdu_get_data(resp_apdu),
				   kresp_len);
	if (ret != TEE_SUCCESS)
		goto err_free_resp_apdu;

	apdu_release(to_apdu_base(resp_apdu));
	apdu_release(to_apdu_base(cmd_apdu));
	free(kcmd_buf);

	return TEE_SUCCESS;

err_free_resp_apdu:
	apdu_release(to_apdu_base(resp_apdu));
err_free_cmd_apdu:
	apdu_release(to_apdu_base(cmd_apdu));
err_free_cmd_buf:
	free(kcmd_buf);
	return ret;
}
Example No. 20
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base;
		size_t user_va_size;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = tee_mmu_user_va2pa_helper(
				to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
		if (v != pa)
			panic("issue in linear address space");
		return;
	}
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw-locked areas when the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			if (pa != p)
				panic();
		} else
			if (pa)
				panic();
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p)) {
		if (pa != p)
			panic();
	} else {
		if (pa)
			panic();
	}
}