Code example #1
File: core_mmu.c Project: lackan/optee_os
static void *phys_to_virt_tee_ram(paddr_t pa)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned idx;
	unsigned end_idx;
	uint32_t a;
	paddr_t p;

	/* TEE core is mapped linearly, VA == PA, within this range */
	if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
		return (void *)(vaddr_t)pa;

	end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
				      CFG_TEE_RAM_VA_SIZE);
	/* Most addresses are mapped linearly; try that first if possible. */
	idx = core_mmu_va2idx(ti, pa);
	if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
	    idx < end_idx) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	/* Fall back to scanning the whole TEE RAM range */
	for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
	     idx < end_idx; idx++) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	return NULL;
}
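
The lookup strategy is the interesting part here: most of TEE RAM is mapped linearly, so the function first probes the single index a linear mapping would predict and only falls back to a full scan of the table when that guess misses. Below is a minimal standalone sketch of the same guess-then-scan reverse lookup over a mock entry array; all names (struct entry, lookup_va, the PAGE_SHIFT value and so on) are illustrative stand-ins, not OP-TEE APIs.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define NUM_ENTRIES	8
#define VA_BASE		0x10000000UL
#define VALID		0x1u

/* Mock table: entry i maps VA_BASE + i * PAGE_SIZE to tbl[i].pa */
struct entry {
	uintptr_t pa;
	uint32_t attr;
};

static struct entry tbl[NUM_ENTRIES];

/* Reverse lookup: find the VA mapping pa, trying the linear guess first */
static uintptr_t lookup_va(uintptr_t pa)
{
	size_t idx;

	/* Guess: if the mapping is linear, the page index follows from pa */
	idx = (pa - tbl[0].pa) >> PAGE_SHIFT;
	if (idx < NUM_ENTRIES && (tbl[idx].attr & VALID) &&
	    tbl[idx].pa == (pa & ~(PAGE_SIZE - 1)))
		return VA_BASE + (idx << PAGE_SHIFT) + (pa & (PAGE_SIZE - 1));

	/* Fallback: scan every entry */
	for (idx = 0; idx < NUM_ENTRIES; idx++)
		if ((tbl[idx].attr & VALID) &&
		    tbl[idx].pa == (pa & ~(PAGE_SIZE - 1)))
			return VA_BASE + (idx << PAGE_SHIFT) +
			       (pa & (PAGE_SIZE - 1));

	return 0; /* not mapped */
}

int main(void)
{
	size_t i;

	/* Mostly linear mapping with one remapped page */
	for (i = 0; i < NUM_ENTRIES; i++) {
		tbl[i].pa = 0x80000000UL + i * PAGE_SIZE;
		tbl[i].attr = VALID;
	}
	tbl[5].pa = 0x90000000UL;	/* non-linear exception */

	printf("va for 0x80002010: 0x%lx\n",
	       (unsigned long)lookup_va(0x80002010UL));
	printf("va for 0x90000123: 0x%lx\n",
	       (unsigned long)lookup_va(0x90000123UL));
	return 0;
}

The guess keeps the common case O(1) while the scan preserves correctness for the few non-linearly mapped pages.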
Code example #2
File: tee_mmu.c Project: enavro/optee_os
TEE_Result tee_mmu_kmap_pa2va_helper(void *pa, void **va)
{
	size_t n;
	struct core_mmu_table_info tbl_info;
	size_t shift;
	paddr_t match_pa;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	shift = tbl_info.shift;
	/* Round pa down to the granularity covered by one table entry */
	match_pa = ROUNDDOWN((paddr_t)pa, 1 << shift);

	/* Scan the kmap VA range for an entry that maps match_pa */
	for (n = core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_START_VA);
	     n < core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_END_VA); n++) {
		uint32_t attr;
		paddr_t npa;

		core_mmu_get_entry(&tbl_info, n, &npa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;
		assert(!(attr & TEE_MATTR_TABLE));

		if (npa == match_pa) {
			/* Restore the offset of pa within the block */
			*va = (void *)(core_mmu_idx2va(&tbl_info, n) +
				       ((paddr_t)pa - match_pa));
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ACCESS_DENIED;
}
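
The key arithmetic is the ROUNDDOWN: a table entry only records the physical address of the start of the block it maps, so the search key has to be pa rounded down to the entry granularity (1 << shift), and the offset inside the block is added back onto the recovered virtual address. A minimal standalone sketch of that round-down/offset-restore step follows; ROUND_DOWN, struct blk and blk_pa2va are illustrative names, not OP-TEE identifiers.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for OP-TEE's ROUNDDOWN() */
#define ROUND_DOWN(x, align)	((x) & ~((uintptr_t)(align) - 1))

/*
 * One table entry covering (1 << shift) bytes: va_base and pa_base
 * are both aligned to that granularity.
 */
struct blk {
	uintptr_t va_base;
	uintptr_t pa_base;
	unsigned int shift;
};

static uintptr_t blk_pa2va(const struct blk *b, uintptr_t pa)
{
	uintptr_t gran = (uintptr_t)1 << b->shift;
	uintptr_t match = ROUND_DOWN(pa, gran);

	if (match != b->pa_base)
		return 0;	/* pa not covered by this entry */

	/* Restore the offset of pa within the block */
	return b->va_base + (pa - match);
}

int main(void)
{
	struct blk b = { 0x40000000UL, 0x80200000UL, 21 }; /* 2 MiB block */

	printf("0x%lx\n", (unsigned long)blk_pa2va(&b, 0x803FF123UL));
	return 0;
}

Masking with ~(align - 1) is only valid for power-of-two granularities, which holds here because the granularity is always 1 << shift.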
Code example #3
File: core_mmu.c Project: lackan/optee_os
static void set_pg_region(struct core_mmu_table_info *dir_info,
			struct tee_mmap_region *region, struct pgt **pgt,
			struct core_mmu_table_info *pg_info)
{
	struct tee_mmap_region r = *region;
	vaddr_t end = r.va + r.size;
	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;

	while (r.va < end) {
		if (!pg_info->table ||
		     r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
			/*
			 * We're assigning a new translation table;
			 * each table covers CORE_MMU_PGDIR_SIZE of VA.
			 */
			unsigned int idx;

			assert(*pgt); /* We should have allocated enough */

			/* Virtual addresses must grow */
			assert(r.va > pg_info->va_base);

			idx = core_mmu_va2idx(dir_info, r.va);
			pg_info->table = (*pgt)->tbl;
			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
#ifdef CFG_PAGED_USER_TA
			assert((*pgt)->vabase == pg_info->va_base);
#endif
			*pgt = SLIST_NEXT(*pgt, link);

			core_mmu_set_entry(dir_info, idx,
					   virt_to_phys(pg_info->table),
					   pgt_attr);
		}

		/* Clamp the chunk to the current table or the region end */
		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
			     end - r.va);
		if (!(r.attr & TEE_MATTR_PAGED))
			set_region(pg_info, &r);
		r.va += r.size;
		r.pa += r.size;
	}
}
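
The reusable part of this function is the chunking loop: the region is consumed in pieces, each clamped by MIN() to whichever ends first, the current directory-sized window or the region itself, and a fresh translation table is attached whenever the virtual address crosses a window boundary. The standalone sketch below reproduces that walk with printf standing in for the real table plumbing; WINDOW_SIZE, walk_region and friends are illustrative names, with WINDOW_SIZE playing the role of CORE_MMU_PGDIR_SIZE.

#include <stdint.h>
#include <stdio.h>

#define WINDOW_SIZE	0x200000UL	/* stand-in for CORE_MMU_PGDIR_SIZE */
#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Walk [va, va + size) in chunks that never cross a window boundary */
static void walk_region(uintptr_t va, size_t size)
{
	uintptr_t end = va + size;
	uintptr_t win_base = 0;
	int have_win = 0;

	while (va < end) {
		size_t n;

		if (!have_win || va >= win_base + WINDOW_SIZE) {
			/* Crossing into a new window: attach a new table */
			win_base = va & ~(WINDOW_SIZE - 1);
			have_win = 1;
			printf("new window at 0x%lx\n",
			       (unsigned long)win_base);
		}

		/* Clamp to the window end or the region end */
		n = MIN(WINDOW_SIZE - (va - win_base), end - va);
		printf("  map 0x%lx..0x%lx\n", (unsigned long)va,
		       (unsigned long)(va + n));
		va += n;
	}
}

int main(void)
{
	/* Spans three 2 MiB windows */
	walk_region(0x40180000UL, 0x480000UL);
	return 0;
}

Run against a region spanning three windows, this attaches three tables and never emits a chunk that crosses a boundary, which is the invariant set_pg_region needs since each pgt covers only one pgdir's worth of VA.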