Example #1
TEE_Result tee_mmu_init(struct user_ta_ctx *utc)
{
	uint32_t asid = 1;

	if (!utc->context) {
		utc->context = 1;

		/*
		 * Find an available ASID: g_asid is a bitmask of free ASIDs,
		 * and utc->context ends up as the 1-based index of the bit
		 * that gets allocated.
		 */
		while (!(asid & g_asid) && (asid != 0)) {
			utc->context++;
			asid = asid << 1;
		}

		if (asid == 0) {
			DMSG("Failed to allocate ASID");
			return TEE_ERROR_GENERIC;
		}
		g_asid &= ~asid;
	}

	utc->mmu = calloc(1, sizeof(struct tee_mmu_info));
	if (!utc->mmu)
		return TEE_ERROR_OUT_OF_MEMORY;
	utc->mmu->table = calloc(TEE_MMU_UMAP_MAX_ENTRIES,
				 sizeof(struct tee_mmap_region));
	if (!utc->mmu->table)
		return TEE_ERROR_OUT_OF_MEMORY;
	utc->mmu->size = TEE_MMU_UMAP_MAX_ENTRIES;
	core_mmu_get_user_va_range(&utc->mmu->ta_private_vmem_start, NULL);
	return TEE_SUCCESS;
}
Example #2
static TEE_Result tee_mmu_umap_set_vas(struct tee_mmu_info *mmu)
{
	size_t n;
	vaddr_t va;
	vaddr_t va_range_base;
	size_t va_range_size;

	assert(mmu->table && mmu->size == TEE_MMU_UMAP_MAX_ENTRIES);

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
	va = va_range_base;
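	/*
	 * Entries below TEE_MMU_UMAP_PARAM_IDX map the TA code and data
	 * areas; lay them out back to back from the start of the user
	 * VA range.
	 */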
	for (n = 0; n < TEE_MMU_UMAP_PARAM_IDX; n++) {
		assert(mmu->table[n].size); /* PA must be assigned by now */
		mmu->table[n].va = va;
		va += ROUNDUP(mmu->table[n].size, CORE_MMU_USER_CODE_SIZE);
	}

	va = ROUNDUP(va, CORE_MMU_USER_PARAM_SIZE);
	for (; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
		if (!mmu->table[n].size)
			continue;
		mmu->table[n].va = va;
		va += mmu->table[n].size;
		/* Put some empty space between each area */
		va += CORE_MMU_USER_PARAM_SIZE;
		if ((va - va_range_base) >= va_range_size)
			return TEE_ERROR_EXCESS_DATA;
	}

	return TEE_SUCCESS;
}
Example #3
static TEE_Result tee_mmu_umap_set_vas(struct tee_mmu_info *mmu)
{
	const size_t granule = CORE_MMU_USER_PARAM_SIZE;
	vaddr_t va_range_base;
	vaddr_t va;
	size_t va_range_size;
	size_t n;

	/* Find last table entry used to map code and data */
	n = TEE_MMU_UMAP_PARAM_IDX - 1;
	while (n && !mmu->table[n].size)
		n--;
	va = mmu->table[n].va + mmu->table[n].size;
	assert(va);

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);
	assert(va_range_base == mmu->ta_private_vmem_start);

	/*
	 * Assign parameters in secure memory.
	 */
	va = ROUNDUP(va, granule);
	for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
		if (!mmu->table[n].size ||
		    !(mmu->table[n].attr & TEE_MATTR_SECURE))
			continue;
		mmu->table[n].va = va;
		va += mmu->table[n].size;
		/* Put some empty space between each area */
		va += granule;
		if ((va - va_range_base) >= va_range_size)
			return TEE_ERROR_EXCESS_DATA;
	}

	/*
	 * Assign parameters in nonsecure shared memory.
	 * Note that we're making sure that they will reside in a new page
	 * directory as they are to be mapped nonsecure.
	 */
	va = ROUNDUP(va, CORE_MMU_PGDIR_SIZE);
	for (n = TEE_MMU_UMAP_PARAM_IDX; n < TEE_MMU_UMAP_MAX_ENTRIES; n++) {
		if (!mmu->table[n].size ||
		    (mmu->table[n].attr & TEE_MATTR_SECURE))
			continue;
		mmu->table[n].va = va;
		va += mmu->table[n].size;
		/* Put some empty space between each area */
		va += granule;
		if ((va - va_range_base) >= va_range_size)
			return TEE_ERROR_EXCESS_DATA;
	}

	return TEE_SUCCESS;
}
Example #4
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t user_va_base;
	size_t user_va_size;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	core_mmu_get_user_va_range(&user_va_base, &user_va_size);
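	/*
	 * Addresses in the user VA range are checked against the mapping
	 * of the currently active user TA, if any.
	 */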
	if (v >= user_va_base && v < (user_va_base + user_va_size)) {
		if (!core_mmu_user_mapping_is_active()) {
			TEE_ASSERT(pa == 0);
			return;
		}

		res = tee_mmu_user_va2pa_helper(
			to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
		if (res == TEE_SUCCESS)
			TEE_ASSERT(pa == p);
		else
			TEE_ASSERT(pa == 0);
		return;
	}
#ifdef CFG_WITH_PAGER
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw areas while the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			TEE_ASSERT(pa == p);
		} else
			TEE_ASSERT(pa == 0);
		return;
	}
#endif
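	/* Anything else is expected to be resolved by the core memory map */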
	if (!core_va2pa_helper(va, &p))
		TEE_ASSERT(pa == p);
	else
		TEE_ASSERT(pa == 0);
}
Example #5
static uint64_t populate_user_map(struct tee_mmu_info *mmu)
{
	struct core_mmu_table_info tbl_info;
	unsigned n;
	struct tee_mmap_region region;
	vaddr_t va_range_base;
	size_t va_range_size;

	core_mmu_get_user_va_range(&va_range_base, &va_range_size);

	tbl_info.table = xlat_tables_ul1[thread_get_id()];
	tbl_info.va_base = va_range_base;
	tbl_info.level = 2;
	tbl_info.shift = L2_XLAT_ADDRESS_SHIFT;
	tbl_info.num_entries = XLAT_TABLE_ENTRIES;

	/* Clear the table before use */
	memset(tbl_info.table, 0, XLAT_TABLE_SIZE);

	/* Empty (unmapped) region used to fill gaps in the user VA range */
	region.pa = 0;
	region.va = va_range_base;
	region.attr = 0;

	for (n = 0; n < mmu->size; n++) {
		if (!mmu->table[n].size)
			continue;

		/* Empty mapping for gaps */
		region.size = mmu->table[n].va - region.va;
		set_region(&tbl_info, &region);

		set_region(&tbl_info, mmu->table + n);
		region.va = mmu->table[n].va + mmu->table[n].size;
		assert((region.va - va_range_base) <= va_range_size);
	}
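	/* Map whatever remains of the VA range as an empty region */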
	region.size = va_range_size - (region.va - va_range_base);
	set_region(&tbl_info, &region);

	return (uintptr_t)tbl_info.table | TABLE_DESC;
}
Example #6
void core_mmu_create_user_map(struct tee_mmu_info *mmu, uint32_t asid,
		struct core_mmu_user_map *map)
{

	COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);

	if (mmu) {
		struct core_mmu_table_info dir_info;
		vaddr_t va_range_base;
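		/* Each thread has its own translation table for user mappings */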
		void *tbl = xlat_tables_ul1[thread_get_id()];

		core_mmu_get_user_va_range(&va_range_base, NULL);
		core_mmu_set_info_table(&dir_info, 2, va_range_base, tbl);
		memset(tbl, 0, PGT_SIZE);
		core_mmu_populate_user_map(&dir_info, mmu);
		map->user_map = (paddr_t)dir_info.table | TABLE_DESC;
		map->asid = asid & TTBR_ASID_MASK;
	} else {
		map->user_map = 0;
		map->asid = 0;
	}
}
Example #7
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base;
		size_t user_va_size;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = tee_mmu_user_va2pa_helper(
				to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
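	/* Addresses in the linear map are identity mapped (VA equals PA) */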
	if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
		if (v != pa)
			panic("issue in linear address space");
		return;
	}
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, for
		 * instance rw-locked areas while the page is populated.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			if (pa != p)
				panic();
		} else
			if (pa)
				panic();
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p)) {
		if (pa != p)
			panic();
	} else {
		if (pa)
			panic();
	}
}