Esempio n. 1
0
/*
 * Fill in the user-space mapping described by @mmu under the directory
 * table @dir_info.  Page tables for the whole span of valid regions are
 * reserved up front from the current thread's pgt cache, then each
 * non-empty region is installed with set_pg_region().
 */
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
			struct tee_mmu_info *mmu)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t idx;
	size_t last;
	vaddr_t map_begin;
	vaddr_t map_end;

	if (!mmu->size)
		return;	/* Nothing to map */

	/* Locate the highest-indexed region with a non-zero size */
	last = mmu->size - 1;
	while (!mmu->table[last].size) {
		if (!last)
			return;	/* Nothing to map */
		last--;
	}

	/*
	 * Reserve every needed page table in advance, covering the
	 * pgdir-aligned span from the first region to the last one.
	 */
	map_begin = ROUNDDOWN(mmu->table[0].va, CORE_MMU_PGDIR_SIZE);
	map_end = ROUNDUP(mmu->table[last].va + mmu->table[last].size,
			  CORE_MMU_PGDIR_SIZE);
	pgt_alloc(pgt_cache, (map_end - map_begin) >> CORE_MMU_PGDIR_SHIFT);
	pgt = SLIST_FIRST(pgt_cache);

	/* Page-table level is one below the directory level */
	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	for (idx = 0; idx < mmu->size; idx++) {
		if (mmu->table[idx].size)
			set_pg_region(dir_info, &mmu->table[idx], &pgt,
				      &pg_info);
	}
}
Esempio n. 2
0
/*
 * Build the per-thread user mapping descriptor @map for @mmu/@asid.
 * A NULL @mmu yields an empty map (no user mapping, ASID 0).  Otherwise
 * the thread's level-1 translation table is cleared, populated from
 * @mmu, and its address (tagged as a table descriptor) plus the masked
 * ASID are stored in @map.
 */
void core_mmu_create_user_map(struct tee_mmu_info *mmu, uint32_t asid,
		struct core_mmu_user_map *map)
{
	struct core_mmu_table_info dir_info;
	vaddr_t va_base = 0;
	void *tbl = NULL;

	/* A pgt must hold exactly one translation table */
	COMPILE_TIME_ASSERT(sizeof(uint64_t) * XLAT_TABLE_ENTRIES == PGT_SIZE);

	if (!mmu) {
		/* No user context: leave the map empty */
		map->user_map = 0;
		map->asid = 0;
		return;
	}

	tbl = xlat_tables_ul1[thread_get_id()];
	core_mmu_get_user_va_range(&va_base, NULL);
	core_mmu_set_info_table(&dir_info, 2, va_base, tbl);
	memset(tbl, 0, PGT_SIZE);
	core_mmu_populate_user_map(&dir_info, mmu);
	map->user_map = (paddr_t)dir_info.table | TABLE_DESC;
	map->asid = asid & TTBR_ASID_MASK;
}
Esempio n. 3
0
/*
 * Fill in the user-space mapping of @utc under the directory table
 * @dir_info.  Page tables spanning the first through last valid region
 * are reserved up front from the current thread's pgt cache, then each
 * non-empty region is installed with set_pg_region().
 */
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_ta_ctx *utc)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t idx;
	size_t last;

	if (!utc->mmu->size)
		return;	/* Nothing to map */

	/* Locate the highest-indexed region with a non-zero size */
	last = utc->mmu->size - 1;
	while (!utc->mmu->table[last].size) {
		if (!last)
			return;	/* Nothing to map */
		last--;
	}

	/*
	 * Reserve every needed page table in advance, covering the
	 * inclusive virtual range of the first through last region.
	 */
	pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->table[0].va,
		  utc->mmu->table[last].va + utc->mmu->table[last].size - 1);
	pgt = SLIST_FIRST(pgt_cache);

	/* Page-table level is one below the directory level */
	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	for (idx = 0; idx < utc->mmu->size; idx++) {
		if (utc->mmu->table[idx].size)
			set_pg_region(dir_info, &utc->mmu->table[idx], &pgt,
				      &pg_info);
	}
}