Example #1
static void set_region(struct core_mmu_table_info *tbl_info,
		struct tee_mmap_region *region)
{
	unsigned int end;
	unsigned int idx;
	paddr_t pa;

	/* va, len and pa should be block aligned */
	assert(!core_mmu_get_block_offset(tbl_info, region->va));
	assert(!core_mmu_get_block_offset(tbl_info, region->size));
	assert(!core_mmu_get_block_offset(tbl_info, region->pa));

	idx = core_mmu_va2idx(tbl_info, region->va);
	end = core_mmu_va2idx(tbl_info, region->va + region->size);
	pa = region->pa;

	debug_print("set_region va %016" PRIxVA " pa %016" PRIxPA " size %016zu",
		region->va, region->pa, region->size);

	/* Install one block mapping per index; pa advances one block per entry */
	while (idx < end) {
		core_mmu_set_entry(tbl_info, idx, pa, region->attr);
		idx++;
		pa += 1 << tbl_info->shift;
	}
}
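A minimal caller sketch for set_region() follows, assuming a populated core_mmu_table_info whose block size divides the values used; the function name map_one_region, the addresses, the size and the attribute mix are illustrative assumptions, only the tee_mmap_region field names come from the code above.

/*
 * Hypothetical caller sketch (not from the original source). The
 * addresses, size and attributes are assumptions; they must be block
 * aligned for the asserts in set_region() to hold.
 */
static void map_one_region(struct core_mmu_table_info *tbl_info)
{
	struct tee_mmap_region r = {
		.va = 0x40000000,	/* illustrative, block aligned */
		.pa = 0x80000000,	/* illustrative, block aligned */
		.size = 0x00200000,	/* illustrative, block aligned */
		.attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW |
			TEE_MATTR_GLOBAL | TEE_MATTR_SECURE,
	};

	set_region(tbl_info, &r);
}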
Example #2
TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
{
	tee_mm_entry_t *mm;
	uint32_t attr;
	struct core_mmu_table_info tbl_info;
	uint32_t pa_s;
	uint32_t pa_e;
	size_t n;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	pa_s = ROUNDDOWN(pa, 1 << tbl_info.shift);
	pa_e = ROUNDUP(pa + len, 1 << tbl_info.shift);

	mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Read/write mapping; cache attributes depend on the buffer type */
	attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL;
	if (tee_pbuf_is_sec(pa, len)) {
		attr |= TEE_MATTR_SECURE;
		attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else if (tee_pbuf_is_non_sec(pa, len)) {
		if (core_mmu_is_shm_cached())
			attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else {
		/* Neither fully secure nor non-secure: release the reserved range */
		tee_mm_free(mm);
		return TEE_ERROR_GENERIC;
	}

	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs,
				   pa_s + (n << tbl_info.shift), attr);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*va = (void *)(tee_mm_get_smem(mm) +
		       core_mmu_get_block_offset(&tbl_info, pa));
	return TEE_SUCCESS;
}
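To make the rounding and offset arithmetic above concrete, here is a hedged sketch assuming a 1 MiB block size (shift of 20); the function name and the numeric values are illustrative assumptions.

/*
 * Illustrative sketch only (the 1 MiB block size and the values are
 * assumptions): shows how pa_s/pa_e widen the request to block
 * boundaries and which offset the returned pointer keeps.
 */
static void kmap_rounding_example(void)
{
	unsigned int shift = 20;		/* 1 MiB blocks, assumed */
	tee_paddr_t pa = 0x80123400;
	size_t len = 0x100;
	tee_paddr_t pa_s = ROUNDDOWN(pa, 1 << shift);		/* 0x80100000 */
	tee_paddr_t pa_e = ROUNDUP(pa + len, 1 << shift);	/* 0x80200000 */

	/*
	 * tee_mmu_kmap_helper() maps pa_e - pa_s bytes (0x100000 here) and
	 * the returned *va points pa - pa_s bytes (0x23400) into that window.
	 */
	(void)pa_s;
	(void)pa_e;
}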
Example #3
static void set_pg_region(struct core_mmu_table_info *dir_info,
			struct tee_mmap_region *region, struct pgt **pgt,
			struct core_mmu_table_info *pg_info)
{
	struct tee_mmap_region r = *region;
	vaddr_t end = r.va + r.size;
	uint32_t pgt_attr = (r.attr & TEE_MATTR_SECURE) | TEE_MATTR_TABLE;

	while (r.va < end) {
		if (!pg_info->table ||
		     r.va >= (pg_info->va_base + CORE_MMU_PGDIR_SIZE)) {
			/*
			 * We're assigning a new translation table.
			 */
			unsigned int idx;

			assert(*pgt); /* We should have allocated enough */

			/* Virtual addresses must grow */
			assert(r.va > pg_info->va_base);

			idx = core_mmu_va2idx(dir_info, r.va);
			pg_info->table = (*pgt)->tbl;
			pg_info->va_base = core_mmu_idx2va(dir_info, idx);
#ifdef CFG_PAGED_USER_TA
			assert((*pgt)->vabase == pg_info->va_base);
#endif
			*pgt = SLIST_NEXT(*pgt, link);

			core_mmu_set_entry(dir_info, idx,
					   virt_to_phys(pg_info->table),
					   pgt_attr);
		}

		/* Limit this chunk to what remains of the current table */
		r.size = MIN(CORE_MMU_PGDIR_SIZE - (r.va - pg_info->va_base),
			     end - r.va);
		if (!(r.attr & TEE_MATTR_PAGED))
			set_region(pg_info, &r);
		r.va += r.size;
		r.pa += r.size;
	}
}
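set_pg_region() consumes one pgt entry per CORE_MMU_PGDIR_SIZE window the region crosses, which is what the "allocated enough" assertion relies on; a hedged sizing sketch follows (the helper name is an assumption, not part of the original file).

/*
 * Hypothetical sizing helper (assumption): number of page tables
 * set_pg_region() will consume for a region, i.e. the number of
 * CORE_MMU_PGDIR_SIZE windows the region touches.
 */
static size_t pgt_count_needed(vaddr_t va, size_t size)
{
	vaddr_t begin = ROUNDDOWN(va, CORE_MMU_PGDIR_SIZE);
	vaddr_t end = ROUNDUP(va + size, CORE_MMU_PGDIR_SIZE);

	return (end - begin) / CORE_MMU_PGDIR_SIZE;
}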
Example #4
void tee_mmu_kunmap(void *va, size_t len)
{
	size_t n;
	tee_mm_entry_t *mm;
	struct core_mmu_table_info tbl_info;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	mm = tee_mm_find(&tee_mmu_virt_kmap, (vaddr_t)va);
	if (!mm || len > tee_mm_get_bytes(mm))
		return;		/* Invalid range, not much to do */

	/* Clear the mmu entries */
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs, 0, 0);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	tee_mm_free(mm);
}
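A hedged end-to-end sketch pairing Example #2 and Example #4 (the function name and the memcpy-based use are assumptions): map a physical buffer into the kmap window, copy its contents out, then release the mapping.

/*
 * Hypothetical usage sketch (assumption, not from the original source).
 */
static TEE_Result read_phys_buf(tee_paddr_t pa, void *dst, size_t len)
{
	void *va = NULL;
	TEE_Result res;

	res = tee_mmu_kmap_helper(pa, len, &va);
	if (res != TEE_SUCCESS)
		return res;

	memcpy(dst, va, len);	/* needs <string.h> */
	tee_mmu_kunmap(va, len);
	return TEE_SUCCESS;
}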