void tee_mmu_set_map(struct tee_mmu_mapping *map)
{
	if (map == NULL)
		tee_mmu_switch(read_ttbr1(), 0);
	else
		tee_mmu_switch(map->ttbr0, map->ctxid);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
}
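/*
 * Usage sketch (an assumption, not part of this file): switching to the
 * kernel-only mapping for a critical section and then restoring the
 * caller's user mapping. The 'saved' structure is assumed to have been
 * filled in earlier by a counterpart not shown in this excerpt;
 * tee_mmu_set_map(NULL) falls back to the TTBR1-backed tables with
 * ASID 0, as in the function above.
 */
static void run_with_kernel_only_map(struct tee_mmu_mapping *saved,
				     void (*fn)(void))
{
	tee_mmu_set_map(NULL);		/* kernel-only translation tables */
	fn();				/* work done without a user mapping */
	tee_mmu_set_map(saved);		/* restore saved ttbr0/ctxid pair */
}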
TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
{
	tee_mm_entry_t *mm;
	uint32_t attr;
	struct core_mmu_table_info tbl_info;
	uint32_t pa_s;
	uint32_t pa_e;
	size_t n;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	/* Expand the range to the translation table's block granularity */
	pa_s = ROUNDDOWN(pa, 1 << tbl_info.shift);
	pa_e = ROUNDUP(pa + len, 1 << tbl_info.shift);

	mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Pick mapping attributes based on whether the buffer is secure */
	attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL;
	if (tee_pbuf_is_sec(pa, len)) {
		attr |= TEE_MATTR_SECURE;
		attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else if (tee_pbuf_is_non_sec(pa, len)) {
		if (core_mmu_is_shm_cached())
			attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else {
		return TEE_ERROR_GENERIC;
	}

	/* Write one table entry per block in the allocated VA range */
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs,
				   pa_s + (n << tbl_info.shift), attr);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	*va = (void *)(tee_mm_get_smem(mm) +
		       core_mmu_get_block_offset(&tbl_info, pa));
	return TEE_SUCCESS;
}
void tee_mmu_kunmap(void *va, size_t len)
{
	size_t n;
	tee_mm_entry_t *mm;
	struct core_mmu_table_info tbl_info;
	size_t offs;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	mm = tee_mm_find(&tee_mmu_virt_kmap, (vaddr_t)va);
	if (mm == NULL || len > tee_mm_get_bytes(mm))
		return;		/* Invalid range, not much to do */

	/* Clear the mmu entries */
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs, 0, 0);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);
	tee_mm_free(mm);
}
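/*
 * Minimal usage sketch (an assumption, not part of this file): pairing
 * tee_mmu_kmap_helper() with tee_mmu_kunmap() to read one word from a
 * physical address. Error handling is reduced to propagating the result
 * code; the physical address is supplied by the caller.
 */
static TEE_Result read_word_at_pa(tee_paddr_t pa, uint32_t *val)
{
	void *va = NULL;
	TEE_Result res;

	res = tee_mmu_kmap_helper(pa, sizeof(uint32_t), &va);
	if (res != TEE_SUCCESS)
		return res;

	*val = *(volatile uint32_t *)va;	/* access through the new VA */

	tee_mmu_kunmap(va, sizeof(uint32_t));
	return TEE_SUCCESS;
}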