static void *phys_to_virt_tee_ram(paddr_t pa)
{
	struct core_mmu_table_info *ti = &tee_pager_tbl_info;
	unsigned idx;
	unsigned end_idx;
	uint32_t a;
	paddr_t p;

	if (pa >= CFG_TEE_LOAD_ADDR && pa < get_linear_map_end())
		return (void *)(vaddr_t)pa;

	end_idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START +
				      CFG_TEE_RAM_VA_SIZE);
	/* Most addresses are mapped linearly, try that first if possible. */
	idx = core_mmu_va2idx(ti, pa);
	if (idx >= core_mmu_va2idx(ti, CFG_TEE_RAM_START) &&
	    idx < end_idx) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	for (idx = core_mmu_va2idx(ti, CFG_TEE_RAM_START);
	     idx < end_idx; idx++) {
		core_mmu_get_entry(ti, idx, &p, &a);
		if ((a & TEE_MATTR_VALID_BLOCK) && p == pa)
			return (void *)core_mmu_idx2va(ti, idx);
	}

	return NULL;
}
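/*
 * Sketch of a call site for the helper above, assuming the upstream
 * OP-TEE pattern where phys_to_virt() dispatches on the memory type.
 * The names phys_to_virt(), phys_to_virt_generic() and the
 * MEM_AREA_TEE_RAM enumerator follow that pattern but are assumptions
 * here, not taken from this section.
 */
void *phys_to_virt(paddr_t pa, enum teecore_memtypes m)
{
	switch (m) {
#ifdef CFG_WITH_PAGER
	case MEM_AREA_TEE_RAM:
		/* TEE RAM may be paged, so use the dedicated lookup */
		return phys_to_virt_tee_ram(pa);
#endif
	default:
		/* Other areas are expected to be mapped linearly */
		return phys_to_virt_generic(pa, m);
	}
}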
static TEE_Result tee_mmu_kmap_va2pa_attr(void *va, void **pa, uint32_t *attr)
{
	struct core_mmu_table_info tbl_info;
	size_t block_offset;
	size_t n;
	paddr_t npa;
	uint32_t nattr;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	if (!tee_mm_addr_is_within_range(&tee_mmu_virt_kmap, (vaddr_t)va))
		return TEE_ERROR_ACCESS_DENIED;

	n = core_mmu_va2idx(&tbl_info, (vaddr_t)va);
	core_mmu_get_entry(&tbl_info, n, &npa, &nattr);
	if (!(nattr & TEE_MATTR_VALID_BLOCK))
		return TEE_ERROR_ACCESS_DENIED;

	block_offset = core_mmu_get_block_offset(&tbl_info, (vaddr_t)va);
	*pa = (void *)(npa + block_offset);

	if (attr)
		*attr = nattr;

	return TEE_SUCCESS;
}
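/*
 * Minimal sketch of the public wrapper, assuming the usual pattern of
 * exposing the static _attr variant without the attribute out-parameter.
 * tee_mmu_kmap_va2pa_helper() is an assumed name, not defined above.
 */
TEE_Result tee_mmu_kmap_va2pa_helper(void *va, void **pa)
{
	/* NULL attr: the caller only wants the physical address */
	return tee_mmu_kmap_va2pa_attr(va, pa, NULL);
}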
TEE_Result tee_mmu_kmap_pa2va_helper(void *pa, void **va)
{
	size_t n;
	struct core_mmu_table_info tbl_info;
	size_t shift;
	paddr_t match_pa;

	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	shift = tbl_info.shift;
	match_pa = ROUNDDOWN((paddr_t)pa, 1 << shift);

	for (n = core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_START_VA);
	     n < core_mmu_va2idx(&tbl_info, TEE_MMU_KMAP_END_VA); n++) {
		uint32_t attr;
		paddr_t npa;

		core_mmu_get_entry(&tbl_info, n, &npa, &attr);
		if (!(attr & TEE_MATTR_VALID_BLOCK))
			continue;
		assert(!(attr & TEE_MATTR_TABLE));

		if (npa == match_pa) {
			*va = (void *)(core_mmu_idx2va(&tbl_info, n) +
				       ((paddr_t)pa - match_pa));
			return TEE_SUCCESS;
		}
	}

	return TEE_ERROR_ACCESS_DENIED;
}
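/*
 * Hypothetical usage sketch (not part of the tree): translate a kmapped
 * physical address to its VA and verify the reverse translation agrees.
 * kmap_round_trip_check() is an invented name for illustration only; it
 * reuses tee_mmu_kmap_va2pa_attr() from above for the reverse lookup.
 */
static TEE_Result kmap_round_trip_check(paddr_t pa)
{
	void *va = NULL;
	void *pa2 = NULL;
	TEE_Result res;

	res = tee_mmu_kmap_pa2va_helper((void *)pa, &va);
	if (res != TEE_SUCCESS)
		return res;

	res = tee_mmu_kmap_va2pa_attr(va, &pa2, NULL);
	if (res != TEE_SUCCESS)
		return res;

	/* Both directions must agree on the mapping */
	return ((paddr_t)pa2 == pa) ? TEE_SUCCESS : TEE_ERROR_GENERIC;
}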
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t user_va_base;
	size_t user_va_size;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	core_mmu_get_user_va_range(&user_va_base, &user_va_size);
	if (v >= user_va_base && v < (user_va_base + user_va_size)) {
		if (!core_mmu_user_mapping_is_active()) {
			TEE_ASSERT(pa == 0);
			return;
		}

		res = tee_mmu_user_va2pa_helper(
			to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
		if (res == TEE_SUCCESS)
			TEE_ASSERT(pa == p);
		else
			TEE_ASSERT(pa == 0);
		return;
	}
#ifdef CFG_WITH_PAGER
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe, rw areas
		 * when the page is populated for instance.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			TEE_ASSERT(pa == p);
		} else
			TEE_ASSERT(pa == 0);
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p))
		TEE_ASSERT(pa == p);
	else
		TEE_ASSERT(pa == 0);
}
static void check_pa_matches_va(void *va, paddr_t pa)
{
	TEE_Result res;
	vaddr_t v = (vaddr_t)va;
	paddr_t p = 0;

	if (core_mmu_user_va_range_is_defined()) {
		vaddr_t user_va_base;
		size_t user_va_size;

		core_mmu_get_user_va_range(&user_va_base, &user_va_size);
		if (v >= user_va_base &&
		    v <= (user_va_base - 1 + user_va_size)) {
			if (!core_mmu_user_mapping_is_active()) {
				if (pa)
					panic("issue in linear address space");
				return;
			}

			res = tee_mmu_user_va2pa_helper(
				to_user_ta_ctx(tee_mmu_get_ctx()), va, &p);
			if (res == TEE_SUCCESS && pa != p)
				panic("bad pa");
			if (res != TEE_SUCCESS && pa)
				panic("false pa");
			return;
		}
	}
#ifdef CFG_WITH_PAGER
	if (v >= CFG_TEE_LOAD_ADDR && v < get_linear_map_end()) {
		if (v != pa)
			panic("issue in linear address space");
		return;
	}
	if (v >= (CFG_TEE_LOAD_ADDR & ~CORE_MMU_PGDIR_MASK) &&
	    v <= (CFG_TEE_LOAD_ADDR | CORE_MMU_PGDIR_MASK)) {
		struct core_mmu_table_info *ti = &tee_pager_tbl_info;
		uint32_t a;

		/*
		 * Lookups in the page table managed by the pager are
		 * dangerous for addresses in the paged area as those pages
		 * change all the time. But some ranges are safe,
		 * rw-locked areas when the page is populated for instance.
		 */
		core_mmu_get_entry(ti, core_mmu_va2idx(ti, v), &p, &a);
		if (a & TEE_MATTR_VALID_BLOCK) {
			paddr_t mask = ((1 << ti->shift) - 1);

			p |= v & mask;
			if (pa != p)
				panic();
		} else if (pa)
			panic();
		return;
	}
#endif
	if (!core_va2pa_helper(va, &p)) {
		if (pa != p)
			panic();
	} else {
		if (pa)
			panic();
	}
}
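/*
 * Sketch of the intended call site, assuming the upstream pattern:
 * virt_to_phys() performs the real translation via core_va2pa_helper()
 * and then runs check_pa_matches_va() purely as a consistency check.
 * The exact virt_to_phys() signature is an assumption here.
 */
paddr_t virt_to_phys(void *va)
{
	paddr_t pa;

	if (core_va2pa_helper(va, &pa))
		pa = 0;	/* No mapping found */
	check_pa_matches_va(va, pa);
	return pa;
}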