void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct tee_mmu_info *mmu)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t n;
	vaddr_t base;
	vaddr_t end;

	if (!mmu->size)
		return;	/* Nothing to map */

	/* Find the last valid entry */
	n = mmu->size;
	while (true) {
		n--;
		if (mmu->table[n].size)
			break;
		if (!n)
			return;	/* Nothing to map */
	}

	/*
	 * Allocate all page tables in advance.
	 */
	base = ROUNDDOWN(mmu->table[0].va, CORE_MMU_PGDIR_SIZE);
	end = ROUNDUP(mmu->table[n].va + mmu->table[n].size,
		      CORE_MMU_PGDIR_SIZE);
	pgt_alloc(pgt_cache, (end - base) >> CORE_MMU_PGDIR_SHIFT);
	pgt = SLIST_FIRST(pgt_cache);

	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	for (n = 0; n < mmu->size; n++) {
		if (!mmu->table[n].size)
			continue;
		set_pg_region(dir_info, mmu->table + n, &pg_info, &pgt, &pg_info);
	}
}
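A minimal standalone sketch of the page-table count arithmetic behind the pgt_alloc() call above. The 2 MiB CORE_MMU_PGDIR_SIZE, the local ROUNDDOWN/ROUNDUP stand-ins, and the example addresses are assumptions for illustration, not values taken from the OP-TEE headers.

/* Sketch only: mirrors the (end - base) >> CORE_MMU_PGDIR_SHIFT expression. */
#include <stdint.h>
#include <stdio.h>

#define CORE_MMU_PGDIR_SHIFT	21			/* assumed: 2 MiB per page table */
#define CORE_MMU_PGDIR_SIZE	(1UL << CORE_MMU_PGDIR_SHIFT)
#define ROUNDDOWN(x, a)		((x) & ~((a) - 1))
#define ROUNDUP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Hypothetical user VA range: first region starts at 0x101000,
	 * last region ends at 0x4ff000. */
	uintptr_t first_va = 0x101000;
	uintptr_t last_end = 0x4ff000;

	uintptr_t base = ROUNDDOWN(first_va, CORE_MMU_PGDIR_SIZE);	/* 0x000000 */
	uintptr_t end = ROUNDUP(last_end, CORE_MMU_PGDIR_SIZE);		/* 0x600000 */

	/* Number of 2 MiB page-directory slots spanned by the mapping. */
	printf("page tables needed: %zu\n",
	       (size_t)((end - base) >> CORE_MMU_PGDIR_SHIFT));	/* 3 */
	return 0;
}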
void core_mmu_populate_user_map(struct core_mmu_table_info *dir_info,
				struct user_ta_ctx *utc)
{
	struct core_mmu_table_info pg_info;
	struct pgt_cache *pgt_cache = &thread_get_tsd()->pgt_cache;
	struct pgt *pgt;
	size_t n;

	if (!utc->mmu->size)
		return;	/* Nothing to map */

	/* Find the last valid entry */
	n = utc->mmu->size;
	while (true) {
		n--;
		if (utc->mmu->table[n].size)
			break;
		if (!n)
			return;	/* Nothing to map */
	}

	/*
	 * Allocate all page tables in advance.
	 */
	pgt_alloc(pgt_cache, &utc->ctx, utc->mmu->table[0].va,
		  utc->mmu->table[n].va + utc->mmu->table[n].size - 1);
	pgt = SLIST_FIRST(pgt_cache);

	core_mmu_set_info_table(&pg_info, dir_info->level + 1, 0, NULL);

	for (n = 0; n < utc->mmu->size; n++) {
		if (!utc->mmu->table[n].size)
			continue;
		set_pg_region(dir_info, utc->mmu->table + n, &pgt, &pg_info);
	}
}
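For contrast with the exclusive end address computed in the previous version, a small sketch of the inclusive last-byte address this version hands to pgt_alloc(). The 2 MiB page-directory size and the example region are assumptions for illustration only.

/* Sketch only: shows why `va + size - 1` and `va + size` can land in
 * different page-directory slots when a region ends on a boundary. */
#include <stdint.h>
#include <stdio.h>

#define CORE_MMU_PGDIR_SHIFT	21	/* assumed: 2 MiB per page table */

int main(void)
{
	/* Hypothetical region that ends exactly on a pgdir boundary. */
	uintptr_t va = 0x3ff000;
	size_t size = 0x1000;		/* region covers [0x3ff000, 0x400000) */

	/* The exclusive end falls into the next (unused) pgdir slot ... */
	printf("exclusive end -> slot %zu\n",
	       (size_t)((va + size) >> CORE_MMU_PGDIR_SHIFT));		/* 2 */
	/* ... while the last mapped byte stays in the slot actually used. */
	printf("last byte     -> slot %zu\n",
	       (size_t)((va + size - 1) >> CORE_MMU_PGDIR_SHIFT));	/* 1 */
	return 0;
}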