void teecore_init_ta_ram(void)
{
	vaddr_t s;
	vaddr_t e;
	paddr_t ps;
	paddr_t pe;

	/* get virtual addr/size of RAM where TAs are loaded/executed */
	core_mmu_get_mem_by_type(MEM_AREA_TA_RAM, &s, &e);
	ps = virt_to_phys((void *)s);
	TEE_ASSERT(ps);
	pe = virt_to_phys((void *)(e - 1)) + 1;
	TEE_ASSERT(pe);

	/* TA RAM must be aligned on the user code granularity */
	TEE_ASSERT((ps & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);
	TEE_ASSERT((pe & (CORE_MMU_USER_CODE_SIZE - 1)) == 0);

	/* extra check: we could rely on core_mmu_get_mem_by_type() */
	TEE_ASSERT(tee_pbuf_is_sec(ps, pe - ps) == true);

	TEE_ASSERT(tee_mm_is_empty(&tee_mm_sec_ddr));

	/* remove previous config and init TA ddr memory pool */
	tee_mm_final(&tee_mm_sec_ddr);
	tee_mm_init(&tee_mm_sec_ddr, ps, pe, CORE_MMU_USER_CODE_SHIFT,
		    TEE_MM_POOL_NO_FLAGS);
}
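/*
 * A minimal usage sketch, not part of the code above: once
 * teecore_init_ta_ram() has initialized tee_mm_sec_ddr, a loader can carve
 * physical TA memory out of the pool. example_carve_ta_mem() is a
 * hypothetical helper; tee_mm_alloc(), tee_mm_get_smem() and tee_mm_free()
 * are the pool primitives already used with tee_mm_sec_ddr here.
 */
static paddr_t example_carve_ta_mem(size_t size)
{
	tee_mm_entry_t *mm;

	/* Reserves a range rounded up to CORE_MMU_USER_CODE_SIZE granules */
	mm = tee_mm_alloc(&tee_mm_sec_ddr, size);
	if (!mm)
		return 0;

	/* Physical start of the reserved range; tee_mm_free(mm) releases it */
	return tee_mm_get_smem(mm);
}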
TEE_Result tee_mmu_kmap_helper(tee_paddr_t pa, size_t len, void **va)
{
	tee_mm_entry_t *mm;
	uint32_t attr;
	struct core_mmu_table_info tbl_info;
	uint32_t pa_s;
	uint32_t pa_e;
	size_t n;
	size_t offs;

	/* Locate the translation table covering the kernel map window */
	if (!core_mmu_find_table(TEE_MMU_KMAP_START_VA, UINT_MAX, &tbl_info))
		panic();

	/* Expand the request to whole translation-table entries */
	pa_s = ROUNDDOWN(pa, 1 << tbl_info.shift);
	pa_e = ROUNDUP(pa + len, 1 << tbl_info.shift);

	/* Reserve a virtual range in the kmap pool */
	mm = tee_mm_alloc(&tee_mmu_virt_kmap, pa_e - pa_s);
	if (!mm)
		return TEE_ERROR_OUT_OF_MEMORY;

	/* Kernel read/write mapping; cacheability depends on buffer type */
	attr = TEE_MATTR_VALID_BLOCK | TEE_MATTR_PRW | TEE_MATTR_GLOBAL;
	if (tee_pbuf_is_sec(pa, len)) {
		attr |= TEE_MATTR_SECURE;
		attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else if (tee_pbuf_is_non_sec(pa, len)) {
		if (core_mmu_is_shm_cached())
			attr |= TEE_MATTR_I_WRITE_BACK | TEE_MATTR_O_WRITE_BACK;
	} else {
		/* Neither fully secure nor fully non-secure: reject, and
		 * free the reservation so the virtual range isn't leaked */
		tee_mm_free(mm);
		return TEE_ERROR_GENERIC;
	}

	/* Write one table entry per block covered by the reservation */
	offs = (tee_mm_get_smem(mm) - tbl_info.va_base) >> tbl_info.shift;
	for (n = 0; n < tee_mm_get_size(mm); n++)
		core_mmu_set_entry(&tbl_info, n + offs,
				   pa_s + (n << tbl_info.shift), attr);

	core_tlb_maintenance(TLBINV_UNIFIEDTLB, 0);

	/* Return a pointer adjusted for pa's offset within the first block */
	*va = (void *)(tee_mm_get_smem(mm) +
		       core_mmu_get_block_offset(&tbl_info, pa));
	return TEE_SUCCESS;
}
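/*
 * A minimal usage sketch, assuming only the helper above: map a physical
 * word into the kernel kmap window, read it, and note where the range
 * would be released. example_peek_phys() is hypothetical; the matching
 * unmap routine is outside this excerpt, so it is only referenced in a
 * comment.
 */
static TEE_Result example_peek_phys(tee_paddr_t pa, uint32_t *out)
{
	void *va = NULL;
	TEE_Result res;

	res = tee_mmu_kmap_helper(pa, sizeof(uint32_t), &va);
	if (res != TEE_SUCCESS)
		return res;

	/* va already includes pa's offset within the mapped block */
	*out = *(uint32_t *)va;

	/*
	 * A matching kunmap-style helper would clear the entries, flush the
	 * TLB and free the tee_mmu_virt_kmap reservation here.
	 */
	return TEE_SUCCESS;
}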