void teecore_init_pub_ram(void) { vaddr_t s; vaddr_t e; unsigned int nsec_tee_size = 32 * 1024; /* get virtual addr/size of NSec shared mem allcated from teecore */ core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e); TEE_ASSERT(s < e); TEE_ASSERT((s & SMALL_PAGE_MASK) == 0); TEE_ASSERT((e & SMALL_PAGE_MASK) == 0); /* extra check: we could rely on core_mmu_get_mem_by_type() */ TEE_ASSERT(tee_vbuf_is_non_sec(s, e - s) == true); /* * 32kByte first bytes are allocated from teecore. * Remaining is under control of the NSec allocator. */ TEE_ASSERT((e - s) > nsec_tee_size); TEE_ASSERT(tee_mm_is_empty(&tee_mm_pub_ddr)); tee_mm_final(&tee_mm_pub_ddr); tee_mm_init(&tee_mm_pub_ddr, s, s + nsec_tee_size, SMALL_PAGE_SHIFT, TEE_MM_POOL_NO_FLAGS); s += nsec_tee_size; default_nsec_shm_paddr = s; default_nsec_shm_size = e - s; }
/*
 * load_header() - copy a signed TA header from non-secure shared memory
 * into secure memory.
 *
 * @signed_ta:	pointer (into NSec shared memory) to the signed header
 * @sec_shdr:	output; on TEE_SUCCESS receives a malloc()ed secure copy
 *		that the caller must free()
 *
 * Returns TEE_SUCCESS, TEE_ERROR_SECURITY if the buffer is not entirely
 * non-secure or its size fields are inconsistent, or
 * TEE_ERROR_OUT_OF_MEMORY on allocation failure.
 *
 * Security note: NSec memory can be mutated concurrently by the normal
 * world, so every value read from it must be treated as untrusted and
 * re-validated once copied into secure memory (double-fetch defense).
 */
static TEE_Result load_header(const struct shdr *signed_ta,
			      struct shdr **sec_shdr)
{
	size_t s;

	if (!tee_vbuf_is_non_sec(signed_ta, sizeof(*signed_ta)))
		return TEE_ERROR_SECURITY;

	s = SHDR_GET_SIZE(signed_ta);
	/* Reject a size smaller than the fixed header itself */
	if (s < sizeof(*signed_ta))
		return TEE_ERROR_SECURITY;
	if (!tee_vbuf_is_non_sec(signed_ta, s))
		return TEE_ERROR_SECURITY;

	/* Copy signed header into secure memory */
	*sec_shdr = malloc(s);
	if (!*sec_shdr)
		return TEE_ERROR_OUT_OF_MEMORY;
	memcpy(*sec_shdr, signed_ta, s);

	/*
	 * Double-fetch defense: the size fields may have changed in NSec
	 * memory between the SHDR_GET_SIZE() above and the memcpy(). Only
	 * trust the size recomputed from the secure copy.
	 */
	if (SHDR_GET_SIZE(*sec_shdr) != s) {
		free(*sec_shdr);
		*sec_shdr = NULL;
		return TEE_ERROR_SECURITY;
	}

	return TEE_SUCCESS;
}
void teecore_init_pub_ram(void) { vaddr_t s; vaddr_t e; /* get virtual addr/size of NSec shared mem allcated from teecore */ core_mmu_get_mem_by_type(MEM_AREA_NSEC_SHM, &s, &e); TEE_ASSERT(s < e); TEE_ASSERT((s & SMALL_PAGE_MASK) == 0); TEE_ASSERT((e & SMALL_PAGE_MASK) == 0); /* extra check: we could rely on core_mmu_get_mem_by_type() */ TEE_ASSERT(tee_vbuf_is_non_sec(s, e - s) == true); #ifdef CFG_PL310 /* Allocate statically the l2cc mutex */ TEE_ASSERT((e - s) > 0); tee_l2cc_store_mutex_boot_pa(s); s += sizeof(uint32_t); /* size of a pl310 mutex */ #endif default_nsec_shm_paddr = virt_to_phys((void *)s); default_nsec_shm_size = e - s; }