/* teecore heap address/size is defined in scatter file */ extern unsigned char teecore_heap_start; extern unsigned char teecore_heap_end; static void main_fiq(void); static void main_tee_entry_std(struct thread_smc_args *args); static void main_tee_entry_fast(struct thread_smc_args *args); static const struct thread_handlers handlers = { .std_smc = main_tee_entry_std, .fast_smc = main_tee_entry_fast, .fiq = main_fiq, .svc = tee_svc_handler, .abort = tee_pager_abort_handler, .cpu_on = pm_panic, .cpu_off = pm_panic, .cpu_suspend = pm_panic, .cpu_resume = pm_panic, .system_off = pm_panic, .system_reset = pm_panic, }; void main_init(uint32_t nsec_entry); /* called from assembly only */ void main_init(uint32_t nsec_entry) { struct sm_nsec_ctx *nsec_ctx; size_t pos = get_core_pos(); /* * Mask IRQ and FIQ before switch to the thread vector as the * thread handler requires IRQ and FIQ to be masked while executing * with the temporary stack. The thread subsystem also asserts that * IRQ is blocked when using most if its functions. */ thread_mask_exceptions(THREAD_EXCP_FIQ | THREAD_EXCP_IRQ); if (pos == 0) { thread_init_primary(&handlers); /* initialize platform */ platform_init(); } thread_init_per_cpu(); /* Initialize secure monitor */ nsec_ctx = sm_get_nsec_ctx(); nsec_ctx->mon_lr = nsec_entry; nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I; if (pos == 0) { unsigned long a, s; /* core malloc pool init */ #ifdef CFG_TEE_MALLOC_START a = CFG_TEE_MALLOC_START; s = CFG_TEE_MALLOC_SIZE; #else a = (unsigned long)&teecore_heap_start; s = (unsigned long)&teecore_heap_end; a = ((a + 1) & ~0x0FFFF) + 0x10000; /* 64kB aligned */ s = s & ~0x0FFFF; /* 64kB aligned */ s = s - a; #endif malloc_add_pool((void *)a, s); teecore_init_ta_ram(); if (init_teecore() != TEE_SUCCESS) { panic(); } } IMSG("optee initialize finished\n"); }
/*
 * init_runtime() - boot-time runtime setup, non-pager build with ASan.
 * @pageable_part: unused here; only the pager variant of this function
 *                 consumes the pageable image part.
 *
 * Order matters: the boot thread must exist before anything that uses
 * thread context, and init_asan() runs before the heap pool is added
 * (presumably so the pool is covered by the ASan shadow — confirm
 * against init_asan()'s implementation).
 */
static void init_runtime(unsigned long pageable_part __unused)
{
	thread_init_boot_thread();

	init_asan();

	/* Hand the linker-defined heap region to the core allocator */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * Initialized at this stage in the pager version of this function
	 * above
	 */
	teecore_init_ta_ram();

	/* Start console output on a fresh line */
	IMSG_RAW("\n");
}
/*
 * init_runtime() - boot-time runtime setup, non-pager build.
 * @pageable_part: unused here; only the pager variant of this function
 *                 consumes the pageable image part.
 *
 * Clears BSS, creates the boot thread context and registers the
 * linker-defined heap with the core allocator, then sets up TA RAM.
 */
static void init_runtime(uint32_t pageable_part __unused)
{
	/*
	 * Zero BSS area. Note that globals that would normally would go
	 * into BSS which are used before this has to be put into .nozi.*
	 * to avoid getting overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	thread_init_boot_thread();

	/* Hand the linker-defined heap region to the core allocator */
	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);

	/*
	 * Initialized at this stage in the pager version of this function
	 * above
	 */
	teecore_init_ta_ram();
}
/*
 * init_runtime() - boot-time runtime setup, pager build.
 * @pageable_part: physical address where the bootloader placed the
 *                 pageable (non-init) part of the image — TODO confirm
 *                 physical vs. already-mapped against the caller.
 *
 * Copies the whole pageable image into a backing store in secure DDR,
 * verifies every page against the linked-in SHA-256 hash table (panics
 * on mismatch), then carves up the core virtual address space and hands
 * the pageable range to the pager. Statement order is load-bearing
 * throughout: BSS must be zeroed before anything touches globals, TA RAM
 * must exist before the tee_mm_sec_ddr allocation, and the trailing
 * partial init page must be copied before paging is enabled because no
 * faults can be taken yet.
 */
static void init_runtime(uint32_t pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	/* One SHA-256 hash per small page of the pageable area */
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;
	size_t block_size;

	TEE_ASSERT(pageable_size % SMALL_PAGE_SIZE == 0);
	TEE_ASSERT(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * Zero BSS area. Note that globals that would normally would go
	 * into BSS which are used before this has to be put into .nozi.*
	 * to avoid getting overwritten.
	 */
	memset(__bss_start, 0, __bss_end - __bss_start);

	thread_init_boot_thread();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	/*
	 * Keep a private copy of the hash table: the original lives in a
	 * temporary area that is reclaimed/overwritten below.
	 */
	hashes = malloc(hash_size);
	/* NOTE(review): informational message logged at error level */
	EMSG("hash_size %zu", hash_size);
	TEE_ASSERT(hashes);
	memcpy(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	/* Backing store in secure DDR for the entire pageable area */
	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	TEE_ASSERT(mm);
	paged_store = (uint8_t *)tee_mm_get_smem(mm);
	/* Copy init part into pageable area */
	memcpy(paged_store, __init_start, init_size);
	/* Copy pageable part after init part into pageable area */
	memcpy(paged_store + init_size, (void *)pageable_part,
	       __pageable_part_end - __pageable_part_start);

	/* Check that hashes of what's in pageable area is OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, page, res);
			panic();
		}
	}

	/*
	 * Copy what's not initialized in the last init page. Needed
	 * because we're not going to fault in the init pages again. We
	 * can't fault in pages until we've switched to the new vector by
	 * calling thread_init_handlers() below.
	 */
	if (init_size % SMALL_PAGE_SIZE) {
		uint8_t *p;

		memcpy(__init_start + init_size, paged_store + init_size,
		       SMALL_PAGE_SIZE - (init_size % SMALL_PAGE_SIZE));

		/* Page-align the address for the cache operations below */
		p = (uint8_t *)(((vaddr_t)__init_start + init_size) &
				~SMALL_PAGE_MASK);

		/*
		 * The patched page may later be executed: clean it out of
		 * the D-cache and invalidate the stale I-cache contents.
		 */
		cache_maintenance_l1(DCACHE_AREA_CLEAN, p, SMALL_PAGE_SIZE);
		cache_maintenance_l1(ICACHE_AREA_INVALIDATE, p,
				     SMALL_PAGE_SIZE);
	}

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	block_size = get_block_size();
	if (!tee_mm_init(&tee_mm_vcore,
			ROUNDDOWN(CFG_TEE_RAM_START, block_size),
			ROUNDUP(CFG_TEE_RAM_START + CFG_TEE_RAM_VA_SIZE,
				block_size),
			SMALL_PAGE_SHIFT, 0))
		panic();

	/*
	 * Assign alias area for pager end of the small page block the rest
	 * of the binary is loaded into. We're taking more than needed, but
	 * we're guaranteed to not need more than the physical amount of
	 * TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	TEE_ASSERT(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged, note that there might be
	 * a gap between tee_mm_vcore.lo and TEE_RAM_START which is also
	 * claimed to avoid later allocations to get that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, tee_mm_vcore.lo,
			(vaddr_t)(__text_init_start - tee_mm_vcore.lo));
	TEE_ASSERT(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	TEE_ASSERT(mm);
	if (!tee_pager_add_area(mm, TEE_PAGER_AREA_RO | TEE_PAGER_AREA_X,
				paged_store, hashes))
		panic();
	/* Init part: already resident, registered as unlocked-but-mapped */
	tee_pager_add_pages((vaddr_t)__pageable_start,
			    ROUNDUP(init_size, SMALL_PAGE_SIZE) /
				SMALL_PAGE_SIZE,
			    false);
	/* Remainder: handed to the pager as initially-unmapped pages */
	tee_pager_add_pages((vaddr_t)__pageable_start +
				ROUNDUP(init_size, SMALL_PAGE_SIZE),
			    (pageable_size -
				ROUNDUP(init_size, SMALL_PAGE_SIZE)) /
				SMALL_PAGE_SIZE,
			    true);
}
/*
 * init_runtime() - boot-time runtime setup, pager build with ASan.
 * @pageable_part: physical address where the bootloader placed the
 *                 pageable (non-init) part of the image; translated
 *                 with phys_to_virt() before use.
 *
 * Copies the whole pageable image into a backing store in TA RAM,
 * verifies every page against the linked-in SHA-256 hash table (panics
 * on mismatch), then carves up the core virtual address space and hands
 * the pageable range to the pager. Statement order is load-bearing:
 * tee_pager_early_init() must precede address lookups in
 * MEM_AREA_TEE_RAM, init_asan() precedes heap-pool registration, and
 * TA RAM must be set up before allocating from tee_mm_sec_ddr.
 */
static void init_runtime(unsigned long pageable_part)
{
	size_t n;
	size_t init_size = (size_t)__init_size;
	size_t pageable_size = __pageable_end - __pageable_start;
	/* One SHA-256 hash per small page of the pageable area */
	size_t hash_size = (pageable_size / SMALL_PAGE_SIZE) *
			   TEE_SHA256_HASH_SIZE;
	tee_mm_entry_t *mm;
	uint8_t *paged_store;
	uint8_t *hashes;

	assert(pageable_size % SMALL_PAGE_SIZE == 0);
	assert(hash_size == (size_t)__tmp_hashes_size);

	/*
	 * This needs to be initialized early to support address lookup
	 * in MEM_AREA_TEE_RAM
	 */
	tee_pager_early_init();

	thread_init_boot_thread();

	init_asan();

	malloc_add_pool(__heap1_start, __heap1_end - __heap1_start);
	malloc_add_pool(__heap2_start, __heap2_end - __heap2_start);

	/*
	 * Keep a private copy of the hash table: the original lives in a
	 * temporary area that is reclaimed/overwritten below.
	 */
	hashes = malloc(hash_size);
	IMSG_RAW("\n");
	IMSG("Pager is enabled. Hashes: %zu bytes", hash_size);
	assert(hashes);
	asan_memcpy_unchecked(hashes, __tmp_hashes_start, hash_size);

	/*
	 * Need tee_mm_sec_ddr initialized to be able to allocate secure
	 * DDR below.
	 */
	teecore_init_ta_ram();

	/* Reserve the ASan shadow region so it can't be handed out */
	carve_out_asan_mem(&tee_mm_sec_ddr);

	/* Backing store in TA RAM for the entire pageable area */
	mm = tee_mm_alloc(&tee_mm_sec_ddr, pageable_size);
	assert(mm);
	paged_store = phys_to_virt(tee_mm_get_smem(mm), MEM_AREA_TA_RAM);
	/*
	 * Load pageable part in the dedicated allocated area:
	 * - Move pageable non-init part into pageable area. Note bootloader
	 *   may have loaded it anywhere in TA RAM hence use memmove().
	 * - Copy pageable init part from current location into pageable
	 *   area.
	 */
	memmove(paged_store + init_size,
		phys_to_virt(pageable_part,
			     core_mmu_get_type_by_pa(pageable_part)),
		__pageable_part_end - __pageable_part_start);
	asan_memcpy_unchecked(paged_store, __init_start, init_size);

	/* Check that hashes of what's in pageable area is OK */
	DMSG("Checking hashes of pageable area");
	for (n = 0; (n * SMALL_PAGE_SIZE) < pageable_size; n++) {
		const uint8_t *hash = hashes + n * TEE_SHA256_HASH_SIZE;
		const uint8_t *page = paged_store + n * SMALL_PAGE_SIZE;
		TEE_Result res;

		DMSG("hash pg_idx %zu hash %p page %p", n, hash, page);
		res = hash_sha256_check(hash, page, SMALL_PAGE_SIZE);
		if (res != TEE_SUCCESS) {
			EMSG("Hash failed for page %zu at %p: res 0x%x",
			     n, page, res);
			panic();
		}
	}

	/*
	 * Assert prepaged init sections are page aligned so that nothing
	 * trails uninited at the end of the premapped init area.
	 */
	assert(!(init_size & SMALL_PAGE_MASK));

	/*
	 * Initialize the virtual memory pool used for main_mmu_l2_ttb which
	 * is supplied to tee_pager_init() below.
	 */
	init_vcore(&tee_mm_vcore);

	/*
	 * Assign alias area for pager end of the small page block the rest
	 * of the binary is loaded into. We're taking more than needed, but
	 * we're guaranteed to not need more than the physical amount of
	 * TZSRAM.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore,
			   (vaddr_t)tee_mm_vcore.hi - TZSRAM_SIZE,
			   TZSRAM_SIZE);
	assert(mm);
	tee_pager_set_alias_area(mm);

	/*
	 * Claim virtual memory which isn't paged.
	 * Linear memory (flat map core memory) ends there.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, VCORE_UNPG_RX_PA,
			   (vaddr_t)(__pageable_start - VCORE_UNPG_RX_PA));
	assert(mm);

	/*
	 * Allocate virtual memory for the pageable area and let the pager
	 * take charge of all the pages already assigned to that memory.
	 */
	mm = tee_mm_alloc2(&tee_mm_vcore, (vaddr_t)__pageable_start,
			   pageable_size);
	assert(mm);
	tee_pager_add_core_area(tee_mm_get_smem(mm), tee_mm_get_bytes(mm),
				TEE_MATTR_PRX, paged_store, hashes);

	/* Init part: already resident, registered as currently mapped */
	tee_pager_add_pages((vaddr_t)__pageable_start,
			    init_size / SMALL_PAGE_SIZE, false);
	/* Remainder: handed to the pager as initially-unmapped pages */
	tee_pager_add_pages((vaddr_t)__pageable_start + init_size,
			    (pageable_size - init_size) / SMALL_PAGE_SIZE,
			    true);

	/*
	 * There may be physical pages in TZSRAM before the core load
	 * address. These pages can be added to the physical pages pool of
	 * the pager. This setup may happen when the secure bootloader runs
	 * in TZRAM and its memory can be reused by OP-TEE once boot stages
	 * complete.
	 */
	tee_pager_add_pages(tee_mm_vcore.lo,
			    (VCORE_UNPG_RX_PA - tee_mm_vcore.lo) /
				SMALL_PAGE_SIZE,
			    true);
}