static void init_primary_helper(uint32_t pageable_part, uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switching to the thread
	 * vector, as the thread handler requires them to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that IRQ is blocked when using most of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	init_vfp_sec();

	init_runtime(pageable_part);

	IMSG("Initializing (%s)\n", core_v_str);

	thread_init_primary(generic_boot_get_handlers());
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	main_init_gic();
	init_vfp_nsec();

	if (init_teecore() != TEE_SUCCESS)
		panic();
	DMSG("Primary CPU switching to normal world boot\n");
}
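/*
 * Hedged sketch of what init_sec_mon() above plausibly does: record
 * where, and in which mode, the secure monitor should enter the normal
 * world when it first returns there. It is modeled on the inline
 * monitor setup in the main_init() variant further down; the
 * sm_nsec_ctx layout and the sm_get_nsec_ctx() stand-in here are
 * assumptions, not the real API.
 */
#include <stdint.h>

#define CPSR_MODE_SVC	0x13u		/* AArch32 SVC mode bits */
#define CPSR_I		(1u << 7)	/* IRQ mask bit */

struct sm_nsec_ctx {
	uint32_t mon_lr;	/* non-secure entry address */
	uint32_t mon_spsr;	/* PSR to restore on monitor exit */
};

/* Stand-in for the per-CPU non-secure context slot */
static struct sm_nsec_ctx nsec_ctx_storage;

static struct sm_nsec_ctx *sm_get_nsec_ctx(void)
{
	return &nsec_ctx_storage;
}

static void init_sec_mon_sketch(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx = sm_get_nsec_ctx();

	/* Enter normal world at nsec_entry, in SVC mode with IRQ masked */
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;
}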
static void main_tee_entry_fast(struct thread_smc_args *args)
{
	/* TODO move to main_init() */
	if (init_teecore() != TEE_SUCCESS)
		panic();

	/* SiP Service Call Count */
	if (args->a0 == TEESMC32_SIP_SUNXI_CALLS_COUNT) {
		args->a0 = 1;
		return;
	}

	/* SiP Service Call UID */
	if (args->a0 == TEESMC32_SIP_SUNXI_CALLS_UID) {
		args->a0 = TEESMC_SIP_SUNXI_UID_R0;
		args->a1 = TEESMC_SIP_SUNXI_UID_R1;
		args->a2 = TEESMC_SIP_SUNXI_UID_R2;
		args->a3 = TEESMC_SIP_SUNXI_UID_R3;
		return;
	}

	/* SiP Service Calls */
	if (args->a0 == TEESMC32_OPTEE_FAST_CALL_SIP_SUNXI) {
		platform_smc_handle(args);
		return;
	}

	tee_entry_fast(args);
}
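/*
 * Hedged, standalone sketch of the SiP discovery handshake the
 * fast-call handler above implements: a caller first asks how many
 * SiP calls exist, then reads a 128-bit interface UID spread across
 * a0..a3. The function IDs and UID words below are placeholders, not
 * the real TEESMC32_SIP_SUNXI_* values.
 */
#include <stdint.h>
#include <stdio.h>

struct smc_args {
	uint32_t a0, a1, a2, a3;
};

#define SIP_CALLS_COUNT	0x8200ff00u	/* placeholder fast-call ID */
#define SIP_CALLS_UID	0x8200ff01u	/* placeholder fast-call ID */

/* Stand-in for issuing a fast SMC; results come back in a0..a3 */
static void fake_fast_smc(struct smc_args *args)
{
	if (args->a0 == SIP_CALLS_COUNT) {
		args->a0 = 1;			/* one SiP service call */
	} else if (args->a0 == SIP_CALLS_UID) {
		args->a0 = 0x11111111;		/* placeholder UID words */
		args->a1 = 0x22222222;
		args->a2 = 0x33333333;
		args->a3 = 0x44444444;
	}
}

int main(void)
{
	struct smc_args args = { .a0 = SIP_CALLS_COUNT };

	fake_fast_smc(&args);
	printf("SiP calls implemented: %u\n", (unsigned)args.a0);

	args = (struct smc_args){ .a0 = SIP_CALLS_UID };
	fake_fast_smc(&args);
	printf("SiP UID: %08x-%08x-%08x-%08x\n", (unsigned)args.a0,
	       (unsigned)args.a1, (unsigned)args.a2, (unsigned)args.a3);
	return 0;
}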
static void main_tee_entry_std(struct thread_smc_args *args)
{
	/* TODO move to main_init() */
	if (init_teecore() != TEE_SUCCESS)
		panic();

	tee_entry_std(args);
}
static void main_init_helper(bool is_primary, size_t pos, uint32_t nsec_entry)
{
	/*
	 * Mask external Abort, IRQ and FIQ before switching to the thread
	 * vector, as the thread handler requires external Abort, IRQ and
	 * FIQ to be masked while executing with the temporary stack. The
	 * thread subsystem also asserts that IRQ is blocked when using
	 * most of its functions.
	 */
	write_cpsr(read_cpsr() | CPSR_FIA);

	if (is_primary) {
		uintptr_t bss_start = (uintptr_t)&__bss_start;
		uintptr_t bss_end = (uintptr_t)&__bss_end;
		size_t n;

		/* Initialize UART with physical address */
		uart_init(CONSOLE_UART_BASE);

		/*
		 * Zero BSS area. Note that globals that would normally go
		 * into BSS but are used before this point have to be put
		 * into .nozi.* to avoid getting overwritten.
		 */
		memset((void *)bss_start, 0, bss_end - bss_start);

		DMSG("TEE initializing\n");

		/* Initialize canaries around the stacks */
		init_canaries();

		/* Assign the thread stacks */
		for (n = 0; n < NUM_THREADS; n++) {
			if (!thread_init_stack(n, GET_STACK(stack_thread[n])))
				panic();
		}
	}

	if (!thread_init_stack(THREAD_TMP_STACK, GET_STACK(stack_tmp[pos])))
		panic();
	if (!thread_init_stack(THREAD_ABT_STACK, GET_STACK(stack_abt[pos])))
		panic();

	thread_init_handlers(&handlers);
	main_init_sec_mon(pos, nsec_entry);

	if (is_primary) {
		main_init_gic();
		if (init_teecore() != TEE_SUCCESS)
			panic();
		DMSG("Primary CPU switching to normal world boot\n");
	} else {
		DMSG("Secondary CPU switching to normal world boot\n");
	}
}
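/*
 * Hedged, standalone illustration of the stack-canary idea behind
 * init_canaries() above: plant a magic word just outside each end of a
 * stack buffer and re-check it later to detect over- or underflow.
 * CANARY_VALUE, the buffer layout and the function names are
 * assumptions, not the real OP-TEE implementation.
 */
#include <assert.h>
#include <stdint.h>

#define STACK_WORDS	256
#define CANARY_VALUE	0xdededede	/* assumed magic pattern */

/* One guard word at each end of the stack storage */
static uint32_t stack_buf[1 + STACK_WORDS + 1];

static void init_canary(void)
{
	stack_buf[0] = CANARY_VALUE;
	stack_buf[1 + STACK_WORDS] = CANARY_VALUE;
}

static void check_canary(void)
{
	/* A clobbered guard word means the stack was overrun */
	assert(stack_buf[0] == CANARY_VALUE);
	assert(stack_buf[1 + STACK_WORDS] == CANARY_VALUE);
}

int main(void)
{
	init_canary();
	/* ... the stack would be used here ... */
	check_canary();
	return 0;
}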
/* teecore heap address/size is defined in the scatter file */
extern unsigned char teecore_heap_start;
extern unsigned char teecore_heap_end;

static void main_fiq(void);
static void main_tee_entry_std(struct thread_smc_args *args);
static void main_tee_entry_fast(struct thread_smc_args *args);

static const struct thread_handlers handlers = {
	.std_smc = main_tee_entry_std,
	.fast_smc = main_tee_entry_fast,
	.fiq = main_fiq,
	.svc = tee_svc_handler,
	.abort = tee_pager_abort_handler,
	.cpu_on = pm_panic,
	.cpu_off = pm_panic,
	.cpu_suspend = pm_panic,
	.cpu_resume = pm_panic,
	.system_off = pm_panic,
	.system_reset = pm_panic,
};

void main_init(uint32_t nsec_entry); /* called from assembly only */
void main_init(uint32_t nsec_entry)
{
	struct sm_nsec_ctx *nsec_ctx;
	size_t pos = get_core_pos();

	/*
	 * Mask IRQ and FIQ before switching to the thread vector, as the
	 * thread handler requires IRQ and FIQ to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that IRQ is blocked when using most of its functions.
	 */
	thread_mask_exceptions(THREAD_EXCP_FIQ | THREAD_EXCP_IRQ);

	if (pos == 0) {
		thread_init_primary(&handlers);

		/* Initialize the platform */
		platform_init();
	}

	thread_init_per_cpu();

	/* Initialize the secure monitor */
	nsec_ctx = sm_get_nsec_ctx();
	nsec_ctx->mon_lr = nsec_entry;
	nsec_ctx->mon_spsr = CPSR_MODE_SVC | CPSR_I;

	if (pos == 0) {
		unsigned long a, s;

		/* Core malloc pool init */
#ifdef CFG_TEE_MALLOC_START
		a = CFG_TEE_MALLOC_START;
		s = CFG_TEE_MALLOC_SIZE;
#else
		a = (unsigned long)&teecore_heap_start;
		s = (unsigned long)&teecore_heap_end;
		a = ((a + 1) & ~0x0FFFF) + 0x10000; /* round start up to 64 kB */
		s = s & ~0x0FFFF;	/* round end down to 64 kB */
		s = s - a;		/* pool size */
#endif
		malloc_add_pool((void *)a, s);

		teecore_init_ta_ram();

		if (init_teecore() != TEE_SUCCESS)
			panic();
	}

	IMSG("OP-TEE initialization finished\n");
}
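/*
 * Hedged, standalone demo of the 64 kB alignment arithmetic in the
 * !CFG_TEE_MALLOC_START path above. Note that the round-up expression
 * always advances: for an already 64 kB aligned heap start it skips a
 * full 64 kB granule. The sample addresses below are made up.
 */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long starts[] = { 0x40123456ul, 0x40100000ul };
	unsigned long end = 0x40200000ul;

	for (int i = 0; i < 2; i++) {
		unsigned long a = starts[i];
		unsigned long s = end;

		a = ((a + 1) & ~0x0FFFFul) + 0x10000;	/* round start up */
		s = s & ~0x0FFFFul;			/* round end down */
		s = s - a;				/* pool size */

		/* Both the pool base and size end up 64 kB aligned */
		assert((a & 0xFFFF) == 0 && (s & 0xFFFF) == 0);
		printf("start 0x%lx -> pool [0x%lx, +0x%lx)\n",
		       starts[i], a, s);
	}
	return 0;
}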
static void main_tee_entry(struct thread_smc_args *args)
{
	/*
	 * This function first catches all ST specific SMC functions;
	 * if none matches, the generic tee_entry() is called.
	 */

	/* TODO move to main_init() */
	if (init_teecore() != TEE_SUCCESS)
		panic();

	if (args->a0 == TEESMC32_ST_FASTCALL_GET_SHM_CONFIG) {
		args->a0 = TEESMC_RETURN_OK;
		args->a1 = default_nsec_shm_paddr;
		args->a2 = default_nsec_shm_size;
		/* Should this be TEESMC cache attributes instead? */
		args->a3 = core_mmu_is_shm_cached();
		return;
	}

	if (args->a0 == TEESMC32_ST_FASTCALL_L2CC_MUTEX) {
		switch (args->a1) {
		case TEESMC_ST_L2CC_MUTEX_GET_ADDR:
		case TEESMC_ST_L2CC_MUTEX_SET_ADDR:
		case TEESMC_ST_L2CC_MUTEX_ENABLE:
		case TEESMC_ST_L2CC_MUTEX_DISABLE:
			/* TODO call the appropriate internal functions */
			args->a0 = TEESMC_RETURN_UNKNOWN_FUNCTION;
			return;
		default:
			args->a0 = TEESMC_RETURN_EBADCMD;
			return;
		}
	}

	tee_entry(args);
}