/*
 * One-time boot initialization run on the primary CPU before it hands
 * control to the normal world.
 *
 * @pageable_part: passed through to init_runtime(); presumably the
 *                 physical address of the pageable part of the image —
 *                 TODO confirm against the caller.
 * @nsec_entry:    passed through to init_sec_mon(); presumably the
 *                 non-secure world entry point — TODO confirm.
 *
 * NOTE(review): the call order below looks deliberate (runtime and
 * thread subsystem before GIC/VFP-nsec, core init last) — do not
 * reorder without checking the init dependencies.
 */
static void init_primary_helper(uint32_t pageable_part, uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switch to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that IRQ is blocked when using most of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	init_vfp_sec();
	init_runtime(pageable_part);
	IMSG("Initializing (%s)\n", core_v_str);
	thread_init_primary(generic_boot_get_handlers());
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	main_init_gic();
	init_vfp_nsec();
	/* Abort boot entirely if core initialization fails. */
	if (init_teecore() != TEE_SUCCESS)
		panic();
	DMSG("Primary CPU switching to normal world boot\n");
}
/*
 * Per-CPU boot initialization run on each secondary CPU before it hands
 * control to the normal world. Unlike the primary path, this performs
 * no runtime/core initialization — only per-CPU setup.
 *
 * @nsec_entry: passed through to init_sec_mon(); presumably the
 *              non-secure world entry point — TODO confirm.
 */
static void init_secondary_helper(uint32_t nsec_entry)
{
	/*
	 * Mask asynchronous exceptions before switch to the thread vector
	 * as the thread handler requires those to be masked while
	 * executing with the temporary stack. The thread subsystem also
	 * asserts that IRQ is blocked when using most of its functions.
	 */
	thread_set_exceptions(THREAD_EXCP_ALL);
	thread_init_per_cpu();
	init_sec_mon(nsec_entry);
	init_vfp_sec();
	init_vfp_nsec();
	DMSG("Secondary CPU Switching to normal world boot\n");
}
/*
 * Perform the requested outer (PL310) L2 cache maintenance operation.
 *
 * @op:  one of the L2CACHE_* operation codes; anything else yields
 *       TEE_ERROR_NOT_IMPLEMENTED.
 * @pa:  start physical address, used only by the *_AREA_* operations.
 * @len: length in bytes for the *_AREA_* operations; a zero length is
 *       treated as a successful no-op. The end address handed to the
 *       arm_cl2_*bypa() helpers is pa + len - 1, i.e. inclusive —
 *       presumably what those helpers expect; verify against their
 *       definitions.
 *
 * Returns TEE_SUCCESS, or TEE_ERROR_NOT_IMPLEMENTED for an unknown op.
 *
 * IRQs are masked and the L2CC mutex is held for the duration of the
 * operation; both are restored/released before returning.
 */
unsigned int cache_maintenance_l2(int op, paddr_t pa, size_t len)
{
	unsigned int res = TEE_SUCCESS;
	uint32_t excep = thread_mask_exceptions(THREAD_EXCP_IRQ);

	tee_l2cc_mutex_lock();

	if (op == L2CACHE_INVALIDATE) {
		arm_cl2_invbyway(pl310_base());
	} else if (op == L2CACHE_AREA_INVALIDATE) {
		if (len)
			arm_cl2_invbypa(pl310_base(), pa, pa + len - 1);
	} else if (op == L2CACHE_CLEAN) {
		arm_cl2_cleanbyway(pl310_base());
	} else if (op == L2CACHE_AREA_CLEAN) {
		if (len)
			arm_cl2_cleanbypa(pl310_base(), pa, pa + len - 1);
	} else if (op == L2CACHE_CLEAN_INV) {
		arm_cl2_cleaninvbyway(pl310_base());
	} else if (op == L2CACHE_AREA_CLEAN_INV) {
		if (len)
			arm_cl2_cleaninvbypa(pl310_base(), pa, pa + len - 1);
	} else {
		res = TEE_ERROR_NOT_IMPLEMENTED;
	}

	tee_l2cc_mutex_unlock();
	thread_set_exceptions(excep);

	return res;
}