/*
 * This helper function handles Secure EL1 preemption. The preemption could be
 * due to Non-secure interrupts or EL3 interrupts. In both cases we context
 * switch to the normal world, and in the case of EL3 interrupts the interrupt
 * will again be routed to EL3 and handled at the exception vectors.
 */
uint64_t tspd_handle_sp_preemption(void *handle)
{
    cpu_context_t *ns_cpu_context;

    assert(handle == cm_get_context(SECURE));
    cm_el1_sysregs_context_save(SECURE);

    /* Get a reference to the non-secure context */
    ns_cpu_context = cm_get_context(NON_SECURE);
    assert(ns_cpu_context);

    /*
     * To allow the Secure EL1 interrupt handler to re-enter the TSP while
     * the TSP is preempted, the secure system register context which will
     * get overwritten must be additionally saved. This is currently done
     * by the TSPD S-EL1 interrupt handler.
     */

    /*
     * Restore non-secure state.
     */
    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);

    /*
     * The TSP was preempted during STD SMC execution.
     * Return to the normal world with SMC_PREEMPTED as the error
     * code in x0.
     */
    SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}
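/*
 * Illustrative sketch only (not from the original sources): how a
 * normal-world client is expected to consume SMC_PREEMPTED. The smc64()
 * wrapper and its argument layout are assumptions for the example; the
 * TSP_FID_RESUME handling it relies on appears in tspd_smc_handler()
 * later in this collection.
 */
static uint64_t ns_invoke_std_smc(uint32_t fid, uint64_t x1, uint64_t x2)
{
    uint64_t ret = smc64(fid, x1, x2, 0);   /* hypothetical SMC wrapper */

    /* A preempted STD SMC must be resumed until it runs to completion. */
    while (ret == SMC_PREEMPTED)
        ret = smc64(TSP_FID_RESUME, 0, 0, 0);

    return ret;
}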
/*******************************************************************************
 * This function takes an SP context pointer and performs a synchronous entry
 * into it.
 ******************************************************************************/
uint64_t spm_sp_synchronous_entry(sp_context_t *sp_ctx, int can_preempt)
{
    uint64_t rc;
    unsigned int linear_id = plat_my_core_pos();

    assert(sp_ctx != NULL);

    /* Assign the context of the SP to this CPU */
    spm_cpu_set_sp_ctx(linear_id, sp_ctx);
    cm_set_context(&(sp_ctx->cpu_ctx), SECURE);

    /* Restore the context assigned above */
    cm_el1_sysregs_context_restore(SECURE);
    cm_set_next_eret_context(SECURE);

    /* Invalidate TLBs at EL1 */
    tlbivmalle1();
    dsbish();

    if (can_preempt == 1) {
        enable_intr_rm_local(INTR_TYPE_NS, SECURE);
    } else {
        disable_intr_rm_local(INTR_TYPE_NS, SECURE);
    }

    /* Enter Secure Partition */
    rc = spm_secure_partition_enter(&sp_ctx->c_rt_ctx);

    /* Save secure state */
    cm_el1_sysregs_context_save(SECURE);

    return rc;
}
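/*
 * Sketch of the matching synchronous exit path, under the assumption that
 * spm_secure_partition_exit() unwinds to the C runtime frame saved by
 * spm_secure_partition_enter() above, and that spm_cpu_get_sp_ctx() is the
 * getter matching spm_cpu_set_sp_ctx(). Control then resumes just after
 * the spm_secure_partition_enter() call, which returns 'rc'.
 */
__dead2 void spm_sp_synchronous_exit(uint64_t rc)
{
    sp_context_t *ctx = spm_cpu_get_sp_ctx(plat_my_core_pos());

    /* Unwind to the saved C runtime frame; this call does not return. */
    spm_secure_partition_exit(ctx->c_rt_ctx, rc);
}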
/*******************************************************************************
 * This function programs EL3 registers and performs other setup to enable entry
 * into the next image after BL31 at the next ERET.
 ******************************************************************************/
void bl31_prepare_next_image_entry(void)
{
    el_change_info_t *next_image_info;
    uint32_t scr, image_type;

    /* Determine which image to execute next */
    image_type = bl31_get_next_image_type();

    /*
     * Set up the minimal architectural state of the next highest EL to
     * allow execution in it immediately upon entering it.
     */
    bl31_next_el_arch_setup(image_type);

    /* Program EL3 registers to enable entry into the next EL */
    next_image_info = bl31_get_next_image_info(image_type);
    assert(next_image_info);

    scr = read_scr();
    if (image_type == NON_SECURE)
        scr |= SCR_NS_BIT;

    /*
     * Tell the context mgmt. library to ensure that SP_EL3 points to
     * the right context to exit from EL3 correctly.
     */
    cm_set_el3_eret_context(next_image_info->security_state,
                            next_image_info->entrypoint,
                            next_image_info->spsr, scr);

    /* Finally set the next context */
    cm_set_next_eret_context(next_image_info->security_state);
}
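/*
 * For illustration only: the 'spsr' consumed above is typically built by
 * the platform with the SPSR_64() macro seen elsewhere in these listings,
 * e.g. for a 64-bit non-secure image entered at EL2 with SP_ELx:
 *
 *     uint32_t spsr = SPSR_64(MODE_EL2, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
 */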
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the
 * OPTEED. It validates the interrupt and upon success arranges entry into
 * the OPTEE at 'optee_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t opteed_sel1_interrupt_handler(uint32_t id,
                                              uint32_t flags,
                                              void *handle,
                                              void *cookie)
{
    uint32_t linear_id;
    optee_context_t *optee_ctx;

    /* Check the security state when the exception was generated */
    assert(get_interrupt_src_ss(flags) == NON_SECURE);

    /* Sanity check the pointer to this cpu's context */
    assert(handle == cm_get_context(NON_SECURE));

    /* Save the non-secure context before entering the OPTEE */
    cm_el1_sysregs_context_save(NON_SECURE);

    /* Get a reference to this cpu's OPTEE context */
    linear_id = plat_my_core_pos();
    optee_ctx = &opteed_sp_context[linear_id];
    assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

    cm_set_elr_el3(SECURE, (uint64_t)&optee_vectors->fiq_entry);
    cm_el1_sysregs_context_restore(SECURE);
    cm_set_next_eret_context(SECURE);

    /*
     * Tell the OPTEE that it has to handle an FIQ (synchronously).
     * The address of the instruction in the normal world where the
     * interrupt was generated is also passed for debugging purposes.
     * It is safe to retrieve this address from ELR_EL3 as the secure
     * context will not take effect until el3_exit().
     */
    SMC_RET1(&optee_ctx->cpu_ctx, read_elr_el3());
}
/*******************************************************************************
 * BL31 is responsible for setting up the runtime services for the primary cpu
 * before passing control to the bootloader or an Operating System. This
 * function calls runtime_svc_init() which initializes all registered runtime
 * services. The runtime services set up enough context for the core to
 * switch to the next exception level. When this function returns, the core
 * will switch to the programmed exception level via an ERET.
 ******************************************************************************/
void bl31_main(void)
{
#if DEBUG
    unsigned long mpidr = read_mpidr();
#endif

    /* Perform remaining generic architectural setup from EL3 */
    bl31_arch_setup();

    /* Perform platform setup in BL31 */
    bl31_platform_setup();

    printf("BL31 %s\n\r", build_message);

    /* Initialise helper libraries */
    bl31_lib_init();

    /* Initialize the runtime services e.g. psci */
    runtime_svc_init();

    /* Clean caches before re-entering normal world */
    dcsw_op_all(DCCSW);

    /*
     * Use the more complex exception vectors now that context
     * management is set up. SP_EL3 should point to a 'cpu_context'
     * structure which has an exception stack allocated. The PSCI
     * service should have set the context.
     */
    assert(cm_get_context(mpidr, NON_SECURE));
    cm_set_next_eret_context(NON_SECURE);
    cm_init_pcpu_ptr_cache();
    write_vbar_el3((uint64_t) runtime_exceptions);
    isb();
    next_image_type = NON_SECURE;

    /*
     * All the cold boot actions on the primary cpu are done. We now need to
     * decide which is the next image (BL32 or BL33) and how to execute it.
     * If the SPD runtime service is present, it would want to pass control
     * to BL32 first in S-EL1. In that case, the SPD would have registered a
     * function to initialize BL32 where it takes responsibility for entering
     * S-EL1 and returning control back to bl31_main. Once this is done we
     * can prepare entry into BL33 as normal.
     */

    /*
     * If the SPD has registered an init hook, invoke it. Pass it the
     * information about memory extents.
     */
    if (bl32_init)
        (*bl32_init)(bl31_plat_get_bl32_mem_layout());

    /*
     * We are ready to enter the next EL. Prepare entry into the image
     * corresponding to the desired security state after the next ERET.
     */
    bl31_prepare_next_image_entry();
}
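/*
 * A minimal sketch of how bl32_init gets populated, assuming a
 * registration helper such as TF-A's bl31_register_bl32_init(): an SPD's
 * setup routine records its init hook during runtime_svc_init(), and
 * bl31_main() then invokes it through the bl32_init pointer checked
 * above. tspd_setup_sketch() and the use of tspd_init here are
 * illustrative, not the original code.
 */
static int32_t tspd_setup_sketch(void)
{
    /* ... locate and validate the BL32 image ... */
    bl31_register_bl32_init(&tspd_init);
    return 0;
}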
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the TSPD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_sel1_intr_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t tspd_sel1_interrupt_handler(uint32_t id,
                                            uint32_t flags,
                                            void *handle,
                                            void *cookie)
{
    uint32_t linear_id;
    tsp_context_t *tsp_ctx;

    /* Check the security state when the exception was generated */
    assert(get_interrupt_src_ss(flags) == NON_SECURE);

    /* Sanity check the pointer to this cpu's context */
    assert(handle == cm_get_context(NON_SECURE));

    /* Save the non-secure context before entering the TSP */
    cm_el1_sysregs_context_save(NON_SECURE);

    /* Get a reference to this cpu's TSP context */
    linear_id = plat_my_core_pos();
    tsp_ctx = &tspd_sp_context[linear_id];
    assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

    /*
     * Determine if the TSP was previously preempted. Its last known
     * context has to be preserved in this case.
     * The TSP should return control to the TSPD after handling this
     * S-EL1 interrupt. Preserve essential EL3 context to allow entry into
     * the TSP at the S-EL1 interrupt entry point using the 'cpu_context'
     * structure. There is no need to save the secure system register
     * context since the TSP is supposed to preserve it during S-EL1
     * interrupt handling.
     */
    if (get_std_smc_active_flag(tsp_ctx->state)) {
        tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
                                              CTX_SPSR_EL3);
        tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
                                             CTX_ELR_EL3);
#if TSP_NS_INTR_ASYNC_PREEMPT
        /* Need to save the previously interrupted secure context */
        memcpy(&tsp_ctx->sp_ctx, &tsp_ctx->cpu_ctx, TSPD_SP_CTX_SIZE);
#endif
    }

    cm_el1_sysregs_context_restore(SECURE);
    cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->sel1_intr_entry,
                        SPSR_64(MODE_EL1, MODE_SP_ELX,
                                DISABLE_ALL_EXCEPTIONS));

    cm_set_next_eret_context(SECURE);

    /*
     * Tell the TSP that it has to handle an S-EL1 interrupt synchronously.
     * The address of the instruction in the normal world where the
     * interrupt was generated is also passed for debugging purposes. It is
     * safe to retrieve this address from ELR_EL3 as the secure context
     * will not take effect until el3_exit().
     */
    SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_SEL1_INTR_AND_RETURN,
             read_elr_el3());
}
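/*
 * For context, a sketch of the secure-payload side (not part of this
 * file): after handling the interrupt at 'sel1_intr_entry', the TSP is
 * expected to report completion with an SMC that lands in the
 * TSP_HANDLED_S_EL1_FIQ case of tspd_smc_handler() shown later. In
 * pseudo-C at S-EL1, with handle_sel1_interrupt() as a hypothetical
 * placeholder for the TSP's own interrupt servicing:
 *
 *     handle_sel1_interrupt();                 // service the S-EL1 interrupt
 *     smc(TSP_HANDLED_S_EL1_FIQ, 0, 0, 0);     // hand control back to the TSPD
 */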
/*******************************************************************************
 * BL31 is responsible for setting up the runtime services for the primary cpu
 * before passing control to the bootloader or an Operating System. This
 * function calls runtime_svc_init() which initializes all registered runtime
 * services. The runtime services set up enough context for the core to
 * switch to the next exception level. When this function returns, the core
 * will switch to the programmed exception level via an ERET.
 ******************************************************************************/
void bl31_main(void)
{
#if DEBUG
    unsigned long mpidr = read_mpidr();
#endif

    /* Perform remaining generic architectural setup from EL3 */
    bl31_arch_setup();

    /* Perform platform setup in BL31 */
    bl31_platform_setup();

#if defined (__GNUC__)
    printf("BL31 Built : %s, %s\n\r", __TIME__, __DATE__);
#endif

    /* Initialise helper libraries */
    bl31_lib_init();

    /* Initialize the runtime services e.g. psci */
    runtime_svc_init();

    /* Clean caches before re-entering normal world */
    dcsw_op_all(DCCSW);

    /*
     * Use the more complex exception vectors now that context
     * management is set up. SP_EL3 should point to a 'cpu_context'
     * structure which has an exception stack allocated. The PSCI
     * service should have set the context.
     */
    assert(cm_get_context(mpidr, NON_SECURE));
    cm_set_next_eret_context(NON_SECURE);
    write_vbar_el3((uint64_t) runtime_exceptions);

    /*
     * All the cold boot actions on the primary cpu are done. We
     * now need to decide which is the next image (BL32 or BL33)
     * and how to execute it. If the SPD runtime service is
     * present, it would want to pass control to BL32 first in
     * S-EL1. It will export the bl32_init() routine where it takes
     * responsibility for entering S-EL1 and returning control back
     * to bl31_main. Once this is done we can prepare entry into
     * BL33 as normal.
     */

    /* Tell BL32 about its memory extents as well */
    if (bl32_init)
        bl32_init(bl31_plat_get_bl32_mem_layout());

    /*
     * We are ready to enter the next EL. Prepare entry into the image
     * corresponding to the desired security state after the next ERET.
     */
    bl31_prepare_next_image_entry();
}
/*******************************************************************************
 * BL31 is responsible for setting up the runtime services for the primary cpu
 * before passing control to the bootloader (UEFI) or Linux. This function calls
 * runtime_svc_init() which initializes all registered runtime services. The
 * runtime services set up enough context for the core to switch to the next
 * exception level. When this function returns, the core will switch to the
 * programmed exception level via an ERET.
 ******************************************************************************/
void bl31_main(void)
{
    el_change_info *next_image_info;
    uint32_t scr;

    /* Perform remaining generic architectural setup from EL3 */
    bl31_arch_setup();

    /* Perform platform setup in BL31 */
    bl31_platform_setup();

#if defined (__GNUC__)
    printf("BL31 Built : %s, %s\n\r", __TIME__, __DATE__);
#endif

    /* Initialise helper libraries */
    bl31_lib_init();

    /* Initialize the runtime services e.g. psci */
    runtime_svc_init();

    /* Clean caches before re-entering normal world */
    dcsw_op_all(DCCSW);

    /*
     * Set up the minimal architectural state of the next highest EL to
     * allow execution in it immediately upon entering it.
     */
    bl31_arch_next_el_setup();

    /* Program EL3 registers to enable entry into the next EL */
    next_image_info = bl31_get_next_image_info();
    scr = read_scr();
    if (next_image_info->security_state == NON_SECURE)
        scr |= SCR_NS_BIT;

    /*
     * Tell the context mgmt. library to ensure that SP_EL3 points to
     * the right context to exit from EL3 correctly.
     */
    cm_set_el3_eret_context(next_image_info->security_state,
                            next_image_info->entrypoint,
                            next_image_info->spsr, scr);

    /* Finally set the next context */
    cm_set_next_eret_context(next_image_info->security_state);
}
/*******************************************************************************
 * This function takes an SP context pointer and:
 * 1. Applies the S-EL1 system register context from tbase_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee-saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3. Calls el3_exit() so that the EL3 system and general purpose registers
 *    from the tbase_ctx->cpu_ctx are used to enter the secure payload image.
 ******************************************************************************/
uint64_t tbase_synchronous_sp_entry(tbase_context *tbase_ctx)
{
    uint64_t rc;

    assert(tbase_ctx->c_rt_ctx == 0);

    /* Apply the Secure EL1 system register context and switch to it */
    assert(cm_get_context(SECURE) == &tbase_ctx->cpu_ctx);
    cm_el1_sysregs_context_restore(SECURE);
    cm_set_next_eret_context(SECURE);

    rc = tbase_enter_sp(&tbase_ctx->c_rt_ctx);
#if DEBUG
    tbase_ctx->c_rt_ctx = 0;
#endif

    return rc;
}
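/*
 * Assumed counterpart of the entry routine above, mirroring the TSPD's
 * tspd_synchronous_sp_exit(): optionally save the S-EL1 system registers,
 * then unwind to the C runtime frame captured by tbase_enter_sp(), so the
 * pending tbase_synchronous_sp_entry() call returns 'ret'. The
 * tbase_exit_sp() unwinder is an assumption based on that pattern; the
 * three-argument form matches the calls seen in the handlers below.
 */
void tbase_synchronous_sp_exit(tbase_context *tbase_ctx, uint64_t ret,
                               uint32_t save_sysregs)
{
    if (save_sysregs) {
        assert(cm_get_context(SECURE) == &tbase_ctx->cpu_ctx);
        cm_el1_sysregs_context_save(SECURE);
    }

    assert(tbase_ctx->c_rt_ctx != 0);
    tbase_exit_sp(tbase_ctx->c_rt_ctx, ret);    /* does not return */
}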
uint64_t tspd_handle_sp_preemption(void *handle)
{
    cpu_context_t *ns_cpu_context;

    assert(handle == cm_get_context(SECURE));
    cm_el1_sysregs_context_save(SECURE);

    /* Get a reference to the non-secure context */
    ns_cpu_context = cm_get_context(NON_SECURE);
    assert(ns_cpu_context);

    /*
     * Restore non-secure state. The secure system
     * register context will be saved when required.
     */
    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);

    SMC_RET1(ns_cpu_context, SMC_PREEMPTED);
}
/*******************************************************************************
 * Migrate TBASE.
 ******************************************************************************/
static void tbase_migrate_handler(uint64_t x1, uint64_t par2)
{
#if DEBUG
    uint64_t mpidr = read_mpidr();
    uint32_t linear_id = platform_get_core_pos(mpidr);
    // tbase_context *tbase_ctx = &secure_context[linear_id];
    DBG_PRINTF("\r\ntbase_migrate_handler %d %x %x\r\n", linear_id, x1, par2);
#endif

    // TODO: Not working, not tested at all
    // cpu_context_t *ns_context = (cpu_context_t *) cm_get_context(NON_SECURE);
    // gp_regs_t *ns_gpregs = get_gpregs_ctx(ns_context);

    cm_el1_sysregs_context_save(NON_SECURE);

    fc_response_t resp;
    /*uint64_t res = */tbase_monitor_fastcall(0 /*smc_fid*/, x1, par2, 0, 0, &resp);

    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);
}
/*******************************************************************************
 * This function takes a TLK context pointer and:
 * 1. Applies the S-EL1 system register context from tlk_ctx->cpu_ctx.
 * 2. Saves the current C runtime state (callee-saved registers) on the stack
 *    frame and saves a reference to this state.
 * 3. Calls el3_exit() so that the EL3 system and general purpose registers
 *    from the tlk_ctx->cpu_ctx are used to enter the secure payload image.
 ******************************************************************************/
uint64_t tlkd_synchronous_sp_entry(tlk_context_t *tlk_ctx)
{
    uint64_t rc;

    /* Passing a NULL context is a critical programming error */
    assert(tlk_ctx);
    assert(tlk_ctx->c_rt_ctx == 0);

    /* Apply the Secure EL1 system register context and switch to it */
    assert(cm_get_context(SECURE) == &tlk_ctx->cpu_ctx);
    cm_el1_sysregs_context_restore(SECURE);
    cm_set_next_eret_context(SECURE);

    rc = tlkd_enter_sp(&tlk_ctx->c_rt_ctx);
#if DEBUG
    tlk_ctx->c_rt_ctx = 0;
#endif

    return rc;
}
// ***************************************************************************
// Fastcall handler
static uint64_t tbase_fastcall_handler(uint32_t smc_fid,
                                       uint64_t x1,
                                       uint64_t x2,
                                       uint64_t x3,
                                       uint64_t x4,
                                       void *cookie,
                                       void *handle,
                                       uint64_t flags)
{
    uint64_t mpidr = read_mpidr();
    uint32_t linear_id = platform_get_core_pos(mpidr);
    tbase_context *tbase_ctx = &secure_context[linear_id];
    int caller_security_state = flags & 1;

    if (caller_security_state == SECURE) {
        switch (maskSWdRegister(smc_fid)) {
        case TBASE_SMC_FASTCALL_RETURN: {
            // Return values from the fastcall are already in cpu_context!
            // TODO: Could we skip saving sysregs?
            DBG_PRINTF("tbase_fastcall_handler TBASE_SMC_FASTCALL_RETURN\n\r");
            tbase_synchronous_sp_exit(tbase_ctx, 0, 1); // does not return
        }
        case TBASE_SMC_FASTCALL_CONFIG_OK: {
            DBG_PRINTF("tbase_fastcall_handler TBASE_SMC_FASTCALL_CONFIG_OK\n\r");
            configure_tbase(x1, x2);
            SMC_RET1(handle, smc_fid);
            break;
        }
        case TBASE_SMC_FASTCALL_OUTPUT: {
            output(x1, x2);
            SMC_RET1(handle, smc_fid);
            break;
        }
        case TBASE_SMC_FASTCALL_STATUS: {
            DBG_PRINTF("tbase_fastcall_handler TBASE_SMC_FASTCALL_STATUS\n\r");
            tbase_status(x1, x2);
            SMC_RET1(handle, smc_fid);
            break;
        }
        case TBASE_SMC_FASTCALL_INPUT: {
            DBG_PRINTF("tbase_fastcall_handler TBASE_SMC_FASTCALL_INPUT\n\r");
            smc_fid = plat_tbase_input(x1, &x2,
                                       &(tbase_ctx->tbase_input_fastcall));
            SMC_RET3(handle, smc_fid,
                     page_align(registerFileEnd[REGISTER_FILE_NWD] -
                                registerFileStart[REGISTER_FILE_NWD], UP) +
                     (uint64_t)&(tbase_ctx->tbase_input_fastcall) -
                     registerFileStart[REGISTER_FILE_MONITOR],
                     x2);
            break;
        }
        case TBASE_SMC_FASTCALL_DUMP: {
            DBG_PRINTF("tbase_fastcall_handler TBASE_SMC_FASTCALL_DUMP\n\r");
            tbase_triggerSgiDump();
            SMC_RET1(handle, smc_fid);
            break;
        }
        default: {
            // What now?
            DBG_PRINTF("tbase_fastcall_handler SMC_UNK %x\n\r", smc_fid);
            SMC_RET1(handle, SMC_UNK);
            break;
        }
        }
    } else {
        if (smc_fid == TBASE_SMC_AEE_DUMP) { // N-world can request the AEE dump function
            mt_atf_trigger_WDT_FIQ();
            // Once we return to the N-world's caller, the FIQ will be
            // triggered and bring us to EL3 (ATF) on core #0 because of HW
            // wiring. The FIQ is then handled the same way as a HW WDT FIQ.

            // Do we need to save/restore the N-world context before being
            // able to use it for the return?
            cm_el1_sysregs_context_restore(NON_SECURE);
            cm_set_next_eret_context(NON_SECURE);
            return 0;
        }
        if ((tbaseExecutionStatus & TBASE_STATUS_FASTCALL_OK_BIT) == 0) {
            // TBASE must be initialized to be usable
            // TODO: What is the correct error code?
            DBG_PRINTF("tbase_fastcall_handler tbase not ready for fastcall\n\r");
            SMC_RET1(handle, SMC_UNK);
            return 0;
        }
        if (tbase_ctx->state == TBASE_STATE_OFF) {
            DBG_PRINTF("tbase_fastcall_handler tbase not ready for fastcall\n\r");
            SMC_RET1(handle, SMC_UNK);
            return 0;
        }
        DBG_PRINTF("tbase_fastcall_handler NWd %x\n\r", smc_fid);

        // So far all fastcalls go to tbase.
        // Save NWd context.
        gp_regs_t *ns_gpregs = get_gpregs_ctx((cpu_context_t *)handle);
        write_ctx_reg(ns_gpregs, CTX_GPREG_X0, smc_fid); // These are not saved yet
        write_ctx_reg(ns_gpregs, CTX_GPREG_X1, x1);
        write_ctx_reg(ns_gpregs, CTX_GPREG_X2, x2);
        write_ctx_reg(ns_gpregs, CTX_GPREG_X3, x3);
        cm_el1_sysregs_context_save(NON_SECURE);

        // Load SWd context
        tbase_setup_entry_nwd((cpu_context_t *)handle, ENTRY_OFFSET_FASTCALL);
#if DEBUG
        print_fastcall_params("entry", NON_SECURE);
#endif
        tbase_synchronous_sp_entry(tbase_ctx);

        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);
        return 0; // Does not seem to matter what we return
    }
}
/*******************************************************************************
 * BL31 is responsible for setting up the runtime services for the primary cpu
 * before passing control to the bootloader or an Operating System. This
 * function calls runtime_svc_init() which initializes all registered runtime
 * services. The runtime services set up enough context for the core to
 * switch to the next exception level. When this function returns, the core
 * will switch to the programmed exception level via an ERET.
 ******************************************************************************/
void bl31_main(void)
{
#if DEBUG
    unsigned long mpidr = read_mpidr();
#endif
    atf_arg_t_ptr teearg = (atf_arg_t_ptr)(uintptr_t)TEE_BOOT_INFO_ADDR;

    if (teearg->atf_log_buf_size != 0) {
        teearg->atf_aee_debug_buf_size = ATF_AEE_BUFFER_SIZE;
        teearg->atf_aee_debug_buf_start =
            teearg->atf_log_buf_start + teearg->atf_log_buf_size -
            ATF_AEE_BUFFER_SIZE;
        mt_log_setup(teearg->atf_log_buf_start,
                     teearg->atf_log_buf_size,
                     teearg->atf_aee_debug_buf_size);
        printf("ATF log service is registered (0x%x, aee:0x%x)\n",
               teearg->atf_log_buf_start, teearg->atf_aee_debug_buf_start);
    } else {
        teearg->atf_aee_debug_buf_size = 0;
        teearg->atf_aee_debug_buf_start = 0;
    }

    /* Perform remaining generic architectural setup from EL3 */
    bl31_arch_setup();

    /* Perform platform setup in BL31 */
    bl31_platform_setup();

    printf("BL31 %s\n\r", build_message);

    /* Initialise helper libraries */
    bl31_lib_init();

    /* Initialize the runtime services e.g. psci */
    runtime_svc_init();

    /* Clean caches before re-entering normal world */
    dcsw_op_all(DCCSW);

    /*
     * Use the more complex exception vectors now that context
     * management is set up. SP_EL3 should point to a 'cpu_context'
     * structure which has an exception stack allocated. The PSCI
     * service should have set the context.
     */
    assert(cm_get_context(mpidr, NON_SECURE));
    cm_set_next_eret_context(NON_SECURE);
    cm_init_pcpu_ptr_cache();
    write_vbar_el3((uint64_t) runtime_exceptions);
    isb();
    next_image_type = NON_SECURE;

    /*
     * All the cold boot actions on the primary cpu are done. We now need to
     * decide which is the next image (BL32 or BL33) and how to execute it.
     * If the SPD runtime service is present, it would want to pass control
     * to BL32 first in S-EL1. In that case, the SPD would have registered a
     * function to initialize BL32 where it takes responsibility for
     * entering S-EL1 and returning control back to bl31_main. Once this is
     * done we can prepare entry into BL33 as normal.
     */

    /* If the SPD has registered an init hook, invoke it. */
    if (teearg->tee_support) {
        printf("[BL31] Jump to secure OS for initialization!\n\r");
        if (bl32_init) {
            (*bl32_init)(teearg->tee_entry, teearg->tee_boot_arg_addr);
        } else {
            printf("[ERROR] Secure OS is not initialized!\n\r");
            //assert(0);
        }
    } else {
        printf("[BL31] Jump to FIQD for initialization!\n\r");
        if (bl32_init) {
            (*bl32_init)(0, 0);
        }
    }

    /*
     * We are ready to enter the next EL. Prepare entry into the image
     * corresponding to the desired security state after the next ERET.
     */
    bl31_prepare_next_image_entry();

    printf("[BL31] Final dump!\n\r");
    clear_uart_flag();
    printf("[BL31] SHOULD not dump in UART but in log buffer!\n\r");
}
/*******************************************************************************
 * This function programs EL3 registers and performs other setup to enable entry
 * into the next image after BL31 at the next ERET.
 ******************************************************************************/
void bl31_prepare_next_image_entry(void)
{
    entry_point_info_t *next_image_info;
    uint32_t scr, image_type;
    cpu_context_t *ctx;
    gp_regs_t *gp_regs;

    /* Determine which image to execute next */
    image_type = bl31_get_next_image_type();

    /*
     * Set up the minimal architectural state of the next highest EL to
     * allow execution in it immediately upon entering it.
     */
    bl31_next_el_arch_setup(image_type);

    /* Program EL3 registers to enable entry into the next EL */
    next_image_info = bl31_plat_get_next_image_ep_info(image_type);
    assert(next_image_info);
    assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));

    scr = read_scr();
    scr &= ~SCR_NS_BIT;
    if (image_type == NON_SECURE)
        scr |= SCR_NS_BIT;

    scr &= ~SCR_RW_BIT;
    if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) ==
        (MODE_RW_64 << MODE_RW_SHIFT)) {
        scr |= SCR_RW_BIT;
        scr |= SCR_HCE_BIT;
    } else {
        scr &= ~(SCR_HCE_BIT);
    }

    /*
     * FIXME: Need a configurable flag once a hypervisor is installed.
     * This is required for the PSCI CPU_ON API to work correctly:
     * PSCI uses SCR_EL3.HCE to determine whether the target CPU of CPU_ON
     * returns to the NS world in HYP mode (HCE set) or SVC mode (HCE
     * clear); refer to psci_set_ns_entry_info() in psci_common.c.
     * Since no hypervisor is installed for now, Linux must be entered in
     * SVC mode.
     */
    // FIXME: For a 64-bit kernel, we need to return to the normal world at
    // EL2. Temporarily commented out to enable 64-bit kernel SMP; a method
    // to configure this for 32-bit/64-bit kernels is needed.
    //scr &= ~(SCR_HCE_BIT);

    /*
     * Tell the context mgmt. library to ensure that SP_EL3 points to
     * the right context to exit from EL3 correctly.
     */
    cm_set_el3_eret_context(image_type, next_image_info->pc,
                            next_image_info->spsr, scr);

    /*
     * Save the args generated in BL2 for the image in the right context
     * used on its entry.
     */
    ctx = cm_get_context(read_mpidr(), image_type);
    gp_regs = get_gpregs_ctx(ctx);
    memcpy(gp_regs, (void *)&next_image_info->args, sizeof(aapcs64_params_t));

    /* Finally set the next context */
    cm_set_next_eret_context(image_type);
}
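/*
 * The spsr-based width check above, factored out as a helper for clarity
 * (a sketch, not present in the original source; MODE_RW_MASK is assumed
 * to be the single-bit mask matching MODE_RW_SHIFT):
 */
static int ep_is_aarch64(const entry_point_info_t *ep)
{
    return ((ep->spsr >> MODE_RW_SHIFT) & MODE_RW_MASK) == MODE_RW_64;
}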
void bl31_prepare_k64_entry(void)
{
    entry_point_info_t *next_image_info;
    uint32_t scr, image_type;
    cpu_context_t *ctx;
    gp_regs_t *gp_regs;

    /* Determine which image to execute next */
    image_type = NON_SECURE; //bl31_get_next_image_type();

    /*
     * Set up the minimal architectural state of the next highest EL to
     * allow execution in it immediately upon entering it.
     */
    bl31_next_el_arch_setup(image_type);

    /* Program EL3 registers to enable entry into the next EL */
    next_image_info = bl31_plat_get_next_kernel_ep_info(image_type);
    assert(next_image_info);
    assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));

    /* Check whether a 64-bit kernel is configured */
    printf("next_image_info->spsr = 0x%llx\n", next_image_info->spsr);

    scr = read_scr();
    scr &= ~SCR_NS_BIT;
    if (image_type == NON_SECURE)
        scr |= SCR_NS_BIT;

    scr &= ~SCR_RW_BIT;
    if ((next_image_info->spsr & (1 << MODE_RW_SHIFT)) ==
        (MODE_RW_64 << MODE_RW_SHIFT)) {
        scr |= SCR_RW_BIT;
        printf("spsr is 64 bit\n");
    }
    scr |= SCR_HCE_BIT;

    /*
     * Tell the context mgmt. library to ensure that SP_EL3 points to
     * the right context to exit from EL3 correctly.
     */
    cm_set_el3_eret_context(image_type, next_image_info->pc,
                            next_image_info->spsr, scr);

    /*
     * Save the args generated in BL2 for the image in the right context
     * used on its entry.
     */
    ctx = cm_get_context(read_mpidr(), image_type);
    gp_regs = get_gpregs_ctx(ctx);
    memcpy(gp_regs, (void *)&next_image_info->args, sizeof(aapcs64_params_t));

    printf("Finally set the next context\n");
    /* Finally set the next context */
    cm_set_next_eret_context(image_type);
}
static uint64_t tbase_smc_handler(uint32_t smc_fid,
                                  uint64_t x1,
                                  uint64_t x2,
                                  uint64_t x3,
                                  uint64_t x4,
                                  void *cookie,
                                  void *handle,
                                  uint64_t flags)
{
    uint64_t mpidr = read_mpidr();
    uint32_t linear_id = platform_get_core_pos(mpidr);
    tbase_context *tbase_ctx = &secure_context[linear_id];
    int caller_security_state = flags & 1;

    DBG_PRINTF("tbase_smc_handler %d %x\n\r", caller_security_state, smc_fid);

    if (caller_security_state == SECURE) {
        // Yield to NWd
        // TODO: Check id
        if (tbaseInitStatus == TBASE_INIT_CONFIG_OK) {
            // Save sysregs for all cores.
            // After this tbase can work on any core.
            save_sysregs_allcore();
            tbaseInitStatus = TBASE_INIT_SYSREGS_OK;
            if (tbaseExecutionStatus == TBASE_STATUS_UNINIT) {
                tbaseExecutionStatus = TBASE_STATUS_NORMAL;
            }
        }
        // If the above check fails, it is not possible to return to tbase.
        tbase_synchronous_sp_exit(tbase_ctx, 0, 1);
    } else {
        if ((tbaseExecutionStatus & TBASE_STATUS_SMC_OK_BIT) == 0) {
            // TBASE must be initialized to be usable
            DBG_PRINTF("tbase_smc_handler tbase not ready for smc.\n\r");
            // TODO: What is the correct error code?
            SMC_RET1(handle, SMC_UNK);
            return 1;
        }
        if (tbase_ctx->state == TBASE_STATE_OFF) {
            DBG_PRINTF("tbase_smc_handler tbase not ready for smc\n\r");
            return 1;
        }

        // NSIQ, go to SWd
        // TODO: Check id?

        // Save NWd
        gp_regs_t *ns_gpregs = get_gpregs_ctx((cpu_context_t *)handle);
        write_ctx_reg(ns_gpregs, CTX_GPREG_X0, smc_fid);
        write_ctx_reg(ns_gpregs, CTX_GPREG_X1, x1);
        write_ctx_reg(ns_gpregs, CTX_GPREG_X2, x2);
        write_ctx_reg(ns_gpregs, CTX_GPREG_X3, x3);
        cm_el1_sysregs_context_save(NON_SECURE);

        // Load SWd
        tbase_setup_entry_nwd((cpu_context_t *)handle, ENTRY_OFFSET_SMC);

        // Enter tbase. tbase must return using a normal SMC, which will
        // continue here.
        tbase_synchronous_sp_entry(tbase_ctx);

        // Load NWd
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);
    }
    return 0;
}
//*****************************************************************************
// FIQ handler for an FIQ arriving while in the NWd
uint64_t tbase_fiq_handler(uint32_t id,
                           uint32_t flags,
                           void *handle,
                           void *cookie)
{
    uint64_t mpidr;
    uint32_t linear_id;
    tbase_context *tbase_ctx;

    mpidr = read_mpidr();
    linear_id = platform_get_core_pos(mpidr);
    tbase_ctx = &secure_context[linear_id];
    assert(&tbase_ctx->cpu_ctx == cm_get_context(SECURE));

    /* Check if the vector has been entered for the SGI/FIQ dump reason */
    if (id == FIQ_SMP_CALL_SGI) {
        /* ACK gic */
        {
            unsigned int iar;
            iar = gicc_read_IAR(get_plat_config()->gicc_base);
            gicc_write_EOIR(get_plat_config()->gicc_base, iar);
        }

        /* Save the non-secure context */
        cm_el1_sysregs_context_save(NON_SECURE);

        /* Call customer's dump implementation */
        plat_tbase_dump();

        // Load NWd
        //cm_el1_sysregs_context_restore(NON_SECURE);
        //cm_set_next_eret_context(NON_SECURE);
    } else {
        /* Check the security state when the exception was generated */
        assert(get_interrupt_src_ss(flags) == NON_SECURE);

        /* Sanity check the pointer to this cpu's context */
        assert(handle == cm_get_context(NON_SECURE));

        if ((tbaseExecutionStatus & TBASE_STATUS_SMC_OK_BIT) == 0) {
            // TBASE must be initialized to be usable
            // TODO: What should we really do here?
            // We should disable FIQs to prevent further interrupts.
            DBG_PRINTF("tbase_interrupt_handler tbase not ready for interrupt\n\r");
            return 1;
        }
        if (tbase_ctx->state == TBASE_STATE_OFF) {
            DBG_PRINTF("tbase_interrupt_handler tbase not ready for interrupt\n\r");
            return 1;
        }

        /* Save the non-secure context before entering tbase */
        cm_el1_sysregs_context_save(NON_SECURE);

        /* Switch to secure context now */
        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);

        // Load SWd context
        tbase_setup_entry_nwd((cpu_context_t *)handle, ENTRY_OFFSET_FIQ);

        // Enter tbase. tbase must return using a normal SMC, which will
        // continue here.
        tbase_synchronous_sp_entry(tbase_ctx);

        // Load NWd
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);
    }
    return 0;
}
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure
 * payload to delegate work and return results back to the non-secure
 * state. Lastly it will also return any information that OPTEE needs to do
 * the work assigned to it.
 ******************************************************************************/
uint64_t opteed_smc_handler(uint32_t smc_fid,
                            uint64_t x1,
                            uint64_t x2,
                            uint64_t x3,
                            uint64_t x4,
                            void *cookie,
                            void *handle,
                            uint64_t flags)
{
    cpu_context_t *ns_cpu_context;
    unsigned long mpidr = read_mpidr();
    uint32_t linear_id = platform_get_core_pos(mpidr);
    optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
    uint64_t rc;

    /*
     * Determine which security state this SMC originated from
     */
    if (is_caller_non_secure(flags)) {
        /*
         * This is a fresh request from the non-secure client.
         * The parameters are in x1 and x2. Figure out which
         * registers need to be preserved, save the non-secure
         * state and send the request to the secure payload.
         */
        assert(handle == cm_get_context(NON_SECURE));

        cm_el1_sysregs_context_save(NON_SECURE);

        /*
         * We are done stashing the non-secure context. Ask the
         * OPTEE to do the work now.
         */

        /*
         * Verify if there is a valid context to use, copy the
         * operation type and parameters to the secure context
         * and jump to the fast smc entry point in the secure
         * payload. Entry into S-EL1 will take place upon exit
         * from this function.
         */
        assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

        /*
         * Set the appropriate entry point for the SMC.
         * We expect OPTEE to manage the PSTATE.I and PSTATE.F
         * flags as appropriate.
         */
        if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
            cm_set_elr_el3(SECURE,
                           (uint64_t) &optee_vectors->fast_smc_entry);
        } else {
            cm_set_elr_el3(SECURE,
                           (uint64_t) &optee_vectors->std_smc_entry);
        }

        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);

        /* Propagate hypervisor client ID */
        write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx), CTX_GPREG_X7,
                      read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X7));

        SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
    }

    /*
     * Returning from OPTEE
     */
    switch (smc_fid) {
    /*
     * OPTEE has finished initialising itself after a cold boot
     */
    case TEESMC_OPTEED_RETURN_ENTRY_DONE:
        /*
         * Stash the OPTEE entry points information. This is done
         * only once on the primary cpu.
         */
        assert(optee_vectors == NULL);
        optee_vectors = (optee_vectors_t *) x1;

        if (optee_vectors) {
            set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);

            /*
             * OPTEE has been successfully initialized.
             * Register power management hooks with PSCI.
             */
            psci_register_spd_pm_hook(&opteed_pm);

            /*
             * Register an interrupt handler for S-EL1 interrupts
             * when generated during code executing in the
             * non-secure state.
             */
            flags = 0;
            set_interrupt_rm_flag(flags, NON_SECURE);
            rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
                                                 opteed_sel1_interrupt_handler,
                                                 flags);
            if (rc)
                panic();
        }

        /*
         * OPTEE reports completion. The OPTEED must have initiated
         * the original request through a synchronous entry into
         * OPTEE. Jump back to the original C runtime context.
         */
        opteed_synchronous_sp_exit(optee_ctx, x1);

    /*
     * These function IDs are used only by OPTEE to indicate it has
     * finished:
     * 1. turning itself on in response to an earlier psci
     *    cpu_on request
     * 2. resuming itself after an earlier psci cpu_suspend
     *    request.
     */
    case TEESMC_OPTEED_RETURN_ON_DONE:
    case TEESMC_OPTEED_RETURN_RESUME_DONE:

    /*
     * These function IDs are used only by the SP to indicate it has
     * finished:
     * 1. suspending itself after an earlier psci cpu_suspend
     *    request.
     * 2. turning itself off in response to an earlier psci
     *    cpu_off request.
     */
    case TEESMC_OPTEED_RETURN_OFF_DONE:
    case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
    case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
    case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:
        /*
         * OPTEE reports completion. The OPTEED must have initiated the
         * original request through a synchronous entry into OPTEE.
         * Jump back to the original C runtime context, and pass x1 as
         * the return value to the caller.
         */
        opteed_synchronous_sp_exit(optee_ctx, x1);

    /*
     * OPTEE is returning from a call or being preempted from a call; in
     * either case execution should resume in the normal world.
     */
    case TEESMC_OPTEED_RETURN_CALL_DONE:
        /*
         * This is the result from the secure client of an
         * earlier request. The results are in x0-x3. Copy them
         * into the non-secure context, save the secure state
         * and return to the non-secure state.
         */
        assert(handle == cm_get_context(SECURE));
        cm_el1_sysregs_context_save(SECURE);

        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /* Restore non-secure state */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

    /*
     * OPTEE has finished handling an S-EL1 FIQ interrupt. Execution
     * should resume in the normal world.
     */
    case TEESMC_OPTEED_RETURN_FIQ_DONE:
        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /*
         * Restore non-secure state. There is no need to save the
         * secure system register context since OPTEE was supposed
         * to preserve it during S-EL1 interrupt handling.
         */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET0((uint64_t) ns_cpu_context);

    default:
        panic();
    }
}
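/*
 * Sketch of opteed_synchronous_sp_exit(), assumed to mirror the exit
 * helpers of the other dispatchers in this collection: save the S-EL1
 * context and unwind to the C runtime frame captured when OPTEE was
 * entered synchronously, so that the pending opteed_synchronous_sp_entry()
 * call returns 'ret'. The opteed_exit_sp() unwinder is an assumption.
 */
void opteed_synchronous_sp_exit(optee_context_t *optee_ctx, uint64_t ret)
{
    assert(optee_ctx != NULL);
    assert(cm_get_context(SECURE) == &optee_ctx->cpu_ctx);

    /* Save the Secure EL1 system register context */
    cm_el1_sysregs_context_save(SECURE);

    assert(optee_ctx->c_rt_ctx != 0);
    opteed_exit_sp(optee_ctx->c_rt_ctx, ret);   /* does not return */
}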
/*******************************************************************************
 * This function is the handler registered for S-EL1 interrupts by the FIQD. It
 * validates the interrupt and upon success arranges entry into the TSP at
 * 'tsp_fiq_entry()' for handling the interrupt.
 ******************************************************************************/
static uint64_t fiqd_sel1_interrupt_handler(uint32_t id,
                                            uint32_t flags,
                                            void *handle,
                                            void *cookie)
{
    unsigned int iar;

    /* Check the security state when the exception was generated */
    assert(get_interrupt_src_ss(flags) == NON_SECURE);

#if IMF_READ_INTERRUPT_ID
    /* Check the security status of the interrupt */
    assert(plat_ic_get_interrupt_type(id) == INTR_TYPE_S_EL1);
#endif

    /* Sanity check the pointer to this cpu's context */
    assert(handle == cm_get_context(NON_SECURE));

    /* Save the non-secure context before entering the TSP */
    cm_el1_sysregs_context_save(NON_SECURE);

    iar = get_ack_info();
    ack_sgi(iar);

    if (id == WDT_IRQ_BIT_ID) {
        /* FIX-ME: change 0xFE to the kernel online CPU mask */
        fiq_smp_call_function(0xFE, aee_wdt_dump, 0, 0);
        aee_wdt_dump();
    }

    if (id == FIQ_SMP_CALL_SGI) {
        fiq_icc_isr();
    }

    SMC_RET0(handle);

#if 0
    /* Get a reference to this cpu's TSP context */
    linear_id = platform_get_core_pos(mpidr);
    tsp_ctx = &fiqd_sp_context[linear_id];
    assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

    /*
     * Determine if the TSP was previously preempted. Its last known
     * context has to be preserved in this case.
     * The TSP should return control to the FIQD after handling this
     * FIQ. Preserve essential EL3 context to allow entry into the
     * TSP at the FIQ entry point using the 'cpu_context' structure.
     * There is no need to save the secure system register context
     * since the TSP is supposed to preserve it during S-EL1 interrupt
     * handling.
     */
    if (get_std_smc_active_flag(tsp_ctx->state)) {
        tsp_ctx->saved_spsr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
                                              CTX_SPSR_EL3);
        tsp_ctx->saved_elr_el3 = SMC_GET_EL3(&tsp_ctx->cpu_ctx,
                                             CTX_ELR_EL3);
    }

    cm_el1_sysregs_context_restore(SECURE);
    cm_set_elr_spsr_el3(SECURE, (uint64_t) &tsp_vectors->fiq_entry,
                        SPSR_64(MODE_EL1, MODE_SP_ELX,
                                DISABLE_ALL_EXCEPTIONS));
    cm_set_next_eret_context(SECURE);

    /*
     * Tell the TSP that it has to handle an FIQ synchronously. Also the
     * address of the instruction in the normal world where the interrupt
     * was generated is passed for debugging purposes. It is safe to
     * retrieve this address from ELR_EL3 as the secure context will not
     * take effect until el3_exit().
     */
    SMC_RET2(&tsp_ctx->cpu_ctx, TSP_HANDLE_FIQ_AND_RETURN, read_elr_el3());
#endif
}
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure payload
 * to delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the secure payload needs to do the
 * work assigned to it.
 ******************************************************************************/
uint64_t tspd_smc_handler(uint32_t smc_fid,
                          uint64_t x1,
                          uint64_t x2,
                          uint64_t x3,
                          uint64_t x4,
                          void *cookie,
                          void *handle,
                          uint64_t flags)
{
    cpu_context_t *ns_cpu_context;
    uint32_t linear_id = plat_my_core_pos(), ns;
    tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];
    uint64_t rc;
#if TSP_INIT_ASYNC
    entry_point_info_t *next_image_info;
#endif

    /* Determine which security state this SMC originated from */
    ns = is_caller_non_secure(flags);

    switch (smc_fid) {

    /*
     * This function ID is used by the TSP to indicate that it was
     * preempted by a normal world IRQ.
     */
    case TSP_PREEMPTED:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        return tspd_handle_sp_preemption(handle);

    /*
     * This function ID is used only by the TSP to indicate that it has
     * finished handling an S-EL1 FIQ interrupt. Execution should resume
     * in the normal world.
     */
    case TSP_HANDLED_S_EL1_FIQ:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        assert(handle == cm_get_context(SECURE));

        /*
         * Restore the relevant EL3 state which was saved to service
         * this SMC.
         */
        if (get_std_smc_active_flag(tsp_ctx->state)) {
            SMC_SET_EL3(&tsp_ctx->cpu_ctx, CTX_SPSR_EL3,
                        tsp_ctx->saved_spsr_el3);
            SMC_SET_EL3(&tsp_ctx->cpu_ctx, CTX_ELR_EL3,
                        tsp_ctx->saved_elr_el3);
#if TSPD_ROUTE_IRQ_TO_EL3
            /*
             * Need to restore the previously interrupted
             * secure context.
             */
            memcpy(&tsp_ctx->cpu_ctx, &tsp_ctx->sp_ctx,
                   TSPD_SP_CTX_SIZE);
#endif
        }

        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /*
         * Restore non-secure state. There is no need to save the
         * secure system register context since the TSP was supposed
         * to preserve it during S-EL1 interrupt handling.
         */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET0((uint64_t) ns_cpu_context);

    /*
     * This function ID is used only by the TSP to indicate that it was
     * interrupted due to an EL3 FIQ interrupt. Execution should resume
     * in the normal world.
     */
    case TSP_EL3_FIQ:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        assert(handle == cm_get_context(SECURE));

        /* Assert that standard SMC execution has been preempted */
        assert(get_std_smc_active_flag(tsp_ctx->state));

        /* Save the secure system register state */
        cm_el1_sysregs_context_save(SECURE);

        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /* Restore non-secure state */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET1(ns_cpu_context, TSP_EL3_FIQ);

    /*
     * This function ID is used only by the SP to indicate it has
     * finished initialising itself after a cold boot.
     */
    case TSP_ENTRY_DONE:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        /*
         * Stash the SP entry points information. This is done
         * only once on the primary cpu.
         */
        assert(tsp_vectors == NULL);
        tsp_vectors = (tsp_vectors_t *) x1;

        if (tsp_vectors) {
            set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);

            /*
             * TSP has been successfully initialized. Register power
             * management hooks with PSCI.
             */
            psci_register_spd_pm_hook(&tspd_pm);

            /*
             * Register an interrupt handler for S-EL1 interrupts
             * when generated during code executing in the
             * non-secure state.
             */
            flags = 0;
            set_interrupt_rm_flag(flags, NON_SECURE);
            rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
                                                 tspd_sel1_interrupt_handler,
                                                 flags);
            if (rc)
                panic();

#if TSPD_ROUTE_IRQ_TO_EL3
            /*
             * Register an interrupt handler for NS interrupts
             * which, when generated during code executing in the
             * secure state, are routed to EL3.
             */
            flags = 0;
            set_interrupt_rm_flag(flags, SECURE);
            rc = register_interrupt_type_handler(INTR_TYPE_NS,
                                                 tspd_ns_interrupt_handler,
                                                 flags);
            if (rc)
                panic();

            /*
             * Disable the NS interrupt locally since it will be
             * enabled globally within cm_init_my_context.
             */
            disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
        }

#if TSP_INIT_ASYNC
        /* Save the Secure EL1 system register context */
        assert(cm_get_context(SECURE) == &tsp_ctx->cpu_ctx);
        cm_el1_sysregs_context_save(SECURE);

        /* Program EL3 registers to enable entry into the next EL */
        next_image_info = bl31_plat_get_next_image_ep_info(NON_SECURE);
        assert(next_image_info);
        assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));

        cm_init_my_context(next_image_info);
        cm_prepare_el3_exit(NON_SECURE);
        SMC_RET0(cm_get_context(NON_SECURE));
#else
        /*
         * SP reports completion. The SPD must have initiated
         * the original request through a synchronous entry
         * into the SP. Jump back to the original C runtime
         * context.
         */
        tspd_synchronous_sp_exit(tsp_ctx, x1);
#endif

    /*
     * These function IDs are used only by the SP to indicate it has
     * finished:
     * 1. turning itself on in response to an earlier psci
     *    cpu_on request
     * 2. resuming itself after an earlier psci cpu_suspend
     *    request.
     */
    case TSP_ON_DONE:
    case TSP_RESUME_DONE:

    /*
     * These function IDs are used only by the SP to indicate it has
     * finished:
     * 1. suspending itself after an earlier psci cpu_suspend
     *    request.
     * 2. turning itself off in response to an earlier psci
     *    cpu_off request.
     */
    case TSP_OFF_DONE:
    case TSP_SUSPEND_DONE:
    case TSP_SYSTEM_OFF_DONE:
    case TSP_SYSTEM_RESET_DONE:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        /*
         * SP reports completion. The SPD must have initiated the
         * original request through a synchronous entry into the SP.
         * Jump back to the original C runtime context, and pass x1 as
         * the return value to the caller.
         */
        tspd_synchronous_sp_exit(tsp_ctx, x1);

    /*
     * Request from the non-secure client to perform an
     * arithmetic operation, or response from the secure
     * payload to an earlier request.
     */
    case TSP_FAST_FID(TSP_ADD):
    case TSP_FAST_FID(TSP_SUB):
    case TSP_FAST_FID(TSP_MUL):
    case TSP_FAST_FID(TSP_DIV):
    case TSP_STD_FID(TSP_ADD):
    case TSP_STD_FID(TSP_SUB):
    case TSP_STD_FID(TSP_MUL):
    case TSP_STD_FID(TSP_DIV):
        if (ns) {
            /*
             * This is a fresh request from the non-secure client.
             * The parameters are in x1 and x2. Figure out which
             * registers need to be preserved, save the non-secure
             * state and send the request to the secure payload.
             */
            assert(handle == cm_get_context(NON_SECURE));

            /* Check if we are already preempted */
            if (get_std_smc_active_flag(tsp_ctx->state))
                SMC_RET1(handle, SMC_UNK);

            cm_el1_sysregs_context_save(NON_SECURE);

            /* Save x1 and x2 for use by the TSP_GET_ARGS call below */
            store_tsp_args(tsp_ctx, x1, x2);

            /*
             * We are done stashing the non-secure context. Ask the
             * secure payload to do the work now.
             */

            /*
             * Verify if there is a valid context to use, copy the
             * operation type and parameters to the secure context
             * and jump to the fast smc entry point in the secure
             * payload. Entry into S-EL1 will take place upon exit
             * from this function.
             */
            assert(&tsp_ctx->cpu_ctx == cm_get_context(SECURE));

            /*
             * Set the appropriate entry point for the SMC.
             * We expect the TSP to manage the PSTATE.I and PSTATE.F
             * flags as appropriate.
             */
            if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
                cm_set_elr_el3(SECURE,
                               (uint64_t) &tsp_vectors->fast_smc_entry);
            } else {
                set_std_smc_active_flag(tsp_ctx->state);
                cm_set_elr_el3(SECURE,
                               (uint64_t) &tsp_vectors->std_smc_entry);
#if TSPD_ROUTE_IRQ_TO_EL3
                /*
                 * Enable the routing of NS interrupts to EL3
                 * during STD SMC processing on this core.
                 */
                enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
            }

            cm_el1_sysregs_context_restore(SECURE);
            cm_set_next_eret_context(SECURE);
            SMC_RET3(&tsp_ctx->cpu_ctx, smc_fid, x1, x2);
        } else {
            /*
             * This is the result from the secure client of an
             * earlier request. The results are in x1-x3. Copy them
             * into the non-secure context, save the secure state
             * and return to the non-secure state.
             */
            assert(handle == cm_get_context(SECURE));
            cm_el1_sysregs_context_save(SECURE);

            /* Get a reference to the non-secure context */
            ns_cpu_context = cm_get_context(NON_SECURE);
            assert(ns_cpu_context);

            /* Restore non-secure state */
            cm_el1_sysregs_context_restore(NON_SECURE);
            cm_set_next_eret_context(NON_SECURE);
            if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_STD) {
                clr_std_smc_active_flag(tsp_ctx->state);
#if TSPD_ROUTE_IRQ_TO_EL3
                /*
                 * Disable the routing of NS interrupts to EL3
                 * after STD SMC processing is finished on this
                 * core.
                 */
                disable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif
            }

            SMC_RET3(ns_cpu_context, x1, x2, x3);
        }

        break;

    /*
     * Request from the non-secure world to resume the preempted
     * Standard SMC call.
     */
    case TSP_FID_RESUME:
        /* RESUME should be invoked only by the normal world */
        if (!ns) {
            assert(0);
            break;
        }

        /*
         * This is a resume request from the non-secure client.
         * Save the non-secure state and send the request to
         * the secure payload.
         */
        assert(handle == cm_get_context(NON_SECURE));

        /* Check if we are already preempted before resume */
        if (!get_std_smc_active_flag(tsp_ctx->state))
            SMC_RET1(handle, SMC_UNK);

        cm_el1_sysregs_context_save(NON_SECURE);

        /*
         * We are done stashing the non-secure context. Ask the
         * secure payload to do the work now.
         */
#if TSPD_ROUTE_IRQ_TO_EL3
        /*
         * Enable the routing of NS interrupts to EL3 during the
         * resumption of a STD SMC call on this core.
         */
        enable_intr_rm_local(INTR_TYPE_NS, SECURE);
#endif

        /*
         * We just need to return to the preempted point in the
         * TSP and the execution will resume as normal.
         */
        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);
        SMC_RET0(&tsp_ctx->cpu_ctx);

    /*
     * This is a request from the secure payload for more arguments
     * for an ongoing arithmetic operation requested by the
     * non-secure world. Simply return the arguments from the non-
     * secure client in the original call.
     */
    case TSP_GET_ARGS:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        get_tsp_args(tsp_ctx, x1, x2);
        SMC_RET2(handle, x1, x2);

    case TOS_CALL_COUNT:
        /*
         * Return the number of service function IDs implemented to
         * provide service to the non-secure world.
         */
        SMC_RET1(handle, TSP_NUM_FID);

    case TOS_UID:
        /* Return TSP UID to the caller */
        SMC_UUID_RET(handle, tsp_uuid);

    case TOS_CALL_VERSION:
        /* Return the version of the current implementation */
        SMC_RET2(handle, TSP_VERSION_MAJOR, TSP_VERSION_MINOR);

    default:
        break;
    }

    SMC_RET1(handle, SMC_UNK);
}
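/*
 * Usage sketch (hypothetical normal-world caller, reusing the assumed
 * smc64() wrapper from the earlier sketch): request an addition from the
 * TSP via the fast variant of the arithmetic FIDs handled above. Per the
 * handler, the operands travel in x1/x2, are stashed by store_tsp_args()
 * and handed back to the TSP through TSP_GET_ARGS; the result comes back
 * via the SMC_RET3 in the secure-caller branch.
 */
static uint64_t ns_tsp_add(uint64_t a, uint64_t b)
{
    return smc64(TSP_FAST_FID(TSP_ADD), a, b, 0);
}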
static int32_t tbase_init_entry()
{
    DBG_PRINTF("tbase_init\n\r");

    // Save el1 registers in case the non-secure world has already been set up.
    cm_el1_sysregs_context_save(NON_SECURE);

    uint64_t mpidr = read_mpidr();
    uint32_t linear_id = platform_get_core_pos(mpidr);
    tbase_context *tbase_ctx = &secure_context[linear_id];

    // Note: mapping is 1:1, so physical and virtual addresses are here the same.
    cpu_context_t *ns_entry_context =
        (cpu_context_t *) cm_get_context(mpidr, NON_SECURE);

    // ***********************************************************************
    // Configure parameter passing to tbase

    // Calculate page start addresses for register areas.
    registerFileStart[REGISTER_FILE_NWD] =
        page_align((uint64_t)&ns_entry_context, DOWN);
    registerFileStart[REGISTER_FILE_MONITOR] =
        page_align((uint64_t)&msm_area, DOWN);

    // Calculate page end addresses for register areas.
    registerFileEnd[REGISTER_FILE_NWD] =
        (uint64_t)(&ns_entry_context[TBASE_CORE_COUNT]);
    registerFileEnd[REGISTER_FILE_MONITOR] =
        ((uint64_t)&msm_area) + sizeof(msm_area);

    int32_t totalPages = 0;
    for (int area = 0; area < REGISTER_FILE_COUNT; area++) {
        int32_t pages = page_align(registerFileEnd[area] -
                                   registerFileStart[area], UP) / PAGE_SIZE;
        assert(pages + totalPages <= TBASE_INTERFACE_PAGES);
        tbase_init_register_file(area, totalPages, pages);
        totalPages += pages;
    }

    // ***********************************************************************
    // Create boot structure
    tbaseBootCfg.magic = TBASE_BOOTCFG_MAGIC;
    tbaseBootCfg.length = sizeof(bootCfg_t);
    tbaseBootCfg.version = TBASE_MONITOR_INTERFACE_VERSION;

    tbaseBootCfg.dRamBase = TBASE_NWD_DRAM_BASE;
    tbaseBootCfg.dRamSize = TBASE_NWD_DRAM_SIZE;
    tbaseBootCfg.secDRamBase = TBASE_SWD_DRAM_BASE;
    tbaseBootCfg.secDRamSize = TBASE_SWD_DRAM_SIZE;
    tbaseBootCfg.secIRamBase = TBASE_SWD_IMEM_BASE;
    tbaseBootCfg.secIRamSize = TBASE_SWD_IMEM_SIZE;

    tbaseBootCfg.conf_mair_el3 = read_mair_el3();
    tbaseBootCfg.MSMPteCount = totalPages;
    tbaseBootCfg.MSMBase = (uint64_t)registerFileL2;

    tbaseBootCfg.gic_distributor_base = TBASE_GIC_DIST_BASE;
    tbaseBootCfg.gic_cpuinterface_base = TBASE_GIC_CPU_BASE;
    tbaseBootCfg.gic_version = TBASE_GIC_VERSION;

    tbaseBootCfg.total_number_spi = TBASE_SPI_COUNT;
    tbaseBootCfg.ssiq_number = TBASE_SSIQ_NRO;

    tbaseBootCfg.flags = TBASE_MONITOR_FLAGS;

    DBG_PRINTF("*** tbase boot cfg ***\n\r");
    DBG_PRINTF("* magic                 : 0x%.X\n\r", tbaseBootCfg.magic);
    DBG_PRINTF("* length                : 0x%.X\n\r", tbaseBootCfg.length);
    DBG_PRINTF("* version               : 0x%.X\n\r", tbaseBootCfg.version);
    DBG_PRINTF("* dRamBase              : 0x%.X\n\r", tbaseBootCfg.dRamBase);
    DBG_PRINTF("* dRamSize              : 0x%.X\n\r", tbaseBootCfg.dRamSize);
    DBG_PRINTF("* secDRamBase           : 0x%.X\n\r", tbaseBootCfg.secDRamBase);
    DBG_PRINTF("* secDRamSize           : 0x%.X\n\r", tbaseBootCfg.secDRamSize);
    DBG_PRINTF("* secIRamBase           : 0x%.X\n\r", tbaseBootCfg.secIRamBase);
    DBG_PRINTF("* secIRamSize           : 0x%.X\n\r", tbaseBootCfg.secIRamSize);
    DBG_PRINTF("* conf_mair_el3         : 0x%.X\n\r", tbaseBootCfg.conf_mair_el3);
    DBG_PRINTF("* MSMPteCount           : 0x%.X\n\r", tbaseBootCfg.MSMPteCount);
    DBG_PRINTF("* MSMBase               : 0x%.X\n\r", tbaseBootCfg.MSMBase);
    DBG_PRINTF("* gic_distributor_base  : 0x%.X\n\r", tbaseBootCfg.gic_distributor_base);
    DBG_PRINTF("* gic_cpuinterface_base : 0x%.X\n\r", tbaseBootCfg.gic_cpuinterface_base);
    DBG_PRINTF("* gic_version           : 0x%.X\n\r", tbaseBootCfg.gic_version);
    DBG_PRINTF("* total_number_spi      : 0x%.X\n\r", tbaseBootCfg.total_number_spi);
    DBG_PRINTF("* ssiq_number           : 0x%.X\n\r", tbaseBootCfg.ssiq_number);
    DBG_PRINTF("* flags                 : 0x%.X\n\r", tbaseBootCfg.flags);

    // ***********************************************************************
    // tbaseBootCfg and l2 entries may be accessed uncached, so flush them.
    flush_dcache_range((unsigned long)&tbaseBootCfg, sizeof(bootCfg_t));
    flush_dcache_range((unsigned long)&registerFileL2, sizeof(registerFileL2));

    // ***********************************************************************
    // Set registers for tbase initialization entry
    cpu_context_t *s_entry_context = &tbase_ctx->cpu_ctx;
    gp_regs_t *s_entry_gpregs = get_gpregs_ctx(s_entry_context);
    // The original wrote CTX_GPREG_X1 twice; X0 is presumably intended here.
    write_ctx_reg(s_entry_gpregs, CTX_GPREG_X0, 0);
    write_ctx_reg(s_entry_gpregs, CTX_GPREG_X1, (int64_t)&tbaseBootCfg);

    // SPSR for SMC handling (FIQ mode)
    tbaseEntrySpsr = TBASE_ENTRY_SPSR;

    DBG_PRINTF("tbase init SPSR 0x%x\n\r",
               read_ctx_reg(get_el3state_ctx(&tbase_ctx->cpu_ctx),
                            CTX_SPSR_EL3));
    DBG_PRINTF("tbase SMC SPSR %x\n\r", tbaseEntrySpsr);

    // ***********************************************************************
    // Start tbase

    tbase_synchronous_sp_entry(tbase_ctx);
    tbase_ctx->state = TBASE_STATE_ON;

#if TBASE_PM_ENABLE
    // Register power management hooks with PSCI
    psci_register_spd_pm_hook(&tbase_pm);
#endif

    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);

    return 1;
}
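/*
 * Assumed semantics of the page_align() helper used throughout the tbase
 * code above (a sketch; the real implementation may differ). UP rounds up
 * to the next page boundary, DOWN truncates to the current one.
 */
static uint64_t page_align_sketch(uint64_t value, int dir)
{
    uint64_t mask = PAGE_SIZE - 1ULL;

    return (dir == UP) ? ((value + mask) & ~mask) : (value & ~mask);
}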
/******************************************************************************* * This function is responsible for handling all SMCs in the Trusted OS/App * range from the non-secure state as defined in the SMC Calling Convention * Document. It is also responsible for communicating with the Secure payload * to delegate work and return results back to the non-secure state. Lastly it * will also return any information that the secure payload needs to do the * work assigned to it. ******************************************************************************/ uint64_t tlkd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags) { cpu_context_t *ns_cpu_context; gp_regs_t *gp_regs; uint32_t ns; uint64_t par; /* Passing a NULL context is a critical programming error */ assert(handle); /* These SMCs are only supported by CPU0 */ if ((read_mpidr() & MPIDR_CPU_MASK) != 0) SMC_RET1(handle, SMC_UNK); /* Determine which security state this SMC originated from */ ns = is_caller_non_secure(flags); switch (smc_fid) { /* * This function ID is used by SP to indicate that it was * preempted by a non-secure world IRQ. */ case TLK_PREEMPTED: if (ns) SMC_RET1(handle, SMC_UNK); assert(handle == cm_get_context(SECURE)); cm_el1_sysregs_context_save(SECURE); /* Get a reference to the non-secure context */ ns_cpu_context = cm_get_context(NON_SECURE); assert(ns_cpu_context); /* * Restore non-secure state. There is no need to save the * secure system register context since the SP was supposed * to preserve it during S-EL1 interrupt handling. */ cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); SMC_RET1(ns_cpu_context, x1); /* * Request from non secure world to resume the preempted * Standard SMC call. */ case TLK_RESUME_FID: /* RESUME should be invoked only by normal world */ if (!ns) SMC_RET1(handle, SMC_UNK); /* * This is a resume request from the non-secure client. * save the non-secure state and send the request to * the secure payload. */ assert(handle == cm_get_context(NON_SECURE)); /* Check if we are already preempted before resume */ if (!get_std_smc_active_flag(tlk_ctx.state)) SMC_RET1(handle, SMC_UNK); cm_el1_sysregs_context_save(NON_SECURE); /* * We are done stashing the non-secure context. Ask the * secure payload to do the work now. */ /* We just need to return to the preempted point in * SP and the execution will resume as normal. */ cm_el1_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); SMC_RET0(handle); /* * This is a request from the non-secure context to: * * a. register shared memory with the SP for storing it's * activity logs. * b. register shared memory with the SP for passing args * required for maintaining sessions with the Trusted * Applications. * c. open/close sessions * d. issue commands to the Trusted Apps */ case TLK_REGISTER_LOGBUF: case TLK_REGISTER_REQBUF: case TLK_OPEN_TA_SESSION: case TLK_CLOSE_TA_SESSION: case TLK_TA_LAUNCH_OP: case TLK_TA_SEND_EVENT: if (!ns) SMC_RET1(handle, SMC_UNK); /* * This is a fresh request from the non-secure client. * The parameters are in x1 and x2. Figure out which * registers need to be preserved, save the non-secure * state and send the request to the secure payload. */ assert(handle == cm_get_context(NON_SECURE)); /* Check if we are already preempted */ if (get_std_smc_active_flag(tlk_ctx.state)) SMC_RET1(handle, SMC_UNK); cm_el1_sysregs_context_save(NON_SECURE); /* * Verify if there is a valid context to use. 
		 */
		assert(&tlk_ctx.cpu_ctx == cm_get_context(SECURE));

		/*
		 * Mark the SP state as active.
		 */
		set_std_smc_active_flag(tlk_ctx.state);

		/*
		 * We are done stashing the non-secure context. Ask the
		 * secure payload to do the work now.
		 */
		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		/*
		 * TLK is a 32-bit Trusted OS and so expects the SMC
		 * arguments via r0-r7. TLK expects the monitor frame
		 * registers to be 64-bits long. Hence, we pass x0 in
		 * r0-r1, x1 in r2-r3, x2 in r4-r5 and x3 in r6-r7.
		 *
		 * As smc_fid is a uint32 value, r1 contains 0.
		 */
		gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
		write_ctx_reg(gp_regs, CTX_GPREG_X4, (uint32_t)x2);
		write_ctx_reg(gp_regs, CTX_GPREG_X5, (uint32_t)(x2 >> 32));
		write_ctx_reg(gp_regs, CTX_GPREG_X6, (uint32_t)x3);
		write_ctx_reg(gp_regs, CTX_GPREG_X7, (uint32_t)(x3 >> 32));

		SMC_RET4(&tlk_ctx.cpu_ctx, smc_fid, 0, (uint32_t)x1,
			 (uint32_t)(x1 >> 32));

	/*
	 * Translate NS/EL1-S virtual addresses.
	 *
	 * x1 = virtual address
	 * x3 = type (NS/S)
	 *
	 * Returns PA:lo in r0, PA:hi in r1.
	 */
	case TLK_VA_TRANSLATE:

		/* Should be invoked only by the secure world */
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/* NS virtual addresses are 64-bit long */
		if (x3 & TLK_TRANSLATE_NS_VADDR)
			x1 = (uint32_t)x1 | (x2 << 32);

		if (!x1)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * TODO: Sanity check x1. This would require platform
		 * support.
		 */

		/* Virtual address and type: NS/S */
		par = tlkd_va_translate(x1, x3);

		/* Return the physical address in r0-r1 */
		SMC_RET4(handle, (uint32_t)par, (uint32_t)(par >> 32), 0, 0);

	/*
	 * This is a request from the SP to mark completion of
	 * a standard function ID.
	 */
	case TLK_REQUEST_DONE:

		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Mark the SP state as inactive.
		 */
		clr_std_smc_active_flag(tlk_ctx.state);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * This is a request completion SMC and we must switch to
		 * the non-secure world to pass the result.
		 */
		cm_el1_sysregs_context_save(SECURE);

		/*
		 * We are done stashing the secure context. Switch to the
		 * non-secure context and return the result.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET1(ns_cpu_context, x1);

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot.
	 */
	case TLK_ENTRY_DONE:

		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * The SP has been successfully initialized. Register power
		 * management hooks with PSCI.
		 */
		psci_register_spd_pm_hook(&tlkd_pm_ops);

		/*
		 * TLK reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		tlkd_synchronous_sp_exit(&tlk_ctx, x1);

	/*
	 * Return the number of service function IDs implemented to
	 * provide service to the non-secure world.
	 */
	case TOS_CALL_COUNT:
		SMC_RET1(handle, TLK_NUM_FID);

	/*
	 * Return TLK's UID to the caller.
	 */
	case TOS_UID:
		SMC_UUID_RET(handle, tlk_uuid);

	/*
	 * Return the version of the current implementation.
	 */
	case TOS_CALL_VERSION:
		SMC_RET2(handle, TLK_VERSION_MAJOR, TLK_VERSION_MINOR);

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
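/*******************************************************************************
 * Sketch (not part of the original sources): because TLK is a 32-bit Trusted
 * OS, every 64-bit value crossing the monitor frame must be split into a
 * lo/hi 32-bit register pair on entry and reassembled on return (as
 * TLK_VA_TRANSLATE does for the PA in r0-r1). The helpers below make the
 * open-coded shifts in the handler explicit; the names lo32/hi32/join64 are
 * hypothetical.
 ******************************************************************************/
#include <stdint.h>

static inline uint32_t lo32(uint64_t v)
{
	return (uint32_t)v;		/* bits [31:0] -> low register of the pair */
}

static inline uint32_t hi32(uint64_t v)
{
	return (uint32_t)(v >> 32);	/* bits [63:32] -> high register of the pair */
}

static inline uint64_t join64(uint32_t lo, uint32_t hi)
{
	return (uint64_t)lo | ((uint64_t)hi << 32);
}

/*
 * With these, the argument marshalling for a fresh request reads:
 *
 *	write_ctx_reg(gp_regs, CTX_GPREG_X4, lo32(x2));
 *	write_ctx_reg(gp_regs, CTX_GPREG_X5, hi32(x2));
 *	write_ctx_reg(gp_regs, CTX_GPREG_X6, lo32(x3));
 *	write_ctx_reg(gp_regs, CTX_GPREG_X7, hi32(x3));
 *
 * and TLK_VA_TRANSLATE's NS-vaddr reassembly is
 * join64((uint32_t)x1, (uint32_t)x2).
 */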
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the XILSP to
 * delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the XILSP needs to do the work
 * assigned to it.
 ******************************************************************************/
uint64_t xilspd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			    uint64_t x3, uint64_t x4, void *cookie,
			    void *handle, uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	xilsp_context_t *xilsp_ctx = &xilspd_sp_context[linear_id];

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {

	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot.
	 */
	case XILSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu.
		 */
		assert(xilsp_vectors == NULL);
		xilsp_vectors = (xilsp_vectors_t *)x1;

		if (xilsp_vectors)
			set_xilsp_pstate(xilsp_ctx->state, XILSP_PSTATE_ON);

		/*
		 * The SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		xilspd_synchronous_sp_exit(xilsp_ctx, x1);
		break;

	case XILSP_ARITH:
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * Figure out which registers need to be preserved,
			 * save the non-secure state and send the request to
			 * the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));
			cm_el1_sysregs_context_save(NON_SECURE);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify that there is a valid context to use, copy
			 * the operation type and parameters to the secure
			 * context and jump to the fast smc entry point in the
			 * secure payload. Entry into S-EL1 will take place
			 * upon exit from this function.
			 */
			assert(&xilsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/*
			 * Set the appropriate entry for the SMC. We expect
			 * the XILSP to manage the PSTATE.I and PSTATE.F
			 * flags as appropriate.
			 */
			cm_set_elr_el3(SECURE,
				(uint64_t)&xilsp_vectors->fast_smc_entry);

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);

			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X4,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X4));
			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X5,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X5));
			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X6,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X6));
			/* Propagate the hypervisor client ID */
			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X7,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X7));

			SMC_RET4(&xilsp_ctx->cpu_ctx, smc_fid, x1, x2, x3);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x4. Copy
			 * them into the non-secure context, save the secure
			 * state and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);

			SMC_RET4(ns_cpu_context, x1, x2, x3, x4);
		}
		break;

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
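/*******************************************************************************
 * Sketch (not part of the original sources): a non-secure EL1 client could
 * invoke XILSP_ARITH as below. Only the register conventions follow from the
 * handler above (function ID in x0, arguments in x1-x3 plus x4-x7, results
 * returned in x0-x3 via SMC_RET4()); the wrapper name and the argument
 * meanings are assumptions. AArch64 GCC/Clang inline assembly.
 ******************************************************************************/
#include <stdint.h>

typedef struct {
	uint64_t r0, r1, r2, r3;
} smc_result_t;

static smc_result_t xilsp_arith_smc(uint32_t fid, uint64_t a, uint64_t b,
				    uint64_t c)
{
	register uint64_t x0 __asm__("x0") = fid;
	register uint64_t x1 __asm__("x1") = a;
	register uint64_t x2 __asm__("x2") = b;
	register uint64_t x3 __asm__("x3") = c;

	/*
	 * Trap to EL3: xilspd_smc_handler() forwards the call to the XILSP
	 * and the results land back in x0-x3. Per the SMC Calling
	 * Convention, x4-x17 may be clobbered across the call.
	 */
	__asm__ volatile("smc #0"
			 : "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3)
			 :
			 : "x4", "x5", "x6", "x7", "x8", "x9", "x10", "x11",
			   "x12", "x13", "x14", "x15", "x16", "x17", "memory");

	return (smc_result_t){ x0, x1, x2, x3 };
}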