/*******************************************************************************
 * The following function initializes the cpu_context 'ctx' for
 * first use, and sets the initial entrypoint state as specified by the
 * entry_point_info structure.
 *
 * The security state to initialize is determined by the SECURE attribute
 * of the entry_point_info. The function returns a pointer to the initialized
 * context and sets this as the next context to return to.
 *
 * The EE and ST attributes are used to configure the endianness and secure
 * timer availability for the new execution context.
 *
 * To prepare the register state for entry call cm_prepare_el3_exit() and
 * el3_exit(). For Secure-EL1 cm_prepare_el3_exit() is equivalent to
 * cm_el1_sysregs_context_restore().
 ******************************************************************************/
static void cm_init_context_common(cpu_context_t *ctx, const entry_point_info_t *ep)
{
    unsigned int security_state;
    uint32_t scr, sctlr;
    regs_t *reg_ctx;

    assert(ctx);

    security_state = GET_SECURITY_STATE(ep->h.attr);

    /* Clear any residual register values from the context */
    memset(ctx, 0, sizeof(*ctx));

    reg_ctx = get_regs_ctx(ctx);

    /*
     * Base the context SCR on the current value, adjust for entry point
     * specific requirements
     */
    scr = read_scr();
    scr &= ~(SCR_NS_BIT | SCR_HCE_BIT);

    if (security_state != SECURE)
        scr |= SCR_NS_BIT;

    /*
     * Set up SCTLR for the Non-secure context.
     * EE bit is taken from the entrypoint attributes.
     * M, C and I bits must be zero (as required by PSCI specification).
     *
     * The target exception level is based on the spsr mode requested.
     * If execution is requested to hyp mode, HVC is enabled
     * via SCR.HCE.
     *
     * Always compute the SCTLR value and save it in the cpu_context
     * - the HYP registers are set up by cm_prepare_el3_exit() as they
     * are not part of the stored cpu_context.
     *
     * TODO: In debug builds the spsr should be validated and checked
     * against the CPU support, security state, endianness and pc
     */
    if (security_state != SECURE) {
        sctlr = EP_GET_EE(ep->h.attr) ? SCTLR_EE_BIT : 0;
        sctlr |= SCTLR_RES1;
        write_ctx_reg(reg_ctx, CTX_NS_SCTLR, sctlr);
    }

    if (GET_M32(ep->spsr) == MODE32_hyp)
        scr |= SCR_HCE_BIT;

    write_ctx_reg(reg_ctx, CTX_SCR, scr);
    write_ctx_reg(reg_ctx, CTX_LR, ep->pc);
    write_ctx_reg(reg_ctx, CTX_SPSR, ep->spsr);

    /*
     * Store the r0-r3 values from the entrypoint into the context.
     * Use memcpy as we are in control of the layout of the structures.
     */
    memcpy((void *)reg_ctx, (void *)&ep->args, sizeof(aapcs32_params_t));
}
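/*
 * Illustrative usage only (not part of the original source): a minimal sketch
 * of how a service dispatcher typically consumes a context initialised by the
 * function above, assuming the public context-management wrappers
 * cm_init_my_context(), cm_prepare_el3_exit() and cm_set_next_eret_context()
 * declared in context_mgmt.h. SP_ENTRY_ADDRESS is a placeholder; the SPSR
 * shown is for an AArch64 S-EL1 payload.
 */
static void spd_enter_secure_payload_sketch(void)
{
    entry_point_info_t sp_ep;

    /* Describe the secure entry point: PC, SPSR and the SECURE attribute. */
    SET_PARAM_HEAD(&sp_ep, PARAM_EP, VERSION_1, SECURE | EP_ST_ENABLE);
    sp_ep.pc = SP_ENTRY_ADDRESS;    /* placeholder entry point */
    sp_ep.spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
    zeromem(&sp_ep.args, sizeof(sp_ep.args));

    /* Initialise this CPU's secure context from the entry point info. */
    cm_init_my_context(&sp_ep);

    /* Prepare the register state and select the context to ERET to. */
    cm_prepare_el3_exit(SECURE);
    cm_set_next_eret_context(SECURE);
    /* el3_exit() (or an SMC_RET macro) then performs the actual ERET. */
}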
/* * Program Priority Mask to the original Non-secure priority such that * Non-secure interrupts may preempt Secure execution, viz. during Yielding SMC * calls. The 'preempt_ret_code' parameter indicates the Yielding SMC's return * value in case the call was preempted. * * This API is expected to be invoked before delegating a yielding SMC to Secure * EL1. I.e. within the window of secure execution after Non-secure context is * saved (after entry into EL3) and Secure context is restored (before entering * Secure EL1). */ void ehf_allow_ns_preemption(uint64_t preempt_ret_code) { cpu_context_t *ns_ctx; unsigned int old_pmr __unused; pe_exc_data_t *pe_data = this_cpu_data(); /* * We should have been notified earlier of entering secure world, and * therefore have stashed the Non-secure priority mask. */ assert(pe_data->ns_pri_mask != 0); /* Make sure no priority levels are active when requesting this */ if (has_valid_pri_activations(pe_data)) { ERROR("PE %lx has priority activations: 0x%x\n", read_mpidr_el1(), pe_data->active_pri_bits); panic(); } /* * Program preempted return code to x0 right away so that, if the * Yielding SMC was indeed preempted before a dispatcher gets a chance * to populate it, the caller would find the correct return value. */ ns_ctx = cm_get_context(NON_SECURE); assert(ns_ctx); write_ctx_reg(get_gpregs_ctx(ns_ctx), CTX_GPREG_X0, preempt_ret_code); old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask); EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask); pe_data->ns_pri_mask = 0; }
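/*
 * Illustrative usage only (not part of the original source): a dispatcher
 * would typically call ehf_allow_ns_preemption() just before handing a
 * Yielding SMC to the secure payload, i.e. after the Non-secure context has
 * been saved and before the Secure context is restored. SP_PREEMPTED_RET is a
 * placeholder for the dispatcher-specific "call was preempted" return code;
 * EL3_EXCEPTION_HANDLING is the build option that enables this framework.
 */
static void spd_delegate_yielding_smc_sketch(void)
{
    /* Non-secure state has already been captured on entry to EL3. */
    cm_el1_sysregs_context_save(NON_SECURE);

#if EL3_EXCEPTION_HANDLING
    /*
     * Allow Non-secure interrupts to preempt the Yielding SMC; if that
     * happens before a dispatcher populates x0, the caller sees
     * SP_PREEMPTED_RET.
     */
    ehf_allow_ns_preemption(SP_PREEMPTED_RET);
#endif

    /* Now restore the Secure context and return into the payload. */
    cm_el1_sysregs_context_restore(SECURE);
    cm_set_next_eret_context(SECURE);
}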
/*******************************************************************************
 * This cpu has resumed from suspend. The SPD saved the TSP context when it
 * completed the preceding suspend call. Use that context to program an entry
 * into the TSP to allow it to do any remaining bookkeeping.
 ******************************************************************************/
static void tspd_cpu_suspend_finish_handler(uint64_t max_off_pwrlvl)
{
    int32_t rc = 0;
    uint32_t linear_id = plat_my_core_pos();
    tsp_context_t *tsp_ctx = &tspd_sp_context[linear_id];

    assert(tsp_vectors);
    assert(get_tsp_pstate(tsp_ctx->state) == TSP_PSTATE_SUSPEND);

    /* Program the entry point, max_off_pwrlvl and enter the SP */
    write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), CTX_GPREG_X0,
                  max_off_pwrlvl);
    cm_set_elr_el3(SECURE, (uint64_t) &tsp_vectors->cpu_resume_entry);
    rc = tspd_synchronous_sp_entry(tsp_ctx);

    /*
     * Read the response from the TSP. A non-zero return means that
     * something went wrong while communicating with the TSP.
     */
    if (rc != 0)
        panic();

    /* Update its context to reflect the state the SP is in */
    set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_ON);
}
/******************************************************************************* * This cpu is being suspended. S-EL1 state must have been saved in the * resident cpu (mpidr format) if it is a UP/UP migratable TSP. ******************************************************************************/ static void tspd_cpu_suspend_handler(uint64_t power_state) { int32_t rc = 0; uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); tsp_context *tsp_ctx = &tspd_sp_context[linear_id]; assert(tsp_entry_info); assert(tsp_ctx->state == TSP_STATE_ON); /* Program the entry point, power_state parameter and enter the TSP */ write_ctx_reg(get_gpregs_ctx(&tsp_ctx->cpu_ctx), CTX_GPREG_X0, power_state); cm_set_el3_elr(SECURE, (uint64_t) tsp_entry_info->cpu_suspend_entry); rc = tspd_synchronous_sp_entry(tsp_ctx); /* * Read the response from the TSP. A non-zero return means that * something went wrong while communicating with the TSP. */ if (rc != 0) panic(); /* Update its context to reflect the state the TSP is in */ tsp_ctx->state = TSP_STATE_SUSPEND; }
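/*
 * Illustrative usage only (not part of the original source): the suspend
 * handlers above are not called directly. Once the payload has initialised,
 * the dispatcher registers them with the PSCI framework. A minimal sketch,
 * assuming the spd_pm_ops_t layout used by this codebase; the remaining hooks
 * would be filled in the same way.
 */
static const spd_pm_ops_t tspd_pm_sketch = {
    .svc_suspend        = tspd_cpu_suspend_handler,
    .svc_suspend_finish = tspd_cpu_suspend_finish_handler,
    /* .svc_on, .svc_off, .svc_on_finish, etc. registered likewise */
};

static void tspd_register_pm_hooks_sketch(void)
{
    /* Typically done once, from the "entry done" path of the SMC handler. */
    psci_register_spd_pm_hook(&tspd_pm_sketch);
}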
static int32_t tbase_init_secure_context(tbase_context *tbase_ctx) { uint32_t sctlr = read_sctlr_el3(); el1_sys_regs_t *el1_state; uint64_t mpidr = read_mpidr(); /* Passing a NULL context is a critical programming error */ assert(tbase_ctx); DBG_PRINTF("tbase_init_secure_context\n\r"); memset(tbase_ctx, 0, sizeof(*tbase_ctx)); /* Get a pointer to the S-EL1 context memory */ el1_state = get_sysregs_ctx(&tbase_ctx->cpu_ctx); // Program the sctlr for S-EL1 execution with caches and mmu off sctlr &= SCTLR_EE_BIT; sctlr |= SCTLR_EL1_RES1; write_ctx_reg(el1_state, CTX_SCTLR_EL1, sctlr); /* Set this context as ready to be initialised i.e OFF */ tbase_ctx->state = TBASE_STATE_OFF; /* Associate this context with the cpu specified */ tbase_ctx->mpidr = mpidr; // Set up cm context for this core cm_set_context(mpidr, &tbase_ctx->cpu_ctx, SECURE); // cm_init_exception_stack(mpidr, SECURE); return 0; }
void tbase_setup_entry_nwd( cpu_context_t *ns_context, uint32_t call_offset ) { uint64_t registerAddress = (int64_t)get_gpregs_ctx(ns_context); // Set up registers cpu_context_t *s_context = (cpu_context_t *) cm_get_context(SECURE); gp_regs_t *s_gpregs = get_gpregs_ctx(s_context); // Offset into registerFile uint64_t registerOffset = registerAddress -registerFileStart[REGISTER_FILE_NWD]; write_ctx_reg(s_gpregs, CTX_GPREG_X0, registerOffset); // Flags write_ctx_reg(s_gpregs, CTX_GPREG_X1, (TBASE_NWD_REGISTER_COUNT<<8) | TBASE_SMC_NWD); tbase_setup_entry_common( s_context, ns_context, call_offset ); }
static void tbase_setup_entry_common( cpu_context_t *s_context, cpu_context_t *ns_context, uint32_t call_offset) { // Set up registers gp_regs_t *s_gpregs = get_gpregs_ctx(s_context); // NWd spsr uint64_t ns_spsr = read_ctx_reg(get_el3state_ctx(ns_context), CTX_SPSR_EL3); write_ctx_reg(s_gpregs, CTX_GPREG_X2, ns_spsr); // Entry to tbase el3_state_t *el3sysregs = get_el3state_ctx(s_context); write_ctx_reg(el3sysregs, CTX_SPSR_EL3, tbaseEntrySpsr); cm_set_elr_el3(SECURE,tbaseEntryBase+call_offset); }
void tbase_setup_entry_monitor( cpu_context_t *ns_context ) { uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); uint64_t registerAddress = (int64_t)secure_context[linear_id].monitorCallRegs; // Set up registers cpu_context_t *s_context = (cpu_context_t *) cm_get_context(SECURE); gp_regs_t *s_gpregs = get_gpregs_ctx(s_context); // Offset into registerFile uint64_t registerOffset = registerAddress -registerFileStart[REGISTER_FILE_MONITOR]; write_ctx_reg(s_gpregs, CTX_GPREG_X0, registerOffset); // Flags write_ctx_reg(s_gpregs, CTX_GPREG_X1, (TBASE_MAX_MONITOR_CALL_REGS<<8) | TBASE_SMC_MONITOR); tbase_setup_entry_common( s_context, ns_context, ENTRY_OFFSET_FASTCALL ); }
/******************************************************************************* * This function is responsible for handling all T210 SiP calls ******************************************************************************/ int plat_sip_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, const void *cookie, void *handle, uint64_t flags) { uint32_t val, ns; /* Determine which security state this SMC originated from */ ns = is_caller_non_secure(flags); if (!ns) SMC_RET1(handle, SMC_UNK); switch (smc_fid) { case TEGRA_SIP_PMC_COMMANDS: /* check the address is within PMC range and is 4byte aligned */ if ((x2 >= TEGRA_PMC_SIZE) || (x2 & 0x3)) return -EINVAL; /* pmc_secure_scratch registers are not accessible */ if (((x2 >= PMC_SECURE_SCRATCH0) && (x2 <= PMC_SECURE_SCRATCH5)) || ((x2 >= PMC_SECURE_SCRATCH6) && (x2 <= PMC_SECURE_SCRATCH7)) || ((x2 >= PMC_SECURE_SCRATCH8) && (x2 <= PMC_SECURE_SCRATCH79)) || ((x2 >= PMC_SECURE_SCRATCH80) && (x2 <= PMC_SECURE_SCRATCH119))) return -EFAULT; /* PMC secure-only registers are not accessible */ if ((x2 == PMC_DPD_ENABLE_0) || (x2 == PMC_FUSE_CONTROL_0) || (x2 == PMC_CRYPTO_OP_0)) return -EFAULT; /* Perform PMC read/write */ if (x1 == PMC_READ) { val = mmio_read_32((uint32_t)(TEGRA_PMC_BASE + x2)); write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1, val); } else if (x1 == PMC_WRITE) { mmio_write_32((uint32_t)(TEGRA_PMC_BASE + x2), (uint32_t)x3); } else { return -EINVAL; } break; default: ERROR("%s: unsupported function ID\n", __func__); return -ENOTSUP; } return 0; }
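/*
 * Illustrative usage only (not part of the original source): from the
 * non-secure world (e.g. a Linux driver) the PMC service above would be
 * reached through a standard SMCCC call. A hedged sketch, assuming the
 * kernel's arm_smccc_smc() helper and treating TEGRA_SIP_PMC_COMMANDS,
 * PMC_READ and the register offset as values agreed with this firmware.
 */
#include <linux/arm-smccc.h>

static u32 tegra_pmc_smc_read_sketch(u32 offset)
{
    struct arm_smccc_res res;

    /* x1 = PMC_READ, x2 = 4-byte-aligned offset within the PMC aperture */
    arm_smccc_smc(TEGRA_SIP_PMC_COMMANDS, PMC_READ, offset, 0, 0, 0, 0, 0,
                  &res);

    /* On success the handler above places the value read in X1. */
    return (u32)res.a1;
}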
/******************************************************************************* * This function is responsible for handling all SMCs in the Trusted OS/App * range from the non-secure state as defined in the SMC Calling Convention * Document. It is also responsible for communicating with the Secure payload * to delegate work and return results back to the non-secure state. Lastly it * will also return any information that the secure payload needs to do the * work assigned to it. ******************************************************************************/ uint64_t tlkd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags) { cpu_context_t *ns_cpu_context; gp_regs_t *gp_regs; uint32_t ns; uint64_t par; /* Passing a NULL context is a critical programming error */ assert(handle); /* These SMCs are only supported by CPU0 */ if ((read_mpidr() & MPIDR_CPU_MASK) != 0) SMC_RET1(handle, SMC_UNK); /* Determine which security state this SMC originated from */ ns = is_caller_non_secure(flags); switch (smc_fid) { /* * This function ID is used by SP to indicate that it was * preempted by a non-secure world IRQ. */ case TLK_PREEMPTED: if (ns) SMC_RET1(handle, SMC_UNK); assert(handle == cm_get_context(SECURE)); cm_el1_sysregs_context_save(SECURE); /* Get a reference to the non-secure context */ ns_cpu_context = cm_get_context(NON_SECURE); assert(ns_cpu_context); /* * Restore non-secure state. There is no need to save the * secure system register context since the SP was supposed * to preserve it during S-EL1 interrupt handling. */ cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); SMC_RET1(ns_cpu_context, x1); /* * Request from non secure world to resume the preempted * Standard SMC call. */ case TLK_RESUME_FID: /* RESUME should be invoked only by normal world */ if (!ns) SMC_RET1(handle, SMC_UNK); /* * This is a resume request from the non-secure client. * save the non-secure state and send the request to * the secure payload. */ assert(handle == cm_get_context(NON_SECURE)); /* Check if we are already preempted before resume */ if (!get_std_smc_active_flag(tlk_ctx.state)) SMC_RET1(handle, SMC_UNK); cm_el1_sysregs_context_save(NON_SECURE); /* * We are done stashing the non-secure context. Ask the * secure payload to do the work now. */ /* We just need to return to the preempted point in * SP and the execution will resume as normal. */ cm_el1_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); SMC_RET0(handle); /* * This is a request from the non-secure context to: * * a. register shared memory with the SP for storing it's * activity logs. * b. register shared memory with the SP for passing args * required for maintaining sessions with the Trusted * Applications. * c. open/close sessions * d. issue commands to the Trusted Apps */ case TLK_REGISTER_LOGBUF: case TLK_REGISTER_REQBUF: case TLK_OPEN_TA_SESSION: case TLK_CLOSE_TA_SESSION: case TLK_TA_LAUNCH_OP: case TLK_TA_SEND_EVENT: if (!ns) SMC_RET1(handle, SMC_UNK); /* * This is a fresh request from the non-secure client. * The parameters are in x1 and x2. Figure out which * registers need to be preserved, save the non-secure * state and send the request to the secure payload. */ assert(handle == cm_get_context(NON_SECURE)); /* Check if we are already preempted */ if (get_std_smc_active_flag(tlk_ctx.state)) SMC_RET1(handle, SMC_UNK); cm_el1_sysregs_context_save(NON_SECURE); /* * Verify if there is a valid context to use. 
         */
        assert(&tlk_ctx.cpu_ctx == cm_get_context(SECURE));

        /*
         * Mark the SP state as active.
         */
        set_std_smc_active_flag(tlk_ctx.state);

        /*
         * We are done stashing the non-secure context. Ask the
         * secure payload to do the work now.
         */
        cm_el1_sysregs_context_restore(SECURE);
        cm_set_next_eret_context(SECURE);

        /*
         * TLK is a 32-bit Trusted OS and so expects the SMC
         * arguments via r0-r7. TLK expects the monitor frame
         * registers to be 64-bits long. Hence, we pass x0 in
         * r0-r1, x1 in r2-r3, x2 in r4-r5 and x3 in r6-r7.
         *
         * As smc_fid is a uint32 value, r1 contains 0.
         */
        gp_regs = get_gpregs_ctx(&tlk_ctx.cpu_ctx);
        write_ctx_reg(gp_regs, CTX_GPREG_X4, (uint32_t)x2);
        write_ctx_reg(gp_regs, CTX_GPREG_X5, (uint32_t)(x2 >> 32));
        write_ctx_reg(gp_regs, CTX_GPREG_X6, (uint32_t)x3);
        write_ctx_reg(gp_regs, CTX_GPREG_X7, (uint32_t)(x3 >> 32));

        SMC_RET4(&tlk_ctx.cpu_ctx, smc_fid, 0, (uint32_t)x1,
                 (uint32_t)(x1 >> 32));

    /*
     * Translate NS/EL1-S virtual addresses.
     *
     * x1 = virtual address
     * x3 = type (NS/S)
     *
     * Returns PA:lo in r0, PA:hi in r1.
     */
    case TLK_VA_TRANSLATE:

        /* Should be invoked only by secure world */
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        /* NS virtual addresses are 64-bit long */
        if (x3 & TLK_TRANSLATE_NS_VADDR)
            x1 = (uint32_t)x1 | (x2 << 32);

        if (!x1)
            SMC_RET1(handle, SMC_UNK);

        /*
         * TODO: Sanity check x1. This would require platform
         * support.
         */

        /* virtual address and type: ns/s */
        par = tlkd_va_translate(x1, x3);

        /* return physical address in r0-r1 */
        SMC_RET4(handle, (uint32_t)par, (uint32_t)(par >> 32), 0, 0);

    /*
     * This is a request from the SP to mark completion of
     * a standard function ID.
     */
    case TLK_REQUEST_DONE:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        /*
         * Mark the SP state as inactive.
         */
        clr_std_smc_active_flag(tlk_ctx.state);

        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /*
         * This is a request completion SMC and we must switch to
         * the non-secure world to pass the result.
         */
        cm_el1_sysregs_context_save(SECURE);

        /*
         * We are done stashing the secure context. Switch to the
         * non-secure context and return the result.
         */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);
        SMC_RET1(ns_cpu_context, x1);

    /*
     * This function ID is used only by the SP to indicate it has
     * finished initialising itself after a cold boot
     */
    case TLK_ENTRY_DONE:
        if (ns)
            SMC_RET1(handle, SMC_UNK);

        /*
         * SP has been successfully initialized. Register power
         * management hooks with PSCI
         */
        psci_register_spd_pm_hook(&tlkd_pm_ops);

        /*
         * TLK reports completion. The SPD must have initiated
         * the original request through a synchronous entry
         * into the SP. Jump back to the original C runtime
         * context.
         */
        tlkd_synchronous_sp_exit(&tlk_ctx, x1);

    /*
     * Return the number of service function IDs implemented to
     * provide service to non-secure
     */
    case TOS_CALL_COUNT:
        SMC_RET1(handle, TLK_NUM_FID);

    /*
     * Return TLK's UID to the caller
     */
    case TOS_UID:
        SMC_UUID_RET(handle, tlk_uuid);

    /*
     * Return the version of current implementation
     */
    case TOS_CALL_VERSION:
        SMC_RET2(handle, TLK_VERSION_MAJOR, TLK_VERSION_MINOR);

    default:
        break;
    }

    SMC_RET1(handle, SMC_UNK);
}
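/*
 * Illustrative sketch only (not part of the original source): on the TLK side
 * the 64-bit SMC arguments arrive split across pairs of 32-bit registers, as
 * described in the comment above (x1 in r2-r3, x2 in r4-r5, x3 in r6-r7).
 * Reassembling one such argument is a simple shift-and-or:
 */
static inline uint64_t tlk_rebuild_arg64_sketch(uint32_t lo, uint32_t hi)
{
    /* e.g. lo = r2, hi = r3 for the first 64-bit SMC argument */
    return ((uint64_t)hi << 32) | lo;
}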
/******************************************************************************* * This function is responsible for handling all SMCs in the Trusted OS/App * range from the non-secure state as defined in the SMC Calling Convention * Document. It is also responsible for communicating with the Secure * payload to delegate work and return results back to the non-secure * state. Lastly it will also return any information that OPTEE needs to do * the work assigned to it. ******************************************************************************/ uint64_t opteed_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags) { cpu_context_t *ns_cpu_context; unsigned long mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); optee_context_t *optee_ctx = &opteed_sp_context[linear_id]; uint64_t rc; /* * Determine which security state this SMC originated from */ if (is_caller_non_secure(flags)) { /* * This is a fresh request from the non-secure client. * The parameters are in x1 and x2. Figure out which * registers need to be preserved, save the non-secure * state and send the request to the secure payload. */ assert(handle == cm_get_context(NON_SECURE)); cm_el1_sysregs_context_save(NON_SECURE); /* * We are done stashing the non-secure context. Ask the * OPTEE to do the work now. */ /* * Verify if there is a valid context to use, copy the * operation type and parameters to the secure context * and jump to the fast smc entry point in the secure * payload. Entry into S-EL1 will take place upon exit * from this function. */ assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE)); /* Set appropriate entry for SMC. * We expect OPTEE to manage the PSTATE.I and PSTATE.F * flags as appropriate. */ if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) { cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->fast_smc_entry); } else { cm_set_elr_el3(SECURE, (uint64_t) &optee_vectors->std_smc_entry); } cm_el1_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); /* Propagate hypervisor client ID */ write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx), CTX_GPREG_X7, read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X7)); SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3); } /* * Returning from OPTEE */ switch (smc_fid) { /* * OPTEE has finished initialising itself after a cold boot */ case TEESMC_OPTEED_RETURN_ENTRY_DONE: /* * Stash the OPTEE entry points information. This is done * only once on the primary cpu */ assert(optee_vectors == NULL); optee_vectors = (optee_vectors_t *) x1; if (optee_vectors) { set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON); /* * OPTEE has been successfully initialized. * Register power management hooks with PSCI */ psci_register_spd_pm_hook(&opteed_pm); /* * Register an interrupt handler for S-EL1 interrupts * when generated during code executing in the * non-secure state. */ flags = 0; set_interrupt_rm_flag(flags, NON_SECURE); rc = register_interrupt_type_handler(INTR_TYPE_S_EL1, opteed_sel1_interrupt_handler, flags); if (rc) panic(); } /* * OPTEE reports completion. The OPTEED must have initiated * the original request through a synchronous entry into * OPTEE. Jump back to the original C runtime context. */ opteed_synchronous_sp_exit(optee_ctx, x1); /* * These function IDs is used only by OP-TEE to indicate it has * finished: * 1. turning itself on in response to an earlier psci * cpu_on request * 2. resuming itself after an earlier psci cpu_suspend * request. 
     */
    case TEESMC_OPTEED_RETURN_ON_DONE:
    case TEESMC_OPTEED_RETURN_RESUME_DONE:

    /*
     * These function IDs are used only by the SP to indicate it has
     * finished:
     * 1. suspending itself after an earlier psci cpu_suspend
     *    request.
     * 2. turning itself off in response to an earlier psci
     *    cpu_off request.
     */
    case TEESMC_OPTEED_RETURN_OFF_DONE:
    case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
    case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
    case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:

        /*
         * OPTEE reports completion. The OPTEED must have initiated the
         * original request through a synchronous entry into OPTEE.
         * Jump back to the original C runtime context, and pass x1 as
         * return value to the caller
         */
        opteed_synchronous_sp_exit(optee_ctx, x1);

    /*
     * OPTEE is returning from a call or being preempted from a call; in
     * either case execution should resume in the normal world.
     */
    case TEESMC_OPTEED_RETURN_CALL_DONE:
        /*
         * This is the result from the secure client of an
         * earlier request. The results are in x0-x3. Copy it
         * into the non-secure context, save the secure state
         * and return to the non-secure state.
         */
        assert(handle == cm_get_context(SECURE));
        cm_el1_sysregs_context_save(SECURE);

        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /* Restore non-secure state */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

    /*
     * OPTEE has finished handling an S-EL1 FIQ interrupt. Execution
     * should resume in the normal world.
     */
    case TEESMC_OPTEED_RETURN_FIQ_DONE:
        /* Get a reference to the non-secure context */
        ns_cpu_context = cm_get_context(NON_SECURE);
        assert(ns_cpu_context);

        /*
         * Restore non-secure state. There is no need to save the
         * secure system register context since OPTEE was supposed
         * to preserve it during S-EL1 interrupt handling.
         */
        cm_el1_sysregs_context_restore(NON_SECURE);
        cm_set_next_eret_context(NON_SECURE);

        SMC_RET0((uint64_t) ns_cpu_context);

    default:
        panic();
    }
}
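/*
 * Illustrative usage only (not part of the original source): handlers such as
 * opteed_smc_handler() are not called directly; they are registered with the
 * EL3 runtime service framework so that Trusted OS range SMCs are routed to
 * them. A minimal sketch, assuming the DECLARE_RT_SVC macro and an
 * opteed_setup() init function as in the upstream dispatcher:
 */
DECLARE_RT_SVC(
    opteed_fast,            /* service name */
    OEN_TOS_START,          /* first owning entity number handled */
    OEN_TOS_END,            /* last owning entity number handled */
    SMC_TYPE_FAST,          /* fast SMCs */
    opteed_setup,           /* one-time init hook (assumed name) */
    opteed_smc_handler      /* the handler shown above */
);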
static int32_t tbase_init_entry() { DBG_PRINTF("tbase_init\n\r"); // Save el1 registers in case non-secure world has already been set up. cm_el1_sysregs_context_save(NON_SECURE); uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); tbase_context *tbase_ctx = &secure_context[linear_id]; // Note: mapping is 1:1, so physical and virtual addresses are here the same. cpu_context_t *ns_entry_context = (cpu_context_t *) cm_get_context(mpidr, NON_SECURE); // ************************************************************************************ // Configure parameter passing to tbase // Calculate page start addresses for register areas. registerFileStart[REGISTER_FILE_NWD] = page_align((uint64_t)&ns_entry_context, DOWN); registerFileStart[REGISTER_FILE_MONITOR] = page_align((uint64_t)&msm_area, DOWN); // Calculate page end addresses for register areas. registerFileEnd[REGISTER_FILE_NWD] = (uint64_t)(&ns_entry_context[TBASE_CORE_COUNT]); registerFileEnd[REGISTER_FILE_MONITOR] = ((uint64_t)&msm_area) +sizeof(msm_area); int32_t totalPages = 0; for (int area=0; area<REGISTER_FILE_COUNT; area++) { int32_t pages = page_align(registerFileEnd[area] - registerFileStart[area], UP) / PAGE_SIZE; assert( pages +totalPages <= TBASE_INTERFACE_PAGES ); tbase_init_register_file(area, totalPages, pages); totalPages += pages; } // ************************************************************************************ // Create boot structure tbaseBootCfg.magic = TBASE_BOOTCFG_MAGIC; tbaseBootCfg.length = sizeof(bootCfg_t); tbaseBootCfg.version = TBASE_MONITOR_INTERFACE_VERSION; tbaseBootCfg.dRamBase = TBASE_NWD_DRAM_BASE; tbaseBootCfg.dRamSize = TBASE_NWD_DRAM_SIZE; tbaseBootCfg.secDRamBase = TBASE_SWD_DRAM_BASE; tbaseBootCfg.secDRamSize = TBASE_SWD_DRAM_SIZE; tbaseBootCfg.secIRamBase = TBASE_SWD_IMEM_BASE; tbaseBootCfg.secIRamSize = TBASE_SWD_IMEM_SIZE; tbaseBootCfg.conf_mair_el3 = read_mair_el3(); tbaseBootCfg.MSMPteCount = totalPages; tbaseBootCfg.MSMBase = (uint64_t)registerFileL2; tbaseBootCfg.gic_distributor_base = TBASE_GIC_DIST_BASE; tbaseBootCfg.gic_cpuinterface_base = TBASE_GIC_CPU_BASE; tbaseBootCfg.gic_version = TBASE_GIC_VERSION; tbaseBootCfg.total_number_spi = TBASE_SPI_COUNT; tbaseBootCfg.ssiq_number = TBASE_SSIQ_NRO; tbaseBootCfg.flags = TBASE_MONITOR_FLAGS; DBG_PRINTF("*** tbase boot cfg ***\n\r"); DBG_PRINTF("* magic : 0x%.X\n\r",tbaseBootCfg.magic); DBG_PRINTF("* length : 0x%.X\n\r",tbaseBootCfg.length); DBG_PRINTF("* version : 0x%.X\n\r",tbaseBootCfg.version); DBG_PRINTF("* dRamBase : 0x%.X\n\r",tbaseBootCfg.dRamBase); DBG_PRINTF("* dRamSize : 0x%.X\n\r",tbaseBootCfg.dRamSize); DBG_PRINTF("* secDRamBase : 0x%.X\n\r",tbaseBootCfg.secDRamBase); DBG_PRINTF("* secDRamSize : 0x%.X\n\r",tbaseBootCfg.secDRamSize); DBG_PRINTF("* secIRamBase : 0x%.X\n\r",tbaseBootCfg.secIRamBase); DBG_PRINTF("* secIRamSize : 0x%.X\n\r",tbaseBootCfg.secIRamSize); DBG_PRINTF("* conf_mair_el3 : 0x%.X\n\r",tbaseBootCfg.conf_mair_el3); DBG_PRINTF("* MSMPteCount : 0x%.X\n\r",tbaseBootCfg.MSMPteCount); DBG_PRINTF("* MSMBase : 0x%.X\n\r",tbaseBootCfg.MSMBase); DBG_PRINTF("* gic_distributor_base : 0x%.X\n\r",tbaseBootCfg.gic_distributor_base); DBG_PRINTF("* gic_cpuinterface_base : 0x%.X\n\r",tbaseBootCfg.gic_cpuinterface_base); DBG_PRINTF("* gic_version : 0x%.X\n\r",tbaseBootCfg.gic_version); DBG_PRINTF("* total_number_spi : 0x%.X\n\r",tbaseBootCfg.total_number_spi); DBG_PRINTF("* ssiq_number : 0x%.X\n\r",tbaseBootCfg.ssiq_number); DBG_PRINTF("* flags : 0x%.X\n\r",tbaseBootCfg.flags); // 
************************************************************************************
    // tbaseBootCfg and l2 entries may be accessed uncached, so must flush those.
    flush_dcache_range((unsigned long)&tbaseBootCfg, sizeof(bootCfg_t));
    flush_dcache_range((unsigned long)&registerFileL2, sizeof(registerFileL2));

    // ************************************************************************************
    // Set registers for tbase initialization entry
    cpu_context_t *s_entry_context = &tbase_ctx->cpu_ctx;
    gp_regs_t *s_entry_gpregs = get_gpregs_ctx(s_entry_context);
    write_ctx_reg(s_entry_gpregs, CTX_GPREG_X0, 0);
    write_ctx_reg(s_entry_gpregs, CTX_GPREG_X1, (int64_t)&tbaseBootCfg);

    // SPSR for SMC handling (FIQ mode)
    tbaseEntrySpsr = TBASE_ENTRY_SPSR;

    DBG_PRINTF("tbase init SPSR 0x%x\n\r",
               read_ctx_reg(get_el3state_ctx(&tbase_ctx->cpu_ctx), CTX_SPSR_EL3));
    DBG_PRINTF("tbase SMC SPSR %x\n\r", tbaseEntrySpsr);

    // ************************************************************************************
    // Start tbase
    tbase_synchronous_sp_entry(tbase_ctx);
    tbase_ctx->state = TBASE_STATE_ON;

#if TBASE_PM_ENABLE
    // Register power management hooks with PSCI
    psci_register_spd_pm_hook(&tbase_pm);
#endif

    cm_el1_sysregs_context_restore(NON_SECURE);
    cm_set_next_eret_context(NON_SECURE);

    return 1;
}
static uint64_t tbase_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags) { uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); tbase_context *tbase_ctx = &secure_context[linear_id]; int caller_security_state = flags&1; DBG_PRINTF("tbase_smc_handler %d %x\n\r", caller_security_state, smc_fid); if (caller_security_state==SECURE) { // Yield to NWd // TODO: Check id if (tbaseInitStatus==TBASE_INIT_CONFIG_OK) { // Save sysregs to all cores. // After this tbase can work on any core. save_sysregs_allcore(); tbaseInitStatus = TBASE_INIT_SYSREGS_OK; if (tbaseExecutionStatus==TBASE_STATUS_UNINIT) { tbaseExecutionStatus = TBASE_STATUS_NORMAL; } } // If above check fails, it is not possible to return to tbase. tbase_synchronous_sp_exit(tbase_ctx, 0, 1); } else { if ((tbaseExecutionStatus&TBASE_STATUS_SMC_OK_BIT)==0) { // TBASE must be initialized to be usable DBG_PRINTF( "tbase_smc_handler tbase not ready for smc.\n\r"); // TODO: What is correct error code? SMC_RET1(handle, SMC_UNK); return 1; } if(tbase_ctx->state == TBASE_STATE_OFF) { DBG_PRINTF( "tbase_smc_handler tbase not ready for fastcall\n\r" ); return 1; } // NSIQ, go to SWd // TODO: Check id? // Save NWd gp_regs_t *ns_gpregs = get_gpregs_ctx((cpu_context_t *)handle); write_ctx_reg(ns_gpregs, CTX_GPREG_X0, smc_fid ); write_ctx_reg(ns_gpregs, CTX_GPREG_X1, x1 ); write_ctx_reg(ns_gpregs, CTX_GPREG_X2, x2 ); write_ctx_reg(ns_gpregs, CTX_GPREG_X3, x3 ); cm_el1_sysregs_context_save(NON_SECURE); // Load SWd tbase_setup_entry_nwd((cpu_context_t *)handle,ENTRY_OFFSET_SMC); // Enter tbase. tbase must return using normal SMC, which will continue here. tbase_synchronous_sp_entry(tbase_ctx); // Load NWd cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); } return 0; }
// ************************************************************************************ // fastcall handler static uint64_t tbase_fastcall_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags) { uint64_t mpidr = read_mpidr(); uint32_t linear_id = platform_get_core_pos(mpidr); tbase_context *tbase_ctx = &secure_context[linear_id]; int caller_security_state = flags&1; if (caller_security_state==SECURE) { switch(maskSWdRegister(smc_fid)) { case TBASE_SMC_FASTCALL_RETURN: { // Return values from fastcall already in cpu_context! // TODO: Could we skip saving sysregs? DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_RETURN\n\r"); tbase_synchronous_sp_exit(tbase_ctx, 0, 1); } case TBASE_SMC_FASTCALL_CONFIG_OK: { DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_CONFIG_OK\n\r"); configure_tbase(x1,x2); SMC_RET1(handle,smc_fid); break; } case TBASE_SMC_FASTCALL_OUTPUT: { output(x1,x2); SMC_RET1(handle,smc_fid); break; } case TBASE_SMC_FASTCALL_STATUS: { DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_STATUS\n\r"); tbase_status(x1,x2); SMC_RET1(handle,smc_fid); break; } case TBASE_SMC_FASTCALL_INPUT: { DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_INPUT\n\r"); smc_fid = plat_tbase_input(x1,&x2,&(tbase_ctx->tbase_input_fastcall)); SMC_RET3(handle,smc_fid,page_align(registerFileEnd[REGISTER_FILE_NWD] - registerFileStart[REGISTER_FILE_NWD], UP)+(uint64_t)&(tbase_ctx->tbase_input_fastcall)- registerFileStart[REGISTER_FILE_MONITOR],x2); break; } case TBASE_SMC_FASTCALL_DUMP: { DBG_PRINTF( "tbase_fastcall_handler TBASE_SMC_FASTCALL_DUMP\n\r"); tbase_triggerSgiDump(); SMC_RET1(handle,smc_fid); break; } default: { // What now? DBG_PRINTF( "tbase_fastcall_handler SMC_UNK %x\n\r", smc_fid ); SMC_RET1(handle, SMC_UNK); break; } } } else { if (smc_fid == TBASE_SMC_AEE_DUMP) // N-world can request AEE Dump function { mt_atf_trigger_WDT_FIQ(); // Once we return to the N-world's caller, // FIQ will be trigged and bring us on EL3 (ATF) on core #0 because HW wiring. // Then FIQ will be handled the same way as for HW WDT FIQ. //Do we need to save-recover n-context before being able to use it for return? cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); return 0; } if ((tbaseExecutionStatus&TBASE_STATUS_FASTCALL_OK_BIT)==0) { // TBASE must be initialized to be usable // TODO: What is correct error code? DBG_PRINTF( "tbase_fastcall_handler tbase not ready for fastcall\n\r" ); SMC_RET1(handle, SMC_UNK); return 0; } if(tbase_ctx->state == TBASE_STATE_OFF) { DBG_PRINTF( "tbase_fastcall_handler tbase not ready for fastcall\n\r" ); SMC_RET1(handle, SMC_UNK); return 0; } DBG_PRINTF( "tbase_fastcall_handler NWd %x\n\r", smc_fid ); // So far all fastcalls go to tbase // Save NWd context gp_regs_t *ns_gpregs = get_gpregs_ctx((cpu_context_t *)handle); write_ctx_reg(ns_gpregs, CTX_GPREG_X0, smc_fid ); // These are not saved yet write_ctx_reg(ns_gpregs, CTX_GPREG_X1, x1 ); write_ctx_reg(ns_gpregs, CTX_GPREG_X2, x2 ); write_ctx_reg(ns_gpregs, CTX_GPREG_X3, x3 ); cm_el1_sysregs_context_save(NON_SECURE); // Load SWd context tbase_setup_entry_nwd((cpu_context_t *)handle,ENTRY_OFFSET_FASTCALL); #if DEBUG print_fastcall_params("entry", NON_SECURE); #endif tbase_synchronous_sp_entry(tbase_ctx); cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); return 0; // Does not seem to matter what we return } }
/* Setup context of the Secure Partition */
void secure_partition_setup(void)
{
    VERBOSE("S-EL1/S-EL0 context setup start...\n");

    cpu_context_t *ctx = cm_get_context(SECURE);

    /* Make sure that we got a Secure context. */
    assert(ctx != NULL);

    /* Assert we are in Secure state. */
    assert((read_scr_el3() & SCR_NS_BIT) == 0);

    /* Disable MMU at EL1. */
    disable_mmu_icache_el1();

    /* Invalidate TLBs at EL1. */
    tlbivmalle1();

    /*
     * General-Purpose registers
     * -------------------------
     */

    /*
     * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
     *     The buffer will be mapped in the Secure EL1 translation regime
     *     with Normal IS WBWA attributes and RO data and Execute Never
     *     instruction access permissions.
     *
     * X1: Size of the buffer in bytes
     *
     * X2: cookie value (Implementation Defined)
     *
     * X3: cookie value (Implementation Defined)
     *
     * X4 to X30 = 0 (already done by cm_init_my_context())
     */
    write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, PLAT_SPM_BUF_BASE);
    write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, PLAT_SPM_BUF_SIZE);
    write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, PLAT_SPM_COOKIE_0);
    write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, PLAT_SPM_COOKIE_1);

    /*
     * SP_EL0: A non-zero value will indicate to the SP that the SPM has
     * initialized the stack pointer for the current CPU through
     * implementation defined means. The value will be 0 otherwise.
     */
    write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
                  PLAT_SP_IMAGE_STACK_BASE + PLAT_SP_IMAGE_STACK_PCPU_SIZE);

    /*
     * Setup translation tables
     * ------------------------
     */

#if ENABLE_ASSERTIONS
    /* Get max granularity supported by the platform. */
    u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();

    int tgran64_supported =
        ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
         ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
         ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;

    int tgran16_supported =
        ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
         ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
         ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;

    int tgran4_supported =
        ((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
         ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
         ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;

    uintptr_t max_granule_size;

    if (tgran64_supported) {
        max_granule_size = 64 * 1024;
    } else if (tgran16_supported) {
        max_granule_size = 16 * 1024;
    } else {
        assert(tgran4_supported);
        max_granule_size = 4 * 1024;
    }

    VERBOSE("Max translation granule supported: %lu bytes\n",
            max_granule_size);

    uintptr_t max_granule_size_mask = max_granule_size - 1;

    /* Base must be aligned to the max granularity */
    assert((ARM_SP_IMAGE_NS_BUF_BASE & max_granule_size_mask) == 0);

    /* Size must be a multiple of the max granularity */
    assert((ARM_SP_IMAGE_NS_BUF_SIZE & max_granule_size_mask) == 0);
#endif /* ENABLE_ASSERTIONS */

    /* This region contains the exception vectors used at S-EL1. */
    const mmap_region_t sel1_exception_vectors =
        MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START, SPM_SHIM_EXCEPTIONS_SIZE,
                        MT_CODE | MT_SECURE | MT_PRIVILEGED);
    mmap_add_region_ctx(&secure_partition_xlat_ctx, &sel1_exception_vectors);

    mmap_add_ctx(&secure_partition_xlat_ctx,
                 plat_get_secure_partition_mmap(NULL));

    init_xlat_tables_ctx(&secure_partition_xlat_ctx);

    /*
     * MMU-related registers
     * ---------------------
     */

    /* Set attributes in the right indices of the MAIR */
    u_register_t mair_el1 =
        MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
        MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
        MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

    write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);

    /* Setup TCR_EL1. */
    u_register_t tcr_ps_bits =
        tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);

    u_register_t tcr_el1 =
        /* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
        (64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE)) |
        /* Inner and outer WBWA, shareable. */
        TCR_SH_INNER_SHAREABLE | TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA |
        /* Set the granularity to 4KB. */
        TCR_TG0_4K |
        /* Limit Intermediate Physical Address Size. */
        tcr_ps_bits << TCR_EL1_IPS_SHIFT |
        /* Disable translations using TTBR1_EL1. */
        TCR_EPD1_BIT
        /* The remaining fields related to TTBR1_EL1 are left as zero. */
        ;

    tcr_el1 &= ~(
        /* Enable translations using TTBR0_EL1 */
        TCR_EPD0_BIT
    );

    write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);

    /* Setup SCTLR_EL1 */
    u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1);

    sctlr_el1 |= /*SCTLR_EL1_RES1 |*/
        /* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
        SCTLR_UCI_BIT |
        /* RW regions at xlat regime EL1&0 are forced to be XN. */
        SCTLR_WXN_BIT |
        /* Don't trap to EL1 execution of WFI or WFE at EL0. */
        SCTLR_NTWI_BIT | SCTLR_NTWE_BIT |
        /* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
        SCTLR_UCT_BIT |
        /* Don't trap to EL1 execution of DC ZVA at EL0. */
        SCTLR_DZE_BIT |
        /* Enable SP Alignment check for EL0 */
        SCTLR_SA0_BIT |
        /* Allow cacheable data and instr. accesses to normal memory. */
        SCTLR_C_BIT | SCTLR_I_BIT |
        /* Alignment fault checking enabled when at EL1 and EL0. */
        SCTLR_A_BIT |
        /* Enable MMU. */
        SCTLR_M_BIT
        ;

    sctlr_el1 &= ~(
        /* Explicit data accesses at EL0 are little-endian. */
        SCTLR_E0E_BIT |
        /* Accesses to DAIF from EL0 are trapped to EL1. */
        SCTLR_UMA_BIT
    );

    write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

    /* Point TTBR0_EL1 at the tables of the context created for the SP. */
    write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
                  (u_register_t)secure_partition_base_xlat_table);

    /*
     * Setup other system registers
     * ----------------------------
     */

    /* Shim Exception Vector Base Address */
    write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
                  SPM_SHIM_EXCEPTIONS_PTR);

    /*
     * FPEN: Forbid the Secure Partition to access FP/SIMD registers.
     * TTA: Enable access to trace registers.
     * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
     */
    write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
                  CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_ALL));

    /*
     * Prepare information in buffer shared between EL3 and S-EL0
     * ----------------------------------------------------------
     */

    void *shared_buf_ptr = (void *) PLAT_SPM_BUF_BASE;

    /* Copy the boot information into the shared buffer with the SP. */
    assert((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)
           <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE));

    assert(PLAT_SPM_BUF_BASE <= (UINTPTR_MAX - PLAT_SPM_BUF_SIZE + 1));

    const secure_partition_boot_info_t *sp_boot_info =
        plat_get_secure_partition_boot_info(NULL);

    assert(sp_boot_info != NULL);

    memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
           sizeof(secure_partition_boot_info_t));

    /* Pointer to the MP information from the platform port. */
    secure_partition_mp_info_t *sp_mp_info =
        ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

    assert(sp_mp_info != NULL);

    /*
     * Point the shared buffer MP information pointer to where the info will
     * be populated, just after the boot info.
*/ ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info = (secure_partition_mp_info_t *) ((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)); /* * Update the shared buffer pointer to where the MP information for the * payload will be populated */ shared_buf_ptr = ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info; /* * Copy the cpu information into the shared buffer area after the boot * information. */ assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT); assert((uintptr_t)shared_buf_ptr <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE - (sp_boot_info->num_cpus * sizeof(*sp_mp_info)))); memcpy(shared_buf_ptr, (const void *) sp_mp_info, sp_boot_info->num_cpus * sizeof(*sp_mp_info)); /* * Calculate the linear indices of cores in boot information for the * secure partition and flag the primary CPU */ sp_mp_info = (secure_partition_mp_info_t *) shared_buf_ptr; for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) { u_register_t mpidr = sp_mp_info[index].mpidr; sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr); if (plat_my_core_pos() == sp_mp_info[index].linear_id) sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU; } VERBOSE("S-EL1/S-EL0 context setup end.\n"); }
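/*
 * Illustrative sketch only (not part of the original source): the helper
 * tcr_physical_addr_size_bits() used in the TCR_EL1 setup above is not shown
 * in this excerpt. It maps the platform's physical address space size to the
 * TCR_ELx.IPS field encoding defined by the Armv8-A architecture; a minimal
 * version could look like this.
 */
static u_register_t tcr_physical_addr_size_bits(unsigned long long max_addr)
{
    /*
     * TCR_ELx.IPS encodings from the Armv8-A architecture:
     * 0 = 32-bit, 1 = 36-bit, 2 = 40-bit, 3 = 42-bit, 4 = 44-bit, 5 = 48-bit PA.
     */
    if (max_addr > (1ULL << 44))
        return 5;   /* up to 48-bit PA */
    if (max_addr > (1ULL << 42))
        return 4;   /* up to 44-bit PA */
    if (max_addr > (1ULL << 40))
        return 3;   /* up to 42-bit PA */
    if (max_addr > (1ULL << 36))
        return 2;   /* up to 40-bit PA */
    if (max_addr > (1ULL << 32))
        return 1;   /* up to 36-bit PA */
    return 0;       /* up to 32-bit PA */
}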
/******************************************************************************* * This function is responsible for handling all SMCs in the Trusted OS/App * range from the non-secure state as defined in the SMC Calling Convention * Document. It is also responsible for communicating with the XILSP to * delegate work and return results back to the non-secure state. Lastly it * will also return any information that the XILSP needs to do the work * assigned to it. ******************************************************************************/ uint64_t xilspd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, void *cookie, void *handle, uint64_t flags) { cpu_context_t *ns_cpu_context; uint32_t linear_id = plat_my_core_pos(), ns; xilsp_context_t *xilsp_ctx = &xilspd_sp_context[linear_id]; /* Determine which security state this SMC originated from */ ns = is_caller_non_secure(flags); switch (smc_fid) { /* * This function ID is used only by the SP to indicate it has * finished initialising itself after a cold boot */ case XILSP_ENTRY_DONE: if (ns) SMC_RET1(handle, SMC_UNK); /* * Stash the SP entry points information. This is done * only once on the primary cpu */ assert(xilsp_vectors == NULL); xilsp_vectors = (xilsp_vectors_t *) x1; if (xilsp_vectors) set_xilsp_pstate(xilsp_ctx->state, XILSP_PSTATE_ON); /* * SP reports completion. The SPD must have initiated * the original request through a synchronous entry * into the SP. Jump back to the original C runtime * context. */ xilspd_synchronous_sp_exit(xilsp_ctx, x1); break; case XILSP_ARITH: if (ns) { /* * This is a fresh request from the non-secure client. * Figure out which registers need to be preserved, save * the non-secure state and send the request to the * secure payload. */ assert(handle == cm_get_context(NON_SECURE)); cm_el1_sysregs_context_save(NON_SECURE); /* * We are done stashing the non-secure context. Ask the * secure payload to do the work now. */ /* * Verify if there is a valid context to use, copy the * operation type and parameters to the secure context * and jump to the fast smc entry point in the secure * payload. Entry into S-EL1 will take place upon exit * from this function. */ assert(&xilsp_ctx->cpu_ctx == cm_get_context(SECURE)); /* Set appropriate entry for SMC. * We expect the XILSP to manage the PSTATE.I and * PSTATE.F flags as appropriate. */ cm_set_elr_el3(SECURE, (uint64_t) &xilsp_vectors->fast_smc_entry); cm_el1_sysregs_context_restore(SECURE); cm_set_next_eret_context(SECURE); write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx), CTX_GPREG_X4, read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X4)); write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx), CTX_GPREG_X5, read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X5)); write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx), CTX_GPREG_X6, read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X6)); /* Propagate hypervisor client ID */ write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx), CTX_GPREG_X7, read_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X7)); SMC_RET4(&xilsp_ctx->cpu_ctx, smc_fid, x1, x2, x3); } else { /* * This is the result from the secure client of an * earlier request. The results are in x1-x4. Copy it * into the non-secure context, save the secure state * and return to the non-secure state. 
*/ assert(handle == cm_get_context(SECURE)); cm_el1_sysregs_context_save(SECURE); /* Get a reference to the non-secure context */ ns_cpu_context = cm_get_context(NON_SECURE); assert(ns_cpu_context); /* Restore non-secure state */ cm_el1_sysregs_context_restore(NON_SECURE); cm_set_next_eret_context(NON_SECURE); SMC_RET4(ns_cpu_context, x1, x2, x3, x4); } break; default: break; } SMC_RET1(handle, SMC_UNK); }