static void copy_cpu_ctx_to_smc_stx(const regs_t *cpu_reg_ctx,
				    smc_ctx_t *next_smc_ctx)
{
	next_smc_ctx->r0 = read_ctx_reg(cpu_reg_ctx, CTX_GPREG_R0);
	next_smc_ctx->lr_mon = read_ctx_reg(cpu_reg_ctx, CTX_LR);
	next_smc_ctx->spsr_mon = read_ctx_reg(cpu_reg_ctx, CTX_SPSR);
}
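/*
 * Illustrative sketch only (not the real TF-A definition): the subset of
 * smc_ctx_t fields the helper above touches. The actual AArch32 smc_ctx_t
 * in TF-A also carries r1-r12, the banked SP/LR pairs and SCR, and its
 * exact layout is what the `smc_exit` assembly path consumes.
 */
typedef struct smc_ctx_sketch {
	u_register_t r0;	/* r0 to hand back to the lower EL */
	u_register_t lr_mon;	/* monitor-mode LR: return address for the exit */
	u_register_t spsr_mon;	/* monitor-mode SPSR: target mode/state */
} smc_ctx_sketch_t;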
void print_fastcall_params(char *msg, uint32_t secure)
{
	gp_regs_t *ns_gpregs =
		get_gpregs_ctx((cpu_context_t *) cm_get_context(secure));

	DBG_PRINTF("tbase %s (%d) 0x%llx 0x%llx 0x%llx 0x%llx\n\r", msg, secure,
		   read_ctx_reg(ns_gpregs, CTX_GPREG_X0),
		   read_ctx_reg(ns_gpregs, CTX_GPREG_X1),
		   read_ctx_reg(ns_gpregs, CTX_GPREG_X2),
		   read_ctx_reg(ns_gpregs, CTX_GPREG_X3));
}
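/*
 * Hypothetical call site for the helper above - dump the four argument
 * registers of the saved non-secure context when a fastcall arrives:
 *
 *	print_fastcall_params("fastcall entry", NON_SECURE);
 */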
/******************************************************************************
 * This function is invoked during warm boot. Invoke the PSCI library
 * warm boot entry point which takes care of Architectural and platform setup/
 * restore. Copy the relevant cpu_context register values to smc context which
 * will get programmed during `smc_exit`.
 *****************************************************************************/
void sp_min_warm_boot(void)
{
	smc_ctx_t *next_smc_ctx;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	u_register_t ns_sctlr;

	psci_warmboot_entrypoint();

	smc_set_next_ctx(NON_SECURE);

	next_smc_ctx = smc_get_next_ctx();
	zeromem(next_smc_ctx, sizeof(smc_ctx_t));

	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
				next_smc_ctx);

	/* Temporarily set the NS bit to access NS SCTLR */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();
	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
	write_sctlr(ns_sctlr);

	isb();
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}
/*******************************************************************************
 * This function invokes the PSCI library interface to initialize the
 * non-secure cpu context and copies the relevant cpu context register values
 * to smc context. These registers will get programmed during `smc_exit`.
 ******************************************************************************/
static void sp_min_prepare_next_image_entry(void)
{
	entry_point_info_t *next_image_info;
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	u_register_t ns_sctlr;

	/* Program system registers to proceed to non-secure */
	next_image_info = sp_min_plat_get_bl33_ep_info();
	assert(next_image_info);
	assert(NON_SECURE == GET_SECURITY_STATE(next_image_info->h.attr));

	INFO("SP_MIN: Preparing exit to normal world\n");

	psci_prepare_next_non_secure_ctx(next_image_info);
	smc_set_next_ctx(NON_SECURE);

	/* Copy r0, lr and spsr from cpu context to SMC context */
	copy_cpu_ctx_to_smc_stx(get_regs_ctx(cm_get_context(NON_SECURE)),
				smc_get_next_ctx());

	/* Temporarily set the NS bit to access NS SCTLR */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();
	ns_sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
	write_sctlr(ns_sctlr);

	isb();
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}
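/*
 * Illustrative refactoring sketch: the "set SCR.NS, isb, write banked
 * register, clear SCR.NS, isb" sequence above appears in both sp_min
 * functions (and again in cm_prepare_el3_exit below). A helper like the
 * following could centralise it; ns_banked_sctlr_write is a hypothetical
 * name, not a TF-A API.
 */
static void ns_banked_sctlr_write(u_register_t ns_sctlr)
{
	/* Expose the non-secure banked register copies */
	write_scr(read_scr() | SCR_NS_BIT);
	isb();

	write_sctlr(ns_sctlr);
	isb();

	/* Restore the secure view */
	write_scr(read_scr() & ~SCR_NS_BIT);
	isb();
}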
static void tbase_setup_entry_common(cpu_context_t *s_context,
				     cpu_context_t *ns_context,
				     uint32_t call_offset)
{
	// Set up registers
	gp_regs_t *s_gpregs = get_gpregs_ctx(s_context);

	// NWd spsr
	uint64_t ns_spsr = read_ctx_reg(get_el3state_ctx(ns_context),
					CTX_SPSR_EL3);
	write_ctx_reg(s_gpregs, CTX_GPREG_X2, ns_spsr);

	// Entry to tbase
	el3_state_t *el3sysregs = get_el3state_ctx(s_context);
	write_ctx_reg(el3sysregs, CTX_SPSR_EL3, tbaseEntrySpsr);
	cm_set_elr_el3(SECURE, tbaseEntryBase + call_offset);
}
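// Illustrative sketch: hypothetical call sites for the helper above.
// ENTRY_OFFSET_SMC and ENTRY_OFFSET_FIQ are invented names - the pattern
// is simply that each monitor entry reason lands at a fixed offset from
// tbaseEntryBase.
static void tbase_entry_examples(cpu_context_t *s_ctx, cpu_context_t *ns_ctx)
{
	tbase_setup_entry_common(s_ctx, ns_ctx, ENTRY_OFFSET_SMC); // forward an SMC
	tbase_setup_entry_common(s_ctx, ns_ctx, ENTRY_OFFSET_FIQ); // forward an FIQ
}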
/*
 * Handle SMC from a lower exception level to switch its execution state
 * (either from AArch64 to AArch32, or vice versa).
 *
 * smc_fid:
 *	SMC function ID - either ARM_SIP_SVC_STATE_SWITCH_64 or
 *	ARM_SIP_SVC_STATE_SWITCH_32.
 * pc_hi, pc_lo:
 *	PC upon re-entry to the calling exception level; width dependent on the
 *	calling exception level.
 * cookie_hi, cookie_lo:
 *	Opaque pointer pair received from the caller, passed back upon
 *	re-entry.
 * handle:
 *	Handle to saved context.
 */
int arm_execution_state_switch(unsigned int smc_fid,
		uint32_t pc_hi,
		uint32_t pc_lo,
		uint32_t cookie_hi,
		uint32_t cookie_lo,
		void *handle)
{
	/* Execution state can be switched only if EL3 is AArch64 */
#ifdef AARCH64
	int caller_64, from_el2, el, endianness, thumb = 0;
	u_register_t spsr, pc, scr, sctlr;
	entry_point_info_t ep;
	cpu_context_t *ctx = (cpu_context_t *) handle;
	el3_state_t *el3_ctx = get_el3state_ctx(ctx);

	/* That the SMC originated from NS is already validated by the caller */

	/*
	 * Disallow state switch if any of the secondaries have been brought up.
	 */
	if (psci_secondaries_brought_up())
		goto exec_denied;

	spsr = read_ctx_reg(el3_ctx, CTX_SPSR_EL3);
	caller_64 = (GET_RW(spsr) == MODE_RW_64);

	if (caller_64) {
		/*
		 * If the call originated from AArch64, expect 32-bit pointers
		 * when switching to AArch32.
		 */
		if ((pc_hi != 0) || (cookie_hi != 0))
			goto invalid_param;

		pc = pc_lo;

		/* Instruction state when entering AArch32 */
		thumb = pc & 1;
	} else {
		/* Construct AArch64 PC */
		pc = (((u_register_t) pc_hi) << 32) | pc_lo;
	}

	/* Make sure PC is 4-byte aligned, except for Thumb */
	if ((pc & 0x3) && !thumb)
		goto invalid_param;

	/*
	 * EL3 controls register width of the immediate lower EL only. Expect
	 * this request from EL2/Hyp unless:
	 *
	 * - EL2 is not implemented;
	 * - EL2 is implemented, but was disabled. This can be inferred from
	 *   SCR_EL3.HCE.
	 */
	from_el2 = caller_64 ? (GET_EL(spsr) == MODE_EL2) :
			(GET_M32(spsr) == MODE32_hyp);
	scr = read_ctx_reg(el3_ctx, CTX_SCR_EL3);
	if (!from_el2) {
		/* The call is from NS privilege level other than HYP */

		/*
		 * Disallow switching state if there's a Hypervisor in place;
		 * this request must be taken up with the Hypervisor instead.
		 */
		if (scr & SCR_HCE_BIT)
			goto exec_denied;
	}

	/*
	 * Return to the caller using the same endianness. Extract
	 * endianness bit from the respective system control register
	 * directly.
	 */
	sctlr = from_el2 ? read_sctlr_el2() : read_sctlr_el1();
	endianness = !!(sctlr & SCTLR_EE_BIT);

	/* Construct SPSR for the exception state we're about to switch to */
	if (caller_64) {
		int impl;

		/*
		 * Switching from AArch64 to AArch32. Ensure this CPU implements
		 * the target EL in AArch32.
		 */
		impl = from_el2 ? EL_IMPLEMENTED(2) : EL_IMPLEMENTED(1);
		if (impl != EL_IMPL_A64_A32)
			goto exec_denied;

		/* Return to the equivalent AArch32 privilege level */
		el = from_el2 ? MODE32_hyp : MODE32_svc;
		spsr = SPSR_MODE32(el, thumb ? SPSR_T_THUMB : SPSR_T_ARM,
				endianness, DISABLE_ALL_EXCEPTIONS);
	} else {
		/*
		 * Switching from AArch32 to AArch64. Since it's not possible to
		 * implement an EL as AArch32-only (from which this call was
		 * raised), it's safe to assume AArch64 is also implemented.
		 */
		el = from_el2 ? MODE_EL2 : MODE_EL1;
		spsr = SPSR_64(el, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	}

	/*
	 * Use the context management library to re-initialize the existing
	 * context with the execution state flipped. Since the library takes
	 * entry_point_info_t pointer as the argument, construct a dummy one
	 * with PC, state width, endianness, security etc. appropriately set.
	 * Other entries in the entry point structure are irrelevant for this
	 * purpose.
	 */
	zeromem(&ep, sizeof(ep));
	ep.pc = pc;
	ep.spsr = spsr;
	SET_PARAM_HEAD(&ep, PARAM_EP, VERSION_1,
			((endianness ? EP_EE_BIG : EP_EE_LITTLE) | NON_SECURE |
			 EP_ST_DISABLE));

	/*
	 * Re-initialize the system register context, and exit EL3 as if for
	 * the first time. State switch is effectively a soft reset of the
	 * calling EL.
	 */
	cm_init_my_context(&ep);
	cm_prepare_el3_exit(NON_SECURE);

	/*
	 * State switch success. The caller of SMC wouldn't see the SMC
	 * returning. Instead, execution starts at the supplied entry point,
	 * with context pointers populated in registers 0 and 1.
	 */
	SMC_RET2(handle, cookie_hi, cookie_lo);

invalid_param:
	SMC_RET1(handle, STATE_SW_E_PARAM);

exec_denied:
#endif
	/* State switch denied */
	SMC_RET1(handle, STATE_SW_E_DENIED);
}
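/*
 * Illustrative caller-side sketch (smc_call() and its argument layout are
 * hypothetical, not from this source). An AArch64 caller switching to
 * AArch32 must pass 32-bit values, so the high words are zero; bit 0 of
 * the PC selects Thumb state on entry, and the two cookie words reappear
 * in registers 0 and 1 at the new entry point.
 */
static void state_switch_example(uintptr_t aarch32_ep, uint32_t cookie)
{
	smc_call(ARM_SIP_SVC_STATE_SWITCH_64,
		 0, (uint32_t) aarch32_ep,	/* pc_hi, pc_lo */
		 0, cookie);			/* cookie_hi, cookie_lo */
}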
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the Secure
 * payload to delegate work and return results back to the non-secure
 * state. Lastly it will also return any information that OPTEE needs to do
 * the work assigned to it.
 ******************************************************************************/
uint64_t opteed_smc_handler(uint32_t smc_fid,
			 uint64_t x1,
			 uint64_t x2,
			 uint64_t x3,
			 uint64_t x4,
			 void *cookie,
			 void *handle,
			 uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	unsigned long mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	optee_context_t *optee_ctx = &opteed_sp_context[linear_id];
	uint64_t rc;

	/*
	 * Determine which security state this SMC originated from
	 */

	if (is_caller_non_secure(flags)) {
		/*
		 * This is a fresh request from the non-secure client.
		 * The parameters are in x1 and x2. Figure out which
		 * registers need to be preserved, save the non-secure
		 * state and send the request to the secure payload.
		 */
		assert(handle == cm_get_context(NON_SECURE));

		cm_el1_sysregs_context_save(NON_SECURE);

		/*
		 * We are done stashing the non-secure context. Ask OPTEE
		 * to do the work now.
		 */

		/*
		 * Verify if there is a valid context to use, copy the
		 * operation type and parameters to the secure context
		 * and jump to the fast smc entry point in the secure
		 * payload. Entry into S-EL1 will take place upon exit
		 * from this function.
		 */
		assert(&optee_ctx->cpu_ctx == cm_get_context(SECURE));

		/*
		 * Set appropriate entry for SMC.
		 * We expect OPTEE to manage the PSTATE.I and PSTATE.F
		 * flags as appropriate.
		 */
		if (GET_SMC_TYPE(smc_fid) == SMC_TYPE_FAST) {
			cm_set_elr_el3(SECURE, (uint64_t)
					&optee_vectors->fast_smc_entry);
		} else {
			cm_set_elr_el3(SECURE, (uint64_t)
					&optee_vectors->std_smc_entry);
		}

		cm_el1_sysregs_context_restore(SECURE);
		cm_set_next_eret_context(SECURE);

		/* Propagate hypervisor client ID */
		write_ctx_reg(get_gpregs_ctx(&optee_ctx->cpu_ctx),
			      CTX_GPREG_X7,
			      read_ctx_reg(get_gpregs_ctx(handle),
					   CTX_GPREG_X7));

		SMC_RET4(&optee_ctx->cpu_ctx, smc_fid, x1, x2, x3);
	}

	/*
	 * Returning from OPTEE
	 */

	switch (smc_fid) {
	/*
	 * OPTEE has finished initialising itself after a cold boot
	 */
	case TEESMC_OPTEED_RETURN_ENTRY_DONE:
		/*
		 * Stash the OPTEE entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(optee_vectors == NULL);
		optee_vectors = (optee_vectors_t *) x1;

		if (optee_vectors) {
			set_optee_pstate(optee_ctx->state, OPTEE_PSTATE_ON);

			/*
			 * OPTEE has been successfully initialized.
			 * Register power management hooks with PSCI
			 */
			psci_register_spd_pm_hook(&opteed_pm);

			/*
			 * Register an interrupt handler for S-EL1 interrupts
			 * when generated during code executing in the
			 * non-secure state.
			 */
			flags = 0;
			set_interrupt_rm_flag(flags, NON_SECURE);
			rc = register_interrupt_type_handler(INTR_TYPE_S_EL1,
						opteed_sel1_interrupt_handler,
						flags);
			if (rc)
				panic();
		}

		/*
		 * OPTEE reports completion. The OPTEED must have initiated
		 * the original request through a synchronous entry into
		 * OPTEE. Jump back to the original C runtime context.
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);

	/*
	 * These function IDs are used only by OP-TEE to indicate it has
	 * finished:
	 * 1. turning itself on in response to an earlier psci
	 *    cpu_on request
	 * 2. resuming itself after an earlier psci cpu_suspend
	 *    request.
	 */
	case TEESMC_OPTEED_RETURN_ON_DONE:
	case TEESMC_OPTEED_RETURN_RESUME_DONE:

	/*
	 * These function IDs are used only by the SP to indicate it has
	 * finished:
	 * 1. suspending itself after an earlier psci cpu_suspend
	 *    request.
	 * 2. turning itself off in response to an earlier psci
	 *    cpu_off request.
	 */
	case TEESMC_OPTEED_RETURN_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SUSPEND_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_OFF_DONE:
	case TEESMC_OPTEED_RETURN_SYSTEM_RESET_DONE:
		/*
		 * OPTEE reports completion. The OPTEED must have initiated the
		 * original request through a synchronous entry into OPTEE.
		 * Jump back to the original C runtime context, and pass x1 as
		 * return value to the caller
		 */
		opteed_synchronous_sp_exit(optee_ctx, x1);

	/*
	 * OPTEE is returning from a call or being preempted from a call, in
	 * either case execution should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_CALL_DONE:
		/*
		 * This is the result from the secure client of an
		 * earlier request. The results are in x0-x3. Copy it
		 * into the non-secure context, save the secure state
		 * and return to the non-secure state.
		 */
		assert(handle == cm_get_context(SECURE));
		cm_el1_sysregs_context_save(SECURE);

		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/* Restore non-secure state */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET4(ns_cpu_context, x1, x2, x3, x4);

	/*
	 * OPTEE has finished handling an S-EL1 FIQ interrupt. Execution
	 * should resume in the normal world.
	 */
	case TEESMC_OPTEED_RETURN_FIQ_DONE:
		/* Get a reference to the non-secure context */
		ns_cpu_context = cm_get_context(NON_SECURE);
		assert(ns_cpu_context);

		/*
		 * Restore non-secure state. There is no need to save the
		 * secure system register context since OPTEE was supposed
		 * to preserve it during S-EL1 interrupt handling.
		 */
		cm_el1_sysregs_context_restore(NON_SECURE);
		cm_set_next_eret_context(NON_SECURE);

		SMC_RET0((uint64_t) ns_cpu_context);

	default:
		panic();
	}
}
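/*
 * Illustrative sketch of the vector table OP-TEE hands back in x1 with
 * TEESMC_OPTEED_RETURN_ENTRY_DONE. Only the entry points dereferenced
 * above are certain; the layout shown is a sketch and must in reality
 * match OP-TEE's own thread vector table, each slot being a 32-bit
 * instruction that execution lands on directly.
 */
typedef struct optee_vectors_sketch {
	uint32_t std_smc_entry;		/* yielding SMC requests */
	uint32_t fast_smc_entry;	/* fast SMC requests */
	uint32_t cpu_on_entry;		/* PSCI cpu_on target */
	uint32_t cpu_off_entry;		/* PSCI cpu_off notification */
	uint32_t cpu_resume_entry;	/* PSCI resume notification */
	uint32_t cpu_suspend_entry;	/* PSCI suspend notification */
	uint32_t fiq_entry;		/* S-EL1 interrupt forwarding */
	uint32_t system_off_entry;
	uint32_t system_reset_entry;
} optee_vectors_sketch_t;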
static int32_t tbase_init_entry(void)
{
	DBG_PRINTF("tbase_init\n\r");

	// Save el1 registers in case non-secure world has already been set up.
	cm_el1_sysregs_context_save(NON_SECURE);

	uint64_t mpidr = read_mpidr();
	uint32_t linear_id = platform_get_core_pos(mpidr);
	tbase_context *tbase_ctx = &secure_context[linear_id];

	// Note: mapping is 1:1, so physical and virtual addresses are here the same.
	cpu_context_t *ns_entry_context =
		(cpu_context_t *) cm_get_context(mpidr, NON_SECURE);

	// ************************************************************************************
	// Configure parameter passing to tbase

	// Calculate page start addresses for register areas. (The original
	// aligned &ns_entry_context, the address of the local pointer; the
	// contexts themselves are assumed to be the intended base.)
	registerFileStart[REGISTER_FILE_NWD] =
		page_align((uint64_t)ns_entry_context, DOWN);
	registerFileStart[REGISTER_FILE_MONITOR] =
		page_align((uint64_t)&msm_area, DOWN);

	// Calculate page end addresses for register areas.
	registerFileEnd[REGISTER_FILE_NWD] =
		(uint64_t)(&ns_entry_context[TBASE_CORE_COUNT]);
	registerFileEnd[REGISTER_FILE_MONITOR] =
		((uint64_t)&msm_area) + sizeof(msm_area);

	int32_t totalPages = 0;
	for (int area = 0; area < REGISTER_FILE_COUNT; area++) {
		int32_t pages = page_align(registerFileEnd[area] -
				registerFileStart[area], UP) / PAGE_SIZE;
		assert(pages + totalPages <= TBASE_INTERFACE_PAGES);
		tbase_init_register_file(area, totalPages, pages);
		totalPages += pages;
	}

	// ************************************************************************************
	// Create boot structure
	tbaseBootCfg.magic = TBASE_BOOTCFG_MAGIC;
	tbaseBootCfg.length = sizeof(bootCfg_t);
	tbaseBootCfg.version = TBASE_MONITOR_INTERFACE_VERSION;
	tbaseBootCfg.dRamBase = TBASE_NWD_DRAM_BASE;
	tbaseBootCfg.dRamSize = TBASE_NWD_DRAM_SIZE;
	tbaseBootCfg.secDRamBase = TBASE_SWD_DRAM_BASE;
	tbaseBootCfg.secDRamSize = TBASE_SWD_DRAM_SIZE;
	tbaseBootCfg.secIRamBase = TBASE_SWD_IMEM_BASE;
	tbaseBootCfg.secIRamSize = TBASE_SWD_IMEM_SIZE;
	tbaseBootCfg.conf_mair_el3 = read_mair_el3();
	tbaseBootCfg.MSMPteCount = totalPages;
	tbaseBootCfg.MSMBase = (uint64_t)registerFileL2;
	tbaseBootCfg.gic_distributor_base = TBASE_GIC_DIST_BASE;
	tbaseBootCfg.gic_cpuinterface_base = TBASE_GIC_CPU_BASE;
	tbaseBootCfg.gic_version = TBASE_GIC_VERSION;
	tbaseBootCfg.total_number_spi = TBASE_SPI_COUNT;
	tbaseBootCfg.ssiq_number = TBASE_SSIQ_NRO;
	tbaseBootCfg.flags = TBASE_MONITOR_FLAGS;

	DBG_PRINTF("*** tbase boot cfg ***\n\r");
	DBG_PRINTF("* magic                 : 0x%.X\n\r", tbaseBootCfg.magic);
	DBG_PRINTF("* length                : 0x%.X\n\r", tbaseBootCfg.length);
	DBG_PRINTF("* version               : 0x%.X\n\r", tbaseBootCfg.version);
	DBG_PRINTF("* dRamBase              : 0x%.X\n\r", tbaseBootCfg.dRamBase);
	DBG_PRINTF("* dRamSize              : 0x%.X\n\r", tbaseBootCfg.dRamSize);
	DBG_PRINTF("* secDRamBase           : 0x%.X\n\r", tbaseBootCfg.secDRamBase);
	DBG_PRINTF("* secDRamSize           : 0x%.X\n\r", tbaseBootCfg.secDRamSize);
	DBG_PRINTF("* secIRamBase           : 0x%.X\n\r", tbaseBootCfg.secIRamBase);
	DBG_PRINTF("* secIRamSize           : 0x%.X\n\r", tbaseBootCfg.secIRamSize);
	DBG_PRINTF("* conf_mair_el3         : 0x%.X\n\r", tbaseBootCfg.conf_mair_el3);
	DBG_PRINTF("* MSMPteCount           : 0x%.X\n\r", tbaseBootCfg.MSMPteCount);
	DBG_PRINTF("* MSMBase               : 0x%.X\n\r", tbaseBootCfg.MSMBase);
	DBG_PRINTF("* gic_distributor_base  : 0x%.X\n\r", tbaseBootCfg.gic_distributor_base);
	DBG_PRINTF("* gic_cpuinterface_base : 0x%.X\n\r", tbaseBootCfg.gic_cpuinterface_base);
	DBG_PRINTF("* gic_version           : 0x%.X\n\r", tbaseBootCfg.gic_version);
	DBG_PRINTF("* total_number_spi      : 0x%.X\n\r", tbaseBootCfg.total_number_spi);
	DBG_PRINTF("* ssiq_number           : 0x%.X\n\r", tbaseBootCfg.ssiq_number);
	DBG_PRINTF("* flags                 : 0x%.X\n\r", tbaseBootCfg.flags);

	// ************************************************************************************
	// tbaseBootCfg and l2 entries may be accessed uncached, so must flush those.
	flush_dcache_range((unsigned long)&tbaseBootCfg, sizeof(bootCfg_t));
	flush_dcache_range((unsigned long)&registerFileL2, sizeof(registerFileL2));

	// ************************************************************************************
	// Set registers for tbase initialization entry
	cpu_context_t *s_entry_context = &tbase_ctx->cpu_ctx;
	gp_regs_t *s_entry_gpregs = get_gpregs_ctx(s_entry_context);
	// X0 cleared, X1 carries the boot config pointer. (The original
	// wrote X1 twice; the first, dead store is assumed meant for X0.)
	write_ctx_reg(s_entry_gpregs, CTX_GPREG_X0, 0);
	write_ctx_reg(s_entry_gpregs, CTX_GPREG_X1, (int64_t)&tbaseBootCfg);

	// SPSR for SMC handling (FIQ mode)
	tbaseEntrySpsr = TBASE_ENTRY_SPSR;

	DBG_PRINTF("tbase init SPSR 0x%x\n\r",
		   read_ctx_reg(get_el3state_ctx(&tbase_ctx->cpu_ctx),
				CTX_SPSR_EL3));
	DBG_PRINTF("tbase SMC SPSR %x\n\r", tbaseEntrySpsr);

	// ************************************************************************************
	// Start tbase

	tbase_synchronous_sp_entry(tbase_ctx);
	tbase_ctx->state = TBASE_STATE_ON;

#if TBASE_PM_ENABLE
	// Register power management hooks with PSCI
	psci_register_spd_pm_hook(&tbase_pm);
#endif

	cm_el1_sysregs_context_restore(NON_SECURE);
	cm_set_next_eret_context(NON_SECURE);

	return 1;
}
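// Illustrative sketch (assumed semantics, not from this source): the
// page_align() helper used throughout tbase_init_entry() is taken to
// round to a PAGE_SIZE boundary, DOWN for base addresses and UP for
// sizes, with PAGE_SIZE a power of two.
static inline uint64_t page_align_sketch(uint64_t value, int dir)
{
	uint64_t mask = PAGE_SIZE - 1;

	return (dir == UP) ? ((value + mask) & ~mask) : (value & ~mask);
}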
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized
 * If execution is requested to non-secure PL1, and the CPU supports
 * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access NS copies of certain banked
			 * registers
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR: can be ignored when bypassing */

			/* HCPTR: disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable EL1 access to timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF_EL2 */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(0);

			/*
			 * Avoid unexpected debug traps in case where HDCR
			 * is not completely reset by the hardware - set
			 * HDCR.HPMN to PMCR.N and zero the remaining bits.
			 * The HDCR.HPMN and PMCR.N fields are the same size
			 * (5 bits) and HPMN is at offset zero within HDCR.
			 */
			write_hdcr((read_pmcr() & PMCR_N_BITS) >> PMCR_N_SHIFT);

			/*
			 * Reset CNTHP_CTL to disable the EL2 physical timer and
			 * therefore prevent timer interrupts.
			 */
			write_cnthp_ctl(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}
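/*
 * Illustrative sketch (example values assumed, not from this source) of
 * what the HDCR write above computes: PMCR.N and HDCR.HPMN are both
 * 5-bit fields and HPMN sits at offset zero of HDCR, so the expression
 * copies the event-counter count across and leaves every trap bit clear.
 */
static void hdcr_example(void)
{
	uint32_t pmcr = read_pmcr();	/* say PMCR.N == 6: a 6-counter PMU */
	uint32_t hpmn = (pmcr & PMCR_N_BITS) >> PMCR_N_SHIFT;	/* -> 6 */

	write_hdcr(hpmn);	/* HDCR == 0x6: HPMN = 6, no traps set */
}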
/* Setup context of the Secure Partition */
void secure_partition_setup(void)
{
	VERBOSE("S-EL1/S-EL0 context setup start...\n");

	cpu_context_t *ctx = cm_get_context(SECURE);

	/* Make sure that we got a Secure context. */
	assert(ctx != NULL);

	/* Assert we are in Secure state. */
	assert((read_scr_el3() & SCR_NS_BIT) == 0);

	/* Disable MMU at EL1. */
	disable_mmu_icache_el1();

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();

	/*
	 * General-Purpose registers
	 * -------------------------
	 */

	/*
	 * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
	 *     The buffer will be mapped in the Secure EL1 translation regime
	 *     with Normal IS WBWA attributes and RO data and Execute Never
	 *     instruction access permissions.
	 *
	 * X1: Size of the buffer in bytes
	 *
	 * X2: cookie value (Implementation Defined)
	 *
	 * X3: cookie value (Implementation Defined)
	 *
	 * X4 to X30 = 0 (already done by cm_init_my_context())
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, PLAT_SPM_BUF_BASE);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, PLAT_SPM_BUF_SIZE);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, PLAT_SPM_COOKIE_0);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, PLAT_SPM_COOKIE_1);

	/*
	 * SP_EL0: A non-zero value will indicate to the SP that the SPM has
	 * initialized the stack pointer for the current CPU through
	 * implementation defined means. The value will be 0 otherwise.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
			PLAT_SP_IMAGE_STACK_BASE +
			PLAT_SP_IMAGE_STACK_PCPU_SIZE);

	/*
	 * Setup translation tables
	 * ------------------------
	 */

#if ENABLE_ASSERTIONS
	/*
	 * Get max granularity supported by the platform. The TGRAN* fields
	 * live in ID_AA64MMFR0_EL1, so that is the register to read (the
	 * original read ID_AA64PFR0_EL1, which does not hold them).
	 */
	u_register_t id_aa64mmfr0_el1 = read_id_aa64mmfr0_el1();

	int tgran64_supported =
		((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;

	int tgran16_supported =
		((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;

	int tgran4_supported =
		((id_aa64mmfr0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;

	uintptr_t max_granule_size;

	if (tgran64_supported) {
		max_granule_size = 64 * 1024;
	} else if (tgran16_supported) {
		max_granule_size = 16 * 1024;
	} else {
		assert(tgran4_supported);
		max_granule_size = 4 * 1024;
	}

	VERBOSE("Max translation granule supported: %lu KiB\n",
		max_granule_size);

	uintptr_t max_granule_size_mask = max_granule_size - 1;

	/* Base must be aligned to the max granularity */
	assert((ARM_SP_IMAGE_NS_BUF_BASE & max_granule_size_mask) == 0);

	/* Size must be a multiple of the max granularity */
	assert((ARM_SP_IMAGE_NS_BUF_SIZE & max_granule_size_mask) == 0);
#endif /* ENABLE_ASSERTIONS */

	/* This region contains the exception vectors used at S-EL1. */
	const mmap_region_t sel1_exception_vectors =
		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
				SPM_SHIM_EXCEPTIONS_SIZE,
				MT_CODE | MT_SECURE | MT_PRIVILEGED);
	mmap_add_region_ctx(&secure_partition_xlat_ctx,
			    &sel1_exception_vectors);

	mmap_add_ctx(&secure_partition_xlat_ctx,
		     plat_get_secure_partition_mmap(NULL));

	init_xlat_tables_ctx(&secure_partition_xlat_ctx);

	/*
	 * MMU-related registers
	 * ---------------------
	 */

	/* Set attributes in the right indices of the MAIR */
	u_register_t mair_el1 =
		MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
		MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
		MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);

	/* Setup TCR_EL1. */
	u_register_t tcr_ps_bits =
		tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);

	u_register_t tcr_el1 =
		/* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
		(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE))	|
		/* Inner and outer WBWA, shareable. */
		TCR_SH_INNER_SHAREABLE					|
		TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA			|
		/* Set the granularity to 4KB. */
		TCR_TG0_4K						|
		/* Limit Intermediate Physical Address Size. */
		tcr_ps_bits << TCR_EL1_IPS_SHIFT			|
		/* Disable translations using TTBR1_EL1. */
		TCR_EPD1_BIT;
		/* The remaining fields related to TTBR1_EL1 are left as zero. */

	tcr_el1 &= ~(
		/* Enable translations using TTBR0_EL1 */
		TCR_EPD0_BIT
	);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);

	/* Setup SCTLR_EL1 */
	u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx),
					      CTX_SCTLR_EL1);

	sctlr_el1 |=
		/*SCTLR_EL1_RES1 |*/
		/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
		SCTLR_UCI_BIT						|
		/* RW regions at xlat regime EL1&0 are forced to be XN. */
		SCTLR_WXN_BIT						|
		/* Don't trap to EL1 execution of WFI or WFE at EL0. */
		SCTLR_NTWI_BIT | SCTLR_NTWE_BIT				|
		/* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
		SCTLR_UCT_BIT						|
		/* Don't trap to EL1 execution of DC ZVA at EL0. */
		SCTLR_DZE_BIT						|
		/* Enable SP Alignment check for EL0 */
		SCTLR_SA0_BIT						|
		/* Allow cacheable data and instr. accesses to normal memory. */
		SCTLR_C_BIT | SCTLR_I_BIT				|
		/* Alignment fault checking enabled when at EL1 and EL0. */
		SCTLR_A_BIT						|
		/* Enable MMU. */
		SCTLR_M_BIT;

	sctlr_el1 &= ~(
		/* Explicit data accesses at EL0 are little-endian. */
		SCTLR_E0E_BIT |
		/* Accesses to DAIF from EL0 are trapped to EL1. */
		SCTLR_UMA_BIT
	);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

	/* Point TTBR0_EL1 at the tables of the context created for the SP. */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
			(u_register_t)secure_partition_base_xlat_table);

	/*
	 * Setup other system registers
	 * ----------------------------
	 */

	/* Shim Exception Vector Base Address */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
			SPM_SHIM_EXCEPTIONS_PTR);

	/*
	 * FPEN: Forbid the Secure Partition to access FP/SIMD registers.
	 * TTA: Enable access to trace registers.
	 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
	 */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
			CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_ALL));

	/*
	 * Prepare information in buffer shared between EL3 and S-EL0
	 * ----------------------------------------------------------
	 */

	void *shared_buf_ptr = (void *) PLAT_SPM_BUF_BASE;

	/* Copy the boot information into the shared buffer with the SP. */
	assert((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE));

	assert(PLAT_SPM_BUF_BASE <= (UINTPTR_MAX - PLAT_SPM_BUF_SIZE + 1));

	const secure_partition_boot_info_t *sp_boot_info =
			plat_get_secure_partition_boot_info(NULL);

	assert(sp_boot_info != NULL);

	memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
	       sizeof(secure_partition_boot_info_t));

	/* Pointer to the MP information from the platform port. */
	secure_partition_mp_info_t *sp_mp_info =
		((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	assert(sp_mp_info != NULL);

	/*
	 * Point the shared buffer MP information pointer to where the info
	 * will be populated, just after the boot info.
	 */
	((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info =
		(secure_partition_mp_info_t *) ((uintptr_t)shared_buf_ptr +
				sizeof(secure_partition_boot_info_t));

	/*
	 * Update the shared buffer pointer to where the MP information for the
	 * payload will be populated
	 */
	shared_buf_ptr =
		((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	/*
	 * Copy the cpu information into the shared buffer area after the boot
	 * information.
	 */
	assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);

	assert((uintptr_t)shared_buf_ptr
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE -
		       (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));

	memcpy(shared_buf_ptr, (const void *) sp_mp_info,
	       sp_boot_info->num_cpus * sizeof(*sp_mp_info));

	/*
	 * Calculate the linear indices of cores in boot information for the
	 * secure partition and flag the primary CPU
	 */
	sp_mp_info = (secure_partition_mp_info_t *) shared_buf_ptr;

	for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
		u_register_t mpidr = sp_mp_info[index].mpidr;

		sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
		if (plat_my_core_pos() == sp_mp_info[index].linear_id)
			sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
	}

	VERBOSE("S-EL1/S-EL0 context setup end.\n");
}
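/*
 * Illustrative sketch (example value assumed, not from this source) of
 * the T0SZ arithmetic used for TCR_EL1 above: T0SZ is 64 minus log2 of
 * the virtual address space size, which must be a power of two for
 * __builtin_ctzl() to be meaningful.
 */
static void t0sz_example(void)
{
	/* A 4 GiB virtual address space ... */
	unsigned long long va_size = 1ULL << 32;

	/* ... yields T0SZ = 64 - 32 = 32, so TTBR0_EL1 covers 2^32 bytes. */
	unsigned int t0sz = 64 - __builtin_ctzll(va_size);

	assert(t0sz == 32);
}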
/*******************************************************************************
 * Prepare the CPU system registers for first entry into secure or normal world
 *
 * If execution is requested to hyp mode, HSCTLR is initialized
 * If execution is requested to non-secure PL1, and the CPU supports
 * HYP mode then HYP mode is disabled by configuring all necessary HYP mode
 * registers.
 ******************************************************************************/
void cm_prepare_el3_exit(uint32_t security_state)
{
	uint32_t sctlr, scr, hcptr;
	cpu_context_t *ctx = cm_get_context(security_state);

	assert(ctx);

	if (security_state == NON_SECURE) {
		scr = read_ctx_reg(get_regs_ctx(ctx), CTX_SCR);
		if (scr & SCR_HCE_BIT) {
			/* Use SCTLR value to initialize HSCTLR */
			sctlr = read_ctx_reg(get_regs_ctx(ctx), CTX_NS_SCTLR);
			sctlr |= HSCTLR_RES1;
			/* Temporarily set the NS bit to access HSCTLR */
			write_scr(read_scr() | SCR_NS_BIT);
			/*
			 * Make sure the write to SCR is complete so that
			 * we can access HSCTLR
			 */
			isb();
			write_hsctlr(sctlr);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		} else if (read_id_pfr1() &
			(ID_PFR1_VIRTEXT_MASK << ID_PFR1_VIRTEXT_SHIFT)) {
			/*
			 * Set the NS bit to access HCR, HCPTR, CNTHCTL,
			 * VPIDR, VMPIDR
			 */
			write_scr(read_scr() | SCR_NS_BIT);
			isb();

			/* PL2 present but unused, need to disable safely */
			write_hcr(0);

			/* HSCTLR: can be ignored when bypassing */

			/* HCPTR: disable all traps TCPAC, TTA, TCP */
			hcptr = read_hcptr();
			hcptr &= ~(TCPAC_BIT | TTA_BIT | TCP11_BIT | TCP10_BIT);
			write_hcptr(hcptr);

			/* Enable EL1 access to timer */
			write_cnthctl(PL1PCEN_BIT | PL1PCTEN_BIT);

			/* Reset CNTVOFF_EL2 */
			write64_cntvoff(0);

			/* Set VPIDR, VMPIDR to match MIDR, MPIDR */
			write_vpidr(read_midr());
			write_vmpidr(read_mpidr());

			/*
			 * Reset VTTBR.
			 * Needed because cache maintenance operations depend on
			 * the VMID even when non-secure EL1&0 stage 2 address
			 * translation is disabled.
			 */
			write64_vttbr(0);
			isb();

			write_scr(read_scr() & ~SCR_NS_BIT);
			isb();
		}
	}
}
/*******************************************************************************
 * This function is responsible for handling all SMCs in the Trusted OS/App
 * range from the non-secure state as defined in the SMC Calling Convention
 * Document. It is also responsible for communicating with the XILSP to
 * delegate work and return results back to the non-secure state. Lastly it
 * will also return any information that the XILSP needs to do the work
 * assigned to it.
 ******************************************************************************/
uint64_t xilspd_smc_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2,
			    uint64_t x3, uint64_t x4, void *cookie,
			    void *handle, uint64_t flags)
{
	cpu_context_t *ns_cpu_context;
	uint32_t linear_id = plat_my_core_pos(), ns;
	xilsp_context_t *xilsp_ctx = &xilspd_sp_context[linear_id];

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);

	switch (smc_fid) {
	/*
	 * This function ID is used only by the SP to indicate it has
	 * finished initialising itself after a cold boot
	 */
	case XILSP_ENTRY_DONE:
		if (ns)
			SMC_RET1(handle, SMC_UNK);

		/*
		 * Stash the SP entry points information. This is done
		 * only once on the primary cpu
		 */
		assert(xilsp_vectors == NULL);
		xilsp_vectors = (xilsp_vectors_t *) x1;

		if (xilsp_vectors)
			set_xilsp_pstate(xilsp_ctx->state, XILSP_PSTATE_ON);

		/*
		 * SP reports completion. The SPD must have initiated
		 * the original request through a synchronous entry
		 * into the SP. Jump back to the original C runtime
		 * context.
		 */
		xilspd_synchronous_sp_exit(xilsp_ctx, x1);
		break;

	case XILSP_ARITH:
		if (ns) {
			/*
			 * This is a fresh request from the non-secure client.
			 * Figure out which registers need to be preserved,
			 * save the non-secure state and send the request to
			 * the secure payload.
			 */
			assert(handle == cm_get_context(NON_SECURE));
			cm_el1_sysregs_context_save(NON_SECURE);

			/*
			 * We are done stashing the non-secure context. Ask the
			 * secure payload to do the work now.
			 */

			/*
			 * Verify if there is a valid context to use, copy the
			 * operation type and parameters to the secure context
			 * and jump to the fast smc entry point in the secure
			 * payload. Entry into S-EL1 will take place upon exit
			 * from this function.
			 */
			assert(&xilsp_ctx->cpu_ctx == cm_get_context(SECURE));

			/*
			 * Set appropriate entry for SMC.
			 * We expect the XILSP to manage the PSTATE.I and
			 * PSTATE.F flags as appropriate.
			 */
			cm_set_elr_el3(SECURE, (uint64_t)
					&xilsp_vectors->fast_smc_entry);

			cm_el1_sysregs_context_restore(SECURE);
			cm_set_next_eret_context(SECURE);

			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X4,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X4));
			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X5,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X5));
			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X6,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X6));
			/* Propagate hypervisor client ID */
			write_ctx_reg(get_gpregs_ctx(&xilsp_ctx->cpu_ctx),
				      CTX_GPREG_X7,
				      read_ctx_reg(get_gpregs_ctx(handle),
						   CTX_GPREG_X7));

			SMC_RET4(&xilsp_ctx->cpu_ctx, smc_fid, x1, x2, x3);
		} else {
			/*
			 * This is the result from the secure client of an
			 * earlier request. The results are in x1-x4. Copy it
			 * into the non-secure context, save the secure state
			 * and return to the non-secure state.
			 */
			assert(handle == cm_get_context(SECURE));
			cm_el1_sysregs_context_save(SECURE);

			/* Get a reference to the non-secure context */
			ns_cpu_context = cm_get_context(NON_SECURE);
			assert(ns_cpu_context);

			/* Restore non-secure state */
			cm_el1_sysregs_context_restore(NON_SECURE);
			cm_set_next_eret_context(NON_SECURE);

			SMC_RET4(ns_cpu_context, x1, x2, x3, x4);
		}
		break;

	default:
		break;
	}

	SMC_RET1(handle, SMC_UNK);
}
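/*
 * Illustrative caller-side sketch (smc_call() is a hypothetical wrapper,
 * not from this source): a non-secure client reaches the XILSP_ARITH
 * branch above with a fast SMC carrying operands in x1-x4; the results
 * come back in the caller's x0-x3 via the SMC_RET4() on the secure
 * return path.
 */
static void xilsp_arith_example(uint64_t op_a, uint64_t op_b,
				uint64_t op_c, uint64_t op_d)
{
	smc_call(XILSP_ARITH, op_a, op_b, op_c, op_d);
}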