/*******************************************************************************
 * Given a secure payload entrypoint, register width, cpu id & pointer to a
 * context data structure, this function will create a secure context ready for
 * programming an entry into the secure payload.
 ******************************************************************************/
void tlkd_init_tlk_ep_state(struct entry_point_info *tlk_entry_point,
			    uint32_t rw,
			    uint64_t pc,
			    tlk_context_t *tlk_ctx)
{
	uint32_t entry_attr;
	uint32_t entry_spsr;

	/* A NULL context, entrypoint info or PC is a critical programming error */
	assert(tlk_ctx);
	assert(tlk_entry_point);
	assert(pc);

	/* Bind this context to the current cpu and mark no standard SMC active */
	tlk_ctx->mpidr = read_mpidr_el1();
	clr_std_smc_active_flag(tlk_ctx->state);
	cm_set_context(&tlk_ctx->cpu_ctx, SECURE);

	/* Build the entry SPSR according to the payload's register width */
	if (rw == SP_AARCH64) {
		entry_spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
				     DISABLE_ALL_EXCEPTIONS);
	} else {
		entry_spsr = SPSR_MODE32(MODE32_svc, SPSR_T_ARM,
					 read_sctlr_el3() & SCTLR_EE_BIT,
					 DISABLE_ALL_EXCEPTIONS);
	}

	/* Initialise an entrypoint to set up the CPU context */
	entry_attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		entry_attr |= EP_EE_BIG;

	SET_PARAM_HEAD(tlk_entry_point, PARAM_EP, VERSION_1, entry_attr);

	tlk_entry_point->pc = pc;
	tlk_entry_point->spsr = entry_spsr;
}
/******************************************************************************* * Function that does the first bit of architectural setup that affects * execution in the non-secure address space. ******************************************************************************/ void bl1_arch_setup(void) { unsigned long tmp_reg = 0; /* Enable alignment checks and set the exception endianess to LE */ tmp_reg = read_sctlr_el3(); tmp_reg |= (SCTLR_A_BIT | SCTLR_SA_BIT); tmp_reg &= ~SCTLR_EE_BIT; write_sctlr_el3(tmp_reg); /* * Enable HVCs, route FIQs to EL3, set the next EL to be AArch64, route * external abort and SError interrupts to EL3 */ tmp_reg = SCR_RES1_BITS | SCR_RW_BIT | SCR_HCE_BIT | SCR_EA_BIT | SCR_FIQ_BIT; write_scr(tmp_reg); /* * Enable SError and Debug exceptions */ enable_serror(); enable_debug_exceptions(); return; }
/*
 * Initialise the per-cpu secure context used to enter the t-base secure
 * payload: zero the context, program a minimal SCTLR_EL1 (MMU and caches
 * off, current endianness preserved), mark the context OFF and register it
 * with the context manager. Always returns 0.
 */
static int32_t tbase_init_secure_context(tbase_context *tbase_ctx)
{
	uint64_t mpidr = read_mpidr();
	uint32_t sctlr_val = read_sctlr_el3();
	el1_sys_regs_t *s_el1_regs;

	/* Passing a NULL context is a critical programming error */
	assert(tbase_ctx);

	DBG_PRINTF("tbase_init_secure_context\n\r");

	memset(tbase_ctx, 0, sizeof(*tbase_ctx));

	/* Get a pointer to the S-EL1 context memory */
	s_el1_regs = get_sysregs_ctx(&tbase_ctx->cpu_ctx);

	/*
	 * Program SCTLR for S-EL1 execution with caches and MMU off: keep
	 * only the current EE (endianness) setting, then add the EL1 RES1
	 * bits.
	 */
	sctlr_val &= SCTLR_EE_BIT;
	sctlr_val |= SCTLR_EL1_RES1;
	write_ctx_reg(s_el1_regs, CTX_SCTLR_EL1, sctlr_val);

	/* Set this context as ready to be initialised, i.e. OFF */
	tbase_ctx->state = TBASE_STATE_OFF;

	/* Associate this context with the cpu specified */
	tbase_ctx->mpidr = mpidr;

	/* Register this context for this core with the context manager */
	cm_set_context(mpidr, &tbase_ctx->cpu_ctx, SECURE);

	return 0;
}
/*
 * Acquire the bakery lock identified by (id, offset) for the calling cpu.
 *
 * Implements the acquire half of Lamport's bakery algorithm: take a ticket,
 * then wait until every other potential contender either has no ticket or
 * has a lower priority (higher ticket/cpu value). Spins (with wfe) until the
 * lock is acquired; does not return failure.
 *
 * Cache maintenance: when the data cache is disabled at EL3 (SCTLR_C_BIT
 * clear), other observers' bakery data must be read with explicit cache
 * operations, hence the is_cached flag threaded through the helpers.
 */
void bakery_lock_get(unsigned int id, unsigned int offset)
{
	unsigned int they, me, is_cached;
	unsigned int my_ticket, my_prio, their_ticket;
	bakery_info_t *their_bakery_info;
	unsigned int their_bakery_data;

	me = plat_my_core_pos();

	/* Cache state decides whether explicit cache maintenance is needed */
	is_cached = read_sctlr_el3() & SCTLR_C_BIT;

	/* Get a ticket */
	my_ticket = bakery_get_ticket(id, offset, me, is_cached);

	/*
	 * Now that we got our ticket, compute our priority value, then compare
	 * with that of others, and proceed to acquire the lock
	 */
	my_prio = PRIORITY(my_ticket, me);
	for (they = 0; they < BAKERY_LOCK_MAX_CPUS; they++) {
		if (me == they)
			continue;

		/*
		 * Get a reference to the other contender's bakery info and
		 * ensure that a stale copy is not read.
		 */
		their_bakery_info = get_bakery_info_by_index(offset, id, they);
		assert(their_bakery_info);

		/* Wait for the contender to get their ticket */
		do {
			read_cache_op(their_bakery_info, is_cached);
			their_bakery_data = their_bakery_info->lock_data;
		} while (bakery_is_choosing(their_bakery_data));

		/*
		 * If the other party is a contender, they'll have non-zero
		 * (valid) ticket value. If they do, compare priorities
		 */
		their_ticket = bakery_ticket_number(their_bakery_data);
		if (their_ticket && (PRIORITY(their_ticket, they) < my_prio)) {
			/*
			 * They have higher priority (lower value). Wait for
			 * their ticket value to change (either release the lock
			 * to have it dropped to 0; or drop and probably content
			 * again for the same lock to have an even higher value)
			 */
			do {
				wfe();
				read_cache_op(their_bakery_info, is_cached);
			} while (their_ticket ==
				bakery_ticket_number(their_bakery_info->lock_data));
		}
	}

	/* Lock acquired */
}
/*******************************************************************************
 * Set the Secure EL1 required architectural state
 ******************************************************************************/
void bl1_arch_next_el_setup(void)
{
	unsigned long sctlr_el1_val;

	/* Inherit the current BL's endianness; everything else stays clear */
	sctlr_el1_val = read_sctlr_el3() & SCTLR_EE_BIT;

	/* Apply the mandatory EL1 RES1 bits and program SCTLR_EL1 */
	sctlr_el1_val |= SCTLR_EL1_RES1;
	write_sctlr_el1(sctlr_el1_val);
}
/*
 * Release the bakery lock identified by (id, offset) for the calling cpu.
 *
 * Clears the caller's ticket (lock_data = 0), making the lock available,
 * performs any required cache maintenance when the data cache is off, and
 * wakes waiters parked in wfe().
 *
 * FIX: a DMB (inner-shareable) is required before clearing lock_data so
 * that all writes performed inside the critical section become visible to
 * other observers before the lock is seen as released. Without it, another
 * cpu could acquire the lock and read stale critical-section data.
 */
void bakery_lock_release(unsigned int id, unsigned int offset)
{
	bakery_info_t *my_bakery_info;
	unsigned int is_cached = read_sctlr_el3() & SCTLR_C_BIT;

	my_bakery_info = get_my_bakery_info(offset, id);

	/* Releasing a lock we do not hold is a programming error */
	assert(bakery_ticket_number(my_bakery_info->lock_data));

	/*
	 * Ensure the critical-section writes are ordered before the release
	 * of the lock (store-release semantics).
	 */
	dmbish();

	my_bakery_info->lock_data = 0;
	write_cache_op(my_bakery_info, is_cached);
	sev();
}
/*******************************************************************************
 * Given a secure payload entrypoint info pointer, entry point PC, register
 * width, cpu id & pointer to a context data structure, this function will
 * initialize tsp context and entry point info for the secure payload
 ******************************************************************************/
void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
			    uint32_t rw,
			    uint64_t pc,
			    tsp_context_t *tsp_ctx)
{
	uint32_t attr;

	/* Passing a NULL context, entrypoint or PC is a critical programming error */
	assert(tsp_ctx);
	assert(tsp_entry_point);
	assert(pc);

	/*
	 * We support AArch64 TSP for now.
	 * TODO: Add support for AArch32 TSP
	 */
	assert(rw == TSP_AARCH64);

	/* Bind this context to the calling cpu */
	tsp_ctx->mpidr = read_mpidr_el1();

	/* Start from a clean state: TSP is OFF, no yielding SMC in flight */
	tsp_ctx->state = 0;
	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
	clr_yield_smc_active_flag(tsp_ctx->state);
	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);

	/* Initialise an entrypoint to set up the CPU context */
	attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		attr |= EP_EE_BIG;

	SET_PARAM_HEAD(tsp_entry_point, PARAM_EP, VERSION_1, attr);

	tsp_entry_point->pc = pc;
	tsp_entry_point->spsr = SPSR_64(MODE_EL1, MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);
	zeromem(&tsp_entry_point->args, sizeof(tsp_entry_point->args));
}
/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is turned off.
 *
 * psci_afflvl0_off: power down the calling cpu (affinity level 0).
 * Returns PSCI_E_SUCCESS, or the error reported by the SPD / platform hook
 * (the SPD is assumed to report E_DENIED if it refuses the power down).
 *
 * NOTE(review): the sequence below is strictly order-dependent — the SPD
 * must be consulted first, the data cache must be disabled before the
 * flush-by-set/way, and the platform hook runs last.
 ******************************************************************************/
static int psci_afflvl0_off(unsigned long mpidr, aff_map_node *cpu_node)
{
	unsigned int index, plat_state;
	int rc = PSCI_E_SUCCESS;
	unsigned long sctlr;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* State management: mark this cpu as turned off */
	psci_set_state(cpu_node, PSCI_STATE_OFF);

	/*
	 * Generic management: Get the index for clearing any lingering re-entry
	 * information and allow the secure world to switch itself off
	 */

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookeeping. Assume that the SPD always reports an
	 * E_DENIED error if SP refuse to power down
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			/* SPD denied the power down; abort before touching caches */
			return rc;
	}

	/* Clear any lingering non-secure re-entry information for this cpu */
	index = cpu_node->data;
	memset(&psci_ns_entry_info[index], 0, sizeof(psci_ns_entry_info[index]));

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 *
	 * TODO: This power down sequence varies across cpus so it needs to be
	 * abstracted out on the basis of the MIDR like in cpu_reset_handler().
	 * Do the bare minimal for the time being. Fix this before porting to
	 * Cortex models.
	 */
	sctlr = read_sctlr_el3();
	sctlr &= ~SCTLR_C_BIT;
	write_sctlr_el3(sctlr);

	/*
	 * CAUTION: This flush to the level of unification makes an assumption
	 * about the cache hierarchy at affinity level 0 (cpu) in the platform.
	 * Ideally the platform should tell psci which levels to flush to exit
	 * coherency.
	 */
	dcsw_op_louis(DCCISW);

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	if (psci_plat_pm_ops->affinst_off) {
		/* Get the current physical state of this cpu */
		plat_state = psci_get_phys_state(cpu_node);
		rc = psci_plat_pm_ops->affinst_off(mpidr,
						   cpu_node->level,
						   plat_state);
	}

	return rc;
}
/******************************************************************************* * Function to perform late architectural and platform specific initialization. * It also locates and loads the BL2 raw binary image in the trusted DRAM. Only * called by the primary cpu after a cold boot. * TODO: Add support for alternative image load mechanism e.g using virtio/elf * loader etc. ******************************************************************************/ void bl1_main(void) { #if DEBUG unsigned long sctlr_el3 = read_sctlr_el3(); #endif unsigned long bl2_base; unsigned int load_type = TOP_LOAD, spsr; meminfo *bl1_tzram_layout; meminfo *bl2_tzram_layout = 0x0; /* * Ensure that MMU/Caches and coherency are turned on */ assert(sctlr_el3 | SCTLR_M_BIT); assert(sctlr_el3 | SCTLR_C_BIT); assert(sctlr_el3 | SCTLR_I_BIT); /* Perform remaining generic architectural setup from EL3 */ bl1_arch_setup(); /* Perform platform setup in BL1. */ bl1_platform_setup(); /* Announce our arrival */ printf(FIRMWARE_WELCOME_STR); printf("%s\n\r", build_message); /* * Find out how much free trusted ram remains after BL1 load * & load the BL2 image at its top */ bl1_tzram_layout = bl1_plat_sec_mem_layout(); bl2_base = load_image(bl1_tzram_layout, (const char *) BL2_IMAGE_NAME, load_type, BL2_BASE); /* * Create a new layout of memory for BL2 as seen by BL1 i.e. * tell it the amount of total and free memory available. * This layout is created at the first free address visible * to BL2. BL2 will read the memory layout before using its * memory for other purposes. 
*/ bl2_tzram_layout = (meminfo *) bl1_tzram_layout->free_base; init_bl2_mem_layout(bl1_tzram_layout, bl2_tzram_layout, load_type, bl2_base); if (bl2_base) { bl1_arch_next_el_setup(); spsr = make_spsr(MODE_EL1, MODE_SP_ELX, MODE_RW_64); printf("Booting trusted firmware boot loader stage 2\n\r"); #if DEBUG printf("BL2 address = 0x%llx \n\r", (unsigned long long) bl2_base); printf("BL2 cpsr = 0x%x \n\r", spsr); printf("BL2 memory layout address = 0x%llx \n\r", (unsigned long long) bl2_tzram_layout); #endif run_image(bl2_base, spsr, SECURE, (void *) bl2_tzram_layout, NULL); } /* * TODO: print failure to load BL2 but also add a tzwdog timer * which will reset the system eventually. */ printf("Failed to load boot loader stage 2 (BL2) firmware.\n\r"); return; }