/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
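/*
 * Hedged caller sketch (the helper name is illustrative, not from the
 * source): validate an MPIDR received from a lower exception level before
 * converting it to a linear core index, instead of asserting on untrusted
 * input.
 */
static int demo_handle_target(u_register_t target_mpidr)
{
	if (psci_validate_mpidr(target_mpidr) != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* Non-negative by construction after the validation above. */
	unsigned int idx = (unsigned int)plat_core_pos_by_mpidr(target_mpidr);

	return (int)idx; /* Placeholder: act on the validated core index. */
}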
static int poplar_pwr_domain_on(u_register_t mpidr)
{
	unsigned int cpu = plat_core_pos_by_mpidr(mpidr);
	unsigned int regval, regval_bak;

	/* Select 400MHz before starting the slave cores */
	regval_bak = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP));
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), 0x206);
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), 0x606);

	/* Clear the slave cpu arm_por_srst_req reset */
	regval = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST));
	regval &= ~(1 << (cpu + CPU_REG_COREPO_SRST));
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST), regval);

	/* Clear the slave cpu reset */
	regval = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST));
	regval &= ~(1 << (cpu + CPU_REG_CORE_SRST));
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST), regval);

	/* Restore cpu frequency */
	regval = regval_bak & ~(1 << REG_CPU_LP_CPU_SW_BEGIN);
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), regval);
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), regval_bak);

	return PSCI_E_SUCCESS;
}
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu_id = plat_core_pos_by_mpidr(mpidr);

	/*
	 * plat_core_pos_by_mpidr() returns a negative value for an invalid
	 * mpidr; the unsigned conversion maps that onto a huge value, which
	 * the bounds assert below then catches.
	 */
	assert(cpu_id < PLATFORM_CORE_COUNT);
	assert(cpuson_flags[cpu_id] == 0);
	cpuson_flags[cpu_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpu_id] = entrypoint;
	dsb();

	cpus_power_domain_on(cpu_id);

	return 0;
}
/*******************************************************************************
 * PSCI Compatibility helper function to return the state id encoded in the
 * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
 * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
	int cpu_idx = plat_core_pos_by_mpidr(mpidr);

	if (cpu_idx == -1)
		return PSCI_INVALID_DATA;

	/* Sanity check to verify that the CPU is in CPU_SUSPEND */
	if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
	    !is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
		return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);

	return PSCI_INVALID_DATA;
}
void plat_ic_raise_el3_sgi(int sgi_num, u_register_t target)
{
#if GICV2_G0_FOR_EL3
	int id;

	/* Target must be a valid MPIDR in the system */
	id = plat_core_pos_by_mpidr(target);
	assert(id >= 0);

	/* Verify that this is a secure SGI */
	assert(plat_ic_get_interrupt_type(sgi_num) == INTR_TYPE_EL3);

	gicv2_raise_sgi(sgi_num, id);
#else
	assert(false);
#endif
}
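/*
 * Hedged usage sketch: the SGI number below is a placeholder - platforms
 * reserve their own secure SGI IDs. The driver maps the MPIDR to a GICv2
 * CPU interface internally via plat_core_pos_by_mpidr().
 */
#define DEMO_EL3_SGI	15	/* Placeholder secure SGI ID */

static void demo_signal_peer(u_register_t peer_mpidr)
{
	plat_ic_raise_el3_sgi(DEMO_EL3_SGI, peer_mpidr);
}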
/*******************************************************************************
 * This function returns the appropriate count and residency time of the
 * local state for the highest power level expressed in the `power_state`
 * for the node represented by `target_cpu`.
 ******************************************************************************/
int psci_get_stat(u_register_t target_cpu, unsigned int power_state,
		  psci_stat_t *psci_stat)
{
	int rc, pwrlvl, lvl, parent_idx, stat_idx, target_idx;
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
	plat_local_state_t local_state;

	/* Validate the target_cpu parameter and determine the cpu index */
	target_idx = plat_core_pos_by_mpidr(target_cpu);
	if (target_idx == -1)
		return PSCI_E_INVALID_PARAMS;

	/* Validate the power_state parameter */
	if (!psci_plat_pm_ops->translate_power_state_by_mpidr)
		rc = psci_validate_power_state(power_state, &state_info);
	else
		rc = psci_plat_pm_ops->translate_power_state_by_mpidr(
			target_cpu, power_state, &state_info);

	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* Find the highest power level */
	pwrlvl = psci_find_target_suspend_lvl(&state_info);
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Get the index into the stats array */
	local_state = state_info.pwr_domain_state[pwrlvl];
	stat_idx = get_stat_idx(local_state, pwrlvl);

	if (pwrlvl > PSCI_CPU_PWR_LVL) {
		/* Get the power domain index */
		parent_idx = psci_cpu_pd_nodes[target_idx].parent_node;
		for (lvl = PSCI_CPU_PWR_LVL + 1; lvl < pwrlvl; lvl++)
			parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;

		/* Get the non cpu power domain stats */
		*psci_stat = psci_non_cpu_stat[parent_idx][stat_idx];
	} else {
		/* Get the cpu power domain stats */
		*psci_stat = psci_cpu_stat[target_idx][stat_idx];
	}

	return PSCI_E_SUCCESS;
}
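/*
 * Hedged sketch of how the two PSCI_STAT queries could sit on top of
 * psci_get_stat(). The wrapper names are illustrative, and the 'residency'
 * and 'count' field names are assumed from the TF-A psci_stat_t definition.
 */
static u_register_t demo_stat_residency(u_register_t target_cpu,
					unsigned int power_state)
{
	psci_stat_t psci_stat;

	if (psci_get_stat(target_cpu, power_state, &psci_stat) != PSCI_E_SUCCESS)
		return 0;

	return psci_stat.residency;	/* PSCI_STAT_RESIDENCY */
}

static u_register_t demo_stat_count(u_register_t target_cpu,
				    unsigned int power_state)
{
	psci_stat_t psci_stat;

	if (psci_get_stat(target_cpu, power_state, &psci_stat) != PSCI_E_SUCCESS)
		return 0;

	return psci_stat.count;		/* PSCI_STAT_COUNT */
}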
static int zynqmp_pwr_domain_on(u_register_t mpidr)
{
	/*
	 * Keep the index signed: plat_core_pos_by_mpidr() returns -1 for an
	 * invalid mpidr, and comparing an unsigned variable against -1 only
	 * works via implicit conversion.
	 */
	int cpu_id = plat_core_pos_by_mpidr(mpidr);
	const struct pm_proc *proc;

	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);

	if (cpu_id < 0)
		return PSCI_E_INTERN_FAIL;

	proc = pm_get_proc(cpu_id);

	/* Send request to PMU to wake up selected APU CPU core */
	pm_req_wakeup(proc->node_id, 1, zynqmp_sec_entry, REQ_ACK_BLOCKING);

	return PSCI_E_SUCCESS;
}
/*******************************************************************************
 * Platform handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 ******************************************************************************/
static int rpi3_pwr_domain_on(u_register_t mpidr)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int pos = plat_core_pos_by_mpidr(mpidr);
	uint64_t *hold_base = (uint64_t *)PLAT_RPI3_TM_HOLD_BASE;

	assert(pos < PLATFORM_CORE_COUNT);

	hold_base[pos] = PLAT_RPI3_TM_HOLD_STATE_GO;
	/* Make sure that the write has completed */
	dsb();
	isb();

	sev();

	return rc;
}
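/*
 * Hedged counterpart sketch: in the real port the powered-on core parks in
 * an assembly hold-pen in the reset path; the equivalent logic in C would
 * look roughly like this (the function name is illustrative).
 */
static void demo_secondary_hold_pen(unsigned int pos)
{
	volatile uint64_t *hold_base = (uint64_t *)PLAT_RPI3_TM_HOLD_BASE;

	/* The sev() in rpi3_pwr_domain_on() releases this wfe(). */
	while (hold_base[pos] != PLAT_RPI3_TM_HOLD_STATE_GO)
		wfe();
}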
/*
 * Helper function to get the power state of a power domain node as reported
 * by the SCP.
 */
int css_scp_get_power_state(u_register_t mpidr, unsigned int power_level)
{
	int ret, cpu_idx;
	uint32_t scmi_pwr_state = 0, lvl_state;

	/* We don't support get power state at the system power domain level */
	if ((power_level > PLAT_MAX_PWR_LVL) ||
	    (power_level == CSS_SYSTEM_PWR_DMN_LVL)) {
		WARN("Invalid power level %u specified for SCMI get power state\n",
		     power_level);
		return PSCI_E_INVALID_PARAMS;
	}

	cpu_idx = plat_core_pos_by_mpidr(mpidr);
	assert(cpu_idx > -1);

	ret = scmi_pwr_state_get(scmi_handle,
				 plat_css_core_pos_to_scmi_dmn_id_map[cpu_idx],
				 &scmi_pwr_state);

	if (ret != SCMI_E_SUCCESS) {
		WARN("SCMI get power state command returned unexpected value 0x%x\n",
		     ret);
		return PSCI_E_INVALID_PARAMS;
	}

	/*
	 * Find the maximum power level described in the get power state
	 * command. If it is less than the requested power level, then assume
	 * the requested power level is ON.
	 */
	if (SCMI_GET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state) < power_level)
		return HW_ON;

	lvl_state = SCMI_GET_PWR_STATE_LVL(scmi_pwr_state, power_level);
	if (lvl_state == scmi_power_state_on)
		return HW_ON;

	assert((lvl_state == scmi_power_state_off) ||
	       (lvl_state == scmi_power_state_sleep));
	return HW_OFF;
}
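/*
 * Hedged usage sketch (the caller name is illustrative): HW_ON/HW_OFF are
 * the PSCI NODE_HW_STATE return values, and level 0 is the CPU power domain
 * level.
 */
static bool demo_core_is_powered(u_register_t peer_mpidr)
{
	return css_scp_get_power_state(peer_mpidr, 0U) == HW_ON;
}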
/*
 * This function gets the time-stamp value for the PMF services
 * registered for SMC interface based on `tid` and `mpidr`.
 */
int pmf_get_timestamp_smc(unsigned int tid, u_register_t mpidr,
			  unsigned int flags, unsigned long long *ts_value)
{
	pmf_svc_desc_t *svc_desc;

	assert(ts_value);

	/* Search for registered service. */
	svc_desc = get_service(tid);

	if ((svc_desc == NULL) || (plat_core_pos_by_mpidr(mpidr) < 0)) {
		*ts_value = 0;
		return -EINVAL;
	} else {
		/* Call the service time-stamp handler. */
		*ts_value = svc_desc->get_ts(tid, mpidr, flags);
		return 0;
	}
}
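/*
 * Hedged retrieval sketch: the tid value is a placeholder for a registered
 * time-stamp ID, and PMF_CACHE_MAINT is assumed to be the PMF flag that
 * requests cache maintenance before the time-stamp is read.
 */
static int demo_read_ts(unsigned int demo_tid, unsigned long long *ts)
{
	return pmf_get_timestamp_smc(demo_tid, read_mpidr_el1(),
				     PMF_CACHE_MAINT, ts);
}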
void plat_ic_set_spi_routing(unsigned int id, unsigned int routing_mode,
			     u_register_t mpidr)
{
	int proc_num = 0;

	switch (routing_mode) {
	case INTR_ROUTING_MODE_PE:
		proc_num = plat_core_pos_by_mpidr(mpidr);
		assert(proc_num >= 0);
		break;
	case INTR_ROUTING_MODE_ANY:
		/* Bit mask selecting all 8 CPUs as candidates */
		proc_num = -1;
		break;
	default:
		assert(0); /* Unreachable */
		break;
	}

	gicv2_set_spi_routing(id, proc_num);
}
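/*
 * Hedged call-site sketches (the SPI ID is a placeholder): pin an SPI to
 * the calling PE, or let the distributor offer it to any core. In the ANY
 * case the mpidr argument is ignored by this driver.
 */
#define DEMO_SPI_ID	42U	/* Placeholder shared peripheral interrupt */

static void demo_route_spi(void)
{
	/* Route the SPI to the calling PE only. */
	plat_ic_set_spi_routing(DEMO_SPI_ID, INTR_ROUTING_MODE_PE,
				read_mpidr_el1());

	/* Alternatively, let any core take the SPI. */
	plat_ic_set_spi_routing(DEMO_SPI_ID, INTR_ROUTING_MODE_ANY, 0U);
}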
static int zynqmp_nopmu_pwr_domain_on(u_register_t mpidr)
{
	uint32_t r;
	/* Signed, so the -1 error return from the lookup compares cleanly. */
	int cpu_id = plat_core_pos_by_mpidr(mpidr);

	VERBOSE("%s: mpidr: 0x%lx\n", __func__, mpidr);

	if (cpu_id < 0)
		return PSCI_E_INTERN_FAIL;

	/* program RVBAR */
	mmio_write_32(APU_RVBAR_L_0 + (cpu_id << 3), zynqmp_sec_entry);
	mmio_write_32(APU_RVBAR_H_0 + (cpu_id << 3), zynqmp_sec_entry >> 32);

	/* clear VINITHI */
	r = mmio_read_32(APU_CONFIG_0);
	r &= ~(1 << APU_CONFIG_0_VINITHI_SHIFT << cpu_id);
	mmio_write_32(APU_CONFIG_0, r);

	/* clear power down request */
	r = mmio_read_32(APU_PWRCTL);
	r &= ~(1 << cpu_id);
	mmio_write_32(APU_PWRCTL, r);

	/* power up island */
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_EN, 1 << cpu_id);
	mmio_write_32(PMU_GLOBAL_REQ_PWRUP_TRIG, 1 << cpu_id);
	/* FIXME: we should have a way to break out */
	while (mmio_read_32(PMU_GLOBAL_REQ_PWRUP_STATUS) & (1 << cpu_id))
		;

	/* release core reset */
	r = mmio_read_32(CRF_APB_RST_FPD_APU);
	r &= ~((CRF_APB_RST_FPD_APU_ACPU_PWRON_RESET |
		CRF_APB_RST_FPD_APU_ACPU_RESET) << cpu_id);
	mmio_write_32(CRF_APB_RST_FPD_APU, r);

	return PSCI_E_SUCCESS;
}
/*
 * Helper function to turn ON a CPU power domain and its parent power domains
 * if applicable.
 */
void css_scp_on(u_register_t mpidr)
{
	int lvl = 0, ret, core_pos;
	uint32_t scmi_pwr_state = 0;

	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		SCMI_SET_PWR_STATE_LVL(scmi_pwr_state, lvl,
				       scmi_power_state_on);

	SCMI_SET_PWR_STATE_MAX_PWR_LVL(scmi_pwr_state, lvl - 1);

	core_pos = plat_core_pos_by_mpidr(mpidr);
	assert(core_pos >= 0 && core_pos < PLATFORM_CORE_COUNT);

	ret = scmi_pwr_state_set(scmi_handle,
				 plat_css_core_pos_to_scmi_dmn_id_map[core_pos],
				 scmi_pwr_state);

	if (ret != SCMI_E_QUEUED && ret != SCMI_E_SUCCESS) {
		ERROR("SCMI set power state command returned unexpected value 0x%x\n",
		      ret);
		panic();
	}
}
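/*
 * Hedged sketch of the typical call site (the hook name is illustrative): a
 * CSS-style pwr_domain_on handler simply forwards the target MPIDR to the
 * SCP. Since css_scp_on() panics on SCMI failure, reaching the return
 * statement implies the request was accepted or queued.
 */
static int demo_pwr_domain_on(u_register_t mpidr)
{
	css_scp_on(mpidr);

	return PSCI_E_SUCCESS;
}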
static unsigned int plat_rockchip_mpidr_to_core_pos(unsigned long mpidr)
{
	/*
	 * Note: plat_core_pos_by_mpidr() returns a negative value for an
	 * invalid mpidr; the cast maps that onto a large unsigned value, so
	 * callers must pass a valid mpidr.
	 */
	return (unsigned int)plat_core_pos_by_mpidr(mpidr);
}
/* Setup context of the Secure Partition */
void secure_partition_setup(void)
{
	VERBOSE("S-EL1/S-EL0 context setup start...\n");

	cpu_context_t *ctx = cm_get_context(SECURE);

	/* Make sure that we got a Secure context. */
	assert(ctx != NULL);

	/* Assert we are in Secure state. */
	assert((read_scr_el3() & SCR_NS_BIT) == 0);

	/* Disable MMU at EL1. */
	disable_mmu_icache_el1();

	/* Invalidate TLBs at EL1. */
	tlbivmalle1();

	/*
	 * General-Purpose registers
	 * -------------------------
	 */

	/*
	 * X0: Virtual address of a buffer shared between EL3 and Secure EL0.
	 *     The buffer will be mapped in the Secure EL1 translation regime
	 *     with Normal IS WBWA attributes and RO data and Execute Never
	 *     instruction access permissions.
	 *
	 * X1: Size of the buffer in bytes
	 *
	 * X2: cookie value (Implementation Defined)
	 *
	 * X3: cookie value (Implementation Defined)
	 *
	 * X4 to X30 = 0 (already done by cm_init_my_context())
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X0, PLAT_SPM_BUF_BASE);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X1, PLAT_SPM_BUF_SIZE);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X2, PLAT_SPM_COOKIE_0);
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_X3, PLAT_SPM_COOKIE_1);

	/*
	 * SP_EL0: A non-zero value will indicate to the SP that the SPM has
	 * initialized the stack pointer for the current CPU through
	 * implementation defined means. The value will be 0 otherwise.
	 */
	write_ctx_reg(get_gpregs_ctx(ctx), CTX_GPREG_SP_EL0,
		      PLAT_SP_IMAGE_STACK_BASE + PLAT_SP_IMAGE_STACK_PCPU_SIZE);

	/*
	 * Setup translation tables
	 * ------------------------
	 */

#if ENABLE_ASSERTIONS
	/* Get max granularity supported by the platform. */
	u_register_t id_aa64prf0_el1 = read_id_aa64pfr0_el1();

	int tgran64_supported =
		((id_aa64prf0_el1 >> ID_AA64MMFR0_EL1_TGRAN64_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN64_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN64_SUPPORTED;

	int tgran16_supported =
		((id_aa64prf0_el1 >> ID_AA64MMFR0_EL1_TGRAN16_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN16_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN16_SUPPORTED;

	int tgran4_supported =
		((id_aa64prf0_el1 >> ID_AA64MMFR0_EL1_TGRAN4_SHIFT) &
		 ID_AA64MMFR0_EL1_TGRAN4_MASK) ==
		 ID_AA64MMFR0_EL1_TGRAN4_SUPPORTED;

	uintptr_t max_granule_size;

	if (tgran64_supported) {
		max_granule_size = 64 * 1024;
	} else if (tgran16_supported) {
		max_granule_size = 16 * 1024;
	} else {
		assert(tgran4_supported);
		max_granule_size = 4 * 1024;
	}

	VERBOSE("Max translation granule supported: %lu bytes\n",
		max_granule_size);

	uintptr_t max_granule_size_mask = max_granule_size - 1;

	/* Base must be aligned to the max granularity */
	assert((ARM_SP_IMAGE_NS_BUF_BASE & max_granule_size_mask) == 0);

	/* Size must be a multiple of the max granularity */
	assert((ARM_SP_IMAGE_NS_BUF_SIZE & max_granule_size_mask) == 0);
#endif /* ENABLE_ASSERTIONS */

	/* This region contains the exception vectors used at S-EL1. */
	const mmap_region_t sel1_exception_vectors =
		MAP_REGION_FLAT(SPM_SHIM_EXCEPTIONS_START,
				SPM_SHIM_EXCEPTIONS_SIZE,
				MT_CODE | MT_SECURE | MT_PRIVILEGED);
	mmap_add_region_ctx(&secure_partition_xlat_ctx,
			    &sel1_exception_vectors);

	mmap_add_ctx(&secure_partition_xlat_ctx,
		     plat_get_secure_partition_mmap(NULL));

	init_xlat_tables_ctx(&secure_partition_xlat_ctx);

	/*
	 * MMU-related registers
	 * ---------------------
	 */

	/* Set attributes in the right indices of the MAIR */
	u_register_t mair_el1 =
		MAIR_ATTR_SET(ATTR_DEVICE, ATTR_DEVICE_INDEX) |
		MAIR_ATTR_SET(ATTR_IWBWA_OWBWA_NTR, ATTR_IWBWA_OWBWA_NTR_INDEX) |
		MAIR_ATTR_SET(ATTR_NON_CACHEABLE, ATTR_NON_CACHEABLE_INDEX);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_MAIR_EL1, mair_el1);

	/* Setup TCR_EL1. */
	u_register_t tcr_ps_bits =
		tcr_physical_addr_size_bits(PLAT_PHY_ADDR_SPACE_SIZE);

	u_register_t tcr_el1 =
		/* Size of region addressed by TTBR0_EL1 = 2^(64-T0SZ) bytes. */
		(64 - __builtin_ctzl(PLAT_VIRT_ADDR_SPACE_SIZE))	|
		/* Inner and outer WBWA, shareable. */
		TCR_SH_INNER_SHAREABLE					|
		TCR_RGN_OUTER_WBA | TCR_RGN_INNER_WBA			|
		/* Set the granularity to 4KB. */
		TCR_TG0_4K						|
		/* Limit Intermediate Physical Address Size. */
		tcr_ps_bits << TCR_EL1_IPS_SHIFT			|
		/* Disable translations using TTBR1_EL1. */
		TCR_EPD1_BIT
		/* The remaining fields related to TTBR1_EL1 are left as zero. */
	;

	tcr_el1 &= ~(
		/* Enable translations using TTBR0_EL1 */
		TCR_EPD0_BIT
	);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TCR_EL1, tcr_el1);

	/* Setup SCTLR_EL1 */
	u_register_t sctlr_el1 = read_ctx_reg(get_sysregs_ctx(ctx),
					      CTX_SCTLR_EL1);

	sctlr_el1 |=
		/*SCTLR_EL1_RES1 |*/
		/* Don't trap DC CVAU, DC CIVAC, DC CVAC, DC CVAP, or IC IVAU */
		SCTLR_UCI_BIT						|
		/* RW regions at xlat regime EL1&0 are forced to be XN. */
		SCTLR_WXN_BIT						|
		/* Don't trap to EL1 execution of WFI or WFE at EL0. */
		SCTLR_NTWI_BIT | SCTLR_NTWE_BIT				|
		/* Don't trap to EL1 accesses to CTR_EL0 from EL0. */
		SCTLR_UCT_BIT						|
		/* Don't trap to EL1 execution of DC ZVA at EL0. */
		SCTLR_DZE_BIT						|
		/* Enable SP Alignment check for EL0 */
		SCTLR_SA0_BIT						|
		/* Allow cacheable data and instr. accesses to normal memory. */
		SCTLR_C_BIT | SCTLR_I_BIT				|
		/* Alignment fault checking enabled when at EL1 and EL0. */
		SCTLR_A_BIT						|
		/* Enable MMU. */
		SCTLR_M_BIT
	;

	sctlr_el1 &= ~(
		/* Explicit data accesses at EL0 are little-endian. */
		SCTLR_E0E_BIT |
		/* Accesses to DAIF from EL0 are trapped to EL1. */
		SCTLR_UMA_BIT
	);

	write_ctx_reg(get_sysregs_ctx(ctx), CTX_SCTLR_EL1, sctlr_el1);

	/* Point TTBR0_EL1 at the tables of the context created for the SP. */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_TTBR0_EL1,
		      (u_register_t)secure_partition_base_xlat_table);

	/*
	 * Setup other system registers
	 * ----------------------------
	 */

	/* Shim Exception Vector Base Address */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_VBAR_EL1,
		      SPM_SHIM_EXCEPTIONS_PTR);

	/*
	 * FPEN: Forbid the Secure Partition to access FP/SIMD registers.
	 * TTA: Enable access to trace registers.
	 * ZEN (v8.2): Trap SVE instructions and access to SVE registers.
	 */
	write_ctx_reg(get_sysregs_ctx(ctx), CTX_CPACR_EL1,
		      CPACR_EL1_FPEN(CPACR_EL1_FP_TRAP_ALL));

	/*
	 * Prepare information in buffer shared between EL3 and S-EL0
	 * ----------------------------------------------------------
	 */

	void *shared_buf_ptr = (void *) PLAT_SPM_BUF_BASE;

	/* Copy the boot information into the shared buffer with the SP. */
	assert((uintptr_t)shared_buf_ptr + sizeof(secure_partition_boot_info_t)
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE));

	assert(PLAT_SPM_BUF_BASE <= (UINTPTR_MAX - PLAT_SPM_BUF_SIZE + 1));

	const secure_partition_boot_info_t *sp_boot_info =
			plat_get_secure_partition_boot_info(NULL);

	assert(sp_boot_info != NULL);

	memcpy((void *) shared_buf_ptr, (const void *) sp_boot_info,
	       sizeof(secure_partition_boot_info_t));

	/* Pointer to the MP information from the platform port. */
	secure_partition_mp_info_t *sp_mp_info =
		((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	assert(sp_mp_info != NULL);

	/*
	 * Point the shared buffer MP information pointer to where the info
	 * will be populated, just after the boot info.
	 */
	((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info =
		(secure_partition_mp_info_t *) ((uintptr_t)shared_buf_ptr
				+ sizeof(secure_partition_boot_info_t));

	/*
	 * Update the shared buffer pointer to where the MP information for
	 * the payload will be populated
	 */
	shared_buf_ptr = ((secure_partition_boot_info_t *) shared_buf_ptr)->mp_info;

	/*
	 * Copy the cpu information into the shared buffer area after the boot
	 * information.
	 */
	assert(sp_boot_info->num_cpus <= PLATFORM_CORE_COUNT);

	assert((uintptr_t)shared_buf_ptr
	       <= (PLAT_SPM_BUF_BASE + PLAT_SPM_BUF_SIZE -
		       (sp_boot_info->num_cpus * sizeof(*sp_mp_info))));

	memcpy(shared_buf_ptr, (const void *) sp_mp_info,
	       sp_boot_info->num_cpus * sizeof(*sp_mp_info));

	/*
	 * Calculate the linear indices of cores in boot information for the
	 * secure partition and flag the primary CPU
	 */
	sp_mp_info = (secure_partition_mp_info_t *) shared_buf_ptr;

	for (unsigned int index = 0; index < sp_boot_info->num_cpus; index++) {
		u_register_t mpidr = sp_mp_info[index].mpidr;

		sp_mp_info[index].linear_id = plat_core_pos_by_mpidr(mpidr);
		if (plat_my_core_pos() == sp_mp_info[index].linear_id)
			sp_mp_info[index].flags |= MP_INFO_FLAG_PRIMARY_CPU;
	}

	VERBOSE("S-EL1/S-EL0 context setup end.\n");
}
/*******************************************************************************
 * Generic handler which is called to physically power on a cpu identified by
 * its mpidr. It performs the generic, architectural, platform setup and state
 * management to power on the target cpu e.g. it will ensure that
 * enough information is stashed for it to resume execution in the non-secure
 * security state.
 *
 * The state of all the relevant power domains are changed after calling the
 * platform handler as it can return error.
 ******************************************************************************/
int psci_cpu_on_start(u_register_t target_cpu, entry_point_info_t *ep)
{
	int rc;
	unsigned int target_idx = plat_core_pos_by_mpidr(target_cpu);
	aff_info_state_t target_aff_state;

	/* Calling function must supply valid input arguments */
	assert((int) target_idx >= 0);
	assert(ep != NULL);

	/*
	 * This function must only be called on platforms where the
	 * CPU_ON platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_on &&
	       psci_plat_pm_ops->pwr_domain_on_finish);

	/* Protect against multiple CPUs trying to turn ON the same target CPU */
	psci_spin_lock_cpu(target_idx);

	/*
	 * Generic management: Ensure that the cpu is off to be
	 * turned on.
	 * Perform cache maintenance ahead of reading the target CPU state to
	 * ensure that the data is not stale.
	 * There is a theoretical edge case where the cache may contain stale
	 * data for the target CPU data - this can occur under the following
	 * conditions:
	 * - the target CPU is in another cluster from the current
	 * - the target CPU was the last CPU to shutdown on its cluster
	 * - the cluster was removed from coherency as part of the CPU shutdown
	 *
	 * In this case the cache maintenance that was performed as part of
	 * the target CPU's shutdown was not seen by the current CPU's
	 * cluster. And so the cache may contain stale data for the target
	 * CPU.
	 */
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);
	rc = cpu_on_validate_state(psci_get_aff_info_state_by_idx(target_idx));
	if (rc != PSCI_E_SUCCESS)
		goto exit;

	/*
	 * Call the cpu on handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. If the handler encounters an error,
	 * it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on)
		psci_spd_pm->svc_on(target_cpu);

	/*
	 * Set the Affinity info state of the target cpu to ON_PENDING.
	 * Flush aff_info_state as it will be accessed with caches
	 * turned OFF.
	 */
	psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
	flush_cpu_data_by_index(target_idx, psci_svc_cpu_data.aff_info_state);

	/*
	 * The cache line invalidation by the target CPU after setting the
	 * state to OFF (see psci_do_cpu_off()), could cause the update to
	 * aff_info_state to be invalidated. Retry the update if the target
	 * CPU aff_info_state is not ON_PENDING.
	 */
	target_aff_state = psci_get_aff_info_state_by_idx(target_idx);
	if (target_aff_state != AFF_STATE_ON_PENDING) {
		assert(target_aff_state == AFF_STATE_OFF);
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_ON_PENDING);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);

		assert(psci_get_aff_info_state_by_idx(target_idx) ==
		       AFF_STATE_ON_PENDING);
	}

	/*
	 * Perform generic, architecture and platform specific handling.
	 */
	/*
	 * Plat. management: Give the platform the current state
	 * of the target cpu to allow it to perform the necessary
	 * steps to power on.
	 */
	rc = psci_plat_pm_ops->pwr_domain_on(target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	if (rc == PSCI_E_SUCCESS)
		/* Store the re-entry information for the non-secure world. */
		cm_init_context_by_index(target_idx, ep);
	else {
		/* Restore the state on error. */
		psci_set_aff_info_state_by_idx(target_idx, AFF_STATE_OFF);
		flush_cpu_data_by_index(target_idx,
					psci_svc_cpu_data.aff_info_state);
	}

exit:
	psci_spin_unlock_cpu(target_idx);
	return rc;
}
/*
 * Helper function for platform_get_pos() when platform compatibility is
 * disabled. This is to enable SPDs using the older platform API to continue
 * to work.
 */
unsigned int platform_core_pos_helper(unsigned long mpidr)
{
	int idx = plat_core_pos_by_mpidr(mpidr);

	assert(idx >= 0);
	return idx;
}
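/*
 * Hedged contrast sketch (names are illustrative): the asserting helper
 * above suits trusted internal callers, while untrusted input from a lower
 * EL should take the validating path shown at the top of this section.
 */
static int demo_lookup(u_register_t smc_mpidr_arg)
{
	/* Trusted internal use: an invalid mpidr is a programming error. */
	unsigned int my_pos = platform_core_pos_helper(read_mpidr_el1());
	(void)my_pos;

	/* Untrusted input: validate rather than assert. */
	if (psci_validate_mpidr(smc_mpidr_arg) != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	return plat_core_pos_by_mpidr(smc_mpidr_arg);
}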