/*******************************************************************************
 * Setup secondary CPU vectors
 *
 * Publishes the reset handler address used by secondary CPUs (and by the
 * primary CPU when exiting System Suspend) via the MISC reset-vector
 * registers, and mirrors it into always-on scratch registers so it can be
 * restored after suspend. Finally tells the MCE to latch the new vector.
 ******************************************************************************/
void plat_secondary_setup(void)
{
	uint32_t addr_low, addr_high;
	plat_params_from_bl2_t *params_from_bl2 = bl31_get_plat_params();
	uint64_t cpu_reset_handler_base;

	INFO("Setting up secondary CPU boot\n");

	/* Is BL31 running out of TZSRAM (which is powered down in suspend)? */
	if ((tegra_bl31_phys_base >= TEGRA_TZRAM_BASE) &&
	    (tegra_bl31_phys_base <= (TEGRA_TZRAM_BASE + TEGRA_TZRAM_SIZE))) {

		/*
		 * The BL31 code resides in the TZSRAM which loses state
		 * when we enter System Suspend. Copy the wakeup trampoline
		 * code to TZDRAM to help us exit from System Suspend.
		 */
		cpu_reset_handler_base = params_from_bl2->tzdram_base;
		memcpy16((void *)((uintptr_t)cpu_reset_handler_base),
			 (void *)(uintptr_t)tegra186_cpu_reset_handler,
			 (uintptr_t)&__tegra186_cpu_reset_handler_end -
			 (uintptr_t)tegra186_cpu_reset_handler);
	} else {
		/* BL31 persists; jump straight to the secure entrypoint */
		cpu_reset_handler_base = (uintptr_t)tegra_secure_entrypoint;
	}

	/* Low word carries the AArch64 execution-mode flag in its LSBs */
	addr_low = (uint32_t)cpu_reset_handler_base | CPU_RESET_MODE_AA64;
	/* Only 11 upper address bits are architected in the HIGH register */
	addr_high = (uint32_t)((cpu_reset_handler_base >> 32) & 0x7ff);

	/* write lower 32 bits first, then the upper 11 bits */
	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_LOW, addr_low);
	mmio_write_32(TEGRA_MISC_BASE + MISCREG_AA64_RST_HIGH, addr_high);

	/* save reset vector to be used during SYSTEM_SUSPEND exit */
	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_0,
			addr_low);
	mmio_write_32(TEGRA_SCRATCH_BASE + SCRATCH_SECURE_RSV1_SCRATCH_1,
			addr_high);

	/* update reset vector address to the CCPLEX */
	mce_update_reset_vector();
}
/******************************************************************************* * Global gic distributor setup which will be done by the primary cpu after a * cold boot. It marks out the secure SPIs, PPIs & SGIs and enables them. It * then enables the secure GIC distributor interface. ******************************************************************************/ static void gic_distif_setup(unsigned int gicd_base) { unsigned int i, ctlr; const unsigned int ITLinesNumber = gicd_read_typer(gicd_base) & IT_LINES_NO_MASK; /* Disable the distributor before going further */ ctlr = gicd_read_ctlr(gicd_base); ctlr &= ~(ENABLE_GRP0 | ENABLE_GRP1); gicd_write_ctlr(gicd_base, ctlr); /* Mark all lines of SPIs as Group 1 (non-secure) */ for (i = 0; i < ITLinesNumber; i++) mmio_write_32(gicd_base + GICD_IGROUPR + 4 + i * 4, 0xffffffffu); /* Setup SPI priorities doing four at a time */ for (i = 0; i < ITLinesNumber * 32; i += 4) mmio_write_32(gicd_base + GICD_IPRIORITYR + 32 + i, DEFAULT_NS_PRIORITY_X4); /* Configure the SPIs we want as secure */ static const char sec_irq[] = { IRQ_MHU, IRQ_GPU_SMMU_0, IRQ_GPU_SMMU_1, IRQ_ETR_SMMU, IRQ_TZC400, IRQ_TZ_WDOG }; for (i = 0; i < sizeof(sec_irq) / sizeof(sec_irq[0]); i++) gic_set_secure(gicd_base, sec_irq[i]); /* Route watchdog interrupt to this CPU and enable it. */ gicd_set_itargetsr(gicd_base, IRQ_TZ_WDOG, platform_get_core_pos(read_mpidr())); gicd_set_isenabler(gicd_base, IRQ_TZ_WDOG); /* Now setup the PPIs */ gic_pcpu_distif_setup(gicd_base); /* Enable Group 0 (secure) interrupts */ gicd_write_ctlr(gicd_base, ctlr | ENABLE_GRP0); }
/*
 * Acquire a hardware resource lock by repeatedly writing our lock ID
 * (tagged with the lock request bit) until the status register reports
 * that we own the lock.
 */
static void hisi_resource_lock(unsigned int lockid, unsigned int offset)
{
	const unsigned int owner = lockid << 29;

	for (;;) {
		unsigned int status;

		/* Request the lock with our ID in the top bits */
		mmio_write_32(offset, owner | LOCK_BIT);

		/* Did the hardware grant it to us? */
		status = mmio_read_32(LOCK_STAT_OFFSET + (uintptr_t)offset);
		if ((status & LOCK_ID_MASK) == owner)
			break;
	}
}
/*******************************************************************************
 * This function is responsible for handling all T210 SiP calls
 *
 * Only non-secure callers are served. TEGRA_SIP_PMC_COMMANDS proxies a
 * single 32-bit PMC register read or write (x1 = PMC_READ/PMC_WRITE,
 * x2 = byte offset into the PMC, x3 = value for writes), after filtering
 * out the secure scratch and secure-only registers.
 ******************************************************************************/
int plat_sip_handler(uint32_t smc_fid, uint64_t x1, uint64_t x2, uint64_t x3,
		     uint64_t x4, const void *cookie, void *handle,
		     uint64_t flags)
{
	uint32_t val, ns;

	/* Determine which security state this SMC originated from */
	ns = is_caller_non_secure(flags);
	if (!ns)
		SMC_RET1(handle, SMC_UNK);

	switch (smc_fid) {
	case TEGRA_SIP_PMC_COMMANDS:
		/* check the address is within PMC range and is 4byte aligned */
		if ((x2 >= TEGRA_PMC_SIZE) || (x2 & 0x3))
			return -EINVAL;

		/* pmc_secure_scratch registers are not accessible */
		if (((x2 >= PMC_SECURE_SCRATCH0) &&
		     (x2 <= PMC_SECURE_SCRATCH5)) ||
		    ((x2 >= PMC_SECURE_SCRATCH6) &&
		     (x2 <= PMC_SECURE_SCRATCH7)) ||
		    ((x2 >= PMC_SECURE_SCRATCH8) &&
		     (x2 <= PMC_SECURE_SCRATCH79)) ||
		    ((x2 >= PMC_SECURE_SCRATCH80) &&
		     (x2 <= PMC_SECURE_SCRATCH119)))
			return -EFAULT;

		/* PMC secure-only registers are not accessible */
		if ((x2 == PMC_DPD_ENABLE_0) || (x2 == PMC_FUSE_CONTROL_0) ||
		    (x2 == PMC_CRYPTO_OP_0))
			return -EFAULT;

		/*
		 * Perform PMC read/write. Read results are returned to the
		 * caller in X1 via its saved GP register context.
		 *
		 * NOTE(review): the (uint32_t) cast of TEGRA_PMC_BASE + x2
		 * assumes the PMC aperture sits below 4 GiB — confirm.
		 */
		if (x1 == PMC_READ) {
			val = mmio_read_32((uint32_t)(TEGRA_PMC_BASE + x2));
			write_ctx_reg(get_gpregs_ctx(handle), CTX_GPREG_X1,
				      val);
		} else if (x1 == PMC_WRITE) {
			mmio_write_32((uint32_t)(TEGRA_PMC_BASE + x2),
				      (uint32_t)x3);
		} else {
			return -EINVAL;
		}
		break;

	default:
		ERROR("%s: unsupported function ID\n", __func__);
		return -ENOTSUP;
	}

	return 0;
}
/*
 * Enable snooping and DVM message handling on the CCI slave interface
 * that corresponds to the cluster identified by mpidr, then wait for the
 * interconnect to acknowledge the change.
 */
void cci_enable_cluster_coherency(unsigned long mpidr)
{
	unsigned long slave_base;

	assert(cci_base_addr);

	/* Turn on snoops and DVM messages for this cluster */
	slave_base = get_slave_iface_base(mpidr);
	mmio_write_32(slave_base + SNOOP_CTRL_REG,
		      DVM_EN_BIT | SNOOP_EN_BIT);

	/* Spin until the CCI reports the change as complete */
	do {
	} while ((mmio_read_32(cci_base_addr + STATUS_REG) &
		  CHANGE_PENDING_BIT) != 0);
}
/******************************************************************************* * Function which will perform any remaining platform-specific setup that can * occur after the MMU and data cache have been enabled. ******************************************************************************/ void bl1_platform_setup(void) { init_nic400(); init_pcie(); /* Initialise the IO layer and register platform IO devices */ io_setup(); /* Enable and initialize the System level generic timer */ mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_FCREQ(0) | CNTCR_EN); }
/*
 * Clear bits in the target (clock root control) register of clock root
 * 'id' by writing 'val' to its write-1-to-clear CLR mirror register.
 * Out-of-range ids are silently ignored.
 *
 * NOTE(review): the bound check uses '>' — if CCM_ROOT_CTRL_NUM is the
 * number of entries (not the maximum index), id == CCM_ROOT_CTRL_NUM
 * would index one past the array; confirm the macro's meaning.
 */
void imx_clock_target_clr(unsigned int id, uint32_t val)
{
	struct ccm *ccm = ((struct ccm *)CCM_BASE);
	uintptr_t addr;

	if (id > CCM_ROOT_CTRL_NUM)
		return;

	addr = (uintptr_t)&ccm->ccm_root_ctrl[id].ccm_target_root_clr;
	mmio_write_32(addr, val);
}
/*
 * Drive a single GPIO line within the register at 'addr': set bit 'gpio'
 * when 'val' is non-zero, clear it otherwise (read-modify-write).
 *
 * Uses an unsigned constant for the shift: '1 << 31' on a signed int is
 * undefined behaviour, so '1U' is required for gpio == 31. The caller
 * must pass gpio < 32 (shifts of >= 32 remain undefined).
 */
static void gpio_set_value(uint32_t addr, uint8_t gpio, uint32_t val)
{
	uint32_t reg;

	reg = mmio_read_32(addr);
	if (val)
		reg |= (1U << gpio);
	else
		reg &= ~(1U << gpio);
	mmio_write_32(addr, reg);
}
/** * pm_client_suspend() - Client-specific suspend actions * * This function should contain any PU-specific actions * required prior to sending suspend request to PMU * Actions taken depend on the state system is suspending to. */ void pm_client_suspend(const struct pm_proc *proc, unsigned int state) { bakery_lock_get(&pm_client_secure_lock); if (state == PM_STATE_SUSPEND_TO_RAM) pm_client_set_wakeup_sources(); /* Set powerdown request */ mmio_write_32(APU_PWRCTL, mmio_read_32(APU_PWRCTL) | proc->pwrdn_mask); bakery_lock_release(&pm_client_secure_lock); }
/*
 * Reset the whole SoC (RPUs, PMU, PL and all) when no PMU firmware is
 * present to do it for us. Never returns.
 */
static void __dead2 zynqmp_nopmu_system_reset(void)
{
	uint32_t rpll_ctrl;

	/* Take this cluster out of interconnect coherency first */
	plat_arm_interconnect_exit_coherency();

	/* Put the RPLL into bypass (needed on 1.0 silicon) */
	rpll_ctrl = mmio_read_32(CRL_APB_RPLL_CTRL);
	mmio_write_32(CRL_APB_RPLL_CTRL,
		      rpll_ctrl | CRL_APB_RPLL_CTRL_BYPASS);

	/* Pull the soft-reset trigger */
	mmio_write_32(CRL_APB_RESET_CTRL, CRL_APB_RESET_CTRL_SOFT_RESET);

	/* Wait for the reset to take hold */
	for (;;)
		wfi();
}
/*
 * Remaining BL31 platform setup: delay timer, 32K CKIL source selection,
 * GIC bring-up and the general power controller.
 */
void bl31_platform_setup(void)
{
	generic_delay_timer_init();

	/* Route CKIL from the 32K oscillator */
	mmio_write_32(IMX_ANAMIX_BASE + ANAMIX_MISC_CTL, 0x1);

	/* Bring up the GIC driver, then initialize the GIC itself */
	plat_gic_driver_init();
	plat_gic_init();

	imx_gpc_init();
}
static void plat_save_el3_dormant_data() { struct _el3_dormant_data *p = &el3_dormant_data[0]; p->mp0_l2actlr_el1 = read_l2actlr(); p->mp0_l2ectlr_el1 = read_l2ectlr(); //backup L2RSTDISABLE and set as "not disable L2 reset" p->mp0_l2rstdisable = mmio_read_32(MP0_CA7L_CACHE_CONFIG); mmio_write_32(MP0_CA7L_CACHE_CONFIG, mmio_read_32(MP0_CA7L_CACHE_CONFIG) & ~L2RSTDISABLE); }
/*
 * Reboot the board through the PM watchdog: arm it with a short timeout,
 * request a full reset on expiry, then wait for it to fire.
 */
static void __dead2 rpi3_watchdog_reset(void)
{
	uint32_t rstc;

	console_flush();

	dsbsy();
	isb();

	/* Arm the watchdog (writes must carry the PM password) */
	mmio_write_32(RPI3_PM_BASE + RPI3_PM_WDOG_OFFSET,
		      RPI3_PM_PASSWORD | RESET_TIMEOUT);

	/* Select "full reset" as the action taken when it expires */
	rstc = mmio_read_32(RPI3_PM_BASE + RPI3_PM_RSTC_OFFSET);
	rstc &= ~RPI3_PM_RSTC_WRCFG_MASK;
	rstc |= RPI3_PM_PASSWORD | RPI3_PM_RSTC_WRCFG_FULL_RESET;
	mmio_write_32(RPI3_PM_BASE + RPI3_PM_RSTC_OFFSET, rstc);

	/* Nothing left to do but wait for the watchdog */
	while (1)
		wfi();
}
/** * pm_ipi_send_common() - Sends IPI request to the PMU * @proc Pointer to the processor who is initiating request * @payload API id and call arguments to be written in IPI buffer * * Send an IPI request to the power controller. Caller needs to hold * the 'pm_secure_lock' lock. * * @return Returns status, either success or error+reason */ static enum pm_ret_status pm_ipi_send_common(const struct pm_proc *proc, uint32_t payload[PAYLOAD_ARG_CNT]) { unsigned int offset = 0; uintptr_t buffer_base = proc->ipi->buffer_base + IPI_BUFFER_TARGET_PMU_OFFSET + IPI_BUFFER_REQ_OFFSET; /* Wait until previous interrupt is handled by PMU */ pm_ipi_wait(proc); /* Write payload into IPI buffer */ for (size_t i = 0; i < PAYLOAD_ARG_CNT; i++) { mmio_write_32(buffer_base + offset, payload[i]); offset += PAYLOAD_ARG_SIZE; } /* Generate IPI to PMU */ mmio_write_32(proc->ipi->base + IPI_TRIG_OFFSET, IPI_PMU_PM_INT_MASK); return PM_RET_SUCCESS; }
/*
 * Finish a secure MHU transaction on the given slot: acknowledge any
 * response by clearing the slot's bit in the secure interrupt CLEAR
 * register, then drop the MHU lock.
 *
 * Uses '1U << slot_id': with slot_id allowed up to MHU_MAX_SLOT_ID, a
 * signed '1 << 31' would be undefined behaviour.
 */
void mhu_secure_message_end(unsigned int slot_id)
{
	assert(slot_id <= MHU_MAX_SLOT_ID);

	/*
	 * Clear any response we got by writing one in the relevant slot bit to
	 * the CLEAR register
	 */
	mmio_write_32(MHU_BASE + SCP_INTR_S_CLEAR, 1U << slot_id);

	arm_lock_release();
}
/*
 * Queue a mailbox command without waiting for its completion. When
 * 'urgent' is set, raise the urgent flag before queueing the request.
 */
void mailbox_send_cmd_async(int job_id, unsigned int cmd, uint32_t *args,
			    int len, int urgent)
{
	uint32_t header;

	if (urgent)
		mmio_write_32(MBOX_OFFSET + MBOX_URG, 1);

	/* Build the command header, then push header + args to the ring */
	header = MBOX_CLIENT_ID_CMD(MBOX_ATF_CLIENT_ID) |
		 MBOX_JOB_ID_CMD(job_id) |
		 MBOX_CMD_LEN_CMD(len) |
		 MBOX_INDIRECT | cmd;
	fill_mailbox_circular_buffer(header, args, len);
}
/*******************************************************************************
 * This function checks the DDR size. It has to be run with Data Cache off.
 * This test is run before data have been put in DDR, and is only done for
 * cold boot. The DDR data can then be overwritten, and it is not useful to
 * restore its content.
 * Returns DDR computed size.
 ******************************************************************************/
static uint32_t ddr_check_size(void)
{
	/* Start probing at the smallest power-of-two offset (one word) */
	uint32_t offset = sizeof(uint32_t);

	/* Reference pattern at the base of DDR */
	mmio_write_32(STM32MP_DDR_BASE, DDR_PATTERN);

	while (offset < STM32MP_DDR_MAX_SIZE) {
		/*
		 * Write the anti-pattern at the candidate offset. If the
		 * address wraps (aliases) because the DDR is smaller than
		 * 'offset', this write lands on the base word and destroys
		 * the reference pattern.
		 */
		mmio_write_32(STM32MP_DDR_BASE + offset, DDR_ANTIPATTERN);
		dsb();

		/* Aliasing detected: 'offset' is the real DDR size */
		if (mmio_read_32(STM32MP_DDR_BASE) != DDR_PATTERN) {
			break;
		}

		/* Double the probe distance and try again */
		offset <<= 1;
	}

	INFO("Memory size = 0x%x (%d MB)\n", offset, offset / (1024U * 1024U));

	return offset;
}
/*
 * Power on a secondary core: temporarily switch the CPU clock to 400MHz,
 * release the core's power-on and core resets in that order, then restore
 * the original CPU clock setting. The register write sequence (including
 * the two-step clock switch) is order-sensitive.
 */
static int poplar_pwr_domain_on(u_register_t mpidr)
{
	unsigned int cpu = plat_core_pos_by_mpidr(mpidr);
	unsigned int regval, regval_bak;

	/* Select 400MHz before start slave cores */
	regval_bak = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP));
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), 0x206);
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), 0x606);

	/* Clear the slave cpu arm_por_srst_req reset */
	regval = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST));
	regval &= ~(1 << (cpu + CPU_REG_COREPO_SRST));
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST), regval);

	/* Clear the slave cpu reset */
	regval = mmio_read_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST));
	regval &= ~(1 << (cpu + CPU_REG_CORE_SRST));
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_RST), regval);

	/*
	 * Restore cpu frequency: write the saved value with the SW_BEGIN
	 * bit cleared first, then the saved value itself (mirrors the
	 * two-step switch above).
	 */
	regval = regval_bak & (~(1 << REG_CPU_LP_CPU_SW_BEGIN));
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), regval);
	mmio_write_32((uintptr_t)(REG_BASE_CRG + REG_CPU_LP), regval_bak);

	return PSCI_E_SUCCESS;
}
/******************************************************************************* * Initialize the gic, configure the CLCD and zero out variables needed by the * secondaries to boot up correctly. ******************************************************************************/ void bl31_platform_setup() { unsigned int reg_val; /* Initialize the gic cpu and distributor interfaces */ gic_setup(); /* * TODO: Configure the CLCD before handing control to * linux. Need to see if a separate driver is needed * instead. */ mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGDATA, 0); mmio_write_32(VE_SYSREGS_BASE + V2M_SYS_CFGCTRL, (1ull << 31) | (1 << 30) | (7 << 20) | (0 << 16)); /* Enable and initialize the System level generic timer */ mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_FCREQ(0) | CNTCR_EN); /* Allow access to the System counter timer module */ reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT); reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT); reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT); mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val); mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val); reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1)); mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val); /* Intialize the power controller */ fvp_pwrc_setup(); /* Topologies are best known to the platform. */ plat_setup_topology(); }
/*
 * Tear down the secure watchdog interrupt setup: mask FIQs at EL3, stop
 * the watchdog, disable the GIC CPU interface and return the watchdog's
 * distributor state (group, priority, target, control) to its reset
 * values. The teardown order is deliberate.
 */
void rcar_swdt_release(void)
{
	/* ITARGETSR register that contains the watchdog IRQ's byte field */
	uintptr_t itarget = SWDT_GICD_ITARGETSR +
			    (ARM_IRQ_SEC_WDT & ~ITARGET_MASK);
	uint32_t i;

	/* Disable FIQ interrupt */
	write_daifset(DAIF_FIQ_BIT);
	/* FIQ interrupts are not taken to EL3 */
	write_scr_el3(read_scr_el3() & ~SCR_FIQ_BIT);

	swdt_disable();
	gicv2_cpuif_disable();

	/* Clear all interrupt group assignments */
	for (i = 0; i < IGROUPR_NUM; i++)
		mmio_write_32(SWDT_GICD_IGROUPR + i * 4, 0U);
	/* Clear all interrupt priorities */
	for (i = 0; i < ISPRIORITY_NUM; i++)
		mmio_write_32(SWDT_GICD_ISPRIORITYR + i * 4, 0U);

	/* Clear the watchdog IRQ's CPU targets */
	mmio_write_32(itarget, 0U);

	/* Disable the distributor and CPU interface, reset the mask */
	mmio_write_32(SWDT_GICD_CTLR, 0U);
	mmio_write_32(SWDT_GICC_CTLR, 0U);
	mmio_write_32(SWDT_GICC_PMR, 0U);
}
/*
 * Bring up the PLLs during early platform init. Register addresses and
 * values are raw magic numbers; the sequence below is order-sensitive.
 *
 * NOTE(review): 0xf7032000 and 0xf7800000 are undocumented block bases
 * here — presumably PLL/peripheral control blocks; confirm against the
 * SoC reference manual.
 */
static void init_pll(void)
{
	unsigned int data;

	/* Enable the PLL at 0xf7032000 and wait for its lock bit (28) */
	data = mmio_read_32((0xf7032000 + 0x000));
	data |= 0x1;
	mmio_write_32((0xf7032000 + 0x000), data);
	dsb();
	do {
		data = mmio_read_32((0xf7032000 + 0x000));
	} while (!(data & (1 << 28)));

	/* Program field [2:0] at 0xf7800000 to 0x4 and poll until applied */
	data = mmio_read_32((0xf7800000 + 0x000));
	data &= ~0x007;
	data |= 0x004;
	mmio_write_32((0xf7800000 + 0x000), data);
	dsb();
	do {
		data = mmio_read_32((0xf7800000 + 0x014));
		data &= 0x007;
	} while (data != 0x004);

	/*
	 * Kick the peripheral controller; the STAT1 read result is unused
	 * here — presumably a read-to-flush/acknowledge access; confirm.
	 */
	mmio_write_32(PERI_SC_PERIPH_CTRL14, 0x2101);
	data = mmio_read_32(PERI_SC_PERIPH_STAT1);

	/* Program the syspll dividers/controls */
	mmio_write_32(0xf7032000 + 0x02c, 0x5110103e);
	data = mmio_read_32(0xf7032000 + 0x050);
	data |= 1 << 28;
	mmio_write_32(0xf7032000 + 0x050, data);

	/* Re-kick and read back the resulting syspll frequency */
	mmio_write_32(PERI_SC_PERIPH_CTRL14, 0x2101);
	mdelay(1);
	data = mmio_read_32(PERI_SC_PERIPH_STAT1);
	NOTICE("syspll frequency:%dHz\n", data);
}
/*
 * Point the SPM's instruction memory (IM) at the PCM program described by
 * 'pcmdesc' and kick the IM to fetch it. If the same program is already
 * loaded (pointer and length match, and the session count allows it), the
 * reload is skipped and IM slave mode is enabled instead. Each kick is a
 * deliberate set-then-clear toggle of a single kick bit.
 */
void spm_kick_im_to_fetch(const struct pcm_desc *pcmdesc)
{
	unsigned int ptr = 0, len, con0;

	/* Physical base and word count (size - 1) of the PCM program */
	ptr = (unsigned int)(unsigned long)(pcmdesc->base);
	len = pcmdesc->size - 1;

	/* Reload only if the IM is not already holding this program */
	if (mmio_read_32(SPM_PCM_IM_PTR) != ptr ||
	    mmio_read_32(SPM_PCM_IM_LEN) != len ||
	    pcmdesc->sess > 2) {
		mmio_write_32(SPM_PCM_IM_PTR, ptr);
		mmio_write_32(SPM_PCM_IM_LEN, len);
	} else {
		mmio_setbits_32(SPM_PCM_CON1, CON1_CFG_KEY | CON1_IM_SLAVE);
	}

	/* kick IM to fetch (only toggle IM_KICK) */
	con0 = mmio_read_32(SPM_PCM_CON0) & ~(CON0_IM_KICK | CON0_PCM_KICK);
	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY | CON0_IM_KICK);
	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY);

	/* kick IM to fetch (only toggle PCM_KICK) */
	con0 = mmio_read_32(SPM_PCM_CON0) & ~(CON0_IM_KICK | CON0_PCM_KICK);
	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY | CON0_PCM_KICK);
	mmio_write_32(SPM_PCM_CON0, con0 | CON0_CFG_KEY);
}
void spm_init_event_vector(const struct pcm_desc *pcmdesc) { /* init event vector register */ mmio_write_32(SPM_PCM_EVENT_VECTOR0, pcmdesc->vec0); mmio_write_32(SPM_PCM_EVENT_VECTOR1, pcmdesc->vec1); mmio_write_32(SPM_PCM_EVENT_VECTOR2, pcmdesc->vec2); mmio_write_32(SPM_PCM_EVENT_VECTOR3, pcmdesc->vec3); mmio_write_32(SPM_PCM_EVENT_VECTOR4, pcmdesc->vec4); mmio_write_32(SPM_PCM_EVENT_VECTOR5, pcmdesc->vec5); mmio_write_32(SPM_PCM_EVENT_VECTOR6, pcmdesc->vec6); mmio_write_32(SPM_PCM_EVENT_VECTOR7, pcmdesc->vec7); /* event vector will be enabled by PCM itself */ }
/* ipi_mb_notify() - Trigger IPI mailbox notification
 *
 * @local - local IPI ID
 * @remote - remote IPI ID
 * @is_blocking - if to trigger the notification in blocking mode or not.
 *
 * It sets the remote bit in the IPI agent trigger register.
 */
void ipi_mb_notify(uint32_t local, uint32_t remote, uint32_t is_blocking)
{
	/* Raise the trigger bit for the remote agent */
	mmio_write_32(IPI_REG_BASE(local) + IPI_TRIG_OFFSET,
		      IPI_BIT_MASK(remote));

	if (!is_blocking)
		return;

	/* Poll the observation register until the remote clears its bit */
	while (mmio_read_32(IPI_REG_BASE(local) + IPI_OBR_OFFSET) &
	       IPI_BIT_MASK(remote))
		;
}
/*
 * Write 'val' to the system register address 'sreg', but only if that
 * address appears on the authorized whitelist. Returns MTK_SIP_E_SUCCESS
 * on a permitted write, MTK_SIP_E_INVALID_PARAM otherwise.
 */
uint64_t mt_sip_set_authorized_sreg(uint32_t sreg, uint32_t val)
{
	for (uint64_t idx = 0; idx < authorized_sreg_cnt; idx++) {
		if (authorized_sreg[idx] != sreg)
			continue;

		/* Whitelisted: perform the write */
		mmio_write_32(sreg, val);
		return MTK_SIP_E_SUCCESS;
	}

	return MTK_SIP_E_INVALID_PARAM;
}
/*
 * Switch CP0 pins MPP37 and MPP38 over to their I2C0 (SCK/SDA) function
 * via a read-modify-write of the shared MPP control register.
 */
static void mpp_config(void)
{
	uintptr_t reg = MVEBU_CP_MPP_REGS(0, 4);
	uint32_t val = mmio_read_32(reg);

	/* Clear both pins' function fields, then select I2C0 */
	val &= ~((MVEBU_MPP_CTRL_MASK << MVEBU_CP_MPP_CTRL37_OFFS) |
		 (MVEBU_MPP_CTRL_MASK << MVEBU_CP_MPP_CTRL38_OFFS));
	val |= (MVEBU_CP_MPP_CTRL37_I2C0_SCK_ENA <<
		MVEBU_CP_MPP_CTRL37_OFFS) |
	       (MVEBU_CP_MPP_CTRL38_I2C0_SDA_ENA <<
		MVEBU_CP_MPP_CTRL38_OFFS);

	mmio_write_32(reg, val);
}
/**
 * pm_client_abort_suspend() - Client-specific abort-suspend actions
 *
 * This function should contain any PU-specific actions
 * required for aborting a prior suspend request
 */
void pm_client_abort_suspend(void)
{
	uint32_t pwrctl;

	/* Enable interrupts at processor level (for current cpu) */
	gicv2_cpuif_enable();

	bakery_lock_get(&pm_client_secure_lock);

	/* Withdraw the powerdown request for this processor */
	pwrctl = mmio_read_32(APU_PWRCTL);
	mmio_write_32(APU_PWRCTL, pwrctl & ~primary_proc->pwrdn_mask);

	bakery_lock_release(&pm_client_secure_lock);
}
/*******************************************************************************
 * Function which will evaluate how much of the trusted ram has been gobbled
 * up by BL1 and return the base and size of whats available for loading BL2.
 * Its called after coherency and the MMU have been turned on.
 ******************************************************************************/
void bl1_platform_setup(void)
{
	/* Register the platform's IO devices with the IO layer */
	io_setup();

	/* Switch on the system-level generic timer */
	mmio_write_32(SYS_CNTCTL_BASE + CNTCR_OFF, CNTCR_EN);

	/* Bring up the console */
	console_init();
}
/*
 * Record a cluster powerdown flag in the SCBAKDATA3 backup register,
 * protected by the per-core hotplug hardware lock. Each cluster's flag
 * occupies a 2-bit field at bit position (cluster * 2).
 *
 * NOTE(review): the OR uses 'val & 0xFFFFFFF' (28-bit mask), which only
 * clears the top 4 bits — the target cluster's previous flag bits are not
 * cleared before OR-ing in 'value'. Looks like flags can be set but not
 * cleared this way; confirm against the register's intended semantics.
 */
void hisi_set_cluster_pwdn_flag(unsigned int cluster,
				unsigned int core, unsigned int value)
{
	unsigned int val;

	hisi_cpuhotplug_lock(cluster, core);

	val = mmio_read_32(REG_SCBAKDATA3_OFFSET);
	val = (value << (cluster << 1)) | (val & 0xFFFFFFF);
	mmio_write_32(REG_SCBAKDATA3_OFFSET, val);

	hisi_cpuhotplug_unlock(cluster, core);
}
/*
 * Power on the given core. Returns 0 on success, -EINVAL if the core is
 * unexpectedly already on in the core_pm-managed configuration.
 */
static int cpus_power_domain_on(uint32_t cpu_id)
{
	uint32_t cfg_info;
	/* Per-core power domain IDs are contiguous starting at PD_CPUL0 */
	uint32_t cpu_pd = PD_CPUL0 + cpu_id;

	/*
	 * There are two ways to powering on or off on core.
	 * 1) Control it power domain into on or off in PMU_PWRDN_CON reg
	 * 2) Enable the core power manage in PMU_CORE_PM_CON reg,
	 *	then, if the core enter into wfi, it power domain will be
	 *	powered off automatically.
	 */
	cfg_info = get_cpus_pwr_domain_cfg_info(cpu_id);

	if (cfg_info == core_pwr_pd) {
		/* disable core_pm cfg */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      CORES_PM_DISABLE);

		/* if the cores have be on, power off it firstly */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id), 0);
			pmu_power_domain_ctr(cpu_pd, pmu_pd_off);
		}

		/* Explicitly power the domain on */
		pmu_power_domain_ctr(cpu_pd, pmu_pd_on);
	} else {
		/* wfi-managed path: the core must currently be off */
		if (pmu_power_domain_st(cpu_pd) == pmu_pd_on) {
			WARN("%s: cpu%d is not in off,!\n", __func__, cpu_id);
			return -EINVAL;
		}

		/* Arm the soft-wakeup enable; dsb orders it before use */
		mmio_write_32(PMU_BASE + PMU_CORE_PM_CON(cpu_id),
			      BIT(core_pm_sft_wakeup_en));
		dsb();
	}

	return 0;
}