/*
 * Program the PMU registers that define the system sleep (suspend) mode
 * before it is entered.
 *
 * NOTE(review): bit semantics below are inferred from the macro names;
 * confirm against the RK3399 TRM PMU chapter.
 */
static void sys_slp_config(void)
{
	uint32_t slp_mode_cfg = 0;

	/* Hand CCI-500 P-/Q-channel request clearing and Q-gating to hardware. */
	mmio_write_32(PMU_BASE + PMU_CCI500_CON,
		      BIT_WITH_WMSK(PMU_CLR_PREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_CLR_QREQ_CCI500_HW) |
		      BIT_WITH_WMSK(PMU_QGATING_CCI500_CFG));

	/* Let hardware clear the ADB400 bridge requests on the little-cluster
	 * and GIC paths. */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_HW) |
		      BIT_WITH_WMSK(PMU_CLR_CORE_L_2GIC_HW) |
		      BIT_WITH_WMSK(PMU_CLR_GIC2_CORE_L_HW));

	/* Select the AP_PWROFF function on the GPIO1A iomux. */
	mmio_write_32(PMUGRF_BASE + PMUGRF_GPIO1A_IOMUX,
		      BIT_WITH_WMSK(AP_PWROFF));

	/* Sleep-mode feature set: power-mode enable, power-off request,
	 * CPU0 power-down, L2 flush/idle, and SCU power-down. */
	slp_mode_cfg = BIT(PMU_PWR_MODE_EN) |
		       BIT(PMU_POWER_OFF_REQ_CFG) |
		       BIT(PMU_CPU0_PD_EN) |
		       BIT(PMU_L2_FLUSH_EN) |
		       BIT(PMU_L2_IDLE_EN) |
		       BIT(PMU_SCU_PD_EN);

	/* Allow wakeup from either cluster; keep GPIO wakeup disabled. */
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_L_WKUP_EN);
	mmio_setbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_CLUSTER_B_WKUP_EN);
	mmio_clrbits_32(PMU_BASE + PMU_WKUP_CFG4, PMU_GPIO_WKUP_EN);

	mmio_write_32(PMU_BASE + PMU_PWRMODE_CON, slp_mode_cfg);

	/* Stabilisation delay and SCU power-down/up delays, counted in
	 * 24 MHz clock cycles. */
	mmio_write_32(PMU_BASE + PMU_STABLE_CNT, CYCL_24M_CNT_MS(5));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRDN_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_SCU_L_PWRUP_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRDN_CNT, CYCL_24M_CNT_MS(2));
	mmio_write_32(PMU_BASE + PMU_SCU_B_PWRUP_CNT, CYCL_24M_CNT_MS(2));
}
/*
 * Request a hardware flush of the big-cluster (cluster B) L2 cache via
 * the PMU and busy-wait until the PMU reports completion.
 *
 * There is no timeout: if the flush never completes this spins forever,
 * warning every MAX_WAIT_CONUT iterations.
 */
void rk3399_flash_l2_b(void)
{
	uint32_t wait_cnt = 0;

	/* Raise the L2 flush request for cluster B. */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
	dsb();

	/* Poll the core power status until the flush-done bit appears. */
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(L2_FLUSHDONE_CLUSTER_B))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			WARN("%s:reg %x,wait\n", __func__,
			     mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}

	/* Drop the flush request now that the flush has completed. */
	mmio_clrbits_32(PMU_BASE + PMU_SFT_CON, BIT(L2_FLUSH_REQ_CLUSTER_B));
}
static void spm_mcdi_set_cputop_pwrctrl_for_cluster_off(unsigned long mpidr) { unsigned long cluster_id = mpidr & MPIDR_CLUSTER_MASK; unsigned long cpu_id = mpidr & MPIDR_CPU_MASK; unsigned int pwr_status, shift, i, flag = 0; pwr_status = mmio_read_32(SPM_PWR_STATUS) | mmio_read_32(SPM_PWR_STATUS_2ND); if (cluster_id) { for (i = 0; i < PLATFORM_CLUSTER1_CORE_COUNT; i++) { if (i == cpu_id) continue; shift = i + PCM_MCDI_CA72_PWRSTA_SHIFT; flag |= (pwr_status & (1 << shift)) >> shift; } if (!flag) mmio_setbits_32(SPM_PCM_RESERVE, PCM_MCDI_CA72_CPUTOP_PWRCTL); } else { for (i = 0; i < PLATFORM_CLUSTER0_CORE_COUNT; i++) {
/*
 * System-suspend hook: prepare the SoC for deep sleep.
 *
 * Configures the PMU sleep mode, suspends the PLLs, latches the SGRF
 * reset hold, points the warm-boot address at PMUSRAM, powers down the
 * big-cluster SCU and requests power-down of the big-cluster ADB400
 * bridges. Always returns 0.
 */
static int sys_pwr_domain_suspend(void)
{
	sys_slp_config();
	plls_suspend();
	pmu_sgrf_rst_hld();

	/* Set the CPU warm-boot entry to PMUSRAM for resume. */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	pmu_scu_b_pwrdn();

	/* Software power-down requests for the big-cluster ADB400 bridges
	 * (core-to-GIC, core, and GIC-to-core paths). */
	mmio_write_32(PMU_BASE + PMU_ADB400_CON,
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_2GIC_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_CORE_B_SW) |
		      BIT_WITH_WMSK(PMU_PWRDWN_REQ_GIC2_CORE_B_SW));
	dsb();

	/* Enable SCU power-down for the big cluster. */
	mmio_setbits_32(PMU_BASE + PMU_PWRDN_CON, BIT(PMU_SCU_B_PWRDWN_EN));

	return 0;
}
void spm_set_wakeup_event(const struct pwr_ctrl *pwrctrl) { unsigned int val, mask; if (pwrctrl->timer_val_cust == 0) val = pwrctrl->timer_val ? pwrctrl->timer_val : PCM_TIMER_MAX; else val = pwrctrl->timer_val_cust; mmio_write_32(SPM_PCM_TIMER_VAL, val); mmio_setbits_32(SPM_PCM_CON1, CON1_CFG_KEY); if (pwrctrl->wake_src_cust == 0) mask = pwrctrl->wake_src; else mask = pwrctrl->wake_src_cust; if (pwrctrl->syspwreq_mask) mask &= ~WAKE_SRC_SYSPWREQ; mmio_write_32(SPM_SLEEP_WAKEUP_EVENT_MASK, ~mask); mmio_write_32(SPM_SLEEP_ISR_MASK, 0xfe04); }
/*
 * Power down the big-cluster (A72) SCU.
 *
 * Both A72 cores must already be powered down; otherwise an error is
 * logged and nothing is done. Flushes the cluster L2, asserts ACINACTM
 * to idle the cluster interface, then waits for the cluster to report
 * WFIL2 standby.
 */
static void pmu_scu_b_pwrdn(void)
{
	uint32_t wait_cnt = 0;

	/* Refuse to proceed unless both A72 cores report powered-down. */
	if ((mmio_read_32(PMU_BASE + PMU_PWRDN_ST) &
	     (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) !=
	    (BIT(PMU_A72_B0_PWRDWN_ST) | BIT(PMU_A72_B1_PWRDWN_ST))) {
		ERROR("%s: not all cpus is off\n", __func__);
		return;
	}

	rk3399_flash_l2_b();

	/* Assert ACINACTM so the cluster-B interconnect interface idles. */
	mmio_setbits_32(PMU_BASE + PMU_SFT_CON, BIT(ACINACTM_CLUSTER_B_CFG));

	/* Busy-wait for WFIL2 standby on cluster B; no timeout, so a stuck
	 * cluster hangs here with a periodic error log. */
	while (!(mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST) &
		 BIT(STANDBY_BY_WFIL2_CLUSTER_B))) {
		wait_cnt++;
		if (!(wait_cnt % MAX_WAIT_CONUT))
			ERROR("%s:wait cluster-b l2(%x)\n", __func__,
			      mmio_read_32(PMU_BASE + PMU_CORE_PWR_ST));
	}
}
/*
 * One-time CPU/cluster setup for the platform: power off unused little
 * cores, configure the big-cluster reset/clock behaviour, select AArch64
 * boot mode for both clusters, and enable the bus/L2 dynamic clock
 * management (DCM) controls.
 */
static void platform_setup_cpu(void)
{
	/* turn off all the little core's power except cpu 0 */
	mtcmos_little_cpu_off();

	/* setup big cores */
	/* Do not wait for per-CPU L1 power-down acks in the big-cluster
	 * reset generation unit. */
	mmio_write_32((uintptr_t)&mt8173_mcucfg->mp1_config_res,
		      MP1_DIS_RGU0_WAIT_PD_CPUS_L1_ACK |
		      MP1_DIS_RGU1_WAIT_PD_CPUS_L1_ACK |
		      MP1_DIS_RGU2_WAIT_PD_CPUS_L1_ACK |
		      MP1_DIS_RGU3_WAIT_PD_CPUS_L1_ACK |
		      MP1_DIS_RGU_NOCPU_WAIT_PD_CPUS_L1_ACK);
	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp1_miscdbg, MP1_AINACTS);
	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp1_clkenm_div,
			MP1_SW_CG_GEN);
	mmio_clrbits_32((uintptr_t)&mt8173_mcucfg->mp1_rst_ctl,
			MP1_L2RSTDISABLE);

	/* set big cores arm64 boot mode */
	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp1_cpucfg,
			MP1_CPUCFG_64BIT);

	/* set LITTLE cores arm64 boot mode */
	/* NOTE(review): the 64-bit mode bit for MP0 lives in the
	 * rv_addr[0].rv_addr_hw register — intentional per the MCUCFG
	 * layout, not a copy/paste slip. */
	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->mp0_rv_addr[0].rv_addr_hw,
			MP0_CPUCFG_64BIT);

	/* enable dcm control */
	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->bus_fabric_dcm_ctrl,
			ADB400_GRP_DCM_EN | CCI400_GRP_DCM_EN |
			ADBCLK_GRP_DCM_EN | EMICLK_GRP_DCM_EN |
			ACLK_GRP_DCM_EN | L2C_IDLE_DCM_EN |
			INFRACLK_PSYS_DYNAMIC_CG_EN);
	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->l2c_sram_ctrl,
			L2C_SRAM_DCM_EN);
	mmio_setbits_32((uintptr_t)&mt8173_mcucfg->cci_clk_ctrl,
			MCU_BUS_DCM_EN);
}
/*
 * Read the DDR controller/PHY configuration from the device tree,
 * initialise the DDR, and run data-bus, address-bus and size sanity
 * tests on the freshly initialised memory.
 *
 * Returns 0 on success, -ENOENT if no device tree is available,
 * -EINVAL on a missing/unreadable DT property; panics if any of the
 * memory tests fails.
 */
static int stm32mp1_ddr_setup(void)
{
	struct ddr_info *priv = &ddr_priv_data;
	int ret;
	struct stm32mp1_ddr_config config;
	int node, len;
	uint32_t uret, idx;
	void *fdt;

/* Describe one DT property: its name, its destination offset inside
 * struct stm32mp1_ddr_config, and its length in 32-bit words. */
#define PARAM(x, y) \
	{ \
		.name = x, \
		.offset = offsetof(struct stm32mp1_ddr_config, y), \
		.size = sizeof(config.y) / sizeof(uint32_t) \
	}

#define CTL_PARAM(x) PARAM("st,ctl-"#x, c_##x)
#define PHY_PARAM(x) PARAM("st,phy-"#x, p_##x)

	const struct {
		const char *name; /* Name in DT */
		const uint32_t offset; /* Offset in config struct */
		const uint32_t size; /* Size of parameters */
	} param[] = {
		CTL_PARAM(reg),
		CTL_PARAM(timing),
		CTL_PARAM(map),
		CTL_PARAM(perf),
		PHY_PARAM(reg),
		PHY_PARAM(timing),
		PHY_PARAM(cal)
	};

	if (fdt_get_address(&fdt) == 0) {
		return -ENOENT;
	}

	node = fdt_node_offset_by_compatible(fdt, -1, DT_DDR_COMPAT);
	if (node < 0) {
		ERROR("%s: Cannot read DDR node in DT\n", __func__);
		return -EINVAL;
	}

	/* Mandatory scalar properties: speed, size and name of the RAM. */
	config.info.speed = fdt_read_uint32_default(node, "st,mem-speed", 0);
	if (!config.info.speed) {
		VERBOSE("%s: no st,mem-speed\n", __func__);
		return -EINVAL;
	}
	config.info.size = fdt_read_uint32_default(node, "st,mem-size", 0);
	if (!config.info.size) {
		VERBOSE("%s: no st,mem-size\n", __func__);
		return -EINVAL;
	}
	config.info.name = fdt_getprop(fdt, node, "st,mem-name", &len);
	if (config.info.name == NULL) {
		VERBOSE("%s: no st,mem-name\n", __func__);
		return -EINVAL;
	}
	INFO("RAM: %s\n", config.info.name);

	/* Read each controller/PHY parameter array into config at the
	 * offset recorded in the param[] table. */
	for (idx = 0; idx < ARRAY_SIZE(param); idx++) {
		ret = fdt_read_uint32_array(node, param[idx].name,
					    (void *)((uintptr_t)&config +
						     param[idx].offset),
					    param[idx].size);
		VERBOSE("%s: %s[0x%x] = %d\n", __func__,
			param[idx].name, param[idx].size, ret);
		if (ret != 0) {
			ERROR("%s: Cannot read %s\n",
			      __func__, param[idx].name);
			return -EINVAL;
		}
	}

	/* Disable axidcg clock gating during init */
	mmio_clrbits_32(priv->rcc + RCC_DDRITFCR, RCC_DDRITFCR_AXIDCGEN);

	stm32mp1_ddr_init(priv, &config);

	/* Enable axidcg clock gating */
	mmio_setbits_32(priv->rcc + RCC_DDRITFCR, RCC_DDRITFCR_AXIDCGEN);

	priv->info.size = config.info.size;

	VERBOSE("%s : ram size(%x, %x)\n", __func__,
		(uint32_t)priv->info.base, (uint32_t)priv->info.size);

	/* Disable the data cache (and clean+invalidate) so the memory
	 * tests below hit the DDR directly, not the cache. */
	write_sctlr(read_sctlr() & ~SCTLR_C_BIT);
	dcsw_op_all(DC_OP_CISW);

	uret = ddr_test_data_bus();
	if (uret != 0U) {
		ERROR("DDR data bus test: can't access memory @ 0x%x\n",
		      uret);
		panic();
	}

	uret = ddr_test_addr_bus();
	if (uret != 0U) {
		ERROR("DDR addr bus test: can't access memory @ 0x%x\n",
		      uret);
		panic();
	}

	/* Check that the detected size is at least the DT-declared size. */
	uret = ddr_check_size();
	if (uret < config.info.size) {
		ERROR("DDR size: 0x%x does not match DT config: 0x%x\n",
		      uret, config.info.size);
		panic();
	}

	/* Re-enable the data cache now that DDR is validated. */
	write_sctlr(read_sctlr() | SCTLR_C_BIT);

	return 0;
}
/*
 * Record that the given core of the given cluster is entering cpuidle
 * by setting its per-core bit in the cluster's CPUIDLE flag register.
 */
void hisi_set_cpuidle_flag(unsigned int cluster, unsigned int core)
{
	/* One flag register per cluster, one bit per core. */
	mmio_setbits_32(CPUIDLE_FLAG_REG(cluster), BIT(core));
}
/*
 * Early peripheral initialisation: release the peripheral reset by
 * setting bit 0 of the CRG PERRSTDIS4 (reset-disable) register.
 */
static void hikey960_peri_init(void)
{
	/* Deassert the peripheral reset (unreset). */
	uint32_t unreset_bit = 1;

	mmio_setbits_32(CRG_PERRSTDIS4_REG, unreset_bit);
}