/*
 * kick_l2spm() - nudge the L2 SPM out of its sleep state machine so the
 * cluster can be powered up.
 * @l2ccc_node: DT node of the L2 cache clock controller (provides the
 *              "l2-common" and optional "l2-acinactm" SCM-accessed regs).
 * @vctl_node:  DT node whose first reg region is the L2 SPM block.
 *
 * Returns 0 on success or if the SPM is already idle, -ENOMEM if the SPM
 * base cannot be mapped, or the of_address_to_resource() error code.
 */
static int kick_l2spm(struct device_node *l2ccc_node,
		      struct device_node *vctl_node)
{
	struct resource res, acinactm_res;
	int val;
	int timeout = 10, ret = 0;
	void __iomem *l2spm_base = of_iomap(vctl_node, 0);
	bool use_acinactm = false;
	int index;

	if (!l2spm_base)
		return -ENOMEM;

	/* PWR_CTL bits clear: SPM is not running its sleep sequence,
	 * nothing to kick.
	 */
	if (!(__raw_readl(l2spm_base + L2_SPM_STS) & 0xFFFF0000))
		goto bail_l2_pwr_bit;

	index = of_property_match_string(l2ccc_node, "reg-names", "l2-common");
	if (index < 0)
		/* NOTE(review): missing "l2-common" bails with ret == 0,
		 * i.e. reports success without kicking — presumably a
		 * deliberate fallback for DTs without the property; confirm.
		 */
		goto bail_l2_pwr_bit;

	ret = of_address_to_resource(l2ccc_node, index, &res);
	if (ret)
		goto bail_l2_pwr_bit;

	/* L2 is executing sleep state machine,
	 * let's softly kick it awake: set BIT(0) of the l2-common
	 * register via secure (SCM) I/O.
	 */
	val = scm_io_read((u32)res.start);
	val |= BIT(0);
	scm_io_write((u32)res.start, val);

	use_acinactm = of_property_read_bool(l2ccc_node, "qcom,use-acinactm");
	if (use_acinactm) {
		index = of_property_match_string(l2ccc_node, "reg-names",
							"l2-acinactm");
		if (index < 0)
			/* NOTE(review): same silent-success bail as above,
			 * but here the property promised the region —
			 * verify ret should not be an error code.
			 */
			goto bail_l2_pwr_bit;

		ret = of_address_to_resource(l2ccc_node, index,
							&acinactm_res);
		if (ret)
			goto bail_l2_pwr_bit;

		/* Drop BIT(4) of the ACINACTM control register. */
		val = scm_io_read((u32)acinactm_res.start);
		val &= ~BIT(4);
		scm_io_write((u32)acinactm_res.start, val);
	}

	/* Wait until the SPM status indicates that the PWR_CTL
	 * bits are clear.  Bounded to 10 polls; a stuck SPM is fatal
	 * (BUG_ON).
	 */
	while (readl_relaxed(l2spm_base + L2_SPM_STS) & 0xFFFF0000) {
		BUG_ON(!timeout--);
		cpu_relax();
		usleep(100);
	}

bail_l2_pwr_bit:
	iounmap(l2spm_base);
	return ret;
}
static int kick_l2spm_8994(struct device_node *l2ccc_node, struct device_node *vctl_node) { struct resource res; int val, ret = 0; void __iomem *l2spm_base = of_iomap(vctl_node, 0); if (!l2spm_base) return -ENOMEM; if (!(__raw_readl(l2spm_base + L2_SPM_STS) & 0xFFFF0000)) goto bail_l2_pwr_bit; ret = of_address_to_resource(l2ccc_node, 1, &res); if (ret) goto bail_l2_pwr_bit; /* L2 is executing sleep state machine, * let's softly kick it awake */ val = scm_io_read((u32)res.start); val |= BIT(0); scm_io_write((u32)res.start, val); /* Wait until the SPM status indicates that the PWR_CTL * bits are clear. */ while (readl_relaxed(l2spm_base + L2_SPM_STS) & 0xFFFF0000) { int timeout = 10; BUG_ON(!timeout--); cpu_relax(); usleep(100); } val = scm_io_read((u32)res.start); val &= ~BIT(0); scm_io_write((u32)res.start, val); bail_l2_pwr_bit: iounmap(l2spm_base); return ret; }
int msm_unclamp_secondary_arm_cpu(unsigned int cpu) { int ret = 0; struct device_node *cpu_node, *acc_node, *l2_node, *l2ccc_node; void __iomem *reg; struct resource res; int val; cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) return -ENODEV; acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0); if (!acc_node) { ret = -ENODEV; goto out_acc; } l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); if (!l2_node) { ret = -ENODEV; goto out_l2; } l2ccc_node = of_parse_phandle(l2_node, "power-domain", 0); if (!l2ccc_node) { ret = -ENODEV; goto out_l2; } /* Ensure L2-cache of the CPU is powered on before * unclamping cpu power rails. */ ret = power_on_l2_cache(l2ccc_node, cpu); if (ret) { pr_err("L2 cache power up failed for CPU%d\n", cpu); goto out_l2ccc; } reg = of_iomap(acc_node, 0); if (!reg) { ret = -ENOMEM; goto out_acc_reg; } /* Assert Reset on cpu-n */ writel_relaxed(0x00000033, reg + CPU_PWR_CTL); mb(); /*Program skew to 16 X0 clock cycles*/ writel_relaxed(0x10000001, reg + CPU_PWR_GATE_CTL); mb(); udelay(2); /* De-assert coremem clamp */ writel_relaxed(0x00000031, reg + CPU_PWR_CTL); mb(); /* Close coremem array gdhs */ writel_relaxed(0x00000039, reg + CPU_PWR_CTL); mb(); udelay(2); /* De-assert cpu-n clamp */ writel_relaxed(0x00020038, reg + CPU_PWR_CTL); mb(); udelay(2); /* De-assert cpu-n reset */ writel_relaxed(0x00020008, reg + CPU_PWR_CTL); mb(); /* Assert PWRDUP signal on core-n */ writel_relaxed(0x00020088, reg + CPU_PWR_CTL); mb(); /* Secondary CPU-N is now alive. * Allowing L2 Low power modes */ if (!vctl_parsed) goto out_l2ccc_1; else { ret = of_address_to_resource(l2ccc_node, 1, &res); if (ret) goto out_l2ccc_1; } val = scm_io_read((u32)res.start); val &= ~BIT(0); scm_io_write((u32)res.start, val); out_l2ccc_1: iounmap(reg); out_acc_reg: of_node_put(l2ccc_node); out_l2ccc: of_node_put(l2_node); out_l2: of_node_put(acc_node); out_acc: of_node_put(cpu_node); return ret; }
int msm8976_unclamp_secondary_arm_cpu(unsigned int cpu) { int ret = 0; struct device_node *cpu_node, *acc_node, *l2_node, *l2ccc_node; void __iomem *reg; u32 mpidr = cpu_logical_map(cpu); struct resource res; int val; cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) return -ENODEV; acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0); if (!acc_node) { ret = -ENODEV; goto out_acc; } l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); if (!l2_node) { ret = -ENODEV; goto out_l2; } l2ccc_node = of_parse_phandle(l2_node, "power-domain", 0); if (!l2ccc_node) { ret = -ENODEV; goto out_l2ccc; } /* * Ensure L2-cache of the CPU is powered on before * unclamping cpu power rails. */ ret = power_on_l2_cache(l2ccc_node, cpu); if (ret) { pr_err("L2 cache power up failed for CPU%d\n", cpu); goto out_acc_reg; } reg = of_iomap(acc_node, 0); if (!reg) { ret = -ENOMEM; goto out_acc_reg; } if (MPIDR_AFFINITY_LEVEL(mpidr, 1)) msm8976_unclamp_perf_cluster_cpu(reg); else msm8976_unclamp_power_cluster_cpu(reg); /* Secondary CPU-N is now alive. * Allowing L2 Low power modes */ ret = of_address_to_resource(l2ccc_node, 1, &res); if (ret) goto out_l2ccc_1; val = scm_io_read((u32)res.start); val &= ~BIT(0); scm_io_write((u32)res.start, val); out_l2ccc_1: iounmap(reg); out_acc_reg: of_node_put(l2ccc_node); out_l2ccc: of_node_put(l2_node); out_l2: of_node_put(acc_node); out_acc: of_node_put(cpu_node); return ret; }
int msm8994_unclamp_secondary_arm_cpu(unsigned int cpu) { int ret = 0; int val; struct device_node *cpu_node, *acc_node, *l2_node, *l2ccc_node; void __iomem *acc_reg, *ldo_bhs_reg; struct resource res; cpu_node = of_get_cpu_node(cpu, NULL); if (!cpu_node) return -ENODEV; acc_node = of_parse_phandle(cpu_node, "qcom,acc", 0); if (!acc_node) { ret = -ENODEV; goto out_acc; } l2_node = of_parse_phandle(cpu_node, "next-level-cache", 0); if (!l2_node) { ret = -ENODEV; goto out_l2; } l2ccc_node = of_parse_phandle(l2_node, "power-domain", 0); if (!l2ccc_node) { ret = -ENODEV; goto out_l2; } /* * Ensure L2-cache of the CPU is powered on before * unclamping cpu power rails. */ ret = power_on_l2_cache(l2ccc_node, cpu); if (ret) { pr_err("L2 cache power up failed for CPU%d\n", cpu); goto out_l2ccc; } ldo_bhs_reg = of_iomap(acc_node, 0); if (!ldo_bhs_reg) { ret = -ENOMEM; goto out_bhs_reg; } acc_reg = of_iomap(acc_node, 1); if (!acc_reg) { ret = -ENOMEM; goto out_acc_reg; } /* Assert head switch enable few */ writel_relaxed(0x00000001, acc_reg + CPU_PWR_GATE_CTL); mb(); udelay(1); /* Assert head switch enable rest */ writel_relaxed(0x00000003, acc_reg + CPU_PWR_GATE_CTL); mb(); udelay(1); /* De-assert coremem clamp. 
This is asserted by default */ writel_relaxed(0x00000079, acc_reg + CPU_PWR_CTL); mb(); udelay(2); /* Close coremem array gdhs */ writel_relaxed(0x0000007D, acc_reg + CPU_PWR_CTL); mb(); udelay(2); /* De-assert clamp */ writel_relaxed(0x0000003D, acc_reg + CPU_PWR_CTL); mb(); /* De-assert clamp */ writel_relaxed(0x0000003C, acc_reg + CPU_PWR_CTL); mb(); udelay(1); /* De-assert core0 reset */ writel_relaxed(0x0000000C, acc_reg + CPU_PWR_CTL); mb(); /* Assert PWRDUP */ writel_relaxed(0x0000008C, acc_reg + CPU_PWR_CTL); mb(); iounmap(acc_reg); ret = of_address_to_resource(l2ccc_node, 1, &res); if (ret) goto out_acc_reg; val = scm_io_read((u32)res.start); val &= ~BIT(0); scm_io_write((u32)res.start, val); out_acc_reg: iounmap(ldo_bhs_reg); out_bhs_reg: of_node_put(l2ccc_node); out_l2ccc: of_node_put(l2_node); out_l2: of_node_put(acc_node); out_acc: of_node_put(cpu_node); return ret; }