/******************************************************************************
 * Public functions
 *****************************************************************************/

/*
 * Program this CPU's SPM for the requested low power mode and record
 * whether the RPM should be notified.  Always returns 0; an unknown
 * mode is a fatal programming error (BUG).
 */
int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
{
	struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices);
	uint32_t rpm_bypass = notify_rpm ? 0x00 : 0x01;

	/* Skip the hardware writes if nothing has changed since last time. */
	if (mode == dev->low_power_mode && notify_rpm == dev->notify_rpm
			&& !dev->dirty)
		return 0;

	if (mode == MSM_SPM_MODE_CLOCK_GATING) {
		msm_spm_set_spm_ctl(dev, rpm_bypass, 0x00);
		msm_spm_set_slp_rst_en(dev, 0x00);
	} else if (mode == MSM_SPM_MODE_POWER_RETENTION) {
		msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02);
		msm_spm_set_pmic_ctl(dev, dev->awake_vlevel,
			dev->retention_mid_vlevel, dev->retention_vlevel);
		msm_spm_set_slp_rst_en(dev, 0x00);
	} else if (mode == MSM_SPM_MODE_POWER_COLLAPSE) {
		msm_spm_set_spm_ctl(dev, rpm_bypass, 0x02);
		msm_spm_set_pmic_ctl(dev, dev->awake_vlevel,
			dev->collapse_mid_vlevel, dev->collapse_vlevel);
		msm_spm_set_slp_rst_en(dev, 0x01);
	} else {
		BUG();
	}

	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_CTL);
	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SPM_PMIC_CTL);
	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_SLP_RST_EN);

	/* Ensure that the registers are written before returning */
	dsb();

	dev->low_power_mode = mode;
	dev->notify_rpm = notify_rpm;
	dev->dirty = false;

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_SHADOW) {
		int i;

		for (i = 0; i < MSM_SPM_REG_NR; i++)
			pr_info("%s: reg %02x = 0x%08x\n", __func__,
				msm_spm_reg_offsets[i], dev->reg_shadow[i]);
	}

	return 0;
}
/*
 * One-time init: copy the per-CPU platform data into each CPU's SPM
 * device state and push the initial register values to hardware.
 * Every device starts out dirty so the first low-power-mode request
 * reprograms it.  Always returns 0.
 */
int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
{
	unsigned int cpu;

	/* There must be a platform-data entry for every possible CPU. */
	BUG_ON(nr_devs < num_possible_cpus());

	for_each_possible_cpu(cpu) {
		struct msm_spm_device *dev = &per_cpu(msm_spm_devices, cpu);
		int reg;

		dev->reg_base_addr = data[cpu].reg_base_addr;
		memcpy(dev->reg_shadow, data[cpu].reg_init_values,
			sizeof(data[cpu].reg_init_values));

		dev->awake_vlevel = data[cpu].awake_vlevel;
		dev->retention_vlevel = data[cpu].retention_vlevel;
		dev->collapse_vlevel = data[cpu].collapse_vlevel;
		dev->retention_mid_vlevel = data[cpu].retention_mid_vlevel;
		dev->collapse_mid_vlevel = data[cpu].collapse_mid_vlevel;
		dev->vctl_timeout_us = data[cpu].vctl_timeout_us;

		/* Write the initial shadow contents out to the hardware. */
		for (reg = 0; reg < MSM_SPM_REG_NR_INITIALIZE; reg++)
			msm_spm_flush_shadow(dev, reg);

		dev->low_power_mode = MSM_SPM_MODE_CLOCK_GATING;
		dev->notify_rpm = false;
		dev->dirty = true;
	}

	return 0;
}
/*
 * Re-push the cached register shadow of the calling CPU's SPM to the
 * hardware (e.g. after the block lost state).
 *
 * Fix: add the missing dsb() after the flush loop.  Without the write
 * barrier the register writes may still be buffered when the caller
 * proceeds; the other flush paths in this file (msm_spm_set_low_power_mode
 * and the later msm_spm_reinit revision) barrier after flushing for
 * exactly this reason.
 */
void msm_spm_reinit(void)
{
	struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices);
	int i;

	for (i = 0; i < MSM_SPM_REG_NR_INITIALIZE; i++)
		msm_spm_flush_shadow(dev, i);

	/* Ensure that the registers are written before returning */
	dsb();
}
/*
 * Re-push the cached register shadow of the calling CPU's SPM to the
 * hardware, then barrier so the writes are posted before we return.
 */
void msm_spm_reinit(void)
{
	struct msm_spm_device *dev = &__get_cpu_var(msm_spm_devices);
	int reg = 0;

	while (reg < MSM_SPM_REG_NR_INITIALIZE) {
		msm_spm_flush_shadow(dev, reg);
		reg++;
	}

	/* Ensure that the registers are written before returning */
	dsb();
}
int msm_spm_set_vdd(unsigned int vlevel) { unsigned long flags; struct msm_spm_device *dev; uint32_t timeout_us; local_irq_save(flags); dev = &__get_cpu_var(msm_spm_devices); if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: requesting vlevel 0x%x\n", __func__, vlevel); msm_spm_set_vctl(dev, vlevel); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL); /* Wait for PMIC state to return to idle or until timeout */ timeout_us = dev->vctl_timeout_us; msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); while (msm_spm_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) { if (!timeout_us) goto set_vdd_bail; if (timeout_us > 10) { udelay(10); timeout_us -= 10; } else { udelay(timeout_us); timeout_us = 0; } msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); } if (msm_spm_get_sts_curr_pmic_data(dev) != vlevel) goto set_vdd_bail; dev->awake_vlevel = vlevel; dev->dirty = true; if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: done, remaining timeout %uus\n", __func__, timeout_us); local_irq_restore(flags); return 0; set_vdd_bail: pr_err("%s: failed, remaining timeout %uus, vlevel 0x%x\n", __func__, timeout_us, msm_spm_get_sts_curr_pmic_data(dev)); local_irq_restore(flags); return -EIO; }
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel) { struct msm_spm_device *dev; uint32_t timeout_us; dev = &per_cpu(msm_spm_devices, cpu); if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: requesting cpu %u vlevel 0x%x\n", __func__, cpu, vlevel); msm_spm_set_vctl(dev, vlevel); msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL); timeout_us = dev->vctl_timeout_us; msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); while (msm_spm_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) { if (!timeout_us) goto set_vdd_bail; if (timeout_us > 10) { udelay(10); timeout_us -= 10; } else { udelay(timeout_us); timeout_us = 0; } msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS); } if (msm_spm_get_sts_curr_pmic_data(dev) != vlevel) goto set_vdd_bail; dev->awake_vlevel = vlevel; dev->dirty = true; if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL) pr_info("%s: cpu %u done, remaining timeout %uus\n", __func__, cpu, timeout_us); return 0; set_vdd_bail: pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n", __func__, cpu, timeout_us, msm_spm_get_sts_curr_pmic_data(dev)); return -EIO; }
/*
 * Request core voltage @vlevel on @cpu's SPM and busy-wait until the
 * PMIC state machine returns to idle, up to dev->vctl_timeout_us.
 * Unless msm_spm_set_vdd_x_cpu_allowed is set, the request is refused
 * when issued from a different CPU than @cpu.  Runs with local
 * interrupts disabled for the whole sequence.  Returns 0 on success,
 * -EIO on a cross-CPU refusal, on timeout, or if the PMIC settled at a
 * different voltage than requested.
 *
 * NOTE(review): the set_vdd_x_cpu_bail path is taken before `dev` is
 * initialized — it must never be extended to dereference `dev`.
 */
int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
{
	unsigned long flags;
	struct msm_spm_device *dev;
	uint32_t timeout_us;

	local_irq_save(flags);

	/* Refuse cross-CPU requests unless explicitly allowed. */
	if (!atomic_read(&msm_spm_set_vdd_x_cpu_allowed) &&
				unlikely(smp_processor_id() != cpu)) {
		if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
			pr_info("%s: attempting to set vdd of cpu %u from "
				"cpu %u\n", __func__, cpu, smp_processor_id());
		goto set_vdd_x_cpu_bail;
	}

	dev = &per_cpu(msm_spm_devices, cpu);

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
		pr_info("%s: requesting cpu %u vlevel 0x%x\n",
			__func__, cpu, vlevel);

	msm_spm_set_vctl(dev, vlevel);
	msm_spm_flush_shadow(dev, MSM_SPM_REG_SAW_VCTL);

	/* Wait for PMIC state to return to idle or until timeout */
	timeout_us = dev->vctl_timeout_us;
	msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS);
	while (msm_spm_get_sts_pmic_state(dev) != MSM_SPM_PMIC_STATE_IDLE) {
		if (!timeout_us)
			goto set_vdd_bail;

		/* Poll in 10us steps, spending at most the remaining budget. */
		if (timeout_us > 10) {
			udelay(10);
			timeout_us -= 10;
		} else {
			udelay(timeout_us);
			timeout_us = 0;
		}
		msm_spm_load_shadow(dev, MSM_SPM_REG_SAW_STS);
	}

	/* The PMIC went idle but must also report the requested voltage. */
	if (msm_spm_get_sts_curr_pmic_data(dev) != vlevel)
		goto set_vdd_bail;

	dev->awake_vlevel = vlevel;
	dev->dirty = true;

	if (msm_spm_debug_mask & MSM_SPM_DEBUG_VCTL)
		pr_info("%s: cpu %u done, remaining timeout %uus\n",
			__func__, cpu, timeout_us);

	local_irq_restore(flags);
	return 0;

set_vdd_bail:
	pr_err("%s: cpu %u failed, remaining timeout %uus, vlevel 0x%x\n",
		__func__, cpu, timeout_us,
		msm_spm_get_sts_curr_pmic_data(dev));

set_vdd_x_cpu_bail:
	local_irq_restore(flags);
	return -EIO;
}