/* * Setup the voltages for vdd_mpu, vdd_core, and vdd_iva * We set the maximum voltages allowed here because Smart-Reflex is not * enabled in bootloader. Voltage initialization in the kernel will set * these to the nominal values after enabling Smart-Reflex */ void scale_vcores(struct vcores_data const *vcores) { u32 val; val = optimize_vcore_voltage(&vcores->core); do_scale_vcore(vcores->core.addr, val, vcores->core.pmic); val = optimize_vcore_voltage(&vcores->mpu); do_scale_vcore(vcores->mpu.addr, val, vcores->mpu.pmic); /* Configure MPU ABB LDO after scale */ abb_setup((*ctrl)->control_std_fuse_opp_vdd_mpu_2, (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl, (*prcm)->prm_abbldo_mpu_setup, (*prcm)->prm_abbldo_mpu_ctrl, (*prcm)->prm_irqstatus_mpu_2, OMAP_ABB_MPU_TXDONE_MASK, OMAP_ABB_FAST_OPP); val = optimize_vcore_voltage(&vcores->mm); do_scale_vcore(vcores->mm.addr, val, vcores->mm.pmic); val = optimize_vcore_voltage(&vcores->gpu); do_scale_vcore(vcores->gpu.addr, val, vcores->gpu.pmic); val = optimize_vcore_voltage(&vcores->eve); do_scale_vcore(vcores->eve.addr, val, vcores->eve.pmic); val = optimize_vcore_voltage(&vcores->iva); do_scale_vcore(vcores->iva.addr, val, vcores->iva.pmic); }
/* * Setup the voltages for vdd_mpu, vdd_core, and vdd_iva * We set the maximum voltages allowed here because Smart-Reflex is not * enabled in bootloader. Voltage initialization in the kernel will set * these to the nominal values after enabling Smart-Reflex */ void scale_vcores(struct vcores_data const *vcores) { u32 val; val = optimize_vcore_voltage(&vcores->core); do_scale_vcore(vcores->core.addr, val, vcores->core.pmic); val = optimize_vcore_voltage(&vcores->mpu); do_scale_vcore(vcores->mpu.addr, val, vcores->mpu.pmic); /* Configure MPU ABB LDO after scale */ abb_setup((*ctrl)->control_std_fuse_opp_vdd_mpu_2, (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl, (*prcm)->prm_abbldo_mpu_setup, (*prcm)->prm_abbldo_mpu_ctrl, (*prcm)->prm_irqstatus_mpu_2, OMAP_ABB_MPU_TXDONE_MASK, OMAP_ABB_FAST_OPP); val = optimize_vcore_voltage(&vcores->mm); do_scale_vcore(vcores->mm.addr, val, vcores->mm.pmic); val = optimize_vcore_voltage(&vcores->gpu); do_scale_vcore(vcores->gpu.addr, val, vcores->gpu.pmic); val = optimize_vcore_voltage(&vcores->eve); do_scale_vcore(vcores->eve.addr, val, vcores->eve.pmic); val = optimize_vcore_voltage(&vcores->iva); do_scale_vcore(vcores->iva.addr, val, vcores->iva.pmic); if (emif_sdram_type() == EMIF_SDRAM_TYPE_DDR3) { /* Configure LDO SRAM "magic" bits */ writel(2, (*prcm)->prm_sldo_core_setup); writel(2, (*prcm)->prm_sldo_mpu_setup); writel(2, (*prcm)->prm_sldo_mm_setup); } }
/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	/*
	 * NOTE(review): the grouping scan below walks the vcores_data struct
	 * as a contiguous array of struct volts via pointer arithmetic —
	 * assumes vcores_data contains only struct volts members; confirm
	 * against the struct definition.
	 */
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	/* Pass 1: optimize each rail and collapse rails sharing one SMPS. */
	for (i=0; i<(sizeof(struct vcores_data)/sizeof(struct volts)); i++) {
		/* Each rail index maps to its own OPP selection. */
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to see
				 * if we have a group and find the max voltage,
				 * which is set to the first occurance of the
				 * particular SMPS; the other group voltages are
				 * zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					/*
					 * Zero this rail so only the first
					 * group member is sent to the PMIC.
					 */
					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	/* Pass 2: program each rail; do_scale_vcore() skips zeroed values. */
	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}
/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only, therefore the code is
 * compiled conditionally. Note that the new code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately, and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
#if defined(CONFIG_DRA7XX)
	int i;
	/*
	 * NOTE(review): the grouping scan below walks the vcores_data struct
	 * as a contiguous array of struct volts via pointer arithmetic —
	 * assumes vcores_data contains only struct volts members; confirm
	 * against the struct definition.
	 */
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	/* Pass 1: optimize each rail and collapse rails sharing one SMPS. */
	for (i=0; i<(sizeof(struct vcores_data)/sizeof(struct volts)); i++) {
		debug("%d -> ", pv->value);
		if (pv->value) {
			/* Handle non-empty members only */
			pv->value = optimize_vcore_voltage(pv);
			px = (struct volts *)vcores;
			while (px < pv) {
				/*
				 * Scan already handled non-empty members to see
				 * if we have a group and find the max voltage,
				 * which is set to the first occurance of the
				 * particular SMPS; the other group voltages are
				 * zeroed.
				 */
				if (px->value) {
					if ((pv->pmic->i2c_slave_addr ==
					     px->pmic->i2c_slave_addr) &&
					    (pv->addr == px->addr)) {
						/* Same PMIC, same SMPS */
						if (pv->value > px->value)
							px->value = pv->value;

						/*
						 * Zero this rail so only the
						 * first group member is sent
						 * to the PMIC.
						 */
						pv->value = 0;
					}
				}
				px++;
			}
		}
		debug("%d\n", pv->value);
		pv++;
	}

	/* Pass 2: program each rail; zeroed values are not sent. */
	debug("cor: %d\n", vcores->core.value);
	do_scale_vcore(vcores->core.addr, vcores->core.value,
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	debug("mpu: %d\n", vcores->mpu.value);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value,
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	/* The .mm member is not used for the DRA7xx */

	debug("gpu: %d\n", vcores->gpu.value);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value,
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	debug("eve: %d\n", vcores->eve.value);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value,
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	debug("iva: %d\n", vcores->iva.value);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value,
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
	/* Might need udelay(1000) here if debug is enabled to see all prints */
#else
	u32 val;

	val = optimize_vcore_voltage(&vcores->core);
	do_scale_vcore(vcores->core.addr, val, vcores->core.pmic);

	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	val = optimize_vcore_voltage(&vcores->mpu);
	do_scale_vcore(vcores->mpu.addr, val, vcores->mpu.pmic);

	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	val = optimize_vcore_voltage(&vcores->mm);
	do_scale_vcore(vcores->mm.addr, val, vcores->mm.pmic);

	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg,
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	val = optimize_vcore_voltage(&vcores->gpu);
	do_scale_vcore(vcores->gpu.addr, val, vcores->gpu.pmic);

	val = optimize_vcore_voltage(&vcores->eve);
	do_scale_vcore(vcores->eve.addr, val, vcores->eve.pmic);

	val = optimize_vcore_voltage(&vcores->iva);
	do_scale_vcore(vcores->iva.addr, val, vcores->iva.pmic);
#endif
}