Example #1
static void ft_opp_clock_fixups(void *fdt, bd_t *bd)
{
	const char **clk_names;
	u32 *clk_rates;
	int ret;

	if (!is_dra72x() && !is_dra7xx())
		return;

	/* fixup DSP clocks */
	clk_names = dra7_opp_dsp_clk_names;
	clk_rates = dra7_opp_dsp_clk_rates[get_voltrail_opp(VOLT_EVE)];
	ret = ft_fixup_clocks(fdt, clk_names, clk_rates, OPP_DSP_CLK_NUM);
	if (ret) {
		printf("ft_fixup_clocks failed for DSP voltage domain: %s\n",
		       fdt_strerror(ret));
		return;
	}

	/* fixup IVA clocks */
	clk_names = dra7_opp_iva_clk_names;
	clk_rates = dra7_opp_iva_clk_rates[get_voltrail_opp(VOLT_IVA)];
	ret = ft_fixup_clocks(fdt, clk_names, clk_rates, OPP_IVA_CLK_NUM);
	if (ret) {
		printf("ft_fixup_clocks failed for IVA voltage domain: %s\n",
		       fdt_strerror(ret));
		return;
	}

	/* fixup GPU clocks */
	clk_names = dra7_opp_gpu_clk_names;
	clk_rates = dra7_opp_gpu_clk_rates[get_voltrail_opp(VOLT_GPU)];
	ret = ft_fixup_clocks(fdt, clk_names, clk_rates, OPP_GPU_CLK_NUM);
	if (ret) {
		printf("ft_fixup_clocks failed for GPU voltage domain: %s\n",
		       fdt_strerror(ret));
		return;
	}
}
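
The ft_fixup_clocks() helper called above is not part of this listing. The sketch below shows one way such a helper could look, assuming the per-clock nodes sit under a single container node in the device tree and that writing an "assigned-clock-rates" cell is the desired fixup; the container path, the property name and the body are illustrative assumptions, not the upstream implementation. Only standard libfdt calls are used, and a libfdt error code is returned so the caller's fdt_strerror() reporting still works. In the real file such a helper would have to be defined (or declared) before its caller.

/* Sketch only: the actual ft_fixup_clocks() in the source tree may differ. */
static int ft_fixup_clocks(void *fdt, const char **names, u32 *rates, int num)
{
	int offs, node_offs, ret, i;

	/* Container node holding the individual clock nodes (assumed path) */
	offs = fdt_path_offset(fdt, "/ocp/cm_core_aon@4a005000/clocks");
	if (offs < 0)
		return offs;

	for (i = 0; i < num; i++) {
		/* Look up each clock node by name */
		node_offs = fdt_subnode_offset(fdt, offs, names[i]);
		if (node_offs < 0)
			return node_offs;

		/* Write the OPP-specific rate for this clock */
		ret = fdt_setprop_u32(fdt, node_offs, "assigned-clock-rates",
				      rates[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}
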
/*
 * Setup the voltages for the main SoC core power domains.
 * We start with the maximum voltages allowed here, as set in the corresponding
 * vcores_data struct, and then scale (usually down) to the fused values that
 * are retrieved from the SoC. The scaling happens only if the efuse.reg fields
 * are initialised.
 * Rail grouping is supported for the DRA7xx SoCs only; that code is therefore
 * compiled conditionally. Note that the code writes the scaled (or zeroed)
 * values back to the vcores_data struct for eventual reuse. Zero values mean
 * that the corresponding rails are not controlled separately and are not sent
 * to the PMIC.
 */
void scale_vcores(struct vcores_data const *vcores)
{
	int i, opp, j, ol;
	struct volts *pv = (struct volts *)vcores;
	struct volts *px;

	for (i=0; i<(sizeof(struct vcores_data)/sizeof(struct volts)); i++) {
		opp = get_voltrail_opp(i);
		debug("%d -> ", pv->value[opp]);

		if (pv->value[opp]) {
			/* Handle non-empty members only */
			pv->value[opp] = optimize_vcore_voltage(pv, opp);
			px = (struct volts *)vcores;
			j = 0;
			while (px < pv) {
				/*
				 * Scan the already-handled non-empty members
				 * to see if we have a group and find the max
				 * voltage, which is kept in the first
				 * occurrence of the particular SMPS; the other
				 * group voltages are zeroed.
				 */
				ol = get_voltrail_opp(j);
				if (px->value[ol] &&
				    (pv->pmic->i2c_slave_addr ==
				     px->pmic->i2c_slave_addr) &&
				    (pv->addr == px->addr)) {
					/* Same PMIC, same SMPS */
					if (pv->value[opp] > px->value[ol])
						px->value[ol] = pv->value[opp];

					pv->value[opp] = 0;
				}
				px++;
				j++;
			}
		}
		debug("%d\n", pv->value[opp]);
		pv++;
	}

	opp = get_voltrail_opp(VOLT_CORE);
	debug("cor: %d\n", vcores->core.value[opp]);
	do_scale_vcore(vcores->core.addr, vcores->core.value[opp],
		       vcores->core.pmic);
	/*
	 * IO delay recalibration should be done immediately after
	 * adjusting AVS voltages for VDD_CORE_L.
	 * Respective boards should call __recalibrate_iodelay()
	 * with proper mux, virtual and manual mode configurations.
	 */
#ifdef CONFIG_IODELAY_RECALIBRATION
	recalibrate_iodelay();
#endif

	opp = get_voltrail_opp(VOLT_MPU);
	debug("mpu: %d\n", vcores->mpu.value[opp]);
	do_scale_vcore(vcores->mpu.addr, vcores->mpu.value[opp],
		       vcores->mpu.pmic);
	/* Configure MPU ABB LDO after scale */
	abb_setup(vcores->mpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_mpu_setup,
		  (*prcm)->prm_abbldo_mpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu_2,
		  vcores->mpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_MM);
	debug("mm: %d\n", vcores->mm.value[opp]);
	do_scale_vcore(vcores->mm.addr, vcores->mm.value[opp],
		       vcores->mm.pmic);
	/* Configure MM ABB LDO after scale */
	abb_setup(vcores->mm.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_mm_voltage_ctrl,
		  (*prcm)->prm_abbldo_mm_setup,
		  (*prcm)->prm_abbldo_mm_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->mm.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_GPU);
	debug("gpu: %d\n", vcores->gpu.value[opp]);
	do_scale_vcore(vcores->gpu.addr, vcores->gpu.value[opp],
		       vcores->gpu.pmic);
	/* Configure GPU ABB LDO after scale */
	abb_setup(vcores->gpu.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_gpu_voltage_ctrl,
		  (*prcm)->prm_abbldo_gpu_setup,
		  (*prcm)->prm_abbldo_gpu_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->gpu.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_EVE);
	debug("eve: %d\n", vcores->eve.value[opp]);
	do_scale_vcore(vcores->eve.addr, vcores->eve.value[opp],
		       vcores->eve.pmic);
	/* Configure EVE ABB LDO after scale */
	abb_setup(vcores->eve.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_eve_voltage_ctrl,
		  (*prcm)->prm_abbldo_eve_setup,
		  (*prcm)->prm_abbldo_eve_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->eve.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);

	opp = get_voltrail_opp(VOLT_IVA);
	debug("iva: %d\n", vcores->iva.value[opp]);
	do_scale_vcore(vcores->iva.addr, vcores->iva.value[opp],
		       vcores->iva.pmic);
	/* Configure IVA ABB LDO after scale */
	abb_setup(vcores->iva.efuse.reg[opp],
		  (*ctrl)->control_wkup_ldovbb_iva_voltage_ctrl,
		  (*prcm)->prm_abbldo_iva_setup,
		  (*prcm)->prm_abbldo_iva_ctrl,
		  (*prcm)->prm_irqstatus_mpu,
		  vcores->iva.abb_tx_done_mask,
		  OMAP_ABB_FAST_OPP);
}
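
The grouping pass at the top of scale_vcores() casts the vcores_data struct to a flat array of struct volts and walks it with pointer arithmetic, which only works if vcores_data consists solely of struct volts members and their order matches the rail indices accepted by get_voltrail_opp(). The definitions below are a sketch for illustration only; the field names, the NUM_OPPS dimension and the member order are assumptions inferred from how the fields are used above (u32, NUM_OPPS and struct pmic_data are taken from the platform headers), not the exact upstream declarations.

/* Assumed layout, for illustration of the pointer walk above */
struct volts {
	u32 value[NUM_OPPS];		/* one voltage per supported OPP */
	u32 addr;			/* SMPS voltage register on the PMIC */
	struct {
		u32 reg[NUM_OPPS];	/* AVS class-0 efuse registers */
	} efuse;
	struct pmic_data *pmic;		/* PMIC behind this rail */
	u32 abb_tx_done_mask;		/* ABB LDO transition-done IRQ bit */
};

struct vcores_data {	/* member order must match the rail indices */
	struct volts mpu;
	struct volts core;
	struct volts mm;
	struct volts gpu;
	struct volts eve;
	struct volts iva;
};
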