Example #1
static int omap_rproc_iommu_init(struct rproc *rproc,
		 int (*callback)(struct rproc *rproc, u64 fa, u32 flags))
{
	struct device *dev = rproc->dev;
	struct omap_rproc_pdata *pdata = dev->platform_data;
	int ret, i;
	struct iommu *iommu;
	struct omap_rproc_priv *rpp;

	rpp = kzalloc(sizeof(*rpp), GFP_KERNEL);
	if (!rpp)
		return -ENOMEM;

	if (pdata->clkdm)
		clkdm_wakeup(pdata->clkdm);
	iommu_set_isr(pdata->iommu_name, omap_rproc_iommu_isr, rproc);
	iommu_set_secure(pdata->iommu_name, rproc->secure_mode,
						rproc->secure_ttb);
	iommu = iommu_get(pdata->iommu_name);
	if (IS_ERR(iommu)) {
		ret = PTR_ERR(iommu);
		dev_err(dev, "iommu_get error: %d\n", ret);
		goto err_mmu;
	}

	rpp->iommu = iommu;
	rpp->iommu_cb = callback;
	rproc->priv = rpp;

	if (!rproc->secure_mode) {
		for (i = 0; rproc->memory_maps[i].size; i++) {
			const struct rproc_mem_entry *me =
							&rproc->memory_maps[i];

			ret = omap_rproc_map(dev, iommu, me->da, me->pa,
								 me->size);
			if (ret)
				goto err_map;
		}
	}
	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);

	return 0;

err_map:
	iommu_put(iommu);
err_mmu:
	iommu_set_secure(pdata->iommu_name, false, NULL);
	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);
	kfree(rpp);
	return ret;
}
Example #2
/**
 * omap_secure_dispatcher: Routine to dispatch low power secure
 * service routines
 * @idx: The HAL API index
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
 *
 * Returns a non-zero error value on failure.
 */
u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
							 u32 arg3, u32 arg4)
{
	u32 ret;
	u32 param[5];

	param[0] = nargs;
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	if (!l4_secure_clkdm)
		l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");

	clkdm_wakeup(l4_secure_clkdm);

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));
	ret = omap_smc2(idx, flag, __pa(param));

	clkdm_allow_idle(l4_secure_clkdm);

	return ret;
}
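A hypothetical call site for the dispatcher above; HAL_API_INDEX, FLAG_START_CRITICAL, addr and size are placeholder names, not identifiers from the source:

	/* Dispatch a two-argument secure service; unused args stay 0. */
	u32 err = omap_secure_dispatcher(HAL_API_INDEX, FLAG_START_CRITICAL,
					 2, addr, size, 0, 0);
	if (err)
		pr_err("secure dispatch failed: 0x%x\n", err);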
Example #3
static void omap2_iommu_disable(struct omap_iommu *obj)
{
	struct omap_hwmod *oh;
	u32 l;

	oh = omap_hwmod_lookup(obj->name);
	if (!oh)
		return;
	/*
	 * The IPU and DSP IOMMUs are not directly connected to the
	 * processor; instead they sit behind a shared MMU. If an MMU
	 * fault occurs and is not handled, the shared MMU keeps retrying
	 * the translation even while the processor is held in reset, so
	 * the status flag can never be cleared and the clkdm gets stuck
	 * in its transition state as soon as it tries to idle. The only
	 * way to reset the shared MMU is to hardreset the L2 IOMMU,
	 * which shares its reset line with the shared MMU. That clears
	 * the status bit and lets us turn the IOMMU off without issue.
	 */
	if (!strcmp(obj->name, "ipu") || !strcmp(obj->name, "dsp")) {
		omap_hwmod_assert_hardreset(oh, oh->rst_lines->name);
		omap_hwmod_deassert_hardreset(oh, oh->rst_lines->name);
		goto out;
	}

	l = iommu_read_reg(obj, MMU_IRQSTATUS);
	iommu_write_reg(obj, l, MMU_IRQSTATUS);
	l = iommu_read_reg(obj, MMU_CNTL);
	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
out:
	clkdm_allow_idle(oh->clkdm);
	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
Example #4
int omap_rproc_activate(struct omap_device *od)
{
	int i, ret = 0;
	struct rproc *rproc = platform_get_drvdata(&od->pdev);
	struct device *dev = rproc->dev;
	struct omap_rproc_pdata *pdata = dev->platform_data;
	struct omap_rproc_timers_info *timers = pdata->timers;
	struct omap_rproc_priv *rpp = rproc->priv;
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	struct iommu *iommu;

	if (!rpp->iommu) {
		iommu = iommu_get(pdata->iommu_name);
		if (IS_ERR(iommu)) {
			dev_err(dev, "iommu_get error: %ld\n",
				PTR_ERR(iommu));
			return PTR_ERR(iommu);
		}
		rpp->iommu = iommu;
	}

	if (!rpp->mbox)
		rpp->mbox = omap_mbox_get(pdata->sus_mbox_name, NULL);
#endif
	/*
	 * Explicitly configure the boot address from which the remote
	 * processor starts executing code when taken out of reset.
	 */
	_load_boot_addr(rproc, rpp->bootaddr);

	/*
	 * The domain is in HW_SUP and thus hw_auto, but since the
	 * remote processor is about to be enabled, the clkdm must be
	 * put in SW_SUP (do not let it idle).
	 */
	if (pdata->clkdm)
		clkdm_wakeup(pdata->clkdm);

	for (i = 0; i < pdata->timers_cnt; i++)
		omap_dm_timer_start(timers[i].odt);

	for (i = 0; i < od->hwmods_cnt; i++) {
		ret = omap_hwmod_enable(od->hwmods[i]);
		if (ret) {
			for (i = 0; i < pdata->timers_cnt; i++)
				omap_dm_timer_stop(timers[i].odt);
			break;
		}
	}

	/*
	 * The domain was forced awake, but now that the remote
	 * processor is enabled it is safe to switch the clkdm back to
	 * hw_auto (let it idle).
	 */
	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);

	return ret;
}
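The wakeup/allow-idle bracketing seen above recurs throughout these examples; distilled into a standalone sketch (clkdm assumed to be a valid struct clockdomain *):

	clkdm_wakeup(clkdm);		/* force the domain awake (SW_SUP) */
	/* ... access hardware that needs the domain's clocks running ... */
	clkdm_allow_idle(clkdm);	/* hand control back to hw_auto */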
Example #5
/**
 * omap2_clk_enable - request that the system enable a clock
 * @clk: struct clk * to enable
 *
 * Increments the usecount on struct clk @clk.  If there were no users
 * previously, then recurse up the clock tree, enabling all of the
 * clock's parents and all of the parent clockdomains, and finally,
 * enabling @clk's clockdomain, and @clk itself.  Intended to be
 * called with the clockfw_lock spinlock held.  Returns 0 upon success
 * or a negative error code upon failure.
 */
int omap2_clk_enable(struct clk *clk)
{
	int ret;

	pr_debug("clock: %s: incrementing usecount\n", clk->name);

	clk->usecount++;

	if (clk->usecount > 1)
		return 0;

	pr_debug("clock: %s: enabling in hardware\n", clk->name);

	if (clk->parent) {
		ret = omap2_clk_enable(clk->parent);
		if (ret) {
			WARN(1, "clock: %s: could not enable parent %s: %d\n",
			     clk->name, clk->parent->name, ret);
			goto oce_err1;
		}
	}

	if (clk->clkdm) {
		ret = clkdm_clk_enable(clk->clkdm, clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable clockdomain %s: "
			     "%d\n", clk->name, clk->clkdm->name, ret);
			goto oce_err2;
		}
	}

	if (clk->ops && clk->ops->enable) {
		trace_clock_enable(clk->name, 1, smp_processor_id());
		ret = clk->ops->enable(clk);
		if (ret) {
			WARN(1, "clock: %s: could not enable: %d\n",
			     clk->name, ret);
			goto oce_err3;
		}
	}

	/* If clockdomain supports hardware control, enable it */
	if (clk->clkdm)
		clkdm_allow_idle(clk->clkdm);

	return 0;

oce_err3:
	if (clk->clkdm)
		clkdm_clk_disable(clk->clkdm, clk);
oce_err2:
	if (clk->parent)
		omap2_clk_disable(clk->parent);
oce_err1:
	clk->usecount--;

	return ret;
}
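Per the kerneldoc, the function must be called with clockfw_lock held; a minimal sketch of a conforming caller (lock name taken from the OMAP2 clock framework, flags assumed declared):

	spin_lock_irqsave(&clockfw_lock, flags);
	ret = omap2_clk_enable(clk);
	spin_unlock_irqrestore(&clockfw_lock, flags);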
Example #6
static int __cpuinit omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update AuxCoreBoot0 with the boot state for the secondary core.
	 * The omap_secondary_startup() routine will hold the secondary
	 * core until the AuxCoreBoot1 register is updated with the cpu
	 * state. A barrier is added to ensure that the write buffer is
	 * drained.
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		__raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);

	flush_cache_all();
	smp_wmb();

	if (!cpu1_clkdm)
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");

	/*
	 * The SGIs (Software Generated Interrupts) are not wakeup
	 * capable from low power states. This is a known limitation on
	 * OMAP4 and needs to be worked around by a software-forced
	 * clockdomain wake-up: to wake CPU1, CPU0 forces the CPU1
	 * clockdomain into software wakeup, then puts the clockdomain
	 * back into hardware-supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J,
	 * Section 4.3.4.2, "Power States of CPU0 and CPU1".
	 */
	if (booted) {
		clkdm_wakeup(cpu1_clkdm);
		clkdm_allow_idle(cpu1_clkdm);
	} else {
		dsb_sev();
		booted = true;
	}

	gic_raise_softirq(cpumask_of(cpu), 0);

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example #7
/* XXX This function should be shareable between OMAP2xxx and OMAP3 */
static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
{
	if (clkdm->flags & CLKDM_CAN_ENABLE_AUTO)
		clkdm_allow_idle(clkdm);
	else if (clkdm->flags & CLKDM_CAN_FORCE_SLEEP &&
		 atomic_read(&clkdm->usecount) == 0)
		clkdm_sleep(clkdm);
	return 0;
}
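clkdms_setup() matches the callback signature of the clockdomain iterator, so the PM init code presumably walks every clockdomain with it, along the lines of:

	clkdm_for_each(clkdms_setup, NULL);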
Example #8
/**
 * omap4_prcm_freq_update - set the freq_update bit
 *
 * Programs the CM shadow registers to update the EMIF parameters.
 * In a few use cases only a few registers need to be updated via the
 * PRCM frequency-update sequence: the EMIF read-idle control and
 * zq-config must be refreshed on temperature alerts and voltage
 * changes. Returns -1 on error and 0 on success.
 */
int omap4_prcm_freq_update(void)
{
	u32 shadow_freq_cfg1;
	int i = 0;
	unsigned long flags;

	if (!l3_emif_clkdm) {
		pr_err("%s: clockdomain lookup failed\n", __func__);
		return -EINVAL;
	}

	spin_lock_irqsave(&l3_emif_lock, flags);
	/* Configures MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);

	/* Disable DDR self refresh (Errata ID: i728) */
	omap_emif_frequency_pre_notify();

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
	shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
			   (1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
				& OMAP4430_FREQ_UPDATE_MASK) == 0),
				MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Re-enable DDR self refresh */
	omap_emif_frequency_post_notify();

	/* Configure the MEMIF domain back to HW_AUTO */
	clkdm_allow_idle(l3_emif_clkdm);

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update failed (call from %pF)\n",
			__func__, (void *)_RET_IP_);
		pr_err("CLKCTRL: EMIF_1=0x%x EMIF_2=0x%x DMM=0x%x\n",
		       __raw_readl(OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL),
		       __raw_readl(OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL),
		       __raw_readl(OMAP4430_CM_MEMIF_DMM_CLKCTRL));
		emif_dump(0);
		emif_dump(1);
		return -1;
	}

	return 0;
}
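The completion poll above relies on the OMAP omap_test_timeout() helper, a busy-wait macro that steps in 1 µs increments and leaves the loop index for the caller to compare against the timeout; approximately:

	#define omap_test_timeout(cond, timeout, index)			\
	({								\
		for (index = 0; index < timeout; index++) {		\
			if (cond)					\
				break;					\
			udelay(1);					\
		}							\
	})

This is why the function checks i == MAX_FREQ_UPDATE_TIMEOUT after the wait.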
Example #9
/*
 * This sets the pwrdm state (other than mpu & core). Currently only
 * ON & RET are supported.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
	u32 cur_state;
	int sleep_switch = -1;
	int ret = 0;
	int hwsup = 0;

	if (pwrdm == NULL || IS_ERR(pwrdm))
		return -EINVAL;

	while (!(pwrdm->pwrsts & (1 << state))) {
		if (state == PWRDM_POWER_OFF)
			return ret;
		state--;
	}

	cur_state = pwrdm_read_next_pwrst(pwrdm);
	if (cur_state == state)
		return ret;

	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
		if ((pwrdm_read_pwrst(pwrdm) > state) &&
			(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
			sleep_switch = LOWPOWERSTATE_SWITCH;
		} else {
			hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
			clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
			sleep_switch = FORCEWAKEUP_SWITCH;
		}
	}

	ret = pwrdm_set_next_pwrst(pwrdm, state);
	if (ret) {
		pr_err("%s: unable to set state of powerdomain: %s\n",
		       __func__, pwrdm->name);
		goto err;
	}

	switch (sleep_switch) {
	case FORCEWAKEUP_SWITCH:
		if (hwsup)
			clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		else
			clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
		break;
	case LOWPOWERSTATE_SWITCH:
		pwrdm_set_lowpwrstchange(pwrdm);
		break;
	default:
		return ret;
	}

	pwrdm_state_switch(pwrdm);
err:
	return ret;
}
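A hypothetical call, leaning on the fallback loop above to downgrade to the next supported state ("dss_pwrdm" is an illustrative domain name):

	struct powerdomain *pd = pwrdm_lookup("dss_pwrdm");

	if (pd)
		omap_set_pwrdm_state(pd, PWRDM_POWER_RET);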
Example #10
/*
 * This sets the pwrdm state (other than mpu & core). Currently only
 * ON & RET are supported.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 state)
{
	u32 cur_state;
	int sleep_switch = 0;
	int ret = 0;

	if (pwrdm == NULL || IS_ERR(pwrdm))
		return -EINVAL;

	while (!(pwrdm->pwrsts & (1 << state))) {
		if (state == PWRDM_POWER_OFF)
			return ret;
		state--;
	}

	cur_state = pwrdm_read_next_pwrst(pwrdm);
	if (cur_state == state)
		return ret;

	if (pwrdm_read_pwrst(pwrdm) < PWRDM_POWER_ON) {
		if ((pwrdm_read_pwrst(pwrdm) > state) &&
			(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
			sleep_switch = LOWPOWERSTATE_SWITCH;
		} else {
			clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
			pwrdm_wait_transition(pwrdm);
			sleep_switch = FORCEWAKEUP_SWITCH;
		}
	}

	ret = pwrdm_set_next_pwrst(pwrdm, state);
	if (ret) {
		printk(KERN_ERR "Unable to set state of powerdomain: %s\n",
		       pwrdm->name);
		goto err;
	}

	switch (sleep_switch) {
	case FORCEWAKEUP_SWITCH:
		if (pwrdm->pwrdm_clkdms[0]->flags & CLKDM_CAN_ENABLE_AUTO)
			clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		else
			clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
		break;
	case LOWPOWERSTATE_SWITCH:
		pwrdm_set_lowpwrstchange(pwrdm);
		break;
	default:
		return ret;
	}

	pwrdm_wait_transition(pwrdm);
	pwrdm_state_switch(pwrdm);
err:
	return ret;
}
Example #11
void tf_l4sec_clkdm_allow_idle(bool wakeunlock)
{
	spin_lock(&tf_get_device()->sm.lock);
	if (atomic_dec_return(&smc_l4_sec_clkdm_use_count) == 0)
		clkdm_allow_idle(smc_l4_sec_clkdm);
#ifdef CONFIG_HAS_WAKELOCK
	if (wakeunlock)
		if (atomic_dec_return(&tf_wake_lock_count) == 0)
			wake_unlock(&g_tf_wake_lock);
#endif
	spin_unlock(&tf_get_device()->sm.lock);
}
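A sketch of the matching wakeup counterpart, reconstructed by symmetry with the function above rather than copied from the source:

	void tf_l4sec_clkdm_wakeup(bool wakelock)
	{
		spin_lock(&tf_get_device()->sm.lock);
	#ifdef CONFIG_HAS_WAKELOCK
		if (wakelock) {
			atomic_inc(&tf_wake_lock_count);
			wake_lock(&g_tf_wake_lock);
		}
	#endif
		if (atomic_inc_return(&smc_l4_sec_clkdm_use_count) == 1)
			clkdm_wakeup(smc_l4_sec_clkdm);
		spin_unlock(&tf_get_device()->sm.lock);
	}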
Example #12
File: pm.c Project: ivdok/linux
/*
 * This sets the pwrdm state (other than mpu & core). Currently only
 * ON & RET are supported.
 */
int omap_set_pwrdm_state(struct powerdomain *pwrdm, u32 pwrst)
{
	u8 curr_pwrst, next_pwrst;
	int sleep_switch = -1, ret = 0, hwsup = 0;

	if (!pwrdm || IS_ERR(pwrdm))
		return -EINVAL;

	while (!(pwrdm->pwrsts & (1 << pwrst))) {
		if (pwrst == PWRDM_POWER_OFF)
			return ret;
		pwrst--;
	}

	next_pwrst = pwrdm_read_next_pwrst(pwrdm);
	if (next_pwrst == pwrst)
		return ret;

	curr_pwrst = pwrdm_read_pwrst(pwrdm);
	if (curr_pwrst < PWRDM_POWER_ON) {
		if ((curr_pwrst > pwrst) &&
			(pwrdm->flags & PWRDM_HAS_LOWPOWERSTATECHANGE)) {
			sleep_switch = LOWPOWERSTATE_SWITCH;
		} else {
			hwsup = clkdm_in_hwsup(pwrdm->pwrdm_clkdms[0]);
			clkdm_wakeup(pwrdm->pwrdm_clkdms[0]);
			sleep_switch = FORCEWAKEUP_SWITCH;
		}
	}

	ret = pwrdm_set_next_pwrst(pwrdm, pwrst);
	if (ret)
		pr_err("%s: unable to set power state of powerdomain: %s\n",
		       __func__, pwrdm->name);

	switch (sleep_switch) {
	case FORCEWAKEUP_SWITCH:
		if (hwsup)
			clkdm_allow_idle(pwrdm->pwrdm_clkdms[0]);
		else
			clkdm_sleep(pwrdm->pwrdm_clkdms[0]);
		break;
	case LOWPOWERSTATE_SWITCH:
		pwrdm_set_lowpwrstchange(pwrdm);
		pwrdm_wait_transition(pwrdm);
		pwrdm_state_switch(pwrdm);
		break;
	}

	return ret;
}
Example #13
static void tf_clock_timer_cb(unsigned long data)
{
	unsigned long flags;
	u32 ret = 0;

	dprintk(KERN_INFO "%s called...\n", __func__);

	spin_lock_irqsave(&clk_timer_lock, flags);

	/*
	 * The timer function cuts all the HWA clocks only when no HWA
	 * is in use (by secure or public); otherwise it re-arms itself.
	 */
	if (tf_crypto_clock_enabled) {
		dprintk(KERN_INFO "%s; tf_crypto_clock_enabled = %d\n",
			__func__, tf_crypto_clock_enabled);
		goto restart;
	}

	ret = tf_crypto_turn_off_clocks();

	/*
	 * From MShield-DK 1.3.3 sources:
	 *
	 * Digest: 1 << 0
	 * DES   : 1 << 1
	 * AES1  : 1 << 2
	 * AES2  : 1 << 3
	 * RNG   : 1 << 4
	 * PKA   : 1 << 5
	 *
	 * Clock patch active: 1 << 7
	 */
	if (ret & 0x3f)
		goto restart;

	wake_unlock(&g_tf_wake_lock);
	clkdm_allow_idle(smc_l4_sec_clkdm);

	spin_unlock_irqrestore(&clk_timer_lock, flags);

	dprintk(KERN_INFO "%s success\n", __func__);
	return;

restart:
	dprintk("%s: will wait one more time ret=0x%x\n", __func__, ret);
	mod_timer(&tf_crypto_clock_timer,
		jiffies + msecs_to_jiffies(INACTIVITY_TIMER_TIMEOUT));

	spin_unlock_irqrestore(&clk_timer_lock, flags);
}
Example #14
/**
 * omap_secure_dispatcher: Routine to dispatch low power secure
 * service routines
 * @idx: The HAL API index
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
 *
 * Returns a non-zero error value on failure.
 */
u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
							 u32 arg3, u32 arg4)
{
	u32 ret = 0;
	u32 param[5];
	unsigned long flags;

	/* If we have an alternate dispatcher api, use it, else use default */
	spin_lock_irqsave(&_secure_dispatcher_lock, flags);
	if (_alternate_secure_dispatcher) {
		ret = _alternate_secure_dispatcher(idx, flag, nargs, arg1,
						   arg2, arg3, arg4);
		spin_unlock_irqrestore(&_secure_dispatcher_lock, flags);
		return ret;
	}
	spin_unlock_irqrestore(&_secure_dispatcher_lock, flags);

	param[0] = nargs;
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	if (!l4_secure_clkdm) {
		if (cpu_is_omap54xx())
			l4_secure_clkdm = clkdm_lookup("l4sec_clkdm");
		else
			l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");
	}

	if (!l4_secure_clkdm) {
		pr_err("%s: failed to get l4_secure_clkdm\n", __func__);
		return -EINVAL;
	}

	clkdm_wakeup(l4_secure_clkdm);

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));
	ret = omap_smc2(idx, flag, __pa(param));

	clkdm_allow_idle(l4_secure_clkdm);

	return ret;
}
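Installing an alternate dispatcher is not shown in this excerpt; a hypothetical registration consistent with the locking above (the function pointer my_dispatcher is an assumed name):

	unsigned long flags;

	spin_lock_irqsave(&_secure_dispatcher_lock, flags);
	_alternate_secure_dispatcher = my_dispatcher;
	spin_unlock_irqrestore(&_secure_dispatcher_lock, flags);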
Example #15
/*
 * Function responsible for formatting parameters to pass from NS world to
 * S world
 */
u32 omap4_secure_dispatcher(u32 app_id, u32 flags, u32 nargs,
	u32 arg1, u32 arg2, u32 arg3, u32 arg4)
{
	u32 ret;
	unsigned long iflags;
	u32 pub2sec_args[5] = {0, 0, 0, 0, 0};

	/*dpr_info("%s: app_id=0x%08x, flags=0x%08x, nargs=%u\n",
		__func__, app_id, flags, nargs);*/

	/*if (nargs != 0)
		dpr_info("%s: args=%08x, %08x, %08x, %08x\n",
			__func__, arg1, arg2, arg3, arg4);*/

	pub2sec_args[0] = nargs;
	pub2sec_args[1] = arg1;
	pub2sec_args[2] = arg2;
	pub2sec_args[3] = arg3;
	pub2sec_args[4] = arg4;

	/* Make sure parameters are visible to the secure world */
	dmac_flush_range((void *)pub2sec_args,
		(void *)(((u32)(pub2sec_args)) + 5*sizeof(u32)));
	outer_clean_range(__pa(pub2sec_args),
		__pa(pub2sec_args) + 5*sizeof(u32));
	wmb();

	/*
	 * Put L4 Secure clock domain to SW_WKUP so that modules are accessible
	 */
	clkdm_wakeup(smc_l4_sec_clkdm);

	local_irq_save(iflags);

	/* proc_id is always 0 */
	ret = schedule_secure_world(app_id, 0, flags, __pa(pub2sec_args));
	local_irq_restore(iflags);

	/* Restore the HW_SUP on L4 Sec clock domain so hardware can idle */
	if ((app_id != API_HAL_HWATURNOFF_INDEX) &&
	    (!timer_pending(&tf_crypto_clock_timer))) {
		(void) tf_crypto_turn_off_clocks();
		clkdm_allow_idle(smc_l4_sec_clkdm);
	}

	/*dpr_info("%s()\n", __func__);*/

	return ret;
}
Example #16
static int omap_rproc_iommu_exit(struct rproc *rproc)
{
	struct omap_rproc_priv *rpp = rproc->priv;
	struct omap_rproc_pdata *pdata = rproc->dev->platform_data;

	if (pdata->clkdm)
		clkdm_wakeup(pdata->clkdm);

	if (rpp->iommu)
		iommu_put(rpp->iommu);
	kfree(rpp);
	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);

	return 0;
}
Example #17
/*
 * omap4_sar_save - save the context to SAR_RAM1 and SAR_RAM2 as per
 * omap4xxx_sar_ram1_layout and omap4xxx_sar_ram2_layout for the
 * device OFF mode
 */
int omap4_sar_save(void)
{
	/*
	 * Not supported on ES1.0 silicon
	 */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN_ONCE(1, "omap4: SAR backup not supported on ES1.0 ..\n");
		return -ENODEV;
	}

	if (omap4_sar_not_accessible()) {
		pr_debug("%s: USB SAR CNTX registers are not accessible!\n",
			 __func__);
		return -EBUSY;
	}

	/*
	 * The SAR bits and clocks need to be enabled
	 */
	clkdm_wakeup(l3init_clkdm);
	pwrdm_enable_hdwr_sar(l3init_pwrdm);
	clk_enable(usb_host_ck);
	clk_enable(usb_tll_ck);

	/* Save SAR BANK1 */
	if (cpu_is_omap446x())
		sar_save(ARRAY_SIZE(omap446x_sar_ram1_layout), SAR_BANK1_OFFSET,
			 omap446x_sar_ram1_layout);
	else
		sar_save(ARRAY_SIZE(omap443x_sar_ram1_layout), SAR_BANK1_OFFSET,
			 omap443x_sar_ram1_layout);

	clk_disable(usb_host_ck);
	clk_disable(usb_tll_ck);
	pwrdm_disable_hdwr_sar(l3init_pwrdm);
	clkdm_allow_idle(l3init_clkdm);

	/* Save SAR BANK2 */
	if (cpu_is_omap446x())
		sar_save(ARRAY_SIZE(omap446x_sar_ram2_layout), SAR_BANK2_OFFSET,
			 omap446x_sar_ram2_layout);
	else
		sar_save(ARRAY_SIZE(omap443x_sar_ram2_layout), SAR_BANK2_OFFSET,
			 omap443x_sar_ram2_layout);

	return 0;
}
Example #18
void omap4_usb_sar_restore(void)
{
	u32 i;

	pr_err("USB SAR- SW Restore\n");
	clkdm_wakeup(l3init_clkdm);
	pwrdm_enable_hdwr_sar(l3init_pwrdm);
	clk_enable(usb_host_ck);
	clk_enable(usb_tll_ck);

	for (i = 0; i < (USB_SAR_AREA_END-USB_SAR_AREA_START)/4; i++)
		__raw_writel(usb_sar_data[i].val, usb_sar_data[i].reg_addr);

	clk_disable(usb_host_ck);
	clk_disable(usb_tll_ck);
	pwrdm_disable_hdwr_sar(l3init_pwrdm);
	clkdm_allow_idle(l3init_clkdm);
}
Example #19
int omap_rproc_deactivate(struct omap_device *od)
{
	int i, ret = 0;
	struct rproc *rproc = platform_get_drvdata(&od->pdev);
	struct device *dev = rproc->dev;
	struct omap_rproc_pdata *pdata = dev->platform_data;
	struct omap_rproc_timers_info *timers = pdata->timers;
#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	struct omap_rproc_priv *rpp = rproc->priv;
#endif
	if (pdata->clkdm)
		clkdm_wakeup(pdata->clkdm);

	for (i = 0; i < od->hwmods_cnt; i++) {
		ret = omap_hwmod_shutdown(od->hwmods[i]);
		if (ret)
			goto err;
	}

	for (i = 0; i < pdata->timers_cnt; i++)
		omap_dm_timer_stop(timers[i].odt);

#ifdef CONFIG_REMOTE_PROC_AUTOSUSPEND
	if (rpp->iommu) {
		iommu_put(rpp->iommu);
		rpp->iommu = NULL;
	}

	if (rpp->mbox) {
		omap_mbox_put(rpp->mbox, NULL);
		rpp->mbox = NULL;
	}
#endif
err:
	if (pdata->clkdm)
		clkdm_allow_idle(pdata->clkdm);

	return ret;
}
Example #20
/*
 * omap4_secure_dispatcher: Routine to dispatch low power secure
 * service routines
 *
 * @idx: The HAL API index
 * @flag: The flag indicating criticality of operation
 * @nargs: Number of valid arguments out of four.
 * @arg1, @arg2, @arg3, @arg4: Parameters passed to the secure API
 *
 * Returns the secure service's return value on success or failure
 */
u32 omap4_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
							 u32 arg3, u32 arg4)
{
	u32 ret;
	u32 param[5];

	param[0] = nargs;
	param[1] = arg1;
	param[2] = arg2;
	param[3] = arg3;
	param[4] = arg4;

	/* Look-up Only once */
	if (!l4_secure_clkdm)
		l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");

	/*
	 * Put the l4 secure clockdomain into software wakeup so that
	 * the secure modules are accessible
	 */
	clkdm_wakeup(l4_secure_clkdm);

	/*
	 * Secure API needs physical address
	 * pointer for the parameters
	 */
	flush_cache_all();
	outer_clean_range(__pa(param), __pa(param + 5));

	ret = omap_smc2(idx, flag, __pa(param));

	/*
	 * Restore the l4 secure clockdomain to hardware supervised
	 * mode to allow the secure modules to idle
	 */
	clkdm_allow_idle(l4_secure_clkdm);

	return ret;
}
Example #21
static void save_sar_bank3(void)
{
	struct clockdomain *l4_secure_clkdm;

	/*
	 * Not supported on ES1.0 silicon
	 */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN_ONCE(1, "omap4: SAR backup not supported on ES1.0 ..\n");
		return;
	}

	l4_secure_clkdm = clkdm_lookup("l4_secure_clkdm");
	clkdm_wakeup(l4_secure_clkdm);

	if (cpu_is_omap446x())
		sar_save(ARRAY_SIZE(omap446x_sar_ram3_layout), SAR_BANK3_OFFSET,
			 omap446x_sar_ram3_layout);
	else
		sar_save(ARRAY_SIZE(omap443x_sar_ram3_layout), SAR_BANK3_OFFSET,
			 omap443x_sar_ram3_layout);

	clkdm_allow_idle(l4_secure_clkdm);
}
Example #22
static void usbhs_wakeup_work(struct work_struct *unused)
{
	pm_runtime_put_sync(&pdev_usbhs->dev);
	clkdm_allow_idle(l3init_clkdm);
}
Example #23
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of the DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2
 * divider. The M2 divider clocks the external DDR, and its
 * reconfiguration on frequency change is managed by a hardware
 * sequencer driven by the PRCM and the EMIF using shadow registers.
 * Returns -EINVAL/-1 on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
    int i = 0;
    u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
    unsigned long flags;

    if (!clk || !rate)
        return -EINVAL;

    validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
    if (validrate != rate)
        return -EINVAL;

    /* Just to avoid look-up on every call to speed up */
    if (!l3_emif_clkdm) {
        l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
        if (!l3_emif_clkdm) {
            pr_err("%s: clockdomain lookup failed\n", __func__);
            return -EINVAL;
        }
    }

    spin_lock_irqsave(&l3_emif_lock, flags);

    /*
     * Errata ID: i728
     *
     * DESCRIPTION:
     *
     * If during a small window the following three events occur:
     *
     * 1) The EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM SR_TIMING counter expires
     * 2) Frequency change update is requested CM_SHADOW_FREQ_CONFIG1
     *    FREQ_UPDATE set to 1
     * 3) OCP access is requested
     *
     * The clock on the DDR interface becomes unstable.
     *
     * WORKAROUND:
     *
     * Prevent event 1) while event 2) is happening.
     *
     * Disable the self-refresh when requesting a frequency change.
     * Before requesting a frequency change, program
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0
     * (omap_emif_frequency_pre_notify)
     *
     * When the frequency change is completed, reprogram
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
     * (omap_emif_frequency_post_notify)
     */
    omap_emif_frequency_pre_notify();

    /* Configures MEMIF domain in SW_WKUP */
    clkdm_wakeup(l3_emif_clkdm);

    /*
     * Program EMIF timing parameters in EMIF shadow registers
     * for the targeted DDR clock.
     * DDR Clock = core_dpll_m2 / 2
     */
    omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);

    /*
     * FREQ_UPDATE sequence:
     * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
     *	after CORE DPLL lock)
     * - DLL_RESET=1 (DLL must be reset upon frequency change)
     * - DPLL_CORE_M2_DIV with same value as the one already
     *	in direct register
     * - DPLL_CORE_DPLL_EN=0x7 (to make CORE DPLL lock)
     * - FREQ_UPDATE=1 (to start HW sequence)
     */
    shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
                       (new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
                       (DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
                       (1 << OMAP4430_FREQ_UPDATE_SHIFT);
    shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
    __raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

    /* wait for the configuration to be applied */
    omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
                        & OMAP4430_FREQ_UPDATE_MASK) == 0),
                      MAX_FREQ_UPDATE_TIMEOUT, i);

    /* Configure the MEMIF domain back to HW_AUTO */
    clkdm_allow_idle(l3_emif_clkdm);

    /* Re-enable DDR self refresh */
    omap_emif_frequency_post_notify();

    spin_unlock_irqrestore(&l3_emif_lock, flags);

    if (i == MAX_FREQ_UPDATE_TIMEOUT) {
        pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
               __func__);
        return -1;
    }

    /* Update the clock change */
    clk->rate = validrate;

    return 0;
}
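In this era of the OMAP clock framework the set-rate hook above is reached through clk_set_rate(); a hypothetical caller (the clock name is assumed, not taken from the source):

    struct clk *dpll_m2 = clk_get(NULL, "dpll_core_m2_ck");

    if (!IS_ERR(dpll_m2))
        clk_set_rate(dpll_m2, target_rate);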
Example #24
/**
 * omap4_prcm_freq_update - set the freq_update bit
 *
 * Programs the CM shadow registers to update the EMIF parameters.
 * In a few use cases only a few registers need to be updated via the
 * PRCM frequency-update sequence: the EMIF read-idle control and
 * zq-config must be refreshed on temperature alerts and voltage
 * changes. Returns -1 on error and 0 on success.
 */
int omap4_prcm_freq_update(void)
{
    u32 shadow_freq_cfg1;
    int i = 0;
    unsigned long flags;

    if (!l3_emif_clkdm) {
        pr_err("%s: clockdomain lookup failed\n", __func__);
        return -EINVAL;
    }

    spin_lock_irqsave(&l3_emif_lock, flags);

    /*
     * Errata ID: i728
     *
     * DESCRIPTION:
     *
     * If during a small window the following three events occur:
     *
     * 1) The EMIF_PWR_MGMT_CTRL[7:4] REG_SR_TIM SR_TIMING counter expires
     * 2) Frequency change update is requested CM_SHADOW_FREQ_CONFIG1
     *    FREQ_UPDATE set to 1
     * 3) OCP access is requested
     *
     * The clock on the DDR interface becomes unstable.
     *
     * WORKAROUND:
     *
     * Prevent event 1) while event 2) is happening.
     *
     * Disable the self-refresh when requesting a frequency change.
     * Before requesting a frequency change, program
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x0
     * (omap_emif_frequency_pre_notify)
     *
     * When the frequency change is completed, reprogram
     * EMIF_PWR_MGMT_CTRL[10:8] REG_LP_MODE to 0x2.
     * (omap_emif_frequency_post_notify)
     */
    omap_emif_frequency_pre_notify();

    /* Configures MEMIF domain in SW_WKUP */
    clkdm_wakeup(l3_emif_clkdm);

    /*
     * FREQ_UPDATE sequence:
     * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
     *	after CORE DPLL lock)
     * - FREQ_UPDATE=1 (to start HW sequence)
     */
    shadow_freq_cfg1 = __raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1);
    shadow_freq_cfg1 |= (1 << OMAP4430_DLL_RESET_SHIFT) |
                        (1 << OMAP4430_FREQ_UPDATE_SHIFT);
    shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
    __raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

    /* wait for the configuration to be applied */
    omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
                        & OMAP4430_FREQ_UPDATE_MASK) == 0),
                      MAX_FREQ_UPDATE_TIMEOUT, i);

    /* Configure the MEMIF domain back to HW_AUTO */
    clkdm_allow_idle(l3_emif_clkdm);

    /* Re-enable DDR self refresh */
    omap_emif_frequency_post_notify();

    spin_unlock_irqrestore(&l3_emif_lock, flags);

    if (i == MAX_FREQ_UPDATE_TIMEOUT) {
        pr_err("%s: Frequency update failed (call from %pF)\n",
               __func__, (void *)_RET_IP_);
        pr_err("CLKCTRL: EMIF_1=0x%x EMIF_2=0x%x DMM=0x%x\n",
               __raw_readl(OMAP4430_CM_MEMIF_EMIF_1_CLKCTRL),
               __raw_readl(OMAP4430_CM_MEMIF_EMIF_2_CLKCTRL),
               __raw_readl(OMAP4430_CM_MEMIF_DMM_CLKCTRL));
        emif_dump(0);
        emif_dump(1);
        return -1;
    }

    return 0;
}
Example #25
/**
 * omap4_core_dpll_m2_set_rate - set CORE DPLL M2 divider
 * @clk: struct clk * of the DPLL to set
 * @rate: rounded target rate
 *
 * Programs the CM shadow registers to update the CORE DPLL M2
 * divider. The M2 divider clocks the external DDR, and its
 * reconfiguration on frequency change is managed by a hardware
 * sequencer driven by the PRCM and the EMIF using shadow registers.
 * Returns -EINVAL/-1 on error and 0 on success.
 */
int omap4_core_dpll_m2_set_rate(struct clk *clk, unsigned long rate)
{
	int i = 0;
	u32 validrate = 0, shadow_freq_cfg1 = 0, new_div = 0;
	unsigned long flags;

	if (!clk || !rate)
		return -EINVAL;

	validrate = omap2_clksel_round_rate_div(clk, rate, &new_div);
	if (validrate != rate)
		return -EINVAL;

	/* Just to avoid look-up on every call to speed up */
	if (!l3_emif_clkdm) {
		l3_emif_clkdm = clkdm_lookup("l3_emif_clkdm");
		if (!l3_emif_clkdm) {
			pr_err("%s: clockdomain lookup failed\n", __func__);
			return -EINVAL;
		}
	}

	spin_lock_irqsave(&l3_emif_lock, flags);

	/* Configures MEMIF domain in SW_WKUP */
	clkdm_wakeup(l3_emif_clkdm);

	/*
	 * Program EMIF timing parameters in EMIF shadow registers
	 * for the targeted DDR clock.
	 * DDR Clock = core_dpll_m2 / 2
	 */
	omap_emif_setup_registers(validrate >> 1, LPDDR2_VOLTAGE_STABLE);

	/*
	 * FREQ_UPDATE sequence:
	 * - DLL_OVERRIDE=0 (DLL lock & code must not be overridden
	 *	after CORE DPLL lock)
	 * - DLL_RESET=1 (DLL must be reset upon frequency change)
	 * - DPLL_CORE_M2_DIV with same value as the one already
	 *	in direct register
	 * - DPLL_CORE_DPLL_EN=0x7 (to make CORE DPLL lock)
	 * - FREQ_UPDATE=1 (to start HW sequence)
	 */
	shadow_freq_cfg1 = (1 << OMAP4430_DLL_RESET_SHIFT) |
			(new_div << OMAP4430_DPLL_CORE_M2_DIV_SHIFT) |
			(DPLL_LOCKED << OMAP4430_DPLL_CORE_DPLL_EN_SHIFT) |
			(1 << OMAP4430_FREQ_UPDATE_SHIFT);
	shadow_freq_cfg1 &= ~OMAP4430_DLL_OVERRIDE_MASK;
	__raw_writel(shadow_freq_cfg1, OMAP4430_CM_SHADOW_FREQ_CONFIG1);

	/* wait for the configuration to be applied */
	omap_test_timeout(((__raw_readl(OMAP4430_CM_SHADOW_FREQ_CONFIG1)
				& OMAP4430_FREQ_UPDATE_MASK) == 0),
				MAX_FREQ_UPDATE_TIMEOUT, i);

	/* Configure the MEMIF domain back to HW_AUTO */
	clkdm_allow_idle(l3_emif_clkdm);

	spin_unlock_irqrestore(&l3_emif_lock, flags);

	if (i == MAX_FREQ_UPDATE_TIMEOUT) {
		pr_err("%s: Frequency update for CORE DPLL M2 change failed\n",
				__func__);
		return -1;
	}

	/* Update the clock change */
	clk->rate = validrate;

	return 0;
}
Example #26
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	static struct clockdomain *cpu1_clkdm;
	static bool booted;
	static struct powerdomain *cpu1_pwrdm;
	void __iomem *base = omap_get_wakeupgen_base();

	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/*
	 * Update AuxCoreBoot0 with the boot state for the secondary core.
	 * The omap4_secondary_startup() routine will hold the secondary
	 * core until the AuxCoreBoot1 register is updated with the cpu
	 * state. A barrier is added to ensure that the write buffer is
	 * drained.
	 */
	if (omap_secure_apis_support())
		omap_modify_auxcoreboot0(0x200, 0xfffffdff);
	else
		__raw_writel(0x20, base + OMAP_AUX_CORE_BOOT_0);

	if (!cpu1_clkdm && !cpu1_pwrdm) {
		cpu1_clkdm = clkdm_lookup("mpu1_clkdm");
		cpu1_pwrdm = pwrdm_lookup("cpu1_pwrdm");
	}

	/*
	 * The SGIs (Software Generated Interrupts) are not wakeup
	 * capable from low power states. This is a known limitation on
	 * OMAP4 and needs to be worked around by a software-forced
	 * clockdomain wake-up: to wake CPU1, CPU0 forces the CPU1
	 * clockdomain into software wakeup, then puts the clockdomain
	 * back into hardware-supervised mode.
	 * More details can be found in OMAP4430 TRM - Version J,
	 * Section 4.3.4.2, "Power States of CPU0 and CPU1".
	 */
	if (booted && cpu1_pwrdm && cpu1_clkdm) {
		/*
		 * GIC distributor control register has changed between
		 * CortexA9 r1pX and r2pX. The Control Register secure
		 * banked version is now composed of 2 bits:
		 * bit 0 == Secure Enable
		 * bit 1 == Non-Secure Enable
		 * The Non-Secure banked register has not changed
		 * Because the ROM Code is based on the r1pX GIC, the CPU1
		 * GIC restoration will cause a problem to CPU0 Non-Secure SW.
		 * The workaround must be:
		 * 1) Before doing the CPU1 wakeup, CPU0 must disable
		 * the GIC distributor
		 * 2) CPU1 must re-enable the GIC distributor on
		 * its wakeup path.
		 */
		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			local_irq_disable();
			gic_dist_disable();
		}

		/*
		 * Ensure that CPU power state is set to ON to avoid CPU
		 * powerdomain transition on wfi
		 */
		clkdm_wakeup(cpu1_clkdm);
		omap_set_pwrdm_state(cpu1_pwrdm, PWRDM_POWER_ON);
		clkdm_allow_idle(cpu1_clkdm);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_SMP_BOOT_ERRATUM_GICD)) {
			while (gic_dist_disabled()) {
				udelay(1);
				cpu_relax();
			}
			gic_timer_retrigger();
			local_irq_enable();
		}
	} else {
		dsb_sev();
		booted = true;
	}

	arch_send_wakeup_ipi_mask(cpumask_of(cpu));

	/*
	 * Now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);

	return 0;
}
Example #27
static int _cpuidle_allow_idle(struct powerdomain *pwrdm,
				struct clockdomain *clkdm)
{
	clkdm_allow_idle(clkdm);
	return 0;
}
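This helper matches the callback signature expected by the OMAP powerdomain iterator, so the cpuidle code presumably applies it to every clockdomain in a powerdomain:

	pwrdm_for_each_clkdm(pwrdm, _cpuidle_allow_idle);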