Example #1
/*
 * Initialise the wakeupgen module.
 */
int __init omap_wakeupgen_init(void)
{
	int i;
	unsigned int boot_cpu = smp_processor_id();

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		omap_secure_apis = 1;
	} else if (soc_is_am43xx()) {
		irq_banks = AM43XX_NR_REG_BANKS;
		max_irqs = AM43XX_IRQS;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
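		/* AM43xx is single-core, so it has no CPU1 bank to clear */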
		if (!soc_is_am43xx())
			wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * Override GIC architecture specific functions to add
	 * OMAP WakeupGen interrupt controller along with GIC
	 */
	gic_arch_extn.irq_mask = wakeupgen_mask;
	gic_arch_extn.irq_unmask = wakeupgen_unmask;
	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;

	/*
	 * FIXME: Add support for set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all IRQs with the boot CPU, as GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}
Example #2
void omap4_prminst_global_warm_sw_reset(void)
{
	u32 v;
	s16 dev_inst;

	if (cpu_is_omap44xx())
		dev_inst = OMAP4430_PRM_DEVICE_INST;
	else if (soc_is_omap54xx())
		dev_inst = OMAP54XX_PRM_DEVICE_INST;
	else if (soc_is_dra7xx())
		dev_inst = DRA7XX_PRM_DEVICE_INST;
	else if (soc_is_am43xx())
		dev_inst = AM43XX_PRM_DEVICE_INST;
	else
		return;

	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, dev_inst,
					OMAP4_PRM_RSTCTRL_OFFSET);
	v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
	omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
				     dev_inst,
				     OMAP4_PRM_RSTCTRL_OFFSET);

	/*
	 * OCP barrier: read the register back so the posted write
	 * reaches the PRM before the warm reset takes effect.
	 */
	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
					dev_inst,
					OMAP4_PRM_RSTCTRL_OFFSET);
}
Example #3
static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
{
	/* Platforms which support all ECC schemes */
	if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() ||
	    soc_is_omap54xx() || soc_is_dra7xx())
		return true;

	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
	    ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
		if (cpu_is_omap24xx())
			return false;
		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
			return false;
		else
			return true;
	}

	/*
	 * OMAP3xxx do not have the ELM engine, so they cannot support
	 * ECC schemes which require H/W based ECC error detection.
	 */
	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
	    (ecc_opt == OMAP_ECC_BCH4_CODE_HW ||
	     ecc_opt == OMAP_ECC_BCH8_CODE_HW))
		return false;

	/* Legacy platforms support only the HAM1 (1-bit Hamming) ECC scheme. */
	return ecc_opt == OMAP_ECC_HAM1_CODE_HW ||
	       ecc_opt == OMAP_ECC_HAM1_CODE_SW;
}
Example #4
/* Public functions */
u8 omap2_init_dpll_parent(struct clk_hw *hw)
{
	struct clk_hw_omap *clk = to_clk_hw_omap(hw);
	u32 v;
	struct dpll_data *dd;

	dd = clk->dpll_data;
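	/*
	 * Note: the u8 return type cannot carry a negative errno, so the
	 * -EINVAL below is truncated to an unsigned value (234).
	 */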
	if (!dd)
		return -EINVAL;

	v = omap2_clk_readl(clk, dd->control_reg);
	v &= dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	/* Reparent the struct clk in case the dpll is in bypass */
	if (cpu_is_omap24xx()) {
		if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP2XXX_EN_DPLL_FRBYPASS)
			return 1;
	} else if (cpu_is_omap34xx()) {
		if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP3XXX_EN_DPLL_FRBYPASS)
			return 1;
	} else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx()) {
		if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
		    v == OMAP4XXX_EN_DPLL_MNBYPASS)
			return 1;
	}
	return 0;
}
Example #5
static void __init omap2_gptimer_clocksource_init(int gptimer_id,
						  const char *fck_source,
						  const char *property)
{
	int res;

	clksrc.id = gptimer_id;
	clksrc.errata = omap_dm_timer_get_errata();

	if (soc_is_am43xx()) {
		clocksource_gpt.suspend = omap2_gptimer_clksrc_suspend;
		clocksource_gpt.resume = omap2_gptimer_clksrc_resume;
	}

	res = omap_dm_timer_init_one(&clksrc, fck_source, property,
				     &clocksource_gpt.name,
				     OMAP_TIMER_NONPOSTED);
	BUG_ON(res);

	__omap_dm_timer_load_start(&clksrc,
				   OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0,
				   OMAP_TIMER_NONPOSTED);
	sched_clock_register(dmtimer_read_sched_clock, 32, clksrc.rate);

	if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
		pr_err("Could not register clocksource %s\n",
			clocksource_gpt.name);
	else
		pr_info("OMAP clocksource: %s at %lu Hz\n",
			clocksource_gpt.name, clksrc.rate);
}
Example #6
/*
 * The amount of SRAM depends on the core type.
 * Note that we cannot try to test for SRAM here because writes
 * to secure SRAM will hang the system. Also the SRAM is not
 * yet mapped at this point.
 */
static void __init omap_detect_sram(void)
{
	omap_sram_skip = SRAM_BOOTLOADER_SZ;
	if (is_sram_locked()) {
		if (cpu_is_omap34xx()) {
			omap_sram_start = OMAP3_SRAM_PUB_PA;
			if ((omap_type() == OMAP2_DEVICE_TYPE_EMU) ||
			    (omap_type() == OMAP2_DEVICE_TYPE_SEC)) {
				omap_sram_size = 0x7000; /* 28K */
				omap_sram_skip += SZ_16K;
			} else {
				omap_sram_size = 0x8000; /* 32K */
			}
		} else if (cpu_is_omap44xx()) {
			omap_sram_start = OMAP4_SRAM_PUB_PA;
			omap_sram_size = 0xa000; /* 40K */
		} else if (soc_is_omap54xx()) {
			omap_sram_start = OMAP5_SRAM_PA;
			omap_sram_size = SZ_128K; /* 128KB */
		} else {
			omap_sram_start = OMAP2_SRAM_PUB_PA;
			omap_sram_size = 0x800; /* 2K */
		}
	} else {
		if (soc_is_am33xx()) {
			omap_sram_start = AM33XX_SRAM_PA;
			omap_sram_size = 0x10000; /* 64K */
		} else if (soc_is_am43xx()) {
			omap_sram_start = AM33XX_SRAM_PA;
			omap_sram_size = SZ_64K;
		} else if (cpu_is_omap34xx()) {
			omap_sram_start = OMAP3_SRAM_PA;
			omap_sram_size = 0x10000; /* 64K */
		} else if (cpu_is_omap44xx()) {
			omap_sram_start = OMAP4_SRAM_PA;
			omap_sram_size = 0xe000; /* 56K */
		} else if (soc_is_omap54xx()) {
			omap_sram_start = OMAP5_SRAM_PA;
			omap_sram_size = SZ_128K; /* 128KB */
		} else {
			omap_sram_start = OMAP2_SRAM_PA;
			if (cpu_is_omap242x())
				omap_sram_size = 0xa0000; /* 640K */
			else if (cpu_is_omap243x())
				omap_sram_size = 0x10000; /* 64K */
		}
	}
}
Example #7
/**
 * ti_clk_init_features - init clock features struct for the SoC
 *
 * Initializes the clock features struct based on the SoC type.
 */
void __init ti_clk_init_features(void)
{
	/* Fint setup for DPLLs */
	if (cpu_is_omap3430()) {
		ti_clk_features.fint_min = OMAP3430_DPLL_FINT_BAND1_MIN;
		ti_clk_features.fint_max = OMAP3430_DPLL_FINT_BAND2_MAX;
		ti_clk_features.fint_band1_max = OMAP3430_DPLL_FINT_BAND1_MAX;
		ti_clk_features.fint_band2_min = OMAP3430_DPLL_FINT_BAND2_MIN;
	} else {
		ti_clk_features.fint_min = OMAP3PLUS_DPLL_FINT_MIN;
		ti_clk_features.fint_max = OMAP3PLUS_DPLL_FINT_MAX;
	}

	/* Bypass value setup for DPLLs */
	if (cpu_is_omap24xx()) {
		ti_clk_features.dpll_bypass_vals |=
			(1 << OMAP2XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP2XXX_EN_DPLL_FRBYPASS);
	} else if (cpu_is_omap34xx()) {
		ti_clk_features.dpll_bypass_vals |=
			(1 << OMAP3XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP3XXX_EN_DPLL_FRBYPASS);
	} else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx() ||
		   soc_is_omap54xx() || soc_is_dra7xx()) {
		ti_clk_features.dpll_bypass_vals |=
			(1 << OMAP4XXX_EN_DPLL_LPBYPASS) |
			(1 << OMAP4XXX_EN_DPLL_FRBYPASS) |
			(1 << OMAP4XXX_EN_DPLL_MNBYPASS);
	}
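	/*
	 * With this mask populated, a bypass check elsewhere in the clock
	 * code can simply test (1 << v) against dpll_bypass_vals instead
	 * of repeating per-SoC if/else chains like the ones in
	 * omap2_get_dpll_rate().
	 */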

	/* Jitter correction only available on OMAP343X */
	if (cpu_is_omap343x())
		ti_clk_features.flags |= TI_CLK_DPLL_HAS_FREQSEL;

	/*
	 * Idlest value for interface clocks.
	 * 24xx uses 0 to indicate not ready, and 1 to indicate ready.
	 * 34xx reverses this, just to keep us on our toes.
	 * AM35xx uses both, depending on the module.
	 */
	if (cpu_is_omap24xx())
		ti_clk_features.cm_idlest_val = OMAP24XX_CM_IDLEST_VAL;
	else if (cpu_is_omap34xx())
		ti_clk_features.cm_idlest_val = OMAP34XX_CM_IDLEST_VAL;

	/* On OMAP3430 ES1.0, DPLL4 can't be re-programmed */
	if (omap_rev() == OMAP3430_REV_ES1_0)
		ti_clk_features.flags |= TI_CLK_DPLL4_DENY_REPROGRAM;
}
Example #8
s32 omap4_prmst_get_prm_dev_inst(void)
{
	if (prm_dev_inst != PRM_INSTANCE_UNKNOWN)
		return prm_dev_inst;

	/* This cannot be done early at boot, as SoC detection is not yet set up */
	if (cpu_is_omap44xx())
		prm_dev_inst = OMAP4430_PRM_DEVICE_INST;
	else if (soc_is_omap54xx())
		prm_dev_inst = OMAP54XX_PRM_DEVICE_INST;
	else if (soc_is_dra7xx())
		prm_dev_inst = DRA7XX_PRM_DEVICE_INST;
	else if (soc_is_am43xx())
		prm_dev_inst = AM43XX_PRM_DEVICE_INST;

	return prm_dev_inst;
}
Example #9
int __init omap_sram_init(void)
{
	omap_detect_sram();
	omap2_map_sram();

	if (cpu_is_omap242x())
		omap242x_sram_init();
	else if (cpu_is_omap2430())
		omap243x_sram_init();
	else if (soc_is_am33xx())
		am33xx_sram_init();
	else if (soc_is_am43xx())
		am43xx_sram_init();
	else if (cpu_is_omap34xx())
		omap34xx_sram_init();

	return 0;
}
Example #10
int omap_type(void)
{
	u32 val = 0;

	if (cpu_is_omap24xx()) {
		val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
	} else if (soc_is_am33xx() || soc_is_am43xx()) {
		val = omap_ctrl_readl(AM33XX_CONTROL_STATUS);
	} else if (cpu_is_omap34xx()) {
		val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
	} else if (cpu_is_omap44xx()) {
		val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = omap_ctrl_readl(OMAP5XXX_CONTROL_STATUS);
		val &= OMAP5_DEVICETYPE_MASK;
		val >>= 6;
		goto out;
	} else {
		pr_err("Cannot detect omap type!\n");
		goto out;
	}

	val &= OMAP2_DEVICETYPE_MASK;
	val >>= 8;

out:
	return val;
}
Example #11
static void __init omap2_gp_clockevent_init(int gptimer_id,
						const char *fck_source,
						const char *property)
{
	int res;

	clkev.id = gptimer_id;
	clkev.errata = omap_dm_timer_get_errata();

	/*
	 * For clock-event timers we never read the timer counter and
	 * so we are not impacted by errata i103 and i767. Therefore,
	 * we can safely ignore this errata for clock-event timers.
	 */
	__omap_dm_timer_override_errata(&clkev, OMAP_TIMER_ERRATA_I103_I767);

	res = omap_dm_timer_init_one(&clkev, fck_source, property,
				     &clockevent_gpt.name, OMAP_TIMER_POSTED);
	BUG_ON(res);

	omap2_gp_timer_irq.dev_id = &clkev;
	setup_irq(clkev.irq, &omap2_gp_timer_irq);

	__omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW);

	clockevent_gpt.cpumask = cpu_possible_mask;
	clockevent_gpt.irq = omap_dm_timer_get_irq(&clkev);
	clockevents_config_and_register(&clockevent_gpt, clkev.rate,
					3, /* Timer internal resynch latency */
					0xffffffff);

	if (soc_is_am33xx() || soc_is_am43xx()) {
		clockevent_gpt.suspend = omap_clkevt_suspend;
		clockevent_gpt.resume = omap_clkevt_resume;

		clockevent_gpt_hwmod =
			omap_hwmod_lookup(clockevent_gpt.name);
	}

	pr_info("OMAP clockevent source: %s at %lu Hz\n", clockevent_gpt.name,
		clkev.rate);
}
Example #12
/**
 * omap2_get_dpll_rate - returns the current DPLL CLKOUT rate
 * @clk: struct clk * of a DPLL
 *
 * DPLLs can be locked or bypassed - basically, enabled or disabled.
 * When locked, the DPLL output depends on the M and N values.  When
 * bypassed, on OMAP2xxx, the output rate is either the 32 kHz clock
 * or sys_clk.  Bypass rates on OMAP3 depend on the DPLL: DPLLs 1 and
 * 2 are bypassed with dpll1_fclk and dpll2_fclk respectively
 * (generated by DPLL3), while DPLL 3, 4, and 5 bypass rates are sys_clk.
 * Returns the current DPLL CLKOUT rate (*not* CLKOUTX2) if the DPLL is
 * locked, or the appropriate bypass rate if the DPLL is bypassed, or 0
 * if the clock @clk is not a DPLL.
 */
unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk)
{
	long long dpll_clk;
	u32 dpll_mult, dpll_div, v;
	struct dpll_data *dd;

	dd = clk->dpll_data;
	if (!dd)
		return 0;

	/* Return bypass rate if DPLL is bypassed */
	v = omap2_clk_readl(clk, dd->control_reg);
	v &= dd->enable_mask;
	v >>= __ffs(dd->enable_mask);

	if (cpu_is_omap24xx()) {
		if (v == OMAP2XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP2XXX_EN_DPLL_FRBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	} else if (cpu_is_omap34xx()) {
		if (v == OMAP3XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP3XXX_EN_DPLL_FRBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	} else if (soc_is_am33xx() || cpu_is_omap44xx() || soc_is_am43xx()) {
		if (v == OMAP4XXX_EN_DPLL_LPBYPASS ||
		    v == OMAP4XXX_EN_DPLL_FRBYPASS ||
		    v == OMAP4XXX_EN_DPLL_MNBYPASS)
			return __clk_get_rate(dd->clk_bypass);
	}

	v = omap2_clk_readl(clk, dd->mult_div1_reg);
	dpll_mult = v & dd->mult_mask;
	dpll_mult >>= __ffs(dd->mult_mask);
	dpll_div = v & dd->div1_mask;
	dpll_div >>= __ffs(dd->div1_mask);

	dpll_clk = (long long) __clk_get_rate(dd->clk_ref) * dpll_mult;
	do_div(dpll_clk, dpll_div + 1);

	return dpll_clk;
}
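
As a quick sanity check of the locked-rate formula above, here is a minimal
standalone sketch; the reference rate and the M/N field values are made up
for illustration, not read from real hardware:

#include <stdio.h>

int main(void)
{
	unsigned long long ref = 19200000ULL;	/* 19.2 MHz reference clock */
	unsigned int dpll_mult = 625;		/* hypothetical M field value */
	unsigned int dpll_div = 11;		/* hypothetical N field value */

	/* Same arithmetic as omap2_get_dpll_rate(): CLKOUT = ref * M / (N + 1) */
	unsigned long long dpll_clk = ref * dpll_mult / (dpll_div + 1);

	printf("CLKOUT = %llu Hz\n", dpll_clk);	/* 1000000000 Hz, i.e. 1 GHz */
	return 0;
}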
Example #13
int omap_type(void)
{
	static u32 val = OMAP2_DEVICETYPE_MASK;

	if (val < OMAP2_DEVICETYPE_MASK)
		return val;

	if (soc_is_omap24xx()) {
		val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
	} else if (soc_is_ti81xx()) {
		val = omap_ctrl_readl(TI81XX_CONTROL_STATUS);
	} else if (soc_is_am33xx() || soc_is_am43xx()) {
		val = omap_ctrl_readl(AM33XX_CONTROL_STATUS);
	} else if (soc_is_omap34xx()) {
		val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
	} else if (soc_is_omap44xx()) {
		val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = omap_ctrl_readl(OMAP5XXX_CONTROL_STATUS);
		val &= OMAP5_DEVICETYPE_MASK;
		val >>= 6;
		goto out;
	} else {
		pr_err("Cannot detect omap type!\n");
		goto out;
	}

	val &= OMAP2_DEVICETYPE_MASK;
	val >>= 8;

out:
	return val;
}
Example #14
int __init am33xx_pm_init(void)
{
	struct powerdomain *cefuse_pwrdm;
#ifdef CONFIG_CPU_PM
	int ret;
	u32 temp;
	struct device_node *np;
#endif /* CONFIG_CPU_PM */

	if (!soc_is_am33xx() && !soc_is_am43xx())
		return -ENODEV;

#ifdef CONFIG_CPU_PM
	am33xx_pm = kzalloc(sizeof(*am33xx_pm), GFP_KERNEL);
	if (!am33xx_pm) {
		pr_err("Memory allocation failed\n");
		ret = -ENOMEM;
		return ret;
	}

	ret = am33xx_map_emif();
	if (ret) {
		pr_err("PM: Could not ioremap EMIF\n");
		goto err;
	}

#ifdef CONFIG_SUSPEND
	gfx_pwrdm = pwrdm_lookup("gfx_pwrdm");
	per_pwrdm = pwrdm_lookup("per_pwrdm");
	mpu_pwrdm = pwrdm_lookup("mpu_pwrdm");

	if (!gfx_pwrdm || !per_pwrdm || !mpu_pwrdm) {
		ret = -ENODEV;
		goto err;
	}

	/*
	 * Code paths for each SoC are nearly the same; the per-SoC ops
	 * handle the differences during init, pre-suspend, and
	 * post-suspend.
	 */

	if (soc_is_am33xx())
		am33xx_pm->ops = &am33xx_ops;
	else if (soc_is_am43xx())
		am33xx_pm->ops = &am43xx_ops;

	ret = am33xx_pm->ops->init();

	if (ret)
		goto err;
#endif /* CONFIG_SUSPEND */

	/* Determine Memory Type */
	temp = readl(am33xx_emif_base + EMIF_SDRAM_CONFIG);
	temp = (temp & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT;
	/* Parameters to pass to assembly code */
	susp_params.wfi_flags = 0;
	susp_params.emif_addr_virt = am33xx_emif_base;
	susp_params.dram_sync = am33xx_dram_sync;

	switch (temp) {
	case MEM_TYPE_DDR2:
		susp_params.wfi_flags |= WFI_MEM_TYPE_DDR2;
		break;
	case MEM_TYPE_DDR3:
		susp_params.wfi_flags |= WFI_MEM_TYPE_DDR3;
		break;
	}
	susp_params.wfi_flags |= WFI_SELF_REFRESH;
	susp_params.wfi_flags |= WFI_SAVE_EMIF;
	susp_params.wfi_flags |= WFI_DISABLE_EMIF;
	susp_params.wfi_flags |= WFI_WAKE_M3;

	am33xx_pm->ipc.reg4 = temp & MEM_TYPE_MASK;

	np = of_find_compatible_node(NULL, NULL, "ti,am3353-wkup-m3");
	if (np) {
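		/*
		 * Encode VTT-toggle and IO-isolation requests into IPC
		 * register 4, which is passed to the wkup_m3 firmware.
		 */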
		if (of_find_property(np, "ti,needs-vtt-toggle", NULL) &&
		    !of_property_read_u32(np, "ti,vtt-gpio-pin", &temp)) {
			/* temp is unsigned, so only the upper bound needs checking */
			if (temp <= 31)
				am33xx_pm->ipc.reg4 |=
					(1 << VTT_STAT_SHIFT) |
					(temp << VTT_GPIO_PIN_SHIFT);
			else
				pr_warn("PM: Invalid VTT GPIO(%u) pin\n", temp);
		}
		}

		if (of_find_property(np, "ti,set-io-isolation", NULL))
			am33xx_pm->ipc.reg4 |= (1 << IO_ISOLATION_STAT_SHIFT);
	}

#ifdef CONFIG_SUSPEND
	ret = am33xx_setup_sleep_sequence();
	if (ret) {
		pr_err("Error fetching I2C sleep/wake sequence\n");
		goto err;
	}
#endif /* CONFIG_SUSPEND */
#endif /* CONFIG_CPU_PM */

	(void) clkdm_for_each(omap_pm_clkdms_setup, NULL);

	/* CEFUSE domain can be turned off post bootup */
	cefuse_pwrdm = pwrdm_lookup("cefuse_pwrdm");
	if (cefuse_pwrdm)
		omap_set_pwrdm_state(cefuse_pwrdm, PWRDM_POWER_OFF);
	else
		pr_err("PM: Failed to get cefuse_pwrdm\n");

#ifdef CONFIG_CPU_PM
	am33xx_pm->state = M3_STATE_RESET;

	wkup_m3_set_ops(&am33xx_wkup_m3_ops);

	/*
	 * The M3 firmware may already have loaded before the ops were
	 * set; invoke the ready callback manually in that case.
	 */

	if (wkup_m3_is_valid())
		am33xx_m3_fw_ready_cb();
#endif /* CONFIG_CPU_PM */

	return 0;

#ifdef CONFIG_CPU_PM
err:
	kfree(am33xx_pm);
	return ret;
#endif /* CONFIG_CPU_PM */
}