Example 1
static unsigned configure_dma_errata(void)
{
	unsigned errata = 0;

	/*
	 * Errata applicable to OMAP2430 ES1.0 and all OMAP2420 devices
	 *
	 * I.
	 * Erratum ID: Not Available
	 * Inter-frame DMA buffering issue: DMA will wrongly
	 * buffer elements if packing and bursting are enabled. This might
	 * result in data getting stalled in the FIFO at the end of the block.
	 * Workaround: DMA channels must have the BUFFERING_DISABLED bit set
	 * to guarantee that no data stays in the DMA FIFO in case inter-frame
	 * buffering occurs.
	 *
	 * II.
	 * Erratum ID: Not Available
	 * DMA may hang when several channels are used in parallel
	 * In the following configuration, DMA channel hanging can occur:
	 * a. Channel i, hardware synchronized, is enabled
	 * b. Another channel (Channel x), software synchronized, is enabled.
	 * c. Channel i is disabled before end of transfer
	 * d. Channel i is reenabled.
	 * e. Steps a to d are repeated a certain number of times.
	 * f. A third channel (Channel y), software synchronized, is enabled.
	 * Channel x and Channel y may hang immediately after step 'f'.
	 * Workaround:
	 * For any channel used, make sure NextLCH_ID is set to the value j.
	 */
	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
				(omap_type() == OMAP2430_REV_ES1_0))) {

		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
	}

	/*
	 * Erratum ID: i378: OMAP2+: sDMA channel is not disabled
	 * after a transaction error.
	 * Workaround: software should explicitly disable the channel.
	 */
	if (cpu_class_is_omap2())
		SET_DMA_ERRATA(DMA_ERRATA_i378);

	/*
	 * Erratum ID: i541: sDMA FIFO draining does not finish.
	 * If an sDMA channel is disabled on the fly, sDMA enters standby even
	 * though the FIFO drain is still in progress.
	 * Workaround: put sDMA in NoStandby mode before a logical channel is
	 * disabled, then put it back to SmartStandby right after the channel
	 * finishes FIFO draining.
	 */
	if (cpu_is_omap34xx())
		SET_DMA_ERRATA(DMA_ERRATA_i541);

	/*
	 * Erratum ID: i88: Special programming model needed to disable DMA
	 * before end of block.
	 * Workaround: software must ensure that the DMA is configured in No
	 * Standby mode (DMAx_OCP_SYSCONFIG.MIDLEMODE = "01")
	 */
	if (omap_type() == OMAP3430_REV_ES1_0)
		SET_DMA_ERRATA(DMA_ERRATA_i88);

	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller has finished disabling the channel.
	 */
	SET_DMA_ERRATA(DMA_ERRATA_3_3);

	/*
	 * Erratum ID: Not Available
	 * A bug in ROM code leaves IRQ status for channels 0 and 1 uncleared
	 * after secure SRAM context save and restore.
	 * Workaround: manually clear those IRQs to avoid spurious
	 * interrupts. This affects only secure devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		SET_DMA_ERRATA(DMA_ROMCODE_BUG);

	return errata;
}
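
The SET_DMA_ERRATA() calls above accumulate flags into the local errata variable, which the function returns as a bitmask. A minimal sketch of how such helpers could be defined, matching the usage in this function (the macro bodies are assumptions, not verified originals):

#define SET_DMA_ERRATA(id)	(errata |= (id))
#define IS_DMA_ERRATA(id)	(errata & (id))

Each DMA_ERRATA_* constant would then be a distinct bit, so callers can later test the returned mask per erratum with IS_DMA_ERRATA().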
Example 2
/* Populate the scratchpad structure with restore structure */
void omap3_save_scratchpad_contents(void)
{
	void  __iomem *scratchpad_address;
	u32 arm_context_addr;
	struct omap3_scratchpad scratchpad_contents;
	struct omap3_scratchpad_prcm_block prcm_block_contents;
	struct omap3_scratchpad_sdrc_block sdrc_block_contents;

	/*
	 * Populate the scratchpad contents
	 *
	 * The omap3_restore* symbols below provide the physical restore
	 * address to which the ROM code jumps while waking up from the
	 * MPU OFF/OSWR state.
	 * The restore pointer is stored into the scratchpad.
	 */
	scratchpad_contents.boot_config_ptr = 0x0;
	if (cpu_is_omap3630())
		scratchpad_contents.public_restore_ptr =
			virt_to_phys(omap3_restore_3630);
	else if (omap_rev() != OMAP3430_REV_ES3_0 &&
					omap_rev() != OMAP3430_REV_ES3_1 &&
					omap_rev() != OMAP3430_REV_ES3_1_2)
		scratchpad_contents.public_restore_ptr =
			virt_to_phys(omap3_restore);
	else
		scratchpad_contents.public_restore_ptr =
			virt_to_phys(omap3_restore_es3);

	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		scratchpad_contents.secure_ram_restore_ptr = 0x0;
	else
		scratchpad_contents.secure_ram_restore_ptr =
			(u32) __pa(omap3_secure_ram_storage);
	scratchpad_contents.sdrc_module_semaphore = 0x0;
	scratchpad_contents.prcm_block_offset = 0x2C;
	scratchpad_contents.sdrc_block_offset = 0x64;

	/* Populate the PRCM block contents */
	prcm_block_contents.prm_clksrc_ctrl =
		omap2_prm_read_mod_reg(OMAP3430_GR_MOD,
				       OMAP3_PRM_CLKSRC_CTRL_OFFSET);
	prcm_block_contents.prm_clksel =
		omap2_prm_read_mod_reg(OMAP3430_CCR_MOD,
				       OMAP3_PRM_CLKSEL_OFFSET);
	prcm_block_contents.cm_clksel_core =
			omap2_cm_read_mod_reg(CORE_MOD, CM_CLKSEL);
	prcm_block_contents.cm_clksel_wkup =
			omap2_cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
	prcm_block_contents.cm_clken_pll =
			omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN);
	/*
	 * As per erratum i671, the ROM code does not respect the PER DPLL
	 * programming scheme if CM_AUTOIDLE_PLL..AUTO_PERIPH_DPLL == 1.
	 * Then, in any case, clear these bits to avoid extra latencies.
	 */
	prcm_block_contents.cm_autoidle_pll =
			omap2_cm_read_mod_reg(PLL_MOD, CM_AUTOIDLE) &
			~OMAP3430_AUTO_PERIPH_DPLL_MASK;
	prcm_block_contents.cm_clksel1_pll =
			omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
	prcm_block_contents.cm_clksel2_pll =
			omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL);
	prcm_block_contents.cm_clksel3_pll =
			omap2_cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3);
	prcm_block_contents.cm_clken_pll_mpu =
			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL);
	prcm_block_contents.cm_autoidle_pll_mpu =
			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL);
	prcm_block_contents.cm_clksel1_pll_mpu =
			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL);
	prcm_block_contents.cm_clksel2_pll_mpu =
			omap2_cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL);
	prcm_block_contents.prcm_block_size = 0x0;

	/* Populate the SDRC block contents */
	sdrc_block_contents.sysconfig =
			(sdrc_read_reg(SDRC_SYSCONFIG) & 0xFFFF);
	sdrc_block_contents.cs_cfg =
			(sdrc_read_reg(SDRC_CS_CFG) & 0xFFFF);
	sdrc_block_contents.sharing =
			(sdrc_read_reg(SDRC_SHARING) & 0xFFFF);
	sdrc_block_contents.err_type =
			(sdrc_read_reg(SDRC_ERR_TYPE) & 0xFFFF);
	sdrc_block_contents.dll_a_ctrl = sdrc_read_reg(SDRC_DLLA_CTRL);
	sdrc_block_contents.dll_b_ctrl = 0x0;
	/*
	 * Due to OMAP3 erratum 1.142, on EMU/HS devices the SDRC should
	 * be programmed to issue automatic self refresh on a timeout
	 * of AUTO_CNT = 1 prior to any transition to OFF mode.
	 */
	if ((omap_type() != OMAP2_DEVICE_TYPE_GP)
			&& (omap_rev() >= OMAP3430_REV_ES3_0))
		sdrc_block_contents.power = (sdrc_read_reg(SDRC_POWER) &
				~(SDRC_POWER_AUTOCOUNT_MASK|
				SDRC_POWER_CLKCTRL_MASK)) |
				(1 << SDRC_POWER_AUTOCOUNT_SHIFT) |
				SDRC_SELF_REFRESH_ON_AUTOCOUNT;
	else
		sdrc_block_contents.power = sdrc_read_reg(SDRC_POWER);

	sdrc_block_contents.cs_0 = 0x0;
	sdrc_block_contents.mcfg_0 = sdrc_read_reg(SDRC_MCFG_0);
	sdrc_block_contents.mr_0 = (sdrc_read_reg(SDRC_MR_0) & 0xFFFF);
	sdrc_block_contents.emr_1_0 = 0x0;
	sdrc_block_contents.emr_2_0 = 0x0;
	sdrc_block_contents.emr_3_0 = 0x0;
	sdrc_block_contents.actim_ctrla_0 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_A_0);
	sdrc_block_contents.actim_ctrlb_0 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_B_0);
	sdrc_block_contents.rfr_ctrl_0 =
			sdrc_read_reg(SDRC_RFR_CTRL_0);
	sdrc_block_contents.cs_1 = 0x0;
	sdrc_block_contents.mcfg_1 = sdrc_read_reg(SDRC_MCFG_1);
	sdrc_block_contents.mr_1 = sdrc_read_reg(SDRC_MR_1) & 0xFFFF;
	sdrc_block_contents.emr_1_1 = 0x0;
	sdrc_block_contents.emr_2_1 = 0x0;
	sdrc_block_contents.emr_3_1 = 0x0;
	sdrc_block_contents.actim_ctrla_1 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_A_1);
	sdrc_block_contents.actim_ctrlb_1 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_B_1);
	sdrc_block_contents.rfr_ctrl_1 =
			sdrc_read_reg(SDRC_RFR_CTRL_1);
	sdrc_block_contents.dcdl_1_ctrl = 0x0;
	sdrc_block_contents.dcdl_2_ctrl = 0x0;
	sdrc_block_contents.flags = 0x0;
	sdrc_block_contents.block_size = 0x0;

	arm_context_addr = virt_to_phys(omap3_arm_context);

	/* Copy all the contents to the scratchpad location */
	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
	memcpy_toio(scratchpad_address, &scratchpad_contents,
		 sizeof(scratchpad_contents));
	/* The *_block_offset fields above are byte offsets from the scratchpad base */
	memcpy_toio(scratchpad_address +
		scratchpad_contents.prcm_block_offset,
		&prcm_block_contents, sizeof(prcm_block_contents));
	memcpy_toio(scratchpad_address +
		scratchpad_contents.sdrc_block_offset,
		&sdrc_block_contents, sizeof(sdrc_block_contents));
	/*
	 * Copies the address of the location in SDRAM where ARM
	 * registers get saved during a MPU OFF transition.
	 */
	memcpy_toio(scratchpad_address +
		scratchpad_contents.sdrc_block_offset +
		sizeof(sdrc_block_contents), &arm_context_addr, 4);
}
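
For reference, the scratchpad header written above plausibly has the following shape, reconstructed from the field assignments in this function (the actual layout is dictated by the ROM code, so treat this as a sketch):

struct omap3_scratchpad {
	u32 boot_config_ptr;
	u32 public_restore_ptr;
	u32 secure_ram_restore_ptr;
	u32 sdrc_module_semaphore;
	u32 prcm_block_offset;	/* 0x2C: byte offset of the PRCM block */
	u32 sdrc_block_offset;	/* 0x64: byte offset of the SDRC block */
};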
Example 3
/*
 * The distributor is enabled by the master CPU.
 * Also clear the SAR backup status register.
 */
static inline void enable_gic_distributor(void)
{
	writel(0x1, gic_dist_base_addr + GIC_DIST_CTRL);
	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		writel(0x0, sar_bank3_base + SAR_BACKUP_STATUS_OFFSET);
}
Example 4
/*
 * OMAP4 MPUSS Low Power Entry Function
 *
 * The purpose of this function is to manage the low power programming
 * of the OMAP4 MPUSS subsystem.
 * Parameters:
 *	cpu : CPU ID
 *	power_state: Targeted low power state.
 *
 * MPUSS Low power states
 * The basic rule is that the MPUSS power domain must be at a higher or
 * equal power state (a state that consumes more power) than the higher of
 * the two CPUs. For example, it is illegal for system power to be OFF while
 * the power of one or both of the CPUs is DORMANT. When an illegal state is
 * entered, the hardware behavior is unpredictable.
 *
 * MPUSS state for the context save
 * save_state =
 *	0 - Nothing lost and no need to save: MPUSS INACTIVE
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 */
void omap4_enter_lowpower(unsigned int cpu, unsigned int power_state)
{
	unsigned int save_state, wakeup_cpu, inst_clk_enab = 0;

	if (cpu >= NR_CPUS)
		return;

	/*
	 * Low power state not supported on ES1.0 silicon
	 */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		wmb();
		do_wfi();
		return;
	}

	switch (power_state) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		save_state = 0;
		break;
	case PWRDM_POWER_OFF:
		save_state = 1;
		setup_wakeup_routine(cpu);
		save_local_timers(cpu);
		break;
	case PWRDM_POWER_RET:
		/*
		 * CPUx CSWR is an invalid hardware state. Additionally,
		 * CPUx OSWR doesn't give any gain vs. CPUx OFF and hence
		 * is not supported.
		 */
	default:
		/* Invalid state */
		pr_debug("Invalid CPU low power state\n");
		return;
	}

	/*
	 * MPUSS bookkeeping should be executed only by the master
	 * CPU, which is the last CPU to go down.
	 */
	if (cpu)
		goto cpu_prepare;
	/*
	 * Check the MPUSS next state and save the GIC if needed;
	 * the GIC is lost during MPU OFF and OSWR.
	 */
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	if (omap4_device_off_read_next_state() &&
			 (omap_type() != OMAP2_DEVICE_TYPE_GP)) {
		/* FIXME: Check if this can be optimised */

		/*
		 * The l3_main_3 interface clock must be enabled for
		 * the secure RAM save operation.
		 */
		if (!l3_main_3_ick->usecount) {
			inst_clk_enab = 1;
			clk_enable(l3_main_3_ick);
		}

		save_secure_all();
		save_gic_wakeupgen_secure();

		if (inst_clk_enab == 1)
			clk_disable(l3_main_3_ick);

		save_ivahd_tesla_regs();
		save_l3instr_regs();

		save_state = 3;
		goto cpu_prepare;
	}

	switch (pwrdm_read_next_pwrst(mpuss_pd)) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		/* No need to save MPUSS context */
		break;
	case PWRDM_POWER_RET:
		/* MPUSS OSWR, logic lost */
		if (pwrdm_read_logic_retst(mpuss_pd) == PWRDM_POWER_OFF) {
			if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
				save_gic_wakeupgen_secure();
				save_l3instr_regs();
			} else {
				save_gic();
				omap4_wakeupgen_save();
			}
			save_state = 2;
		}
		break;
	case PWRDM_POWER_OFF:
		/* MPUSS OFF */
		if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
			/*
			 * The l3_main_3 interface clock must be enabled
			 * for the secure RAM save operation.
			 */
			if (!l3_main_3_ick->usecount) {
				inst_clk_enab = 1;
				clk_enable(l3_main_3_ick);
			}
			save_secure_ram();

			if (inst_clk_enab == 1)
				clk_disable(l3_main_3_ick);

			save_gic_wakeupgen_secure();
			save_ivahd_tesla_regs();
			save_l3instr_regs();
		} else {
			save_gic();
			omap4_wakeupgen_save();
		}
		save_state = 3;
		break;
	default:
		/* Fall through */
		;
	}

	/*
	 * Program the CPU targeted state
	 */
cpu_prepare:
	clear_cpu_prev_pwrst(cpu);
	if (cpu)
		pwrdm_set_next_pwrst(cpu1_pwrdm, power_state);
	else
		pwrdm_set_next_pwrst(cpu0_pwrdm, power_state);
	scu_pwrst_prepare(cpu, power_state);

	if (regset_save_on_suspend && !cpu)
		pm_dbg_regset_save(1);

	/*
	 * Call the low level routine to enter the
	 * targeted power state.
	 */
	__omap4_cpu_suspend(cpu, save_state);
	wakeup_cpu = hard_smp_processor_id();

	if (regset_save_on_suspend && !wakeup_cpu)
		pm_dbg_regset_save(2);

	/*
	 * Restore the CPUx and MPUSS power state to ON, otherwise the
	 * CPUx power domain can transition to the programmed low power
	 * state while doing WFI outside the low power code. On HS devices,
	 * CPUx can do WFI outside the idle thread, which can result in a
	 * power domain transition if the previous state was programmed
	 * to OFF/RET.
	 */
	if (wakeup_cpu) {
		pwrdm_set_next_pwrst(cpu1_pwrdm, PWRDM_POWER_ON);
	} else {
		pwrdm_set_next_pwrst(cpu0_pwrdm, PWRDM_POWER_ON);
		pwrdm_set_next_pwrst(mpuss_pd, PWRDM_POWER_ON);
		omap4_secure_dispatcher(0x21, 4, 0, 0, 0, 0, 0);
	}

	/*
	 * Check the CPUx previous power state
	 */
	if (read_cpu_prev_pwrst(wakeup_cpu) == PWRDM_POWER_OFF) {
		cpu_init();
		restore_mmu_table_entry();
		restore_local_timers(wakeup_cpu);
	}

	/*
	 * Check MPUSS previous power state and enable
	 * GIC if needed.
	 */
	switch (pwrdm_read_prev_pwrst(mpuss_pd)) {
	case PWRDM_POWER_ON:
	case PWRDM_POWER_INACTIVE:
		/* No need to restore */
		break;
	case PWRDM_POWER_RET:
		if (pwrdm_read_prev_logic_pwrst(mpuss_pd) != PWRDM_POWER_OFF)
			break;
		/* fall through in the MPU OSWR case */
	case PWRDM_POWER_OFF:
		/*
		 * Enable GIC distributor
		 */
		if (!wakeup_cpu) {
			if ((omap_type() == OMAP2_DEVICE_TYPE_GP)
					&& omap4_device_off_read_prev_state()) {
				restore_gic();
				omap4_wakeupgen_restore();
			}
			enable_gic_distributor();
			if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
				if (omap4_device_off_read_prev_state())
					restore_ivahd_tesla_regs();
				restore_l3instr_regs();
			}
		}
		/*
		 * Enable the GIC CPU interface.
		 */
		enable_gic_cpu_interface();
		break;
	default:
		;
	}

}
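
A hypothetical call site for the function above, sketching how a CPU-idle or suspend path might invoke it (the wrapper name is illustrative; PWRDM_POWER_OFF is handled by the switch at the top of the function):

static void omap4_idle_enter_off(void)
{
	unsigned int cpu = smp_processor_id();

	/* Request CPU OFF; omap4_enter_lowpower() validates the state */
	omap4_enter_lowpower(cpu, PWRDM_POWER_OFF);
}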
Example 5
/*
 * Initialise OMAP4 MPUSS
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	sar_base = omap4_get_sar_ram_base();

	/* Initialise per-CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
	pm_info->wkup_sar_addr = sar_base + CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
	pm_info->wkup_sar_addr = sar_base + CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
	pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	/* Save device type on scratchpad for low level code to use */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		__raw_writel(1, sar_base + OMAP_TYPE_OFFSET);
	else
		__raw_writel(0, sar_base + OMAP_TYPE_OFFSET);

	save_l2x0_context();

	return 0;
}
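
The per-CPU bookkeeping filled in above could look like the following sketch, reconstructed from the fields this function touches (the field order and the per-CPU declaration are assumptions):

struct omap4_cpu_pm_info {
	void __iomem *scu_sar_addr;	/* SCU power status save area */
	void __iomem *wkup_sar_addr;	/* wakeup (NS PA) address save area */
	void __iomem *l2x0_sar_addr;	/* L2 cache controller save area */
	struct powerdomain *pwrdm;	/* cpuN_pwrdm handle */
};

static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);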
Example 6
/*
 * Interrupt handler for L3 error detection:
 *	1) Identify the clock domain to which the error belongs
 *	2) Identify the slave where the error information is logged
 *	3) Print the logged information
 */
static irqreturn_t l3_interrupt_handler(int irq, void *dev_id)
{
	int inttype, i, j;
	int err_source = 0;
	u32 stderrlog_main, stderrlog_main_reg_val, error_source_reg;
	u32 ctrl_sec_err_status, ctrl_sec_err_status_regval, slave_addr;
	char *source_name;

	/*
	 * Get the type of interrupt:
	 * 0 - Application
	 * 1 - Debug
	 */
	if (irq == OMAP44XX_IRQ_L3_APP)
		inttype = 0;
	else
		inttype = 1;

	for (i = 0; i < 3; i++) {
		/*
		 * Read the regerr register of the clock domain
		 * to determine the source
		 */
		error_source_reg =  readl((l3_base[i] + l3_flagmux_regerr[i]
							+ (inttype << 3)));
		/* Get the corresponding error and analyse */
		if (error_source_reg) {
			/* Identify the source from the error flag register */
			for (j = 0; !err_source; j++)
				err_source = error_source_reg & (1 << j);
			/* j overshoots by one since array offsets are zero-based */
			err_source = j - 1;
			/* Read the stderrlog_main_source from clk domain */
			stderrlog_main = (u32)l3_base[i] +
				(*(l3_targ_stderrlog_main[i] + err_source));
			stderrlog_main_reg_val =  readl(stderrlog_main);

			switch ((stderrlog_main_reg_val & CUSTOM_ERROR)) {
			case STANDARD_ERROR:
				/* check if this is a firewall violation */
				ctrl_sec_err_status = (u32)ctrl_base +
						ctrl_sec_err_stat[inttype];
				ctrl_sec_err_status_regval =
						readl(ctrl_sec_err_status);
				source_name =
				(char *)(*(l3_targ_stderrlog_main_sourcename[i]
					+ err_source));
				slave_addr = stderrlog_main +
						L3_SLAVE_ADDRESS_OFFSET;
				if (!ctrl_sec_err_status_regval) {
					/*
					 * Get the details about the in-band
					 * error (command etc.) and print
					 * them.
					 */
					pr_crit("L3 standard error: SOURCE:%s "
						"at address 0x%x\n",
						source_name, readl(slave_addr));
					dump_stack();
				} else {
					/* Otherwise this is a firewall error */
					if (omap_type() == OMAP2_DEVICE_TYPE_GP)
						omap_fw_error_handler(
								ctrl_sec_err_status,
								ctrl_sec_err_status_regval);
				}
				/* clear the stderr log */
				writel((stderrlog_main_reg_val |
					CLEAR_STDERR_LOG), stderrlog_main);
				break;
			case CUSTOM_ERROR:
				pr_crit("CUSTOM SRESP error with SOURCE:%s\n",
				(char *)(*(l3_targ_stderrlog_main_sourcename[i]
						+ err_source)));
				/* clear the std error log*/
				writel((stderrlog_main_reg_val |
					CLEAR_STDERR_LOG), stderrlog_main);
				dump_stack();
				break;
			default:
				/* Nothing to be handled here as of now */
				break;
			}
			/* Error found, so break out of the for loop */
			break;
		}
	}
	return IRQ_HANDLED;
}
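
The bit-scan loop above walks error_source_reg one bit at a time; the kernel's __ffs() helper from <linux/bitops.h> computes the same index directly. A sketch of the equivalent (safe here because the enclosing if guarantees at least one bit is set):

	/* index of the least significant set bit */
	err_source = __ffs(error_source_reg);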
Example 7
static ssize_t omap4_soc_type_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", omap_types[omap_type()]);
}
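
A show routine like this is typically exposed as a read-only sysfs attribute. A minimal sketch, assuming omap_types[] is a string table indexed by omap_type() (the attribute name and registration site are illustrative):

static struct kobj_attribute omap4_soc_type_attr =
	__ATTR(type, 0444, omap4_soc_type_show, NULL);

/* e.g. in an init routine, against some kobject kobj: */
/* err = sysfs_create_file(kobj, &omap4_soc_type_attr.attr); */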
Example 8
/* Populate the scratchpad structure with restore structure */
void omap3_save_scratchpad_contents(void)
{
	void  __iomem *scratchpad_address;
	u32 arm_context_addr;
	struct omap3_scratchpad scratchpad_contents;
	struct omap3_scratchpad_prcm_block prcm_block_contents;
	struct omap3_scratchpad_sdrc_block sdrc_block_contents;

	/* Populate the Scratchpad contents */
	scratchpad_contents.boot_config_ptr = 0x0;
	if (omap_rev() != OMAP3430_REV_ES3_0 &&
					omap_rev() != OMAP3430_REV_ES3_1)
		scratchpad_contents.public_restore_ptr =
			virt_to_phys(get_restore_pointer());
	else
		scratchpad_contents.public_restore_ptr =
			virt_to_phys(get_es3_restore_pointer());
	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		scratchpad_contents.secure_ram_restore_ptr = 0x0;
	else
		scratchpad_contents.secure_ram_restore_ptr =
			(u32) __pa(omap3_secure_ram_storage);
	scratchpad_contents.sdrc_module_semaphore = 0x0;
	scratchpad_contents.prcm_block_offset = 0x2C;
	scratchpad_contents.sdrc_block_offset = 0x64;

	/* Populate the PRCM block contents */
	prcm_block_contents.prm_clksrc_ctrl = prm_read_mod_reg(OMAP3430_GR_MOD,
			OMAP3_PRM_CLKSRC_CTRL_OFFSET);
	prcm_block_contents.prm_clksel = prm_read_mod_reg(OMAP3430_CCR_MOD,
			OMAP3_PRM_CLKSEL_OFFSET);
	prcm_block_contents.cm_clksel_core =
			cm_read_mod_reg(CORE_MOD, CM_CLKSEL);
	prcm_block_contents.cm_clksel_wkup =
			cm_read_mod_reg(WKUP_MOD, CM_CLKSEL);
	prcm_block_contents.cm_clken_pll =
			cm_read_mod_reg(PLL_MOD, CM_CLKEN);
	prcm_block_contents.cm_autoidle_pll =
			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_AUTOIDLE_PLL);
	prcm_block_contents.cm_clksel1_pll =
			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL1_PLL);
	prcm_block_contents.cm_clksel2_pll =
			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL2_PLL);
	prcm_block_contents.cm_clksel3_pll =
			cm_read_mod_reg(PLL_MOD, OMAP3430_CM_CLKSEL3);
	prcm_block_contents.cm_clken_pll_mpu =
			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKEN_PLL);
	prcm_block_contents.cm_autoidle_pll_mpu =
			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL);
	prcm_block_contents.cm_clksel1_pll_mpu =
			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL);
	prcm_block_contents.cm_clksel2_pll_mpu =
			cm_read_mod_reg(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL);
	prcm_block_contents.prcm_block_size = 0x0;

	/* Populate the SDRC block contents */
	sdrc_block_contents.sysconfig =
			(sdrc_read_reg(SDRC_SYSCONFIG) & 0xFFFF);
	sdrc_block_contents.cs_cfg =
			(sdrc_read_reg(SDRC_CS_CFG) & 0xFFFF);
	sdrc_block_contents.sharing =
			(sdrc_read_reg(SDRC_SHARING) & 0xFFFF);
	sdrc_block_contents.err_type =
			(sdrc_read_reg(SDRC_ERR_TYPE) & 0xFFFF);
	sdrc_block_contents.dll_a_ctrl = sdrc_read_reg(SDRC_DLLA_CTRL);
	sdrc_block_contents.dll_b_ctrl = 0x0;
	/*
	 * Due to OMAP3 erratum 1.142, on EMU/HS devices the SDRC should
	 * be programmed to issue automatic self refresh on a timeout
	 * of AUTO_CNT = 1 prior to any transition to OFF mode.
	 */
	if ((omap_type() != OMAP2_DEVICE_TYPE_GP)
			&& (omap_rev() >= OMAP3430_REV_ES3_0))
		sdrc_block_contents.power = (sdrc_read_reg(SDRC_POWER) &
				~(SDRC_POWER_AUTOCOUNT_MASK|
				SDRC_POWER_CLKCTRL_MASK)) |
				(1 << SDRC_POWER_AUTOCOUNT_SHIFT) |
				SDRC_SELF_REFRESH_ON_AUTOCOUNT;
	else
		sdrc_block_contents.power = sdrc_read_reg(SDRC_POWER);

	sdrc_block_contents.cs_0 = 0x0;
	sdrc_block_contents.mcfg_0 = sdrc_read_reg(SDRC_MCFG_0);
	sdrc_block_contents.mr_0 = (sdrc_read_reg(SDRC_MR_0) & 0xFFFF);
	sdrc_block_contents.emr_1_0 = 0x0;
	sdrc_block_contents.emr_2_0 = 0x0;
	sdrc_block_contents.emr_3_0 = 0x0;
	sdrc_block_contents.actim_ctrla_0 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_A_0);
	sdrc_block_contents.actim_ctrlb_0 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_B_0);
	sdrc_block_contents.rfr_ctrl_0 =
			sdrc_read_reg(SDRC_RFR_CTRL_0);
	sdrc_block_contents.cs_1 = 0x0;
	sdrc_block_contents.mcfg_1 = sdrc_read_reg(SDRC_MCFG_1);
	sdrc_block_contents.mr_1 = sdrc_read_reg(SDRC_MR_1) & 0xFFFF;
	sdrc_block_contents.emr_1_1 = 0x0;
	sdrc_block_contents.emr_2_1 = 0x0;
	sdrc_block_contents.emr_3_1 = 0x0;
	sdrc_block_contents.actim_ctrla_1 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_A_1);
	sdrc_block_contents.actim_ctrlb_1 =
			sdrc_read_reg(SDRC_ACTIM_CTRL_B_1);
	sdrc_block_contents.rfr_ctrl_1 =
			sdrc_read_reg(SDRC_RFR_CTRL_1);
	sdrc_block_contents.dcdl_1_ctrl = 0x0;
	sdrc_block_contents.dcdl_2_ctrl = 0x0;
#ifdef CONFIG_SAMSUNG_KERNEL_DEBUG
	/* reset flag (p:0x480029C4 v:F00029C4) */
	sdrc_block_contents.flags = 0x4D524F4E;
#else
	sdrc_block_contents.flags = 0x0;
#endif /* CONFIG_SAMSUNG_KERNEL_DEBUG */
	sdrc_block_contents.block_size = 0x0;

	arm_context_addr = virt_to_phys(omap3_arm_context);

	/* Copy all the contents to the scratchpad location */
	scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD);
	memcpy_toio(scratchpad_address, &scratchpad_contents,
		 sizeof(scratchpad_contents));
	/* The *_block_offset fields above are byte offsets from the scratchpad base */
	memcpy_toio(scratchpad_address +
		scratchpad_contents.prcm_block_offset,
		&prcm_block_contents, sizeof(prcm_block_contents));
	memcpy_toio(scratchpad_address +
		scratchpad_contents.sdrc_block_offset,
		&sdrc_block_contents, sizeof(sdrc_block_contents));
	/*
	 * Copies the address of the location in SDRAM where ARM
	 * registers get saved during a MPU OFF transition.
	 */
	memcpy_toio(scratchpad_address +
		scratchpad_contents.sdrc_block_offset +
		sizeof(sdrc_block_contents), &arm_context_addr, 4);
}
Example 9
/*
 * Initialise the wakeupgen module.
 */
int __init omap_wakeupgen_init(void)
{
	int i;
	unsigned int boot_cpu = smp_processor_id();
	unsigned int max_spi_reg;

	/* Not supported on OMAP4 ES1.0 silicon */
	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "WakeupGen: Not supported on OMAP4430 ES1.0\n");
		return -EPERM;
	}

	/* Static mapping, never released */
	wakeupgen_base = ioremap(OMAP_WKUPGEN_BASE, SZ_4K);
	if (WARN_ON(!wakeupgen_base))
		return -ENOMEM;

	if (cpu_is_omap44xx()) {
		irq_banks = OMAP4_NR_BANKS;
		max_irqs = OMAP4_NR_IRQS;
		secure_api_index = OMAP4_HAL_SAVEGIC_INDEX;
		secure_hw_api_index = OMAP4_HAL_SAVEHW_INDEX;
		secure_hal_save_all_api_index = OMAP4_HAL_SAVEALL_INDEX;
		secure_ram_api_index  = OMAP4_HAL_SAVESECURERAM_INDEX;
	} else if (cpu_is_omap54xx()) {
		secure_api_index = OMAP5_HAL_SAVEGIC_INDEX;
		secure_hw_api_index = OMAP5_HAL_SAVEHW_INDEX;
		secure_hal_save_all_api_index = OMAP5_HAL_SAVEALL_INDEX;
		secure_ram_api_index  = OMAP5_HAL_SAVESECURERAM_INDEX;
	}

	/* Clear all IRQ bitmasks at WakeupGen level */
	for (i = 0; i < irq_banks; i++) {
		wakeupgen_writel(0, i, CPU0_ID);
		wakeupgen_writel(0, i, CPU1_ID);
	}

	/*
	 * Override GIC architecture-specific functions to chain the
	 * OMAP WakeupGen interrupt controller along with the GIC.
	 */
	gic_arch_extn.irq_mask = wakeupgen_mask;
	gic_arch_extn.irq_unmask = wakeupgen_unmask;
	gic_arch_extn.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE;

	/*
	 * FIXME: Add support to set_smp_affinity() once the core
	 * GIC code has necessary hooks in place.
	 */

	/* Associate all IRQs with the boot CPU, as the GIC init does. */
	for (i = 0; i < max_irqs; i++)
		irq_target_cpu[i] = boot_cpu;

	/*
	 * Find out how many interrupts are supported.
	 * OMAP4 supports a max of 128 SPIs whereas the GIC can support
	 * up to 1020 interrupt sources. On OMAP4, the maximum SPIs are
	 * fused in the DIST_CTR bit-fields as 128. Hence the code is safe
	 * from reserved register writes since it's well within 1020.
	 */
	max_spi_reg = gic_readl(GIC_DIST_CTR, 0) & 0x1f;

	spi_irq_banks = min(max_spi_reg, irq_banks);

	/*
	 * Set the CPU0 GIC backup flag permanently for OMAP4460 GP;
	 * this is needed because of a ROM code bug that breaks the
	 * GIC during wakeup from device off. This erratum fix also
	 * clears the GIC save area during init to prevent restoring
	 * garbage to the GIC.
	 */
	if (cpu_is_omap446x() && omap_type() == OMAP2_DEVICE_TYPE_GP)
		pm44xx_errata |= PM_OMAP4_ROM_CPU1_BACKUP_ERRATUM_xxx;

	if (cpu_is_omap44xx() && (omap_type() == OMAP2_DEVICE_TYPE_GP)) {
		sar_base = omap4_get_sar_ram_base();

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_CPU1_BACKUP_ERRATUM_xxx))
			for (i = SAR_BACKUP_STATUS_OFFSET;
			     i < WAKEUPGENENB_OFFSET_CPU0; i += 4)
				sar_writel(0, i, 0);

		sar_writel(GIC_ISR_NON_SECURE, SAR_ICDISR_CPU0_OFFSET, 0);
		sar_writel(GIC_ISR_NON_SECURE, SAR_ICDISR_CPU1_OFFSET, 0);
		for (i = 0; i < max_spi_reg; i++)
			sar_writel(GIC_ISR_NON_SECURE, SAR_ICDISR_SPI_OFFSET,
				   i);

		if (IS_PM44XX_ERRATUM(PM_OMAP4_ROM_CPU1_BACKUP_ERRATUM_xxx))
			__raw_writel(SAR_BACKUP_STATUS_GIC_CPU0,
				     sar_base + SAR_BACKUP_STATUS_OFFSET);

	} else {
		l3_main_3_oh = omap_hwmod_lookup("l3_main_3");
		if (!l3_main_3_oh)
			pr_err("%s: failed to get l3_main_3_oh\n", __func__);
	}

	irq_hotplug_init();
	irq_pm_init();

	return 0;
}
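
The wakeupgen_writel() and sar_writel() accessors used above are thin MMIO wrappers; a sketch of their likely shape, with the register-bank offsets as assumptions (each CPU owns a bank of 32-bit enable registers, and idx selects the word):

static inline void wakeupgen_writel(u32 val, u8 idx, u32 cpu)
{
	__raw_writel(val, wakeupgen_base + OMAP_WKG_ENB_A_0 +
				(cpu * CPU_ENA_OFFSET) + (idx * 4));
}

static inline void sar_writel(u32 val, u32 offset, u8 idx)
{
	__raw_writel(val, sar_base + offset + (idx * 4));
}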
Example 10
/*
 * Initialise OMAP4 MPUSS
 */
int __init omap4_mpuss_init(void)
{
	struct omap4_cpu_pm_info *pm_info;

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
		return -ENODEV;
	}

	if (cpu_is_omap44xx())
		sar_base = omap4_get_sar_ram_base();

	/* Initialise per-CPU PM information */
	pm_info = &per_cpu(omap4_pm_info, 0x0);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET0;
		pm_info->wkup_sar_addr = sar_base +
					CPU0_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET0;
	}
	pm_info->pwrdm = pwrdm_lookup("cpu0_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU0 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(0);

	/* Initialise CPU0 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	pm_info = &per_cpu(omap4_pm_info, 0x1);
	if (sar_base) {
		pm_info->scu_sar_addr = sar_base + SCU_OFFSET1;
		pm_info->wkup_sar_addr = sar_base +
					CPU1_WAKEUP_NS_PA_ADDR_OFFSET;
		pm_info->l2x0_sar_addr = sar_base + L2X0_SAVE_OFFSET1;
	}

	pm_info->pwrdm = pwrdm_lookup("cpu1_pwrdm");
	if (!pm_info->pwrdm) {
		pr_err("Lookup failed for CPU1 pwrdm\n");
		return -ENODEV;
	}

	/* Clear CPU previous power domain state */
	pwrdm_clear_all_prev_pwrst(pm_info->pwrdm);
	cpu_clear_prev_logic_pwrst(1);

	/* Initialise CPU1 power domain state to ON */
	pwrdm_set_next_pwrst(pm_info->pwrdm, PWRDM_POWER_ON);

	mpuss_pd = pwrdm_lookup("mpu_pwrdm");
	if (!mpuss_pd) {
		pr_err("Failed to lookup MPUSS power domain\n");
		return -ENODEV;
	}
	pwrdm_clear_all_prev_pwrst(mpuss_pd);
	mpuss_clear_prev_logic_pwrst();

	if (sar_base) {
		/* Save device type on scratchpad for low level code to use */
		writel_relaxed((omap_type() != OMAP2_DEVICE_TYPE_GP) ? 1 : 0,
			       sar_base + OMAP_TYPE_OFFSET);
		save_l2x0_context();
	}

	if (cpu_is_omap44xx()) {
		omap_pm_ops.finish_suspend = omap4_finish_suspend;
		omap_pm_ops.resume = omap4_cpu_resume;
		omap_pm_ops.scu_prepare = scu_pwrst_prepare;
		omap_pm_ops.hotplug_restart = omap4_secondary_startup;
		cpu_context_offset = OMAP4_RM_CPU0_CPU0_CONTEXT_OFFSET;
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		cpu_context_offset = OMAP54XX_RM_CPU0_CPU0_CONTEXT_OFFSET;
		enable_mercury_retention_mode();
	}

	if (cpu_is_omap446x())
		omap_pm_ops.hotplug_restart = omap4460_secondary_startup;

	return 0;
}
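
The omap_pm_ops table populated at the end of this function is a small dispatch structure selecting SoC-specific low-level routines; a plausible shape, inferred from the assignments above (the exact field signatures are assumptions):

struct omap_pm_ops {
	int (*finish_suspend)(unsigned long cpu_state);
	void (*resume)(void);
	void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
	void (*hotplug_restart)(void);
};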
Example 11
static int __init omap_l2_cache_init(void)
{
	u32 aux_ctrl = 0;
	u32 por_ctrl = 0;
	u32 lockdown = 0;
	bool mpu_prefetch_disable_errata = false;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

#ifdef CONFIG_OMAP_ALLOW_OSWR
	if (omap_rev() == OMAP4460_REV_ES1_0)
		mpu_prefetch_disable_errata = true;
#endif

	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	if (WARN_ON(!l2cache_base))
		return -ENODEV;
	/*
	 * 16-way associativity, parity disabled
	 * Way size - 32KB (es1.0)
	 * Way size - 64KB (es2.0 +)
	 */
	aux_ctrl = readl_relaxed(l2cache_base + L2X0_AUX_CTRL);

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
		goto skip_aux_por_api;
	}

	/*
	 * Drop the instruction prefetch hint since it degrades
	 * performance.
	 */
	aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
		(1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
		(1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));

	if (!mpu_prefetch_disable_errata)
		aux_ctrl |= (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT);

	omap_smc1(0x109, aux_ctrl);

	/* Setup POR Control register */
	por_ctrl = readl_relaxed(l2cache_base + L2X0_PREFETCH_CTRL);

	/*
	 * Double linefill is available only on the OMAP4460 L2X0.
	 * It may cause single cache line memory corruption, so leave it
	 * disabled on all devices.
	 */
	por_ctrl &= ~(1 << L2X0_PREFETCH_DOUBLE_LINEFILL_SHIFT);
	if (!mpu_prefetch_disable_errata) {
		por_ctrl &= ~L2X0_POR_OFFSET_MASK;
		por_ctrl |= L2X0_POR_OFFSET_VALUE;
	}

	/* Set POR through PPA service only in EMU/HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP)
		omap4_secure_dispatcher(PPA_SERVICE_PL310_POR, 0x7, 1,
				por_ctrl, 0, 0, 0);
	else if (omap_rev() >= OMAP4430_REV_ES2_2)
		omap_smc1(0x113, por_ctrl);

	/*
	 * FIXME: Temporary WA for OMAP4460 stability issue.
	 * Lock down specific L2 cache ways, which makes the effective
	 * L2 size 512 KB instead of 1 MB.
	 */
	if (omap_rev() == OMAP4460_REV_ES1_0) {
		lockdown = 0xa5a5;
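		/*
		 * 0xa5a5 = 0b1010010110100101: 8 of the 16 way bits set,
		 * locking half of the ways (1 MB -> 512 KB effective).
		 */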
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_D0);
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_D1);
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_I0);
		writel_relaxed(lockdown, l2cache_base + L2X0_LOCKDOWN_WAY_I1);
	}

skip_aux_por_api:
	/* Enable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x1);

	l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK);

	/*
	 * Override the default outer_cache.disable with an OMAP4
	 * specific one.
	 */
	outer_cache.disable = omap4_l2x0_disable;
	outer_cache.set_debug = omap4_l2x0_set_debug;

	return 0;
}
Example 12
/*
 * SAR RAM used to save and restore the HW
 * context in low power modes
 */
static int __init omap4_sar_ram_init(void)
{
	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

	/*
	 * Static mapping, never released. The actual SAR area used is 8K,
	 * but it is spaced over a 16K address range and parts of it are
	 * reserved.
	 */
	sar_ram_base = ioremap(OMAP44XX_SAR_RAM_BASE, SZ_16K);
	BUG_ON(!sar_ram_base);

	/*
	 * All these are static mappings, so ioremap() will
	 * just return the mapped VA.
	 */
	omap4_sar_modules[EMIF1_INDEX] = ioremap(OMAP44XX_EMIF1, SZ_1M);
	BUG_ON(!omap4_sar_modules[EMIF1_INDEX]);
	omap4_sar_modules[EMIF2_INDEX] = ioremap(OMAP44XX_EMIF2, SZ_1M);
	BUG_ON(!omap4_sar_modules[EMIF2_INDEX]);
	omap4_sar_modules[DMM_INDEX] = ioremap(OMAP44XX_DMM_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[DMM_INDEX]);
	omap4_sar_modules[CM1_INDEX] = ioremap(OMAP4430_CM1_BASE, SZ_8K);
	BUG_ON(!omap4_sar_modules[CM1_INDEX]);
	omap4_sar_modules[CM2_INDEX] = ioremap(OMAP4430_CM2_BASE, SZ_8K);
	BUG_ON(!omap4_sar_modules[CM2_INDEX]);
	omap4_sar_modules[C2C_INDEX] = ioremap(OMAP44XX_C2C_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[C2C_INDEX]);
	omap4_sar_modules[CTRL_MODULE_PAD_CORE_INDEX] =
	    ioremap(OMAP443X_CTRL_BASE, SZ_4K);
	BUG_ON(!omap4_sar_modules[CTRL_MODULE_PAD_CORE_INDEX]);
	omap4_sar_modules[L3_CLK1_INDEX] = ioremap(L3_44XX_BASE_CLK1, SZ_1M);
	BUG_ON(!omap4_sar_modules[L3_CLK1_INDEX]);
	omap4_sar_modules[L3_CLK2_INDEX] = ioremap(L3_44XX_BASE_CLK2, SZ_1M);
	BUG_ON(!omap4_sar_modules[L3_CLK2_INDEX]);
	omap4_sar_modules[L3_CLK3_INDEX] = ioremap(L3_44XX_BASE_CLK3, SZ_1M);
	BUG_ON(!omap4_sar_modules[L3_CLK3_INDEX]);
	omap4_sar_modules[USBTLL_INDEX] = ioremap(OMAP44XX_USBTLL_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[USBTLL_INDEX]);
	omap4_sar_modules[UHH_INDEX] = ioremap(OMAP44XX_UHH_CONFIG_BASE, SZ_1M);
	BUG_ON(!omap4_sar_modules[UHH_INDEX]);
	omap4_sar_modules[L4CORE_INDEX] = ioremap(L4_44XX_PHYS, SZ_4M);
	BUG_ON(!omap4_sar_modules[L4CORE_INDEX]);
	omap4_sar_modules[L4PER_INDEX] = ioremap(L4_PER_44XX_PHYS, SZ_4M);
	BUG_ON(!omap4_sar_modules[L4PER_INDEX]);

	/*
	 * SAR BANK3 contains all the firewall settings, and on HS devices
	 * it is saved through the secure API. On GP devices these registers
	 * are meaningless, but they still need to be saved; otherwise the
	 * auto-restore phase DMA takes an abort. Hence save these contents
	 * only once at init to avoid the issue while waking up from
	 * device OFF.
	 */
	if (omap_type() == OMAP2_DEVICE_TYPE_GP)
		save_sar_bank3();
	/*
	 * Work around for OMAP443x Errata i632: "LPDDR2 Corruption After OFF
	 * Mode Transition When CS1 Is Used On EMIF":
	 * Overwrite EMIF1/EMIF2
	 * SECURE_EMIF1_SDRAM_CONFIG2_REG
	 * SECURE_EMIF2_SDRAM_CONFIG2_REG
	 */
	if (cpu_is_omap443x()) {
		void __iomem *secure_ctrl_mod;

		secure_ctrl_mod = ioremap(OMAP4_CTRL_MODULE_WKUP, SZ_4K);
		BUG_ON(!secure_ctrl_mod);

		__raw_writel(0x10, secure_ctrl_mod +
				OMAP4_CTRL_SECURE_EMIF1_SDRAM_CONFIG2_REG);
		__raw_writel(0x10, secure_ctrl_mod +
				OMAP4_CTRL_SECURE_EMIF2_SDRAM_CONFIG2_REG);
		wmb();
		iounmap(secure_ctrl_mod);
	}

	/*
	 * The L3INIT power domain and clocks are needed for the SAR
	 * save phase.
	 */
	l3init_pwrdm = pwrdm_lookup("l3init_pwrdm");
	if (!l3init_pwrdm)
		pr_err("Failed to get l3init_pwrdm\n");

	l3init_clkdm = clkdm_lookup("l3_init_clkdm");
	if (!l3init_clkdm)
		pr_err("Failed to get l3_init_clkdm\n");

	usb_host_ck = clk_get(NULL, "usb_host_hs_fck");
	if (IS_ERR(usb_host_ck))
		pr_err("Could not get usb_host_ck\n");

	usb_tll_ck = clk_get(NULL, "usb_tll_hs_ick");
	if (IS_ERR(usb_tll_ck))
		pr_err("Could not get usb_tll_ck\n");

	return 0;
}
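
Other code in this section (e.g. omap4_mpuss_init() and omap_wakeupgen_init()) retrieves this mapping via an accessor; a minimal sketch consistent with that usage:

void __iomem *omap4_get_sar_ram_base(void)
{
	return sar_ram_base;
}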
Example 13
static int __init omap_l2_cache_init(void)
{
	u32 l2x0_auxctrl;
	u32 l2x0_por;
	u32 l2x0_lockdown;

	/*
	 * To avoid code running on other OMAPs in
	 * multi-omap builds
	 */
	if (!cpu_is_omap44xx())
		return -ENODEV;

	/* Static mapping, never released */
	l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
	BUG_ON(!l2cache_base);

	if (omap_rev() == OMAP4430_REV_ES1_0) {
		l2x0_auxctrl = OMAP443X_L2X0_AUXCTL_VALUE_ES1;
		goto skip_auxctlr;
	}

	if (cpu_is_omap446x()) {
		if (omap_rev() == OMAP4460_REV_ES1_0) {
			l2x0_auxctrl = OMAP446X_L2X0_AUXCTL_VALUE_ES1;
			l2x0_por = OMAP446X_PL310_POR_ES1;
			l2x0_lockdown = 0xa5a5;
		} else {
			l2x0_auxctrl = OMAP446X_L2X0_AUXCTL_VALUE;
			l2x0_por = OMAP446X_PL310_POR;
			l2x0_lockdown = 0;
		}
	} else {
		l2x0_auxctrl = OMAP443X_L2X0_AUXCTL_VALUE;
		l2x0_por = OMAP443X_PL310_POR;
		l2x0_lockdown = 0;
	}

	/* Set POR through PPA service only in EMU/HS devices */
	if (omap_type() != OMAP2_DEVICE_TYPE_GP) {
		omap4_secure_dispatcher(
				PPA_SERVICE_PL310_POR, 0x7, 1,
				l2x0_por, 0, 0, 0);
	} else if (omap_rev() > OMAP4430_REV_ES2_1)
			omap_smc1(0x113, l2x0_por);

	/*
	 * FIXME: Temporary WA for the OMAP4460 stability issue.
	 * For OMAP4460 the effective L2X0 size = 512 KB with this WA.
	 */
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x900);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x908);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x904);
	writel_relaxed(l2x0_lockdown, l2cache_base + 0x90C);

	/*
	 * Double linefill, BRESP enabled, $I and $D prefetch ON,
	 * Share-override = 1, NS lockdown enabled.
	 */
	omap_smc1(0x109, l2x0_auxctrl);

skip_auxctlr:
	/* Enable PL310 L2 Cache controller */
	omap_smc1(0x102, 0x1);

	/*
	 * 32KB way size, 16-way associativity,
	 * parity disabled
	 */
	l2x0_init(l2cache_base, l2x0_auxctrl, 0xd0000fff);

	return 0;
}
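
The raw offsets written in the lockdown workaround above correspond to the PL310 way lockdown registers that Example 11 referenced by name; assuming the standard PL310 register map, they pair up as:

#define L2X0_LOCKDOWN_WAY_D0	0x900	/* data ways, CPU0 */
#define L2X0_LOCKDOWN_WAY_I0	0x904	/* instruction ways, CPU0 */
#define L2X0_LOCKDOWN_WAY_D1	0x908	/* data ways, CPU1 */
#define L2X0_LOCKDOWN_WAY_I1	0x90C	/* instruction ways, CPU1 */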