Example #1
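/*
 * MMC1 "before_set_reg" hook: applies the slot remux, then programs the
 * ACTIVE_OVERWRITE, clock-mux and PBIAS speed/power-down control bits
 * before the card regulator is switched.
 */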
static void omap_hsmmc1_before_set_reg(struct device *dev, int slot,
				  int power_on, int vdd)
{
	u32 reg, prog_io;
	struct omap_mmc_platform_data *mmc = dev->platform_data;

	if (mmc->slots[0].remux)
		mmc->slots[0].remux(dev, slot, power_on);

	if (power_on) {
		if (cpu_is_omap2430()) {
			reg = omap_ctrl_readl(OMAP243X_CONTROL_DEVCONF1);
			if ((1 << vdd) >= MMC_VDD_30_31)
				reg |= OMAP243X_MMC1_ACTIVE_OVERWRITE;
			else
				reg &= ~OMAP243X_MMC1_ACTIVE_OVERWRITE;
			omap_ctrl_writel(reg, OMAP243X_CONTROL_DEVCONF1);
		}

		if (mmc->slots[0].internal_clock) {
			reg = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
			reg |= OMAP2_MMCSDIO1ADPCLKISEL;
			omap_ctrl_writel(reg, OMAP2_CONTROL_DEVCONF0);
		}

		reg = omap_ctrl_readl(control_pbias_offset);
		if (cpu_is_omap3630()) {
			/* Set MMC I/O to 52 MHz */
			prog_io = omap_ctrl_readl(OMAP343X_CONTROL_PROG_IO1);
			prog_io |= OMAP3630_PRG_SDMMC1_SPEEDCTRL;
			omap_ctrl_writel(prog_io, OMAP343X_CONTROL_PROG_IO1);
		} else {
			reg |= OMAP2_PBIASSPEEDCTRL0;
		}
		reg &= ~OMAP2_PBIASLITEPWRDNZ0;
		omap_ctrl_writel(reg, control_pbias_offset);
	} else {
		reg = omap_ctrl_readl(control_pbias_offset);
		reg &= ~OMAP2_PBIASLITEPWRDNZ0;
		omap_ctrl_writel(reg, control_pbias_offset);
	}
}
Example #2
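/*
 * Early init common to OMAP2/3/4: power and clock domains, hwmods, the
 * clock framework, early serial, SDRC and GPMC.
 */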
void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
				 struct omap_sdrc_params *sdrc_cs1)
{
	u8 skip_setup_idle = 0;

	pwrdm_init(powerdomains_omap);
	clkdm_init(clockdomains_omap, clkdm_autodeps);
	if (cpu_is_omap242x())
		omap2420_hwmod_init();
	else if (cpu_is_omap243x())
		omap2430_hwmod_init();
	else if (cpu_is_omap34xx())
		omap3xxx_hwmod_init();
	/* The OPP tables have to be registered before a clk init */
	omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);

	if (cpu_is_omap2420())
		omap2420_clk_init();
	else if (cpu_is_omap2430())
		omap2430_clk_init();
	else if (cpu_is_omap34xx())
		omap3xxx_clk_init();
	else if (cpu_is_omap44xx())
		omap4xxx_clk_init();
	else
		pr_err("Could not init clock framework - unknown CPU\n");

	omap_serial_early_init();

#ifndef CONFIG_PM_RUNTIME
	skip_setup_idle = 1;
#endif
	if (cpu_is_omap24xx() || cpu_is_omap34xx())   /* FIXME: OMAP4 */
		omap_hwmod_late_init(skip_setup_idle);

	if (cpu_is_omap24xx() || cpu_is_omap34xx()) {
		omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
		_omap2_init_reprogram_sdrc();
	}
	gpmc_init();
}
Example #3
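/* Disable the functional, interface and (on OMAP2430) debounce clocks. */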
static void omap_hsmmc_disable_clks(struct mmc_omap_host *host)
{
	unsigned long flags;

	spin_lock_irqsave(&host->clk_lock, flags);

	if (!host->clks_enabled)
		goto done;

	clk_disable(host->fclk);
	clk_disable(host->iclk);
	host->clks_enabled = 0;
	if (cpu_is_omap2430()) {
		if (host->dbclk_enabled) {
			clk_disable(host->dbclk);
			host->dbclk_enabled = 0;
		}
	}
done:
	spin_unlock_irqrestore(&host->clk_lock, flags);
}
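/*
 * Pick the 32KHz sync counter base for the detected OMAP and register
 * the corresponding platform device.
 */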
static void omap_init_32k(void)
{
	if (cpu_is_omap2420()) {
		omap_32k_resources[0].start = OMAP2420_32KSYNCT_BASE;
		omap_32k_resources[0].end = OMAP2420_32KSYNCT_BASE + SZ_4K;
	} else if (cpu_is_omap2430()) {
		omap_32k_resources[0].start = OMAP2430_32KSYNCT_BASE;
		omap_32k_resources[0].end = OMAP2430_32KSYNCT_BASE + SZ_4K;
	} else if (cpu_is_omap34xx()) {
		omap_32k_resources[0].start = OMAP3430_32KSYNCT_BASE;
		omap_32k_resources[0].end = OMAP3430_32KSYNCT_BASE + SZ_4K;
	} else if (cpu_is_omap44xx()) {
		omap_32k_resources[0].start = OMAP4430_32KSYNCT_BASE;
		omap_32k_resources[0].end = OMAP4430_32KSYNCT_BASE + SZ_4K;
	} else {        /* not supported */
		return;
	}

	(void) platform_device_register(&omap_32k_device);
}
Example #5
File: dma.c Project: christinaa/Daisy
/**
 * @brief omap_dma_set_prio_lch : Set channel wise priority settings
 *
 * @param lch
 * @param read_prio - Read priority
 * @param write_prio - Write priority
 * Both of the above can be set with one of the following values :
 * 	DMA_CH_PRIO_HIGH/DMA_CH_PRIO_LOW
 */
int
omap_dma_set_prio_lch(int lch, unsigned char read_prio,
					  unsigned char write_prio)
{
	u32 l;
	
	if (unlikely((lch < 0 || lch >= dma_lch_count))) {
		IOLog("Invalid channel id\n");
		return -EINVAL;
	}
	l = dma_read(CCR(lch));
	l &= ~((1 << 6) | (1 << 26));
	if (cpu_is_omap2430() || cpu_is_omap34xx() ||  cpu_is_omap44xx())
		l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
	else
		l |= ((read_prio & 0x1) << 6);
	
	dma_write(l, CCR(lch));
	
	return 0;
}
Example #6
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	u32 reg;

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else {
		control_pbias_offset =
			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK|
			OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	}

// From GB, MMC0 and MMC1 were swapped. [START]
#ifdef TI_FS_MMC
	for (; controllers->mmc; controllers++)
		omap_init_hsmmc(controllers, controllers->mmc);
#else
	omap_init_hsmmc(&controllers[1], controllers[1].mmc);
	omap_init_hsmmc(&controllers[0], controllers[0].mmc);
	omap_init_hsmmc(&controllers[2], controllers[2].mmc);
#endif // TI_FS_MMC
// From GB, MMC0 and MMC1 were swapped. [END]
	sd_ldo_init();

}
Example #7
/* Routine to resume the MMC device */
static int omap_mmc_resume(struct platform_device *pdev)
{
	int ret = 0;
	struct mmc_omap_host *host = platform_get_drvdata(pdev);

	if (host && !host->suspended)
		return 0;

	if (host) {

		ret = mmc_clk_try_enable(host);
		if (ret != 0)
			goto clk_en_err;

		if (cpu_is_omap2430()) {
			if (clk_enable(host->dbclk) != 0)
				dev_dbg(mmc_dev(host->mmc),
					"Enabling debounce clk failed\n");
		}

		if (host->pdata->resume) {
			ret = host->pdata->resume(&pdev->dev, host->slot_id);
			if (ret)
				dev_dbg(mmc_dev(host->mmc),
					"Unmask interrupt failed\n");
		}

		/* Notify the core to resume the host */
		ret = mmc_resume_host(host->mmc);
		if (ret == 0)
			host->suspended = 0;
	}

	return ret;

clk_en_err:
	dev_dbg(mmc_dev(host->mmc),
		"Failed to enable MMC clocks during resume\n");
	return ret;
}
Example #8
void __init omap_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	u32 reg;

	if (omap_hsmmc_done)
		return;

	omap_hsmmc_done = 1;

	if (!(cpu_is_omap44xx() || cpu_is_omap54xx())) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else if (cpu_is_omap44xx()) {
		control_pbias_offset =
			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		reg |= (OMAP4_SDMMC1_DR0_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	} else if (cpu_is_omap54xx()) {
		control_pbias_offset =
			OMAP5_CTRL_MODULE_CORE_PAD_CONTROL_PBIAS;
	}

	for (; controllers->mmc; controllers++)
		omap_hsmmc_init_one(controllers, controllers->mmc);

}
Example #9
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	u32 reg;

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}

		if (machine_is_omap3_pandora()) {
			/* needed for gpio_126 - gpio_129 to work correctly */
			reg = omap_ctrl_readl(control_pbias_offset);
			reg &= ~OMAP343X_PBIASLITEVMODE1;
			omap_ctrl_writel(reg, control_pbias_offset);
		}
	} else {
		control_pbias_offset =
			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		reg |= (OMAP4_SDMMC1_DR0_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	}

	for (; controllers->mmc; controllers++)
		omap_init_hsmmc(controllers, controllers->mmc);

}
Example #10
void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
				 struct omap_sdrc_params *sdrc_cs1,
				 struct omap_opp *mpu_opps,
				 struct omap_opp *dsp_opps,
				 struct omap_opp *l3_opps)
{
	struct omap_hwmod **hwmods = NULL;

	if (cpu_is_omap2420())
		hwmods = omap2420_hwmods;
	else if (cpu_is_omap2430())
		hwmods = omap2430_hwmods;
	else if (cpu_is_omap34xx())
		hwmods = omap34xx_hwmods;
	else if (cpu_is_omap44xx())
		hwmods = omap44xx_hwmods;

	pwrdm_init(powerdomains_omap);
	clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);
	omap_hwmod_init(hwmods);
#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
	/* The OPP tables have to be registered before a clk init */
	omap2_mux_init();
	omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
#endif
	omap2_clk_init();
	omap_serial_early_init();
	omap_hwmod_late_init();
#ifndef CONFIG_ARCH_OMAP4
	omap_pm_if_init();
	omap2_sdrc_init(sdrc_cs0, sdrc_cs1);
	_omap2_init_reprogram_sdrc();
#endif
	gpmc_init();
	omap_gpio_early_init();
	omap2_dm_timer_early_init();
}
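/*
 * Program the PBIAS/pad control registers for MMC1 and register each
 * controller described in the board's omap2_hsmmc_info table.
 */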
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	u32 reg;

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else {
		control_pbias_offset =
			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		if ((machine_is_omap_hummingbird() &&
				(system_rev > HUMMINGBIRD_EVT0B)) ||
				machine_is_omap_ovation())
			reg |= (OMAP4_SDMMC1_DR0_SPEEDCTRL_MASK |
				OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
				OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		else
			reg |= (OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
				OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	}

	for (; controllers->mmc; controllers++)
		omap_init_hsmmc(controllers, controllers->mmc);

}
Example #12
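/*
 * Register the McSPI controllers present on the running OMAP variant,
 * fixing up the OMAP4 register resources first.
 */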
static void omap_init_mcspi(void)
{
	if (cpu_is_omap44xx()) {
		omap2_mcspi1_resources[0].start	= OMAP4_MCSPI1_BASE;
		omap2_mcspi1_resources[0].end	= OMAP4_MCSPI1_BASE + 0xff;
		omap2_mcspi2_resources[0].start	= OMAP4_MCSPI2_BASE;
		omap2_mcspi2_resources[0].end	= OMAP4_MCSPI2_BASE + 0xff;
		omap2_mcspi3_resources[0].start	= OMAP4_MCSPI3_BASE;
		omap2_mcspi3_resources[0].end	= OMAP4_MCSPI3_BASE + 0xff;
		omap2_mcspi4_resources[0].start	= OMAP4_MCSPI4_BASE;
		omap2_mcspi4_resources[0].end	= OMAP4_MCSPI4_BASE + 0xff;
	}
	platform_device_register(&omap2_mcspi1);
	platform_device_register(&omap2_mcspi2);
#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
	defined(CONFIG_ARCH_OMAP4)
	if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
		platform_device_register(&omap2_mcspi3);
#endif
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
	if (cpu_is_omap343x() || cpu_is_omap44xx())
		platform_device_register(&omap2_mcspi4);
#endif
}
Example #13
File: clock24xx.c Project: 274914765/C
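/*
 * Register the 24xx clocks valid for the detected chip, latch the
 * DPLL/MPU rate chosen by the bootloader and enable the boot-time clocks.
 */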
int __init omap2_clk_init(void)
{
    struct prcm_config *prcm;
    struct clk **clkp;
    u32 clkrate;

    if (cpu_is_omap242x())
        cpu_mask = RATE_IN_242X;
    else if (cpu_is_omap2430())
        cpu_mask = RATE_IN_243X;

    clk_init(&omap2_clk_functions);

    omap2_osc_clk_recalc(&osc_ck);
    omap2_sys_clk_recalc(&sys_ck);

    for (clkp = onchip_24xx_clks;
         clkp < onchip_24xx_clks + ARRAY_SIZE(onchip_24xx_clks);
         clkp++) {

        if ((*clkp)->flags & CLOCK_IN_OMAP242X && cpu_is_omap2420()) {
            clk_register(*clkp);
            continue;
        }

        if ((*clkp)->flags & CLOCK_IN_OMAP243X && cpu_is_omap2430()) {
            clk_register(*clkp);
            continue;
        }
    }

    /* Check the MPU rate set by bootloader */
    clkrate = omap2_get_dpll_rate_24xx(&dpll_ck);
    for (prcm = rate_table; prcm->mpu_speed; prcm++) {
        if (!(prcm->flags & cpu_mask))
            continue;
        if (prcm->xtal_speed != sys_ck.rate)
            continue;
        if (prcm->dpll_speed <= clkrate)
             break;
    }
    curr_prcm_set = prcm;

    recalculate_root_clocks();

    printk(KERN_INFO "Clocking rate (Crystal/DPLL/MPU): "
           "%ld.%01ld/%ld/%ld MHz\n",
           (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
           (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000));

    /*
     * Only enable those clocks we will need, let the drivers
     * enable other clocks as necessary
     */
    clk_enable_init_clocks();

    /* Avoid sleeping during omap2_clk_prepare_for_reboot() */
    vclk = clk_get(NULL, "virt_prcm_set");
    sclk = clk_get(NULL, "sys_ck");

    return 0;
}
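/*
 * Enable the interface and functional clocks (plus the OMAP2430 debounce
 * clock) and restore the controller context if it was lost while off.
 */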
static int omap_hsmmc_enable_clks(struct mmc_omap_host *host)
{
	unsigned long flags, timeout;
	int ret = 0;

	spin_lock_irqsave(&host->clk_lock, flags);

	if (host->clks_enabled)
		goto done;

	ret = clk_enable(host->iclk);
	if (ret)
		goto clk_en_err1;

	ret = clk_enable(host->fclk);
	if (ret)
		goto clk_en_err2;

	host->clks_enabled = 1;

	if (cpu_is_omap2430()) {
		/*
		 * MMC can still work without debounce clock.
		 */
		if (IS_ERR(host->dbclk))
			dev_warn(mmc_dev(host->mmc),
			"Failed to get debounce clock\n");
		else
			if (clk_enable(host->dbclk) != 0)
				dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
						" clk failed\n");
			else
				host->dbclk_enabled = 1;
	}

	if (!(host->pdata->context_loss) ||
		(host->pdata->context_loss(host->dev) != host->off_counter)) {
			/* Coming out of OFF:
			 * The SRA bit of SYSCTL reg has a wrong reset
			 * value.
			 * The bit resets automatically in subsequent
			 * reads. Ideally it should have been 0 as per
			 * the reset value of the register.
			 * Wait for the reset to complete */
			timeout = jiffies +
				msecs_to_jiffies(MMC_TIMEOUT_MS);
			while ((OMAP_HSMMC_READ(host->base, SYSSTATUS)
				& RESETDONE) != RESETDONE
				&& time_before(jiffies, timeout))
				;
			omap2_hsmmc_restore_ctx(host);
	}

done:
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return ret;
clk_en_err2:
	clk_disable(host->iclk);
clk_en_err1:
	dev_dbg(mmc_dev(host->mmc),
		"Unable to enable MMC clocks \n");
	spin_unlock_irqrestore(&host->clk_lock, flags);
	return ret;
}
Example #15
File: clock24xx.c Project: 274914765/C
/* Sets basic clocks based on the specified rate */
static int omap2_select_table_rate(struct clk *clk, unsigned long rate)
{
    u32 cur_rate, done_rate, bypass = 0, tmp;
    struct prcm_config *prcm;
    unsigned long found_speed = 0;
    unsigned long flags;

    if (clk != &virt_prcm_set)
        return -EINVAL;

    for (prcm = rate_table; prcm->mpu_speed; prcm++) {
        if (!(prcm->flags & cpu_mask))
            continue;

        if (prcm->xtal_speed != sys_ck.rate)
            continue;

        if (prcm->mpu_speed <= rate) {
            found_speed = prcm->mpu_speed;
            break;
        }
    }

    if (!found_speed) {
        printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
               rate / 1000000);
        return -EINVAL;
    }

    curr_prcm_set = prcm;
    cur_rate = omap2_get_dpll_rate_24xx(&dpll_ck);

    if (prcm->dpll_speed == cur_rate / 2) {
        omap2_reprogram_sdrc(CORE_CLK_SRC_DPLL, 1);
    } else if (prcm->dpll_speed == cur_rate * 2) {
        omap2_reprogram_sdrc(CORE_CLK_SRC_DPLL_X2, 1);
    } else if (prcm->dpll_speed != cur_rate) {
        local_irq_save(flags);

        if (prcm->dpll_speed == prcm->xtal_speed)
            bypass = 1;

        if ((prcm->cm_clksel2_pll & OMAP24XX_CORE_CLK_SRC_MASK) ==
            CORE_CLK_SRC_DPLL_X2)
            done_rate = CORE_CLK_SRC_DPLL_X2;
        else
            done_rate = CORE_CLK_SRC_DPLL;

        /* MPU divider */
        cm_write_mod_reg(prcm->cm_clksel_mpu, MPU_MOD, CM_CLKSEL);

        /* dsp + iva1 div(2420), iva2.1(2430) */
        cm_write_mod_reg(prcm->cm_clksel_dsp,
                 OMAP24XX_DSP_MOD, CM_CLKSEL);

        cm_write_mod_reg(prcm->cm_clksel_gfx, GFX_MOD, CM_CLKSEL);

        /* Major subsystem dividers */
        tmp = cm_read_mod_reg(CORE_MOD, CM_CLKSEL1) & OMAP24XX_CLKSEL_DSS2_MASK;
        cm_write_mod_reg(prcm->cm_clksel1_core | tmp, CORE_MOD, CM_CLKSEL1);
        if (cpu_is_omap2430())
            cm_write_mod_reg(prcm->cm_clksel_mdm,
                     OMAP2430_MDM_MOD, CM_CLKSEL);

        /* x2 to enter init_mem */
        omap2_reprogram_sdrc(CORE_CLK_SRC_DPLL_X2, 1);

        omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
                   bypass);

        omap2_init_memory_params(omap2_dll_force_needed());
        omap2_reprogram_sdrc(done_rate, 0);

        local_irq_restore(flags);
    }
    omap2_dpll_recalc(&dpll_ck);

    return 0;
}
Example #16
File: clock34xx.c Project: 274914765/C
int __init omap2_clk_init(void)
{
    /* struct prcm_config *prcm; */
    struct clk **clkp;
    /* u32 clkrate; */
    u32 cpu_clkflg;

    /* REVISIT: Ultimately this will be used for multiboot */
#if 0
    if (cpu_is_omap242x()) {
        cpu_mask = RATE_IN_242X;
        cpu_clkflg = CLOCK_IN_OMAP242X;
        clkp = onchip_24xx_clks;
    } else if (cpu_is_omap2430()) {
        cpu_mask = RATE_IN_243X;
        cpu_clkflg = CLOCK_IN_OMAP243X;
        clkp = onchip_24xx_clks;
    }
#endif
    if (cpu_is_omap34xx()) {
        cpu_mask = RATE_IN_343X;
        cpu_clkflg = CLOCK_IN_OMAP343X;
        clkp = onchip_34xx_clks;

        /*
         * Update this if there are further clock changes between ES2
         * and production parts
         */
        if (is_sil_rev_equal_to(OMAP3430_REV_ES1_0)) {
            /* No 3430ES1-only rates exist, so no RATE_IN_3430ES1 */
            cpu_clkflg |= CLOCK_IN_OMAP3430ES1;
        } else {
            cpu_mask |= RATE_IN_3430ES2;
            cpu_clkflg |= CLOCK_IN_OMAP3430ES2;
        }
    }

    clk_init(&omap2_clk_functions);

    for (clkp = onchip_34xx_clks;
         clkp < onchip_34xx_clks + ARRAY_SIZE(onchip_34xx_clks);
         clkp++) {
        if ((*clkp)->flags & cpu_clkflg)
            clk_register(*clkp);
    }

    /* REVISIT: Not yet ready for OMAP3 */
#if 0
    /* Check the MPU rate set by bootloader */
    clkrate = omap2_get_dpll_rate_24xx(&dpll_ck);
    for (prcm = rate_table; prcm->mpu_speed; prcm++) {
        if (!(prcm->flags & cpu_mask))
            continue;
        if (prcm->xtal_speed != sys_ck.rate)
            continue;
        if (prcm->dpll_speed <= clkrate)
             break;
    }
    curr_prcm_set = prcm;
#endif

    recalculate_root_clocks();

    printk(KERN_INFO "Clocking rate (Crystal/DPLL/ARM core): "
           "%ld.%01ld/%ld/%ld MHz\n",
           (osc_sys_ck.rate / 1000000), (osc_sys_ck.rate / 100000) % 10,
           (core_ck.rate / 1000000), (arm_fck.rate / 1000000));

    /*
     * Only enable those clocks we will need, let the drivers
     * enable other clocks as necessary
     */
    clk_enable_init_clocks();

    /* Avoid sleeping during omap2_clk_prepare_for_reboot() */
    /* REVISIT: not yet ready for 343x */
#if 0
    vclk = clk_get(NULL, "virt_prcm_set");
    sclk = clk_get(NULL, "sys_ck");
#endif
    return 0;
}
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	struct omap2_hsmmc_info *c;
	int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
	int i;
	u32 reg;

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else {
		control_pbias_offset =
			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK|
			OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	}

	for (c = controllers; c->mmc; c++) {
		struct hsmmc_controller *hc = hsmmc + c->mmc - 1;
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc) {
			pr_debug("MMC%d: no such controller\n", c->mmc);
			continue;
		}
		if (mmc) {
			pr_debug("MMC%d: already configured\n", c->mmc);
			continue;
		}

		mmc = kzalloc(sizeof(struct omap_mmc_platform_data),
			      GFP_KERNEL);
		if (!mmc) {
			pr_err("Cannot allocate memory for mmc device!\n");
			goto done;
		}

		if (cpu_is_ti816x())
			mmc->version = MMC_CTRL_VERSION_2;

		if (c->name)
			strncpy(hc->name, c->name, HSMMC_NAME_LEN);
		else
			snprintf(hc->name, ARRAY_SIZE(hc->name),
				"mmc%islot%i", c->mmc, 1);
		mmc->slots[0].name = hc->name;
		mmc->nr_slots = 1;
		mmc->slots[0].caps = c->caps;
		mmc->slots[0].internal_clock = !c->ext_clock;
		mmc->dma_mask = 0xffffffff;
		if (cpu_is_omap44xx())
			mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
		else
			mmc->reg_offset = 0;

		mmc->get_context_loss_count = hsmmc_get_context_loss;

		mmc->slots[0].switch_pin = c->gpio_cd;
		mmc->slots[0].gpio_wp = c->gpio_wp;

		mmc->slots[0].remux = c->remux;
		mmc->slots[0].init_card = c->init_card;

		if (c->cover_only)
			mmc->slots[0].cover = 1;

		if (c->nonremovable)
			mmc->slots[0].nonremovable = 1;

		if (c->power_saving)
			mmc->slots[0].power_saving = 1;

		if (c->no_off)
			mmc->slots[0].no_off = 1;

		if (c->vcc_aux_disable_is_sleep)
			mmc->slots[0].vcc_aux_disable_is_sleep = 1;

		/* NOTE:  MMC slots should have a Vcc regulator set up.
		 * This may be from a TWL4030-family chip, another
		 * controllable regulator, or a fixed supply.
		 *
		 * temporary HACK: ocr_mask instead of fixed supply
		 */
		if (cpu_is_omap3505() || cpu_is_omap3517())
			mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
						 MMC_VDD_26_27 |
						 MMC_VDD_27_28 |
						 MMC_VDD_29_30 |
						 MMC_VDD_30_31 |
						 MMC_VDD_31_32;
		else
			mmc->slots[0].ocr_mask = c->ocr_mask;

		if (cpu_is_omap3517() || cpu_is_omap3505() || cpu_is_ti81xx())
			mmc->slots[0].set_power = nop_mmc_set_power;
		else
			mmc->slots[0].features |= HSMMC_HAS_PBIAS;

		if ((cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0)) ||
				cpu_is_ti814x())
			mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;

		switch (c->mmc) {
		case 1:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* on-chip level shifting via PBIAS0/PBIAS1 */
				if (cpu_is_omap44xx()) {
					mmc->slots[0].before_set_reg =
						omap4_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap4_hsmmc1_after_set_reg;
				} else {
					mmc->slots[0].before_set_reg =
						omap_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap_hsmmc1_after_set_reg;
				}
			}

			/* Omap3630 HSMMC1 supports only 4-bit */
			if (cpu_is_omap3630() &&
					(c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
				mmc->slots[0].caps = c->caps;
			}
			break;
		case 2:
			if (c->ext_clock)
				c->transceiver = 1;
			if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
			}
			/* FALLTHROUGH */
		case 3:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* off-chip level shifting, or none */
				mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
				mmc->slots[0].after_set_reg = NULL;
			}
			break;
		default:
			pr_err("MMC%d configuration not supported!\n", c->mmc);
			kfree(mmc);
			continue;
		}
		hsmmc_data[c->mmc - 1] = mmc;
	}

	if (!cpu_is_ti81xx())
		omap2_init_mmc(hsmmc_data, OMAP34XX_NR_MMC);
	else
		omap2_init_mmc(hsmmc_data, TI81XX_NR_MMC);

	/* pass the device nodes back to board setup code */
	for (c = controllers; c->mmc; c++) {
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc)
			continue;
		c->dev = mmc->dev;
	}

done:
	for (i = 0; i < nr_hsmmc; i++)
		kfree(hsmmc_data[i]);
}
Example #18
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	struct omap2_hsmmc_info *c;
	int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
	int i;
	u32 reg;
	int controller_cnt = 0;

	printk(">>> omap2_hsmmc_init\n");

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else {
		control_pbias_offset = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK | OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK | OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK|	OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK | OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	}

	for (c = controllers; c->mmc; c++) {
		struct hsmmc_controller *hc = hsmmc + controller_cnt;
		struct omap_mmc_platform_data *mmc = hsmmc_data[controller_cnt];
		
		if (!c->mmc || c->mmc > nr_hsmmc) {
			printk("MMC%d: no such controller\n", c->mmc);
			continue;
		}
		
		if (mmc) {
			printk("MMC%d: already configured\n", c->mmc);
			continue;
		}

		mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
		if (!mmc) {
			pr_err("Cannot allocate memory for mmc device!\n");
			goto done;
		}

		if (c->name)
			strncpy(hc->name, c->name, HSMMC_NAME_LEN);
		else
			snprintf(hc->name, ARRAY_SIZE(hc->name),
				"mmc%islot%i", c->mmc, 1);

#ifdef CONFIG_TIWLAN_SDIO
		if (c->mmc == CONFIG_TIWLAN_MMC_CONTROLLER) {
			mmc->slots[0].embedded_sdio = &omap_wifi_emb_data;
			mmc->slots[0].register_status_notify = &omap_wifi_status_register;
			mmc->slots[0].card_detect = &omap_wifi_status;
		}
#endif

		mmc->slots[0].name = hc->name;
		mmc->nr_slots = 1;
		mmc->slots[0].caps = c->caps;
		mmc->slots[0].internal_clock = !c->ext_clock;
		mmc->dma_mask = 0xffffffff;

		/* Register offset Mapping */
		if (cpu_is_omap44xx())
			mmc->regs_map = (u16 *) omap4_mmc_reg_map;
		else
			mmc->regs_map = (u16 *) omap3_mmc_reg_map;

		if (!cpu_is_omap44xx())
			mmc->get_context_loss_count = hsmmc_get_context_loss;
//&*&*&*SJ1_20110607, Add SIM card detection.		
#if defined (CONFIG_SIM_CARD_DETECTION) && defined (CONFIG_CHANGE_INAND_MMC_SCAN_INDEX)
		mmc->slots[0].sim_switch_pin = c->gpio_sim_cd;
#endif
//&*&*&*SJ2_20110607, Add SIM card detection.
		mmc->slots[0].switch_pin = c->gpio_cd;
		mmc->slots[0].cd_active_high = c->cd_active_high;
		mmc->slots[0].gpio_wp = c->gpio_wp;
		mmc->slots[0].remux = c->remux;

		if (c->cover_only)
			mmc->slots[0].cover = 1;

		if (c->nonremovable)
			mmc->slots[0].nonremovable = 1;

		if (c->power_saving)
			mmc->slots[0].power_saving = 1;

		if (c->no_off)
			mmc->slots[0].no_off = 1;

		if (c->vcc_aux_disable_is_sleep)
			mmc->slots[0].vcc_aux_disable_is_sleep = 1;

		/* NOTE:  MMC slots should have a Vcc regulator set up.
		 * This may be from a TWL4030-family chip, another
		 * controllable regulator, or a fixed supply.
		 *
		 * temporary HACK: ocr_mask instead of fixed supply
		 */
		mmc->slots[0].ocr_mask = c->ocr_mask;

		if (cpu_is_omap3517() || cpu_is_omap3505())
			mmc->slots[0].set_power = nop_mmc_set_power;
		else
			mmc->slots[0].features |= HSMMC_HAS_PBIAS;

		if (cpu_is_omap44xx()) {
			if (omap_rev() > OMAP4430_REV_ES1_0)
				mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;

			mmc->slots[0].features |= HSMMC_DVFS_24MHZ_CONST;

			if (c->mmc >= 3 && c->mmc <= 5) {
				mmc->slots[0].features |= HSMMC_HAS_48MHZ_MASTER_CLK;
				mmc->get_context_loss_count =
						hsmmc_get_context_loss;
			}
		}
		
		switch (c->mmc) {
		case 1:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* on-chip level shifting via PBIAS0/PBIAS1 */
				if (cpu_is_omap44xx()) {
					mmc->slots[0].before_set_reg =
						omap4_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap4_hsmmc1_after_set_reg;
				} else {
					mmc->slots[0].before_set_reg =
						omap_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap_hsmmc1_after_set_reg;
				}
			}

			/* Omap3630 HSMMC1 supports only 4-bit */
			if (cpu_is_omap3630() && (c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
				mmc->slots[0].caps = c->caps;
			}
			break;
			
		case 2:
			if (c->ext_clock)
				c->transceiver = 1;
		
			if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
			}
			/* FALLTHROUGH */
		case 3:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* off-chip level shifting, or none */
				mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
				mmc->slots[0].after_set_reg = NULL;
			}
#ifdef CONFIG_TIWLAN_SDIO
			mmc->slots[0].ocr_mask  = MMC_VDD_165_195;
#endif
			break;

		case 4:
		case 5:
			/* TODO Update required */
			mmc->slots[0].before_set_reg = NULL;
			mmc->slots[0].after_set_reg = NULL;
#ifdef CONFIG_TIWLAN_SDIO
			mmc->slots[0].ocr_mask  = MMC_VDD_165_195;
#endif
			break;

		default:
			pr_err("MMC%d configuration not supported!\n", c->mmc);
			kfree(mmc);
			continue;
		}
		
		hsmmc_data[controller_cnt] = mmc;
		omap2_init_mmc(hsmmc_data[controller_cnt], c->mmc);
		controller_cnt++;
	}

	/* pass the device nodes back to board setup code */
	controller_cnt = 0;
	for (c = controllers; c->mmc; c++) {
		struct omap_mmc_platform_data *mmc = hsmmc_data[controller_cnt];

		if (!c->mmc || c->mmc > nr_hsmmc)
			continue;
		
		c->dev = mmc->dev;
		controller_cnt++;
	}

done:
	for (i = 0; i < controller_cnt; i++)
		kfree(hsmmc_data[i]);

	printk("<<< omap2_hsmmc_init\n");
}
Example #19
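/*
 * hw_params: choose the DMA request line, port address and FIFO
 * threshold for this McBSP, then derive frame length, word length and
 * frame-sync timing from the requested PCM parameters.
 */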
static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params,
				    struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai;
	struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
	struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
	int dma, bus_id = mcbsp_data->bus_id, id = cpu_dai->id;
	int uninitialized_var(wlen);
	int uninitialized_var(channels);
	int uninitialized_var(wpf);
	unsigned long uninitialized_var(port);
	unsigned int uninitialized_var(format);
	int xfer_size = 0;

	if (cpu_class_is_omap1()) {
		dma = omap1_dma_reqs[bus_id][substream->stream];
		port = omap1_mcbsp_port[bus_id][substream->stream];
	} else if (cpu_is_omap2420()) {
		dma = omap24xx_dma_reqs[bus_id][substream->stream];
		port = omap2420_mcbsp_port[bus_id][substream->stream];
	} else if (cpu_is_omap2430()) {
		dma = omap24xx_dma_reqs[bus_id][substream->stream];
		port = omap2430_mcbsp_port[bus_id][substream->stream];
	} else if (cpu_is_omap343x()) {
		dma = omap24xx_dma_reqs[bus_id][substream->stream];
		port = omap34xx_mcbsp_port[bus_id][substream->stream];
		xfer_size = omap34xx_mcbsp_thresholds[bus_id]
					[substream->stream];
		/* Adjust xfer_size so that the buffer size is an integral
		   multiple of it. This is for DMA packet mode transfer. */
		if (xfer_size) {
			int buffer_size = params_buffer_size(params);
			if (xfer_size > buffer_size) {
				printk(KERN_DEBUG "buffer_size is %d \n",
					buffer_size);
				xfer_size = 0;
			} else if (params_channels(params) == 1) {
				/* Mono needs 16 bits DMA, no FIFO */
				xfer_size = 1;
			} else {
				int temp =  buffer_size / xfer_size;
				while (buffer_size % xfer_size) {
					temp++;
					xfer_size = buffer_size / (temp);
				}
			}
		}
	} else {
		return -ENODEV;
	}
	omap_mcbsp_dai_dma_params[id][substream->stream].name =
		substream->stream ? "Audio Capture" : "Audio Playback";
	omap_mcbsp_dai_dma_params[id][substream->stream].dma_req = dma;
	omap_mcbsp_dai_dma_params[id][substream->stream].port_addr = port;
	omap_mcbsp_dai_dma_params[id][substream->stream].xfer_size = xfer_size;
	cpu_dai->dma_data = &omap_mcbsp_dai_dma_params[id][substream->stream];

	if (mcbsp_data->configured) {
		/* McBSP already configured by another stream */
		return 0;
	}

	format = mcbsp_data->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
	wpf = channels = params_channels(params);

	if (format == SND_SOC_DAIFMT_SPDIF) {
		regs->xcr2	&= ~(XPHASE);
		regs->rcr2	&= ~(RPHASE);
		/* Don't care about channels number and frame
			length 2. Set 4 frames (frame length 1) */
		regs->xcr1	|= XFRLEN1(4 - 1);
		regs->rcr1	|= RFRLEN1(1 - 1);
	} else {
		switch (channels) {
		case 2:
			if (format == SND_SOC_DAIFMT_I2S) {
				/* Use dual-phase frames */
				regs->rcr2	|= RPHASE;
				regs->xcr2	|= XPHASE;
				/* Set 1 word per (McBSP) frame for phase1 and phase2 */
				wpf--;
				regs->rcr2	|= RFRLEN2(wpf - 1);
				regs->xcr2	|= XFRLEN2(wpf - 1);
			} else if (format == SND_SOC_DAIFMT_I2S_1PHASE || format == SND_SOC_DAIFMT_DSP_A_1PHASE) {
				printk(KERN_DEBUG "Configure McBSP for 1 phase\n");
				regs->xcr2	&= ~(XPHASE);
				regs->rcr2	&= ~(RPHASE);
				wpf--;
			}
		case 1:
		case 4:
			/* Set word per (McBSP) frame for phase1 */
			regs->rcr1	|= RFRLEN1(wpf - 1);
			regs->xcr1	|= XFRLEN1(wpf - 1);
			break;
		default:
			/* Unsupported number of channels */
			return -EINVAL;
		}
	}

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		/* Set word lengths */
		wlen = 16;
		if (format == SND_SOC_DAIFMT_SPDIF ||
		   (channels != 1 && (format == SND_SOC_DAIFMT_I2S_1PHASE ||
		   format == SND_SOC_DAIFMT_DSP_A_1PHASE))) {
			regs->xcr1	|= XWDLEN1(OMAP_MCBSP_WORD_32);
			regs->rcr1	|= RWDLEN1(OMAP_MCBSP_WORD_32);
			omap_mcbsp_dai_dma_params[id]
			[SNDRV_PCM_STREAM_PLAYBACK].dma_word_size = 32;
			omap_mcbsp_dai_dma_params[id]
			[SNDRV_PCM_STREAM_CAPTURE].dma_word_size = 32;
		} else {
			regs->rcr2	|= RWDLEN2(OMAP_MCBSP_WORD_16);
			regs->rcr1	|= RWDLEN1(OMAP_MCBSP_WORD_16);
			regs->xcr2	|= XWDLEN2(OMAP_MCBSP_WORD_16);
			regs->xcr1	|= XWDLEN1(OMAP_MCBSP_WORD_16);
			omap_mcbsp_dai_dma_params[id]
			[substream->stream].dma_word_size = 16;
		}
		break;
	case SNDRV_PCM_FORMAT_S8:
		/* Set word lengths */
		wlen = 8;
		if (format == SND_SOC_DAIFMT_SPDIF ||
		   (channels != 1 && (format == SND_SOC_DAIFMT_I2S_1PHASE ||
		   format == SND_SOC_DAIFMT_DSP_A_1PHASE))) {
			regs->xcr1	|= XWDLEN1(OMAP_MCBSP_WORD_16);
			regs->rcr1	|= RWDLEN1(OMAP_MCBSP_WORD_16);
			omap_mcbsp_dai_dma_params[id]
			[SNDRV_PCM_STREAM_PLAYBACK].dma_word_size = 16;
			omap_mcbsp_dai_dma_params[id]
			[SNDRV_PCM_STREAM_CAPTURE].dma_word_size = 16;
		} else {
			regs->rcr2	|= RWDLEN2(OMAP_MCBSP_WORD_8);
			regs->rcr1	|= RWDLEN1(OMAP_MCBSP_WORD_8);
			regs->xcr2	|= XWDLEN2(OMAP_MCBSP_WORD_8);
			regs->xcr1	|= XWDLEN1(OMAP_MCBSP_WORD_8);
			omap_mcbsp_dai_dma_params[id]
			[substream->stream].dma_word_size = 8;
		}
		break;
	default:
		/* Unsupported PCM format */
		return -EINVAL;
	}

	/* Set FS period and length in terms of bit clock periods */
	switch (format) {
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_I2S_1PHASE:
		regs->srgr2	|= FPER(wlen * channels - 1);
		regs->srgr1	|= FWID(wlen - 1);
		break;
	case SND_SOC_DAIFMT_DSP_A_1PHASE:
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		regs->srgr2	|= FPER(wlen * channels - 1);
		regs->srgr1	|= FWID(0);
		break;
	case SND_SOC_DAIFMT_SPDIF:
		regs->srgr2	|= FPER(4 * 32 - 1);
		regs->srgr1	|= FWID(0);
		break;
	}

	regs->xccr |= XDMAEN;
	regs->wken = XRDYEN;
	regs->rccr |= RDMAEN;

	omap_mcbsp_config(bus_id, &mcbsp_data->regs);

	if ((bus_id == 1) && (xfer_size != 0)) {
		printk(KERN_DEBUG "Configure McBSP TX FIFO threshold to %d\n",
			xfer_size);
		omap_mcbsp_set_tx_threshold(bus_id, xfer_size);
	}

	/* We want to be able to change settings (like the number of
	 * channels) dynamically, so do not mark the McBSP as configured. */
	/* mcbsp_data->configured = 1; */

	return 0;
}
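/*
 * Allocate and fill omap_mmc_platform_data for every controller in the
 * board's twl4030_hsmmc_info table, then register the MMC devices.
 */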
void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
{
	struct twl4030_hsmmc_info *c;
	int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
	u32 reg;

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
			nr_hsmmc = 2;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else {
		control_pbias_offset = OMAP44XX_CONTROL_PBIAS_LITE;
		control_mmc1 = OMAP44XX_CONTROL_MMC1;
		reg = omap_ctrl_readl(control_mmc1);
		reg |= (OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP0 |
			OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP1);
		reg &= ~(OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP2 |
			OMAP4_CONTROL_SDMMC1_PUSTRENGTHGRP3);
		reg |= (OMAP4_CONTROL_SDMMC1_DR0_SPEEDCTRL |
			OMAP4_CONTROL_SDMMC1_DR1_SPEEDCTRL |
			OMAP4_CONTROL_SDMMC1_DR2_SPEEDCTRL);
		omap_ctrl_writel(reg, control_mmc1);
	}

	for (c = controllers; c->mmc; c++) {
		struct twl_mmc_controller *twl = hsmmc + c->mmc - 1;
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc) {
			pr_debug("MMC%d: no such controller\n", c->mmc);
			continue;
		}
		if (mmc) {
			pr_debug("MMC%d: already configured\n", c->mmc);
			continue;
		}

		mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
		if (!mmc) {
			pr_err("Cannot allocate memory for mmc device!\n");
			return;
		}

		if (c->name)
			strncpy(twl->name, c->name, HSMMC_NAME_LEN);
		else
			snprintf(twl->name, ARRAY_SIZE(twl->name),
				"mmc%islot%i", c->mmc, 1);

#ifdef CONFIG_MMC_EMBEDDED_SDIO
		if (c->mmc == CONFIG_TIWLAN_MMC_CONTROLLER) {
			mmc->slots[0].embedded_sdio = &omap_wifi_emb_data;
			mmc->slots[0].register_status_notify =
				&omap_wifi_status_register;
			mmc->slots[0].card_detect = &omap_wifi_status;
		}
#elif defined(CONFIG_TIWLAN_SDIO)
		if (c->mmc == CONFIG_TIWLAN_MMC_CONTROLLER)
			mmc->name = "TIWLAN_SDIO";
#endif
		mmc->slots[0].name = twl->name;
		mmc->nr_slots = 1;
		mmc->slots[0].wires = c->wires;
		mmc->slots[0].internal_clock = !c->ext_clock;
		mmc->dma_mask = 0xffffffff;
		mmc->init = twl_mmc_late_init;

		/* note: twl4030 card detect GPIOs can disable VMMCx ... */
		if (!cpu_is_omap44xx()) {
			if (gpio_is_valid(c->gpio_cd)) {
				mmc->cleanup = twl_mmc_cleanup;
				mmc->suspend = twl_mmc_suspend;
				mmc->resume = twl_mmc_resume;

				mmc->slots[0].switch_pin = c->gpio_cd;
				mmc->slots[0].card_detect_irq =
							gpio_to_irq(c->gpio_cd);
				if (c->cover_only)
					mmc->slots[0].get_cover_state =
							twl_mmc_get_cover_state;
				else
					mmc->slots[0].card_detect =
							twl_mmc_card_detect;
			} else
				mmc->slots[0].switch_pin = -EINVAL;
		} else {
			/* HardCoding Phoenix number for only MMC1 of OMAP4 */
			if (c->mmc == 1)
				mmc->slots[0].card_detect_irq = 384;
			else
				mmc->slots[0].card_detect_irq = 0;
			if (c->cover_only)
				mmc->slots[0].get_cover_state =
						twl_mmc_get_cover_state;
			else
				mmc->slots[0].card_detect =
						twl_mmc_card_detect;
		}

		mmc->get_context_loss_count =
				twl4030_mmc_get_context_loss;

		if (!cpu_is_omap44xx()) {
			/* write protect normally uses an OMAP gpio */
			if (gpio_is_valid(c->gpio_wp)) {
				gpio_request(c->gpio_wp, "mmc_wp");
				gpio_direction_input(c->gpio_wp);

				mmc->slots[0].gpio_wp = c->gpio_wp;
				mmc->slots[0].get_ro = twl_mmc_get_ro;
			} else
				mmc->slots[0].gpio_wp = -EINVAL;
		}
		if (c->nonremovable)
			mmc->slots[0].nonremovable = 1;

		if (c->power_saving)
			mmc->slots[0].power_saving = 1;

		/* NOTE:  MMC slots should have a Vcc regulator set up.
		 * This may be from a TWL4030-family chip, another
		 * controllable regulator, or a fixed supply.
		 *
		 * temporary HACK: ocr_mask instead of fixed supply
		 */
		mmc->slots[0].ocr_mask = c->ocr_mask;

		switch (c->mmc) {
		case 1:
			/* on-chip level shifting via PBIAS0/PBIAS1 */
			mmc->slots[0].set_power = twl_mmc1_set_power;
			mmc->slots[0].set_sleep = twl_mmc1_set_sleep;

			/* Omap3630 HSMMC1 supports only 4-bit */
			if (cpu_is_omap3630() && c->wires > 4) {
				c->wires = 4;
				mmc->slots[0].wires = c->wires;
			}
			break;
		case 2:
			if (c->ext_clock)
				c->transceiver = 1;
			if (c->transceiver && c->wires > 4)
				c->wires = 4;
				
			/* TI Added to support Samsung Customisation */
			mmc->slots[0].set_power = twl_iNand_set_power;

			if (gpio_request(OMAP_GPIO_MOVI_EN, "iNand_Power_source") < 0) {
				printk(KERN_ERR "Failed to get OMAP_GPIO_MOVI_EN pin \n");
				return;
			}
			gpio_direction_output(OMAP_GPIO_MOVI_EN, 1);
			/* mmc->slots[0].set_sleep = twl_mmc23_set_sleep; */
			/* TI Added to support Samsung Customisation */
			break;
			/* FALLTHROUGH */
		case 3:
		case 4:
		case 5:
			/* off-chip level shifting, or none */
			mmc->slots[0].set_power = twl_mmc23_set_power;
			mmc->slots[0].set_sleep = twl_mmc23_set_sleep;
#ifdef CONFIG_MMC_EMBEDDED_SDIO
			mmc->slots[0].ocr_mask  = MMC_VDD_165_195;
#endif
			break;
		default:
			pr_err("MMC%d configuration not supported!\n", c->mmc);
			kfree(mmc);
			continue;
		}
		hsmmc_data[c->mmc - 1] = mmc;
	}

	omap2_init_mmc(hsmmc_data, OMAP44XX_NR_MMC);

	/* pass the device nodes back to board setup code */
	for (c = controllers; c->mmc; c++) {
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc)
			continue;
		c->dev = mmc->dev;
	}
}
Example #21
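/*
 * Initialise the I2C controller: compute the prescaler and SCL low/high
 * times for the requested speed, set the FIFO thresholds and take the
 * module out of reset.
 */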
static int omap_i2c_init(struct omap_i2c_dev *dev)
{
	u16 psc = 0, scll = 0, sclh = 0, buf = 0;
	u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
	unsigned long fclk_rate = 12000000;
	unsigned long internal_clk = 0;
	struct clk *fclk;

	if (dev->rev >= OMAP_I2C_REV_ON_3430) {
		/*
		 * Enabling all wakeup sources to stop I2C freezing on
		 * WFI instruction.
		 * REVISIT: Some wkup sources might not be needed.
		 */
		dev->westate = OMAP_I2C_WE_ALL;
		omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
	}

	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);

	if (cpu_class_is_omap1()) {
		/*
		 * The I2C functional clock is the armxor_ck, so there's
		 * no need to get "armxor_ck" separately.  Now, if OMAP2420
		 * always returns 12MHz for the functional clock, we can
		 * do this bit unconditionally.
		 */
		fclk = clk_get(dev->dev, "fck");
		fclk_rate = clk_get_rate(fclk);
		clk_put(fclk);

		/* TRM for 5912 says the I2C clock must be prescaled to be
		 * between 7 - 12 MHz. The XOR input clock is typically
		 * 12, 13 or 19.2 MHz. So we should have code that produces:
		 *
		 * XOR MHz	Divider		Prescaler
		 * 12		1		0
		 * 13		2		1
		 * 19.2		2		1
		 */
		if (fclk_rate > 12000000)
			psc = fclk_rate / 12000000;
	}

	if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {

		/*
		 * HSI2C controller internal clk rate should be 19.2 Mhz for
		 * HS and for all modes on 2430. On 34xx we can use lower rate
		 * to get longer filter period for better noise suppression.
		 * The filter is iclk (fclk for HS) period.
		 */
		if (dev->speed > 400 || cpu_is_omap2430())
			internal_clk = 19200;
		else if (dev->speed > 100)
			internal_clk = 9600;
		else
			internal_clk = 4000;
		fclk = clk_get(dev->dev, "fck");
		fclk_rate = clk_get_rate(fclk) / 1000;
#ifdef CONFIG_OMAP4_DPLL_CASCADING
		dev->i2c_fclk_rate = fclk_rate;
#endif
		clk_put(fclk);

		/* Compute prescaler divisor */
		psc = fclk_rate / internal_clk;
		psc = psc - 1;

		/* If configured for High Speed */
		if (dev->speed > 400) {
			unsigned long scl;

			/* For first phase of HS mode */
			scl = internal_clk / 400;
			fsscll = scl - (scl / 3) - 7;
			fssclh = (scl / 3) - 5;

			/* For second phase of HS mode */
			scl = fclk_rate / dev->speed;
			hsscll = scl - (scl / 3) - 7;
			hssclh = (scl / 3) - 5;
		} else if (dev->speed > 100) {
			unsigned long scl;

			/* Fast mode */
			scl = internal_clk / dev->speed;
			fsscll = scl - (scl / 3) - 7;
			fssclh = (scl / 3) - 5;
		} else {
			/* Standard mode */
			fsscll = internal_clk / (dev->speed * 2) - 7;
			fssclh = internal_clk / (dev->speed * 2) - 5;
		}
		scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
		sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
	} else {
		/* Program desired operating rate */
		fclk_rate /= (psc + 1) * 1000;
		if (psc > 2)
			psc = 2;
		scll = fclk_rate / (dev->speed * 2) - 7 + psc;
		sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
	}

	/* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
	omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);

	/* SCL low and high time values */
	omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
	omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);

	if (dev->fifo_size) {
		/* Note: setup required fifo size - 1. RTRSH and XTRSH */
		buf = (dev->fifo_size - 1) << 8 | OMAP_I2C_BUF_RXFIF_CLR |
			(dev->fifo_size - 1) | OMAP_I2C_BUF_TXFIF_CLR;
		omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
	}

	/* Take the I2C module out of reset: */
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);

	dev->errata = 0;

	if (cpu_is_omap2430() || cpu_is_omap34xx())
		dev->errata |= I2C_OMAP_ERRATA_I207;

	if (cpu_is_omap34xx() || cpu_is_omap44xx()) {
		dev->pscstate = psc;
		dev->scllstate = scll;
		dev->sclhstate = sclh;
		dev->bufstate = buf;
	}
	return 0;
}
Example #22
void __init twl4030_mmc_init(struct twl4030_hsmmc_info *controllers)
{
	struct twl4030_hsmmc_info *c;
	int nr_hsmmc = ARRAY_SIZE(hsmmc_data);

	if (cpu_is_omap2430()) {
		control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
		control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		nr_hsmmc = 2;
	} else {
		control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
		control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
	}

	for (c = controllers; c->mmc; c++) {
		struct twl_mmc_controller *twl = hsmmc + c->mmc - 1;
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc) {
			pr_debug("MMC%d: no such controller\n", c->mmc);
			continue;
		}
		if (mmc) {
			pr_debug("MMC%d: already configured\n", c->mmc);
			continue;
		}

		mmc = kzalloc(sizeof(struct omap_mmc_platform_data), GFP_KERNEL);
		if (!mmc) {
			pr_err("Cannot allocate memory for mmc device!\n");
			return;
		}

		if (c->name)
			strncpy(twl->name, c->name, HSMMC_NAME_LEN);
		else
			snprintf(twl->name, ARRAY_SIZE(twl->name),
				"mmc%islot%i", c->mmc, 1);
		mmc->slots[0].name = twl->name;
		mmc->nr_slots = 1;
		mmc->slots[0].wires = c->wires;
		mmc->slots[0].internal_clock = !c->ext_clock;
		mmc->dma_mask = 0xffffffff;
		mmc->init = twl_mmc_late_init;

		/* note: twl4030 card detect GPIOs can disable VMMCx ... */
		if (gpio_is_valid(c->gpio_cd)) {
			mmc->cleanup = twl_mmc_cleanup;
			mmc->suspend = twl_mmc_suspend;
			mmc->resume = twl_mmc_resume;

			mmc->slots[0].switch_pin = c->gpio_cd;
			mmc->slots[0].card_detect_irq = gpio_to_irq(c->gpio_cd);
			if (c->cover_only)
				mmc->slots[0].get_cover_state = twl_mmc_get_cover_state;
			else
				mmc->slots[0].card_detect = twl_mmc_card_detect;
		} else
			mmc->slots[0].switch_pin = -EINVAL;

		mmc->get_context_loss_count =
				twl4030_mmc_get_context_loss;

		/* write protect normally uses an OMAP gpio */
		if (gpio_is_valid(c->gpio_wp)) {
			gpio_request(c->gpio_wp, "mmc_wp");
			gpio_direction_input(c->gpio_wp);

			mmc->slots[0].gpio_wp = c->gpio_wp;
			mmc->slots[0].get_ro = twl_mmc_get_ro;
		} else
			mmc->slots[0].gpio_wp = -EINVAL;

		if (c->nonremovable)
			mmc->slots[0].nonremovable = 1;

		if (c->power_saving)
			mmc->slots[0].power_saving = 1;

		/* NOTE:  MMC slots should have a Vcc regulator set up.
		 * This may be from a TWL4030-family chip, another
		 * controllable regulator, or a fixed supply.
		 *
		 * temporary HACK: ocr_mask instead of fixed supply
		 */
		mmc->slots[0].ocr_mask = c->ocr_mask;

		switch (c->mmc) {
		case 1:
			/* on-chip level shifting via PBIAS0/PBIAS1 */
			mmc->slots[0].set_power = twl_mmc1_set_power;
			mmc->slots[0].set_sleep = twl_mmc1_set_sleep;
			break;
		case 2:
			if (c->ext_clock)
				c->transceiver = 1;
			if (c->transceiver && c->wires > 4)
				c->wires = 4;
			/* FALLTHROUGH */
		case 3:
			/* off-chip level shifting, or none */
			mmc->slots[0].set_power = twl_mmc23_set_power;
			mmc->slots[0].set_sleep = twl_mmc23_set_sleep;
			break;
		default:
			pr_err("MMC%d configuration not supported!\n", c->mmc);
			kfree(mmc);
			continue;
		}
		hsmmc_data[c->mmc - 1] = mmc;
	}

	omap2_init_mmc(hsmmc_data, OMAP34XX_NR_MMC);

	/* pass the device nodes back to board setup code */
	for (c = controllers; c->mmc; c++) {
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc)
			continue;
		c->dev = mmc->dev;
	}
}
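/*
 * Power handler for MMC1: sequences the control-module PBIAS cells
 * together with the card's Vcc regulator.
 */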
static int twl_mmc1_set_power(struct device *dev, int slot, int power_on,
				int vdd)
{
	u32 reg, prog_io;
	int ret = 0;
	struct twl_mmc_controller *c = &hsmmc[0];
	struct omap_mmc_platform_data *mmc = dev->platform_data;
   
//	printk("[twl_mmc] '%s' called, power=%d\n", __func__, power_on);
	/*
	 * Assume we power both OMAP VMMC1 (for CMD, CLK, DAT0..3) and the
	 * card with Vcc regulator (from twl4030 or whatever).  OMAP has both
	 * 1.8V and 3.0V modes, controlled by the PBIAS register.
	 *
	 * In 8-bit modes, OMAP VMMC1A (for DAT4..7) needs a supply, which
	 * is most naturally TWL VSIM; those pins also use PBIAS.
	 *
	 * FIXME handle VMMC1A as needed ...
	 */
	if (power_on) {
		if (cpu_is_omap2430()) {
			reg = omap_ctrl_readl(OMAP243X_CONTROL_DEVCONF1);
			if ((1 << vdd) >= MMC_VDD_30_31)
				reg |= OMAP243X_MMC1_ACTIVE_OVERWRITE;
			else
				reg &= ~OMAP243X_MMC1_ACTIVE_OVERWRITE;
			omap_ctrl_writel(reg, OMAP243X_CONTROL_DEVCONF1);
		}
		if (!cpu_is_omap44xx()) {
			if (mmc->slots[0].internal_clock) {
				reg = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0);
				reg |= OMAP2_MMCSDIO1ADPCLKISEL;
				omap_ctrl_writel(reg, OMAP2_CONTROL_DEVCONF0);
			}

			reg = omap_ctrl_readl(control_pbias_offset);
			if (cpu_is_omap3630()) {
				/* Set MMC I/O to 52 MHz */
				prog_io = omap_ctrl_readl
						(OMAP343X_CONTROL_PROG_IO1);
				prog_io |= OMAP3630_PRG_SDMMC1_SPEEDCTRL;
				omap_ctrl_writel
					(prog_io, OMAP343X_CONTROL_PROG_IO1);
			} else {
				reg |= OMAP2_PBIASSPEEDCTRL0;
			}
			reg &= ~OMAP2_PBIASLITEPWRDNZ0;
		} else {
			reg = omap_ctrl_readl(control_pbias_offset);
			reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ |
						OMAP4_MMC1_PWRDWNZ);
		}
		omap_ctrl_writel(reg, control_pbias_offset);
		ret = mmc_regulator_set_ocr(c->vcc, vdd);

		/* 100ms delay required for PBIAS configuration */
		msleep(100);
		if (!cpu_is_omap44xx()) {
			reg = omap_ctrl_readl(control_pbias_offset);
			reg |= (OMAP2_PBIASLITEPWRDNZ0 |
						OMAP2_PBIASSPEEDCTRL0);
			if ((1 << vdd) <= MMC_VDD_165_195)
				reg &= ~OMAP2_PBIASLITEVMODE0;
			else
				reg |= OMAP2_PBIASLITEVMODE0;
		} else {
			reg = omap_ctrl_readl(control_pbias_offset);
			reg |= OMAP4_MMC1_PBIASLITE_PWRDNZ;
			if ((1 << vdd) <= MMC_VDD_165_195)
				reg &= ~(OMAP4_MMC1_PBIASLITE_VMODE);
			else
				reg |= (OMAP4_MMC1_PBIASLITE_VMODE);

			reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ |
						OMAP4_MMC1_PWRDWNZ);
		}
		omap_ctrl_writel(reg, control_pbias_offset);
	} else {
		if (!cpu_is_omap44xx()) {
			reg = omap_ctrl_readl(control_pbias_offset);
			reg &= ~OMAP2_PBIASLITEPWRDNZ0;
		} else {
			reg = omap_ctrl_readl(control_pbias_offset);
			reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ |
						OMAP4_MMC1_PWRDWNZ);
		}
		omap_ctrl_writel(reg, control_pbias_offset);
		ret = mmc_regulator_set_ocr(c->vcc, 0);

		/* 100ms delay required for PBIAS configuration */
		msleep(100);
		if (!cpu_is_omap44xx()) {
			reg = omap_ctrl_readl(control_pbias_offset);
			reg |= (OMAP2_PBIASSPEEDCTRL0 | OMAP2_PBIASLITEPWRDNZ0
						| OMAP2_PBIASLITEVMODE0);
		} else {
			reg = omap_ctrl_readl(control_pbias_offset);
			reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ |
				OMAP4_MMC1_PBIASLITE_VMODE |
				OMAP4_MMC1_PWRDWNZ);
		}
		omap_ctrl_writel(reg, control_pbias_offset);
	}

	return ret;
}
Example #24
void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
{
#ifdef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap1()) {
		if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE))
			return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT);
	}
	if (cpu_is_omap730()) {
		if (BETWEEN(p, OMAP730_DSP_BASE, OMAP730_DSP_SIZE))
			return XLATE(p, OMAP730_DSP_BASE, OMAP730_DSP_START);

		if (BETWEEN(p, OMAP730_DSPREG_BASE, OMAP730_DSPREG_SIZE))
			return XLATE(p, OMAP730_DSPREG_BASE,
					OMAP730_DSPREG_START);
	}
	if (cpu_is_omap15xx()) {
		if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE))
			return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START);

		if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE))
			return XLATE(p, OMAP1510_DSPREG_BASE,
					OMAP1510_DSPREG_START);
	}
	if (cpu_is_omap16xx()) {
		if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE))
			return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START);

		if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE))
			return XLATE(p, OMAP16XX_DSPREG_BASE,
					OMAP16XX_DSPREG_START);
	}
#endif
#ifdef CONFIG_ARCH_OMAP2
	if (cpu_is_omap24xx()) {
		if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE))
			return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT);
		if (BETWEEN(p, L4_24XX_PHYS, L4_24XX_SIZE))
			return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
	}
	if (cpu_is_omap2420()) {
		if (BETWEEN(p, DSP_MEM_24XX_PHYS, DSP_MEM_24XX_SIZE))
			return XLATE(p, DSP_MEM_24XX_PHYS, DSP_MEM_24XX_VIRT);
		if (BETWEEN(p, DSP_IPI_24XX_PHYS, DSP_IPI_24XX_SIZE))
			return XLATE(p, DSP_IPI_24XX_PHYS, DSP_IPI_24XX_VIRT);
		if (BETWEEN(p, DSP_MMU_24XX_PHYS, DSP_MMU_24XX_SIZE))
			return XLATE(p, DSP_MMU_24XX_PHYS, DSP_MMU_24XX_VIRT);
	}
	if (cpu_is_omap2430()) {
		if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
			return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT);
		if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE))
			return XLATE(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_VIRT);
		if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE))
			return XLATE(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_VIRT);
		if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE))
			return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP3
	if (cpu_is_omap34xx()) {
		if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
			return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
		if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
			return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
		if (BETWEEN(p, L4_WK_34XX_PHYS, L4_WK_34XX_SIZE))
			return XLATE(p, L4_WK_34XX_PHYS, L4_WK_34XX_VIRT);
		if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE))
			return XLATE(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_VIRT);
		if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE))
			return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT);
		if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE))
			return XLATE(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_VIRT);
		if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE))
			return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT);
		if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE))
			return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP4
	if (cpu_is_omap44xx()) {
		if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE))
			return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT);
		if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE))
			return XLATE(p, L4_44XX_PHYS, L4_44XX_VIRT);
		if (BETWEEN(p, L4_WK_44XX_PHYS, L4_WK_44XX_SIZE))
			return XLATE(p, L4_WK_44XX_PHYS, L4_WK_44XX_VIRT);
		if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE))
			return XLATE(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_VIRT);
		if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE))
			return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT);
		if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE))
			return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
	}
#endif
	return __arm_ioremap(p, size, type);
}
Example #25
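/*
 * Return the fixed virtual mapping for statically mapped I/O ranges of
 * the detected OMAP; otherwise fall back to __arm_ioremap().
 */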
/*
 * This must be called before _set_clkdiv and _set_sysclk since McBSP register
 * cache is initialized here
 */
static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
				      unsigned int fmt)
{
	struct omap_mcbsp_data *mcbsp_data = to_mcbsp(cpu_dai->private_data);
	struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
	unsigned int temp_fmt = fmt;

	if (mcbsp_data->configured)
		return 0;

	mcbsp_data->fmt = fmt;
	memset(regs, 0, sizeof(*regs));
	/* Generic McBSP register settings */
	regs->spcr2	|= XINTM(3) | FREE;
	regs->spcr1	|= RINTM(3);
	regs->rcr2	|= RFIG;
	regs->xcr2	|= XFIG;
	if (cpu_is_omap2430() || cpu_is_omap34xx()) {
		regs->xccr = DXENDLY(1);
		regs->rccr = RFULL_CYCLE;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_I2S_1PHASE:
		/* 1-bit data delay */
		regs->rcr2	|= RDATDLY(1);
		regs->xcr2	|= XDATDLY(1);
		break;
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_A_1PHASE:
		/* 1-bit data delay */
		regs->rcr2      |= RDATDLY(1);
		regs->xcr2      |= XDATDLY(1);
		/* Invert FS polarity configuration */
		temp_fmt ^= SND_SOC_DAIFMT_NB_IF;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		/* 0-bit data delay */
		regs->rcr2      |= RDATDLY(0);
		regs->xcr2      |= XDATDLY(0);
		/* Invert FS polarity configuration */
		temp_fmt ^= SND_SOC_DAIFMT_NB_IF;
		break;
	case SND_SOC_DAIFMT_SPDIF:
		/*
		 * Recording has to work even if the output is in SPDIF
		 * mode, so receive in DSP-A mode.
		 */
		/* 1-bit data delay */
		regs->rcr2      |= RDATDLY(1);
		/* 0-bit data delay */
		regs->xcr2      |= XDATDLY(0);
		/* LSB First */
		regs->xcr2      |= XCOMPAND(1);
		break;
	default:
		/* Unsupported data format */
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* McBSP master. Set FS and bit clocks as outputs */
		regs->pcr0	|= FSXM | FSRM |
				   CLKXM | CLKRM;
		/* Sample rate generator drives the FS */
		regs->srgr2	|= FSGM;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		/* McBSP slave */
		break;
	default:
		/* Unsupported master/slave configuration */
		return -EINVAL;
	}

	/* Set bit clock (CLKX/CLKR) and FS polarities */
	switch (temp_fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		/*
		 * Normal BCLK + FS.
		 * FS active low. TX data driven on falling edge of bit clock
		 * and RX data sampled on rising edge of bit clock.
		 */
		regs->pcr0	|= FSXP | FSRP |
				   CLKXP | CLKRP;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		regs->pcr0	|= CLKXP | CLKRP;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		regs->pcr0	|= FSXP | FSRP;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
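Because the register cache is only assembled in this callback, a machine driver has to select the DAI format before any _set_clkdiv()/_set_sysclk() calls. A minimal, hypothetical sketch of such a call (only snd_soc_dai_set_fmt() and the SND_SOC_DAIFMT_* flags are taken as given; the helper name and variable are invented):

/* Hypothetical machine-driver helper; "mcbsp_dai" is whichever McBSP CPU DAI the card uses. */
static int example_set_mcbsp_fmt(struct snd_soc_dai *mcbsp_dai)
{
	/* Ends up in omap_mcbsp_dai_set_dai_fmt() above through the DAI ops */
	return snd_soc_dai_set_fmt(mcbsp_dai,
				   SND_SOC_DAIFMT_I2S |     /* I2S, 1-bit data delay */
				   SND_SOC_DAIFMT_NB_NF |   /* normal BCLK and FS polarity */
				   SND_SOC_DAIFMT_CBS_CFS); /* McBSP generates BCLK and FS */
}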
static int __init omap_hsmmc_probe(struct platform_device *pdev)
{
	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
	struct mmc_host *mmc;
	struct omap_hsmmc_host *host = NULL;
	struct resource *res;
	int ret = 0, irq;

	if (pdata == NULL) {
		dev_err(&pdev->dev, "Platform Data is missing\n");
		return -ENXIO;
	}

	if (pdata->nr_slots == 0) {
		dev_err(&pdev->dev, "No Slots\n");
		return -ENXIO;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (res == NULL || irq < 0)
		return -ENXIO;

	res = request_mem_region(res->start, res->end - res->start + 1,
							pdev->name);
	if (res == NULL)
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		goto err;
	}

	host		= mmc_priv(mmc);
	host->mmc	= mmc;
	host->pdata	= pdata;
	host->dev	= &pdev->dev;
	host->use_dma	= 1;
	host->dev->dma_mask = &pdata->dma_mask;
	host->dma_ch	= -1;
	host->irq	= irq;
	host->id	= pdev->id;
	host->slot_id	= 0;
	host->mapbase	= res->start;
	host->base	= ioremap(host->mapbase, SZ_4K);
	host->power_mode = -1;

	platform_set_drvdata(pdev, host);
	INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);

	if (mmc_slot(host).power_saving)
		mmc->ops	= &omap_hsmmc_ps_ops;
	else
		mmc->ops	= &omap_hsmmc_ops;

	mmc->f_min	= 400000;
	mmc->f_max	= 52000000;

	sema_init(&host->sem, 1);
	spin_lock_init(&host->irq_lock);

	host->iclk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(host->iclk)) {
		ret = PTR_ERR(host->iclk);
		host->iclk = NULL;
		goto err1;
	}
	host->fclk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(host->fclk)) {
		ret = PTR_ERR(host->fclk);
		host->fclk = NULL;
		clk_put(host->iclk);
		goto err1;
	}

	omap_hsmmc_context_save(host);

	mmc->caps |= MMC_CAP_DISABLE;
	mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT);
	/* we start off in DISABLED state */
	host->dpm_state = DISABLED;

	if (mmc_host_enable(host->mmc) != 0) {
		clk_put(host->iclk);
		clk_put(host->fclk);
		goto err1;
	}

	if (clk_enable(host->iclk) != 0) {
		mmc_host_disable(host->mmc);
		clk_put(host->iclk);
		clk_put(host->fclk);
		goto err1;
	}

	if (cpu_is_omap2430()) {
		host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
		/*
		 * MMC can still work without debounce clock.
		 */
		if (IS_ERR(host->dbclk))
			dev_warn(mmc_dev(host->mmc),
				"Failed to get debounce clock\n");
		else
			host->got_dbclk = 1;

		if (host->got_dbclk)
			if (clk_enable(host->dbclk) != 0)
				dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
							" clk failed\n");
	}

	/* Since we do only SG emulation, we can have as many segs
	 * as we want. */
	mmc->max_phys_segs = 1024;
	mmc->max_hw_segs = 1024;

	mmc->max_blk_size = 512;       /* Block Length at max can be 1024 */
	mmc->max_blk_count = 0xFFFF;    /* No. of Blocks is 16 bits */
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
	mmc->max_seg_size = mmc->max_req_size;

	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED |
		     MMC_CAP_WAIT_WHILE_BUSY;

	if (mmc_slot(host).wires >= 8)
		mmc->caps |= MMC_CAP_8_BIT_DATA;
	else if (mmc_slot(host).wires >= 4)
		mmc->caps |= MMC_CAP_4_BIT_DATA;

	if (mmc_slot(host).nonremovable)
		mmc->caps |= MMC_CAP_NONREMOVABLE;

	omap_hsmmc_conf_bus_power(host);

	/* Select DMA lines */
	switch (host->id) {
	case OMAP_MMC1_DEVID:
		host->dma_line_tx = OMAP24XX_DMA_MMC1_TX;
		host->dma_line_rx = OMAP24XX_DMA_MMC1_RX;
		break;
	case OMAP_MMC2_DEVID:
		host->dma_line_tx = OMAP24XX_DMA_MMC2_TX;
		host->dma_line_rx = OMAP24XX_DMA_MMC2_RX;
		break;
	case OMAP_MMC3_DEVID:
		host->dma_line_tx = OMAP34XX_DMA_MMC3_TX;
		host->dma_line_rx = OMAP34XX_DMA_MMC3_RX;
		break;
	case OMAP_MMC4_DEVID:
		host->dma_line_tx = OMAP44XX_DMA_MMC4_TX;
		host->dma_line_rx = OMAP44XX_DMA_MMC4_RX;
		break;
	case OMAP_MMC5_DEVID:
		host->dma_line_tx = OMAP44XX_DMA_MMC5_TX;
		host->dma_line_rx = OMAP44XX_DMA_MMC5_RX;
		break;
	default:
		dev_err(mmc_dev(host->mmc), "Invalid MMC id\n");
		goto err_irq;
	}

	/* Request IRQ for MMC operations */
	ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");
		goto err_irq;
	}

	/* initialize power supplies, gpios, etc */
	if (pdata->init != NULL) {
		if (pdata->init(&pdev->dev) != 0) {
			dev_dbg(mmc_dev(host->mmc),
				"Unable to configure MMC IRQs\n");
			goto err_irq_cd_init;
		}
	}
	mmc->ocr_avail = mmc_slot(host).ocr_mask;

	/* Request IRQ for card detect */
	if ((mmc_slot(host).card_detect_irq)) {
		ret = request_irq(mmc_slot(host).card_detect_irq,
				  omap_hsmmc_cd_handler,
				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
					  | IRQF_DISABLED,
				  mmc_hostname(mmc), host);
		if (ret) {
			dev_dbg(mmc_dev(host->mmc),
				"Unable to grab MMC CD IRQ\n");
			goto err_irq_cd;
		}
	}

	OMAP_HSMMC_WRITE(host->base, ISE, INT_EN_MASK);
	OMAP_HSMMC_WRITE(host->base, IE, INT_EN_MASK);

	mmc_host_lazy_disable(host->mmc);

	omap_hsmmc_protect_card(host);

	mmc_add_host(mmc);

	if (mmc_slot(host).name != NULL) {
		ret = device_create_file(&mmc->class_dev, &dev_attr_slot_name);
		if (ret < 0)
			goto err_slot_name;
	}
	if (mmc_slot(host).card_detect_irq && mmc_slot(host).get_cover_state) {
		ret = device_create_file(&mmc->class_dev,
					&dev_attr_cover_switch);
		if (ret < 0)
			goto err_cover_switch;
	}

	omap_hsmmc_debugfs(mmc);

	return 0;

err_cover_switch:
	device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
err_slot_name:
	mmc_remove_host(mmc);
err_irq_cd:
	free_irq(mmc_slot(host).card_detect_irq, host);
err_irq_cd_init:
	free_irq(host->irq, host);
err_irq:
	mmc_host_disable(host->mmc);
	clk_disable(host->iclk);
	clk_put(host->fclk);
	clk_put(host->iclk);
	if (host->got_dbclk) {
		clk_disable(host->dbclk);
		clk_put(host->dbclk);
	}

err1:
	iounmap(host->base);
err:
	dev_dbg(&pdev->dev, "Probe failed\n"); /* host may be NULL at this point */
	release_mem_region(res->start, res->end - res->start + 1);
	if (host)
		mmc_free_host(mmc);
	return ret;
}
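The probe path reads per-slot platform data through the mmc_slot() shorthand used throughout the function; a plausible definition, assumed here for reference rather than quoted from the driver:

/* Sketch only: likely shorthand for the active slot's platform data. */
#define mmc_slot(host)		(host->pdata->slots[host->slot_id])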
static int omap_i2c_init(struct omap_i2c_dev *dev)
{
	u16 psc = 0, scll = 0, sclh = 0, buf = 0;
	u16 fsscll = 0, fssclh = 0, hsscll = 0, hssclh = 0;
	unsigned long fclk_rate = 12000000;
	unsigned long timeout;
	unsigned long internal_clk = 0;
	struct clk *fclk;

	if (dev->rev >= OMAP_I2C_REV_2) {
		/* Disable I2C controller before soft reset */
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
			omap_i2c_read_reg(dev, OMAP_I2C_CON_REG) &
				~(OMAP_I2C_CON_EN));

		omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG, SYSC_SOFTRESET_MASK);
		/* For some reason we need to set the EN bit before the
		 * reset done bit gets set. */
		timeout = jiffies + OMAP_I2C_TIMEOUT;
		omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);
		while (!(omap_i2c_read_reg(dev, OMAP_I2C_SYSS_REG) &
			 SYSS_RESETDONE_MASK)) {
			if (time_after(jiffies, timeout)) {
				dev_warn(dev->dev, "timeout waiting "
						"for controller reset\n");
				return -ETIMEDOUT;
			}
			msleep(1);
		}

		/* SYSC register is cleared by the reset; rewrite it */
		if (dev->rev == OMAP_I2C_REV_ON_2430) {

			omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
					   SYSC_AUTOIDLE_MASK);

		} else if (dev->rev >= OMAP_I2C_REV_ON_3430) {
			dev->syscstate = SYSC_AUTOIDLE_MASK;
			dev->syscstate |= SYSC_ENAWAKEUP_MASK;
			dev->syscstate |= (SYSC_IDLEMODE_SMART <<
			      __ffs(SYSC_SIDLEMODE_MASK));
			dev->syscstate |= (SYSC_CLOCKACTIVITY_FCLK <<
			      __ffs(SYSC_CLOCKACTIVITY_MASK));

			omap_i2c_write_reg(dev, OMAP_I2C_SYSC_REG,
							dev->syscstate);
			/*
			 * Enable all wakeup sources to stop the I2C block from
			 * freezing on a WFI instruction.
			 * REVISIT: Some wakeup sources might not be needed.
			 */
			dev->westate = OMAP_I2C_WE_ALL;
			omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate);
		}
	}
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);

	if (cpu_class_is_omap1()) {
		/*
		 * The I2C functional clock is the armxor_ck, so there's
		 * no need to get "armxor_ck" separately.  Now, if OMAP2420
		 * always returns 12MHz for the functional clock, we can
		 * do this bit unconditionally.
		 */
		fclk = clk_get(dev->dev, "fck");
		fclk_rate = clk_get_rate(fclk);
		clk_put(fclk);

		/* TRM for 5912 says the I2C clock must be prescaled to be
		 * between 7 - 12 MHz. The XOR input clock is typically
		 * 12, 13 or 19.2 MHz. So we should have code that produces:
		 *
		 * XOR MHz	Divider		Prescaler
		 * 12		1		0
		 * 13		2		1
		 * 19.2		2		1
		 */
		if (fclk_rate > 12000000)
			psc = fclk_rate / 12000000;
	}

	if (!(cpu_class_is_omap1() || cpu_is_omap2420())) {

		/*
		 * The HS I2C controller's internal clock rate should be 19.2 MHz
		 * for HS mode and for all modes on the 2430. On 34xx we can use
		 * a lower rate to get a longer filter period for better noise
		 * suppression. The filter period is one iclk (fclk for HS) cycle.
		 */
		if (dev->speed > 400 || cpu_is_omap2430())
			internal_clk = 19200;
		else if (dev->speed > 100)
			internal_clk = 9600;
		else
			internal_clk = 4000;
		fclk = clk_get(dev->dev, "fck");
		fclk_rate = clk_get_rate(fclk) / 1000;
		clk_put(fclk);

		/* Compute prescaler divisor */
		psc = fclk_rate / internal_clk;
		psc = psc - 1;

		/* If configured for High Speed */
		if (dev->speed > 400) {
			unsigned long scl;

			/* For first phase of HS mode */
			scl = internal_clk / 400;
			fsscll = scl - (scl / 3) - 7;
			fssclh = (scl / 3) - 5;

			/* For second phase of HS mode */
			scl = fclk_rate / dev->speed;
			hsscll = scl - (scl / 3) - 7;
			hssclh = (scl / 3) - 5;
		} else if (dev->speed > 100) {
			unsigned long scl;

			/* Fast mode */
			scl = internal_clk / dev->speed;
			fsscll = scl - (scl / 3) - 7;
			fssclh = (scl / 3) - 5;
		} else {
			/* Standard mode */
			fsscll = internal_clk / (dev->speed * 2) - 7;
			fssclh = internal_clk / (dev->speed * 2) - 5;
		}
		scll = (hsscll << OMAP_I2C_SCLL_HSSCLL) | fsscll;
		sclh = (hssclh << OMAP_I2C_SCLH_HSSCLH) | fssclh;
	} else {
		/* Program desired operating rate */
		fclk_rate /= (psc + 1) * 1000;
		if (psc > 2)
			psc = 2;
		scll = fclk_rate / (dev->speed * 2) - 7 + psc;
		sclh = fclk_rate / (dev->speed * 2) - 7 + psc;
	}

	/* Setup clock prescaler to obtain approx 12MHz I2C module clock: */
	omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, psc);

	/* SCL low and high time values */
	omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, scll);
	omap_i2c_write_reg(dev, OMAP_I2C_SCLH_REG, sclh);

	if (dev->fifo_size) {
		/* Note: program the required FIFO size minus 1 into RTRSH and XTRSH */
		buf = (dev->fifo_size - 1) << 8 | OMAP_I2C_BUF_RXFIF_CLR |
			(dev->fifo_size - 1) | OMAP_I2C_BUF_TXFIF_CLR;
		omap_i2c_write_reg(dev, OMAP_I2C_BUF_REG, buf);
	}

	/* Take the I2C module out of reset: */
	omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, OMAP_I2C_CON_EN);

	dev->errata = 0;

	if (cpu_is_omap2430() || cpu_is_omap34xx())
		dev->errata |= I2C_OMAP_ERRATA_I207;

	/* Enable interrupts */
	dev->iestate = (OMAP_I2C_IE_XRDY | OMAP_I2C_IE_RRDY |
			OMAP_I2C_IE_ARDY | OMAP_I2C_IE_NACK |
			OMAP_I2C_IE_AL)  | ((dev->fifo_size) ?
				(OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
	omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
	if (cpu_is_omap34xx()) {
		dev->pscstate = psc;
		dev->scllstate = scll;
		dev->sclhstate = sclh;
		dev->bufstate = buf;
	}
	return 0;
}
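To make the timing math in the non-OMAP1/non-2420 branch concrete, here is a worked example for standard-mode 100 kHz operation. The 96 MHz functional clock is an assumption (a typical OMAP3 I2C fclk); everything else follows from the code above:

/* Worked example of the standard-mode path above (fclk assumed to be 96 MHz). */
static void example_i2c_std_mode_timings(void)
{
	unsigned long fclk_rate = 96000;	/* kHz, assumed functional clock */
	unsigned long speed = 100;		/* kHz, standard mode */
	unsigned long internal_clk = 4000;	/* kHz, chosen when speed <= 100 */
	unsigned int psc, scll, sclh;

	psc  = fclk_rate / internal_clk - 1;	/* 23: 96 MHz / 24 = 4 MHz module clock */
	scll = internal_clk / (speed * 2) - 7;	/* fsscll = 13 */
	sclh = internal_clk / (speed * 2) - 5;	/* fssclh = 15 */

	pr_info("psc=%u scll=%u sclh=%u\n", psc, scll, sclh);
}

Assuming the hardware adds back the fixed offsets of 7 and 5 ticks, the low and high phases total 40 ticks of the 4 MHz module clock, i.e. a 10 µs SCL period, which is the requested 100 kHz.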
Example #28
static void __init prcm_setup_regs(void)
{
	int i, num_mem_banks;
	struct powerdomain *pwrdm;

	/* Enable autoidle */
	prm_write_mod_reg(OMAP24XX_AUTOIDLE, OCP_MOD,
				OMAP24XX_PRM_SYSCONFIG_OFFSET);

	/* Set all domain wakeup dependencies */
	prm_write_mod_reg(OMAP_EN_WKUP_MASK, MPU_MOD, PM_WKDEP);
	prm_write_mod_reg(0, OMAP24XX_DSP_MOD, PM_WKDEP);
	prm_write_mod_reg(0, GFX_MOD, PM_WKDEP);
	prm_write_mod_reg(0, CORE_MOD, PM_WKDEP);
	if (cpu_is_omap2430())
		prm_write_mod_reg(0, OMAP2430_MDM_MOD, PM_WKDEP);

	/*
	 * Set CORE powerdomain memory banks to retain their contents
	 * during RETENTION
	 */
	num_mem_banks = pwrdm_get_mem_bank_count(core_pwrdm);
	for (i = 0; i < num_mem_banks; i++)
		pwrdm_set_mem_retst(core_pwrdm, i, PWRDM_POWER_RET);

	/* Set CORE powerdomain's next power state to RETENTION */
	pwrdm_set_next_pwrst(core_pwrdm, PWRDM_POWER_RET);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Force-power down DSP, GFX powerdomains */

	pwrdm = clkdm_get_pwrdm(dsp_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	omap2_clkdm_sleep(dsp_clkdm);

	pwrdm = clkdm_get_pwrdm(gfx_clkdm);
	pwrdm_set_next_pwrst(pwrdm, PWRDM_POWER_OFF);
	omap2_clkdm_sleep(gfx_clkdm);

	/* Enable clockdomain hardware-supervised control for all clkdms */
	clkdm_for_each(_pm_clkdm_enable_hwsup, NULL);

	/* Enable clock autoidle for all domains */
	cm_write_mod_reg(OMAP24XX_AUTO_CAM |
			 OMAP24XX_AUTO_MAILBOXES |
			 OMAP24XX_AUTO_WDT4 |
			 OMAP2420_AUTO_WDT3 |
			 OMAP24XX_AUTO_MSPRO |
			 OMAP2420_AUTO_MMC |
			 OMAP24XX_AUTO_FAC |
			 OMAP2420_AUTO_EAC |
			 OMAP24XX_AUTO_HDQ |
			 OMAP24XX_AUTO_UART2 |
			 OMAP24XX_AUTO_UART1 |
			 OMAP24XX_AUTO_I2C2 |
			 OMAP24XX_AUTO_I2C1 |
			 OMAP24XX_AUTO_MCSPI2 |
			 OMAP24XX_AUTO_MCSPI1 |
			 OMAP24XX_AUTO_MCBSP2 |
			 OMAP24XX_AUTO_MCBSP1 |
			 OMAP24XX_AUTO_GPT12 |
			 OMAP24XX_AUTO_GPT11 |
			 OMAP24XX_AUTO_GPT10 |
			 OMAP24XX_AUTO_GPT9 |
			 OMAP24XX_AUTO_GPT8 |
			 OMAP24XX_AUTO_GPT7 |
			 OMAP24XX_AUTO_GPT6 |
			 OMAP24XX_AUTO_GPT5 |
			 OMAP24XX_AUTO_GPT4 |
			 OMAP24XX_AUTO_GPT3 |
			 OMAP24XX_AUTO_GPT2 |
			 OMAP2420_AUTO_VLYNQ |
			 OMAP24XX_AUTO_DSS,
			 CORE_MOD, CM_AUTOIDLE1);
	cm_write_mod_reg(OMAP24XX_AUTO_UART3 |
			 OMAP24XX_AUTO_SSI |
			 OMAP24XX_AUTO_USB,
			 CORE_MOD, CM_AUTOIDLE2);
	cm_write_mod_reg(OMAP24XX_AUTO_SDRC |
			 OMAP24XX_AUTO_GPMC |
			 OMAP24XX_AUTO_SDMA,
			 CORE_MOD, CM_AUTOIDLE3);
	cm_write_mod_reg(OMAP24XX_AUTO_PKA |
			 OMAP24XX_AUTO_AES |
			 OMAP24XX_AUTO_RNG |
			 OMAP24XX_AUTO_SHA |
			 OMAP24XX_AUTO_DES,
			 CORE_MOD, OMAP24XX_CM_AUTOIDLE4);

	cm_write_mod_reg(OMAP2420_AUTO_DSP_IPI, OMAP24XX_DSP_MOD, CM_AUTOIDLE);

	/* Put DPLL and both APLLs into autoidle mode */
	cm_write_mod_reg((0x03 << OMAP24XX_AUTO_DPLL_SHIFT) |
			 (0x03 << OMAP24XX_AUTO_96M_SHIFT) |
			 (0x03 << OMAP24XX_AUTO_54M_SHIFT),
			 PLL_MOD, CM_AUTOIDLE);

	cm_write_mod_reg(OMAP24XX_AUTO_OMAPCTRL |
			 OMAP24XX_AUTO_WDT1 |
			 OMAP24XX_AUTO_MPU_WDT |
			 OMAP24XX_AUTO_GPIOS |
			 OMAP24XX_AUTO_32KSYNC |
			 OMAP24XX_AUTO_GPT1,
			 WKUP_MOD, CM_AUTOIDLE);

	/* REVISIT: Configure number of 32 kHz clock cycles for sys_clk
	 * stabilisation */
	prm_write_mod_reg(15 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
					OMAP24XX_PRCM_CLKSSETUP_OFFSET);

	/* Configure automatic voltage transition */
	prm_write_mod_reg(2 << OMAP_SETUP_TIME_SHIFT, OMAP24XX_GR_MOD,
					OMAP24XX_PRCM_VOLTSETUP_OFFSET);
	prm_write_mod_reg(OMAP24XX_AUTO_EXTVOLT |
		      (0x1 << OMAP24XX_SETOFF_LEVEL_SHIFT) |
		      OMAP24XX_MEMRETCTRL |
		      (0x1 << OMAP24XX_SETRET_LEVEL_SHIFT) |
		      (0x0 << OMAP24XX_VOLT_LEVEL_SHIFT),
		      OMAP24XX_GR_MOD, OMAP24XX_PRCM_VOLTCTRL_OFFSET);

	/* Enable wake-up events */
	prm_write_mod_reg(OMAP24XX_EN_GPIOS | OMAP24XX_EN_GPT1,
			  WKUP_MOD, PM_WKEN);
}
Example #29
/*
 * This must be called before _set_clkdiv and _set_sysclk since McBSP register
 * cache is initialized here
 */
static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
				      unsigned int fmt)
{
	struct omap_mcbsp_data *mcbsp_data = snd_soc_dai_get_drvdata(cpu_dai);
	struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs;
	bool inv_fs = false;

	if (mcbsp_data->configured)
		return 0;

	mcbsp_data->fmt = fmt;
	memset(regs, 0, sizeof(*regs));
	/* Generic McBSP register settings */
	regs->spcr2	|= XINTM(3) | FREE;
	regs->spcr1	|= RINTM(3);
	/* RFIG and XFIG are not defined in 34xx */
	if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) {
		regs->rcr2	|= RFIG;
		regs->xcr2	|= XFIG;
	}
	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
		regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
		regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		/* 1-bit data delay */
		regs->rcr2	|= RDATDLY(1);
		regs->xcr2	|= XDATDLY(1);
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		/* 0-bit data delay */
		regs->rcr2	|= RDATDLY(0);
		regs->xcr2	|= XDATDLY(0);
		regs->spcr1	|= RJUST(2);
		/* Invert FS polarity configuration */
		inv_fs = true;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		/* 1-bit data delay */
		regs->rcr2      |= RDATDLY(1);
		regs->xcr2      |= XDATDLY(1);
		/* Invert FS polarity configuration */
		inv_fs = true;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		/* 0-bit data delay */
		regs->rcr2      |= RDATDLY(0);
		regs->xcr2      |= XDATDLY(0);
		/* Invert FS polarity configuration */
		inv_fs = true;
		break;
	default:
		/* Unsupported data format */
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* McBSP master. Set FS and bit clocks as outputs */
		regs->pcr0	|= FSXM | FSRM |
				   CLKXM | CLKRM;
		/* Sample rate generator drives the FS */
		regs->srgr2	|= FSGM;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		/* McBSP slave */
		break;
	default:
		/* Unsupported master/slave configuration */
		return -EINVAL;
	}

	/* Set bit clock (CLKX/CLKR) and FS polarities */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		/*
		 * Normal BCLK + FS.
		 * FS active low. TX data driven on falling edge of bit clock
		 * and RX data sampled on rising edge of bit clock.
		 */
		regs->pcr0	|= FSXP | FSRP |
				   CLKXP | CLKRP;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		regs->pcr0	|= CLKXP | CLKRP;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		regs->pcr0	|= FSXP | FSRP;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		break;
	default:
		return -EINVAL;
	}
	if (inv_fs == true)
		regs->pcr0 ^= FSXP | FSRP;

	return 0;
}
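The function above only fills the driver's register cache; nothing touches the hardware here. Presumably a later callback (typically hw_params) pushes the cached image out through the plat-omap helper omap_mcbsp_config(). A hedged sketch of that hand-off (the wrapper name and the bus_id plumbing are assumptions):

/* Sketch: assumed hand-off of the cached McBSP register image to the hardware. */
static void example_apply_mcbsp_regs(struct omap_mcbsp_data *mcbsp_data,
				     unsigned int bus_id)
{
	/* Writes SPCR/PCR/RCR/XCR/SRGR values from the cache into the McBSP block */
	omap_mcbsp_config(bus_id, &mcbsp_data->regs);
	mcbsp_data->configured = 1;
}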
Example #30
static unsigned configure_dma_errata(void)
{
	unsigned errata = 0;

	/*
	 * Errata applicable to OMAP2430 ES1.0 and all OMAP2420 devices.
	 *
	 * I.
	 * Erratum ID: Not Available
	 * Inter-frame DMA buffering issue: DMA will wrongly buffer elements
	 * if packing and bursting is enabled. This might result in data
	 * getting stalled in the FIFO at the end of the block.
	 * Workaround: DMA channels must have the BUFFERING_DISABLED bit set
	 * to guarantee that no data stays in the DMA FIFO in case inter-frame
	 * buffering occurs.
	 *
	 * II.
	 * Erratum ID: Not Available
	 * DMA may hang when several channels are used in parallel.
	 * In the following configuration, DMA channel hanging can occur:
	 * a. Channel i, hardware synchronized, is enabled.
	 * b. Another channel (Channel x), software synchronized, is enabled.
	 * c. Channel i is disabled before the end of the transfer.
	 * d. Channel i is re-enabled.
	 * e. Steps a to d are repeated a certain number of times.
	 * f. A third channel (Channel y), software synchronized, is enabled.
	 * Channel x and Channel y may hang immediately after step 'f'.
	 * Workaround:
	 * For any channel used - make sure NextLCH_ID is set to the value j.
	 */
	if (cpu_is_omap2420() || (cpu_is_omap2430() &&
				(omap_type() == OMAP2430_REV_ES1_0))) {

		SET_DMA_ERRATA(DMA_ERRATA_IFRAME_BUFFERING);
		SET_DMA_ERRATA(DMA_ERRATA_PARALLEL_CHANNELS);
	}

	/*
	 * Erratum ID: i378: OMAP2+: sDMA Channel is not disabled
	 * after a transaction error.
	 * Workaround: software should explicitly disable the channel.
	 */
	if (cpu_class_is_omap2())
		SET_DMA_ERRATA(DMA_ERRATA_i378);

	/*
	 * Erratum ID: i541: sDMA FIFO draining does not finish
	 * If an sDMA channel is disabled on the fly, sDMA enters standby even
	 * though FIFO draining is still in progress.
	 * Workaround: Put sDMA in NoStandby mode before a logical channel is
	 * disabled, then put it back to SmartStandby right after the channel
	 * finishes FIFO draining.
	 */
	if (cpu_is_omap34xx())
		SET_DMA_ERRATA(DMA_ERRATA_i541);

	/*
	 * Erratum ID: i88: Special programming model needed to disable DMA
	 * before the end of a block.
	 * Workaround: software must ensure that the DMA is configured in No
	 * Standby mode (DMAx_OCP_SYSCONFIG.MIDLEMODE = "01").
	 */
	if (omap_type() == OMAP3430_REV_ES1_0)
		SET_DMA_ERRATA(DMA_ERRATA_i88);

	/*
	 * Erratum 3.2/3.3: sometimes 0 is returned if CSAC/CDAC is
	 * read before the DMA controller finished disabling the channel.
	 */
	SET_DMA_ERRATA(DMA_ERRATA_3_3);

	/*
	 * Erratum ID: Not Available
	 * A bug in the ROM code leaves the IRQ status for channels 0 and 1
	 * uncleared after a secure SRAM context save and restore.
	 * Workaround: manually clear those IRQs to avoid spurious interrupts.
	 * This affects only secure devices.
	 */
	if (cpu_is_omap34xx() && (omap_type() != OMAP2_DEVICE_TYPE_GP))
		SET_DMA_ERRATA(DMA_ROMCODE_BUG);

	return errata;
}
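configure_dma_errata() only accumulates flag bits; the SET_DMA_ERRATA helper it uses, and the matching test macro, are presumably simple bit-mask operations on the errata word, along these lines (a sketch, not the driver's verbatim definitions):

/* Sketch only: plausible bit-mask helpers over the "errata" flag word. */
#define SET_DMA_ERRATA(id)	(errata |= (id))
#define IS_DMA_ERRATA(id)	(errata & (id))

The rest of the DMA code can then gate each workaround with a check such as if (IS_DMA_ERRATA(DMA_ERRATA_i541)) before changing the standby mode.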