void __init omap3xxx_powerdomains_init(void)
{
	unsigned int rev;

	if (!cpu_is_omap34xx() && !cpu_is_ti81xx())
		return;

	/* Only 81xx needs custom pwrdm_operations */
	if (!cpu_is_ti81xx())
		pwrdm_register_platform_funcs(&omap3_pwrdm_operations);

	rev = omap_rev();

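	/* AM35xx and TI81xx variants are told apart here by silicon revision */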
	if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
		pwrdm_register_pwrdms(powerdomains_am35x);
	} else if (rev == TI8148_REV_ES1_0 || rev == TI8148_REV_ES2_0 ||
		   rev == TI8148_REV_ES2_1) {
		pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations);
		pwrdm_register_pwrdms(powerdomains_ti814x);
	} else if (rev == TI8168_REV_ES1_0 || rev == TI8168_REV_ES1_1
			|| rev == TI8168_REV_ES2_0 || rev == TI8168_REV_ES2_1) {
		pwrdm_register_platform_funcs(&ti81xx_pwrdm_operations);
		pwrdm_register_pwrdms(powerdomains_ti816x);
	} else {
		pwrdm_register_pwrdms(powerdomains_omap3430_common);

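		/* add the ES-revision-specific powerdomains on top of the common set */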
		switch (rev) {
		case OMAP3430_REV_ES1_0:
			pwrdm_register_pwrdms(powerdomains_omap3430es1);
			break;
		case OMAP3430_REV_ES2_0:
		case OMAP3430_REV_ES2_1:
		case OMAP3430_REV_ES3_0:
		case OMAP3630_REV_ES1_0:
			pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
			break;
		case OMAP3430_REV_ES3_1:
		case OMAP3430_REV_ES3_1_2:
		case OMAP3630_REV_ES1_1:
		case OMAP3630_REV_ES1_2:
			pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
			break;
		default:
			WARN(1, "OMAP3 powerdomain init: unknown chip type\n");
		}
	}

	pwrdm_complete_init();
}
Example 2
/* Resets clock rates and reboots the system. Only called from system.h */
void omap_prcm_arch_reset(char mode, const char *cmd)
{
	s16 prcm_offs = 0;

	if (cpu_is_omap24xx()) {
		omap2xxx_clk_prepare_for_reboot();

		prcm_offs = WKUP_MOD;
	} else if (cpu_is_omap34xx()) {
		prcm_offs = OMAP3430_GR_MOD;
		omap3_ctrl_write_boot_mode((cmd ? (u8)*cmd : 0));
	} else if (cpu_is_omap44xx()) {
		omap4_prm_global_warm_sw_reset(); /* never returns */
	} else if (cpu_is_ti81xx()) {
		omap2_prm_set_mod_reg_bits(TI81XX_GLOBAL_RST_COLD, prcm_offs,
						TI81XX_PRM_DEVICE_RSTCTRL);
	} else {
		WARN_ON(1);
	}

	/* Ensure the sleep script doesn't run */
	twl4030_remove_script(TWL4030_SLEEP_SCRIPT);

	omap2_prm_set_mod_reg_bits(OMAP_RST_DPLL3_MASK, prcm_offs,
				   OMAP2_RM_RSTCTRL);

	omap2_prm_read_mod_reg(prcm_offs, OMAP2_RM_RSTCTRL); /* OCP barrier */
}
Example 3
void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
{
	struct omap_hwmod		*oh;
	struct platform_device		*pdev;
	struct device			*dev;
	int				bus_id = -1;
	const char			*oh_name, *name;
	struct omap_musb_board_data	*board_data;

	if (musb_board_data)
		board_data = musb_board_data;
	else
		board_data = &musb_default_board_data;

	/*
	 * REVISIT: This line can be removed once all the platforms using
	 * musb_core.c have been converted to use clkdev.
	 */
	musb_plat.clock = "ick";
	musb_plat.board_data = board_data;
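	/* the MUSB core expects .power in units of 2 mA; board data supplies mA */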
	musb_plat.power = board_data->power >> 1;
	musb_plat.mode = board_data->mode;
	musb_plat.extvbus = board_data->extvbus;

	if (cpu_is_omap44xx())
		musb_plat.has_mailbox = true;

	if (soc_is_am35xx()) {
		oh_name = "am35x_otg_hs";
		name = "musb-am35x";
	} else if (cpu_is_ti81xx()) {
		oh_name = "usb_otg_hs";
		name = "musb-ti81xx";
	} else {
		oh_name = "usb_otg_hs";
		name = "musb-omap2430";
	}

	oh = omap_hwmod_lookup(oh_name);
	if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
		 __func__, oh_name))
		return;

	pdev = omap_device_build(name, bus_id, oh, &musb_plat,
				 sizeof(musb_plat));
	if (IS_ERR(pdev)) {
		pr_err("Could not build omap_device for %s %s\n",
						name, oh_name);
		return;
	}

	dev = &pdev->dev;
	get_device(dev);
	dev->dma_mask = &musb_dmamask;
	dev->coherent_dma_mask = musb_dmamask;
	put_device(dev);
}
Example 4
void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
{
	struct omap_hwmod		*oh;
	struct platform_device		*pdev;
	struct device			*dev;
	int				bus_id = -1;
	const char			*oh_name, *name;
	struct omap_musb_board_data	*board_data;

	if (musb_board_data)
		board_data = musb_board_data;
	else
		board_data = &musb_default_board_data;

	/*
	 * REVISIT: This line can be removed once all the platforms using
	 * musb_core.c have been converted to use clkdev.
	 */
	musb_plat.clock = "ick";
	musb_plat.board_data = board_data;
	musb_plat.power = board_data->power >> 1;
	musb_plat.mode = board_data->mode;
	musb_plat.extvbus = board_data->extvbus;

	if (cpu_is_omap3517() || cpu_is_omap3505()) {
		oh_name = "am35x_otg_hs";
		name = "musb-am35x";
	} else if (cpu_is_ti81xx()) {
		oh_name = "usb_otg_hs";
		name = "musb-ti81xx";
	} else {
		oh_name = "usb_otg_hs";
		name = "musb-omap2430";
	}

	oh = omap_hwmod_lookup(oh_name);
	if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
		 __func__, oh_name))
		return;

	pdev = omap_device_build(name, bus_id, oh, &musb_plat,
			       sizeof(musb_plat), NULL, 0, false);
	if (IS_ERR(pdev)) {
		pr_err("Could not build omap_device for %s %s\n",
						name, oh_name);
		return;
	}

	dev = &pdev->dev;
	get_device(dev);
	dev->dma_mask = &musb_dmamask;
	dev->coherent_dma_mask = musb_dmamask;
	put_device(dev);

	if (cpu_is_omap44xx())
		omap4430_phy_init(dev);
}
Example 5
/*
 * Initialize asm_irq_base for entry-macro.S
 */
static inline void omap_irq_base_init(void)
{
	extern void __iomem *omap_irq_base;

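	/* only multi-OMAP images select the base at runtime; single-SoC builds resolve it at compile time */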
#ifdef MULTI_OMAP2
	if (cpu_is_omap24xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP24XX_IC_BASE);
	else if (cpu_is_omap34xx() || cpu_is_ti81xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_IC_BASE);
	else if (cpu_is_omap44xx())
		omap_irq_base = OMAP2_L4_IO_ADDRESS(OMAP44XX_GIC_CPU_BASE);
	else
		pr_err("Could not initialize omap_irq_base\n");
#endif
}
Example 6
int omap_type(void)
{
	static u32 val = OMAP2_DEVICETYPE_MASK;

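	/* the device type is read once and then served from the static cache above */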
	if (val < OMAP2_DEVICETYPE_MASK)
		return val;

	if (cpu_is_omap24xx()) {
		val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
	} else if (cpu_is_ti81xx()) {
		val = omap_ctrl_readl(TI81XX_CONTROL_STATUS);
	} else if (soc_is_am33xx() || soc_is_am43xx()) {
		val = omap_ctrl_readl(AM33XX_CONTROL_STATUS);
	} else if (cpu_is_omap34xx()) {
		val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
	} else if (cpu_is_omap44xx()) {
		val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
	} else if (soc_is_omap54xx() || soc_is_dra7xx()) {
		val = omap_ctrl_readl(OMAP5XXX_CONTROL_STATUS);
		val &= OMAP5_DEVICETYPE_MASK;
		val >>= 6;
		goto out;
	} else {
		pr_err("Cannot detect omap type!\n");
		goto out;
	}

	val &= OMAP2_DEVICETYPE_MASK;
	val >>= 8;

out:
	return val;
}
Example 7
int omap_type(void)
{
	u32 val = 0;

	if (cpu_is_omap24xx()) {
		val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
	} else if (cpu_is_omap34xx()) {
		val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
	} else if (cpu_is_omap44xx()) {
		val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
	} else if (cpu_is_ti81xx()) {
		val = omap_ctrl_readl(TI81XX_CONTROL_STATUS);
	} else {
		pr_err("Cannot detect omap type!\n");
		goto out;
	}

	val &= OMAP2_DEVICETYPE_MASK;
	val >>= 8;

out:
	return val;
}
Example 8
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	struct omap2_hsmmc_info *c;
	int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
	int i;
	u32 reg;

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else {
		control_pbias_offset =
			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK|
			OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	}

	for (c = controllers; c->mmc; c++) {
		struct hsmmc_controller *hc = hsmmc + c->mmc - 1;
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc) {
			pr_debug("MMC%d: no such controller\n", c->mmc);
			continue;
		}
		if (mmc) {
			pr_debug("MMC%d: already configured\n", c->mmc);
			continue;
		}

		mmc = kzalloc(sizeof(struct omap_mmc_platform_data),
			      GFP_KERNEL);
		if (!mmc) {
			pr_err("Cannot allocate memory for mmc device!\n");
			goto done;
		}

		if (cpu_is_ti816x())
			mmc->version = MMC_CTRL_VERSION_2;

		if (c->name)
			strncpy(hc->name, c->name, HSMMC_NAME_LEN);
		else
			snprintf(hc->name, ARRAY_SIZE(hc->name),
				"mmc%islot%i", c->mmc, 1);
		mmc->slots[0].name = hc->name;
		mmc->nr_slots = 1;
		mmc->slots[0].caps = c->caps;
		mmc->slots[0].internal_clock = !c->ext_clock;
		mmc->dma_mask = 0xffffffff;
		if (cpu_is_omap44xx())
			mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
		else
			mmc->reg_offset = 0;

		mmc->get_context_loss_count = hsmmc_get_context_loss;

		mmc->slots[0].switch_pin = c->gpio_cd;
		mmc->slots[0].gpio_wp = c->gpio_wp;

		mmc->slots[0].remux = c->remux;
		mmc->slots[0].init_card = c->init_card;

		if (c->cover_only)
			mmc->slots[0].cover = 1;

		if (c->nonremovable)
			mmc->slots[0].nonremovable = 1;

		if (c->power_saving)
			mmc->slots[0].power_saving = 1;

		if (c->no_off)
			mmc->slots[0].no_off = 1;

		if (c->vcc_aux_disable_is_sleep)
			mmc->slots[0].vcc_aux_disable_is_sleep = 1;

		/* NOTE:  MMC slots should have a Vcc regulator set up.
		 * This may be from a TWL4030-family chip, another
		 * controllable regulator, or a fixed supply.
		 *
		 * temporary HACK: ocr_mask instead of fixed supply
		 */
		if (cpu_is_omap3505() || cpu_is_omap3517())
			mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
						 MMC_VDD_26_27 |
						 MMC_VDD_27_28 |
						 MMC_VDD_29_30 |
						 MMC_VDD_30_31 |
						 MMC_VDD_31_32;
		else
			mmc->slots[0].ocr_mask = c->ocr_mask;

		if (cpu_is_omap3517() || cpu_is_omap3505() || cpu_is_ti81xx())
			mmc->slots[0].set_power = nop_mmc_set_power;
		else
			mmc->slots[0].features |= HSMMC_HAS_PBIAS;

		if ((cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0)) ||
				cpu_is_ti814x())
			mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;

		switch (c->mmc) {
		case 1:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* on-chip level shifting via PBIAS0/PBIAS1 */
				if (cpu_is_omap44xx()) {
					mmc->slots[0].before_set_reg =
						omap4_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap4_hsmmc1_after_set_reg;
				} else {
					mmc->slots[0].before_set_reg =
						omap_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap_hsmmc1_after_set_reg;
				}
			}

			/* Omap3630 HSMMC1 supports only 4-bit */
			if (cpu_is_omap3630() &&
					(c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
				mmc->slots[0].caps = c->caps;
			}
			break;
		case 2:
			if (c->ext_clock)
				c->transceiver = 1;
			if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
			}
			/* FALLTHROUGH */
		case 3:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* off-chip level shifting, or none */
				mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
				mmc->slots[0].after_set_reg = NULL;
			}
			break;
		default:
			pr_err("MMC%d configuration not supported!\n", c->mmc);
			kfree(mmc);
			continue;
		}
		hsmmc_data[c->mmc - 1] = mmc;
	}

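	/* register the populated platform data with the MMC layer; controller count depends on the SoC */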
	if (!cpu_is_ti81xx())
		omap2_init_mmc(hsmmc_data, OMAP34XX_NR_MMC);
	else
		omap2_init_mmc(hsmmc_data, TI81XX_NR_MMC);

	/* pass the device nodes back to board setup code */
	for (c = controllers; c->mmc; c++) {
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc)
			continue;
		c->dev = mmc->dev;
	}

done:
	for (i = 0; i < nr_hsmmc; i++)
		kfree(hsmmc_data[i]);
}
Example 9
void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
{
	struct omap_hwmod		*oh;
	struct platform_device		*pdev;
	struct device			*dev;
	int				bus_id = -1;
	const char			*oh_name, *name;
	struct omap_musb_board_data	*board_data;

	if (musb_board_data)
		board_data = musb_board_data;
	else
		board_data = &musb_default_board_data;

	/*
	 * REVISIT: This line can be removed once all the platforms using
	 * musb_core.c have been converted to use clkdev.
	 */
	musb_plat[0].clock = "ick";
	musb_plat[0].board_data = board_data;
	musb_plat[0].power = board_data->power >> 1;
	musb_plat[0].mode = board_data->mode;
	musb_plat[0].extvbus = board_data->extvbus;

	/*
	 * OMAP3630/AM35x platform has MUSB RTL-1.8 which has the fix for the
	 * issue restricting active endpoints to use first 8K of FIFO space.
	 * This issue restricts the OMAP35x platform to fifo_mode '5'.
	 */
	if (cpu_is_omap3430())
		musb_config.fifo_mode = 5;

	if (cpu_is_omap3517() || cpu_is_omap3505()) {
		oh_name = "am35x_otg_hs";
		name = "musb-am35x";
	} else if (cpu_is_ti81xx() || cpu_is_am33xx()) {
		board_data->set_phy_power = ti81xx_musb_phy_power;
		oh_name = "usb_otg_hs";
		name = "musb-ti81xx";
	} else {
		oh_name = "usb_otg_hs";
		name = "musb-omap2430";
	}

	oh = omap_hwmod_lookup(oh_name);
	if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
		 __func__, oh_name))
		return;

	pdev = omap_device_build(name, bus_id, oh, &musb_plat,
			       sizeof(musb_plat), NULL, 0, false);
	if (IS_ERR(pdev)) {
		pr_err("Could not build omap_device for %s %s\n",
						name, oh_name);
		return;
	}

	dev = &pdev->dev;
	get_device(dev);
	dev->dma_mask = &musb_dmamask;
	dev->coherent_dma_mask = musb_dmamask;
	put_device(dev);

	if (cpu_is_omap44xx())
		omap4430_phy_init(dev);
}
Example 10
void __init omap2_init_common_infrastructure(void)
{
	u8 postsetup_state;

	if (cpu_is_omap242x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2420_hwmod_init();
	} else if (cpu_is_omap243x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2430_hwmod_init();
	} else if (cpu_is_omap34xx()) {
		omap3xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap3xxx_hwmod_init();
	} else if (cpu_is_ti81xx()) {
		ti81xx_powerdomains_init();
		omap2_clockdomains_init();
		ti81xx_hwmod_init();
	} else if (cpu_is_omap44xx()) {
		omap44xx_powerdomains_init();
		omap44xx_clockdomains_init();
		omap44xx_hwmod_init();
	} else {
		pr_err("Could not init hwmod data - unknown SoC\n");
	}

	/* Set the default postsetup state for all hwmods */
#ifdef CONFIG_PM_RUNTIME
	postsetup_state = _HWMOD_STATE_IDLE;
#else
	postsetup_state = _HWMOD_STATE_ENABLED;
#endif
	omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);

	/*
	 * Set the default postsetup state for unusual modules (like
	 * MPU WDT).
	 *
	 * The postsetup_state is not actually used until
	 * omap_hwmod_late_init(), so boards that desire full watchdog
	 * coverage of kernel initialization can reprogram the
	 * postsetup_state between the calls to
	 * omap2_init_common_infra() and omap2_init_common_devices().
	 *
	 * XXX ideally we could detect whether the MPU WDT was currently
	 * enabled here and make this conditional
	 */
	postsetup_state = _HWMOD_STATE_DISABLED;
	omap_hwmod_for_each_by_class("wd_timer",
				     _set_hwmod_postsetup_state,
				     &postsetup_state);

	omap_pm_if_early_init();

	if (cpu_is_omap2420())
		omap2420_clk_init();
	else if (cpu_is_omap2430())
		omap2430_clk_init();
	else if (cpu_is_omap34xx())
		omap3xxx_clk_init();
	else if (cpu_is_ti816x())
		ti816x_clk_init();
	else if (cpu_is_ti814x())
		ti814x_clk_init();
	else if (cpu_is_omap44xx())
		omap4xxx_clk_init();
	else
		pr_err("Could not init clock framework - unknown SoC\n");
}
Example 11
/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels.
 */
static int __devinit cppi41_controller_start(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	void __iomem *reg_base;
	struct usb_pkt_desc *curr_pd;
	unsigned long pd_addr;
	int i;
	struct usb_cppi41_info *cppi_info;

	cppi = container_of(controller, struct cppi41, controller);
	cppi_info = cppi->cppi_info;

	if (cpu_is_ti81xx()) {
		cppi->automode_reg_offs = TI81XX_USB_AUTOREQ_REG;
		cppi->teardown_reg_offs = TI81XX_USB_TEARDOWN_REG;
	} else {
		cppi->automode_reg_offs = USB_AUTOREQ_REG;
		cppi->teardown_reg_offs = USB_TEARDOWN_REG;
	}

	/*
	 * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
	 * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
	 * Similarly, the descriptor size should also be a multiple of 32.
	 */

	/*
	 * Allocate free packet descriptor pool for all Tx/Rx endpoints --
	 * dma_alloc_coherent()  will return a page aligned address, so our
	 * alignment requirement will be honored.
	 */
	cppi->bd_size = USB_CPPI41_MAX_PD * sizeof(struct usb_pkt_desc);
	cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
					  cppi->bd_size,
					  &cppi->pd_mem_phys,
					  GFP_KERNEL | GFP_DMA);
	if (cppi->pd_mem == NULL) {
		DBG(1, "ERROR: packet descriptor memory allocation failed\n");
		return 0;
	}

	if (cppi41_mem_rgn_alloc(cppi_info->q_mgr, cppi->pd_mem_phys,
				 USB_CPPI41_DESC_SIZE_SHIFT,
				 get_count_order(USB_CPPI41_MAX_PD),
				 &cppi->pd_mem_rgn)) {
		DBG(1, "ERROR: queue manager memory region allocation "
		    "failed\n");
		goto free_pds;
	}

	/* Allocate the teardown completion queue */
	if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
			       0, &cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue allocation failed\n");
		goto free_mem_rgn;
	}
	DBG(4, "Allocated teardown completion queue %d in queue manager 0\n",
	    cppi->teardownQNum);

	if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
		DBG(1, "ERROR: teardown completion queue initialization "
		    "failed\n");
		goto free_queue;
	}

	/*
	 * "Slice" PDs one-by-one from the big chunk and
	 * add them to the free pool.
	 */
	curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
	pd_addr = cppi->pd_mem_phys;
	for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
		curr_pd->dma_addr = pd_addr;

		usb_put_free_pd(cppi, curr_pd);
		curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
						  USB_CPPI41_DESC_ALIGN);
		pd_addr += USB_CPPI41_DESC_ALIGN;
	}

	/* Configure the Tx channels */
	for (i = 0, cppi_ch = cppi->tx_cppi_ch;
	     i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
		const struct cppi41_tx_ch *tx_info;

		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->transmit = 1;
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;

		/*
		 * Extract the CPPI 4.1 DMA Tx channel configuration and
		 * construct/store the Tx PD tag info field for later use...
		 */
		tx_info = cppi41_dma_block[cppi_info->dma_block].tx_ch_info
			  + cppi_info->ep_dma_ch[i];
		cppi_ch->src_queue = tx_info->tx_queue[0];
		cppi_ch->tag_info = (tx_info->port_num <<
				     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
				    (tx_info->ch_num <<
				     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
				    (tx_info->sub_ch_num <<
				     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
	}

	/* Configure the Rx channels */
	for (i = 0, cppi_ch = cppi->rx_cppi_ch;
	     i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;
	}

	/* Construct/store Tx PD packet info field for later use */
	cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
			 (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT);

	/* Do necessary configuration in hardware to get started */
	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, cppi->automode_reg_offs, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;

 free_queue:
	if (cppi41_queue_free(0, cppi->teardownQNum))
		DBG(1, "ERROR: failed to free teardown completion queue\n");

 free_mem_rgn:
	if (cppi41_mem_rgn_free(cppi_info->q_mgr, cppi->pd_mem_rgn))
		DBG(1, "ERROR: failed to free queue manager memory region\n");

 free_pds:
	dma_free_coherent(cppi->musb->controller,
			  USB_CPPI41_MAX_PD * USB_CPPI41_DESC_ALIGN,
			  cppi->pd_mem, cppi->pd_mem_phys);

	return 0;
}
Example 12
/*
 * Intercept ioremap() requests for addresses in our fixed mapping regions.
 */
void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
{
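	/* BETWEEN() tests whether p lies in a fixed mapping; XLATE() returns the matching virtual address */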
#ifdef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap1()) {
		if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE))
			return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT);
	}
	if (cpu_is_omap7xx()) {
		if (BETWEEN(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_SIZE))
			return XLATE(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_START);

		if (BETWEEN(p, OMAP7XX_DSPREG_BASE, OMAP7XX_DSPREG_SIZE))
			return XLATE(p, OMAP7XX_DSPREG_BASE,
					OMAP7XX_DSPREG_START);
	}
	if (cpu_is_omap15xx()) {
		if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE))
			return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START);

		if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE))
			return XLATE(p, OMAP1510_DSPREG_BASE,
					OMAP1510_DSPREG_START);
	}
	if (cpu_is_omap16xx()) {
		if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE))
			return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START);

		if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE))
			return XLATE(p, OMAP16XX_DSPREG_BASE,
					OMAP16XX_DSPREG_START);
	}
#endif
#ifdef CONFIG_ARCH_OMAP2
	if (cpu_is_omap24xx()) {
		if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE))
			return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT);
		if (BETWEEN(p, L4_24XX_PHYS, L4_24XX_SIZE))
			return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
	}
	if (cpu_is_omap2420()) {
		if (BETWEEN(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_SIZE))
			return XLATE(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_VIRT);
		if (BETWEEN(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE))
			return XLATE(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_VIRT);
		if (BETWEEN(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_SIZE))
			return XLATE(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_VIRT);
	}
	if (cpu_is_omap2430()) {
		if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
			return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT);
		if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE))
			return XLATE(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_VIRT);
		if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE))
			return XLATE(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_VIRT);
		if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE))
			return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP3
	if (cpu_is_omap34xx()) {
		if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
			return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
		if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
			return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
		if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE))
			return XLATE(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_VIRT);
		if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE))
			return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT);
		if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE))
			return XLATE(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_VIRT);
		if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE))
			return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT);
		if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE))
			return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP4
	if (cpu_is_omap44xx()) {
		if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE))
			return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT);
		if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE))
			return XLATE(p, L4_44XX_PHYS, L4_44XX_VIRT);
		if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE))
			return XLATE(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_VIRT);
		if (BETWEEN(p, OMAP44XX_EMIF1_PHYS, OMAP44XX_EMIF1_SIZE))
			return XLATE(p, OMAP44XX_EMIF1_PHYS,
				     OMAP44XX_EMIF1_VIRT);
		if (BETWEEN(p, OMAP44XX_EMIF2_PHYS, OMAP44XX_EMIF2_SIZE))
			return XLATE(p, OMAP44XX_EMIF2_PHYS,
				     OMAP44XX_EMIF2_VIRT);
		if (BETWEEN(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_SIZE))
			return XLATE(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_VIRT);
		if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE))
			return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT);
		if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE))
			return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_TI81XX
	if (cpu_is_ti81xx()) {
		if (BETWEEN(p, L4_SLOW_TI81XX_PHYS, L4_SLOW_TI81XX_SIZE))
			return XLATE(p, L4_SLOW_TI81XX_PHYS,
					L4_SLOW_TI81XX_VIRT);
		if (BETWEEN(p, TI81XX_L2_MC_PHYS, TI81XX_L2_MC_SIZE))
			return XLATE(p, TI81XX_L2_MC_PHYS, TI81XX_L2_MC_VIRT);
	}
#endif
	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
}