Example #1
void ti81xx_musb_phy_power(u8 on)
{
	void __iomem *scm_base = NULL;
	u32 usbphycfg;

	scm_base = ioremap(TI81XX_SCM_BASE, SZ_2K);
	if (!scm_base) {
		pr_err("system control module ioremap failed\n");
		return;
	}

	usbphycfg = __raw_readl(scm_base + USBCTRL0);

	if (on) {
		if (cpu_is_ti816x()) {
			usbphycfg |= TI816X_USBPHY0_NORMAL_MODE;
			usbphycfg &= ~TI816X_USBPHY_REFCLK_OSC;
		} else if (cpu_is_ti814x()) {
			usbphycfg &= ~(USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN
				| USBPHY_DPINPUT | USBPHY_DMINPUT);
			usbphycfg |= (USBPHY_OTGVDET_EN | USBPHY_OTGSESSEND_EN
				| USBPHY_DPOPBUFCTL | USBPHY_DMOPBUFCTL);
		}
	} else {
		if (cpu_is_ti816x())
			usbphycfg &= ~TI816X_USBPHY0_NORMAL_MODE;
		else if (cpu_is_ti814x())
			usbphycfg |= USBPHY_CM_PWRDN | USBPHY_OTG_PWRDN;

	}
	__raw_writel(usbphycfg, scm_base + USBCTRL0);

	iounmap(scm_base);
}
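
Example #12 below wires this function into the MUSB board data as set_phy_power; the following is a purely illustrative sketch (not taken from the source) of how a glue driver would toggle the PHY through that hook rather than calling ti81xx_musb_phy_power() directly:

/* Illustrative sketch only: the MUSB glue reaches the PHY through the
 * board-data hook that Example #12 points at ti81xx_musb_phy_power(). */
static void musb_glue_phy_power(struct omap_musb_board_data *board_data, u8 on)
{
	if (board_data && board_data->set_phy_power)
		board_data->set_phy_power(on);	/* 1 = power up, 0 = power down */
}
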
static int __init ti81xx_vpss_init(void)
{
	/*FIXME add platform data here*/
	int r;
	if (cpu_is_ti816x() || cpu_is_dm385()) {
		if (cpu_is_dm385())
			vps_pdata.cpu = CPU_DM813X;
		else
			vps_pdata.cpu = CPU_DM816X;
		vps_pdata.numvencs = 4;
		vps_pdata.vencmask = (1 << VPS_DC_MAX_VENC) - 1;
	} else if (cpu_is_ti814x()) {
		vps_pdata.cpu = CPU_DM814X;
		vps_pdata.numvencs = 3;
		vps_pdata.vencmask = (1 << VPS_DC_MAX_VENC) - 1 -
					VPS_DC_VENC_HDCOMP;
	}

	vpss_device.dev.platform_data = &vps_pdata;
	r = platform_device_register(&vpss_device);
	if (r)
		printk(KERN_ERR "unable to register ti81xx_vpss device\n");
	else
		printk(KERN_INFO "registered ti81xx_vpss device\n");
	return r;
}
Example #3
IMG_VOID SysGetSGXTimingInformation(SGX_TIMING_INFORMATION *psTimingInfo)
{
	IMG_UINT32 rate;

#if defined(NO_HARDWARE)
	if (cpu_is_ti816x())
	{
		rate = SYS_389x_SGX_CLOCK_SPEED;
	}
	else
	{
		rate = SYS_387x_SGX_CLOCK_SPEED;
	}

#else
	PVR_ASSERT(atomic_read(&gpsSysSpecificData->sSGXClocksEnabled) != 0);

	rate = clk_get_rate(gpsSysSpecificData->psSGX_FCK);
	PVR_ASSERT(rate != 0);
#endif
	psTimingInfo->ui32CoreClockSpeed = rate;
	psTimingInfo->ui32HWRecoveryFreq = scale_prop_to_SGX_clock(SYS_SGX_HWRECOVERY_TIMEOUT_FREQ, rate);
	psTimingInfo->ui32uKernelFreq = scale_prop_to_SGX_clock(SYS_SGX_PDS_TIMER_FREQ, rate);
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
	psTimingInfo->bEnableActivePM = IMG_TRUE;
#else
	psTimingInfo->bEnableActivePM = IMG_FALSE;
#endif 
	psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS;
}
Example #4
static inline IMG_UINT32 scale_inv_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
{
	if (cpu_is_ti816x())
	{
		return scale_by_rate(val, SYS_389x_SGX_CLOCK_SPEED, rate);
	}
	else
	{
		return scale_by_rate(val, SYS_387x_SGX_CLOCK_SPEED, rate);
	}
}
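
The proportional counterpart used by SysGetSGXTimingInformation() in Example #3 is not shown on this page; by analogy with the inverse helper above and with the stock OMAP SGX system layer, it likely looks as follows. Treat this as an assumption, not the project's exact code:

/* Assumed counterpart of scale_inv_prop_to_SGX_clock(); reconstructed by
 * analogy, so verify against the sysconfig.c of the tree actually in use. */
static inline IMG_UINT32 scale_prop_to_SGX_clock(IMG_UINT32 val, IMG_UINT32 rate)
{
	if (cpu_is_ti816x())
		return scale_by_rate(val, rate, SYS_389x_SGX_CLOCK_SPEED);

	return scale_by_rate(val, rate, SYS_387x_SGX_CLOCK_SPEED);
}
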
Example #5
/**
 * omap2_clksel_round_rate_div() - find divisor for the given clock and rate
 * @clk: OMAP struct clk to use
 * @target_rate: desired clock rate
 * @new_div: ptr to where we should store the divisor
 *
 * Finds 'best' divider value in an array based on the source and target
 * rates.  The divider array must be sorted with smallest divider first.
 * This function is also used by the DPLL3 M2 divider code.
 *
 * Returns the rounded clock rate or returns 0xffffffff on error.
 */
u32 omap2_clksel_round_rate_div(struct clk *clk, unsigned long target_rate,
				u32 *new_div)
{
	unsigned long test_rate;
	const struct clksel *clks;
	const struct clksel_rate *clkr;
	u32 last_div = 0;

	if (!clk->clksel || !clk->clksel_mask)
		return ~0;

	pr_debug("clock: clksel_round_rate_div: %s target_rate %ld\n",
		 clk->name, target_rate);

	*new_div = 1;

	clks = _get_clksel_by_parent(clk, clk->parent);
	if (!clks)
		return ~0;

	for (clkr = clks->rates; clkr->div; clkr++) {
		if (!(clkr->flags & cpu_mask))
			continue;

		/* Sanity check */
		if (clkr->div <= last_div)
			pr_err("clock: clksel_rate table not sorted "
			       "for clock %s", clk->name);

		last_div = clkr->div;

		test_rate = clk->parent->rate / clkr->div;

		if (test_rate <= target_rate)
			break; /* found it */
	}

	if (!clkr->div) {
		if (!cpu_is_ti816x() && !cpu_is_ti814x())
			pr_err("clock: Could not find divisor for target "
			       "rate %ld for clock %s parent %s\n",
				target_rate, clk->name, clk->parent->name);
		return ~0;
	}

	*new_div = clkr->div;

	pr_debug("clock: new_div = %d, new_rate = %ld\n", *new_div,
		 (clk->parent->rate / clkr->div));

	return clk->parent->rate / clkr->div;
}
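
For context, the round_rate hook that the clock framework layers on top of this helper typically just discards the divisor. A sketch matching the OMAP2 clock code of this generation (verify against the tree in use):

/* Sketch of the usual wrapper around omap2_clksel_round_rate_div(). */
long omap2_clksel_round_rate(struct clk *clk, unsigned long target_rate)
{
	u32 new_div;

	return omap2_clksel_round_rate_div(clk, target_rate, &new_div);
}
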
Example #6
void __init omap2_hsmmc_init(struct omap2_hsmmc_info *controllers)
{
	struct omap2_hsmmc_info *c;
	int nr_hsmmc = ARRAY_SIZE(hsmmc_data);
	int i;
	u32 reg;

	if (!cpu_is_omap44xx()) {
		if (cpu_is_omap2430()) {
			control_pbias_offset = OMAP243X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP243X_CONTROL_DEVCONF1;
		} else {
			control_pbias_offset = OMAP343X_CONTROL_PBIAS_LITE;
			control_devconf1_offset = OMAP343X_CONTROL_DEVCONF1;
		}
	} else {
		control_pbias_offset =
			OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_PBIASLITE;
		control_mmc1 = OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_MMC1;
		reg = omap4_ctrl_pad_readl(control_mmc1);
		reg |= (OMAP4_SDMMC1_PUSTRENGTH_GRP0_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP1_MASK);
		reg &= ~(OMAP4_SDMMC1_PUSTRENGTH_GRP2_MASK |
			OMAP4_SDMMC1_PUSTRENGTH_GRP3_MASK);
		reg |= (OMAP4_USBC1_DR0_SPEEDCTRL_MASK|
			OMAP4_SDMMC1_DR1_SPEEDCTRL_MASK |
			OMAP4_SDMMC1_DR2_SPEEDCTRL_MASK);
		omap4_ctrl_pad_writel(reg, control_mmc1);
	}

	for (c = controllers; c->mmc; c++) {
		struct hsmmc_controller *hc = hsmmc + c->mmc - 1;
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc) {
			pr_debug("MMC%d: no such controller\n", c->mmc);
			continue;
		}
		if (mmc) {
			pr_debug("MMC%d: already configured\n", c->mmc);
			continue;
		}

		mmc = kzalloc(sizeof(struct omap_mmc_platform_data),
			      GFP_KERNEL);
		if (!mmc) {
			pr_err("Cannot allocate memory for mmc device!\n");
			goto done;
		}

		if (cpu_is_ti816x())
			mmc->version = MMC_CTRL_VERSION_2;

		if (c->name)
			strncpy(hc->name, c->name, HSMMC_NAME_LEN);
		else
			snprintf(hc->name, ARRAY_SIZE(hc->name),
				"mmc%islot%i", c->mmc, 1);
		mmc->slots[0].name = hc->name;
		mmc->nr_slots = 1;
		mmc->slots[0].caps = c->caps;
		mmc->slots[0].internal_clock = !c->ext_clock;
		mmc->dma_mask = 0xffffffff;
		if (cpu_is_omap44xx())
			mmc->reg_offset = OMAP4_MMC_REG_OFFSET;
		else
			mmc->reg_offset = 0;

		mmc->get_context_loss_count = hsmmc_get_context_loss;

		mmc->slots[0].switch_pin = c->gpio_cd;
		mmc->slots[0].gpio_wp = c->gpio_wp;

		mmc->slots[0].remux = c->remux;
		mmc->slots[0].init_card = c->init_card;

		if (c->cover_only)
			mmc->slots[0].cover = 1;

		if (c->nonremovable)
			mmc->slots[0].nonremovable = 1;

		if (c->power_saving)
			mmc->slots[0].power_saving = 1;

		if (c->no_off)
			mmc->slots[0].no_off = 1;

		if (c->vcc_aux_disable_is_sleep)
			mmc->slots[0].vcc_aux_disable_is_sleep = 1;

		/* NOTE:  MMC slots should have a Vcc regulator set up.
		 * This may be from a TWL4030-family chip, another
		 * controllable regulator, or a fixed supply.
		 *
		 * temporary HACK: ocr_mask instead of fixed supply
		 */
		if (cpu_is_omap3505() || cpu_is_omap3517())
			mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
						 MMC_VDD_26_27 |
						 MMC_VDD_27_28 |
						 MMC_VDD_29_30 |
						 MMC_VDD_30_31 |
						 MMC_VDD_31_32;
		else
			mmc->slots[0].ocr_mask = c->ocr_mask;

		if (cpu_is_omap3517() || cpu_is_omap3505() || cpu_is_ti81xx())
			mmc->slots[0].set_power = nop_mmc_set_power;
		else
			mmc->slots[0].features |= HSMMC_HAS_PBIAS;

		if ((cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0)) ||
				cpu_is_ti814x())
			mmc->slots[0].features |= HSMMC_HAS_UPDATED_RESET;

		switch (c->mmc) {
		case 1:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* on-chip level shifting via PBIAS0/PBIAS1 */
				if (cpu_is_omap44xx()) {
					mmc->slots[0].before_set_reg =
						omap4_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap4_hsmmc1_after_set_reg;
				} else {
					mmc->slots[0].before_set_reg =
						omap_hsmmc1_before_set_reg;
					mmc->slots[0].after_set_reg =
						omap_hsmmc1_after_set_reg;
				}
			}

			/* Omap3630 HSMMC1 supports only 4-bit */
			if (cpu_is_omap3630() &&
					(c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
				mmc->slots[0].caps = c->caps;
			}
			break;
		case 2:
			if (c->ext_clock)
				c->transceiver = 1;
			if (c->transceiver && (c->caps & MMC_CAP_8_BIT_DATA)) {
				c->caps &= ~MMC_CAP_8_BIT_DATA;
				c->caps |= MMC_CAP_4_BIT_DATA;
			}
			/* FALLTHROUGH */
		case 3:
			if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
				/* off-chip level shifting, or none */
				mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
				mmc->slots[0].after_set_reg = NULL;
			}
			break;
		default:
			pr_err("MMC%d configuration not supported!\n", c->mmc);
			kfree(mmc);
			continue;
		}
		hsmmc_data[c->mmc - 1] = mmc;
	}

	if (!cpu_is_ti81xx())
		omap2_init_mmc(hsmmc_data, OMAP34XX_NR_MMC);
	else
		omap2_init_mmc(hsmmc_data, TI81XX_NR_MMC);

	/* pass the device nodes back to board setup code */
	for (c = controllers; c->mmc; c++) {
		struct omap_mmc_platform_data *mmc = hsmmc_data[c->mmc - 1];

		if (!c->mmc || c->mmc > nr_hsmmc)
			continue;
		c->dev = mmc->dev;
	}

done:
	for (i = 0; i < nr_hsmmc; i++)
		kfree(hsmmc_data[i]);
}
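
A board file drives this initializer with a zero-terminated omap2_hsmmc_info array. The sketch below is a hedged usage example; the GPIO values and capabilities are placeholders, not taken from the source above:

/* Hedged usage sketch: the empty terminator entry is what ends the c->mmc
 * loop in omap2_hsmmc_init() above. */
static struct omap2_hsmmc_info board_mmc[] __initdata = {
	{
		.mmc		= 1,
		.caps		= MMC_CAP_4_BIT_DATA,
		.gpio_cd	= -EINVAL,	/* no card-detect GPIO wired */
		.gpio_wp	= -EINVAL,	/* no write-protect GPIO wired */
	},
	{}	/* zero terminator */
};

static void __init board_mmc_init(void)
{
	omap2_hsmmc_init(board_mmc);
}
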
Example #7
/**
 * cppi41_controller_start - start DMA controller
 * @controller: the controller
 *
 * This function initializes the CPPI 4.1 Tx/Rx channels.
 */
static int __devinit cppi41_controller_start(struct dma_controller *controller)
{
	struct cppi41 *cppi;
	struct cppi41_channel *cppi_ch;
	void __iomem *reg_base;
	struct usb_pkt_desc *curr_pd;
	unsigned long pd_addr;
	int i;
	struct usb_cppi41_info *cppi_info;
	struct musb *musb;

	cppi = container_of(controller, struct cppi41, controller);
	cppi_info = cppi->cppi_info;
	musb = cppi->musb;

	if (cpu_is_ti816x() || cpu_is_am33xx()) {
		cppi->automode_reg_offs = TI81XX_USB_AUTOREQ_REG;
		cppi->teardown_reg_offs = TI81XX_USB_TEARDOWN_REG;
	} else {
		cppi->automode_reg_offs = USB_AUTOREQ_REG;
		cppi->teardown_reg_offs = USB_TEARDOWN_REG;
	}

	/*
	 * TODO: We may need to check USB_CPPI41_MAX_PD here since CPPI 4.1
	 * requires the descriptor count to be a multiple of 2 ^ 5 (i.e. 32).
	 * Similarly, the descriptor size should also be a multiple of 32.
	 */

	/*
	 * Allocate free packet descriptor pool for all Tx/Rx endpoints --
	 * dma_alloc_coherent()  will return a page aligned address, so our
	 * alignment requirement will be honored.
	 */
	cppi->bd_size = USB_CPPI41_MAX_PD * sizeof(struct usb_pkt_desc);
	cppi->pd_mem = dma_alloc_coherent(cppi->musb->controller,
					  cppi->bd_size,
					  &cppi->pd_mem_phys,
					  GFP_KERNEL | GFP_DMA);
	if (cppi->pd_mem == NULL) {
		dev_dbg(musb->controller, "ERROR: packet descriptor memory allocation failed\n");
		return 0;
	}

	if (cppi41_mem_rgn_alloc(cppi_info->q_mgr, cppi->pd_mem_phys,
				 USB_CPPI41_DESC_SIZE_SHIFT,
				 get_count_order(USB_CPPI41_MAX_PD),
				 &cppi->pd_mem_rgn)) {
		dev_dbg(musb->controller, "ERROR: queue manager memory region allocation "
		    "failed\n");
		goto free_pds;
	}

	/* Allocate the teardown completion queue */
	if (cppi41_queue_alloc(CPPI41_UNASSIGNED_QUEUE,
			       0, &cppi->teardownQNum)) {
		dev_dbg(musb->controller, "ERROR: teardown completion queue allocation failed\n");
		goto free_mem_rgn;
	}
	dev_dbg(musb->controller, "Allocated teardown completion queue %d in queue manager 0\n",
	    cppi->teardownQNum);

	if (cppi41_queue_init(&cppi->queue_obj, 0, cppi->teardownQNum)) {
		dev_dbg(musb->controller, "ERROR: teardown completion queue initialization "
		    "failed\n");
		goto free_queue;
	}

	/*
	 * "Slice" PDs one-by-one from the big chunk and
	 * add them to the free pool.
	 */
	curr_pd = (struct usb_pkt_desc *)cppi->pd_mem;
	pd_addr = cppi->pd_mem_phys;
	for (i = 0; i < USB_CPPI41_MAX_PD; i++) {
		curr_pd->dma_addr = pd_addr;

		usb_put_free_pd(cppi, curr_pd);
		curr_pd = (struct usb_pkt_desc *)((char *)curr_pd +
						  USB_CPPI41_DESC_ALIGN);
		pd_addr += USB_CPPI41_DESC_ALIGN;
	}

	/* Configure the Tx channels */
	for (i = 0, cppi_ch = cppi->tx_cppi_ch;
	     i < ARRAY_SIZE(cppi->tx_cppi_ch); ++i, ++cppi_ch) {
		const struct cppi41_tx_ch *tx_info;

		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->transmit = 1;
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;

		/*
		 * Extract the CPPI 4.1 DMA Tx channel configuration and
		 * construct/store the Tx PD tag info field for later use...
		 */
		tx_info = cppi41_dma_block[cppi_info->dma_block].tx_ch_info
			  + cppi_info->ep_dma_ch[i];
		cppi_ch->src_queue = tx_info->tx_queue[0];
		cppi_ch->tag_info = (tx_info->port_num <<
				     CPPI41_SRC_TAG_PORT_NUM_SHIFT) |
				    (tx_info->ch_num <<
				     CPPI41_SRC_TAG_CH_NUM_SHIFT) |
				    (tx_info->sub_ch_num <<
				     CPPI41_SRC_TAG_SUB_CH_NUM_SHIFT);
	}

	/* Configure the Rx channels */
	for (i = 0, cppi_ch = cppi->rx_cppi_ch;
	     i < ARRAY_SIZE(cppi->rx_cppi_ch); ++i, ++cppi_ch) {
		memset(cppi_ch, 0, sizeof(struct cppi41_channel));
		cppi_ch->ch_num = i;
		cppi_ch->channel.private_data = cppi;
	}

	/* Construct/store Tx PD packet info field for later use */
	cppi->pkt_info = (CPPI41_PKT_TYPE_USB << CPPI41_PKT_TYPE_SHIFT) |
			 (CPPI41_RETURN_LINKED << CPPI41_RETURN_POLICY_SHIFT);

	/* Do necessary configuration in hardware to get started */
	reg_base = cppi->musb->ctrl_base;

	/* Disable auto request mode */
	musb_writel(reg_base, cppi->automode_reg_offs, 0);

	/* Disable the CDC/RNDIS modes */
	musb_writel(reg_base, USB_TX_MODE_REG, 0);
	musb_writel(reg_base, USB_RX_MODE_REG, 0);

	return 1;

 free_queue:
	if (cppi41_queue_free(0, cppi->teardownQNum))
		dev_dbg(musb->controller, "ERROR: failed to free teardown completion queue\n");

 free_mem_rgn:
	if (cppi41_mem_rgn_free(cppi_info->q_mgr, cppi->pd_mem_rgn))
		dev_dbg(musb->controller, "ERROR: failed to free queue manager memory region\n");

 free_pds:
	dma_free_coherent(cppi->musb->controller,
			  cppi->bd_size,
			  cppi->pd_mem, cppi->pd_mem_phys);

	return 0;
}
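
This start hook is one member of MUSB's struct dma_controller. Below is a trimmed, hypothetical sketch of how the CPPI 4.1 glue's controller factory might wire it up; the field names follow the MUSB DMA API of this kernel generation, but the body is illustrative rather than the project's code:

/* Hypothetical wiring sketch; the real factory also fills cppi_info, the
 * register offsets and the remaining dma_controller callbacks. */
struct dma_controller *__devinit dma_controller_create(struct musb *musb,
						       void __iomem *base)
{
	struct cppi41 *cppi;

	cppi = kzalloc(sizeof(*cppi), GFP_KERNEL);
	if (!cppi)
		return NULL;

	cppi->musb = musb;
	cppi->controller.start = cppi41_controller_start;
	/* .stop, .channel_alloc, .channel_release, .channel_program and
	 * .channel_abort would be assigned here as well */

	return &cppi->controller;
}
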
Example #8
void __init omap2_init_common_infrastructure(void)
{
	u8 postsetup_state;

	if (cpu_is_omap242x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2420_hwmod_init();
	} else if (cpu_is_omap243x()) {
		omap2xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap2430_hwmod_init();
	} else if (cpu_is_omap34xx()) {
		omap3xxx_powerdomains_init();
		omap2_clockdomains_init();
		omap3xxx_hwmod_init();
	} else if (cpu_is_ti81xx()) {
		ti81xx_powerdomains_init();
		omap2_clockdomains_init();
		ti81xx_hwmod_init();
	} else if (cpu_is_omap44xx()) {
		omap44xx_powerdomains_init();
		omap44xx_clockdomains_init();
		omap44xx_hwmod_init();
	} else {
		pr_err("Could not init hwmod data - unknown SoC\n");
	}

	/* Set the default postsetup state for all hwmods */
#ifdef CONFIG_PM_RUNTIME
	postsetup_state = _HWMOD_STATE_IDLE;
#else
	postsetup_state = _HWMOD_STATE_ENABLED;
#endif
	omap_hwmod_for_each(_set_hwmod_postsetup_state, &postsetup_state);

	/*
	 * Set the default postsetup state for unusual modules (like
	 * MPU WDT).
	 *
	 * The postsetup_state is not actually used until
	 * omap_hwmod_late_init(), so boards that desire full watchdog
	 * coverage of kernel initialization can reprogram the
	 * postsetup_state between the calls to
	 * omap2_init_common_infra() and omap2_init_common_devices().
	 *
	 * XXX ideally we could detect whether the MPU WDT was currently
	 * enabled here and make this conditional
	 */
	postsetup_state = _HWMOD_STATE_DISABLED;
	omap_hwmod_for_each_by_class("wd_timer",
				     _set_hwmod_postsetup_state,
				     &postsetup_state);

	omap_pm_if_early_init();

	if (cpu_is_omap2420())
		omap2420_clk_init();
	else if (cpu_is_omap2430())
		omap2430_clk_init();
	else if (cpu_is_omap34xx())
		omap3xxx_clk_init();
	else if (cpu_is_ti816x())
		ti816x_clk_init();
	else if (cpu_is_ti814x())
		ti814x_clk_init();
	else if (cpu_is_omap44xx())
		omap4xxx_clk_init();
	else
		pr_err("Could not init clock framework - unknown SoC\n");
}
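
A hedged sketch of the usual call site on this kernel generation: the board file's init_early hook runs the common infrastructure init before any hwmod-backed devices are built. The board function name and the NULL SDRC parameters are assumptions, not quoted from a specific board file:

/* Illustrative board hook; a TI81xx EVM board file would reference this
 * from its MACHINE_START .init_early entry. */
static void __init ti81xx_evm_init_early(void)
{
	omap2_init_common_infrastructure();
	omap2_init_common_devices(NULL, NULL);
}
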
Example #9
File: io.c Project: 5victor/linux
/*
 * Intercept ioremap() requests for addresses in our fixed mapping regions.
 */
void __iomem *omap_ioremap(unsigned long p, size_t size, unsigned int type)
{

	WARN(!initialized, "Do not use ioremap before init_early\n");

#ifdef CONFIG_ARCH_OMAP1
	if (cpu_class_is_omap1()) {
		if (BETWEEN(p, OMAP1_IO_PHYS, OMAP1_IO_SIZE))
			return XLATE(p, OMAP1_IO_PHYS, OMAP1_IO_VIRT);
	}
	if (cpu_is_omap7xx()) {
		if (BETWEEN(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_SIZE))
			return XLATE(p, OMAP7XX_DSP_BASE, OMAP7XX_DSP_START);

		if (BETWEEN(p, OMAP7XX_DSPREG_BASE, OMAP7XX_DSPREG_SIZE))
			return XLATE(p, OMAP7XX_DSPREG_BASE,
					OMAP7XX_DSPREG_START);
	}
	if (cpu_is_omap15xx()) {
		if (BETWEEN(p, OMAP1510_DSP_BASE, OMAP1510_DSP_SIZE))
			return XLATE(p, OMAP1510_DSP_BASE, OMAP1510_DSP_START);

		if (BETWEEN(p, OMAP1510_DSPREG_BASE, OMAP1510_DSPREG_SIZE))
			return XLATE(p, OMAP1510_DSPREG_BASE,
					OMAP1510_DSPREG_START);
	}
	if (cpu_is_omap16xx()) {
		if (BETWEEN(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_SIZE))
			return XLATE(p, OMAP16XX_DSP_BASE, OMAP16XX_DSP_START);

		if (BETWEEN(p, OMAP16XX_DSPREG_BASE, OMAP16XX_DSPREG_SIZE))
			return XLATE(p, OMAP16XX_DSPREG_BASE,
					OMAP16XX_DSPREG_START);
	}
#endif
#ifdef CONFIG_ARCH_OMAP2
	if (cpu_is_omap24xx()) {
		if (BETWEEN(p, L3_24XX_PHYS, L3_24XX_SIZE))
			return XLATE(p, L3_24XX_PHYS, L3_24XX_VIRT);
		if (BETWEEN(p, L4_24XX_PHYS, L4_24XX_SIZE))
			return XLATE(p, L4_24XX_PHYS, L4_24XX_VIRT);
	}
	if (cpu_is_omap2420()) {
		if (BETWEEN(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_SIZE))
			return XLATE(p, DSP_MEM_2420_PHYS, DSP_MEM_2420_VIRT);
		if (BETWEEN(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_SIZE))
			return XLATE(p, DSP_IPI_2420_PHYS, DSP_IPI_2420_VIRT);
		if (BETWEEN(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_SIZE))
			return XLATE(p, DSP_MMU_2420_PHYS, DSP_MMU_2420_VIRT);
	}
	if (cpu_is_omap2430()) {
		if (BETWEEN(p, L4_WK_243X_PHYS, L4_WK_243X_SIZE))
			return XLATE(p, L4_WK_243X_PHYS, L4_WK_243X_VIRT);
		if (BETWEEN(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_SIZE))
			return XLATE(p, OMAP243X_GPMC_PHYS, OMAP243X_GPMC_VIRT);
		if (BETWEEN(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_SIZE))
			return XLATE(p, OMAP243X_SDRC_PHYS, OMAP243X_SDRC_VIRT);
		if (BETWEEN(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_SIZE))
			return XLATE(p, OMAP243X_SMS_PHYS, OMAP243X_SMS_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP3
	if (cpu_is_ti816x()) {
		if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
			return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
	} else if (cpu_is_omap34xx()) {
		if (BETWEEN(p, L3_34XX_PHYS, L3_34XX_SIZE))
			return XLATE(p, L3_34XX_PHYS, L3_34XX_VIRT);
		if (BETWEEN(p, L4_34XX_PHYS, L4_34XX_SIZE))
			return XLATE(p, L4_34XX_PHYS, L4_34XX_VIRT);
		if (BETWEEN(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_SIZE))
			return XLATE(p, OMAP34XX_GPMC_PHYS, OMAP34XX_GPMC_VIRT);
		if (BETWEEN(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_SIZE))
			return XLATE(p, OMAP343X_SMS_PHYS, OMAP343X_SMS_VIRT);
		if (BETWEEN(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_SIZE))
			return XLATE(p, OMAP343X_SDRC_PHYS, OMAP343X_SDRC_VIRT);
		if (BETWEEN(p, L4_PER_34XX_PHYS, L4_PER_34XX_SIZE))
			return XLATE(p, L4_PER_34XX_PHYS, L4_PER_34XX_VIRT);
		if (BETWEEN(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_SIZE))
			return XLATE(p, L4_EMU_34XX_PHYS, L4_EMU_34XX_VIRT);
	}
#endif
#ifdef CONFIG_ARCH_OMAP4
	if (cpu_is_omap44xx()) {
		if (BETWEEN(p, L3_44XX_PHYS, L3_44XX_SIZE))
			return XLATE(p, L3_44XX_PHYS, L3_44XX_VIRT);
		if (BETWEEN(p, L4_44XX_PHYS, L4_44XX_SIZE))
			return XLATE(p, L4_44XX_PHYS, L4_44XX_VIRT);
		if (BETWEEN(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_SIZE))
			return XLATE(p, OMAP44XX_GPMC_PHYS, OMAP44XX_GPMC_VIRT);
		if (BETWEEN(p, OMAP44XX_EMIF1_PHYS, OMAP44XX_EMIF1_SIZE))
			return XLATE(p, OMAP44XX_EMIF1_PHYS,
					OMAP44XX_EMIF1_VIRT);
		if (BETWEEN(p, OMAP44XX_EMIF2_PHYS, OMAP44XX_EMIF2_SIZE))
			return XLATE(p, OMAP44XX_EMIF2_PHYS,
					OMAP44XX_EMIF2_VIRT);
		if (BETWEEN(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_SIZE))
			return XLATE(p, OMAP44XX_DMM_PHYS, OMAP44XX_DMM_VIRT);
		if (BETWEEN(p, L4_PER_44XX_PHYS, L4_PER_44XX_SIZE))
			return XLATE(p, L4_PER_44XX_PHYS, L4_PER_44XX_VIRT);
		if (BETWEEN(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_SIZE))
			return XLATE(p, L4_EMU_44XX_PHYS, L4_EMU_44XX_VIRT);
	}
#endif
	return __arm_ioremap_caller(p, size, type, __builtin_return_address(0));
}
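
The BETWEEN()/XLATE() helpers used throughout omap_ioremap() are small macros defined earlier in io.c; their usual definitions are sketched below from memory, so treat them as a reference to double-check rather than a quotation:

/* Usual helper macros for the fixed I/O mappings (reference, not quoted). */
#define BETWEEN(p, st, sz)	((p) >= (st) && (p) < ((st) + (sz)))
#define XLATE(p, pst, vst)	((void __iomem *)((p) - (pst) + (vst)))
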
Example #10
PVRSRV_ERROR SysInitialise(IMG_VOID)
{
	IMG_UINT32			i;
	PVRSRV_ERROR 		eError;
	PVRSRV_DEVICE_NODE	*psDeviceNode;
#if !defined(PVR_NO_OMAP_TIMER)
	IMG_CPU_PHYADDR		TimerRegPhysBase;
#endif
#if !defined(SGX_DYNAMIC_TIMING_INFO)
	SGX_TIMING_INFORMATION*	psTimingInfo;
#endif
	gpsSysData = &gsSysData;
	OSMemSet(gpsSysData, 0, sizeof(SYS_DATA));

	gpsSysSpecificData =  &gsSysSpecificData;
	OSMemSet(gpsSysSpecificData, 0, sizeof(SYS_SPECIFIC_DATA));

	gpsSysData->pvSysSpecificData = gpsSysSpecificData;

	eError = OSInitEnvData(&gpsSysData->pvEnvSpecificData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to setup env structure"));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_ENVDATA);

	gpsSysData->ui32NumDevices = SYS_DEVICE_COUNT;

	
	for(i=0; i<SYS_DEVICE_COUNT; i++)
	{
		gpsSysData->sDeviceID[i].uiID = i;
		gpsSysData->sDeviceID[i].bInUse = IMG_FALSE;
	}

	gpsSysData->psDeviceNodeList = IMG_NULL;
	gpsSysData->psQueueList = IMG_NULL;

	eError = SysInitialiseCommon(gpsSysData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed in SysInitialiseCommon"));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}

#if !defined(SGX_DYNAMIC_TIMING_INFO)
	
	psTimingInfo = &gsSGXDeviceMap.sTimingInfo;
	if (cpu_is_ti816x())
		psTimingInfo->ui32CoreClockSpeed = SYS_389x_SGX_CLOCK_SPEED;
	else
		psTimingInfo->ui32CoreClockSpeed = SYS_387x_SGX_CLOCK_SPEED;
	psTimingInfo->ui32HWRecoveryFreq = SYS_SGX_HWRECOVERY_TIMEOUT_FREQ; 
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
	psTimingInfo->bEnableActivePM = IMG_TRUE;
#else	
	psTimingInfo->bEnableActivePM = IMG_FALSE;
#endif 
	psTimingInfo->ui32ActivePowManLatencyms = SYS_SGX_ACTIVE_POWER_LATENCY_MS; 
	psTimingInfo->ui32uKernelFreq = SYS_SGX_PDS_TIMER_FREQ; 
#endif

	gpsSysSpecificData->ui32SrcClockDiv = 3;

	eError = SysLocateDevices(gpsSysData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to locate devices"));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_LOCATEDEV);
#if 0
	eError = SysPMRuntimeRegister();
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register with OSPM!"));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_PM_RUNTIME);
#endif

	eError = PVRSRVRegisterDevice(gpsSysData, SGXRegisterDevice,
								  DEVICE_SGX_INTERRUPT, &gui32SGXDeviceID);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to register device!"));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_REGDEV);

	psDeviceNode = gpsSysData->psDeviceNodeList;
	while(psDeviceNode)
	{
		
		switch(psDeviceNode->sDevId.eDeviceType)
		{
			case PVRSRV_DEVICE_TYPE_SGX:
			{
				DEVICE_MEMORY_INFO *psDevMemoryInfo;
				DEVICE_MEMORY_HEAP_INFO *psDeviceMemoryHeap;

				psDeviceNode->psLocalDevMemArena = IMG_NULL;

				psDevMemoryInfo = &psDeviceNode->sDevMemoryInfo;
				psDeviceMemoryHeap = psDevMemoryInfo->psDeviceMemoryHeap;

				for(i=0; i<psDevMemoryInfo->ui32HeapCount; i++)
				{
					psDeviceMemoryHeap[i].ui32Attribs |= PVRSRV_BACKINGSTORE_SYSMEM_NONCONTIG;
				}

				gpsSGXDevNode = psDeviceNode;
				gsSysSpecificData.psSGXDevNode = psDeviceNode;

				break;
			}
			default:
				PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to find SGX device node!"));
				return PVRSRV_ERROR_INIT_FAILURE;
		}

		
		psDeviceNode = psDeviceNode->psNext;
	}

	eError = EnableSystemClocksWrap(gpsSysData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable system clocks (%d)", eError));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_SYSCLOCKS);
#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
	eError = EnableSGXClocksWrap(gpsSysData);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to Enable SGX clocks (%d)", eError));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
#endif	

	eError = PVRSRVInitialiseDevice(gui32SGXDeviceID);
	if (eError != PVRSRV_OK)
	{
		PVR_DPF((PVR_DBG_ERROR,"SysInitialise: Failed to initialise device!"));
		(IMG_VOID)SysDeinitialise(gpsSysData);
		gpsSysData = IMG_NULL;
		return eError;
	}
	SYS_SPECIFIC_DATA_SET(&gsSysSpecificData, SYS_SPECIFIC_DATA_ENABLE_INITDEV);

#if defined(SUPPORT_ACTIVE_POWER_MANAGEMENT)
	
	DisableSGXClocks(gpsSysData);
#endif	

#if !defined(PVR_NO_OMAP_TIMER)
#if defined(PVR_OMAP_TIMER_BASE_IN_SYS_SPEC_DATA)
	TimerRegPhysBase = gsSysSpecificData.sTimerRegPhysBase;
#else
	TimerRegPhysBase.uiAddr = SYS_TI81xx_GP7TIMER_REGS_SYS_PHYS_BASE;
#endif
	gpsSysData->pvSOCTimerRegisterKM = IMG_NULL;
	gpsSysData->hSOCTimerRegisterOSMemHandle = 0;
	if (TimerRegPhysBase.uiAddr != 0)
	{
		OSReservePhys(TimerRegPhysBase,
				  4,
				  PVRSRV_HAP_MULTI_PROCESS|PVRSRV_HAP_UNCACHED,
				  IMG_NULL,
				  (IMG_VOID **)&gpsSysData->pvSOCTimerRegisterKM,
				  &gpsSysData->hSOCTimerRegisterOSMemHandle);
	}
#endif 

	return PVRSRV_OK;
}
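
SYS_SPECIFIC_DATA_SET() records which init steps succeeded so that SysDeinitialise() can unwind only those. The usual macro definitions from the PVR system layer are sketched below, reconstructed from memory; verify against the sysconfig header of the tree in use:

/* Flag bookkeeping used by SysInitialise()/SysDeinitialise(); sketch only. */
#define SYS_SPECIFIC_DATA_SET(psSysSpecData, flag) \
	((IMG_VOID)((psSysSpecData)->ui32SysSpecificData |= (flag)))
#define SYS_SPECIFIC_DATA_CLEAR(psSysSpecData, flag) \
	((IMG_VOID)((psSysSpecData)->ui32SysSpecificData &= ~(flag)))
#define SYS_SPECIFIC_DATA_TEST(psSysSpecData, flag) \
	(((psSysSpecData)->ui32SysSpecificData & (flag)) != 0)
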
Example #11
PVRSRV_ERROR EnableSystemClocks(SYS_DATA *psSysData)
{
	SYS_SPECIFIC_DATA *psSysSpecData = (SYS_SPECIFIC_DATA *) psSysData->pvSysSpecificData;
	struct clk *psCLK;
	IMG_INT res;
	PVRSRV_ERROR eError;
	IMG_BOOL bPowerLock;

#if defined(DEBUG) || defined(TIMING)
	IMG_INT rate;
	struct clk *sys_ck;
	IMG_CPU_PHYADDR     TimerRegPhysBase;
	IMG_HANDLE hTimerEnable;
	IMG_UINT32 *pui32TimerEnable;

#endif	

	PVR_TRACE(("EnableSystemClocks: Enabling System Clocks"));

	if (!psSysSpecData->bSysClocksOneTimeInit)
	{
		bPowerLock = IMG_FALSE;

		spin_lock_init(&psSysSpecData->sPowerLock);
		atomic_set(&psSysSpecData->sPowerLockCPU, -1);
		spin_lock_init(&psSysSpecData->sNotifyLock);
		atomic_set(&psSysSpecData->sNotifyLockCPU, -1);

		atomic_set(&psSysSpecData->sSGXClocksEnabled, 0);

		psCLK = clk_get(NULL, "sgx_ck");
		if (IS_ERR(psCLK))
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSsystemClocks: Couldn't get SGX Functional Clock"));
			goto ExitError;
		}
		psSysSpecData->psSGX_FCK = psCLK;

		psSysSpecData->bSysClocksOneTimeInit = IMG_TRUE;
	}
	else
	{
		
		bPowerLock = PowerLockWrappedOnCPU(psSysSpecData);
		if (bPowerLock)
		{
			PowerLockUnwrap(psSysSpecData);
		}
	}

#if defined(CONSTRAINT_NOTIFICATIONS)
	psSysSpecData->pVdd2Handle = constraint_get(PVRSRV_MODNAME, &cnstr_id_vdd2);
	if (IS_ERR(psSysSpecData->pVdd2Handle))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get VDD2 constraint handle"));
		goto ExitError;
	}

	RegisterConstraintNotifications();
#endif

#if defined(DEBUG) || defined(TIMING)
	if (cpu_is_ti816x()) {
		psCLK = clk_get(NULL, "gpt6_fck");
	} else {
		psCLK = clk_get(NULL, "gpt7_fck");
	}
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
		goto ExitUnRegisterConstraintNotifications;
	}
	psSysSpecData->psGPT11_FCK = psCLK;

	if (cpu_is_ti816x()) {
		psCLK = clk_get(NULL, "gpt6_ick");
	} else {
		psCLK = clk_get(NULL, "gpt7_ick");
	}
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
		goto ExitUnRegisterConstraintNotifications;
	}
	psSysSpecData->psGPT11_ICK = psCLK;

	rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
	PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));

	res = clk_enable(psSysSpecData->psGPT11_FCK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
		goto ExitUnRegisterConstraintNotifications;
	}

	res = clk_enable(psSysSpecData->psGPT11_ICK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
		goto ExitDisableGPT11FCK;
	}

	
	TimerRegPhysBase.uiAddr = SYS_TI81xx_GP7TIMER_TSICR_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
                  4,
                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                  &hTimerEnable);

	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	rate = *pui32TimerEnable;
	if(!(rate & 4))
	{
		PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));

		
		*pui32TimerEnable = rate | 4;
	}

	OSUnMapPhysToLin(pui32TimerEnable,
		    4,
		    PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
		    hTimerEnable);

	
	TimerRegPhysBase.uiAddr = SYS_TI81xx_GP7TIMER_ENABLE_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(TimerRegPhysBase,
                  4,
                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                  &hTimerEnable);

	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	
	*pui32TimerEnable = 3;

	OSUnMapPhysToLin(pui32TimerEnable,
		    4,
		    PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
		    hTimerEnable);

#endif 

#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
	PVR_TRACE(("EnableSystemClocks: Setting SGX OPP constraint"));

	
	res = constraint_set(psSysSpecData->pVdd2Handle, max_vdd2_opp);
	if (res != 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: constraint_set failed (%d)", res));
		goto ExitConstraintSetFailed;
	}
#endif
	eError = PVRSRV_OK;
	goto Exit;

#if defined(PDUMP) && !defined(NO_HARDWARE) && defined(CONSTRAINT_NOTIFICATIONS)
ExitConstraintSetFailed:
#endif
#if defined(DEBUG) || defined(TIMING)
ExitDisableGPT11ICK:
	clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
	clk_disable(psSysSpecData->psGPT11_FCK);
ExitUnRegisterConstraintNotifications:
#endif	
#if defined(CONSTRAINT_NOTIFICATIONS)
	UnRegisterConstraintNotifications();
	constraint_put(psSysSpecData->pVdd2Handle);
#endif
ExitError:
	eError = PVRSRV_ERROR_DISABLE_CLOCK_FAILURE;

Exit:
	if (bPowerLock)
	{
		PowerLockWrap(psSysSpecData);
	}

	return eError;
}
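
HZ_TO_MHZ() used in the GPTIMER trace above is a trivial helper; its usual definition in this system layer is sketched here as a reference (an assumption, not quoted from the project):

/* Sketch of the rate-conversion helper referenced by the GPTIMER trace. */
#define ONE_MHZ		1000000
#define HZ_TO_MHZ(m)	((m) / ONE_MHZ)
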
Example #12
void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
{
	struct omap_hwmod		*oh;
	struct platform_device		*pdev;
	struct device			*dev;
	int				bus_id = -1;
	const char			*oh_name, *name;
	struct omap_musb_board_data	*board_data;

	if (musb_board_data)
		board_data = musb_board_data;
	else
		board_data = &musb_default_board_data;

	/*
	 * REVISIT: This line can be removed once all the platforms using
	 * musb_core.c have been converted to use clkdev.
	 */
	musb_plat[0].clock = "ick";
	musb_plat[0].board_data = board_data;
	musb_plat[0].power = board_data->power >> 1;
	musb_plat[0].mode = board_data->mode;
	musb_plat[0].extvbus = board_data->extvbus;

	/*
	 * OMAP3630/AM35x platform has MUSB RTL-1.8 which has the fix for the
	 * issue restricting active endpoints to use first 8K of FIFO space.
	 * This issue restricts OMAP35x platform to use fifo_mode '5'.
	 */
	if (cpu_is_omap3430())
		musb_config.fifo_mode = 5;

	if (cpu_is_omap3517() || cpu_is_omap3505()) {
		oh_name = "am35x_otg_hs";
		name = "musb-am35x";
	} else if (cpu_is_ti816x() || cpu_is_am33xx()) {
		musb_config.fifo_mode = 4;

		/* only usb0 port enabled in peripheral mode*/
		if (board_data->mode == MUSB_PERIPHERAL) {
			board_data->instances = 0;
			musb_config.fifo_mode = 6;
		}

		board_data->set_phy_power = ti81xx_musb_phy_power;
		oh_name = "usb_otg_hs";
		name = "ti81xx-usbss";
	} else {
		oh_name = "usb_otg_hs";
		name = "musb-omap2430";
	}

	oh = omap_hwmod_lookup(oh_name);
	if (WARN(!oh, "%s: could not find omap_hwmod for %s\n",
		 __func__, oh_name))
		return;

	pdev = omap_device_build(name, bus_id, oh, &musb_plat,
			       sizeof(musb_plat), NULL, 0, false);
	if (IS_ERR(pdev)) {
		pr_err("Could not build omap_device for %s %s\n",
						name, oh_name);
		return;
	}

	dev = &pdev->dev;
	get_device(dev);
	dev->dma_mask = &musb_dmamask;
	dev->coherent_dma_mask = musb_dmamask;
	put_device(dev);

	if (cpu_is_omap44xx())
		omap4430_phy_init(dev);
}
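
A hedged usage sketch to close the loop with Example #1: a TI816x board file fills omap_musb_board_data and hands it to usb_musb_init(), and on TI816x/AM33xx the code above then installs ti81xx_musb_phy_power as the PHY hook. The field values below are placeholders, not taken from the source above:

/* Placeholder board data; not marked __initdata because usb_musb_init()
 * keeps the pointer in musb_plat[0].board_data. */
static struct omap_musb_board_data board_musb_data = {
	.interface_type	= MUSB_INTERFACE_ULPI,
	.mode		= MUSB_OTG,
	.power		= 500,	/* mA; halved into musb_plat[0].power above */
};

static void __init board_usb_init(void)
{
	usb_musb_init(&board_musb_data);
}
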