static int r_tpu_enable(struct r_tpu_priv *p, enum led_brightness brightness)
{
	struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
	int prescaler[] = { 1, 4, 16, 64 };
	int k, ret;
	unsigned long rate, tmp;

	if (p->timer_state == R_TPU_TIMER_ON)
		return 0;

	/* wake up device and enable clock */
	pm_runtime_get_sync(&p->pdev->dev);
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		pm_runtime_put_sync(&p->pdev->dev);
		return ret;
	}

	/* make sure channel is disabled */
	r_tpu_start_stop_ch(p, 0);

	/* get clock rate after enabling it */
	rate = clk_get_rate(p->clk);

	/* pick the lowest acceptable rate */
	for (k = 0; k < ARRAY_SIZE(prescaler); k++)
		if ((rate / prescaler[k]) < p->min_rate)
			break;

	if (!k) {
		dev_err(&p->pdev->dev, "clock rate mismatch\n");
		goto err0;
	}
	dev_dbg(&p->pdev->dev, "rate = %lu, prescaler %u\n",
		rate, prescaler[k - 1]);

	/* clear TCNT on TGRB match, count on rising edge, set prescaler */
	r_tpu_write(p, TCR, 0x0040 | (k - 1));

	/* output 0 until TGRA, output 1 until TGRB */
	r_tpu_write(p, TIOR, 0x0002);

	rate /= prescaler[k - 1] * p->refresh_rate;
	r_tpu_write(p, TGRB, rate);
	dev_dbg(&p->pdev->dev, "TRGB = 0x%04lx\n", rate);

	tmp = (cfg->max_brightness - brightness) * rate;
	r_tpu_write(p, TGRA, tmp / cfg->max_brightness);
	dev_dbg(&p->pdev->dev, "TRGA = 0x%04lx\n", tmp / cfg->max_brightness);

	/* PWM mode */
	r_tpu_write(p, TMDR, 0x0002);

	/* enable channel */
	r_tpu_start_stop_ch(p, 1);

	p->timer_state = R_TPU_TIMER_ON;
	return 0;
 err0:
	clk_disable(p->clk);
	pm_runtime_put_sync(&p->pdev->dev);
	return -ENOTSUPP;
}
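The prescaler and compare-register arithmetic above is worth seeing in isolation. The stand-alone sketch below mirrors it (illustrative only; the helper name and the printing are not part of the driver): pick the lowest prescaler tap whose output still satisfies min_rate, derive the PWM period (TGRB) from the refresh rate, and derive the low-time threshold (TGRA) from the requested brightness.

#include <stdio.h>

/* Hypothetical stand-alone helper mirroring r_tpu_enable()'s math;
 * not part of the driver. */
static void tpu_pwm_example(unsigned long rate, unsigned long min_rate,
			    unsigned int refresh_rate,
			    unsigned int brightness, unsigned int max_brightness)
{
	static const int prescaler[] = { 1, 4, 16, 64 };
	unsigned long tgrb, tgra;
	int k;

	/* pick the lowest acceptable rate, exactly as the driver does */
	for (k = 0; k < 4; k++)
		if ((rate / prescaler[k]) < min_rate)
			break;
	if (!k)
		return;		/* clock rate mismatch */

	tgrb = rate / (prescaler[k - 1] * refresh_rate);		/* PWM period */
	tgra = (max_brightness - brightness) * tgrb / max_brightness;	/* low time */
	printf("prescaler=%d TGRB=0x%04lx TGRA=0x%04lx\n",
	       prescaler[k - 1], tgrb, tgra);
}

For example, rate = 33 MHz, min_rate = 2 MHz, refresh_rate = 100, max_brightness = 100 and brightness = 25 settles on the /16 tap, giving TGRB = 20625 and TGRA = 15468: the output stays low for 75% of each period, matching the TIOR setting of "output 0 until TGRA, output 1 until TGRB".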
static int snddev_icodec_open_rx(struct snddev_icodec_state *icodec)
{
	int trc;
	int afe_channel_mode;
	union afe_port_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;

	wake_lock(&drv->rx_idlelock);

	if (drv->snddev_vreg) {
		if (!strcmp(icodec->data->name, "headset_stereo_rx"))
			vreg_mode_vote(drv->snddev_vreg, 1,
					SNDDEV_LOW_POWER_MODE);
		else
			vreg_mode_vote(drv->snddev_vreg, 1,
					SNDDEV_HIGH_POWER_MODE);
	}
	msm_snddev_rx_mclk_request();

	drv->rx_osrclk = clk_get(0, "i2s_spkr_osr_clk");
	if (IS_ERR(drv->rx_osrclk))
		pr_err("%s master clock Error\n", __func__);

	trc =  clk_set_rate(drv->rx_osrclk,
			SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
	/* (+)dragonball debug */
	pr_debug("%s: sample_rate = %d\n", __func__, icodec->sample_rate);
	if (IS_ERR_VALUE(trc)) {
		pr_err("ERROR setting m clock1\n");
		goto error_invalid_freq;
	}

	clk_enable(drv->rx_osrclk);
	drv->rx_bitclk = clk_get(0, "i2s_spkr_bit_clk");
	if (IS_ERR(drv->rx_bitclk))
		pr_err("%s clock Error\n", __func__);

	/* Master clock = Sample Rate * OSR rate
	 * Bit clock = bits/sample * channels * Sample Rate
	 * Master clock / bit clock = divider value = 8
	 */
	if (msm_codec_i2s_slave_mode) {
		pr_info("%s: configuring bit clock for slave mode\n",
				__func__);
		trc =  clk_set_rate(drv->rx_bitclk, 0);
	} else
		trc =  clk_set_rate(drv->rx_bitclk, 8);

	if (IS_ERR_VALUE(trc)) {
		pr_err("ERROR setting m clock1\n");
		goto error_adie;
	}
	clk_enable(drv->rx_bitclk);

	if (icodec->data->voltage_on)
		icodec->data->voltage_on();

	/* Configure ADIE */
	trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
	if (IS_ERR_VALUE(trc))
		pr_err("%s: adie codec open failed\n", __func__);
	else
		adie_codec_setpath(icodec->adie_path,
					icodec->sample_rate, 256);
	/* OSR default to 256, can be changed for power optimization
	 * If OSR is to be changed, need clock API for setting the divider
	 */
	/* (+)dragonball debug */
	pr_debug("%s: adie_codec_open profile=%p adie_path=%p\n",
		 __func__, icodec->data->profile, icodec->adie_path);

	switch (icodec->data->channel_mode) {
	case 2:
		afe_channel_mode = MSM_AFE_STEREO;
		break;
	case 1:
	default:
		afe_channel_mode = MSM_AFE_MONO;
		break;
	}
	afe_config.mi2s.channel = afe_channel_mode;
	afe_config.mi2s.bitwidth = 16;
	afe_config.mi2s.line = 1;
	if (msm_codec_i2s_slave_mode)
		afe_config.mi2s.ws = 0;
	else
		afe_config.mi2s.ws = 1;

	trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate);

	if (IS_ERR_VALUE(trc))
		pr_err("%s: afe open failed, trc = %d\n", __func__, trc);
	
	/* Enable ADIE */
	if (icodec->adie_path) {
		adie_codec_proceed_stage(icodec->adie_path,
					ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(icodec->adie_path,
					ADIE_CODEC_DIGITAL_ANALOG_READY);
	}

	if (msm_codec_i2s_slave_mode)
		adie_codec_set_master_mode(icodec->adie_path, 1);
	else
		adie_codec_set_master_mode(icodec->adie_path, 0);

	/* Enable power amplifier */
	if (icodec->data->pamp_on) {
		if (icodec->data->pamp_on()) {
			pr_err("%s: Error turning on rx power\n", __func__);
			goto error_pamp;
		}
	}

	icodec->enabled = 1;

	wake_unlock(&drv->rx_idlelock);
	return 0;

error_pamp:
error_adie:
	clk_disable(drv->rx_osrclk);
error_invalid_freq:

	pr_err("%s: encounter error\n", __func__);

	wake_unlock(&drv->rx_idlelock);
	return -ENODEV;
}
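The divider comment in the middle of this function is terse; concretely, with the default OSR of 256, 16-bit samples and stereo output, the master (OSR) clock is fs * 256 and the bit clock is fs * 16 * 2, so master / bit = 8, which is the divider handed to clk_set_rate() for the bit clock in master mode (slave mode passes 0 and lets the codec drive it). A tiny sketch of that arithmetic, for illustration only:

/* Illustrative arithmetic only, not driver code: why the i2s_spkr_bit_clk
 * divider is 8 for OSR = 256, 16-bit, stereo. */
static unsigned int i2s_bitclk_divider(unsigned int osr,
				       unsigned int bits_per_sample,
				       unsigned int channels)
{
	/* master clock = fs * osr;  bit clock = fs * bits * channels */
	return osr / (bits_per_sample * channels);	/* 256 / (16 * 2) = 8 */
}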
Example #3
static PVRSRV_ERROR AcquireGPTimer(SYS_SPECIFIC_DATA *psSysSpecData)
{
#if defined(PVR_OMAP4_TIMING_PRCM)
	struct clk *psCLK;
	IMG_INT res;
	struct clk *sys_ck;
	IMG_INT rate;
#endif
	PVRSRV_ERROR eError;

	IMG_CPU_PHYADDR sTimerRegPhysBase;
	IMG_HANDLE hTimerEnable;
	IMG_UINT32 *pui32TimerEnable;

	PVR_ASSERT(psSysSpecData->sTimerRegPhysBase.uiAddr == 0);

#if defined(PVR_OMAP4_TIMING_PRCM)
	
	psCLK = clk_get(NULL, "gpt11_fck");
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 functional clock"));
		goto ExitError;
	}
	psSysSpecData->psGPT11_FCK = psCLK;

	psCLK = clk_get(NULL, "gpt11_ick");
	if (IS_ERR(psCLK))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get GPTIMER11 interface clock"));
		goto ExitError;
	}
	psSysSpecData->psGPT11_ICK = psCLK;

	sys_ck = clk_get(NULL, "sys_clkin_ck");
	if (IS_ERR(sys_ck))
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't get System clock"));
		goto ExitError;
	}

	if(clk_get_parent(psSysSpecData->psGPT11_FCK) != sys_ck)
	{
		PVR_TRACE(("Setting GPTIMER11 parent to System Clock"));
		res = clk_set_parent(psSysSpecData->psGPT11_FCK, sys_ck);
		if (res < 0)
		{
			PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't set GPTIMER11 parent clock (%d)", res));
			goto ExitError;
		}
	}

	rate = clk_get_rate(psSysSpecData->psGPT11_FCK);
	PVR_TRACE(("GPTIMER11 clock is %dMHz", HZ_TO_MHZ(rate)));

	res = clk_enable(psSysSpecData->psGPT11_FCK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 functional clock (%d)", res));
		goto ExitError;
	}

	res = clk_enable(psSysSpecData->psGPT11_ICK);
	if (res < 0)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: Couldn't enable GPTIMER11 interface clock (%d)", res));
		goto ExitDisableGPT11FCK;
	}
#endif	

	
	sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_TSICR_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
                  4,
                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                  &hTimerEnable);

	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	if(!(*pui32TimerEnable & 4))
	{
		PVR_TRACE(("Setting GPTIMER11 mode to posted (currently is non-posted)"));

		
		*pui32TimerEnable |= 4;
	}

	OSUnMapPhysToLin(pui32TimerEnable,
		    4,
		    PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
		    hTimerEnable);

	
	sTimerRegPhysBase.uiAddr = SYS_OMAP4430_GP11TIMER_ENABLE_SYS_PHYS_BASE;
	pui32TimerEnable = OSMapPhysToLin(sTimerRegPhysBase,
                  4,
                  PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
                  &hTimerEnable);

	if (pui32TimerEnable == IMG_NULL)
	{
		PVR_DPF((PVR_DBG_ERROR, "EnableSystemClocks: OSMapPhysToLin failed"));
		goto ExitDisableGPT11ICK;
	}

	
	*pui32TimerEnable = 3;

	OSUnMapPhysToLin(pui32TimerEnable,
		    4,
		    PVRSRV_HAP_KERNEL_ONLY|PVRSRV_HAP_UNCACHED,
		    hTimerEnable);

	psSysSpecData->sTimerRegPhysBase = sTimerRegPhysBase;

	eError = PVRSRV_OK;

	goto Exit;

ExitDisableGPT11ICK:
#if defined(PVR_OMAP4_TIMING_PRCM)
	clk_disable(psSysSpecData->psGPT11_ICK);
ExitDisableGPT11FCK:
	clk_disable(psSysSpecData->psGPT11_FCK);
ExitError:
#endif	
	eError = PVRSRV_ERROR_CLOCK_REQUEST_FAILED;
Exit:
	return eError;
}
int tegra_asoc_utils_init(struct tegra_asoc_utils_data *data,
			  struct device *dev)
{
	int ret;
	int rate;

	data->dev = dev;

	data->clk_pll_p_out1 = clk_get_sys(NULL, "pll_p_out1");
	if (IS_ERR(data->clk_pll_p_out1)) {
		dev_err(data->dev, "Can't retrieve clk pll_p_out1\n");
		ret = PTR_ERR(data->clk_pll_p_out1);
		goto err;
	}

	data->clk_pll_a = clk_get_sys(NULL, "pll_a");
	if (IS_ERR(data->clk_pll_a)) {
		dev_err(data->dev, "Can't retrieve clk pll_a\n");
		ret = PTR_ERR(data->clk_pll_a);
		goto err_put_pll_p_out1;
	}

	data->clk_pll_a_out0 = clk_get_sys(NULL, "pll_a_out0");
	if (IS_ERR(data->clk_pll_a_out0)) {
		dev_err(data->dev, "Can't retrieve clk pll_a_out0\n");
		ret = PTR_ERR(data->clk_pll_a_out0);
		goto err_put_pll_a;
	}

	data->clk_m = clk_get_sys(NULL, "clk_m");
	if (IS_ERR(data->clk_m)) {
		dev_err(data->dev, "Can't retrieve clk clk_m\n");
		ret = PTR_ERR(data->clk_m);
		goto err_put_pll_a_out0;
	}

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	data->clk_cdev1 = clk_get_sys(NULL, "cdev1");
#else
	data->clk_cdev1 = clk_get_sys("extern1", NULL);
#endif
	if (IS_ERR(data->clk_cdev1)) {
		dev_err(data->dev, "Can't retrieve clk cdev1\n");
		ret = PTR_ERR(data->clk_cdev1);
		goto err_put_pll_a_out0;
	}

#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
	data->clk_out1 = ERR_PTR(-ENOENT);
#else
	data->clk_out1 = clk_get_sys("clk_out_1", "extern1");
	if (IS_ERR(data->clk_out1)) {
		dev_err(data->dev, "Can't retrieve clk out1\n");
		ret = PTR_ERR(data->clk_out1);
		goto err_put_cdev1;
	}
#endif

#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
#if TEGRA30_I2S_MASTER_PLAYBACK
	ret = clk_set_parent(data->clk_cdev1, data->clk_pll_a_out0);
	if (ret) {
		dev_err(data->dev, "Can't set clk cdev1/extern1 parent");
		goto err_put_out1;
	}
#else
	rate = clk_get_rate(data->clk_m);

	if(rate == 26000000)
		clk_set_rate(data->clk_cdev1, 13000000);

	ret = clk_set_parent(data->clk_cdev1, data->clk_m);
	if (ret) {
		dev_err(data->dev, "Can't set clk cdev1/extern1 parent");
		goto err_put_out1;
	}
#endif

#endif
#if 0	/* clk_cdev1 is not enabled here; that was moved to fm34.c */
	ret = clk_enable(data->clk_cdev1);
	if (ret) {
		dev_err(data->dev, "Can't enable clk cdev1/extern1");
		goto err_put_out1;
	}
#endif
	if (!IS_ERR(data->clk_out1)) {
		ret = clk_enable(data->clk_out1);
		if (ret) {
			dev_err(data->dev, "Can't enable clk out1");
			goto err_put_out1;
		}
	}

	ret = tegra_asoc_utils_set_rate(data, 48000, 256 * 48000);
	if (ret)
		goto err_put_out1;

	return 0;

err_put_out1:
	if (!IS_ERR(data->clk_out1))
		clk_put(data->clk_out1);
#if !defined(CONFIG_ARCH_TEGRA_2x_SOC)
err_put_cdev1:
#endif
	clk_put(data->clk_cdev1);
err_put_pll_a_out0:
	clk_put(data->clk_pll_a_out0);
err_put_pll_a:
	clk_put(data->clk_pll_a);
err_put_pll_p_out1:
	clk_put(data->clk_pll_p_out1);
err:
	return ret;
}
Example #5
static inline void __init apollon_init_smc91x(void)
{
	unsigned long base;

	unsigned int rate;
	struct clk *gpmc_fck;
	int eth_cs;
	int err;

	gpmc_fck = clk_get(NULL, "gpmc_fck");	/* Always on ENABLE_ON_INIT */
	if (IS_ERR(gpmc_fck)) {
		WARN_ON(1);
		return;
	}

	clk_enable(gpmc_fck);
	rate = clk_get_rate(gpmc_fck);

	eth_cs = APOLLON_ETH_CS;

	/* Make sure CS1 timings are correct */
	gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG1, 0x00011200);

	if (rate >= 160000000) {
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG2, 0x001f1f01);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG3, 0x00080803);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG4, 0x1c0b1c0a);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG5, 0x041f1F1F);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG6, 0x000004C4);
	} else if (rate >= 130000000) {
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG2, 0x001f1f00);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG3, 0x00080802);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG4, 0x1C091C09);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG5, 0x041f1F1F);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG6, 0x000004C4);
	} else {/* rate = 100000000 */
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG2, 0x001f1f00);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG3, 0x00080802);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG4, 0x1C091C09);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG5, 0x031A1F1F);
		gpmc_cs_write_reg(eth_cs, GPMC_CS_CONFIG6, 0x000003C2);
	}

	if (gpmc_cs_request(APOLLON_ETH_CS, SZ_16M, &base) < 0) {
		printk(KERN_ERR "Failed to request GPMC CS for smc91x\n");
		goto out;
	}
	apollon_smc91x_resources[0].start = base + 0x300;
	apollon_smc91x_resources[0].end   = base + 0x30f;
	udelay(100);

	omap_mux_init_gpio(APOLLON_ETHR_GPIO_IRQ, 0);
	err = gpio_request_one(APOLLON_ETHR_GPIO_IRQ, GPIOF_IN, "SMC91x irq");
	if (err) {
		printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n",
			APOLLON_ETHR_GPIO_IRQ);
		gpmc_cs_free(APOLLON_ETH_CS);
	}
out:
	clk_disable(gpmc_fck);
	clk_put(gpmc_fck);
}
Example #6
static int vpif_resume(struct device *dev)
{
	clk_enable(vpif_clk);
	return 0;
}
static u32 res_trk_enable_videocore(void)
{
	mutex_lock(&resource_context.lock);
	if (!resource_context.rail_enabled) {
		int rc = -1;

		rc = regulator_enable(resource_context.regulator);
		if (rc) {
			VCDRES_MSG_ERROR("%s(): regulator_enable failed %d\n",
							 __func__, rc);
			goto bail_out;
		}
		VCDRES_MSG_LOW("%s(): regulator enable Success %d\n",
							__func__, rc);

		resource_context.pclk = clk_get(resource_context.device,
			"iface_clk");

		if (IS_ERR(resource_context.pclk)) {
			VCDRES_MSG_ERROR("%s(): iface_clk get failed\n"
							 , __func__);
			goto disable_regulator;
		}

		resource_context.hclk = clk_get(resource_context.device,
			"core_clk");

		if (IS_ERR(resource_context.hclk)) {
			VCDRES_MSG_ERROR("%s(): core_clk get failed\n"
							 , __func__);

			goto release_pclk;
		}

		resource_context.hclk_div2 =
			clk_get(resource_context.device, "core_div2_clk");

		if (IS_ERR(resource_context.hclk_div2)) {
			VCDRES_MSG_ERROR("%s(): core_div2_clk get failed\n"
							 , __func__);
			goto release_hclk_pclk;
		}

		if (clk_set_rate(resource_context.hclk,
			mfc_clk_freq_table[0])) {
			VCDRES_MSG_ERROR("\n pwr_rail_enable:"
				 " set clk rate failed\n");
			goto release_all_clks;
		}

		if (clk_enable(resource_context.pclk)) {
			VCDRES_MSG_ERROR("vidc pclk Enable failed\n");
			goto release_all_clks;
		}

		if (clk_enable(resource_context.hclk)) {
			VCDRES_MSG_ERROR("vidc hclk Enable failed\n");
			goto disable_pclk;
		}

		if (clk_enable(resource_context.hclk_div2)) {
			VCDRES_MSG_ERROR("vidc hclk_div2 Enable failed\n");
			goto disable_hclk_pclk;
		}

		rc = clk_reset(resource_context.pclk, CLK_RESET_DEASSERT);
		if (rc) {
			VCDRES_MSG_ERROR("\n clk_reset failed %d\n", rc);
			goto disable_and_release_all_clks;
		}
		msleep(20);

		clk_disable(resource_context.pclk);
		clk_disable(resource_context.hclk);
		clk_disable(resource_context.hclk_div2);

	}
	resource_context.rail_enabled = 1;
	mutex_unlock(&resource_context.lock);
	return true;

disable_and_release_all_clks:
	clk_disable(resource_context.hclk_div2);
disable_hclk_pclk:
	clk_disable(resource_context.hclk);
disable_pclk:
	clk_disable(resource_context.pclk);
release_all_clks:
	clk_put(resource_context.hclk_div2);
	resource_context.hclk_div2 = NULL;
release_hclk_pclk:
	clk_put(resource_context.hclk);
	resource_context.hclk = NULL;
release_pclk:
	clk_put(resource_context.pclk);
	resource_context.pclk = NULL;
disable_regulator:
	regulator_disable(resource_context.regulator);
bail_out:
	mutex_unlock(&resource_context.lock);
	return false;
}
Example #8
static int hdmi_streamon(struct hdmi_device *hdev)
{
	struct device *dev = hdev->dev;
	struct hdmi_resources *res = &hdev->res;
	int ret, tries;

	dev_dbg(dev, "%s\n", __func__);

	hdev->streaming = 1;
	ret = v4l2_subdev_call(hdev->phy_sd, video, s_stream, 1);
	if (ret)
		return ret;

	/* waiting for HDMIPHY's PLL to get to steady state */
	for (tries = 100; tries; --tries) {
		if (is_hdmiphy_ready(hdev))
			break;

		mdelay(1);
	}
	/* steady state not achieved */
	if (tries == 0) {
		dev_err(dev, "hdmiphy's pll could not reach steady state.\n");
		v4l2_subdev_call(hdev->phy_sd, video, s_stream, 0);
		hdmi_dumpregs(hdev, "s_stream");
		return -EIO;
	}

	/* hdmiphy clock is used for HDMI in streaming mode */
	clk_disable(res->sclk_hdmi);
	clk_set_parent(res->sclk_hdmi, res->sclk_hdmiphy);
	clk_enable(res->sclk_hdmi);

	/* 3D test */
	hdmi_set_infoframe(hdev);

	/* set packets for audio */
	hdmi_set_packets(hdev);

	/* init audio */
#if defined(CONFIG_VIDEO_EXYNOS_HDMI_AUDIO_I2S)
	hdmi_reg_i2s_audio_init(hdev);
#elif defined(CONFIG_VIDEO_EXYNOS_HDMI_AUDIO_SPDIF)
	hdmi_reg_spdif_audio_init(hdev);
#endif
	/* enable HDMI audio */
	if (hdev->audio_enable)
		hdmi_audio_enable(hdev, 1);

	/* enable HDMI and timing generator */
	hdmi_enable(hdev, 1);
	hdmi_tg_enable(hdev, 1);

	/* start HDCP if enabled */
	if (hdev->hdcp_info.hdcp_enable) {
		ret = hdcp_start(hdev);
		if (ret)
			return ret;
	}

	hdmi_dumpregs(hdev, "streamon");
	return 0;
}
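The sclk_hdmi handling above is an instance of a common legacy-clk idiom: gate the child clock, switch its mux parent, then ungate it, so downstream logic never runs from a glitching mux. A generic sketch of that pattern, using the same legacy clk_* API as the rest of these examples (this helper is not part of the HDMI driver, and error handling is elided just as the driver elides it):

#include <linux/clk.h>

/* Illustrative helper: reparent a clock while it is gated. */
static int clk_reparent_gated(struct clk *child, struct clk *new_parent)
{
	int ret;

	clk_disable(child);			 /* gate the child first */
	ret = clk_set_parent(child, new_parent); /* switch the mux input */
	clk_enable(child);			 /* ungate again */
	return ret;
}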
Example #9
static int __devinit hdmi_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct i2c_adapter *phy_adapter;
	struct hdmi_device *hdmi_dev = NULL;
	struct hdmi_driver_data *drv_data;
	int ret;
	unsigned int irq_type;

	dev_dbg(dev, "probe start\n");

	hdmi_dev = kzalloc(sizeof(*hdmi_dev), GFP_KERNEL);
	if (!hdmi_dev) {
		dev_err(dev, "out of memory\n");
		ret = -ENOMEM;
		goto fail;
	}

	hdmi_dev->dev = dev;

	ret = hdmi_resources_init(hdmi_dev);
	if (ret)
		goto fail_hdev;

	/* mapping HDMI registers */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(dev, "get memory resource failed.\n");
		ret = -ENXIO;
		goto fail_init;
	}

	hdmi_dev->regs = ioremap(res->start, resource_size(res));
	if (hdmi_dev->regs == NULL) {
		dev_err(dev, "register mapping failed.\n");
		ret = -ENXIO;
		goto fail_init;
	}

	/* External hpd */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(dev, "get external interrupt resource failed.\n");
		ret = -ENXIO;
		goto fail_regs;
	}
	hdmi_dev->ext_irq = res->start;

	/* Internal hpd */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	if (res == NULL) {
		dev_err(dev, "get internal interrupt resource failed.\n");
		ret = -ENXIO;
		goto fail_regs;
	}
	hdmi_dev->int_irq = res->start;

	/* workqueue for HPD */
	hdmi_dev->hpd_wq = create_workqueue("hdmi-hpd");
	if (hdmi_dev->hpd_wq == NULL) {
		ret = -ENXIO;
		goto fail_regs;
	}
	INIT_WORK(&hdmi_dev->hpd_work, s5p_hpd_kobject_uevent);

	/* setting v4l2 name to prevent WARN_ON in v4l2_device_register */
	strlcpy(hdmi_dev->v4l2_dev.name, dev_name(dev),
		sizeof(hdmi_dev->v4l2_dev.name));
	/* passing NULL owner prevents driver from erasing drvdata */
	ret = v4l2_device_register(NULL, &hdmi_dev->v4l2_dev);
	if (ret) {
		dev_err(dev, "could not register v4l2 device.\n");
		goto fail_regs;
	}

	drv_data = (struct hdmi_driver_data *)
		platform_get_device_id(pdev)->driver_data;
	dev_info(dev, "hdmiphy i2c bus number = %d\n", drv_data->hdmiphy_bus);

	phy_adapter = i2c_get_adapter(drv_data->hdmiphy_bus);
	if (phy_adapter == NULL) {
		dev_err(dev, "adapter request failed\n");
		ret = -ENXIO;
		goto fail_vdev;
	}

	hdmi_dev->phy_sd = v4l2_i2c_new_subdev_board(&hdmi_dev->v4l2_dev,
		phy_adapter, &hdmiphy_info, NULL);
	/* whether this succeeded or not, the adapter is no longer needed */
	i2c_put_adapter(phy_adapter);
	if (hdmi_dev->phy_sd == NULL) {
		dev_err(dev, "missing subdev for hdmiphy\n");
		ret = -ENODEV;
		goto fail_vdev;
	}

	/* HDMI PHY power off
	 * HDMI PHY is on as default configuration
	 * So, HDMI PHY must be turned off if it's not used */
	clk_enable(hdmi_dev->res.hdmiphy);
	v4l2_subdev_call(hdmi_dev->phy_sd, core, s_power, 0);
	clk_disable(hdmi_dev->res.hdmiphy);

	pm_runtime_enable(dev);

	/* irq setting by TV power on/off status */
	if (!pm_runtime_suspended(hdmi_dev->dev)) {
		hdmi_dev->curr_irq = hdmi_dev->int_irq;
		irq_type = 0;
		s5p_v4l2_int_src_hdmi_hpd();
	} else {
		if (s5p_v4l2_hpd_read_gpio())
			atomic_set(&hdmi_dev->hpd_state, HPD_HIGH);
		else
			atomic_set(&hdmi_dev->hpd_state, HPD_LOW);
		hdmi_dev->curr_irq = hdmi_dev->ext_irq;
		irq_type = IRQ_TYPE_EDGE_BOTH;
		s5p_v4l2_int_src_ext_hpd();
	}

	hdmi_dev->hpd_user_checked = false;

	ret = request_irq(hdmi_dev->curr_irq, hdmi_irq_handler,
			irq_type, "hdmi", hdmi_dev);

	if (ret) {
		dev_err(dev, "request interrupt failed.\n");
		goto fail_vdev;
	}

	hdmi_dev->cur_preset = HDMI_DEFAULT_PRESET;
	/* FIXME: missing fail preset is not supported */
	hdmi_dev->cur_conf = hdmi_preset2conf(hdmi_dev->cur_preset);

	/* default audio configuration : enable audio */
	hdmi_dev->audio_enable = 1;
	hdmi_dev->sample_rate = DEFAULT_SAMPLE_RATE;
	hdmi_dev->bits_per_sample = DEFAULT_BITS_PER_SAMPLE;
	hdmi_dev->audio_codec = DEFAULT_AUDIO_CODEC;

	/* register hdmi subdev as entity */
	ret = hdmi_register_entity(hdmi_dev);
	if (ret)
		goto fail_irq;

	hdmi_entity_info_print(hdmi_dev);

	/* initialize hdcp resource */
	ret = hdcp_prepare(hdmi_dev);
	if (ret)
		goto fail_irq;

	dev_info(dev, "probe sucessful\n");

	return 0;

fail_irq:
	free_irq(hdmi_dev->curr_irq, hdmi_dev);

fail_vdev:
	v4l2_device_unregister(&hdmi_dev->v4l2_dev);

fail_regs:
	iounmap(hdmi_dev->regs);

fail_init:
	hdmi_resources_cleanup(hdmi_dev);

fail_hdev:
	kfree(hdmi_dev);

fail:
	dev_err(dev, "probe failed\n");
	return ret;
}
Example #10
static int __devinit k3_gps_bcm_probe(struct platform_device *pdev)
{
    GPS_BCM_INFO *gps_bcm;
    struct resource *res;
    int ret = 0;

    gps_bcm = kzalloc(sizeof(GPS_BCM_INFO), GFP_KERNEL);
    if (!gps_bcm) {
        dev_err(&pdev->dev, "Alloc memory failed\n");
        return -ENOMEM;
    }

    platform_set_drvdata(pdev, gps_bcm);

    /* Get enable gpio */
    res = platform_get_resource(pdev, IORESOURCE_IO, 0);
    if (!res) {
        dev_err(&pdev->dev, "Get enable gpio resourse failed\n");
        ret = -ENXIO;
        goto err_free;
    }
    gps_bcm->gpioid_en = res->start;

    ret = gpio_request(gps_bcm->gpioid_en, "gps_enbale");
    if (ret < 0) {
        dev_err(&pdev->dev,  "gpio_request failed, gpio=%lu, ret=%d\n", gps_bcm->gpioid_en, ret);
        goto err_free;
    }
    gpio_export(gps_bcm->gpioid_en, false);

    /* Get reset gpio */
    res = platform_get_resource(pdev, IORESOURCE_IO, 1);
    if (!res) {
        dev_err(&pdev->dev, "Get reset gpio resourse failed\n");
        ret = -ENXIO;
        goto err_free_gpio_en;
    }
    gps_bcm->gpioid_ret = res->start;

    ret = gpio_request(gps_bcm->gpioid_ret, "gps_reset");
    if (ret < 0) {
        dev_err(&pdev->dev,  "gpio_request failed, gpio=%lu, ret=%d\n", gps_bcm->gpioid_ret, ret);
        goto err_free_gpio_en;
    }
    gpio_export(gps_bcm->gpioid_ret, false);

#ifndef CONFIG_MACH_K3V2OEM1
    /* Get power gpio (VDDIO 1.8V), Only for FPGA */
    res = platform_get_resource(pdev, IORESOURCE_IO, 2);
    if (!res) {
        dev_err(&pdev->dev, "Get power gpio resourse failed\n");
        ret = -ENXIO;
        goto err_free_gpio_ret;
    }
    gps_bcm->gpioid_power = res->start;

    ret = gpio_request(gps_bcm->gpioid_power, "gps_power");
    if (ret < 0) {
        dev_err(&pdev->dev, "gpio_request failed, gpio=%lu, rc=%d\n", gps_bcm->gpioid_power, ret);
        gpio_free(gps_bcm->gpioid_en);
        goto err_free_gpio_ret;
    }

    /* Low GPS power, only for FPGA */
    gpio_direction_output(gps_bcm->gpioid_power, 0);

    /* High GPS power, only for FPGA */
    gpio_set_value(gps_bcm->gpioid_power, 1);
    dev_dbg(&pdev->dev,  "High power\n");
#else
    /* Set 32KC clock */
    gps_bcm->clk = clk_get(NULL, "clk_pmu32kc");
    if (IS_ERR(gps_bcm->clk)) {
        dev_err(&pdev->dev, "Get gps clk failed\n");
        ret = PTR_ERR(gps_bcm->clk);
        goto err_free_gpio_ret;
    }
    ret = clk_enable(gps_bcm->clk);
    if (ret) {
        dev_err(&pdev->dev, "Enable clk failed, ret=%d\n", ret);
        goto err_free_clk;
    }

    /* Set iomux NORMAL, If set iomux failed, we still go on */
    gps_bcm->piomux_block  = iomux_get_block("block_gps_boardcom");
    if (!gps_bcm->piomux_block)
        dev_err(&pdev->dev, "Get gps iomux_block failed\n");

    gps_bcm->pblock_config = iomux_get_blockconfig("block_gps_boardcom");
    if (!gps_bcm->pblock_config)
        dev_err(&pdev->dev, "Get gps block_config failed\n");

    if ((gps_bcm->piomux_block) && (gps_bcm->pblock_config)) {
        ret = blockmux_set(gps_bcm->piomux_block, gps_bcm->pblock_config, NORMAL);
        if (ret)
            dev_err(&pdev->dev, "Set gps iomux to NORMAL failed, ret=%d\n", ret);
    }
#endif

    /* Low Reset GPIO */
    gpio_direction_output(gps_bcm->gpioid_ret, 0);
    dev_dbg(&pdev->dev,  "Low reset\n");

    /* Low Enable GPIO */
    gpio_direction_output(gps_bcm->gpioid_en, 0);
    dev_dbg(&pdev->dev,  "Low enable\n");

    /* High Reset GPIO*/
    gpio_set_value(gps_bcm->gpioid_ret, 1);
    dev_dbg(&pdev->dev,  "High reset\n");

    return 0;

#ifdef CONFIG_MACH_K3V2OEM1
err_free_clk:
    clk_put(gps_bcm->clk);
#endif

err_free_gpio_ret:
    gpio_free(gps_bcm->gpioid_ret);

err_free_gpio_en:
    gpio_free(gps_bcm->gpioid_en);

err_free:
    kfree(gps_bcm);
    gps_bcm = NULL;
    return ret;
}
Example #11
static int s3cfb_probe(struct platform_device *pdev)
{
	struct s3c_platform_fb *pdata;
	struct resource *res;
	int ret = 0;

	/* initializing global structure */
	ctrl = kzalloc(sizeof(struct s3cfb_global), GFP_KERNEL);
	if (!ctrl) {
		dev_err(&pdev->dev, "failed to allocate for global fb structure\n");
		ret = -ENOMEM;
		goto err_global;
	}

	ctrl->dev = &pdev->dev;
	s3cfb_set_lcd_info(ctrl);

	/* gpio */
	pdata = to_fb_plat(&pdev->dev);
	if (pdata->cfg_gpio)
		pdata->cfg_gpio(pdev);

	/* clock */
	ctrl->clock = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(ctrl->clock)) {
		dev_err(ctrl->dev, "failed to get fimd clock source\n");
		ret = -EINVAL;
		goto err_clk;
	}

	clk_enable(ctrl->clock);

	/* io memory */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(ctrl->dev, "failed to get io memory region\n");
		ret = -EINVAL;
		goto err_io;
	}

	/* request mem region */
	res = request_mem_region(res->start,
		res->end - res->start + 1, pdev->name);
	if (!res) {
		dev_err(ctrl->dev, "failed to request io memory region\n");
		ret = -EINVAL;
		goto err_io;
	}

	/* ioremap for register block */
	ctrl->regs = ioremap(res->start, res->end - res->start + 1);
	if (!ctrl->regs) {
		dev_err(ctrl->dev, "failed to remap io region\n");
		ret = -EINVAL;
		goto err_io;
	}

	/* irq */
	ctrl->irq = platform_get_irq(pdev, 0);
	if (request_irq(ctrl->irq, s3cfb_irq_frame, IRQF_DISABLED,
				pdev->name, ctrl)) {
		dev_err(ctrl->dev, "request_irq failed\n");
		ret = -EINVAL;
		goto err_irq;
	}

#ifdef CONFIG_FB_S3C_V2_TRACE_UNDERRUN
	if (request_irq(platform_get_irq(pdev, 1), s3cfb_irq_fifo,
			IRQF_DISABLED, pdev->name, ctrl)) {
		dev_err(ctrl->dev, "request_irq failed\n");
		ret = -EINVAL;
		goto err_irq;
	}

	s3cfb_set_fifo_interrupt(ctrl, 1);
	dev_info(ctrl->dev, "fifo underrun trace\n");
#endif

	/* init global */
	s3cfb_init_global();
	s3cfb_display_on(ctrl);

	/* panel control */
	if (pdata->backlight_on)
		pdata->backlight_on(pdev);

	if (pdata->lcd_on)
		pdata->lcd_on(pdev);

	if (ctrl->lcd->init_ldi)
		ctrl->lcd->init_ldi();

	/* prepare memory */
	if (s3cfb_alloc_framebuffer())
		goto err_alloc;

	if (s3cfb_register_framebuffer())
		goto err_alloc;

	s3cfb_set_clock(ctrl);
	s3cfb_enable_window(pdata->default_win);

	ret = device_create_file(&(pdev->dev), &dev_attr_win_power);
	if (ret < 0)
		dev_err(ctrl->dev, "failed to add sysfs entries\n");

	dev_info(ctrl->dev, "registered successfully\n");

	return 0;

err_alloc:
	free_irq(ctrl->irq, ctrl);

err_irq:
	iounmap(ctrl->regs);

err_io:
	clk_disable(ctrl->clock);

err_clk:
	clk_put(ctrl->clock);

err_global:
	return ret;
}
Example #12
static int mfc_open(struct inode *inode, struct file *file)
{
	struct mfc_inst_ctx *mfc_ctx;
	int ret;

	mutex_lock(&mfc_mutex);

	if (!mfc_is_running()) {
		/* Turn on mfc power domain regulator */
		ret = regulator_enable(mfc_pd_regulator);
		if (ret < 0) {
			mfc_err("MFC_RET_POWER_ENABLE_FAIL\n");
			ret = -EINVAL;
			goto err_open;
		}

		clk_enable(mfc_sclk);

		mfc_load_firmware(mfc_fw_info->data, mfc_fw_info->size);

		if (mfc_init_hw() != true) {
			clk_disable(mfc_sclk);
			ret =  -ENODEV;
			goto err_regulator;
		}
		clk_disable(mfc_sclk);
	}

	mfc_ctx = kzalloc(sizeof(struct mfc_inst_ctx), GFP_KERNEL);
	if (mfc_ctx == NULL) {
		mfc_err("MFCINST_MEMORY_ALLOC_FAIL\n");
		ret = -ENOMEM;
		goto err_regulator;
	}

	/* get the inst no allocating some part of memory among reserved memory */
	mfc_ctx->mem_inst_no = mfc_get_mem_inst_no();
	mfc_ctx->InstNo = -1;
	if (mfc_ctx->mem_inst_no < 0) {
		mfc_err("MFCINST_INST_NUM_EXCEEDED\n");
		ret = -EPERM;
		goto err_mem_inst;
	}

	if (mfc_set_state(mfc_ctx, MFCINST_STATE_OPENED) < 0) {
		mfc_err("MFCINST_ERR_STATE_INVALID\n");
		ret = -ENODEV;
		goto err_set_state;
	}

	/* Decoder only */
	mfc_ctx->extraDPB = MFC_MAX_EXTRA_DPB;
	mfc_ctx->FrameType = MFC_RET_FRAME_NOT_SET;

	file->private_data = mfc_ctx;

	mutex_unlock(&mfc_mutex);

	return 0;

err_set_state:
	mfc_return_mem_inst_no(mfc_ctx->mem_inst_no);
err_mem_inst:
	kfree(mfc_ctx);
err_regulator:
	if (!mfc_is_running()) {
		/* Turn off mfc power domain regulator */
		ret = regulator_disable(mfc_pd_regulator);
		if (ret < 0)
			mfc_err("MFC_RET_POWER_DISABLE_FAIL\n");
	}
err_open:
	mutex_unlock(&mfc_mutex);

	return ret;
}
Example #13
static long mfc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int ret, ex_ret;
	struct mfc_inst_ctx *mfc_ctx = NULL;
	struct mfc_common_args in_param;

	mutex_lock(&mfc_mutex);
	clk_enable(mfc_sclk);

	ret = copy_from_user(&in_param, (struct mfc_common_args *)arg, sizeof(struct mfc_common_args));
	if (ret) {
		mfc_err("Inparm copy error\n");
		ret = -EIO;
		in_param.ret_code = MFCINST_ERR_INVALID_PARAM;
		mutex_unlock(&mfc_mutex);
		goto out_ioctl;
	}

	mfc_ctx = (struct mfc_inst_ctx *)file->private_data;
	mutex_unlock(&mfc_mutex);

	switch (cmd) {
	case IOCTL_MFC_ENC_INIT:
		mutex_lock(&mfc_mutex);

		if (mfc_set_state(mfc_ctx, MFCINST_STATE_ENC_INITIALIZE) < 0) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		/* MFC encode init */
		in_param.ret_code = mfc_init_encode(mfc_ctx, &(in_param.args));
		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_ENC_EXE:
		mutex_lock(&mfc_mutex);
		if (mfc_ctx->MfcState < MFCINST_STATE_ENC_INITIALIZE) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		if (mfc_set_state(mfc_ctx, MFCINST_STATE_ENC_EXE) < 0) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		in_param.ret_code = mfc_exe_encode(mfc_ctx, &(in_param.args));
		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_DEC_INIT:
		mutex_lock(&mfc_mutex);
		if (mfc_set_state(mfc_ctx, MFCINST_STATE_DEC_INITIALIZE) < 0) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		/* MFC decode init */
		in_param.ret_code = mfc_init_decode(mfc_ctx, &(in_param.args));
		if (in_param.ret_code < 0) {
			ret = in_param.ret_code;
			mutex_unlock(&mfc_mutex);
			break;
		}

		if (in_param.args.dec_init.out_dpb_cnt <= 0) {
			mfc_err("MFC out_dpb_cnt error\n");
			mutex_unlock(&mfc_mutex);
			break;
		}

		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_DEC_EXE:
		mutex_lock(&mfc_mutex);
		if (mfc_ctx->MfcState < MFCINST_STATE_DEC_INITIALIZE) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		if (mfc_set_state(mfc_ctx, MFCINST_STATE_DEC_EXE) < 0) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		in_param.ret_code = mfc_exe_decode(mfc_ctx, &(in_param.args));
		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_GET_CONFIG:
		mutex_lock(&mfc_mutex);
		if (mfc_ctx->MfcState < MFCINST_STATE_DEC_INITIALIZE) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		in_param.ret_code = mfc_get_config(mfc_ctx, &(in_param.args));
		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_SET_CONFIG:
		mutex_lock(&mfc_mutex);
		in_param.ret_code = mfc_set_config(mfc_ctx, &(in_param.args));
		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_GET_IN_BUF:
		mutex_lock(&mfc_mutex);
		if (mfc_ctx->MfcState < MFCINST_STATE_OPENED) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		if (in_param.args.mem_alloc.buff_size <= 0) {
			mfc_err("MFCINST_ERR_INVALID_PARAM\n");
			in_param.ret_code = MFCINST_ERR_INVALID_PARAM;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		if ((is_dec_codec(in_param.args.mem_alloc.codec_type)) &&
				(in_param.args.mem_alloc.buff_size < (CPB_BUF_SIZE + DESC_BUF_SIZE))) {
			in_param.args.mem_alloc.buff_size = CPB_BUF_SIZE + DESC_BUF_SIZE;
		}

		/* Buffer manager should have 64KB alignment for MFC base addresses */
		in_param.args.mem_alloc.buff_size = ALIGN_TO_8KB(in_param.args.mem_alloc.buff_size);

		/* allocate stream buf for decoder & current YC buf for encoder */
		if (is_dec_codec(in_param.args.mem_alloc.codec_type))
			in_param.ret_code = mfc_allocate_buffer(mfc_ctx, &in_param.args, 0);
		else
			in_param.ret_code = mfc_allocate_buffer(mfc_ctx, &in_param.args, 1);

		mfc_ctx->desc_buff_paddr = in_param.args.mem_alloc.out_paddr + CPB_BUF_SIZE;

		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_FREE_BUF:
		mutex_lock(&mfc_mutex);
		if (mfc_ctx->MfcState < MFCINST_STATE_OPENED) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		in_param.ret_code = mfc_release_buffer((unsigned char *)in_param.args.mem_free.u_addr);
		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_GET_PHYS_ADDR:
		mutex_lock(&mfc_mutex);
		mfc_debug("IOCTL_MFC_GET_PHYS_ADDR\n");

		if (mfc_ctx->MfcState < MFCINST_STATE_OPENED) {
			mfc_err("MFCINST_ERR_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;
			mutex_unlock(&mfc_mutex);
			break;
		}

		in_param.ret_code = mfc_get_phys_addr(mfc_ctx, &(in_param.args));
		ret = in_param.ret_code;
		mutex_unlock(&mfc_mutex);
		break;

	case IOCTL_MFC_GET_MMAP_SIZE:

		if (mfc_ctx->MfcState < MFCINST_STATE_OPENED) {
			mfc_err("MFC_RET_STATE_INVALID\n");
			in_param.ret_code = MFCINST_ERR_STATE_INVALID;
			ret = -EINVAL;

			break;
		}

		in_param.ret_code = MFCINST_RET_OK;
		ret = mfc_ctx->port0_mmap_size;

		break;

	case IOCTL_MFC_BUF_CACHE:
		mutex_lock(&mfc_mutex);

		in_param.ret_code = MFCINST_RET_OK;
		mfc_ctx->buf_type = in_param.args.buf_type;

		mutex_unlock(&mfc_mutex);
		break;

	default:
		mfc_err("Requested ioctl command is not defined. (ioctl cmd=0x%08x)\n", cmd);
		in_param.ret_code  = MFCINST_ERR_INVALID_PARAM;
		ret = -EINVAL;
	}

out_ioctl:
	clk_disable(mfc_sclk);

	ex_ret = copy_to_user((struct mfc_common_args *)arg, &in_param, sizeof(struct mfc_common_args));
	if (ex_ret) {
		mfc_err("Outparm copy to user error\n");
		ret = -EIO;
	}

	mfc_debug_L0("---------------IOCTL return = %d ---------------\n", ret);

	return ret;
}
Example #14
static int omap2_enter_full_retention(void)
{
	u32 l;

	/* There is 1 reference hold for all children of the oscillator
	 * clock, the following will remove it. If no one else uses the
	 * oscillator itself it will be disabled if/when we enter retention
	 * mode.
	 */
	clk_disable(osc_ck);

	/* Clear old wake-up events */
	/* REVISIT: These write to reserved bits? */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);
	omap2_prm_write_mod_reg(0xffffffff, WKUP_MOD, PM_WKST);

	/*
	 * Set MPU powerdomain's next power state to RETENTION;
	 * preserve logic state during retention
	 */
	pwrdm_set_logic_retst(mpu_pwrdm, PWRDM_POWER_RET);
	pwrdm_set_next_pwrst(mpu_pwrdm, PWRDM_POWER_RET);

	/* Workaround to kill USB */
	l = omap_ctrl_readl(OMAP2_CONTROL_DEVCONF0) | OMAP24XX_USBSTANDBYCTRL;
	omap_ctrl_writel(l, OMAP2_CONTROL_DEVCONF0);

	omap2_gpio_prepare_for_idle(0);

	/* One last check for pending IRQs to avoid extra latency due
	 * to sleeping unnecessarily. */
	if (omap_irq_pending())
		goto no_sleep;

	/* Jump to SRAM suspend code */
	omap2_sram_suspend(sdrc_read_reg(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_DLLA_CTRL),
			   OMAP_SDRC_REGADDR(SDRC_POWER));

no_sleep:
	omap2_gpio_resume_after_idle();

	clk_enable(osc_ck);

	/* clear CORE wake-up events */
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, PM_WKST1);
	omap2_prm_write_mod_reg(0xffffffff, CORE_MOD, OMAP24XX_PM_WKST2);

	/* wakeup domain events - bit 1: GPT1, bit5 GPIO */
	omap2_prm_clear_mod_reg_bits(0x4 | 0x1, WKUP_MOD, PM_WKST);

	/* MPU domain wake events */
	l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x01)
		omap2_prm_write_mod_reg(0x01, OCP_MOD,
				  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);
	if (l & 0x20)
		omap2_prm_write_mod_reg(0x20, OCP_MOD,
				  OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	/* Mask future PRCM-to-MPU interrupts */
	omap2_prm_write_mod_reg(0x0, OCP_MOD, OMAP2_PRCM_IRQSTATUS_MPU_OFFSET);

	return 0;
}
Example #15
mali_bool mali_clk_set_rate(unsigned int clk, unsigned int mhz)
{
	unsigned long rate = 0;
	mali_bool bis_vpll = MALI_TRUE;

#ifndef CONFIG_VPLL_USE_FOR_TVENC
	bis_vpll = MALI_TRUE;
#endif

	_mali_osk_lock_wait(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	if (mali_clk_get(bis_vpll) == MALI_FALSE)
	{
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return MALI_FALSE;
	}

	rate = (unsigned long)clk * (unsigned long)mhz;
	MALI_DEBUG_PRINT(3,("= clk_set_rate : %d , %d \n",clk, mhz ));

	if (bis_vpll)
	{
		clk_set_rate(fout_vpll_clock, (unsigned int)clk * GPU_MHZ);
		clk_set_parent(vpll_src_clock, ext_xtal_clock);
		clk_set_parent(sclk_vpll_clock, fout_vpll_clock);

		clk_set_parent(mali_parent_clock, sclk_vpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}
	else
	{
		clk_set_parent(mali_parent_clock, mpll_clock);
		clk_set_parent(mali_clock, mali_parent_clock);
	}

	if (clk_enable(mali_clock) < 0)
	{
		mali_clk_put(MALI_FALSE);
		_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);
		return MALI_FALSE;
	}

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
                               rate, 0, 0, 0, 0);
#endif

	clk_set_rate(mali_clock, rate);
	rate = clk_get_rate(mali_clock);

#if MALI_TIMELINE_PROFILING_ENABLED
    _mali_profiling_add_event( MALI_PROFILING_EVENT_TYPE_SINGLE |
                               MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
                               MALI_PROFILING_EVENT_REASON_SINGLE_SW_GPU_FREQ,
                               rate, 1, 0, 0, 0);
#endif

	if (bis_vpll)
		mali_gpu_clk = (int)(rate / mhz);
	else
		mali_gpu_clk = (int)((rate + 500000) / mhz);

	GPU_MHZ = mhz;
	MALI_DEBUG_PRINT(3,("= clk_get_rate: %d \n",mali_gpu_clk));

	mali_clk_put(MALI_FALSE);

	_mali_osk_lock_signal(mali_dvfs_lock, _MALI_OSK_LOCKMODE_RW);

	return MALI_TRUE;
}
Example #16
static int do_mmc_init(int dev_index, bool removable)
{
	struct mmc_host *host;
	struct mmc *mmc;
#ifdef CONFIG_TEGRA186
	int ret;
#endif

	/* DT should have been read & host config filled in */
	host = &mmc_host[dev_index];
	if (!host->enabled)
		return -1;

	debug(" do_mmc_init: index %d, bus width %d pwr_gpio %d cd_gpio %d\n",
	      dev_index, host->width, gpio_get_number(&host->pwr_gpio),
	      gpio_get_number(&host->cd_gpio));

	host->clock = 0;

#ifdef CONFIG_TEGRA186
	ret = reset_assert(&host->reset_ctl);
	if (ret)
		return ret;
	ret = clk_enable(&host->clk);
	if (ret)
		return ret;
	ret = clk_set_rate(&host->clk, 20000000);
	if (IS_ERR_VALUE(ret))
		return ret;
	ret = reset_deassert(&host->reset_ctl);
	if (ret)
		return ret;
#else
	clock_start_periph_pll(host->mmc_id, CLOCK_ID_PERIPH, 20000000);
#endif

	if (dm_gpio_is_valid(&host->pwr_gpio))
		dm_gpio_set_value(&host->pwr_gpio, 1);

	memset(&host->cfg, 0, sizeof(host->cfg));

	host->cfg.name = "Tegra SD/MMC";
	host->cfg.ops = &tegra_mmc_ops;

	host->cfg.voltages = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195;
	host->cfg.host_caps = 0;
	if (host->width == 8)
		host->cfg.host_caps |= MMC_MODE_8BIT;
	if (host->width >= 4)
		host->cfg.host_caps |= MMC_MODE_4BIT;
	host->cfg.host_caps |= MMC_MODE_HS_52MHz | MMC_MODE_HS;

	/*
	 * min freq is for card identification, and is the highest
	 *  low-speed SDIO card frequency (actually 400KHz)
	 * max freq is highest HS eMMC clock as per the SD/MMC spec
	 *  (actually 52MHz)
	 */
	host->cfg.f_min = 375000;
	host->cfg.f_max = 48000000;

	host->cfg.b_max = CONFIG_SYS_MMC_MAX_BLK_COUNT;

	mmc = mmc_create(&host->cfg, host);
	if (mmc == NULL)
		return -1;
	mmc->block_dev.removable = removable;

	return 0;
}
Example #17
static int mv_u3d_phy_init(struct usb_phy *phy)
{
	struct mv_u3d_phy *mv_u3d_phy;
	void __iomem *base;
	u32 val, count;

	/* enable usb3 phy */
	mv_u3d_phy = container_of(phy, struct mv_u3d_phy, phy);

	if (mv_u3d_phy->clk)
		clk_enable(mv_u3d_phy->clk);

	base = mv_u3d_phy->base;

	val = mv_u3d_phy_read(base, USB3_POWER_PLL_CONTROL);
	val &= ~(USB3_POWER_PLL_CONTROL_PU_MASK);
	val |= 0xF << USB3_POWER_PLL_CONTROL_PU_SHIFT;
	mv_u3d_phy_write(base, USB3_POWER_PLL_CONTROL, val);
	udelay(100);

	mv_u3d_phy_write(base, USB3_RESET_CONTROL,
			USB3_RESET_CONTROL_RESET_PIPE);
	udelay(100);

	mv_u3d_phy_write(base, USB3_RESET_CONTROL,
			USB3_RESET_CONTROL_RESET_PIPE
			| USB3_RESET_CONTROL_RESET_PHY);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_POWER_PLL_CONTROL);
	val &= ~(USB3_POWER_PLL_CONTROL_REF_FREF_SEL_MASK
		| USB3_POWER_PLL_CONTROL_PHY_MODE_MASK);
	val |=  (USB3_PLL_25MHZ << USB3_POWER_PLL_CONTROL_REF_FREF_SEL_SHIFT)
		| (0x5 << USB3_POWER_PLL_CONTROL_PHY_MODE_SHIFT);
	mv_u3d_phy_write(base, USB3_POWER_PLL_CONTROL, val);
	udelay(100);

	mv_u3d_phy_clear(base, USB3_KVCO_CALI_CONTROL,
		USB3_KVCO_CALI_CONTROL_USE_MAX_PLL_RATE_MASK);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_SQUELCH_FFE);
	val &= ~(USB3_SQUELCH_FFE_FFE_CAP_SEL_MASK
		| USB3_SQUELCH_FFE_FFE_RES_SEL_MASK
		| USB3_SQUELCH_FFE_SQ_THRESH_IN_MASK);
	val |= ((0xD << USB3_SQUELCH_FFE_FFE_CAP_SEL_SHIFT)
		| (0x7 << USB3_SQUELCH_FFE_FFE_RES_SEL_SHIFT)
		| (0x8 << USB3_SQUELCH_FFE_SQ_THRESH_IN_SHIFT));
	mv_u3d_phy_write(base, USB3_SQUELCH_FFE, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_GEN1_SET0);
	val &= ~USB3_GEN1_SET0_G1_TX_SLEW_CTRL_EN_MASK;
	val |= 1 << USB3_GEN1_SET0_G1_TX_EMPH_EN_SHIFT;
	mv_u3d_phy_write(base, USB3_GEN1_SET0, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_GEN2_SET0);
	val &= ~(USB3_GEN2_SET0_G2_TX_AMP_MASK
		| USB3_GEN2_SET0_G2_TX_EMPH_AMP_MASK
		| USB3_GEN2_SET0_G2_TX_SLEW_CTRL_EN_MASK);
	val |= ((0x14 << USB3_GEN2_SET0_G2_TX_AMP_SHIFT)
		| (1 << USB3_GEN2_SET0_G2_TX_AMP_ADJ_SHIFT)
		| (0xA << USB3_GEN2_SET0_G2_TX_EMPH_AMP_SHIFT)
		| (1 << USB3_GEN2_SET0_G2_TX_EMPH_EN_SHIFT));
	mv_u3d_phy_write(base, USB3_GEN2_SET0, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_TX_EMPPH);
	val &= ~(USB3_TX_EMPPH_AMP_MASK
		| USB3_TX_EMPPH_EN_MASK
		| USB3_TX_EMPPH_AMP_FORCE_MASK
		| USB3_TX_EMPPH_PAR1_MASK
		| USB3_TX_EMPPH_PAR2_MASK);
	val |= ((0xB << USB3_TX_EMPPH_AMP_SHIFT)
		| (1 << USB3_TX_EMPPH_EN_SHIFT)
		| (1 << USB3_TX_EMPPH_AMP_FORCE_SHIFT)
		| (0x1C << USB3_TX_EMPPH_PAR1_SHIFT)
		| (1 << USB3_TX_EMPPH_PAR2_SHIFT));

	mv_u3d_phy_write(base, USB3_TX_EMPPH, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_GEN2_SET1);
	val &= ~(USB3_GEN2_SET1_G2_RX_SELMUPI_MASK
		| USB3_GEN2_SET1_G2_RX_SELMUPF_MASK
		| USB3_GEN2_SET1_G2_RX_SELMUFI_MASK
		| USB3_GEN2_SET1_G2_RX_SELMUFF_MASK);
	val |= ((1 << USB3_GEN2_SET1_G2_RX_SELMUPI_SHIFT)
		| (1 << USB3_GEN2_SET1_G2_RX_SELMUPF_SHIFT)
		| (1 << USB3_GEN2_SET1_G2_RX_SELMUFI_SHIFT)
		| (1 << USB3_GEN2_SET1_G2_RX_SELMUFF_SHIFT));
	mv_u3d_phy_write(base, USB3_GEN2_SET1, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_DIGITAL_LOOPBACK_EN);
	val &= ~USB3_DIGITAL_LOOPBACK_EN_SEL_BITS_MASK;
	val |= 1 << USB3_DIGITAL_LOOPBACK_EN_SEL_BITS_SHIFT;
	mv_u3d_phy_write(base, USB3_DIGITAL_LOOPBACK_EN, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_IMPEDANCE_TX_SSC);
	val &= ~USB3_IMPEDANCE_TX_SSC_SSC_AMP_MASK;
	val |= 0xC << USB3_IMPEDANCE_TX_SSC_SSC_AMP_SHIFT;
	mv_u3d_phy_write(base, USB3_IMPEDANCE_TX_SSC, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_IMPEDANCE_CALI_CTRL);
	val &= ~USB3_IMPEDANCE_CALI_CTRL_IMP_CAL_THR_MASK;
	val |= 0x4 << USB3_IMPEDANCE_CALI_CTRL_IMP_CAL_THR_SHIFT;
	mv_u3d_phy_write(base, USB3_IMPEDANCE_CALI_CTRL, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_PHY_ISOLATION_MODE);
	val &= ~(USB3_PHY_ISOLATION_MODE_PHY_GEN_RX_MASK
		| USB3_PHY_ISOLATION_MODE_PHY_GEN_TX_MASK
		| USB3_PHY_ISOLATION_MODE_TX_DRV_IDLE_MASK);
	val |= ((1 << USB3_PHY_ISOLATION_MODE_PHY_GEN_RX_SHIFT)
		| (1 << USB3_PHY_ISOLATION_MODE_PHY_GEN_TX_SHIFT));
	mv_u3d_phy_write(base, USB3_PHY_ISOLATION_MODE, val);
	udelay(100);

	val = mv_u3d_phy_read(base, USB3_TXDETRX);
	val &= ~(USB3_TXDETRX_VTHSEL_MASK);
	val |= 0x1 << USB3_TXDETRX_VTHSEL_SHIFT;
	mv_u3d_phy_write(base, USB3_TXDETRX, val);
	udelay(100);

	dev_dbg(mv_u3d_phy->dev, "start calibration\n");

calstart:
	/* Perform Manual Calibration */
	mv_u3d_phy_set(base, USB3_KVCO_CALI_CONTROL,
		1 << USB3_KVCO_CALI_CONTROL_CAL_START_SHIFT);

	mdelay(1);

	count = 0;
	while (1) {
		val = mv_u3d_phy_read(base, USB3_KVCO_CALI_CONTROL);
		if (val & (1 << USB3_KVCO_CALI_CONTROL_CAL_DONE_SHIFT))
			break;
		else if (count > 50) {
			dev_dbg(mv_u3d_phy->dev, "calibration failure, retry...\n");
			goto calstart;
		}
		count++;
		mdelay(1);
	}

	/* active PIPE interface */
	mv_u3d_phy_write(base, USB3_PIPE_SM_CTRL,
		1 << USB3_PIPE_SM_CTRL_PHY_INIT_DONE);

	return 0;
}
Example #18
void __init nmdk_timer_init(void)
{
    unsigned long rate;
    struct clk *clk0;
    struct clk *clk1;
    u32 cr;

    clk0 = clk_get_sys("mtu0", NULL);
    BUG_ON(IS_ERR(clk0));

    clk1 = clk_get_sys("mtu1", NULL);
    BUG_ON(IS_ERR(clk1));

    clk_enable(clk0);
    clk_enable(clk1);

    /*
     * Tick rate is 2.4MHz for Nomadik and 110MHz for ux500:
     * use a divide-by-16 counter if it's more than 16MHz
     */
    cr = MTU_CRn_32BITS;
    rate = clk_get_rate(clk0);
    if (rate > 16 << 20) {
        rate /= 16;
        cr |= MTU_CRn_PRESCALE_16;
    } else {
        cr |= MTU_CRn_PRESCALE_1;
    }
    clocksource_calc_mult_shift(&nmdk_clksrc, rate, MTU_MIN_RANGE);

    /* Timer 0 is the free running clocksource */
    writel(cr, mtu_base + MTU_CR(0));
    writel(0, mtu_base + MTU_LR(0));
    writel(0, mtu_base + MTU_BGLR(0));
    writel(cr | MTU_CRn_ENA, mtu_base + MTU_CR(0));

    /* Now the scheduling clock is ready */
    nmdk_clksrc.read = nmdk_read_timer;

    if (clocksource_register(&nmdk_clksrc))
        pr_err("timer: failed to initialize clock source %s\n",
               nmdk_clksrc.name);

    /* Timer 1 is used for events, fix according to rate */
    cr = MTU_CRn_32BITS;
    rate = clk_get_rate(clk1);
    if (rate > 16 << 20) {
        rate /= 16;
        cr |= MTU_CRn_PRESCALE_16;
    } else {
        cr |= MTU_CRn_PRESCALE_1;
    }
    clockevents_calc_mult_shift(&nmdk_clkevt, rate, MTU_MIN_RANGE);

    writel(cr | MTU_CRn_ONESHOT, mtu_base + MTU_CR(1)); /* off, currently */

    nmdk_clkevt.max_delta_ns =
        clockevent_delta2ns(0xffffffff, &nmdk_clkevt);
    nmdk_clkevt.min_delta_ns =
        clockevent_delta2ns(0x00000002, &nmdk_clkevt);
    nmdk_clkevt.cpumask	= cpumask_of(0);

    /* Register irq and clockevents */
    setup_irq(IRQ_MTU0, &nmdk_timer_irq);
    clockevents_register_device(&nmdk_clkevt);
}
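One detail that is easy to misread above: the "more than 16MHz" comment corresponds to the literal test rate > (16 << 20), i.e. 16,777,216 Hz, and the /16 prescale simply keeps the counter frequency low enough for the mult/shift calculations that follow (a reasonable reading of the code's own comment, not something stated explicitly). An illustrative one-liner for the effective tick rate:

/* Illustrative only: effective MTU tick rate after the prescale decision. */
static unsigned long mtu_effective_rate(unsigned long rate)
{
	return (rate > (16 << 20)) ? rate / 16 : rate;	/* 16 << 20 = 16777216 */
}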
Example #19
static int sossi_init(struct omapfb_device *fbdev)
{
	u32 l, k;
	struct clk *fck;
	struct clk *dpll1out_ck;
	int r;

	sossi.base = ioremap(OMAP_SOSSI_BASE, SZ_1K);
	if (!sossi.base) {
		dev_err(fbdev->dev, "can't ioremap SoSSI\n");
		return -ENOMEM;
	}

	sossi.fbdev = fbdev;
	spin_lock_init(&sossi.lock);

	dpll1out_ck = clk_get(fbdev->dev, "ck_dpll1out");
	if (IS_ERR(dpll1out_ck)) {
		dev_err(fbdev->dev, "can't get DPLL1OUT clock\n");
		return PTR_ERR(dpll1out_ck);
	}
	/*
	 * We need the parent clock rate, which we might divide further
	 * depending on the timing requirements of the controller. See
	 * _set_timings.
	 */
	sossi.fck_hz = clk_get_rate(dpll1out_ck);
	clk_put(dpll1out_ck);

	fck = clk_get(fbdev->dev, "ck_sossi");
	if (IS_ERR(fck)) {
		dev_err(fbdev->dev, "can't get SoSSI functional clock\n");
		return PTR_ERR(fck);
	}
	sossi.fck = fck;

	/* Reset and enable the SoSSI module */
	l = omap_readl(MOD_CONF_CTRL_1);
	l |= CONF_SOSSI_RESET_R;
	omap_writel(l, MOD_CONF_CTRL_1);
	l &= ~CONF_SOSSI_RESET_R;
	omap_writel(l, MOD_CONF_CTRL_1);

	clk_enable(sossi.fck);
	l = omap_readl(ARM_IDLECT2);
	l &= ~(1 << 8);			/* DMACK_REQ */
	omap_writel(l, ARM_IDLECT2);

	l = sossi_read_reg(SOSSI_INIT2_REG);
	/* Enable and reset the SoSSI block */
	l |= (1 << 0) | (1 << 1);
	sossi_write_reg(SOSSI_INIT2_REG, l);
	/* Take SoSSI out of reset */
	l &= ~(1 << 1);
	sossi_write_reg(SOSSI_INIT2_REG, l);

	sossi_write_reg(SOSSI_ID_REG, 0);
	l = sossi_read_reg(SOSSI_ID_REG);
	k = sossi_read_reg(SOSSI_ID_REG);

	if (l != 0x55555555 || k != 0xaaaaaaaa) {
		dev_err(fbdev->dev,
			"invalid SoSSI sync pattern: %08x, %08x\n", l, k);
		r = -ENODEV;
		goto err;
	}

	if ((r = omap_lcdc_set_dma_callback(sossi_dma_callback, NULL)) < 0) {
		dev_err(fbdev->dev, "can't get LCDC IRQ\n");
		r = -ENODEV;
		goto err;
	}

	l = sossi_read_reg(SOSSI_ID_REG); /* Component code */
	l = sossi_read_reg(SOSSI_ID_REG);
	dev_info(fbdev->dev, "SoSSI version %d.%d initialized\n",
		l >> 16, l & 0xffff);

	l = sossi_read_reg(SOSSI_INIT1_REG);
	l |= (1 << 19); /* DMA_MODE */
	l &= ~(1 << 31); /* REORDERING */
	sossi_write_reg(SOSSI_INIT1_REG, l);

	if ((r = request_irq(INT_1610_SoSSI_MATCH, sossi_match_irq,
			     IRQ_TYPE_EDGE_FALLING,
	     "sossi_match", sossi.fbdev->dev)) < 0) {
		dev_err(sossi.fbdev->dev, "can't get SoSSI match IRQ\n");
		goto err;
	}

	clk_disable(sossi.fck);
	return 0;

err:
	clk_disable(sossi.fck);
	clk_put(sossi.fck);
	return r;
}
Example #20
void fimg2d_clk_on(struct fimg2d_control *ctrl)
{
	clk_enable(ctrl->clock);
	fimg2d_debug("%s : clock enable\n", __func__);
}
static u32 res_trk_disable_videocore(void)
{
	int rc = -1;
	mutex_lock(&resource_context.lock);

	if (!resource_context.rail_enabled) {
		mutex_unlock(&resource_context.lock);
		return false;
	}

	if (!resource_context.clock_enabled &&
		resource_context.pclk &&
		resource_context.hclk &&
		resource_context.hclk_div2) {

		VCDRES_MSG_LOW("\nEnabling clk before disabling pwr rail\n");
		if (clk_set_rate(resource_context.hclk,
			mfc_clk_freq_table[0])) {
			VCDRES_MSG_ERROR("\n pwr_rail_disable:"
				 " set clk rate failed\n");
			goto bail_out;
		}

		if (clk_enable(resource_context.pclk)) {
			VCDRES_MSG_ERROR("vidc pclk Enable failed\n");
			goto bail_out;
		}

		if (clk_enable(resource_context.hclk)) {
			VCDRES_MSG_ERROR("vidc hclk Enable failed\n");
			goto disable_pclk;
		}

		if (clk_enable(resource_context.hclk_div2)) {
			VCDRES_MSG_ERROR("vidc hclk_div2 Enable failed\n");
			goto disable_hclk;
		}
	} else {
		VCDRES_MSG_ERROR("\ndisabling pwr rail: Enabling clk failed\n");
		goto bail_out;
	}

	resource_context.rail_enabled = 0;
	rc = clk_reset(resource_context.pclk, CLK_RESET_ASSERT);
	if (rc) {
		VCDRES_MSG_ERROR("\n clk_reset failed %d\n", rc);
		mutex_unlock(&resource_context.lock);
		return false;
	}
	msleep(20);

	clk_disable(resource_context.pclk);
	clk_disable(resource_context.hclk);
	clk_disable(resource_context.hclk_div2);

	clk_put(resource_context.hclk_div2);
	clk_put(resource_context.hclk);
	clk_put(resource_context.pclk);

	rc = regulator_disable(resource_context.regulator);
	if (rc) {
		VCDRES_MSG_ERROR("\n regulator disable failed %d\n", rc);
		mutex_unlock(&resource_context.lock);
		return false;
	}

	resource_context.hclk_div2 = NULL;
	resource_context.hclk = NULL;
	resource_context.pclk = NULL;

	mutex_unlock(&resource_context.lock);

	return true;

disable_hclk:
	clk_disable(resource_context.hclk);
disable_pclk:
	clk_disable(resource_context.pclk);
bail_out:
	if (resource_context.pclk) {
		clk_put(resource_context.pclk);
		resource_context.pclk = NULL;
	}
	if (resource_context.hclk) {
		clk_put(resource_context.hclk);
		resource_context.hclk = NULL;
	}
	if (resource_context.hclk_div2) {
		clk_put(resource_context.hclk_div2);
		resource_context.hclk_div2 = NULL;
	}
	mutex_unlock(&resource_context.lock);
	return false;
}
Example #22
static int __init s3c2410ts_probe(struct platform_device *pdev)
{
	int rc;
	struct s3c2410_ts_mach_info *info;
	struct input_dev *input_dev;
	int ret = 0;

	dev_info(&pdev->dev, "Starting\n");

	info = (struct s3c2410_ts_mach_info *)pdev->dev.platform_data;

	if (!info)
	{
		dev_err(&pdev->dev, "Hm... too bad: no platform data for ts\n");
		return -EINVAL;
	}

#ifdef CONFIG_TOUCHSCREEN_S3C2410_DEBUG
	printk(DEBUG_LVL "Entering s3c2410ts_init\n");
#endif

	adc_clock = clk_get(NULL, "adc");
	if (!adc_clock) {
		dev_err(&pdev->dev, "failed to get adc clock source\n");
		return -ENOENT;
	}
	clk_enable(adc_clock);

#ifdef CONFIG_TOUCHSCREEN_S3C2410_DEBUG
	printk(DEBUG_LVL "got and enabled clock\n");
#endif

	base_addr = ioremap(S3C2410_PA_ADC,0x20);
	if (base_addr == NULL) {
		dev_err(&pdev->dev, "Failed to remap register block\n");
		ret = -ENOMEM;
		goto bail0;
	}


	/* If we actually are an S3C2410: Configure GPIOs */
	if (!strcmp(pdev->name, "s3c2410-ts"))
		s3c2410_ts_connect();

	if ((info->presc & 0xff) > 0)
		writel(S3C2410_ADCCON_PRSCEN |
		       S3C2410_ADCCON_PRSCVL(info->presc&0xFF),
						    base_addr + S3C2410_ADCCON);
	else
		writel(0, base_addr+S3C2410_ADCCON);

	/* Initialise registers */
	if ((info->delay & 0xffff) > 0)
		writel(info->delay & 0xffff,  base_addr + S3C2410_ADCDLY);

	writel(WAIT4INT(0), base_addr + S3C2410_ADCTSC);

	/* Initialise input stuff */
	memset(&ts, 0, sizeof(struct s3c2410ts));
	input_dev = input_allocate_device();

	if (!input_dev) {
		dev_err(&pdev->dev, "Unable to allocate the input device\n");
		ret = -ENOMEM;
		goto bail1;
	}

	ts.dev = input_dev;
	ts.dev->evbit[0] = BIT_MASK(EV_SYN) | BIT_MASK(EV_KEY) |
			   BIT_MASK(EV_ABS);
	ts.dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);
	input_set_abs_params(ts.dev, ABS_X, 0, 0x3FF, 0, 0);
	input_set_abs_params(ts.dev, ABS_Y, 0, 0x3FF, 0, 0);
	input_set_abs_params(ts.dev, ABS_PRESSURE, 0, 1, 0, 0);

	ts.dev->name = s3c2410ts_name;
	ts.dev->id.bustype = BUS_RS232;
	ts.dev->id.vendor = 0xDEAD;
	ts.dev->id.product = 0xBEEF;
	ts.dev->id.version = S3C2410TSVERSION;
	ts.state = TS_STATE_STANDBY;
	ts.event_fifo = kfifo_alloc(TS_EVENT_FIFO_SIZE, GFP_KERNEL, NULL);
	if (IS_ERR(ts.event_fifo)) {
		ret = -EIO;
		goto bail2;
	}

	/* create the filter chain set up for the 2 coordinates we produce */
	ts.chain = ts_filter_chain_create(pdev, info->filter_config, 2);

	if (IS_ERR(ts.chain))
		goto bail2;

	ts_filter_chain_clear(ts.chain);

	/* Get irqs */
	if (request_irq(IRQ_ADC, stylus_action, IRQF_SAMPLE_RANDOM,
						    "s3c2410_action", ts.dev)) {
		dev_err(&pdev->dev, "Could not allocate ts IRQ_ADC !\n");
		ret = -EIO;
		goto bail3;
	}
	if (request_irq(IRQ_TC, stylus_updown, IRQF_SAMPLE_RANDOM,
			"s3c2410_action", ts.dev)) {
		dev_err(&pdev->dev, "Could not allocate ts IRQ_TC !\n");
		free_irq(IRQ_ADC, ts.dev);
		ret = -EIO;
		goto bail4;
	}

	dev_info(&pdev->dev, "Successfully loaded\n");

	/* All went ok, so register to the input system */
	rc = input_register_device(ts.dev);
	if (rc) {
		ret = -EIO;
		goto bail5;
	}

	return 0;

bail5:
	free_irq(IRQ_TC, ts.dev);
bail4:
	free_irq(IRQ_ADC, ts.dev);
bail3:
	ts_filter_chain_destroy(ts.chain);
	kfifo_free(ts.event_fifo);
bail2:
	input_free_device(ts.dev);
bail1:
	iounmap(base_addr);
bail0:
	clk_disable(adc_clock);
	clk_put(adc_clock);
	return ret;
}
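This probe acquires the ADC clock, enables it, maps the register block and then requests IRQs, with a single bail ladder that unwinds in reverse order. A minimal, generic sketch of that acquire-then-unwind shape; the clock lookup, register base and size below are placeholders, not values from the driver above:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch only: probe-time clock plus MMIO acquisition with unwind. */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	void __iomem *base;
	int ret;

	clk = clk_get(&pdev->dev, NULL);	/* placeholder clock lookup */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);
	if (ret)
		goto err_put;

	base = ioremap(0x58000000, 0x1000);	/* hypothetical register window */
	if (!base) {
		ret = -ENOMEM;
		goto err_disable;
	}

	/* a real probe would register devices and request IRQs here,
	 * keeping the clock enabled and the mapping live on success
	 */
	return 0;

err_disable:
	clk_disable(clk);
err_put:
	clk_put(clk);
	return ret;
}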
int tegra_asoc_utils_set_rate(struct tegra_asoc_utils_data *data, int srate,
			      int mclk)
{
	int new_baseclock;
	bool clk_change;
	int err;
	bool reenable_clock;

	switch (srate) {
	case 11025:
	case 22050:
	case 44100:
	case 88200:
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
		new_baseclock = 56448000;
#else
		new_baseclock = 564480000;
#endif
		break;
	case 8000:
	case 16000:
	case 32000:
	case 48000:
	case 64000:
	case 96000:
#if defined(CONFIG_ARCH_TEGRA_2x_SOC)
		new_baseclock = 73728000;
#else
		new_baseclock = 552960000;
#endif
		break;
	default:
		return -EINVAL;
	}

	clk_change = ((new_baseclock != data->set_baseclock) ||
			(mclk != data->set_mclk));
	if (!clk_change)
		return 0;

	/* Don't change rate if already one dai-link is using it */
	if (data->lock_count)
		return -EINVAL;

	data->set_baseclock = 0;
	data->set_mclk = 0;

	reenable_clock = false;
	if (tegra_is_clk_enabled(data->clk_pll_a)) {
		clk_disable(data->clk_pll_a);
		reenable_clock = true;
	}
	err = clk_set_rate(data->clk_pll_a, new_baseclock);
	if (err) {
		dev_err(data->dev, "Can't set pll_a rate: %d\n", err);
		return err;
	}
	if (reenable_clock)
		clk_enable(data->clk_pll_a);

	reenable_clock = false;
	if (tegra_is_clk_enabled(data->clk_pll_a_out0)) {
		clk_disable(data->clk_pll_a_out0);
		reenable_clock = true;
	}
	err = clk_set_rate(data->clk_pll_a_out0, mclk);
	if (err) {
		dev_err(data->dev, "Can't set clk_pll_a_out0 rate: %d\n", err);
		return err;
	}
	if (reenable_clock)
		clk_enable(data->clk_pll_a_out0);

	data->set_baseclock = new_baseclock;
	data->set_mclk = mclk;

	return 0;
}
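tegra_asoc_utils_set_rate() stops a PLL before calling clk_set_rate() and re-enables it only if it was running beforehand, so the rate never changes underneath an active consumer. A minimal sketch of that stop, set, restore step; tegra_is_clk_enabled() is specific to this tree, so the enabled state is simply passed in here:

#include <linux/clk.h>
#include <linux/types.h>

/* Sketch: change a clock's rate while it is stopped, then restore its
 * previous state.  was_enabled must come from a platform-specific query
 * such as the tegra_is_clk_enabled() helper used above.
 */
static int example_set_rate_stopped(struct clk *clk, unsigned long rate,
				    bool was_enabled)
{
	int err;

	if (was_enabled)
		clk_disable(clk);

	err = clk_set_rate(clk, rate);

	if (was_enabled)
		clk_enable(clk);

	return err;
}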
Example #24
static inline void brf6150_rx(struct brf6150_info *info)
{
	u8 byte;

	NBT_DBG_TRANSFER("rx_tasklet woke up\ndata ");

	while (brf6150_inb(info, UART_LSR) & UART_LSR_DR) {
		if (info->rx_skb == NULL) {
			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
			if (!info->rx_skb) {
				printk(KERN_WARNING "brf6150: Can't allocate memory for new packet\n");
				return;
			}
			info->rx_state = WAIT_FOR_PKT_TYPE;
			info->rx_skb->dev = (void *)info->hdev;
			brf6150_disable_pm_rx(info);
			clk_enable(info->uart_ck);
		}

		byte = brf6150_inb(info, UART_RX);
		if (info->garbage_bytes) {
			info->garbage_bytes--;
			info->hdev->stat.err_rx++;
			continue;
		}
		info->hdev->stat.byte_rx++;
		NBT_DBG_TRANSFER_NF("0x%.2x  ", byte);
		switch (info->rx_state) {
		case WAIT_FOR_PKT_TYPE:
			bt_cb(info->rx_skb)->pkt_type = byte;
			info->rx_count = brf6150_get_hdr_len(byte);
			if (info->rx_count >= 0) {
				info->rx_state = WAIT_FOR_HEADER;
			} else {
				info->hdev->stat.err_rx++;
				kfree_skb(info->rx_skb);
				info->rx_skb = NULL;
				clk_disable(info->uart_ck);
			}
			break;
		case WAIT_FOR_HEADER:
			info->rx_count--;
			*skb_put(info->rx_skb, 1) = byte;
			if (info->rx_count == 0) {
				info->rx_count = brf6150_get_data_len(info, info->rx_skb);
				if (info->rx_count > skb_tailroom(info->rx_skb)) {
					printk(KERN_WARNING "brf6150: Frame is %ld bytes too long.\n",
					       info->rx_count - skb_tailroom(info->rx_skb));
					info->garbage_bytes = info->rx_count - skb_tailroom(info->rx_skb);
					kfree_skb(info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					break;
				}
				info->rx_state = WAIT_FOR_DATA;
				if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) {
					brf6150_negotiation_packet(info, info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					return;
				}
				if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) {
					brf6150_alive_packet(info, info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					return;
				}
			}
			break;
		case WAIT_FOR_DATA:
			info->rx_count--;
			*skb_put(info->rx_skb, 1) = byte;
			if (info->rx_count == 0) {
				brf6150_recv_frame(info, info->rx_skb);
				info->rx_skb = NULL;
				clk_disable(info->uart_ck);
			}
			break;
		default:
			WARN_ON(1);
			break;
		}
	}

	NBT_DBG_TRANSFER_NF("\n");
}
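brf6150_rx() turns the UART clock on when it starts collecting a packet and turns it off again on every path that finishes or drops the packet, so the clock is held only while a frame is in flight. A minimal sketch of that balanced enable/disable around a transfer; the structure and helper names are illustrative, not from the driver above:

#include <linux/clk.h>

/* Sketch: hold a peripheral clock only while a transfer is pending.
 * Every path that ends the transfer must balance the clk_enable().
 */
struct example_rx {
	struct clk *uart_ck;
	void *frame;		/* non-NULL while a frame is being received */
};

static void example_frame_start(struct example_rx *rx, void *buf)
{
	rx->frame = buf;
	clk_enable(rx->uart_ck);	/* keep the UART clocked for the frame */
}

static void example_frame_end(struct example_rx *rx)
{
	rx->frame = NULL;
	clk_disable(rx->uart_ck);	/* balances example_frame_start() */
}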
static int sprd_rtc_probe(struct platform_device *plat_dev)
{
	int err = -ENODEV;
	struct resource *irq;

	rtc_data = kzalloc(sizeof(*rtc_data), GFP_KERNEL);
	if (!rtc_data)
		return -ENOMEM;

	/* Make sure the RTC interrupt is not sent to the Adie unless an
	 * RTC alarm interrupt has actually occurred.
	 */
	sci_adi_raw_write(ANA_RTC_SPG_UPD, SPRD_RTC_LOCK);
	/* disable all interrupt */
	sci_adi_clr(ANA_RTC_INT_EN, RTC_INT_ALL_MSK);
	/* enable rtc device */
	rtc_data->clk = clk_get(&plat_dev->dev, "ext_32k");
	if (IS_ERR(rtc_data->clk)) {
		err = PTR_ERR(rtc_data->clk);
		goto kfree_data;
	}

	err = clk_enable(rtc_data->clk);
	if (err < 0)
		goto put_clk;

	CLEAR_RTC_INT(RTC_INT_ALL_MSK);
	rtc_data->rtc = rtc_device_register("sprd_rtc", &plat_dev->dev,
			&sprd_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc_data->rtc)) {
		err = PTR_ERR(rtc_data->rtc);
		goto disable_clk;
	}

	irq = platform_get_resource(plat_dev, IORESOURCE_IRQ, 0);
	if (unlikely(!irq)) {
		dev_err(&plat_dev->dev, "no irq resource specified\n");
		goto unregister_rtc;
	}
	rtc_data->irq_no = irq->start;
	platform_set_drvdata(plat_dev, rtc_data);

	err = request_irq(rtc_data->irq_no, rtc_interrupt_handler, 0,
			"sprd_rtc", rtc_data->rtc);
	if (err) {
		printk(KERN_ERR "RTC request irq failed\n");
		goto unregister_rtc;
	}
	sprd_creat_caliberate_attr(rtc_data->rtc->dev);

	sprd_rtc_hwrst_set(1);
	sprd_rtc_set_bit_spg_counter(SPG_CNT_8SECS_RESET, 1);

	sprd_rtc_check_power_down(&plat_dev->dev);

	sprd_rtc_open(&plat_dev->dev);	/* test */

	return 0;

unregister_rtc:
	rtc_device_unregister(rtc_data->rtc);
disable_clk:
	clk_disable(rtc_data->clk);
put_clk:
	clk_put(rtc_data->clk);
kfree_data:
	kfree(rtc_data);
	return err;
}
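sprd_rtc_probe() pairs every acquisition with a label further down the ladder: clk_get() with put_clk, clk_enable() with disable_clk, rtc_device_register() with unregister_rtc. In trees where managed resources are available, devm_kzalloc() and devm_clk_get() can shorten that ladder; a hedged sketch under that assumption, not this driver's code:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Sketch: same acquisition order as above, but with a device-managed
 * clock reference so only the enable still needs explicit unwinding on
 * later failures.  "ext_32k" is the name used by the example; the rest
 * is illustrative.
 */
static int example_rtc_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int err;

	clk = devm_clk_get(&pdev->dev, "ext_32k");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	err = clk_enable(clk);
	if (err < 0)
		return err;

	/* ... register the RTC device and request the alarm IRQ; disable
	 * the clock again if any of that fails ...
	 */
	return 0;
}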
Example #26
/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = 0;

	mmc->max_blk_size  = MCI_MAXBLKSIZE;
	mmc->max_blk_count = MCI_BLKATONCE;
	mmc->max_req_size  = MCI_BUFSIZE;
	mmc->max_segs      = MCI_BLKATONCE;
	mmc->max_seg_size  = MCI_BUFSIZE;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (at91mci_is_mci1rev2xx())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE,
					&host->physical_address, GFP_KERNEL);
	if (!host->buffer) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "Can't allocate transmit buffer\n");
		goto fail5;
	}

	/* Add SDIO capability when available */
	if (at91mci_is_mci1rev2xx()) {
		/* at91mci MCI1 rev2xx sdio interrupt erratum */
		if (host->board->wire4 || !host->board->slot_b)
			mmc->caps |= MMC_CAP_SDIO_IRQ;
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (gpio_is_valid(host->board->det_pin)) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail4b;
		}
	}
	if (gpio_is_valid(host->board->wp_pin)) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (gpio_is_valid(host->board->vcc_pin)) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, resource_size(res));
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (gpio_is_valid(host->board->det_pin)) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (gpio_is_valid(host->board->det_pin)) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (gpio_is_valid(host->board->vcc_pin))
		gpio_free(host->board->vcc_pin);
fail3:
	if (gpio_is_valid(host->board->wp_pin))
		gpio_free(host->board->wp_pin);
fail4:
	if (gpio_is_valid(host->board->det_pin))
		gpio_free(host->board->det_pin);
fail4b:
	if (host->buffer)
		dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
				host->buffer, host->physical_address);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, resource_size(res));
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}
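at91_mci_probe() enables the peripheral clock only right before it touches the controller (the reset via at91_mci_disable() and at91_mci_enable()), and the failN ladder then releases everything in the reverse order of acquisition. A minimal sketch of gating register access on the clock; the reset callbacks are placeholders standing in for the helpers above:

#include <linux/clk.h>

/* Sketch: make sure the peripheral clock runs before any MMIO access,
 * and report a failed enable instead of touching unclocked hardware.
 */
static int example_reset_controller(struct clk *pclk,
				    void (*hw_disable)(void),
				    void (*hw_enable)(void))
{
	int ret;

	ret = clk_enable(pclk);
	if (ret)
		return ret;

	hw_disable();		/* quiesce the controller */
	hw_enable();		/* bring it back up in a known state */
	return 0;
}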
static int snddev_icodec_open_tx(struct snddev_icodec_state *icodec)
{
	int trc;
	int afe_channel_mode;
	union afe_port_config afe_config;
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;

	wake_lock(&drv->tx_idlelock);

	if (drv->snddev_vreg)
		vreg_mode_vote(drv->snddev_vreg, 1, SNDDEV_HIGH_POWER_MODE);

	/* Reuse pamp_on for TX platform-specific setup  */
	if (icodec->data->pamp_on) {
		if (icodec->data->pamp_on()) {
			pr_err("%s: Error turning on tx power\n", __func__);
			goto error_pamp;
		}
	}

	msm_snddev_tx_mclk_request();

	drv->tx_osrclk = clk_get(0, "i2s_mic_osr_clk");
	if (IS_ERR(drv->tx_osrclk))
		pr_err("%s master clock Error\n", __func__);

	trc =  clk_set_rate(drv->tx_osrclk,
			SNDDEV_ICODEC_CLK_RATE(icodec->sample_rate));
	if (IS_ERR_VALUE(trc)) {
		pr_err("ERROR setting m clock1\n");
		goto error_invalid_freq;
	}

	clk_enable(drv->tx_osrclk);
	drv->tx_bitclk = clk_get(0, "i2s_mic_bit_clk");
	if (IS_ERR(drv->tx_bitclk))
		pr_err("%s clock Error\n", __func__);

	/* Master clock = Sample Rate * OSR rate bit clock
	 * OSR Rate bit clock = bit/sample * channel master
	 * clock / bit clock = divider value = 8
	 */
	if (msm_codec_i2s_slave_mode) {
		pr_info("%s: configuring bit clock for slave mode\n",
				__func__);
		trc =  clk_set_rate(drv->tx_bitclk, 0);
	} else
		trc =  clk_set_rate(drv->tx_bitclk, 8);

	clk_enable(drv->tx_bitclk);

	/* Enable ADIE */
	trc = adie_codec_open(icodec->data->profile, &icodec->adie_path);
	if (IS_ERR_VALUE(trc))
		pr_err("%s: adie codec open failed\n", __func__);
	else
		adie_codec_setpath(icodec->adie_path,
					icodec->sample_rate, 256);

	switch (icodec->data->channel_mode) {
	case 2:
		afe_channel_mode = MSM_AFE_STEREO;
		break;
	case 1:
	default:
		afe_channel_mode = MSM_AFE_MONO;
		break;
	}
	afe_config.mi2s.channel = afe_channel_mode;
	afe_config.mi2s.bitwidth = 16;
	afe_config.mi2s.line = 1;
	if (msm_codec_i2s_slave_mode)
		afe_config.mi2s.ws = 0;
	else
		afe_config.mi2s.ws = 1;

	trc = afe_open(icodec->data->copp_id, &afe_config, icodec->sample_rate);

	if (icodec->adie_path) {
		adie_codec_proceed_stage(icodec->adie_path,
					ADIE_CODEC_DIGITAL_READY);
		adie_codec_proceed_stage(icodec->adie_path,
					ADIE_CODEC_DIGITAL_ANALOG_READY);
	}

	if (msm_codec_i2s_slave_mode)
		adie_codec_set_master_mode(icodec->adie_path, 1);
	else
		adie_codec_set_master_mode(icodec->adie_path, 0);

	icodec->enabled = 1;

	wake_unlock(&drv->tx_idlelock);
	return 0;

error_invalid_freq:

	if (icodec->data->pamp_off)
		icodec->data->pamp_off();

	pr_err("%s: encounter error\n", __func__);
error_pamp:
	wake_unlock(&drv->tx_idlelock);
	return -ENODEV;
}
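The comment block in the middle of this function compresses the clock arithmetic: the master clock is sample rate times OSR, and the bit-clock divider passed to clk_set_rate() is 8 in master mode (0 selects slave mode). A worked example of where the 8 comes from, assuming 48 kHz, 16-bit, stereo audio and the OSR of 256 handed to adie_codec_setpath():

/* Worked example of the divider used above; the stream parameters are
 * assumptions for illustration, not values read from the driver.
 */
static unsigned long example_bitclk_divider(void)
{
	unsigned long rate = 48000;			/* sample rate in Hz */
	unsigned long mclk = rate * 256;		/* 12,288,000 Hz */
	unsigned long bitclk = rate * 16 * 2;		/* 1,536,000 Hz */

	return mclk / bitclk;				/* = 8 */
}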
Example #28
mali_bool mali_clk_get(mali_bool bis_vpll)
{
	if (bis_vpll == MALI_TRUE)
	{
		if (ext_xtal_clock == NULL)
		{
			ext_xtal_clock = clk_get(NULL,EXTXTALCLK_NAME);
			if (IS_ERR(ext_xtal_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source ext_xtal_clock\n"));
				return MALI_FALSE;
			}
			clk_enable(ext_xtal_clock);
		}

		if (vpll_src_clock == NULL)
		{
			vpll_src_clock = clk_get(NULL,VPLLSRCCLK_NAME);
			if (IS_ERR(vpll_src_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source vpll_src_clock\n"));
				return MALI_FALSE;
			}
			clk_enable(vpll_src_clock);
		}

		if (fout_vpll_clock == NULL)
		{
			fout_vpll_clock = clk_get(NULL,FOUTVPLLCLK_NAME);
			if (IS_ERR(fout_vpll_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source fout_vpll_clock\n"));
				return MALI_FALSE;
			}
			clk_enable(fout_vpll_clock);
		}

		if (sclk_vpll_clock == NULL)
		{
			sclk_vpll_clock = clk_get(NULL,SCLVPLLCLK_NAME);
			if (IS_ERR(sclk_vpll_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source sclk_vpll_clock\n"));
				return MALI_FALSE;
			}
			clk_enable(sclk_vpll_clock);
		}

		if (mali_parent_clock == NULL)
		{
			mali_parent_clock = clk_get(NULL, GPUMOUT1CLK_NAME);

			if (IS_ERR(mali_parent_clock)) {
				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
				return MALI_FALSE;
			}
			clk_enable(mali_parent_clock);
		}
	}
	else // mpll
	{
		if (mpll_clock == NULL)
		{
			mpll_clock = clk_get(NULL,MPLLCLK_NAME);

			if (IS_ERR(mpll_clock)) {
				MALI_PRINT( ("MALI Error : failed to get source mpll clock\n"));
				return MALI_FALSE;
			}
		}

		if (mali_parent_clock == NULL)
		{
			mali_parent_clock = clk_get(NULL, GPUMOUT0CLK_NAME);

			if (IS_ERR(mali_parent_clock)) {
				MALI_PRINT( ( "MALI Error : failed to get source mali parent clock\n"));
				return MALI_FALSE;
			}
		}
	}

	/* the mali clock itself is always acquired */
	if (mali_clock == NULL)
	{
		mali_clock = clk_get(NULL, GPUCLK_NAME);

		if (IS_ERR(mali_clock)) {
			MALI_PRINT( ("MALI Error : failed to get source mali clock\n"));
			return MALI_FALSE;
		}
	}

	return MALI_TRUE;
}
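mali_clk_get() fetches each clock lazily: clk_get() runs only while the cached pointer is still NULL, so repeated calls are idempotent, and the VPLL branch additionally enables the source clocks while the MPLL branch only takes references. A minimal sketch of the get-once idiom; the clock name and the global are placeholders, not the Mali clock names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/types.h>

/* Sketch: look a named clock up at most once and cache the handle. */
static struct clk *example_cached_clk;

static bool example_clk_get_once(void)
{
	if (example_cached_clk)
		return true;

	example_cached_clk = clk_get(NULL, "example_clk");
	if (IS_ERR(example_cached_clk)) {
		example_cached_clk = NULL;
		return false;
	}
	return true;
}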
Example #29
static int usbhs_enable(struct device *dev)
{
	struct usbhs_hcd_omap		*omap = dev_get_drvdata(dev);
	struct usbhs_omap_platform_data	*pdata = &omap->platdata;
	unsigned long			flags = 0;
	int				ret = 0;
	unsigned long			timeout;
	unsigned			reg;

	dev_dbg(dev, "starting TI HSUSB Controller\n");
	if (!pdata) {
		dev_dbg(dev, "missing platform_data\n");
		return  -ENODEV;
	}

	spin_lock_irqsave(&omap->lock, flags);
	if (omap->count > 0)
		goto end_count;

	clk_enable(omap->usbhost_ick);
	clk_enable(omap->usbhost_hs_fck);
	clk_enable(omap->usbhost_fs_fck);
	clk_enable(omap->usbtll_fck);
	clk_enable(omap->usbtll_ick);

	if (pdata->ehci_data->phy_reset) {
		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) {
			gpio_request(pdata->ehci_data->reset_gpio_port[0],
						"USB1 PHY reset");
			gpio_direction_output
				(pdata->ehci_data->reset_gpio_port[0], 0);
		}

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) {
			gpio_request(pdata->ehci_data->reset_gpio_port[1],
						"USB2 PHY reset");
			gpio_direction_output
				(pdata->ehci_data->reset_gpio_port[1], 0);
		}

		/* Hold the PHY in RESET for enough time till DIR is high */
		udelay(10);
	}

	omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION);
	dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev);

	/* perform TLL soft reset, and wait until reset is complete */
	usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
			OMAP_USBTLL_SYSCONFIG_SOFTRESET);

	/* Wait for TLL reset to complete */
	timeout = jiffies + msecs_to_jiffies(1000);
	while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS)
			& OMAP_USBTLL_SYSSTATUS_RESETDONE)) {
		cpu_relax();

		if (time_after(jiffies, timeout)) {
			dev_dbg(dev, "operation timed out\n");
			ret = -EINVAL;
			goto err_tll;
		}
	}
	dev_dbg(dev, "TLL RESET DONE\n");

	/* (1<<3) = no idle mode only for initial debugging */
	usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG,
			OMAP_USBTLL_SYSCONFIG_ENAWAKEUP |
			OMAP_USBTLL_SYSCONFIG_SIDLEMODE |
			OMAP_USBTLL_SYSCONFIG_AUTOIDLE);

	/* Put UHH in NoIdle/NoStandby mode */
	reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG);
	if (is_omap_usbhs_rev1(omap)) {
		reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP
				| OMAP_UHH_SYSCONFIG_SIDLEMODE
				| OMAP_UHH_SYSCONFIG_CACTIVITY
				| OMAP_UHH_SYSCONFIG_MIDLEMODE);
		reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE;
	} else if (is_omap_usbhs_rev2(omap)) {
		reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR;
		reg |= OMAP4_UHH_SYSCONFIG_NOIDLE;
		reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR;
		reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY;
	}

	usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg);

	reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG);
	/* setup ULPI bypass and burst configurations */
	reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN
			| OMAP_UHH_HOSTCONFIG_INCR8_BURST_EN
			| OMAP_UHH_HOSTCONFIG_INCR16_BURST_EN);
	reg |= OMAP4_UHH_HOSTCONFIG_APP_START_CLK;
	reg &= ~OMAP_UHH_HOSTCONFIG_INCRX_ALIGN_EN;

	if (is_omap_usbhs_rev1(omap)) {
		if (pdata->port_mode[0] == OMAP_USBHS_PORT_MODE_UNUSED)
			reg &= ~OMAP_UHH_HOSTCONFIG_P1_CONNECT_STATUS;
		if (pdata->port_mode[1] == OMAP_USBHS_PORT_MODE_UNUSED)
			reg &= ~OMAP_UHH_HOSTCONFIG_P2_CONNECT_STATUS;
		if (pdata->port_mode[2] == OMAP_USBHS_PORT_MODE_UNUSED)
			reg &= ~OMAP_UHH_HOSTCONFIG_P3_CONNECT_STATUS;

		/* Bypass the TLL module for PHY mode operation */
		if (cpu_is_omap3430() && (omap_rev() <= OMAP3430_REV_ES2_1)) {
			dev_dbg(dev, "OMAP3 ES version <= ES2.1\n");
			if (is_ehci_phy_mode(pdata->port_mode[0]) ||
				is_ehci_phy_mode(pdata->port_mode[1]) ||
					is_ehci_phy_mode(pdata->port_mode[2]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_BYPASS;
		} else {
			dev_dbg(dev, "OMAP3 ES version > ES2.1\n");
			if (is_ehci_phy_mode(pdata->port_mode[0]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P1_BYPASS;
			if (is_ehci_phy_mode(pdata->port_mode[1]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P2_BYPASS;
			if (is_ehci_phy_mode(pdata->port_mode[2]))
				reg &= ~OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
			else
				reg |= OMAP_UHH_HOSTCONFIG_ULPI_P3_BYPASS;
		}
	} else if (is_omap_usbhs_rev2(omap)) {
		/* Clear port mode fields for PHY mode*/
		reg &= ~OMAP4_P1_MODE_CLEAR;
		reg &= ~OMAP4_P2_MODE_CLEAR;

		if (is_ehci_phy_mode(pdata->port_mode[0])) {
			ret = clk_set_parent(omap->utmi_p1_fck,
						omap->xclk60mhsp1_ck);
			if (ret != 0) {
				dev_err(dev, "xclk60mhsp1_ck set parent"
				"failed error:%d\n", ret);
				goto err_tll;
			}
		} else if (is_ehci_tll_mode(pdata->port_mode[0])) {
			ret = clk_set_parent(omap->utmi_p1_fck,
						omap->init_60m_fclk);
			if (ret != 0) {
				dev_err(dev, "init_60m_fclk set parent"
				"failed error:%d\n", ret);
				goto err_tll;
			}
			clk_enable(omap->usbhost_p1_fck);
			clk_enable(omap->usbtll_p1_fck);
		}

		if (is_ehci_phy_mode(pdata->port_mode[1])) {
			ret = clk_set_parent(omap->utmi_p2_fck,
						omap->xclk60mhsp2_ck);
			if (ret != 0) {
				dev_err(dev, "xclk60mhsp1_ck set parent"
					"failed error:%d\n", ret);
				goto err_tll;
			}
		} else if (is_ehci_tll_mode(pdata->port_mode[1])) {
			ret = clk_set_parent(omap->utmi_p2_fck,
						omap->init_60m_fclk);
			if (ret != 0) {
				dev_err(dev, "init_60m_fclk set parent"
				"failed error:%d\n", ret);
				goto err_tll;
			}
			clk_enable(omap->usbhost_p2_fck);
			clk_enable(omap->usbtll_p2_fck);
		}

		clk_enable(omap->utmi_p1_fck);
		clk_enable(omap->utmi_p2_fck);

		if (is_ehci_tll_mode(pdata->port_mode[0]) ||
			(is_ohci_port(pdata->port_mode[0])))
			reg |= OMAP4_P1_MODE_TLL;
		else if (is_ehci_hsic_mode(pdata->port_mode[0]))
			reg |= OMAP4_P1_MODE_HSIC;

		if (is_ehci_tll_mode(pdata->port_mode[1]) ||
			(is_ohci_port(pdata->port_mode[1])))
			reg |= OMAP4_P2_MODE_TLL;
		else if (is_ehci_hsic_mode(pdata->port_mode[1]))
			reg |= OMAP4_P2_MODE_HSIC;
	}

	usbhs_write(omap->uhh_base, OMAP_UHH_HOSTCONFIG, reg);
	dev_dbg(dev, "UHH setup done, uhh_hostconfig=%x\n", reg);

	if (is_ehci_tll_mode(pdata->port_mode[0]) ||
		is_ehci_tll_mode(pdata->port_mode[1]) ||
		is_ehci_tll_mode(pdata->port_mode[2]) ||
		(is_ohci_port(pdata->port_mode[0])) ||
		(is_ohci_port(pdata->port_mode[1])) ||
		(is_ohci_port(pdata->port_mode[2]))) {

		/* Enable UTMI mode for required TLL channels */
		if (is_omap_usbhs_rev2(omap))
			usbhs_omap_tll_init(dev, OMAP_REV2_TLL_CHANNEL_COUNT);
		else
			usbhs_omap_tll_init(dev, OMAP_TLL_CHANNEL_COUNT);
	}

	if (pdata->ehci_data->phy_reset) {
		/* Hold the PHY in RESET for enough time till
		 * PHY is settled and ready
		 */
		udelay(10);

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
			gpio_set_value
				(pdata->ehci_data->reset_gpio_port[0], 1);

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
			gpio_set_value
				(pdata->ehci_data->reset_gpio_port[1], 1);
	}

end_count:
	omap->count++;
	spin_unlock_irqrestore(&omap->lock, flags);
	return 0;

err_tll:
	if (pdata->ehci_data->phy_reset) {
		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0]))
			gpio_free(pdata->ehci_data->reset_gpio_port[0]);

		if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1]))
			gpio_free(pdata->ehci_data->reset_gpio_port[1]);
	}

	clk_disable(omap->usbtll_ick);
	clk_disable(omap->usbtll_fck);
	clk_disable(omap->usbhost_fs_fck);
	clk_disable(omap->usbhost_hs_fck);
	clk_disable(omap->usbhost_ick);
	spin_unlock_irqrestore(&omap->lock, flags);
	return ret;
}
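usbhs_enable() is reference counted: omap->count ensures that only the first caller enables the clocks and programs the TLL/UHH blocks, all under the spinlock, and err_tll unwinds the clocks in reverse order. A minimal sketch of the count-guarded enable; the structure is illustrative, while the real driver keeps its state in struct usbhs_hcd_omap:

#include <linux/clk.h>
#include <linux/spinlock.h>

/* Sketch: enable hardware only on the 0 -> 1 use-count transition, as
 * the driver above does; a matching disable would act only on the
 * 1 -> 0 transition.
 */
struct example_host {
	spinlock_t lock;
	int count;
	struct clk *ick;
	struct clk *fck;
};

static void example_host_enable(struct example_host *h)
{
	unsigned long flags;

	spin_lock_irqsave(&h->lock, flags);
	if (h->count++ == 0) {
		clk_enable(h->ick);
		clk_enable(h->fck);
		/* ... one-time controller setup goes here ... */
	}
	spin_unlock_irqrestore(&h->lock, flags);
}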
Example #30
static void s5p_fimd_lite_power_on(struct s5p_fimd_ext_device *fx_dev)
{
	struct s5p_fimd_lite *fimd_lite = fimd_ext_get_drvdata(fx_dev);

	clk_enable(fimd_lite->clk);
}
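This last example is the degenerate case: a power-on hook that only enables the device clock, with the balancing clk_disable() expected to live in the matching power-off hook. A sketch of that assumed counterpart, reusing the identifiers visible in the example; the function name is not shown in the excerpt and is only assumed:

/* Assumed counterpart to the hook above: the disable that balances
 * clk_enable(fimd_lite->clk).
 */
static void example_fimd_lite_power_off(struct s5p_fimd_ext_device *fx_dev)
{
	struct s5p_fimd_lite *fimd_lite = fimd_ext_get_drvdata(fx_dev);

	clk_disable(fimd_lite->clk);
}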