static int snddev_icodec_close_tx(struct snddev_icodec_state *icodec)
{
	struct snddev_icodec_drv_state *drv = &snddev_icodec_drv;

	pm_qos_update_request(&drv->tx_pm_qos_req,
			      msm_cpuidle_get_deep_idle_latency());

	if (drv->snddev_vreg)
		vreg_mode_vote(drv->snddev_vreg, 0, SNDDEV_HIGH_POWER_MODE);

	/* Disable ADIE */
	if (icodec->adie_path) {
		adie_codec_proceed_stage(icodec->adie_path,
					ADIE_CODEC_DIGITAL_OFF);
		adie_codec_close(icodec->adie_path);
		icodec->adie_path = NULL;
	}

	afe_close(icodec->data->copp_id);

	clk_disable_unprepare(drv->tx_bitclk);
	clk_disable_unprepare(drv->tx_osrclk);

	msm_snddev_tx_mclk_free();

	/* Reuse pamp_off for TX platform-specific setup */
	if (icodec->data->pamp_off)
		icodec->data->pamp_off();

	icodec->enabled = 0;

	pm_qos_update_request(&drv->tx_pm_qos_req, PM_QOS_DEFAULT_VALUE);
	return 0;
}
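All of these examples lean on the same legacy kernel PM QoS pattern: a driver registers a request once, raises it while busy, and relaxes it when idle. A minimal sketch of that lifecycle using the classic pm_qos_add_request/update/remove API (the request name and wrapper functions here are hypothetical):

#include <linux/pm_qos.h>

static struct pm_qos_request my_qos_req;	/* hypothetical request */

static void my_driver_init(void)
{
	/* Register once; PM_QOS_DEFAULT_VALUE means "no constraint". */
	pm_qos_add_request(&my_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

static void my_driver_busy(void)
{
	/* Cap CPU wakeup latency (usec) while the device is active. */
	pm_qos_update_request(&my_qos_req, 10);
}

static void my_driver_idle(void)
{
	/* Drop back to the default, releasing the constraint. */
	pm_qos_update_request(&my_qos_req, PM_QOS_DEFAULT_VALUE);
}

static void my_driver_exit(void)
{
	pm_qos_remove_request(&my_qos_req);
}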
Example #2
/**
 * sr_classp5_recal_work() - work which actually does the recalibration
 * @work: pointer to the work
 *
 * On a periodic basis, we reset our calibration setup so that a
 * recalibration of the OPPs takes place. This takes care of the
 * aging factor in the system.
 */
static void sr_classp5_recal_work(struct work_struct *work)
{
	unsigned long delay;

	/* try lock only to avoid deadlock with suspend handler */
	if (!mutex_trylock(&omap_dvfs_lock)) {
		pr_err("%s: Can't acquire lock, delay recalibration\n",
		       __func__);
		schedule_delayed_work(&recal_work, msecs_to_jiffies
				      (SRP5_SAMPLING_DELAY_MS *
				       SRP5_STABLE_SAMPLES));
		return;
	}

	/*
	 * Deny Idle during recalibration due to the following reasons:
	 * - HW loop is enabled when we enter this function
	 * - HW loop may be triggered at any moment of time from idle
	 * - As result we may have race between SmartReflex disabling/enabling
	 * from CPU Idle and from recalibration function
	 */
	pm_qos_update_request(&recal_qos, 0);

	delay = msecs_to_jiffies(CONFIG_OMAP_SR_CLASS1_P5_RECALIBRATION_DELAY);

	if (voltdm_for_each(sr_classp5_voltdm_recal, NULL))
		pr_err("%s: Recalibration failed\n", __func__);

	/* Enable CPU Idle */
	pm_qos_update_request(&recal_qos, PM_QOS_DEFAULT_VALUE);

	next_recal_time = jiffies + delay;
	schedule_delayed_work(&recal_work, delay);
	mutex_unlock(&omap_dvfs_lock);
}
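The work item above assumes recal_qos and recal_work were initialized elsewhere; a plausible init sketch (the function name and the QoS class are assumptions, not shown in the source):

static struct pm_qos_request recal_qos;
static struct delayed_work recal_work;

static int __init sr_classp5_recal_init(void)	/* hypothetical name */
{
	/* At the default value, CPU idle is unrestricted outside recalibration. */
	pm_qos_add_request(&recal_qos, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	INIT_DELAYED_WORK(&recal_work, sr_classp5_recal_work);
	schedule_delayed_work(&recal_work,
		msecs_to_jiffies(CONFIG_OMAP_SR_CLASS1_P5_RECALIBRATION_DELAY));
	return 0;
}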
Example #3
void fimg2d_pm_qos_update_bus(struct fimg2d_control *ctrl,
				enum fimg2d_qos_status status)
{
	enum fimg2d_qos_level idx;
	int ret = 0;
	unsigned long qflags;

	g2d_spin_lock(&ctrl->qoslock, qflags);
	if ((ctrl->qos_lv >= G2D_LV0) && (ctrl->qos_lv < G2D_LV_END))
		idx = ctrl->qos_lv;
	else
		goto err;
	g2d_spin_unlock(&ctrl->qoslock, qflags);

	if (status == FIMG2D_QOS_ON) {
		if (ctrl->pre_qos_lv != ctrl->qos_lv) {
			pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos,
					g2d_qos_table[idx].freq_mif);
			pm_qos_update_request(&ctrl->exynos5_g2d_int_qos,
					g2d_qos_table[idx].freq_int);
			fimg2d_debug("idx:%d, freq_mif:%d, freq_int:%d, ret:%d\n",
					idx, g2d_qos_table[idx].freq_mif,
					g2d_qos_table[idx].freq_int, ret);
		}
	} else if (status == FIMG2D_QOS_OFF) {
		pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos, 0);
		pm_qos_update_request(&ctrl->exynos5_g2d_int_qos, 0);
	}

	return;
err:
	/* the spinlock is still held when we jump here */
	g2d_spin_unlock(&ctrl->qoslock, qflags);
	fimg2d_debug("invalid qos_lv:%d\n", ctrl->qos_lv);
}
static int exynos5_busfreq_int_target(struct device *dev, unsigned long *_freq,
			      u32 flags)
{
	int err = 0;
	struct platform_device *pdev = container_of(dev, struct platform_device,
						    dev);
	struct busfreq_data_int *data = platform_get_drvdata(pdev);
	struct opp *opp;
	unsigned long old_freq, freq;
	unsigned long volt;

	rcu_read_lock();
	opp = devfreq_recommended_opp(dev, _freq, flags);
	if (IS_ERR(opp)) {
		rcu_read_unlock();
		dev_err(dev, "%s: Invalid OPP.\n", __func__);
		return PTR_ERR(opp);
	}

	freq = opp_get_freq(opp);
	volt = opp_get_voltage(opp);
	rcu_read_unlock();

	old_freq = data->curr_freq;

	if (old_freq == freq)
		return 0;

	dev_dbg(dev, "targeting %lukHz %luuV\n", freq, volt);

	mutex_lock(&data->lock);

	if (data->disabled)
		goto out;

	if (freq > exynos5_int_opp_table[0].clk)
		pm_qos_update_request(&data->int_req, freq * 16 / 1000);
	else
		pm_qos_update_request(&data->int_req, -1);

	if (old_freq < freq)
		err = exynos5_int_setvolt(data, volt);
	if (err)
		goto out;

	err = clk_set_rate(data->int_clk, freq * 1000);

	if (err)
		goto out;

	if (old_freq > freq)
		err = exynos5_int_setvolt(data, volt);
	if (err)
		goto out;

	data->curr_freq = freq;
out:
	mutex_unlock(&data->lock);
	return err;
}
Example #5
static void usbnet_unlock_perf(void)
{
	pr_info("[USBNET] %s\n", __func__);

	pm_qos_update_request(&usbnet_req_freq, (s32)PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
	pm_qos_update_request(&usbnet_req_cpus, (s32)PM_QOS_MIN_ONLINE_CPUS_DEFAULT_VALUE);
}
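A matching lock path presumably raises these requests before a transfer burst; a hypothetical sketch (the boost values are placeholders, not from the source):

static void usbnet_lock_perf(void)	/* hypothetical counterpart */
{
	pr_info("[USBNET] %s\n", __func__);

	/* Pin a cpufreq floor and keep at least two CPUs online. */
	pm_qos_update_request(&usbnet_req_freq, (s32)1026000);	/* placeholder kHz */
	pm_qos_update_request(&usbnet_req_cpus, (s32)2);
}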
int proactive_pm_qos_command(struct exynos_context *platform, gpu_pmqos_state state)
{
	DVFS_ASSERT(platform);

	if (!platform->devfreq_status)
		return 0;

	switch (state) {
	case GPU_CONTROL_PM_QOS_INIT:
		pm_qos_add_request(&proactive_mif_min_qos, PM_QOS_BUS_THROUGHPUT, 0);
		pm_qos_add_request(&proactive_apollo_min_qos, PM_QOS_CLUSTER0_FREQ_MIN, 0);
		pm_qos_add_request(&proactive_atlas_min_qos, PM_QOS_CLUSTER1_FREQ_MIN, 0);
		if (!platform->pmqos_int_disable)
			pm_qos_add_request(&proactive_int_min_qos, PM_QOS_DEVICE_THROUGHPUT, 0);
		break;
	case GPU_CONTROL_PM_QOS_DEINIT:
		pm_qos_remove_request(&proactive_mif_min_qos);
		pm_qos_remove_request(&proactive_apollo_min_qos);
		pm_qos_remove_request(&proactive_atlas_min_qos);
		if (!platform->pmqos_int_disable)
			pm_qos_remove_request(&proactive_int_min_qos);
		break;
	case GPU_CONTROL_PM_QOS_RESET:
		pm_qos_update_request(&proactive_mif_min_qos, 0);
		pm_qos_update_request(&proactive_apollo_min_qos, 0);
		pm_qos_update_request(&proactive_atlas_min_qos, 0);
		break;
	default:
		break;
	}

	return 0;
}
Example #7
static void esa_update_qos(void)
{
#ifdef CONFIG_PM_DEVFREQ
	int mif_qos_new;
#ifdef CONFIG_SND_ESA_SA_EFFECT
	int int_qos_new = 0;
#endif
	if (!si.fw_ready)
		mif_qos_new = 0;
	else if (esa_check_ip_exist(ADEC_AAC))
		mif_qos_new = CA5_MIF_FREQ_HIGH;
	else
		mif_qos_new = CA5_MIF_FREQ_NORM;

#ifdef CONFIG_SND_ESA_SA_EFFECT
	if (si.effect_on) {
		mif_qos_new = CA5_MIF_FREQ_BOOST;
		int_qos_new = CA5_INT_FREQ_BOOST;
	}

	if (si.int_qos != int_qos_new) {
		si.int_qos = int_qos_new;
		pm_qos_update_request(&si.ca5_int_qos, si.int_qos);
		pr_debug("%s: int_qos = %d\n", __func__, si.int_qos);
	}
#endif
	if (si.mif_qos != mif_qos_new) {
		si.mif_qos = mif_qos_new;
		pm_qos_update_request(&si.ca5_mif_qos, si.mif_qos);
		pr_debug("%s: mif_qos = %d\n", __func__, si.mif_qos);
	}
#endif
}
Example #8
static void cfb_boost(struct work_struct *w)
{
	cancel_delayed_work_sync(&unboost);
	pm_qos_update_request(&core_req, 1);
	pm_qos_update_request(&freq_req, boost_freq);
	queue_delayed_work(cfb_wq, &unboost, msecs_to_jiffies(boost_time));
}
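The unboost work queued here is not shown; it plausibly just returns both requests to their defaults once boost_time expires (hypothetical sketch):

static void cfb_unboost_fn(struct work_struct *w)	/* hypothetical */
{
	/* Release the constraints set in cfb_boost(). */
	pm_qos_update_request(&core_req, PM_QOS_DEFAULT_VALUE);
	pm_qos_update_request(&freq_req, PM_QOS_DEFAULT_VALUE);
}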
Example #9
static int fimg2d_open(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx;
	unsigned long flags, count;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		fimg2d_err("not enough memory for ctx\n");
		return -ENOMEM;
	}
	file->private_data = (void *)ctx;

	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_add_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	if (count == 1) {
		/* mif lock : 800MHz */
		pm_qos_update_request(&exynos5_g2d_mif_qos, 800000);
		pm_qos_update_request(&exynos5_g2d_cpu_qos, 400000);
	} else
		fimg2d_debug("count:%ld, pm_qos_update_request() is already called\n", count);
#endif
	return 0;
}
Example #10
static int fimg2d_release(struct inode *inode, struct file *file)
{
	struct fimg2d_context *ctx = file->private_data;
	int retry = POLL_RETRY;
	unsigned long flags, count;

	fimg2d_debug("ctx %p\n", ctx);
	while (retry--) {
		if (!atomic_read(&ctx->ncmd))
			break;
		mdelay(POLL_TIMEOUT);
	}

	g2d_spin_lock(&ctrl->bltlock, flags);
	fimg2d_del_context(ctrl, ctx);
	count = atomic_read(&ctrl->nctx);
	g2d_spin_unlock(&ctrl->bltlock, flags);

#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
	if (!count) {
		pm_qos_update_request(&exynos5_g2d_mif_qos, 0);
		pm_qos_update_request(&exynos5_g2d_cpu_qos, 0);
	} else
		fimg2d_debug("count:%ld, pm_qos_update_request() is already called\n", count);
#endif
	kfree(ctx);
	return 0;
}
Example #11
static int tegra_camera_power_off(struct tegra_camera_dev *dev)
{
	int ret = 0;
	int val = 0;

	dev_dbg(dev->dev, "%s++\n", __func__);

	/* Disable external power */
	if (dev->reg) {
		ret = regulator_disable(dev->reg);
		if (ret) {
			dev_err(dev->dev,
				"%s: disable csi regulator failed.\n",
				__func__);
			return ret;
		}
	}
	dev->power_on = 0;
	tegra_camera_on = dev->power_on;

	val = PAD_CIL_PDVREG(0x0);
	tegra_vi_csi_writel(val, CSI_CIL_PAD_CONFIG);

	if (camera_boost) {
		pr_info("%s: clean camera freq boost\n", __func__);
		pm_qos_update_request(&boost_cpu_freq_req, (s32)PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
		pm_qos_update_request(&boost_cpu_num_req, (s32)PM_QOS_MIN_ONLINE_CPUS_DEFAULT_VALUE);
	}

	return ret;
}
Example #12
static int tegra_camera_power_on(struct tegra_camera_dev *dev)
{
	int ret = 0;

	dev_dbg(dev->dev, "%s++\n", __func__);

	/* Enable external power */
	if (dev->reg) {
		ret = regulator_enable(dev->reg);
		if (ret) {
			dev_err(dev->dev,
				"%s: enable csi regulator failed.\n",
				__func__);
			return ret;
		}
	}

	dev->power_on = 1;
	tegra_camera_on = dev->power_on;

	if (camera_boost) {
		pr_info("%s: set camera boost freq = %d cpus = %d\n", __func__, camera_boost_freq, camera_boost_cpus);
		pm_qos_update_request(&boost_cpu_freq_req, (s32)camera_boost_freq);
		pm_qos_update_request(&boost_cpu_num_req, (s32)camera_boost_cpus);
	}

	return ret;
}
Example #13
static int omap_mbox_startup(struct omap_mbox *mbox)
{
	int ret = 0;
	struct omap_mbox_queue *mq;

	mutex_lock(&mbox_configured_lock);
	if (!mbox_configured++) {
		pm_qos_update_request(&mbox_qos_request,
					SET_MPU_CORE_CONSTRAINT);
		if (likely(mbox->ops->startup)) {
			ret = mbox->ops->startup(mbox);
			if (unlikely(ret))
				goto fail_startup;
		} else
			goto fail_startup;
	}

	if (!mbox->use_count++) {
		mq = mbox_queue_alloc(mbox, NULL, mbox_tx_tasklet);
		if (!mq) {
			ret = -ENOMEM;
			goto fail_alloc_txq;
		}
		mbox->txq = mq;

		mq = mbox_queue_alloc(mbox, mbox_rx_work, NULL);
		if (!mq) {
			ret = -ENOMEM;
			goto fail_alloc_rxq;
		}
		mbox->rxq = mq;
		mq->mbox = mbox;
		ret = request_irq(mbox->irq, mbox_interrupt, IRQF_SHARED,
							mbox->name, mbox);
		if (unlikely(ret)) {
			pr_err("failed to register mailbox interrupt:%d\n",
									ret);
			goto fail_request_irq;
		}
	}
	mutex_unlock(&mbox_configured_lock);
	return 0;

fail_request_irq:
	mbox_queue_free(mbox->rxq);
fail_alloc_rxq:
	mbox_queue_free(mbox->txq);
fail_alloc_txq:
	if (mbox->ops->shutdown)
		mbox->ops->shutdown(mbox);
	mbox->use_count--;
fail_startup:
	if (!--mbox_configured)
		pm_qos_update_request(&mbox_qos_request,
					 CLEAR_MPU_CORE_CONSTRAINT);
	mutex_unlock(&mbox_configured_lock);
	return ret;
}
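The matching teardown path mirrors this startup in reverse, much like the error unwinding above; a sketch assuming the historical omap-mailbox layout:

static void omap_mbox_fini(struct omap_mbox *mbox)	/* sketch */
{
	mutex_lock(&mbox_configured_lock);

	if (!--mbox->use_count) {
		free_irq(mbox->irq, mbox);
		mbox_queue_free(mbox->txq);
		mbox_queue_free(mbox->rxq);
	}

	if (!--mbox_configured) {
		if (likely(mbox->ops->shutdown))
			mbox->ops->shutdown(mbox);
		pm_qos_update_request(&mbox_qos_request,
				      CLEAR_MPU_CORE_CONSTRAINT);
	}

	mutex_unlock(&mbox_configured_lock);
}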
Example #14
/*
 * Prepare controller for a transaction and call omap_i2c_xfer_msg
 * to do the work during IRQ processing.
 */
static int
omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
	int i;
	int r;

	if (dev == NULL)
		return -EINVAL;

	if (dev->shutdown)
		return -EPERM;

	r = omap_i2c_hwspinlock_lock(dev);
	/*
	 * TODO: if we are unable to acquire the lock, we must
	 * try to recover somehow.
	 */
	if (r != 0)
		return r;

	/* We have the bus, enable IRQ */
	omap_i2c_unidle(dev);
	enable_irq(dev->irq);

	r = omap_i2c_wait_for_bb(dev);
	if (r < 0)
		r = omap_i2c_bus_clear(dev);
	if (r < 0)
		goto out;

	/*
	 * When waiting for completion of an i2c transfer, we need to
	 * set a wake up latency constraint for the MPU. This is to
	 * ensure quick enough wakeup from idle, when transfer
	 * completes.
	 */
	if (dev->pm_qos)
		pm_qos_update_request(dev->pm_qos, dev->latency);

	for (i = 0; i < num; i++) {
		r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
		if (r != 0)
			break;
	}

	if (dev->pm_qos)
		pm_qos_update_request(dev->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (r == 0)
		r = num;

	omap_i2c_wait_for_bb(dev);
out:
	disable_irq(dev->irq);
	omap_i2c_idle(dev);
	omap_i2c_hwspinlock_unlock(dev);

	return r;
}
Example #15
void bluetooth_pm_sleep(void)
{
	unsigned long irq_flags;

	//printk("+++ %s\n", __func__);

	spin_lock_irqsave(&rw_lock, irq_flags);

	/* already asleep, this is an error case */
	if (test_bit(BT_ASLEEP, &flags)) {
		spin_unlock_irqrestore(&rw_lock, irq_flags);
		printk("--- %s, already asleep return\n", __func__);
		return;
	}

//BT_S : [CONBT-1475] LGC_BT_COMMON_IMP_KERNEL_UART_HCI_COMMAND_TIMEOUT
	uart_off_jiffies = jiffies;

	if (check_uart_control_available(uart_on_jiffies, uart_off_jiffies) == false) {
		mod_timer(&uart_control_timer, jiffies + msecs_to_jiffies(UART_CONTROL_BLOCK_TIME));
		spin_unlock_irqrestore(&rw_lock, irq_flags);
		printk("--- %s - UART control unavailable Return\n", __func__);
		return;
	}
//BT_E : [CONBT-1475] LGC_BT_COMMON_IMP_KERNEL_UART_HCI_COMMAND_TIMEOUT

	set_bit(BT_ASLEEP, &flags);

	spin_unlock_irqrestore(&rw_lock, irq_flags);

	printk("%s, going to sleep...\n", __func__);

	//printk("%s, WAKE_UNLOCK_START\n", __func__);

	wake_unlock(&bsi->wake_lock);

	//printk("%s, WAKE_UNLOCK_END\n", __func__);

#ifdef UART_CONTROL_MSM
	/*Deactivating UART */
	hsuart_power(0);
#endif /*UART_CONTROL_MSM*/

#ifdef QOS_REQUEST_MSM
	if (bsi->dma_qos_request == REQUESTED) {
		pm_qos_update_request(&bsi->dma_qos, 0x7FFFFFF);
	}
#endif /* QOS_REQUEST_MSM */

#ifdef QOS_REQUEST_TEGRA
	pm_qos_update_request(&bsi->resume_cpu_freq_req,
				PM_QOS_DEFAULT_VALUE);
#endif/*QOS_REQUEST_TEGRA*/

	//printk("--- %s\n", __func__);
}
int secos_booster_start(enum secos_boost_policy policy)
{
	int ret = 0;
	int freq;

	current_core = mc_active_core();

	/* migrate to big Core */
	if ((policy != MAX_PERFORMANCE) && (policy != MID_PERFORMANCE)
			&& (policy != MIN_PERFORMANCE)) {
		pr_err("%s: wrong secos boost policy:%d\n", __func__, policy);
		ret = -EINVAL;
		goto error;
	}

	/* cpufreq configuration */
	if (policy == MAX_PERFORMANCE)
		freq = max_cpu_freq;
	else if (policy == MID_PERFORMANCE)
		freq = MID_CPUFREQ;
	else
		freq = 0;
	pm_qos_update_request(&secos_booster_qos, freq); /* KHz */

	if (!cpu_online(DEFAULT_BIG_CORE)) {
		pr_debug("%s: %d core is offline\n", __func__, DEFAULT_BIG_CORE);
		udelay(100);
		if (!cpu_online(DEFAULT_BIG_CORE)) {
			pr_debug("%s: %d core is offline\n", __func__, DEFAULT_BIG_CORE);
			pm_qos_update_request(&secos_booster_qos, 0);
			ret = -EPERM;
			goto error;
		}
		pr_debug("%s: %d core is online\n", __func__, DEFAULT_BIG_CORE);
	}
	ret = mc_switch_core(DEFAULT_BIG_CORE);
	if (ret) {
		pr_err("%s: mc switch failed : err:%d\n", __func__, ret);
		pm_qos_update_request(&secos_booster_qos, 0);
		goto error;
	}

	/* Change schedule policy */
	mc_set_schedule_policy(DEFAULT_BIG_CORE);

	/* Restore origin performance policy after default boost time */
	hrtimer_cancel(&timer);
	hrtimer_start(&timer, ns_to_ktime((u64)DEFAULT_SECOS_BOOST_TIME * NSEC_PER_MSEC),
			HRTIMER_MODE_REL);

error:
	return ret;
}
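The hrtimer armed at the end of secos_booster_start() has to undo the boost when it fires; since hrtimer callbacks run in atomic context, the core switch would likely be deferred to process context. A hypothetical sketch:

static void secos_boost_restore(struct work_struct *work)	/* hypothetical */
{
	/* Drop the cpufreq floor and migrate the secure OS back. */
	pm_qos_update_request(&secos_booster_qos, 0);
	mc_switch_core(current_core);
}
static DECLARE_WORK(restore_work, secos_boost_restore);

static enum hrtimer_restart secos_boost_timer_fn(struct hrtimer *t)	/* hypothetical */
{
	schedule_work(&restore_work);
	return HRTIMER_NORESTART;
}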
Example #17
void fimg2d_pm_qos_update_cpu(struct fimg2d_control *ctrl,
				enum fimg2d_qos_status status)
{
	enum fimg2d_qos_level idx;
	unsigned long qflags;

	g2d_spin_lock(&ctrl->qoslock, qflags);
	if ((ctrl->qos_lv >= G2D_LV0) && (ctrl->qos_lv < G2D_LV_END))
		idx = ctrl->qos_lv;
	else
		goto err;
	g2d_spin_unlock(&ctrl->qoslock, qflags);

	if (status == FIMG2D_QOS_ON) {
		if (ctrl->pre_qos_lv != ctrl->qos_lv) {
#ifdef CONFIG_SCHED_HMP
			g2d_spin_lock(&ctrl->qoslock, qflags);
			if (idx == 0 && !ctrl->boost) {
				set_hmp_boost(true);
				ctrl->boost = true;
				fimg2d_debug("turn on hmp booster\n");
			}
			g2d_spin_unlock(&ctrl->qoslock, qflags);
#endif

			pm_qos_update_request(&ctrl->exynos5_g2d_cluster1_qos,
					g2d_qos_table[idx].freq_cpu);
			pm_qos_update_request(&ctrl->exynos5_g2d_cluster0_qos,
					g2d_qos_table[idx].freq_kfc);
			fimg2d_debug("idx:%d, freq_cpu:%d, freq_kfc:%d\n",
					idx, g2d_qos_table[idx].freq_cpu,
					g2d_qos_table[idx].freq_kfc);
		}
	} else if (status == FIMG2D_QOS_OFF) {
		pm_qos_update_request(&ctrl->exynos5_g2d_cluster1_qos, 0);
		pm_qos_update_request(&ctrl->exynos5_g2d_cluster0_qos, 0);

#ifdef CONFIG_SCHED_HMP
		g2d_spin_lock(&ctrl->qoslock, qflags);
		if (ctrl->boost) {
			set_hmp_boost(false);
			ctrl->boost = false;
			fimg2d_debug("turn off hmp booster\n");
		}
		g2d_spin_unlock(&ctrl->qoslock, qflags);
#endif
	}

	return;
err:
	/* the spinlock is still held when we jump here */
	g2d_spin_unlock(&ctrl->qoslock, qflags);
	fimg2d_debug("invalid qos_lv:%d\n", ctrl->qos_lv);
}
Example #18
/*
 * Prepare controller for a transaction and call omap_i2c_xfer_msg
 * to do the work during IRQ processing.
 */
static int
omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct omap_i2c_dev *dev = i2c_get_adapdata(adap);
	int i;
	int r;
	u16 val;

	omap_i2c_unidle(dev);

	r = omap_i2c_wait_for_bb(dev);
	/* If it timed out, check again after a soft reset of the I2C block */
	if (WARN_ON(r == -ETIMEDOUT)) {
		/* Provide a permanent clock to recover the peripheral */
		val = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
		val |= (OMAP_I2C_SYSTEST_ST_EN |
				OMAP_I2C_SYSTEST_FREE |
				(2 << OMAP_I2C_SYSTEST_TMODE_SHIFT));
		omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, val);
		msleep(1);
		omap_i2c_init(dev);
		r = omap_i2c_wait_for_bb(dev);
	}
	if (r < 0)
		goto out;

	/*
	 * When waiting for completion of an i2c transfer, we need to
	 * set a wake up latency constraint for the MPU. This is to
	 * ensure quick enough wakeup from idle, when transfer
	 * completes.
	 */
	if (dev->pm_qos)
		pm_qos_update_request(dev->pm_qos, dev->latency);

	for (i = 0; i < num; i++) {
		r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
		if (r != 0)
			break;
	}

	if (dev->pm_qos)
		pm_qos_update_request(dev->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (r == 0)
		r = num;

	omap_i2c_wait_for_bb(dev);
out:
	omap_i2c_idle(dev);
	return r;
}
Example #19
static void usbnet_unlock_perf(void)
{
	pr_info("[USBNET] %s\n", __func__);

#ifdef CONFIG_PERFLOCK
	if (is_perf_lock_active(&usbnet_perf_lock))
		perf_unlock(&usbnet_perf_lock);
#endif
	pm_qos_update_request(&usbnet_req_freq, (s32)PM_QOS_CPU_FREQ_MIN_DEFAULT_VALUE);
	pm_qos_update_request(&usbnet_req_cpus, (s32)PM_QOS_MIN_ONLINE_CPUS_DEFAULT_VALUE);

	is_usbnet_perf_locked = 0;
}
static int gpu_pm_qos_command(struct exynos_context *platform, gpu_pmqos_state state)
{
#ifdef CONFIG_BUS_DEVFREQ
	switch (state) {
	case GPU_CONTROL_PM_QOS_INIT:
		pm_qos_add_request(&exynos5_g3d_mif_qos, PM_QOS_BUS_THROUGHPUT, 0);
		pm_qos_add_request(&exynos5_g3d_int_qos, PM_QOS_DEVICE_THROUGHPUT, 0);
		pm_qos_add_request(&exynos5_g3d_cpu_kfc_min_qos, PM_QOS_KFC_FREQ_MIN, 0);
		pm_qos_add_request(&exynos5_g3d_cpu_egl_max_qos, PM_QOS_CPU_FREQ_MAX, PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE);
#if SLSI_INTEGRATION
#if defined(SET_MINLOCK)
		pm_qos_add_request(&exynos5_g3d_cpu_egl_min_qos, PM_QOS_CPU_FREQ_MIN, 0);

		platform->custom_cpu_max_lock = 0;
#endif
#endif
		break;
	case GPU_CONTROL_PM_QOS_DEINIT:
		pm_qos_remove_request(&exynos5_g3d_mif_qos);
		pm_qos_remove_request(&exynos5_g3d_int_qos);
		pm_qos_remove_request(&exynos5_g3d_cpu_kfc_min_qos);
		pm_qos_remove_request(&exynos5_g3d_cpu_egl_max_qos);
#if SLSI_INTEGRATION
#if defined(SET_MINLOCK)
		pm_qos_remove_request(&exynos5_g3d_cpu_egl_min_qos);
#endif
#endif
		break;
	case GPU_CONTROL_PM_QOS_SET:
		if (platform->step < 0)
			return -1;
		pm_qos_update_request(&exynos5_g3d_mif_qos, platform->table[platform->step].mem_freq);
		pm_qos_update_request(&exynos5_g3d_int_qos, platform->table[platform->step].int_freq);
		pm_qos_update_request(&exynos5_g3d_cpu_kfc_min_qos, platform->table[platform->step].cpu_freq);

#if SLSI_INTEGRATION
#if defined(SET_MINLOCK)
		if (platform->custom_cpu_max_lock)
			pm_qos_update_request(&exynos5_g3d_cpu_egl_max_qos, platform->custom_cpu_max_lock);
		else
#endif
#endif
			pm_qos_update_request(&exynos5_g3d_cpu_egl_max_qos, platform->table[platform->step].cpu_max_freq);

		break;
	case GPU_CONTROL_PM_QOS_RESET:
		pm_qos_update_request(&exynos5_g3d_mif_qos, 0);
		pm_qos_update_request(&exynos5_g3d_int_qos, 0);
		pm_qos_update_request(&exynos5_g3d_cpu_kfc_min_qos, 0);
		pm_qos_update_request(&exynos5_g3d_cpu_egl_max_qos, PM_QOS_CPU_FREQ_MAX_DEFAULT_VALUE);
		break;
	default:
		break;
	}
#endif /* CONFIG_BUS_DEVFREQ */
	return 0;
}
static void pxav3_access_constrain(struct sdhci_host *host, unsigned int ac)
{
	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc));
	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data;

	if (!pdata)
		return;
	if (ac)
		pm_qos_update_request(&pdata->qos_idle,
				PM_QOS_CPUIDLE_BLOCK_AXI_VALUE);
        else
		pm_qos_update_request(&pdata->qos_idle,
				PM_QOS_CPUIDLE_BLOCK_DEFAULT_VALUE);
}
/* GPU setmode: clock and power */
int sec_gpu_pwr_clk_state_set(sec_gpu_state state)
{
	int err = 0;
	mutex_lock(&lock);
	switch (state) {
	case GPU_PWR_CLK_STATE_ON:
	{
#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
		pm_qos_update_request(&exynos5_g3d_int_qos, 200000);
		if (sec_gpu_setting_clock < MIF_THRESHHOLD_VALUE_CLK)
			pm_qos_update_request(&exynos5_g3d_mif_qos, 267000);
		else
			pm_qos_update_request(&exynos5_g3d_mif_qos, 800000);

		if (sec_gpu_setting_clock >= sec_gpu_top_clock) {
#ifdef CONFIG_ARM_EXYNOS_IKS_CPUFREQ
			pm_qos_update_request(&exynos5_g3d_cpu_qos, 600000);
#else
			pm_qos_update_request(&exynos5_g3d_cpu_qos, 800000);
#endif
		}
#endif
		err = gpu_power_enable();
		if (err) {
			mutex_unlock(&lock);
			return err;
		}
		err = sec_gpu_clock_enable();
		if (err) {
			mutex_unlock(&lock);
			return err;
		}
		sec_gpu_power_on = true;
	}
	break;
	case GPU_PWR_CLK_STATE_OFF:
	{
		sec_gpu_power_on = false;
		sec_gpu_clock_disable();
		err = gpu_power_disable();
		if (err) {
			mutex_unlock(&lock);
			return err;
		}
#if defined(CONFIG_ARM_EXYNOS5410_BUS_DEVFREQ)
		pm_qos_update_request(&exynos5_g3d_cpu_qos, 0);
		pm_qos_update_request(&exynos5_g3d_int_qos, 0);
		pm_qos_update_request(&exynos5_g3d_mif_qos, 0);
#endif
	}
	break;
	default:
		PVR_DPF((PVR_DBG_ERROR, "Error setting sec_gpu_state_set: %d", state));
	break;
	}

	mutex_unlock(&lock);
	return err;
}
void fimg2d_pm_qos_update(struct fimg2d_control *ctrl, enum fimg2d_qos_status status)
{
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ) || \
	defined(CONFIG_FIMG2D_USE_BUS_DEVFREQ)
	struct fimg2d_platdata *pdata;
	enum fimg2d_qos_level idx;
	int ret = 0;

#ifdef CONFIG_OF
	pdata = ctrl->pdata;
#else
	pdata = to_fimg2d_plat(ctrl->dev);
#endif
#endif

	if (status == FIMG2D_QOS_ON) {
		if (ctrl->pre_qos_lv != ctrl->qos_lv) {
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
			idx = ctrl->qos_lv;
			if (idx == 0)
				ret = set_hmp_boost(true);

			pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos,
					g2d_qos_table[idx].freq_mif);
			pm_qos_update_request(&ctrl->exynos5_g2d_int_qos,
					g2d_qos_table[idx].freq_int);
			fimg2d_debug("idx:%d, freq_mif:%d, freq_int:%d, ret:%d\n",
					idx, g2d_qos_table[idx].freq_mif,
					g2d_qos_table[idx].freq_int, ret);

#endif
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
			defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
			pm_qos_update_request(&ctrl->exynos5_g2d_cpu_qos,
					g2d_qos_table[idx].freq_cpu);
			pm_qos_update_request(&ctrl->exynos5_g2d_kfc_qos,
					g2d_qos_table[idx].freq_kfc);
			fimg2d_debug("idx:%d, freq_cpu:%d, freq_kfc:%d\n",
					idx, g2d_qos_table[idx].freq_cpu,
					g2d_qos_table[idx].freq_kfc);
#endif
		}
	} else if (status == FIMG2D_QOS_OFF) {
#ifdef CONFIG_FIMG2D_USE_BUS_DEVFREQ
		pm_qos_update_request(&ctrl->exynos5_g2d_mif_qos, 0);
		pm_qos_update_request(&ctrl->exynos5_g2d_int_qos, 0);
#endif
#if defined(CONFIG_ARM_EXYNOS_IKS_CPUFREQ) || \
	defined(CONFIG_ARM_EXYNOS_MP_CPUFREQ)
		pm_qos_update_request(&ctrl->exynos5_g2d_cpu_qos, 0);
		pm_qos_update_request(&ctrl->exynos5_g2d_kfc_qos, 0);
#endif
		idx = ctrl->qos_lv;
		if (idx == 0)
			ret = set_hmp_boost(false);
	}
}
Example #24
static void spi_aic3254_allow_sleep(void)
{
	struct ecodec_aic3254_state *codec_drv = &codec_clk;

	pm_qos_update_request(&codec_drv->pm_qos_req, PM_QOS_DEFAULT_VALUE);
	wake_unlock(&codec_drv->wakelock);
}
Example #25
static int exynos5_devfreq_isp_suspend(struct device *dev)
{
	if (pm_qos_request_active(&exynos5_isp_qos))
		pm_qos_update_request(&exynos5_isp_qos, exynos5_devfreq_isp_profile.initial_freq);

	return 0;
}
Example #26
static void serial_omap_uart_qos_work(struct work_struct *work)
{
	struct uart_omap_port *up = container_of(work, struct uart_omap_port,
						qos_work);

	pm_qos_update_request(&up->pm_qos_request, up->latency);
}
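up->latency itself is set by the runtime PM callbacks, which only schedule this work; deferring the pm_qos_update_request() call matters because those callbacks can run in atomic context. A sketch of the scheduling side, assuming the omap-serial layout:

static int serial_omap_runtime_suspend(struct device *dev)	/* sketch */
{
	struct uart_omap_port *up = dev_get_drvdata(dev);

	/* Relax the latency constraint; applied later from process context. */
	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	schedule_work(&up->qos_work);
	return 0;
}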
bool wcd9xxx_lock_sleep(
	struct wcd9xxx_core_resource *wcd9xxx_res)
{
	enum wcd9xxx_pm_state os;

	mutex_lock(&wcd9xxx_res->pm_lock);
	if (wcd9xxx_res->wlock_holders++ == 0) {
		pr_debug("%s: holding wake lock\n", __func__);
		pm_qos_update_request(&wcd9xxx_res->pm_qos_req,
				      msm_cpuidle_get_deep_idle_latency());
	}
	mutex_unlock(&wcd9xxx_res->pm_lock);

	if (!wait_event_timeout(wcd9xxx_res->pm_wq,
				((os = wcd9xxx_pm_cmpxchg(wcd9xxx_res,
						  WCD9XXX_PM_SLEEPABLE,
						  WCD9XXX_PM_AWAKE)) ==
							WCD9XXX_PM_SLEEPABLE ||
					(os == WCD9XXX_PM_AWAKE)),
				msecs_to_jiffies(
					WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS))) {
		pr_warn("%s: system didn't resume within %dms, s %d, w %d\n",
			__func__,
			WCD9XXX_SYSTEM_RESUME_TIMEOUT_MS, wcd9xxx_res->pm_state,
			wcd9xxx_res->wlock_holders);
		wcd9xxx_unlock_sleep(wcd9xxx_res);
		return false;
	}
	wake_up_all(&wcd9xxx_res->pm_wq);
	return true;
}
static int msm_mctl_release(struct msm_cam_media_controller *p_mctl)
{
	int rc = 0;
	struct msm_sync *sync = &(p_mctl->sync);

	v4l2_subdev_call(p_mctl->ispif_sdev, core, ioctl,
		VIDIOC_MSM_ISPIF_RELEASE, NULL);

	if (p_mctl->isp_sdev && p_mctl->isp_sdev->isp_release)
		p_mctl->isp_sdev->isp_release(&p_mctl->sync);

	v4l2_subdev_call(p_mctl->csid_sdev, core, ioctl,
		VIDIOC_MSM_CSID_RELEASE, NULL);

	v4l2_subdev_call(p_mctl->csiphy_sdev, core, ioctl,
		VIDIOC_MSM_CSIPHY_RELEASE, NULL);

	if (p_mctl->sync.actctrl.a_power_down)
		p_mctl->sync.actctrl.a_power_down(sync->sdata->actuator_info);

	if (p_mctl->sync.sctrl.s_release)
		p_mctl->sync.sctrl.s_release();

	rc = msm_camio_sensor_clk_off(sync->pdev);
	if (rc < 0)
		pr_err("%s: msm_camio_sensor_clk_off failed:%d\n",
			 __func__, rc);

	pm_qos_update_request(&p_mctl->pm_qos_req_list,
				PM_QOS_DEFAULT_VALUE);
	pm_qos_remove_request(&p_mctl->pm_qos_req_list);
	wake_unlock(&p_mctl->sync.wake_lock);

	return rc;
}
Example #29
int tdmb_fc8050_power_off(void)
{
	if ( fc8050_ctrl_info.TdmbPowerOnState == TRUE )
	{
		tdmb_fc8050_interrupt_lock();
		fc8050_ctrl_info.TdmbPowerOnState = FALSE;
		gpio_set_value(DMB_RESET_N, 0);
		gpio_set_value(DMB_EN, 0);
		//gpio_direction_output(DMB_INT_N, false);   
		gpio_set_value(DMB_INT_N, 0);		

#ifdef ANTENNA_SWITCHING
        gpio_set_value_cansleep(DMB_ANT_SEL_P_EAR, 1);
        gpio_set_value_cansleep(DMB_ANT_SEL_N_INNER, 0);  
#endif  /* ANTENNA_SWITCHING */

		wake_unlock(&fc8050_ctrl_info.wake_lock);

#ifdef PM_QOS		/* QoS release */
		if(pm_qos_request_active(&fc8050_ctrl_info.pm_req_list)) {
			pm_qos_update_request(&fc8050_ctrl_info.pm_req_list, PM_QOS_DEFAULT_VALUE);	
		}	
#endif
	}
	else
	{
		printk("tdmb_fc8050_power_off: the power is already turned off\n");
	}

	printk("tdmb_fc8050_power_off completed \n");
	
	return TRUE;
}
Example #30
static int atomisp_resume(struct device *dev)
{
	struct atomisp_device *isp = (struct atomisp_device *)
		dev_get_drvdata(dev);
	int ret;

	if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_VALLEYVIEW2) {
		ret = pmu_nc_set_power_state(TNG_ISP_ISLAND, OSPM_ISLAND_UP,
					MRFLD_ISPSSPM0);
		if (ret)
			return ret;
	}

	pm_qos_update_request(&isp->pm_qos, isp->max_isr_latency);

	/* Turn on ISP d-phy */
	ret = atomisp_ospm_dphy_up(isp);
	if (ret) {
		v4l2_err(&atomisp_dev,
			 "Failed to power up ISP!\n");
		return -EINVAL;
	}

	/* Restore register values for iUnit and iUnitPHY registers */
	if (isp->saved_regs.pcicmdsts)
		atomisp_restore_iunit_reg(isp);

	if (IS_ISP2400)
		atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_LOW);

	return 0;
}