/*
 * Module-parameter setter for the CPU boost duration (milliseconds).
 *
 * Parses @val into cpuboost_time via param_set_ulong(), clamps it to
 * OMAP_CPUBOOST_TIME_MAX, and — when non-zero — holds the MPU at
 * OMAP_CPUBOOST_FREQ_MAX, arming delayed work that drops the frequency
 * back to OMAP_CPUBOOST_FREQ_MIN when the boost window expires.
 * A zero value, or a parse failure, clears any boost immediately.
 *
 * Returns 0 on success or the error code from param_set_ulong().
 */
static int cpuboost_time_set(const char *val, struct kernel_param *kp)
{
	struct device *mpu_dev;
	int ret = param_set_ulong(val, kp);
	mpu_dev = omap2_get_mpuss_device();

	mutex_lock(&lock);

	/* Clamp the requested boost window to the supported maximum. */
	if (cpuboost_time > OMAP_CPUBOOST_TIME_MAX)
		cpuboost_time = OMAP_CPUBOOST_TIME_MAX;

	/* Disarm any previously scheduled un-boost before deciding anew. */
	cancel_delayed_work(&(cbs->work->work));
	printk(KERN_INFO "cpuboost_time_set = %ld\n", cpuboost_time);

	if (ret || cpuboost_time == 0) {
		/*
		 * Parse failure or an explicit zero: never boost on a stale
		 * value — return the MPU to the minimum boost frequency.
		 * (The original boosted first and then cleared, leaving the
		 * delayed work scheduled even on error.)
		 */
		omap_device_scale(device, mpu_dev, OMAP_CPUBOOST_FREQ_MIN);
		printk(KERN_INFO "cpuboost_time cleared\n");
	} else {
		omap_device_scale(device, mpu_dev, OMAP_CPUBOOST_FREQ_MAX);
		schedule_delayed_work(&(cbs->work->work),
					msecs_to_jiffies(cpuboost_time));
	}
	mutex_unlock(&lock);

	return ret;
}
/*
 * Delayed-work handler that ends a boost window: under the lock, scale
 * the MPU back to OMAP_CPUBOOST_FREQ_MIN and reset cpuboost_time to 0.
 *
 * @work: the work item (unused; state lives in file-scope globals).
 */
static void cpuboost_process_work(struct work_struct *work)
{
	struct device *mpu = omap2_get_mpuss_device();

	mutex_lock(&lock);

	omap_device_scale(device, mpu, OMAP_CPUBOOST_FREQ_MIN);
	printk(KERN_INFO "cpuboost_time cleared\n");

	/* Mark the boost window as over for subsequent readers. */
	cpuboost_time = 0;

	mutex_unlock(&lock);
}
/*
 * Scale the GPU device to @rate while managing the DPLL-cascading
 * blocker: whenever the GPU is asked to run above its lowest available
 * OPP frequency the blocker is held, otherwise it is released.
 *
 * @req_dev:    device issuing the scaling request
 * @target_dev: GPU device whose rate is changed
 * @rate:       requested rate
 *
 * Returns the result of omap_device_scale().
 */
int omap_device_scale_gpu(struct device *req_dev, struct device *target_dev,
			  unsigned long rate)
{
	unsigned long lowest = 0;

	/* A ceil search starting from 0 yields the lowest OPP frequency. */
	opp_find_freq_ceil(target_dev, &lowest);

	if (rate > lowest)
		omap4_dpll_cascading_blocker_hold(target_dev);
	else
		omap4_dpll_cascading_blocker_release(target_dev);

	return omap_device_scale(req_dev, target_dev, rate);
}
/* ---- Example 4 ---- */
/*
 * Forward a frequency-scaling request for a remote processor.
 * The rproc's device is used as both the requesting and the target
 * device for the DVFS call.
 *
 * Returns the result of omap_device_scale().
 */
static int omap_rproc_scale(struct rproc *rproc, long val)
{
	return omap_device_scale(rproc->dev, rproc->dev, val);
}
/* ---- Example 5 ---- */
/*
 * Scale a platform device to the requested rate; the device acts as
 * both requester and target of the DVFS call.
 *
 * Returns the result of omap_device_scale().
 */
static int rpres_scale_dev(struct platform_device *pdev, long val)
{
	return omap_device_scale(&pdev->dev, &pdev->dev, val);
}
/* ---- Example 6 ---- */
/*
 * Resource-manager DVFS hook: forward a frequency-scaling request to
 * omap_device_scale().
 *
 * @rdev: device issuing the request
 * @tdev: device whose rate is to be changed
 * @val:  requested rate
 *
 * Returns the result of omap_device_scale().
 */
static int omap2_rprm_device_scale(struct device *rdev, struct device *tdev,
		unsigned long val)
{
	/*
	 * omap_device_scale() takes (requesting dev, target dev, rate) at
	 * every other call site in this file; the original call passed only
	 * two arguments and silently dropped the requesting device.
	 */
	return omap_device_scale(rdev, tdev, val);
}