/*
 * gpubw_start() - governor start hook for the GPU bus-width monitor.
 *
 * Recovers the governor private data from the extended bus profile that
 * embeds devfreq->profile, then precomputes the per-level up/down cut-over
 * percentages used by the bus-scaling decision.  Returns 0.
 */
static int gpubw_start(struct devfreq *devfreq)
{
    struct devfreq_msm_adreno_tz_data *priv;

    struct msm_busmon_extended_profile *bus_profile = container_of(
                (devfreq->profile),
                struct msm_busmon_extended_profile,
                profile);
    unsigned int t1, t2 = 2 * HIST;
    int i;

    /*
     * Assumes a single device instance uses this governor, so the
     * private data can be restored from the profile's container.
     */
    devfreq->data = bus_profile->private_data;
    priv = devfreq->data;

    /*
     * Set up the cut-over percentages for the bus calculation.
     * Guard on bus.num (as tz_start() does) so that with no bus
     * levels configured we neither write stale p_up/p_down entries
     * nor call _update_cutoff() on an empty table.
     */
    if (priv->bus.num) {
        for (i = 0; i < priv->bus.num; i++) {
            /* This level's IB as a percentage of the top level's IB. */
            t1 = (u32)(100 * priv->bus.ib[i]) /
                 (u32)priv->bus.ib[priv->bus.num - 1];
            priv->bus.p_up[i] = t1 - HIST;
            priv->bus.p_down[i] = t2 - 2 * HIST;
            t2 = t1;
        }
        /* Set the upper-most and lower-most bounds correctly. */
        priv->bus.p_down[0] = 0;
        /* Only meaningful when a second level actually exists. */
        if (priv->bus.num >= 2)
            priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
                                  priv->bus.p_down[1] : (2 * HIST);
        priv->bus.p_up[priv->bus.num - 1] = 100;
        _update_cutoff(priv, priv->bus.max);
    }

    return 0;
}
/*
 * tz_start() - governor start hook for the TrustZone DCVS governor.
 *
 * Restores governor private data from the extended profile, hands the
 * power-level table to TZ via scm_call(TZ_INIT_ID), precomputes the bus
 * cut-over percentages, and registers the KGSL devfreq notifier.
 *
 * Returns 0-relative status from kgsl_devfreq_add_notifier(), -EINVAL if
 * the power-level table does not fit, or the scm_call() error.
 */
static int tz_start(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *priv;
	unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
	unsigned int t1, t2 = 2 * HIST;
	int i, out, ret;

	struct msm_adreno_extended_profile *ext_profile = container_of(
					(devfreq->profile),
					struct msm_adreno_extended_profile,
					profile);

	/*
	 * Assuming that we have only one instance of the adreno device
	 * connected to this governor,
	 * can safely restore the pointer to the governor private data
	 * from the container of the device profile
	 */
	devfreq->data = ext_profile->private_data;

	priv = devfreq->data;
	priv->nb.notifier_call = tz_notify;

	/*
	 * Pack the table for TZ: slot 0 carries the level count,
	 * slots 1..n carry the frequencies.
	 */
	out = 1;
	if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
		for (i = 0; i < devfreq->profile->max_state; i++)
			tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
		tz_pwrlevels[0] = i;
	} else {
		pr_err(TAG "tz_pwrlevels[] is too short\n");
		return -EINVAL;
	}

	ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
			sizeof(tz_pwrlevels), NULL, 0);

	if (ret != 0) {
		pr_err(TAG "tz_init failed\n");
		/*
		 * Propagate the failure instead of continuing: without a
		 * successful TZ init the governor cannot make decisions.
		 */
		return ret;
	}

	/* Set up the cut-over percentages for the bus calculation. */
	if (priv->bus.num) {
		for (i = 0; i < priv->bus.num; i++) {
			t1 = (u32)(100 * priv->bus.ib[i]) /
					(u32)priv->bus.ib[priv->bus.num - 1];
			priv->bus.p_up[i] = t1 - HIST;
			priv->bus.p_down[i] = t2 - 2 * HIST;
			t2 = t1;
		}
		/* Set the upper-most and lower-most bounds correctly. */
		priv->bus.p_down[0] = 0;
		priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
					priv->bus.p_down[1] : (2 * HIST);
		/* bus.num >= 1 here, so the top index is always valid. */
		priv->bus.p_up[priv->bus.num - 1] = 100;
		_update_cutoff(priv, priv->bus.max);
	}

	return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
}
/*
 * tz_start() - governor start hook for the TrustZone DCVS governor.
 *
 * Validates the caller-supplied private data, initializes TZ DCVS with
 * the device's power-level table, precomputes the bus cut-over
 * percentages, and registers the KGSL devfreq notifier.
 */
static int tz_start(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *priv;
	unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
	unsigned int prev_cut, cur_cut;
	int lvl, ret;

	if (devfreq->data == NULL) {
		pr_err(TAG "data is required for this governor\n");
		return -EINVAL;
	}

	priv = devfreq->data;
	priv->nb.notifier_call = tz_notify;

	/* The table reserves slot 0 for the count, so all levels must fit. */
	if (devfreq->profile->max_state >= MSM_ADRENO_MAX_PWRLEVELS) {
		pr_err(TAG "tz_pwrlevels[] is too short\n");
		return -EINVAL;
	}

	/* Slot 0 carries the level count, slots 1..n the frequencies. */
	for (lvl = 0; lvl < devfreq->profile->max_state; lvl++)
		tz_pwrlevels[lvl + 1] = devfreq->profile->freq_table[lvl];
	tz_pwrlevels[0] = lvl;

	ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
			sizeof(tz_pwrlevels), NULL, 0);

	if (ret != 0) {
		pr_err(TAG "tz_init failed\n");
		return ret;
	}

	/* Set up the cut-over percentages for the bus calculation. */
	if (priv->bus.num) {
		prev_cut = 2 * HIST;
		for (lvl = 0; lvl < priv->bus.num; lvl++) {
			cur_cut = (u32)(100 * priv->bus.ib[lvl]) /
					(u32)priv->bus.ib[priv->bus.num - 1];
			priv->bus.p_up[lvl] = cur_cut - HIST;
			priv->bus.p_down[lvl] = prev_cut - 2 * HIST;
			prev_cut = cur_cut;
		}
		/* Set the upper-most and lower-most bounds correctly. */
		priv->bus.p_down[0] = 0;
		if (priv->bus.p_down[1] < (2 * HIST))
			priv->bus.p_down[1] = 2 * HIST;
		if (priv->bus.num - 1 >= 0)
			priv->bus.p_up[priv->bus.num - 1] = 100;
		_update_cutoff(priv, priv->bus.max);
	}

	return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
}
/*
 * tz_get_target_freq() - pick the next GPU frequency.
 *
 * Accumulates busy/total time, asks TZ (or a local heuristic when the GPU
 * has been busy past CEILING) for a level delta, and writes the chosen
 * frequency to *freq.  *flag may be set to a FAST/SLOW bus hint based on
 * normalized RAM cycles versus the precomputed cut-overs.
 *
 * Returns 0 on success, or the error from get_dev_status() /
 * devfreq_get_freq_level().
 */
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
				u32 *flag)
{
	int result = 0;
	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
	struct devfreq_dev_status stats;
	struct xstats b;
	int val, level = 0;
	int act_level;
	int norm_cycles;
	int gpu_percent;

	/* Only request extended bus stats when bus scaling is configured. */
	if (priv->bus.num)
		stats.private_data = &b;
	else
		stats.private_data = NULL;
	result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
	if (result) {
		pr_err(TAG "get_status failed %d\n", result);
		return result;
	}

	*freq = stats.current_frequency;
	*flag = 0;
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;
	if (priv->bus.num) {
		priv->bus.total_time += stats.total_time;
		priv->bus.gpu_time += stats.busy_time;
		/* RAM active and wait cycles both count toward bus demand. */
		priv->bus.ram_time += b.ram_time;
		priv->bus.ram_time += b.ram_wait;
	}

	/*
	 * Do not waste CPU cycles running this algorithm if
	 * the GPU just started, or if less than FLOOR time
	 * has passed since the last run or the gpu hasn't been
	 * busier than MIN_BUSY.
	 */
	if ((stats.total_time == 0) ||
		(priv->bin.total_time < FLOOR) ||
		(unsigned int) priv->bin.busy_time < MIN_BUSY) {
		return 0;
	}

	level = devfreq_get_freq_level(devfreq, stats.current_frequency);

	if (level < 0) {
		pr_err(TAG "bad freq %ld\n", stats.current_frequency);
		return level;
	}

	/*
	 * If there is an extended block of busy processing,
	 * increase frequency.  Otherwise run the normal algorithm.
	 */
	if (priv->bin.busy_time > CEILING) {
		val = -1 * level;
	} else {
		val = __secure_tz_entry3(TZ_UPDATE_ID,
				level,
				priv->bin.total_time,
				priv->bin.busy_time);
	}
	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;

	/*
	 * If the decision is to move to a different level, make sure the GPU
	 * frequency changes.
	 */
	if (val) {
		level += val;
		level = max(level, 0);
		/*
		 * Clamp to the last valid freq_table index (max_state - 1):
		 * clamping to max_state would read one element past the end
		 * of freq_table at the "end" label below.
		 */
		level = min_t(int, level, devfreq->profile->max_state - 1);
		goto clear;
	}

	/* Not enough accumulated bus time yet to make a bus decision. */
	if (priv->bus.total_time < LONG_FLOOR)
		goto end;
	norm_cycles = (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
			(unsigned int) priv->bus.total_time;
	/*
	 * If there's a new high watermark, update the cutoffs and send the
	 * FAST hint.  Otherwise check the current value against the current
	 * cutoffs.
	 */
	if (norm_cycles > priv->bus.max) {
		_update_cutoff(priv, norm_cycles);
		*flag = DEVFREQ_FLAG_FAST_HINT;
	} else {
		/*
		 * Normalize by gpu_time unless it is a small fraction of
		 * the total time interval.
		 */
		norm_cycles = (100 * norm_cycles) / TARGET;
		act_level = priv->bus.index[level] + b.mod;
		act_level = (act_level < 0) ? 0 : act_level;
		act_level = (act_level >= priv->bus.num) ?
			(priv->bus.num - 1) : act_level;
		if (norm_cycles > priv->bus.up[act_level] &&
			gpu_percent > CAP)
			*flag = DEVFREQ_FLAG_FAST_HINT;
		else if (norm_cycles < priv->bus.down[act_level] && level)
			*flag = DEVFREQ_FLAG_SLOW_HINT;
	}

clear:
	priv->bus.total_time = 0;
	priv->bus.gpu_time = 0;
	priv->bus.ram_time = 0;

end:
	*freq = devfreq->profile->freq_table[level];
	return 0;
}
/*
 * devfreq_gpubw_get_target() - compute the bus vote hint and AB vote.
 *
 * Accumulates GPU/RAM statistics over at least LONG_FLOOR of total time,
 * then either raises the cut-over watermark (FAST hint) or compares the
 * normalized RAM cycles against the per-level up/down cut-overs.  When a
 * bus width is defined, also derives an AB vote in megabytes.
 *
 * Returns the status of get_dev_status().
 */
static int devfreq_gpubw_get_target(struct devfreq *df,
				unsigned long *freq,
				u32 *flag)
{

	struct devfreq_msm_adreno_tz_data *priv = df->data;
	struct msm_busmon_extended_profile *bus_profile = container_of(
					(df->profile),
					struct msm_busmon_extended_profile,
					profile);
	struct devfreq_dev_status stats;
	struct xstats b;
	int result;
	int level = 0;
	int act_level;
	int norm_cycles;
	int gpu_percent;
	/*
	 * Normalized AB should at max usage be the gpu_bimc frequency in MHz.
	 * Start with a reasonable value and let the system push it up to max.
	 */
	static int norm_ab_max = 300;
	int norm_ab;
	unsigned long ab_mbytes = 0;

	stats.private_data = &b;

	result = df->profile->get_dev_status(df->dev.parent, &stats);

	*freq = stats.current_frequency;

	priv->bus.total_time += stats.total_time;
	priv->bus.gpu_time += stats.busy_time;
	priv->bus.ram_time += b.ram_time;
	priv->bus.ram_wait += b.ram_wait;

	level = devfreq_get_freq_level(df, stats.current_frequency);

	/* Keep accumulating until the sample window is long enough. */
	if (priv->bus.total_time < LONG_FLOOR)
		return result;

	norm_cycles = (unsigned int)(priv->bus.ram_time + priv->bus.ram_wait) /
			(unsigned int) priv->bus.total_time;
	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
			(unsigned int) priv->bus.total_time;

	/*
	 * If there's a new high watermark, update the cutoffs and send the
	 * FAST hint.  Otherwise check the current value against the current
	 * cutoffs.
	 */
	if (norm_cycles > priv->bus.max) {
		_update_cutoff(priv, norm_cycles);
		bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
	} else {
		/* GPU votes for IB not AB so don't under vote the system */
		norm_cycles = (100 * norm_cycles) / TARGET;
		act_level = priv->bus.index[level] + b.mod;
		act_level = (act_level < 0) ? 0 : act_level;
		act_level = (act_level >= priv->bus.num) ?
		(priv->bus.num - 1) : act_level;
		if (norm_cycles > priv->bus.up[act_level] &&
				gpu_percent > CAP)
			bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
		else if (norm_cycles < priv->bus.down[act_level] && level)
			bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
	}

	/* Calculate the AB vote based on bus width if defined */
	if (priv->bus.width) {
		norm_ab =  (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
		/* Calculate AB in Mega Bytes and roundup in BW_STEP */
		ab_mbytes = (norm_ab * priv->bus.width * 1000000ULL) >> 20;
		bus_profile->ab_mbytes = roundup(ab_mbytes, BW_STEP);
	} else if (bus_profile->flag) {
		/*
		 * NOTE(review): the original body was truncated here; the
		 * tail below is restored from the upstream shape of this
		 * governor -- confirm against project history.
		 */
		/* Re-calculate the AB percentage for a new IB vote */
		norm_ab =  (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
		if (norm_ab > norm_ab_max)
			norm_ab_max = norm_ab;
		bus_profile->percent_ab = (100 * norm_ab) / norm_ab_max;
	}

	/* Start a fresh accumulation window. */
	priv->bus.total_time = 0;
	priv->bus.gpu_time = 0;
	priv->bus.ram_time = 0;
	priv->bus.ram_wait = 0;

	return result;
}
/*
 * devfreq_gpubw_get_target() - compute the bus vote hint.
 *
 * Accumulates GPU/RAM statistics until at least LONG_FLOOR of total time
 * has elapsed, then either raises the cut-over watermark (FAST hint) or
 * compares the normalized RAM cycles against the per-level up/down
 * cut-overs to pick a FAST or SLOW hint.
 *
 * Returns the status of get_dev_status().
 */
static int devfreq_gpubw_get_target(struct devfreq *df,
                                    unsigned long *freq,
                                    u32 *flag)
{

    struct devfreq_msm_adreno_tz_data *priv = df->data;
    struct msm_busmon_extended_profile *bus_profile = container_of(
                (df->profile),
                struct msm_busmon_extended_profile,
                profile);
    struct devfreq_dev_status stats;
    struct xstats b;
    int ret;
    int lvl = 0;
    int busy_level;
    int ram_cycles;
    int busy_pct;

    stats.private_data = &b;

    ret = df->profile->get_dev_status(df->dev.parent, &stats);

    *freq = stats.current_frequency;

    /* Fold this sample into the running window. */
    priv->bus.total_time += stats.total_time;
    priv->bus.gpu_time += stats.busy_time;
    priv->bus.ram_time += b.ram_time;
    priv->bus.ram_time += b.ram_wait;

    lvl = devfreq_get_freq_level(df, stats.current_frequency);

    /* Keep accumulating until the sample window is long enough. */
    if (priv->bus.total_time < LONG_FLOOR)
        return ret;

    ram_cycles = (unsigned int)priv->bus.ram_time /
                 (unsigned int)priv->bus.total_time;
    busy_pct = (100 * (unsigned int)priv->bus.gpu_time) /
               (unsigned int)priv->bus.total_time;

    /*
     * If there's a new high watermark, update the cutoffs and send the
     * FAST hint.  Otherwise check the current value against the current
     * cutoffs.
     */
    if (ram_cycles > priv->bus.max) {
        _update_cutoff(priv, ram_cycles);
        bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
    } else {
        /* GPU votes for IB not AB so don't under vote the system */
        ram_cycles = (100 * ram_cycles) / TARGET;
        busy_level = priv->bus.index[lvl] + b.mod;
        if (busy_level < 0)
            busy_level = 0;
        if (busy_level >= priv->bus.num)
            busy_level = priv->bus.num - 1;
        if (ram_cycles > priv->bus.up[busy_level] && busy_pct > CAP)
            bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
        else if (ram_cycles < priv->bus.down[busy_level] && lvl)
            bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
    }

    /* Start a fresh accumulation window. */
    priv->bus.total_time = 0;
    priv->bus.gpu_time = 0;
    priv->bus.ram_time = 0;

    return ret;
}