/*
 * cpufreq policy notifier: keep the per-policy stats table in sync.
 *
 * CPUFREQ_UPDATE_POLICY_CPU re-homes the existing stats to the policy's
 * new owning CPU; CPUFREQ_NOTIFY (re)creates the stats table.  All other
 * events are ignored.  Returns 0, or the error from table creation.
 */
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	if (val != CPUFREQ_NOTIFY)
		return 0;

	table = cpufreq_frequency_get_table(policy->cpu);
	if (!table)
		return 0;

	return cpufreq_stats_create_table(policy, table);
}
/*
 * Ensure stats structures exist for @cpu: create the all-CPU aggregate
 * stats if missing, then the per-policy stats table.
 *
 * Returns -ENODEV when no policy or frequency table is available,
 * otherwise the result of cpufreq_stats_create_table().
 */
static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	int status = -ENODEV;

	if (!policy)
		return -ENODEV;

	freq_table = cpufreq_frequency_get_table(cpu);
	if (freq_table) {
		if (!per_cpu(all_cpufreq_stats, cpu))
			cpufreq_allstats_create(cpu);
		status = cpufreq_stats_create_table(policy, freq_table);
	}

	cpufreq_cpu_put(policy);
	return status;
}
/*
 * touchboost_freq_store - sysfs store handler for the touch-boost frequency.
 * @dev/@attr: standard device-attribute arguments (unused).
 * @buf/@count: user-supplied text and its length.
 *
 * Parses a frequency (kHz) from @buf and accepts it only if it appears in
 * the CPU0 frequency table, in which case input_boost_freq is updated and
 * @count is returned.  Returns -EINVAL on parse failure, missing table, or
 * a frequency not present in the table.
 */
static ssize_t touchboost_freq_store(struct device *dev, struct device_attribute *attr,
					const char *buf, size_t count)
{
	int ret;			/* sscanf conversion count; must be signed */
	unsigned int input = 0;
	int i;
	struct cpufreq_frequency_table *table;

	/* Read the requested value; %u matches the unsigned destination. */
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	/* Get the system frequency table (CPU0's table). */
	table = cpufreq_frequency_get_table(0);
	if (!table) {
		pr_err("Touchboost switch : could not retrieve cpu freq table\n");
		return -EINVAL;
	}

	/* Allow only frequencies present in the system table. */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (table[i].frequency == input) {
			input_boost_freq = input;
			pr_debug("Touchboost switch : frequency for touch boost found\n");
			return count;
		}
	}

	pr_err("Touchboost switch : invalid frequency requested\n");
	return -EINVAL;
}
/*
 * cpufreq policy notifier: create or free the per-policy stats table in
 * response to policy lifecycle events.
 *
 * CPUFREQ_UPDATE_POLICY_CPU re-homes existing stats; CREATE_POLICY builds
 * the table and REMOVE_POLICY tears it down.  -EBUSY from creation means
 * the table already exists and is reported as success.
 */
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table;
	int ret = 0;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	table = cpufreq_frequency_get_table(policy->cpu);
	if (!table)
		return 0;

	switch (val) {
	case CPUFREQ_CREATE_POLICY:
		ret = __cpufreq_stats_create_table(policy, table);
		break;
	case CPUFREQ_REMOVE_POLICY:
		__cpufreq_stats_free_table(policy);
		break;
	}

	return ret == -EBUSY ? 0 : ret;
}
/*
 * Cache the cpufreq frequency table (file-scope 'table') and initialise the
 * index limits used by thermal mitigation: limit_idx_low is the lowest
 * index we may throttle down to, limit_idx_high/limit_idx start at the top.
 *
 * Returns 0 on success, -EINVAL when the cpufreq table is not available.
 */
static int msm_thermal_get_freq_table(void)
{
	int ret = 0;
	int i = 0;

	/* NOTE(review): reads CPU0's table only — presumably all cores share
	 * one table on this platform; confirm before reuse elsewhere. */
	table = cpufreq_frequency_get_table(0);
	if (table == NULL) {
		pr_debug("%s: error reading cpufreq table\n", KBUILD_MODNAME);
		ret = -EINVAL;
		goto fail;
	}

	/* Count entries up to the terminator; i ends as the entry count. */
	while (table[i].frequency != CPUFREQ_TABLE_END)
		i++;
#if defined (CONFIG_MACH_M2_REFRESHSPR)
	/* This board never throttles below table index 5. */
	limit_idx_low = 5;
#else
	limit_idx_low = 0;
#endif
	/* Start unmitigated: current and ceiling indices at the top entry. */
	limit_idx_high = limit_idx = i - 1;
	BUG_ON(limit_idx_high <= 0 || limit_idx_high <= limit_idx_low);
fail:
	return ret;
}
Example #6
0
/*
 * Create the stats structures for @cpu: count the valid entries of its
 * frequency table, then create (if absent) the aggregate all-CPU stats,
 * the per-CPU power stats for every possible CPU, and finally the
 * per-policy stats table.
 *
 * Returns -ENODEV when no policy or table exists, otherwise the result
 * of cpufreq_stats_create_table().
 */
static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;
	int idx, nr_valid, other_cpu, ret = -ENODEV;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -ENODEV;

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		goto put_policy;

	/* Count usable (non-invalid) entries in the table. */
	nr_valid = 0;
	for (idx = 0; table[idx].frequency != CPUFREQ_TABLE_END; idx++) {
		if (table[idx].frequency != CPUFREQ_ENTRY_INVALID)
			nr_valid++;
	}

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, nr_valid);

	for_each_possible_cpu(other_cpu) {
		if (!per_cpu(cpufreq_power_stats, other_cpu))
			cpufreq_powerstats_create(other_cpu, table, nr_valid);
	}

	ret = cpufreq_stats_create_table(policy, table, nr_valid);

put_policy:
	cpufreq_cpu_put(policy);
	return ret;
}
Example #7
0
/*
 * cpufreq_governor_interactivex - governor event callback for interactiveX.
 * @new_policy: the policy the event applies to
 * @event: CPUFREQ_GOV_START / CPUFREQ_GOV_STOP / CPUFREQ_GOV_LIMITS
 *
 * On the first START, installs the custom idle hook, creates the sysfs
 * group, registers the early-suspend handler and derives resum_speed /
 * freq_threshld from the frequency table.  STOP reverses this on the last
 * user.  LIMITS clamps the current frequency into the new policy bounds.
 */
static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
		unsigned int event)
{
	int rc;
	unsigned int min_freq = ~0;	/* start above any real frequency */
	unsigned int max_freq = 0;
	unsigned int i;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(new_policy->cpu))
			return -EINVAL;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactivex_attr_group);
		if (rc)
			return rc;

		/* Swap in our idle routine; the original is restored on STOP. */
		pm_idle_old = pm_idle;
		pm_idle = cpufreq_idle;
		policy = new_policy;
		enabled = 1;
        	register_early_suspend(&interactivex_power_suspend);
        	pr_info("[imoseyon] interactiveX active\n");
		/* Scan the table for the lowest/highest valid frequencies.
		 * NOTE(review): freq_table is not NULL-checked, and with an
		 * empty table (i == 0) the (i-1)/2 index below underflows —
		 * presumed impossible on this platform; confirm. */
		freq_table = cpufreq_frequency_get_table(new_policy->cpu);
		for (i = 0; (freq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
			unsigned int freq = freq_table[i].frequency;
			if (freq == CPUFREQ_ENTRY_INVALID) {
				continue;
			}
			if (freq < min_freq)
				min_freq = freq;
			if (freq > max_freq)
				max_freq = freq;
		}
		resum_speed = freq_table[(i-1)/2].frequency > min_freq ? freq_table[(i-1)/2].frequency : max_freq;		//Value in midrange of available CPU frequencies if sufficient number of freq bins available
		freq_threshld = max_freq;
		break;

	case CPUFREQ_GOV_STOP:
		if (atomic_dec_return(&active_count) > 1)
			return 0;

		sysfs_remove_group(cpufreq_global_kobject,
				&interactivex_attr_group);

		/* Restore the original idle routine and stop our per-CPU timer. */
		pm_idle = pm_idle_old;
		del_timer(&per_cpu(cpu_timer, new_policy->cpu));
		enabled = 0;
        	unregister_early_suspend(&interactivex_power_suspend);
        	pr_info("[imoseyon] interactiveX inactive\n");
			break;

	case CPUFREQ_GOV_LIMITS:
		/* Clamp the current speed into the new [min, max] window. */
		if (new_policy->max < new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->max, CPUFREQ_RELATION_H);
		else if (new_policy->min > new_policy->cur)
			__cpufreq_driver_target(new_policy,
					new_policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}
/**
 * get_property - fetch a property of interest for a given cpu.
 * @cpu: cpu for which the property is required
 * @input: query parameter
 * @output: query return
 * @property: type of query (frequency, level, max level)
 *
 * This is the common function to
 * 1. get maximum cpu cooling states
 * 2. translate frequency to cooling state
 * 3. translate cooling state to frequency
 * Note that the code may be not in good shape
 * but it is written in this way in order to:
 * a) reduce duplicate code as most of the code can be shared.
 * b) make sure the logic is consistent when translating between
 *    cooling states and frequencies.
 *
 * Return: 0 on success, -EINVAL when invalid parameters are passed.
 */
static int get_property(unsigned int cpu, unsigned long input,
			unsigned int *output,
			enum cpufreq_cooling_property property)
{
	int i, j;
	unsigned long max_level = 0, level = 0;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	int descend = -1;
	struct cpufreq_frequency_table *table =
					cpufreq_frequency_get_table(cpu);

	if (!output)
		return -EINVAL;

	if (!table)
		return -EINVAL;

	/* First pass: count distinct valid entries and detect sort order. */
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entries */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;

		/* get the frequency order (set once, from first two entries) */
		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
			descend = !!(freq > table[i].frequency);

		freq = table[i].frequency;
		max_level++;
	}

	/* get max level (here max_level is a count of distinct levels) */
	if (property == GET_MAXL) {
		*output = (unsigned int)max_level;
		return 0;
	}

	/* Cooling level 0 is the highest frequency, so mirror the index
	 * when the table is sorted ascending. */
	if (property == GET_FREQ)
		level = descend ? input : (max_level - input - 1);

	/* Second pass: walk distinct valid entries; j is the running index. */
	for (i = 0, j = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		/* ignore invalid entry */
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;

		/* ignore duplicate entry */
		if (freq == table[i].frequency)
			continue;

		/* now we have a valid frequency entry */
		freq = table[i].frequency;

		if (property == GET_LEVEL && (unsigned int)input == freq) {
			/* get level by frequency */
			*output = descend ? j : (max_level - j - 1);
			return 0;
		}
		if (property == GET_FREQ && level == j) {
			/* get frequency by level */
			*output = freq;
			return 0;
		}
		j++;
	}

	return -EINVAL;
}
/*
 * cpufreq_governor_dbs - governor event callback.
 * @policy: policy the event applies to
 * @event: CPUFREQ_GOV_START / CPUFREQ_GOV_STOP / CPUFREQ_GOV_LIMITS
 *
 * START initialises the per-CPU state, loads tuning parameters from NV
 * storage (falling back to built-in defaults) and, for the first user,
 * registers ICC and starts the sampling timer.  STOP tears the timer down;
 * LIMITS resets the up/down counters and clamps the current frequency
 * into the new policy bounds.  Always returns 0.
 */
static s32 cpufreq_governor_dbs(struct cpufreq_policy *policy, u32 event)
{
    s32 cpu = (s32)policy->cpu;
    struct cpu_dbs_info_s *dbs_info = NULL;
    u32 retValue = 0;
    ST_PWC_SWITCH_STRU cpufreq_control_nv = {0} ;
    /* per-CPU governor state */
    dbs_info = &per_cpu(g_acpu_dbs_info, (u32)cpu);
    /*lint --e{744 } */
    switch (event) {
    case CPUFREQ_GOV_START:
        cpufreq_debug("CPUFREQ_GOV_START\n");
        mutex_lock(&dbs_mutex);

        dbs_enable++;

        /* initialise per-CPU info: snapshot the current idle/wall time */
        dbs_info->prev_cpu_idle = get_cpu_idle_time(0,
                                  &dbs_info->prev_cpu_wall);
        dbs_info->cur_policy = policy;
        dbs_info->cpu = cpu;
        dbs_info->freq_table = cpufreq_frequency_get_table((u32)cpu);
        dbs_info->cpu_down_time = 0;
        dbs_info->cpu_up_time = 0;
        /* DFS on/off switch comes from NV storage. */
        retValue = bsp_nvm_read(NV_ID_DRV_NV_PWC_SWITCH,(u8*)&cpufreq_control_nv,sizeof(ST_PWC_SWITCH_STRU));
        if (NV_OK == retValue)
        {
            g_cpufreq_lock_status_flag = cpufreq_control_nv.dfs;
        }
        else
        {
            cpufreq_err("read nv failed %d\n", retValue);
        }

        if (1 == dbs_enable) {
            /* First user: load thresholds from NV or fall back to defaults. */
            retValue = bsp_nvm_read(NV_ID_DRV_NV_DFS_SWITCH,(u8*)&g_stDfsSwitch,sizeof(ST_PWC_DFS_STRU));
            if (NV_OK != retValue)
            {
                cpufreq_err("read nv failed use default value\n");
                g_stDfsSwitch.AcpuDownLimit = 20;
                g_stDfsSwitch.AcpuDownNum = 3;
                g_stDfsSwitch.AcpuUpLimit = 80;
                g_stDfsSwitch.AcpuUpNum = 1;
                g_stDfsSwitch.DFSTimerLen = 400;
            }

            dbs_tuners_ins.up_threshold = g_stDfsSwitch.AcpuUpLimit;
            dbs_tuners_ins.down_threshold = g_stDfsSwitch.AcpuDownLimit;
            dbs_tuners_ins.down_threshold_times = g_stDfsSwitch.AcpuDownNum;
            dbs_tuners_ins.up_threshold_times = g_stDfsSwitch.AcpuUpNum;
            dbs_tuners_ins.sampling_rate = g_stDfsSwitch.DFSTimerLen * 10000; /*unit:us*/
            /*
             * Start the timerschedule work, when this governor
             * is used for first time
             */

            register_icc_for_cpufreq();

            dbs_timer_init(dbs_info);
        }
        mutex_unlock(&dbs_mutex);
        break;

    case CPUFREQ_GOV_STOP:
        dbs_timer_exit(dbs_info);

        mutex_lock(&dbs_mutex);
        dbs_enable--;
        mutex_unlock(&dbs_mutex);
        break;

    case CPUFREQ_GOV_LIMITS:

        /* New limits invalidate the hotplug up/down history. */
        mutex_lock(&info_mutex);
        dbs_info->cpu_down_time = 0;
        dbs_info->cpu_up_time = 0;
        mutex_unlock(&info_mutex);
        if (policy->max < dbs_info->cur_policy->cur)
            __cpufreq_driver_target(dbs_info->cur_policy,
                                    policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > dbs_info->cur_policy->cur)
            __cpufreq_driver_target(dbs_info->cur_policy,
                                    policy->min, CPUFREQ_RELATION_L);

        break;
    }
    return 0;
}
Example #10
0
/**
 * get_property - fetch a property of interest for a given cpu.
 * @cpu: cpu for which the property is required
 * @input: query parameter
 * @output: query return
 * @property: type of query (frequency, level, max level)
 *
 * This is the common function to
 * 1. get maximum cpu cooling states
 * 2. translate frequency to cooling state
 * 3. translate cooling state to frequency
 * Note that the code may be not in good shape
 * but it is written in this way in order to:
 * a) reduce duplicate code as most of the code can be shared.
 * b) make sure the logic is consistent when translating between
 *    cooling states and frequencies.
 *
 * Return: 0 on success, -EINVAL when invalid parameters are passed.
 */
static int get_property(unsigned int cpu, unsigned long input,
			unsigned int *output,
			enum cpufreq_cooling_property property)
{
	int i;
	unsigned long max_level = 0, level = 0;
	unsigned int freq = CPUFREQ_ENTRY_INVALID;
	int descend = -1;
	struct cpufreq_frequency_table *pos, *table =
					cpufreq_frequency_get_table(cpu);

	if (!output)
		return -EINVAL;

	if (!table)
		return -EINVAL;

	/* First pass: count distinct valid entries and detect sort order. */
	cpufreq_for_each_valid_entry(pos, table) {
		/* ignore duplicate entry */
		if (freq == pos->frequency)
			continue;

		/* get the frequency order (set once, from first two entries) */
		if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
			descend = freq > pos->frequency;

		freq = pos->frequency;
		max_level++;
	}

	/* No valid cpu frequency entry */
	if (max_level == 0)
		return -EINVAL;

	/* max_level is an index, not a counter */
	max_level--;

	/* get max level */
	if (property == GET_MAXL) {
		*output = (unsigned int)max_level;
		return 0;
	}

	/* Cooling level 0 is the highest frequency, so mirror the index
	 * when the table is sorted ascending. */
	if (property == GET_FREQ)
		level = descend ? input : (max_level - input);

	/* Second pass: walk distinct valid entries; i is the running index. */
	i = 0;
	cpufreq_for_each_valid_entry(pos, table) {
		/* ignore duplicate entry */
		if (freq == pos->frequency)
			continue;

		/* now we have a valid frequency entry */
		freq = pos->frequency;

		if (property == GET_LEVEL && (unsigned int)input == freq) {
			/* get level by frequency */
			*output = descend ? i : (max_level - i);
			return 0;
		}
		if (property == GET_FREQ && level == i) {
			/* get frequency by level */
			*output = freq;
			return 0;
		}
		i++;
	}

	return -EINVAL;
}
Example #11
0
/*
 * Allocate and initialise the per-CPU cpufreq_stats structure for @policy.
 *
 * A single allocation holds time_in_state, freq_table and (optionally)
 * trans_table; the secondary pointers are carved out of it.  Returns 0 on
 * success, -EBUSY if the stats already exist, -ENOMEM on allocation
 * failure, or the sysfs_create_group() error.
 */
static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
{
    unsigned int i, j, count = 0, ret = 0;
    struct cpufreq_stats *stat;
    unsigned int alloc_size;
    unsigned int cpu = policy->cpu;
    struct cpufreq_frequency_table *table;

    table = cpufreq_frequency_get_table(cpu);
    if (unlikely(!table))
        return 0;

    /* Already created for this CPU. */
    if (per_cpu(cpufreq_stats_table, cpu))
        return -EBUSY;
    stat = kzalloc(sizeof(*stat), GFP_KERNEL);
    if ((stat) == NULL)
        return -ENOMEM;

    ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
    if (ret)
        goto error_out;

    stat->cpu = cpu;
    per_cpu(cpufreq_stats_table, cpu) = stat;

    /* Count valid (non-invalid) table entries. */
    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        count++;
    }

    /* One u64 time counter plus one int frequency per state ... */
    alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
    /* ... plus a count x count transition matrix when details are on. */
    alloc_size += count * count * sizeof(int);
#endif
    stat->max_state = count;
    stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
    if (!stat->time_in_state) {
        ret = -ENOMEM;
        goto error_alloc;
    }
    /* freq_table and trans_table live inside the same allocation. */
    stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
    stat->trans_table = stat->freq_table + count;
#endif
    /* Fill freq_table with the distinct valid frequencies. */
    j = 0;
    for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
        unsigned int freq = table[i].frequency;
        if (freq == CPUFREQ_ENTRY_INVALID)
            continue;
        if (freq_table_get_index(stat, freq) == -1)
            stat->freq_table[j++] = freq;
    }
    stat->state_num = j;
    spin_lock(&cpufreq_stats_lock);
    stat->last_time = get_jiffies_64();
    stat->last_index = freq_table_get_index(stat, policy->cur);
    spin_unlock(&cpufreq_stats_lock);
    return 0;
error_alloc:
    sysfs_remove_group(&policy->kobj, &stats_attr_group);
error_out:
    kfree(stat);
    per_cpu(cpufreq_stats_table, cpu) = NULL;
    return ret;
}
Example #12
0
/*
 * cpufreq_frequency_table_target - pick the table index matching a target.
 * @policy: policy whose min/max bound the candidates
 * @table: frequency table to search
 * @target_freq: requested frequency (kHz)
 * @relation: CPUFREQ_RELATION_H (highest freq <= target) or
 *            CPUFREQ_RELATION_L (lowest freq >= target)
 * @index: out parameter for the chosen table index
 *
 * 'optimal' tracks the best entry satisfying the relation; 'suboptimal'
 * the best entry on the wrong side of the target, used as a fallback.
 * driver_data == ~0 marks "not found".  Returns 0 on success, -EINVAL if
 * no in-range entry exists.
 */
int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table,
				   unsigned int target_freq,
				   unsigned int relation,
				   unsigned int *index)
{
	struct cpufreq_frequency_table optimal = {
		.driver_data = ~0,
		.frequency = 0,
	};
	struct cpufreq_frequency_table suboptimal = {
		.driver_data = ~0,
		.frequency = 0,
	};
	unsigned int i;

	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
					target_freq, relation, policy->cpu);

	/* Seed the comparison frequencies so the first candidate wins. */
	switch (relation) {
	case CPUFREQ_RELATION_H:
		suboptimal.frequency = ~0;
		break;
	case CPUFREQ_RELATION_L:
		optimal.frequency = ~0;
		break;
	}

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		/* skip entries outside the policy's current limits */
		if ((freq < policy->min) || (freq > policy->max))
			continue;
		switch (relation) {
		case CPUFREQ_RELATION_H:
			if (freq <= target_freq) {
				if (freq >= optimal.frequency) {
					optimal.frequency = freq;
					optimal.driver_data = i;
				}
			} else {
				if (freq <= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.driver_data = i;
				}
			}
			break;
		case CPUFREQ_RELATION_L:
			if (freq >= target_freq) {
				if (freq <= optimal.frequency) {
					optimal.frequency = freq;
					optimal.driver_data = i;
				}
			} else {
				if (freq >= suboptimal.frequency) {
					suboptimal.frequency = freq;
					suboptimal.driver_data = i;
				}
			}
			break;
		}
	}
	/* driver_data > i (i.e. still ~0) means that slot was never set. */
	if (optimal.driver_data > i) {
		if (suboptimal.driver_data > i)
			return -EINVAL;
		*index = suboptimal.driver_data;
	} else
		*index = optimal.driver_data;

	pr_debug("target index is %u, freq is:%u kHz\n", *index,
		 table[*index].frequency);

	return 0;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);

/*
 * cpufreq_frequency_table_get_index - find @freq in the policy's table.
 *
 * Returns the table index of the exact frequency match, -ENOENT when the
 * policy has no frequency table, or -EINVAL when @freq is not in it.
 */
int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
		unsigned int freq)
{
	struct cpufreq_frequency_table *table =
			cpufreq_frequency_get_table(policy->cpu);
	int idx;

	if (unlikely(!table)) {
		pr_debug("%s: Unable to find frequency table\n", __func__);
		return -ENOENT;
	}

	for (idx = 0; table[idx].frequency != CPUFREQ_TABLE_END; idx++) {
		if (table[idx].frequency == freq)
			return idx;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_get_index);

/**
 * show_available_freqs - show available frequencies for the specified CPU
 * @policy: policy whose freq_table is listed
 * @buf: sysfs output buffer
 * @show_boost: true to list only boost frequencies, false for normal ones
 *
 * Returns the number of bytes written, or -ENODEV without a freq table.
 */
static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
				    bool show_boost)
{
	unsigned int i = 0;
	ssize_t count = 0;
	struct cpufreq_frequency_table *table = policy->freq_table;

	if (!table)
		return -ENODEV;

	for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;
		/*
		 * show_boost = true and driver_data = BOOST freq
		 * display BOOST freqs
		 *
		 * show_boost = false and driver_data = BOOST freq
		 * show_boost = true and driver_data != BOOST freq
		 * continue - do not display anything
		 *
		 * show_boost = false and driver_data != BOOST freq
		 * display NON BOOST freqs
		 */
		if (show_boost ^ (table[i].flags & CPUFREQ_BOOST_FREQ))
			continue;

		count += sprintf(&buf[count], "%d ", table[i].frequency);
	}
	count += sprintf(&buf[count], "\n");

	return count;

}

/*
 * Declare a read-only freq_attr named cpufreq_freq_attr_<name>_freqs whose
 * show callback is <name>_frequencies_show (via __ATTR_RO token pasting).
 */
#define cpufreq_attr_available_freq(_name)	  \
struct freq_attr cpufreq_freq_attr_##_name##_freqs =     \
__ATTR_RO(_name##_frequencies)

/**
 * show_scaling_available_frequencies - show available normal frequencies for
 * the specified CPU
 */
static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
						  char *buf)
{
	/* Non-boost frequencies only. */
	return show_available_freqs(policy, buf, false);
}
cpufreq_attr_available_freq(scaling_available);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_available_freqs);

/**
 * show_available_boost_freqs - show available boost frequencies for
 * the specified CPU
 */
static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
					      char *buf)
{
	/* Boost frequencies only. */
	return show_available_freqs(policy, buf, true);
}
cpufreq_attr_available_freq(scaling_boost);
EXPORT_SYMBOL_GPL(cpufreq_freq_attr_scaling_boost_freqs);

/* Default sysfs attribute set drivers can expose as-is; boost frequencies
 * are included only when boost support is compiled in. */
struct freq_attr *cpufreq_generic_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
#ifdef CONFIG_CPU_FREQ_BOOST_SW
	&cpufreq_freq_attr_scaling_boost_freqs,
#endif
	NULL,
};
EXPORT_SYMBOL_GPL(cpufreq_generic_attr);

/*
 * Validate @table against @policy's cpuinfo limits and, on success,
 * publish it as the policy's frequency table.  Returns the result of
 * cpufreq_frequency_table_cpuinfo().
 */
int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
				      struct cpufreq_frequency_table *table)
{
	int ret = cpufreq_frequency_table_cpuinfo(policy, table);

	if (ret)
		return ret;

	policy->freq_table = table;
	return 0;
}
/* Thin wrapper returning the cpufreq frequency table for @cpu. */
struct cpufreq_frequency_table *op_get_freq_tbl(unsigned int cpu)
{
	return cpufreq_frequency_get_table(cpu);
}
/* Initialise per-CPU powersave-bias state: cache the frequency table and
 * clear the cached low frequency. */
static void greenmax_powersave_bias_init_cpu(int cpu)
{
	struct greenmax_info_s *greenmax_info = &per_cpu(greenmax_info, cpu);
	greenmax_info->freq_table = cpufreq_frequency_get_table(cpu);
	greenmax_info->freq_lo = 0;
}