Example 1
/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, suppose
 * the original sampling_rate was 1 second and the user requests a new rate of
 * 10 ms because they need an immediate reaction from the ondemand governor,
 * without being sure whether a higher frequency will be required. In that
 * case the governor could pick up the rate change up to 1 second too late.
 * Thus, when reducing the sampling rate, we need to make the new value
 * effective immediately.
 */
static void update_sampling_rate(unsigned int new_rate)
{
	int cpu;

	od_tuners.sampling_rate = new_rate = max(new_rate,
			od_dbs_data.min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			schedule_delayed_work_on(cpu, &dbs_info->cdbs.work,
					usecs_to_jiffies(new_rate));

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}
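
For context, update_sampling_rate() in this variant is driven by the governor's sysfs tunable. A minimal sketch of such a store handler, following the pattern of the dbs_data-based variant shown in Example 17 below (handler name and attribute wiring are assumptions, not taken from this example):

static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
				   const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	/* takes effect immediately when the rate is being reduced */
	update_sampling_rate(input);
	return count;
}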
Example 2
static void check_temp(struct work_struct *work)
{
    struct cpufreq_policy *cpu_policy = NULL;
    struct tsens_device tsens_dev;
    unsigned long temp = 0;
    unsigned int max_freq = 0;
    int update_policy = 0;
    int cpu = 0;
    int ret = 0;

    tsens_dev.sensor_num = DEF_TEMP_SENSOR;
    ret = tsens_get_temp(&tsens_dev, &temp);
    if (ret) {
        pr_debug("msm_thermal: Unable to read TSENS sensor %d\n",
                 tsens_dev.sensor_num);
        goto reschedule;
    }

    pr_info("msm_thermal: TSENS sensor %d (%lu C)\n",
            tsens_dev.sensor_num, temp);

    for_each_possible_cpu(cpu) {
        update_policy = 0;
        cpu_policy = cpufreq_cpu_get(cpu);
        if (!cpu_policy) {
            pr_debug("msm_thermal: NULL policy on cpu %d\n", cpu);
            continue;
        }
        if (temp >= allowed_max_high) {
            if (cpu_policy->max > allowed_max_freq) {
                update_policy = 1;
                max_freq = allowed_max_freq;
            } else {
                pr_debug("msm_thermal: policy max for cpu %d "
                         "already < allowed_max_freq\n", cpu);
            }
        } else if (temp < allowed_max_low) {
            if (cpu_policy->max < cpu_policy->cpuinfo.max_freq) {
                max_freq = cpu_policy->cpuinfo.max_freq;
                update_policy = 1;
            } else {
                pr_debug("msm_thermal: policy max for cpu %d "
                         "already at max allowed\n", cpu);
            }
        }

        if (update_policy)
            update_cpu_max_freq(cpu_policy, cpu, max_freq);

        cpufreq_cpu_put(cpu_policy);
    }

reschedule:
    if (enabled)
        schedule_delayed_work(&check_temp_work,
                              msecs_to_jiffies(check_interval_ms));
}
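
check_temp() depends on an update_cpu_max_freq() helper that is not shown in this example. A plausible sketch, assuming it clamps the user policy maximum and asks cpufreq to re-evaluate the policy (field usage and the log message are illustrative):

static int update_cpu_max_freq(struct cpufreq_policy *cpu_policy,
			       int cpu, int max_freq)
{
	int ret;

	if (!cpu_policy)
		return -EINVAL;

	/* clamp the user-visible maximum, then re-evaluate the policy */
	cpu_policy->user_policy.max = max_freq;
	ret = cpufreq_update_policy(cpu);
	if (!ret)
		pr_info("msm_thermal: limiting cpu%d max frequency to %d\n",
			cpu, max_freq);
	return ret;
}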
Example 3
/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	if (policy && (cpumask_weight(policy->cpus) == 1)) {
		pr_debug("%s: Free sysfs stat\n", __func__);
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	}
	if (policy)
		cpufreq_cpu_put(policy);
}
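
The ordering constraint in the comment is easiest to see from the call site. A hypothetical hotplug-notifier sketch that invokes the teardown at CPU_DOWN_PREPARE, i.e. before cpufreq_remove_dev() has torn the policy down (the notifier name is assumed):

static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
				     unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/* policy is still valid at this point */
		cpufreq_stats_free_sysfs(cpu);
		break;
	}
	return NOTIFY_OK;
}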
Example 4
/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

	if (!policy)
		return;

	if (!cpufreq_frequency_get_table(cpu)) {
		/* drop the reference taken above before bailing out */
		cpufreq_cpu_put(policy);
		return;
	}

	if (policy->cpu == cpu)
		sysfs_remove_group(&policy->kobj, &stats_attr_group);

	cpufreq_cpu_put(policy);
}
Example 5
unsigned int compat_cpufreq_quick_get_max(unsigned int cpu)
{
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	unsigned int ret_freq = 0;

	if (policy) {
		ret_freq = policy->max;
		cpufreq_cpu_put(policy);
	}

	return ret_freq;
}
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_policy *policy;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return;

	if (cpufreq_frequency_get_table(policy->cpu))
		__cpufreq_stats_free_table(policy);

	cpufreq_cpu_put(policy);
}
int aw_pm_begin(suspend_state_t state)
{
    struct cpufreq_policy *policy;

    PM_DBG("%d state begin:%d\n", state, debug_mask);

    /* pin cpufreq to suspend_freq for the duration of suspend */
#ifdef CONFIG_CPU_FREQ_USR_EVNT_NOTIFY
    //cpufreq_user_event_notify();
#endif

    backup_max_freq = 0;
    backup_min_freq = 0;
    policy = cpufreq_cpu_get(0);
    if (!policy)
    {
        PM_DBG("line:%d cpufreq_cpu_get failed!\n", __LINE__);
        goto out;
    }

    backup_max_freq = policy->max;
    backup_min_freq = policy->min;
    policy->user_policy.max = suspend_freq;
    policy->user_policy.min = suspend_freq;
    cpufreq_cpu_put(policy);
    cpufreq_update_policy(0);

    /* must init the perf counter, because delay_us and delay_ms depend on it */
#ifndef GET_CYCLE_CNT
    backup_perfcounter();
    init_perfcounters(1, 0);
#endif

    if (unlikely(debug_mask & PM_STANDBY_PRINT_REG)) {
        printk("before dev suspend, line:%d\n", __LINE__);
        show_reg(SW_VA_CCM_IO_BASE, (CCU_REG_LENGTH)*4, "ccu");
        show_reg(SW_VA_PORTC_IO_BASE, GPIO_REG_LENGTH*4, "gpio");
        show_reg(SW_VA_TIMERC_IO_BASE, TMR_REG_LENGTH*4, "timer");
        show_reg(SW_VA_TWI0_IO_BASE, TWI0_REG_LENGTH*4, "twi0");
        show_reg(SW_VA_SRAM_IO_BASE, SRAM_REG_LENGTH*4, "sram");
        if (userdef_reg_addr != 0 && userdef_reg_size != 0) {
            show_reg(userdef_reg_addr, userdef_reg_size*4, "user defined");
        }
    }
    return 0;

out:
    return -1;
}
Example 8
static void cpufreq_stats_free_table(unsigned int cpu)
{
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu);
	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
	if (policy && policy->cpu == cpu)
		sysfs_remove_group(&policy->kobj, &stats_attr_group);
	if (stat) {
		kfree(stat->time_in_state);
		kfree(stat);
	}
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	if (policy)
		cpufreq_cpu_put(policy);
}
Example 9
/* must be called early in the CPU removal sequence (before
 * cpufreq_remove_dev) so that policy is still valid.
 */
static void cpufreq_stats_free_sysfs(unsigned int cpu)
{
    struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);

    if (!policy)
        return;

    if (!cpufreq_frequency_get_table(cpu)) {
        /* drop the reference taken above before bailing out */
        cpufreq_cpu_put(policy);
        return;
    }

    if (!policy_is_shared(policy)) {
        pr_debug("%s: Free sysfs stat\n", __func__);
        sysfs_remove_group(&policy->kobj, &stats_attr_group);
    }
    cpufreq_cpu_put(policy);
}
static void disable_msm_thermal(void)
{
	int cpu = 0;
	struct cpufreq_policy *cpu_policy = NULL;

	for_each_possible_cpu(cpu) {
		cpu_policy = cpufreq_cpu_get(cpu);
		if (!cpu_policy)
			continue;
		if (cpu_policy->max < cpu_policy->cpuinfo.max_freq)
			update_cpu_max_freq(cpu_policy, cpu,
					    cpu_policy->cpuinfo.max_freq);
		cpufreq_cpu_put(cpu_policy);
	}
}
Example 11
static void cpufreq_stats_create_table(unsigned int cpu)
{
    struct cpufreq_policy *policy;

    /*
     * "likely(!policy)" because normally cpufreq_stats will be registered
     * before the cpufreq driver.
     */
    policy = cpufreq_cpu_get(cpu);
    if (likely(!policy))
        return;

    __cpufreq_stats_create_table(policy);

    cpufreq_cpu_put(policy);
}
Example 12
static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;
	int ret = -ENODEV;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -ENODEV;

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		goto out;

	ret = cpufreq_stats_create_table(policy, table);

out:
	cpufreq_cpu_put(policy);
	return ret;
}
static ssize_t show_all_time_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu, freq;
	int index;
	struct all_cpufreq_stats *all_stat;
	struct cpufreq_policy *policy;

	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
		if (cpu_online(cpu))
			cpufreq_stats_update(cpu);
	}

	if (!all_freq_table)
		goto out;
	for (i = 0; i < all_freq_table->table_size; i++) {
		freq = all_freq_table->freq_table[i];
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
		for_each_possible_cpu(cpu) {
			policy = cpufreq_cpu_get(cpu);
			if (policy == NULL)
				continue;
			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
			index = get_index_all_cpufreq_stat(all_stat, freq);
			if (index != -1) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"%llu\t\t", (unsigned long long)
					cputime64_to_clock_t(all_stat->time_in_state[index]));
			} else {
				len += scnprintf(buf + len, PAGE_SIZE - len,
						"N/A\t\t");
			}
			cpufreq_cpu_put(policy);
		}
	}

out:
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
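
get_index_all_cpufreq_stat() is used above but not defined here; a straightforward sketch, assuming struct all_cpufreq_stats carries max_state and freq_table fields matching the lookups above:

static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
				      unsigned int freq)
{
	int i;

	if (!all_stat)
		return -1;
	for (i = 0; i < all_stat->max_state; i++)
		if (all_stat->freq_table[i] == freq)
			return i;
	return -1;
}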
Example 14
/*
*********************************************************************************************************
*                           aw_pm_end
*
*Description: Notify the platform that the system is back in the working state.
*
*Arguments  : none
*
*Return     : none
*
*Notes      : This function is called by the PM core right after resuming devices, to indicate to
*             the platform that the system has returned to the working state or that the
*             transition to the sleep state has been aborted. It is the counterpart of the
*             aw_pm_begin function.
*********************************************************************************************************
*/
void aw_pm_end(void)
{
    struct cpufreq_policy *policy;

#ifndef GET_CYCLE_CNT
    #ifndef IO_MEASURE
            restore_perfcounter();
    #endif
#endif
    pm_disable_watchdog(dogMode);
    if (backup_max_freq != 0 && backup_min_freq != 0) {
        policy = cpufreq_cpu_get(0);
        if (!policy) {
            printk("aw_pm_end: cpufreq_cpu_get failed! line:%d\n", __LINE__);
            return;
        }

        policy->user_policy.max = backup_max_freq;
        policy->user_policy.min = backup_min_freq;
        cpufreq_cpu_put(policy);
        cpufreq_update_policy(0);
    }

    if (unlikely(debug_mask & PM_STANDBY_PRINT_REG)) {
        printk("after dev suspend, line:%d\n", __LINE__);
        show_reg(SW_VA_CCM_IO_BASE, (CCU_REG_LENGTH)*4, "ccu");
        show_reg(SW_VA_PORTC_IO_BASE, GPIO_REG_LENGTH*4, "gpio");
        show_reg(SW_VA_TIMERC_IO_BASE, TMR_REG_LENGTH*4, "timer");
        show_reg(SW_VA_TWI0_IO_BASE, TWI0_REG_LENGTH*4, "twi0");
        show_reg(SW_VA_SRAM_IO_BASE, SRAM_REG_LENGTH*4, "sram");
        if (userdef_reg_addr != 0 && userdef_reg_size != 0) {
            show_reg(userdef_reg_addr, userdef_reg_size*4, "user defined");
        }
    }

    PM_DBG("aw_pm_end!\n");
}
Example 15
static int update_cpu_min_freq_all(uint32_t min)
{
	int cpu = 0;
	int ret = 0;
	struct cpufreq_policy *policy = NULL;

	if (!freq_table_get) {
		ret = check_freq_table();
		if (ret) {
			pr_err("%s:Fail to get freq table\n", __func__);
			return ret;
		}
	}
	/* If min is larger than allowed max */
	if (min != MSM_CPUFREQ_NO_LIMIT &&
			min > table[limit_idx_high].frequency)
		min = table[limit_idx_high].frequency;

	for_each_possible_cpu(cpu) {
		ret = msm_cpufreq_set_freq_limits(cpu, min, limited_max_freq);
		if (ret) {
			pr_err("%s:Fail to set limits for cpu%d\n",
					__func__, cpu);
			return ret;
		}

		if (cpu_online(cpu)) {
			policy = cpufreq_cpu_get(cpu);
			if (!policy)
				continue;
			cpufreq_driver_target(policy, policy->cur,
					CPUFREQ_RELATION_L);
			cpufreq_cpu_put(policy);
		}
	}

	return ret;
}
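
msm_cpufreq_set_freq_limits() is platform code that is not shown here. A hypothetical sketch of the bookkeeping it implies: record per-CPU bounds that the platform driver's verify path later clamps against (the struct and field names are illustrative, not the actual MSM implementation):

struct cpu_freq_limits {
	uint32_t allowed_min;
	uint32_t allowed_max;
};

static DEFINE_PER_CPU(struct cpu_freq_limits, cpu_freq_limits);

int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
{
	struct cpu_freq_limits *limit = &per_cpu(cpu_freq_limits, cpu);

	/* remembered here; enforced when the policy is next verified */
	limit->allowed_min = min;
	limit->allowed_max = max;
	return 0;
}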
Example 16
static int cpufreq_stats_create_table_cpu(unsigned int cpu)
{
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *table;
	int i, count, cpu_num, ret = -ENODEV;

	policy = cpufreq_cpu_get(cpu);
	if (!policy)
		return -ENODEV;

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		goto out;

	count = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;

		if (freq != CPUFREQ_ENTRY_INVALID)
			count++;
	}

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	for_each_possible_cpu(cpu_num) {
		if (!per_cpu(cpufreq_power_stats, cpu_num))
			cpufreq_powerstats_create(cpu_num, table, count);
	}

	ret = cpufreq_stats_create_table(policy, table, count);

out:
	cpufreq_cpu_put(policy);
	return ret;
}
Example 17
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;
	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if (!stat)
		return -ENOMEM;

	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1)
			stat->freq_table[j++] = freq;
	}
	stat->state_num = j;
	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
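
freq_table_get_index() is referenced throughout these stats examples; the conventional implementation is a linear scan of the per-CPU stat's frequency table (a sketch consistent with the max_state and freq_table fields used above):

static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
{
	int index;

	for (index = 0; index < stat->max_state; index++)
		if (stat->freq_table[index] == freq)
			return index;
	return -1;
}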
/*
 * Every sampling_rate, we check, if current idle time is less than 20%
 * (default), then we try to increase frequency. Every sampling_rate, we look
 * for the lowest frequency which can sustain the load while keeping idle time
 * over 30%. If such a frequency exists, we try to decrease to this frequency.
 *
 * Any frequency increase takes it to the maximum frequency. Frequency reduction
 * happens at minimum steps of 5% (default) of current frequency
 */
static void od_check_cpu(int cpu, unsigned int load_freq)
{
	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
	struct dbs_data *dbs_data = policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;

	dbs_info->freq_lo = 0;

	/* Check for frequency increase */
#ifdef CONFIG_ARCH_HI6XXX
	if (load_freq > od_tuners->od_6xxx_up_threshold * policy->cur) {
		unsigned int freq_next;

		/* If increasing speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		if (load_freq > od_tuners->up_threshold * policy->cur)
			freq_next = policy->max;
		else
			freq_next = load_freq / od_tuners->od_6xxx_up_threshold;

		dbs_freq_increase(policy, freq_next);
		return;
	}
#else
	if (load_freq > od_tuners->up_threshold * policy->cur) {
		/* If switching to max speed, apply sampling_down_factor */
		if (policy->cur < policy->max)
			dbs_info->rate_mult =
				od_tuners->sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}
#endif

	/* Check for frequency decrease */
	/* if we cannot reduce the frequency anymore, break out early */
	if (policy->cur == policy->min)
		return;

	/*
	 * The optimal frequency is the frequency that is the lowest that can
	 * support the current CPU usage without triggering the up policy. To be
	 * safe, we focus 10 points under the threshold.
	 */
#ifdef CONFIG_ARCH_HI6XXX
	if (load_freq < od_tuners->od_6xxx_down_threshold
			* policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners->od_6xxx_down_threshold;
#else
	if (load_freq < od_tuners->adj_up_threshold
			* policy->cur) {
		unsigned int freq_next;
		freq_next = load_freq / od_tuners->adj_up_threshold;
#endif

		/* No longer fully busy, reset rate_mult */
		dbs_info->rate_mult = 1;

		if (freq_next < policy->min)
			freq_next = policy->min;

		if (!od_tuners->powersave_bias) {
			__cpufreq_driver_target(policy, freq_next,
					CPUFREQ_RELATION_L);
			return;
		}

		freq_next = od_ops.powersave_bias_target(policy, freq_next,
					CPUFREQ_RELATION_L);
		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
	}
}

static void od_dbs_timer(struct work_struct *work)
{
	struct od_cpu_dbs_info_s *dbs_info =
		container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work);
	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
			cpu);
	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int delay = 0, sample_type = core_dbs_info->sample_type;
	bool modify_all = true;

	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
	if (!need_load_eval(&core_dbs_info->cdbs, od_tuners->sampling_rate)) {
		modify_all = false;
		goto max_delay;
	}

	/* Common NORMAL_SAMPLE setup */
	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
	if (sample_type == OD_SUB_SAMPLE) {
		delay = core_dbs_info->freq_lo_jiffies;
		__cpufreq_driver_target(core_dbs_info->cdbs.cur_policy,
				core_dbs_info->freq_lo, CPUFREQ_RELATION_H);
	} else {
		dbs_check_cpu(dbs_data, cpu);
		if (core_dbs_info->freq_lo) {
			/* Setup timer for SUB_SAMPLE */
			core_dbs_info->sample_type = OD_SUB_SAMPLE;
			delay = core_dbs_info->freq_hi_jiffies;
		}
	}

max_delay:
	if (!delay)
		delay = delay_for_sampling_rate(od_tuners->sampling_rate
				* core_dbs_info->rate_mult);

	gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy, delay, modify_all);
	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
}
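
For context, the need_load_eval() gate used above throttles sampling on shared policies so that only one CPU of a policy evaluates load per interval; a sketch along the lines of the common governor code of this era (not verified against this exact tree):

bool need_load_eval(struct cpu_dbs_common_info *cdbs,
		unsigned int sampling_rate)
{
	if (policy_is_shared(cdbs->cur_policy)) {
		ktime_t time_now = ktime_get();
		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);

		/* do nothing if we have sampled recently */
		if (delta_us < (s64)(sampling_rate / 2))
			return false;

		cdbs->time_stamp = time_now;
	}
	return true;
}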

/************************** sysfs interface ************************/
static struct common_dbs_data od_dbs_cdata;

/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating
 * dbs_tuners_int.sampling_rate might not be appropriate. For example, suppose
 * the original sampling_rate was 1 second and the user requests a new rate of
 * 10 ms because they need an immediate reaction from the ondemand governor,
 * without being sure whether a higher frequency will be required. In that
 * case the governor could pick up the rate change up to 1 second too late.
 * Thus, when reducing the sampling rate, we need to make the new value
 * effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
}

static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	unsigned int input;
	int ret;
	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_sampling_rate(dbs_data, input);
	return count;
}
Example 19
static int cpufreq_stats_create_table(struct cpufreq_policy *policy,
		struct cpufreq_frequency_table *table)
{
	unsigned int i, j, count = 0, ret = 0;
	struct cpufreq_stats *stat;
	struct cpufreq_policy *data;
	unsigned int alloc_size;
	unsigned int cpu = policy->cpu;
	struct cpufreq_stats *prev_stat = per_cpu(prev_cpufreq_stats_table, cpu);

	if (per_cpu(cpufreq_stats_table, cpu))
		return -EBUSY;
	stat = kzalloc(sizeof(struct cpufreq_stats), GFP_KERNEL);
	if (!stat)
		return -ENOMEM;

	if (prev_stat)
		memcpy(stat, prev_stat, sizeof(*prev_stat));

	data = cpufreq_cpu_get(cpu);
	if (data == NULL) {
		ret = -EINVAL;
		goto error_get_fail;
	}

	ret = sysfs_create_group(&data->kobj, &stats_attr_group);
	if (ret)
		goto error_out;

	stat->cpu = cpu;
	per_cpu(cpufreq_stats_table, cpu) = stat;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		count++;
	}

	alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	alloc_size += count * count * sizeof(int);
#endif
	stat->max_state = count;
	stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
	if (!stat->time_in_state) {
		ret = -ENOMEM;
		goto error_out;
	}
	stat->freq_table = (unsigned int *)(stat->time_in_state + count);

#ifdef CONFIG_EXYNOS_MARCH_DYNAMIC_CPU_HOTPLUG
	if (cpu == 0 && cl0_time_in_state.init_complete == 0) {
		cl0_time_in_state.time_in_state = kzalloc(alloc_size, GFP_KERNEL);
		cl0_time_in_state.freq_table = (unsigned int *)(cl0_time_in_state.time_in_state + count);
	} else if (cpu == 4 && cl1_time_in_state.init_complete == 0) {
		cl1_time_in_state.time_in_state = kzalloc(alloc_size, GFP_KERNEL);
		cl1_time_in_state.freq_table = (unsigned int *)(cl1_time_in_state.time_in_state + count);
	}
#endif

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table = stat->freq_table + count;
#endif
	j = 0;
	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		unsigned int freq = table[i].frequency;
		if (freq == CPUFREQ_ENTRY_INVALID)
			continue;
		if (freq_table_get_index(stat, freq) == -1) {
#ifdef CONFIG_EXYNOS_MARCH_DYNAMIC_CPU_HOTPLUG
			if (cpu == 0 && cl0_time_in_state.init_complete == 0) {
				cl0_time_in_state.freq_table[j] = freq;
			} else if (cpu == 4 && cl1_time_in_state.init_complete == 0) {
				cl1_time_in_state.freq_table[j] = freq;
			}
#endif
			stat->freq_table[j++] = freq;
		}
	}
	stat->state_num = j;

#ifdef CONFIG_EXYNOS_MARCH_DYNAMIC_CPU_HOTPLUG
	if (cpu == 0 && cl0_time_in_state.init_complete == 0) {
		cl0_time_in_state.state_num = stat->state_num;
		cl0_time_in_state.init_complete = 1;
	} else if (cpu == 4 && cl1_time_in_state.init_complete == 0) {
		cl1_time_in_state.state_num = stat->state_num;
		cl1_time_in_state.init_complete = 1;
	}
#endif

	if (prev_stat) {
		memcpy(stat->time_in_state, prev_stat->time_in_state, alloc_size);
		kfree(prev_stat->time_in_state);
		kfree(prev_stat);
		per_cpu(prev_cpufreq_stats_table, cpu) = NULL;
	}

	spin_lock(&cpufreq_stats_lock);
	stat->last_time = get_jiffies_64();
	stat->last_index = freq_table_get_index(stat, policy->cur);
	if ((int)stat->last_index < 0)
		stat->last_index = 0;
	spin_unlock(&cpufreq_stats_lock);
	cpufreq_cpu_put(data);
	return 0;
error_out:
	cpufreq_cpu_put(data);
error_get_fail:
	kfree(stat);
	per_cpu(cpufreq_stats_table, cpu) = NULL;
	return ret;
}
void op_put_policy(struct cpufreq_policy *policy)
{
	cpufreq_cpu_put(policy);
}
Example 21
static int exynos_cpufreq_scale(unsigned int target_freq)
{
	struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
	unsigned int *volt_table = exynos_info->volt_table;
	struct cpufreq_policy *policy = cpufreq_cpu_get(0);
	unsigned int arm_volt, safe_arm_volt = 0;
	unsigned int mpll_freq_khz = exynos_info->mpll_freq_khz;
	struct device *dev = exynos_info->dev;
	unsigned int old_freq;
	int index, old_index;
	int ret = 0;

	if (!policy)
		return -EINVAL;

	old_freq = policy->cur;

	/*
	 * The policy max may have been changed, so we cannot get the proper
	 * old_index with cpufreq_frequency_table_target(). Thus, ignore the
	 * policy and get the index from the raw frequency table.
	 */
	old_index = exynos_cpufreq_get_index(old_freq);
	if (old_index < 0) {
		ret = old_index;
		goto out;
	}

	index = exynos_cpufreq_get_index(target_freq);
	if (index < 0) {
		ret = index;
		goto out;
	}

	/*
	 * The ARM clock source will temporarily be switched from APLL to
	 * MPLL. To support this level, we need to control the regulator for
	 * the required voltage level.
	 */
	if (exynos_info->need_apll_change != NULL) {
		if (exynos_info->need_apll_change(old_index, index) &&
		   (freq_table[index].frequency < mpll_freq_khz) &&
		   (freq_table[old_index].frequency < mpll_freq_khz))
			safe_arm_volt = volt_table[exynos_info->pll_safe_idx];
	}
	arm_volt = volt_table[index];

	/* When the new frequency is higher than the current frequency */
	if ((target_freq > old_freq) && !safe_arm_volt) {
		/* First, raise the voltage before increasing the frequency */
		ret = regulator_set_voltage(arm_regulator, arm_volt, arm_volt);
		if (ret) {
			dev_err(dev, "failed to set cpu voltage to %d\n",
				arm_volt);
			/* drop the policy reference taken above */
			goto out;
		}
	}

	if (safe_arm_volt) {
		ret = regulator_set_voltage(arm_regulator, safe_arm_volt,
				      safe_arm_volt);
		if (ret) {
			dev_err(dev, "failed to set cpu voltage to %d\n",
				safe_arm_volt);
			/* drop the policy reference taken above */
			goto out;
		}
	}

	exynos_info->set_freq(old_index, index);

	/* When the new frequency is lower than the current frequency */
	if ((target_freq < old_freq) ||
	   ((target_freq > old_freq) && safe_arm_volt)) {
		/* lower the voltage after the frequency change */
		ret = regulator_set_voltage(arm_regulator, arm_volt,
				arm_volt);
		if (ret) {
			dev_err(dev, "failed to set cpu voltage to %d\n",
				arm_volt);
			goto out;
		}
	}

out:
	cpufreq_cpu_put(policy);

	return ret;
}
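
exynos_cpufreq_get_index() is assumed above; a sketch that resolves a frequency to its position in the raw table, consistent with how old_index/index are used (assuming exynos_info->freq_table is CPUFREQ_TABLE_END-terminated):

static int exynos_cpufreq_get_index(unsigned int freq)
{
	struct cpufreq_frequency_table *freq_table = exynos_info->freq_table;
	int index;

	for (index = 0;
		freq_table[index].frequency != CPUFREQ_TABLE_END; index++)
		if (freq_table[index].frequency == freq)
			return index;

	/* frequency not present in the raw table */
	return -EINVAL;
}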