static void cpufreq_interactive_timer_resched(unsigned long cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp, io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = round_to_nw_start(pcpu->last_evaluated_jiffy);
	del_timer(&pcpu->cpu_timer);
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);

	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		del_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
Example No. 2
static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input, j;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == cs_tuners.ignore_nice) /* nothing to do */
		return count;

	cs_tuners.ignore_nice = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct cs_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
						&dbs_info->cdbs.prev_cpu_wall);
		if (cs_tuners.ignore_nice)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
	}
	return count;
}
/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu, int time_override)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long flags;
	unsigned long expires;
	if (time_override)
		expires = jiffies + time_override;
	else
		expires = jiffies + usecs_to_jiffies(timer_rate);

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp, io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
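The comment above states a locking contract but does not enforce it in code: callers must hold the per-CPU enable_sem for writing and must have stopped both timers first. A minimal sketch of that call pattern, assuming the usual interactive-governor layout where struct cpufreq_interactive_cpuinfo carries an enable_sem rw_semaphore (the helper name below is hypothetical):

/* Hypothetical helper illustrating the contract documented above:
 * take enable_sem for writing, quiesce both timers, then restart them.
 */
static void restart_sample_timers(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);

	down_write(&pcpu->enable_sem);		/* exclude the timer handlers */
	del_timer_sync(&pcpu->cpu_timer);	/* both timers must be idle */
	del_timer_sync(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer_start(cpu, 0);	/* 0: use the default timer_rate */
	up_write(&pcpu->enable_sem);
}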
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires;
	unsigned long flags;
	u64 now = ktime_to_us(ktime_get());

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				     &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(pcpu->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (pcpu->timer_slack_val >= 0 &&
	    (pcpu->target_freq > pcpu->policy->min ||
		(pcpu->target_freq == pcpu->policy->min &&
		 now < boostpulse_endtime))) {
		expires += usecs_to_jiffies(pcpu->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
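update_load() only accumulates frequency-weighted active time into cputime_speedadj; converting that accumulator into an actual load figure happens later in the governor's sampling path. A minimal sketch of that conversion, assuming the usual interactive-governor fields (the helper name is hypothetical, and the delta is assumed to fit in 32 bits as it does for microsecond-scale sampling windows):

/* Hypothetical helper: turn the cputime_speedadj accumulator into a load
 * percentage relative to the CPU's current target frequency.
 */
static unsigned int speedadj_to_load(struct cpufreq_interactive_cpuinfo *pcpu,
				     u64 now)
{
	u64 cputime_speedadj = pcpu->cputime_speedadj;
	u64 delta_time = now - pcpu->cputime_speedadj_timestamp;

	if (WARN_ON_ONCE(!delta_time))
		return 0;

	do_div(cputime_speedadj, delta_time);	/* average busy frequency (kHz) */
	return (unsigned int)cputime_speedadj * 100 / pcpu->target_freq;
}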
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires = round_to_nw_start(pcpu->last_evaluated_jiffy);
	unsigned long flags;
	u64 now = ktime_to_us(ktime_get());

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 &&
	    (pcpu->target_freq > pcpu->policy->min ||
		(pcpu->target_freq == pcpu->policy->min &&
		 now < boostpulse_endtime))) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp, io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
static unsigned int get_curr_load(unsigned int cpu)
{
	int ret;
	unsigned int idle_time, wall_time;
	unsigned int cur_load;
	u64 cur_wall_time, cur_idle_time;
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	struct cpufreq_policy policy;

	ret = cpufreq_get_policy(&policy, cpu);
	if (ret)
		return -EINVAL;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;
	return cur_load;
}
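The pattern in get_curr_load() recurs across most of these examples: sample idle and wall time, difference them against the previous samples, and take the busy fraction of the window. As a quick worked illustration with made-up numbers, if 100000 µs of wall time elapsed since the previous sample and 75000 µs of it was idle, the load is 100 * (100000 - 75000) / 100000 = 25.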
Example No. 9
static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
		const char *buf, size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;

	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	if (input > 1)
		input = 1;

	if (input == od_tuners->ignore_nice_load) { /* nothing to do */
		return count;
	}
	od_tuners->ignore_nice_load = input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info;
		dbs_info = &per_cpu(od_cpu_dbs_info, j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
		if (od_tuners->ignore_nice_load)
			dbs_info->cdbs.prev_cpu_nice =
				kcpustat_cpu(j).cpustat[CPUTIME_NICE];

	}
	return count;
}
/*
 * Calculate the CPU load and work out the frequency to adjust to.
 */
unsigned int cpufreq_calccpu_result(u32 *nextfreq)
{
    u32 max_load_cpu = 0;
    struct cpufreq_policy *policy;
    u32 idle_time = 0, wall_time = 0;
    cputime64_t cur_wall_time = 0;
    cputime64_t cur_idle_time = 0;
    struct cpu_dbs_info_s *dbs_info;
    dbs_info = &per_cpu(g_acpu_dbs_info, 0);
    policy = dbs_info->cur_policy;

    cur_idle_time = get_cpu_idle_time(0, &cur_wall_time);

    idle_time = (u32)(cur_idle_time - dbs_info->prev_cpu_idle);
    wall_time = (u32)(cur_wall_time - dbs_info->prev_cpu_wall);

    dbs_info->prev_cpu_idle = cur_idle_time;
    dbs_info->prev_cpu_wall = cur_wall_time;

    /* get the CPU load; guard against a zero wall_time window */
    max_load_cpu = (wall_time == 0) ?
                   0 : (100 * (wall_time - idle_time) / wall_time);
    g_ulACpuload = max_load_cpu;

    /* Check for frequency increase or decrease*/
    if (max_load_cpu > dbs_tuners_ins.up_threshold)
    {
        dbs_info->cpu_up_time++;
        if (dbs_tuners_ins.up_threshold_times == dbs_info->cpu_up_time)
        {
            dbs_info->cpu_down_time = 0;
            dbs_info->cpu_up_time = 0;
            *nextfreq = policy->max;
            return CPUFREQ_RELATION_H;
        }
        return DFS_PROFILE_NOCHANGE;
    }
    if (max_load_cpu < dbs_tuners_ins.down_threshold)
    {
        dbs_info->cpu_down_time++;
        if (dbs_tuners_ins.down_threshold_times == dbs_info->cpu_down_time)
        {
            dbs_info->cpu_down_time = 0;
            dbs_info->cpu_up_time = 0;
            if (0 == max_load_cpu)
            {
                max_load_cpu = 1;
            }
            *nextfreq = (max_load_cpu * policy->cur) / dbs_tuners_ins.down_threshold;
            return CPUFREQ_RELATION_L;
        }
        return DFS_PROFILE_NOCHANGE;
    }
    *nextfreq = 0;
    dbs_info->cpu_down_time = 0;
    dbs_info->cpu_up_time = 0;
    return DFS_PROFILE_NOCHANGE;
}
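To make the down-scaling arithmetic above concrete (hypothetical numbers): with down_threshold = 20, a measured load of 10% sustained for down_threshold_times samples, and policy->cur = 800000 kHz, the requested frequency becomes 10 * 800000 / 20 = 400000 kHz, i.e. the target drops in proportion to how far the load sits below the threshold, while a load above up_threshold for up_threshold_times samples jumps straight to policy->max.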
Example No. 11
/*
 * sysfs interface to CPU idle counts
 */
static ssize_t
sysfs_show_idle_count(struct sys_device *dev, char *buf)
{
	char *curr = buf;

	curr += sprintf(curr, "sdma_clk usecount: %d\n", clk_get_usecount(sdma_clk));
	curr += sprintf(curr, "usb_ahb_clk usecount: %d\n", clk_get_usecount(usb_ahb_clk));
	curr += sprintf(curr, "emi_clk_gating_count: %d\n", emi_zero_count);
	curr += sprintf(curr, "ipu_clk usecount: %d\n", clk_get_usecount(ipu_clk));
//	curr += sprintf(curr, "Internal SD DMA: %d\n", sd_turn_of_dma);
	curr += sprintf(curr, "peri_pll_zero_count: %d\n", peri_pll_zero);
	curr += sprintf(curr, "emi_clk: %d\n", clk_get_usecount(emi_clk));
	curr += sprintf(curr, "idle_time: %llu us\n", get_cpu_idle_time(0));
	curr += sprintf(curr, "\n");
	return curr - buf;
}
Example No. 12
static int update_average_load(unsigned int freq, unsigned int cpu)
{
	struct cpufreq_policy cpu_policy;
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load, load_at_max_freq;

	cpufreq_get_policy(&cpu_policy, cpu);

	/* if max freq is changed by the user, this load calculator
	   needs to adjust itself, otherwise it's going to be all wrong */
	if (unlikely(pcpu->policy_max != cpu_policy.max))
		pcpu->policy_max = cpu_policy.max;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	if (idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;

	/* Calculate the scaled load across CPU */
	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;

	/* This is the first sample in this window*/
	pcpu->avg_load_maxfreq = pcpu->prev_avg_load_maxfreq + load_at_max_freq;
	pcpu->avg_load_maxfreq /= 2;
	pcpu->window_size = wall_time;

	return 0;
}
/*lint --e{550}*/
unsigned int cpufreq_calccpu_cpuload(void)
{
    u32 idle_time = 0;
    u32 wall_time = 0;
    unsigned int cpu_load = 0;
    cputime64_t cur_wall_time = 0;
    cputime64_t cur_idle_time = 0;
    struct cpu_dbs_info_s *dbs_info;
    dbs_info = &per_cpu(g_acpu_dbs_info, 0);

    cur_idle_time = get_cpu_idle_time(0, &cur_wall_time);
    idle_time = (u32)(cur_idle_time - dbs_info->prev_cpu_idle);
    wall_time = (u32)(cur_wall_time - dbs_info->prev_cpu_wall);

    cpu_load = (wall_time == 0) ?
               0 : (100 * (wall_time - idle_time) / wall_time);

    return cpu_load;
}
/*lint --e{718,746}*/
unsigned int cpufreq_calccpu_load_netif(void)
{
    struct cpu_dbs_info_s *dbs_info;
    cputime64_t cur_wall_time = 0;
    cputime64_t cur_idle_time = 0;
    u32 idle_time = 0, wall_time = 0;
    unsigned int load = 0;

    dbs_info = &per_cpu(g_netif_dbs_info, 0);
    cur_idle_time = get_cpu_idle_time(0, &cur_wall_time);

    idle_time = (u32)(cur_idle_time - dbs_info->prev_cpu_idle);
    wall_time = (u32)(cur_wall_time - dbs_info->prev_cpu_wall);

    dbs_info->prev_cpu_idle = cur_idle_time;
    dbs_info->prev_cpu_wall = cur_wall_time;

    load = (wall_time == 0) ?
           0 : (unsigned int)(100 * (wall_time - idle_time) / wall_time);
    return load;
}
Example No. 15
static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
		size_t count)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	unsigned int input;
	int ret;
	unsigned int j;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	od_tuners->io_is_busy = !!input;

	/* we need to re-evaluate prev_cpu_idle */
	for_each_online_cpu(j) {
		struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
									j);
		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
	}
	return count;
}
static unsigned int get_delta_cpu_load_and_update(unsigned int cpu)
{
	u64 cur_wall_time, cur_idle_time;
	unsigned int wall_time, idle_time;
	struct cp_cpu_info *l_cp_info = &per_cpu(cp_info, cpu);

	/* last parameter 0 means that IO wait is considered idle */
	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);

	wall_time = (unsigned int)
		(cur_wall_time - l_cp_info->prev_cpu_wall);
	l_cp_info->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int)
		(cur_idle_time - l_cp_info->prev_cpu_idle);
	l_cp_info->prev_cpu_idle = cur_idle_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 100;
	else
		return 100 * (wall_time - idle_time) / wall_time;
}
Example No. 17
static unsigned int calc_cur_load(unsigned int cpu)
{
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	u64 cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	if (ignore_nice) {
		u64 cur_nice;
		unsigned long cur_nice_jiffies;

		cur_nice = kcpustat_cpu(cpu).cpustat[CPUTIME_NICE] - pcpu->prev_cpu_nice;
		cur_nice_jiffies = (unsigned long) cputime64_to_jiffies64(cur_nice);

		pcpu->prev_cpu_nice = kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

		idle_time += jiffies_to_usecs(cur_nice_jiffies);
	}

	if (io_is_busy && idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	return 100 * (wall_time - idle_time) / wall_time;
}
static s32 cpufreq_governor_dbs(struct cpufreq_policy *policy, u32 event)
{
    s32 cpu = (s32)policy->cpu;
    struct cpu_dbs_info_s *dbs_info = NULL;
    u32 retValue = 0;
    ST_PWC_SWITCH_STRU cpufreq_control_nv = {0};
    /* per-CPU info */
    dbs_info = &per_cpu(g_acpu_dbs_info, (u32)cpu);
    /*lint --e{744 } */
    switch (event) {
    case CPUFREQ_GOV_START:
        cpufreq_debug("CPUFREQ_GOV_START\n");
        mutex_lock(&dbs_mutex);

        dbs_enable++;

        /* initialize the per-CPU info: take the idle_time baseline */
        dbs_info->prev_cpu_idle = get_cpu_idle_time(0,
                                  &dbs_info->prev_cpu_wall);
        dbs_info->cur_policy = policy;
        dbs_info->cpu = cpu;
        dbs_info->freq_table = cpufreq_frequency_get_table((u32)cpu);
        dbs_info->cpu_down_time = 0;
        dbs_info->cpu_up_time = 0;
        retValue = bsp_nvm_read(NV_ID_DRV_NV_PWC_SWITCH,(u8*)&cpufreq_control_nv,sizeof(ST_PWC_SWITCH_STRU));
        if (NV_OK == retValue)
        {
            g_cpufreq_lock_status_flag = cpufreq_control_nv.dfs;
        }
        else
        {
            cpufreq_err("read nv failed %d\n", retValue);
        }

        if (1 == dbs_enable) {
            retValue = bsp_nvm_read(NV_ID_DRV_NV_DFS_SWITCH,(u8*)&g_stDfsSwitch,sizeof(ST_PWC_DFS_STRU));
            if (NV_OK != retValue)
            {
                cpufreq_err("read nv failed use default value\n");
                g_stDfsSwitch.AcpuDownLimit = 20;
                g_stDfsSwitch.AcpuDownNum = 3;
                g_stDfsSwitch.AcpuUpLimit = 80;
                g_stDfsSwitch.AcpuUpNum = 1;
                g_stDfsSwitch.DFSTimerLen = 400;
            }

            dbs_tuners_ins.up_threshold = g_stDfsSwitch.AcpuUpLimit;
            dbs_tuners_ins.down_threshold = g_stDfsSwitch.AcpuDownLimit;
            dbs_tuners_ins.down_threshold_times = g_stDfsSwitch.AcpuDownNum;
            dbs_tuners_ins.up_threshold_times = g_stDfsSwitch.AcpuUpNum;
            dbs_tuners_ins.sampling_rate = g_stDfsSwitch.DFSTimerLen * 10000; /*unit:us*/
            /*
             * Start the timer schedule work when this governor
             * is used for the first time
             */

            register_icc_for_cpufreq();

            dbs_timer_init(dbs_info);
        }
        mutex_unlock(&dbs_mutex);
        break;

    case CPUFREQ_GOV_STOP:
        dbs_timer_exit(dbs_info);

        mutex_lock(&dbs_mutex);
        dbs_enable--;
        mutex_unlock(&dbs_mutex);
        break;

    case CPUFREQ_GOV_LIMITS:

        mutex_lock(&info_mutex);
        dbs_info->cpu_down_time = 0;
        dbs_info->cpu_up_time = 0;
        mutex_unlock(&info_mutex);
        if (policy->max < dbs_info->cur_policy->cur)
            __cpufreq_driver_target(dbs_info->cur_policy,
                                    policy->max, CPUFREQ_RELATION_H);
        else if (policy->min > dbs_info->cur_policy->cur)
            __cpufreq_driver_target(dbs_info->cur_policy,
                                    policy->min, CPUFREQ_RELATION_L);

        break;
    }
    return 0;
}