Example no. 1
int single_freq_init (actuator_t *act)
{
	int err;
	struct cpufreq_policy *policy;
	struct cpufreq_available_frequencies *freq_list;
	freq_scaler_data_t *data;
	unsigned long freq_min, freq_max;
	
	act->data = data = malloc(sizeof(freq_scaler_data_t));
	fail_if(!data, "cannot allocate freq data block");
	
	err = cpufreq_get_hardware_limits(act->core, &freq_min, &freq_max);
	fail_if(err, "cannot get cpufreq hardware limits");
	act->min = freq_min;
	act->max = freq_max;
	
	policy = cpufreq_get_policy(act->core);
	fail_if(!policy, "cannot get cpufreq policy");
	if (strcmp(policy->governor, "userspace") != 0) {
		err = cpufreq_modify_policy_governor(act->core, "userspace");
		fail_if(err, "cannot modify cpufreq governor");
		/* re-read the policy to confirm the kernel accepted the change */
		cpufreq_put_policy(policy);
		policy = cpufreq_get_policy(act->core);
		fail_if(!policy, "cannot re-read cpufreq policy");
		fail_if(strcmp(policy->governor, "userspace") != 0,
			"cannot set cpufreq policy to userspace");
	}
	cpufreq_put_policy(policy);	/* libcpufreq policies must be released */
	
	freq_list = cpufreq_get_available_frequencies(act->core);
	data->freq_count = create_freq_array(freq_list, &data->freq_array);
	if (freq_list)	/* assuming create_freq_array() copies the entries */
		cpufreq_put_available_frequencies(freq_list);
	fail_if(data->freq_count < 1, "cannot get frequency list");
	
	act->value = act->set_value = cpufreq_get_freq_kernel(act->core);
	data->cur_index = get_freq_index(data, act->value);
	
	return 0;
fail:
	return -1;
}
Example no. 2
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (int)(long)data;	/* CPU id passed through the void * thread argument; cast via long to avoid truncating a pointer */
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event(s->sync_wq, s->pending || kthread_should_stop());

		if (kthread_should_stop())
			break;

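		/* atomically consume the pending request posted by the migration notifier */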
		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;

		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%uKHz >= CPU%d@%uKHz\n",
				 dest_cpu, dest_policy.cur,
				 src_cpu, src_policy.cur);
			continue;
		}

		if (sync_threshold && (dest_policy.cur >= sync_threshold))
			continue;

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold)
			s->boost_min = min(sync_threshold, src_policy.cur);
		else
			s->boost_min = src_policy.cur;

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			s->boost_min = 0;
		}
		put_online_cpus();
	}

	return 0;
}
static void run_boost_migration(unsigned int cpu)
{
	int dest_cpu = cpu;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = false;
	src_cpu = s->src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);

	ret = cpufreq_get_policy(&src_policy, src_cpu);
	if (ret)
		return;

	ret = cpufreq_get_policy(&dest_policy, dest_cpu);
	if (ret)
		return;

	if (src_policy.min == src_policy.cpuinfo.min_freq) {
		pr_debug("No sync. Source CPU%d@%uKHz at min freq\n",
				src_cpu, src_policy.cur);

		return;
	}

	cancel_delayed_work_sync(&s->boost_rem);
	if (sync_threshold)
		s->boost_min = min(sync_threshold, src_policy.cur);
	else
		s->boost_min = src_policy.cur;

	/* Force policy re-evaluation to trigger adjust notifier. */
	get_online_cpus();
	if (cpu_online(src_cpu))
		/*
		 * Send an unchanged policy update to the source
		 * CPU. Even though the policy isn't changed from
		 * its existing boosted or non-boosted state
		 * notifying the source CPU will let the governor
		 * know a boost happened on another CPU and that it
		 * should re-evaluate the frequency at the next timer
		 * event without interference from a min sample time.
		 */
		cpufreq_update_policy(src_cpu);
	if (cpu_online(dest_cpu)) {
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(dest_cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
	} else {
		s->boost_min = 0;
	}
	put_online_cpus();
}
Example no. 4
static void run_boost_migration(unsigned int cpu)
{
	int dest_cpu = cpu;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = false;
	src_cpu = s->src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);

	ret = cpufreq_get_policy(&src_policy, src_cpu);
	if (ret)
		return;

	ret = cpufreq_get_policy(&dest_policy, dest_cpu);
	if (ret)
		return;

	if (src_policy.min == src_policy.cur &&
			src_policy.min <= dest_policy.min) {
		pr_debug("No sync. CPU%d@%uKHz == min freq@%uKHz\n",
			src_cpu, src_policy.cur,
			src_policy.min);
		return;
	}

	cancel_delayed_work_sync(&s->boost_rem);
	if (sync_threshold) {
		if (src_policy.cur >= sync_threshold)
			s->boost_min = sync_threshold;
		else
			s->boost_min = src_policy.cur;
	} else {
		s->boost_min = src_policy.cur;
	}

	/* Force policy re-evaluation to trigger adjust notifier. */
	get_online_cpus();
	if (cpu_online(dest_cpu)) {
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(dest_cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
	} else {
		s->boost_min = 0;
	}
	put_online_cpus();
}
static void run_boost_migration(unsigned int cpu)
{
	int dest_cpu = cpu;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;
	unsigned int req_freq;

	spin_lock_irqsave(&s->lock, flags);
	s->pending = false;
	src_cpu = s->src_cpu;
	spin_unlock_irqrestore(&s->lock, flags);

	ret = cpufreq_get_policy(&src_policy, src_cpu);
	if (ret)
		return;

	ret = cpufreq_get_policy(&dest_policy, dest_cpu);
	if (ret)
		return;

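	/*
	 * Translate the migrated task's load (a percentage) into a frequency
	 * on the destination CPU, but never request less than the source
	 * CPU's current speed.
	 */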
	req_freq = max((dest_policy.max * s->task_load) / 100,
					src_policy.cur);

	if (req_freq <= dest_policy.cpuinfo.min_freq) {
		pr_debug("No sync. Sync Freq:%u\n", req_freq);
		return;
	}

	if (sync_threshold)
		req_freq = min(sync_threshold, req_freq);

	cancel_delayed_work_sync(&s->boost_rem);

	s->boost_min = req_freq;

	/* Force policy re-evaluation to trigger adjust notifier. */
	get_online_cpus();
	if (cpu_online(dest_cpu)) {
		cpufreq_update_policy(dest_cpu);
		queue_delayed_work_on(dest_cpu, cpu_boost_wq,
			&s->boost_rem, msecs_to_jiffies(boost_ms));
	} else {
		s->boost_min = 0;
	}
	put_online_cpus();
}
Example no. 6
static int
syf_pwm_ioctl(struct inode *inode, struct file *file,
              unsigned int cmd, void *arg)
{
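    /*
     * Note: arg is dereferenced directly as a kernel pointer. A robust
     * driver would copy the control block from user space with
     * copy_from_user() instead of trusting the raw ioctl argument.
     */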
    int i;
    unsigned int freq;
    cpu_ctrl_t *cc				= (cpu_ctrl_t *) arg;
    pmu_results_t *r 			= &(cc->pmu);
    unsigned int amt_cpu 		= cc->amt_cpu;
    unsigned int amt_counter 	= cc->amt_counter;

    for (i = 0; i < amt_cpu; ++i) {
        struct syf_info_t *_sinfo = &per_cpu(_syf_info, i);

        switch (cmd) {
        case SYFPWM_TESTING:
            printk(KERN_INFO "TESTING.\n");
            break;

        case SYFPWM_PMU_START:
            pmu_start(amt_counter, cc->evt_t);
            break;

        case SYFPWM_PMU_STOP:
            pmu_stop(amt_counter);
            memcpy(r, &pmu, sizeof(pmu_results_t));
            break;

        case SYFPWM_GET_FEQU:
            mutex_lock(&_syf_mutex);
            cpufreq_get_policy(&_sinfo->cur_policy, i);
            freq = cpufreq_get(_sinfo->cur_policy.cpu);
            mutex_unlock(&_syf_mutex);
            break;

        case SYFPWM_SET_FEQU:
            mutex_lock(&_syf_mutex);
            cpufreq_get_policy(&_sinfo->cur_policy, i);
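            /* __cpufreq_driver_target() returns a status code, not the new frequency */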
            freq = __cpufreq_driver_target(&_sinfo->cur_policy,
                                           (unsigned int) cc->cpu_freq,
                                           CPUFREQ_RELATION_H);
            mutex_unlock(&_syf_mutex);
            break;
        }
    }

    return 0;
}
Example no. 7
static void do_input_boost(struct work_struct *work)
{
	unsigned int i, ret, freq;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	for_each_online_cpu(i) {

		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;

		/* ensure the touch boost frequency never exceeds the max scaling frequency */
		if (input_boost_freq > policy.max)
			freq = policy.max;
		else
			freq = input_boost_freq;

		if (policy.cur >= freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
}
static void screen_off_limit(bool on)
{
	unsigned int i, ret;
	struct cpufreq_policy policy;
	struct ip_cpu_info *l_ip_info;

	/* not active, so exit */
	if (screen_off_max == UINT_MAX)
		return;

	for_each_online_cpu(i) {
		l_ip_info = &per_cpu(ip_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;

		if (on) {
			/* save current instance */
			l_ip_info->curr_max = policy.max;
			policy.max = screen_off_max;
		} else {
			/* restore */
			policy.max = l_ip_info->curr_max;
		}
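		/*
		 * policy is a local copy, so changing policy.max above has no
		 * direct effect; the limit is presumably enforced by this
		 * driver's policy-adjust notifier when cpufreq_update_policy()
		 * re-evaluates the policy.
		 */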
		cpufreq_update_policy(i);
	}
}
Example no. 9
static void do_input_boost(struct work_struct *work)
{
	unsigned int i, ret;
	struct cpufreq_policy policy;

	/*
	 * To avoid concurrency issues, cancel rem_input_boost and
	 * wait for it to finish its work.
	 */
	cancel_delayed_work_sync(&rem_input_boost);

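	/*
	 * boost_freq_buf is assumed to be read by this driver's policy-adjust
	 * notifier, which raises the effective minimum while the boost lasts.
	 */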
	for_each_online_cpu(i) {
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;

		if (policy.cur < input_boost_freq) {
			boost_freq_buf = input_boost_freq;
			cpufreq_update_policy(i);
		}
	}

	queue_delayed_work_on(0, input_boost_wq, 
		&rem_input_boost, msecs_to_jiffies(30));
}
Example no. 10
static void do_input_boost(struct work_struct *work)
{
	unsigned int i, ret;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	get_online_cpus();
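	/* hold the hotplug lock so CPUs cannot go away while per-CPU work is queued */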
	for_each_online_cpu(i) {

		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;
		if (policy.cur >= input_boost_freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = input_boost_freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
	put_online_cpus();
}
static unsigned int get_curr_load(unsigned int cpu)
{
	int ret;
	unsigned int idle_time, wall_time;
	unsigned int cur_load;
	u64 cur_wall_time, cur_idle_time;
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	struct cpufreq_policy policy;

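	/*
	 * Note: the return type is unsigned, so a failed policy read below
	 * escapes as -EINVAL cast to a large unsigned value; callers are
	 * assumed to range-check the result.
	 */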
	ret = cpufreq_get_policy(&policy, cpu);
	if (ret)
		return -EINVAL;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;
	return cur_load;
}
Example no. 12
static int do_new_policy(unsigned int cpu, struct cpufreq_policy *new_pol)
{
	struct cpufreq_policy *cur_pol = cpufreq_get_policy(cpu);
	int ret;

	if (!cur_pol) {
		printf(_("wrong, unknown or unhandled CPU?\n"));
		return -EINVAL;
	}

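	/* fields the caller left at zero/NULL inherit the current policy's values */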
	if (!new_pol->min)
		new_pol->min = cur_pol->min;

	if (!new_pol->max)
		new_pol->max = cur_pol->max;

	if (!new_pol->governor)
		new_pol->governor = cur_pol->governor;

	ret = cpufreq_set_policy(cpu, new_pol);

	cpufreq_put_policy(cur_pol);

	return ret;
}
Example no. 13
static int cpu_has_cpufreq(unsigned int cpu)
{
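	/* a successful policy read means cpufreq is active on this CPU */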
	struct cpufreq_policy policy;
	if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
		return 0;
	return 1;
}
Example no. 14
static int set_cpufreq(int cpu, int min_freq, int max_freq)
{
	int ret;
	struct cpufreq_policy policy;

	pr_debug("set cpu freq: min %d, max %d\n", min_freq, max_freq);

	ret = cpufreq_get_policy(&policy, cpu);
	if (ret < 0) {
		pr_err("usecase-gov: failed to read policy\n");
		return ret;
	}

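	/*
	 * If the new range does not overlap the current one, move one bound
	 * first so that min <= max holds at every intermediate step.
	 */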
	if (policy.min > max_freq) {
		ret = cpufreq_update_freq(cpu, min_freq, policy.max);
		if (ret)
			pr_err("usecase-gov: update min cpufreq failed (1)\n");
	}
	if (policy.max < min_freq) {
		ret = cpufreq_update_freq(cpu, policy.min, max_freq);
		if (ret)
			pr_err("usecase-gov: update max cpufreq failed (2)\n");
	}

	ret = cpufreq_update_freq(cpu, min_freq, max_freq);
	if (ret)
		pr_err("usecase-gov: update min-max cpufreq failed\n");

	return ret;
}
static int update_cpu_max_freq(int cpu, int throttled_bin, unsigned temp)
{
	int ret;
	int max_frequency = max_freq(throttled_bin);

	ret = msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT, max_frequency);
	if (ret)
		return ret;

	ret = cpufreq_update_policy(cpu);
	if (ret)
		return ret;

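	/* when a throttle limit is set, push the frequency down immediately
	 * instead of waiting for the governor's next evaluation */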
	if (max_frequency != MSM_CPUFREQ_NO_LIMIT) {
		struct cpufreq_policy policy;

		if ((ret = cpufreq_get_policy(&policy, cpu)) == 0)
			ret = cpufreq_driver_target(&policy, max_frequency, CPUFREQ_RELATION_L);
	}

	if (max_frequency != MSM_CPUFREQ_NO_LIMIT)
		pr_info("msm_thermal: limiting cpu%d max frequency to %d at %u degC\n",
				cpu, max_frequency, temp);
	else
		pr_info("msm_thermal: Max frequency reset for cpu%d at %u degC\n", cpu, temp);

	return ret;
}
Example no. 16
static int cd_set_cur_state(struct thermal_cooling_device *cdev,
							   unsigned long state)
{
	struct thermal_freq *therm = cdev->devdata;
	struct cpufreq_policy policy;
	int trip, i;

	if (!state && therm->state) {
		if (cpufreq_get_policy(&policy, 0))
			return -EINVAL;
		maximum_freq = policy.cpuinfo.max_freq;
		therm->current_trip = -1;
		therm->state = 0;
		therm->tdev->polling_delay = therm->idle_polling_delay;
		cpufreq_update_policy(0);

		return 0;
	}

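	/* find the deepest trip point at or below the current temperature (-1 if none) */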
	trip = -1;
	therm->state = state;
	for (i = 0; i < therm->trip_count; i++) {
		if (therm->current_temp < therm->trip_table[i].temp)
			break;
		trip = i;
	}

	if (trip == therm->current_trip)
		return 0;

	if (cpufreq_get_policy(&policy, 0))
		return -EINVAL;

	if (trip < 0) {
		therm->tdev->polling_delay = therm->idle_polling_delay;
		maximum_freq = policy.cpuinfo.max_freq;
	} else {
		therm->tdev->polling_delay =
					 therm->trip_table[trip].polling_interval;
		maximum_freq = therm->trip_table[trip].freq;
	}
	cpufreq_update_policy(0);
	therm->current_trip = trip;

	return 0;
}
void cpufreq_save_default_governor(void)
{
	int ret;
	struct cpufreq_policy current_policy;

	ret = cpufreq_get_policy(&current_policy, 0);
	if (ret < 0) {
		pr_err("%s: cpufreq_get_policy failed\n", __func__);
		return;
	}
	/* governor->name is only CPUFREQ_NAME_LEN bytes; do not copy past it */
	strlcpy(cpufreq_gov_default, current_policy.governor->name,
		CPUFREQ_NAME_LEN);
}
/*
 * Userspace sends cpu#:min_freq_value to vote for min_freq_value as the new
 * scaling_min. To withdraw its vote it needs to enter cpu#:0
 */
static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
{
	int i, j, ntokens = 0;
	unsigned int val, cpu;
	const char *cp = buf;
	struct cpu_status *i_cpu_stats;
	struct cpufreq_policy policy;
	cpumask_var_t limit_mask;
	int ret;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	/* CPU:value pair */
	if (!(ntokens % 2))
		return -EINVAL;

	cp = buf;
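	/*
	 * Note: with CONFIG_CPUMASK_OFFSTACK=y, limit_mask would need
	 * alloc_cpumask_var() before use; this code assumes an on-stack mask.
	 */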
	cpumask_clear(limit_mask);
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return -EINVAL;
		if (cpu > (num_present_cpus() - 1))
			return -EINVAL;

		i_cpu_stats = &per_cpu(cpu_stats, cpu);

		i_cpu_stats->min = val;
		cpumask_set_cpu(cpu, limit_mask);

		cp = strchr(cp, ' ');
		cp++;
	}

	/*
	 * Since on synchronous systems policy is shared amongst multiple
	 * CPUs only one CPU needs to be updated for the limit to be
	 * reflected for the entire cluster. We can avoid updating the policy
	 * of other CPUs in the cluster once it is done for at least one CPU
	 * in the cluster
	 */
	get_online_cpus();
	for_each_cpu(i, limit_mask) {
		i_cpu_stats = &per_cpu(cpu_stats, i);

		if (cpufreq_get_policy(&policy, i))
			continue;

		if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
			ret = cpufreq_update_policy(i);
			if (ret)
				continue;
		}
		for_each_cpu(j, policy.related_cpus)
			cpumask_clear_cpu(j, limit_mask);
	}
	put_online_cpus();

	return 0;
}
Example no. 19
static void read_governors(void) {
  unsigned int cpu;
  for ( cpu=0; cpu<ncpu; ++cpu ) {
    struct cpufreq_policy* policy = cpufreq_get_policy(cpu);
    if (policy) {
      strcpy(governor[cpu], policy->governor);
      cpufreq_put_policy(policy);
    } else {
      strcpy(governor[cpu], empty);
    }
  }
}
static gboolean
cpufreq_monitor_libcpufreq_run (CPUFreqMonitor *monitor)
{
    guint          cpu;
    CPUFreqPolicy *policy;

    g_object_get (G_OBJECT (monitor), "cpu", &cpu, NULL);

    policy = cpufreq_get_policy (cpu);
    if (!policy) {
        /* Check whether it failed because
         * cpu is not online.
         */
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0)
        if (!cpufreq_cpu_exists (cpu)) {
#else
        if (cpupower_is_cpu_online (cpu)) {
#endif
            g_object_set (G_OBJECT (monitor), "online", FALSE, NULL);
            return TRUE;
        }
        return FALSE;
    }

    g_object_set (G_OBJECT (monitor),
                  "online", TRUE,
                  "governor", policy->governor,
                  "frequency", cpufreq_get_freq_kernel (cpu),
                  NULL);

    cpufreq_put_policy (policy);

    return TRUE;
}

static gint
compare (gconstpointer a, gconstpointer b)
{
    gint aa, bb;

    aa = atoi ((gchar *) a);
    bb = atoi ((gchar *) b);

    if (aa == bb)
        return 0;
    else if (aa > bb)
        return -1;
    else
        return 1;
}
Example no. 21
static void wakeup_boost(void)
{
	unsigned int cpu, ret;
	struct cpufreq_policy policy;

	for_each_online_cpu(cpu) {
		ret = cpufreq_get_policy(&policy, cpu);
		if (ret)
			continue;

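		/*
		 * policy is a local copy; this assignment alone changes nothing.
		 * The wakeup boost is presumably applied by a governor or
		 * notifier once cpufreq_update_policy() re-evaluates the policy.
		 */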
		policy.cur = policy.max;
		cpufreq_update_policy(cpu);
	}
}
static int set_cpu_min_freq(const char *buf, const struct kernel_param *kp)
{
	int i, j, ntokens = 0;
	unsigned int val, cpu;
	const char *cp = buf;
	struct cpu_status *i_cpu_stats;
	struct cpufreq_policy policy;
	cpumask_var_t limit_mask;
	int ret;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens % 2))
		return -EINVAL;

	cp = buf;
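	/*
	 * Note: with CONFIG_CPUMASK_OFFSTACK=y, limit_mask would need
	 * alloc_cpumask_var() before use; this code assumes an on-stack mask.
	 */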
	cpumask_clear(limit_mask);
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return -EINVAL;
		if (cpu > (num_present_cpus() - 1))
			return -EINVAL;

		i_cpu_stats = &per_cpu(cpu_stats, cpu);

		i_cpu_stats->min = val;
		cpumask_set_cpu(cpu, limit_mask);

		cp = strchr(cp, ' ');
		cp++;
	}

	get_online_cpus();
	for_each_cpu(i, limit_mask) {
		i_cpu_stats = &per_cpu(cpu_stats, i);

		if (cpufreq_get_policy(&policy, i))
			continue;

		if (cpu_online(i) && (policy.min != i_cpu_stats->min)) {
			ret = cpufreq_update_policy(i);
			if (ret)
				continue;
		}
		for_each_cpu(j, policy.related_cpus)
			cpumask_clear_cpu(j, limit_mask);
	}
	put_online_cpus();

	return 0;
}
Example no. 23
static int __init msm_rq_stats_init(void)
{
	int cpu = 0;
	struct cpufreq_policy cpu_policy;
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);

	if (cpufreq_get_policy(&cpu_policy, cpu))
		return -EINVAL;	/* don't read an uninitialized policy */

	pcpu->policy_max = cpu_policy.max;
	pcpu->cur_freq = acpuclk_get_rate(cpu);

	cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);

	return 0;
}
Example no. 24
JNIEXPORT jstring JNICALL Java_platforms_x86_X86_1DVFS_getGovernor
  (JNIEnv *env, jobject obj, jint cpu)
{
	struct cpufreq_policy *policy;
	jstring governorName = NULL;	/* return NULL when no policy is available */

	policy = cpufreq_get_policy((unsigned long)cpu);

	if (policy) {
		governorName = (*env)->NewStringUTF(env, policy->governor);
		cpufreq_put_policy(policy);	/* release the policy allocated by libcpufreq */
	}

	return governorName;
}
Example no. 25
static int save_before_settings(void) {
    unsigned int i, j;
    assert(saved_policies == NULL);
    saved_policies = calloc(num_cpus, sizeof(*saved_policies));
    if (saved_policies == NULL)
        return ENOMEM;
    for (i = 0; i < num_cpus; i++) {
        saved_policies[i] = cpufreq_get_policy(i);
        if (saved_policies[i] == NULL) {
            /* release the policies fetched so far before bailing out */
            for (j = 0; j < i; j++)
                cpufreq_put_policy(saved_policies[j]);
            free(saved_policies);
            saved_policies = NULL;
            return EACCES;
        }
    }
    return 0;
}
Example no. 26
static int update_average_load(unsigned int freq, unsigned int cpu)
{
	struct cpufreq_policy cpu_policy;
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load, load_at_max_freq;

	if (cpufreq_get_policy(&cpu_policy, cpu))
		return -EINVAL;

	/*
	 * If the user changes the max frequency, this load calculator
	 * needs to adjust itself; otherwise its output would be wrong.
	 */
	if (unlikely(pcpu->policy_max != cpu_policy.max))
		pcpu->policy_max = cpu_policy.max;

	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time);
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	if (idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	cur_load = 100 * (wall_time - idle_time) / wall_time;

	/* Calculate the scaled load across CPU */
	load_at_max_freq = (cur_load * freq) / pcpu->policy_max;

	/* average the new sample with the previous window's load */
	pcpu->avg_load_maxfreq = pcpu->prev_avg_load_maxfreq + load_at_max_freq;
	pcpu->avg_load_maxfreq /= 2;
	pcpu->window_size = wall_time;

	return 0;
}
Example no. 27
static int set_policy(int ncpus) {
	int cpu;

	for (cpu = 0; cpu < ncpus; cpu++) {
		struct cpufreq_policy *policy = cpufreq_get_policy(cpu);
		if (!policy)
			return -EINVAL;

		/* policy->governor is a string: compare with strcmp, not != */
		if (strcmp(policy->governor, "userspace") != 0) {
			cpufreq_modify_policy_governor(cpu, "userspace");
			/* note: this still prints the governor read before the change */
			printf("%lu %lu %s\n", policy->min, policy->max, policy->governor);
		}
		cpufreq_put_policy(policy);
	}
	return 0;
}
Example no. 28
static ssize_t
acpi_processor_write_performance(struct file *file,
				 const char __user * buffer,
				 size_t count, loff_t * data)
{
	int result = 0;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct acpi_processor *pr = (struct acpi_processor *)m->private;
	struct acpi_processor_performance *perf;
	char state_string[12] = { '\0' };
	unsigned int new_state = 0;
	struct cpufreq_policy policy;

	ACPI_FUNCTION_TRACE("acpi_processor_write_performance");

	if (!pr || (count > sizeof(state_string) - 1))
		return_VALUE(-EINVAL);

	perf = pr->performance;
	if (!perf)
		return_VALUE(-EINVAL);

	if (copy_from_user(state_string, buffer, count))
		return_VALUE(-EFAULT);

	state_string[count] = '\0';
	new_state = simple_strtoul(state_string, NULL, 0);

	if (new_state >= perf->state_count)
		return_VALUE(-EINVAL);

	if (cpufreq_get_policy(&policy, pr->id))
		return_VALUE(-ENODEV);

	policy.cpu = pr->id;
	policy.min = perf->states[new_state].core_frequency * 1000;
	policy.max = perf->states[new_state].core_frequency * 1000;

	result = cpufreq_set_policy(&policy);
	if (result)
		return_VALUE(result);

	return_VALUE(count);
}
/**ltl
 * Purpose: write handler for /proc/acpi/processor/CPU1/performance. It changes
 *          the CPU's performance level; the value written is the index of the
 *          target performance state.
 * Parameters:
 * Return value:
 * Notes:
 */
static ssize_t
acpi_processor_write_performance(struct file *file,
				 const char __user * buffer,
				 size_t count, loff_t * data)
{
	int result = 0;
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct acpi_processor *pr = (struct acpi_processor *)m->private;
	struct acpi_processor_performance *perf;
	char state_string[12] = { '\0' };
	unsigned int new_state = 0;
	struct cpufreq_policy policy;


	if (!pr || (count > sizeof(state_string) - 1))
		return -EINVAL;

	perf = pr->performance;
	if (!perf)
		return -EINVAL;

	if (copy_from_user(state_string, buffer, count))
		return -EFAULT;

	state_string[count] = '\0';
	new_state = simple_strtoul(state_string, NULL, 0); /* new performance state index */

	if (new_state >= perf->state_count)
		return -EINVAL;
	/* fetch the cpufreq policy for this CPU; bail out if it is unavailable */
	if (cpufreq_get_policy(&policy, pr->id))
		return -ENODEV;
	/* CPU id */
	policy.cpu = pr->id;
	/* pin both policy bounds to the selected state's frequency (in kHz) */
	policy.min = perf->states[new_state].core_frequency * 1000;
	policy.max = perf->states[new_state].core_frequency * 1000;
	/* apply the new cpufreq policy */
	result = cpufreq_set_policy(&policy);
	if (result)
		return result;

	return count;
}
Example no. 30
static void __ref tplug_boost_work_fn(struct work_struct *work)
{
	struct cpufreq_policy policy;
	int cpu, ret;

	for (cpu = 1; cpu < NR_CPUS; cpu++) {
#ifdef CONFIG_SCHED_HMP
		if (tplug_hp_style == 1)
#else
		if (tplug_hp_enabled == 1)
#endif
			if (cpu_is_offline(cpu))
				cpu_up(cpu);

		ret = cpufreq_get_policy(&policy, cpu);
		if (ret)
			continue;
		old_policy[cpu] = policy;
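		/*
		 * policy is a stack copy: raising policy.min here does not by
		 * itself alter the live policy; cpufreq_update_policy()
		 * re-evaluates from the stored user policy, so the boost is
		 * assumed to be applied via a notifier.
		 */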
		policy.min = policy.max;
		cpufreq_update_policy(cpu);
	}
	if (stop_boost == 0)
		queue_delayed_work_on(0, tplug_boost_wq, &tplug_boost,
				msecs_to_jiffies(10));
}